-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathscraper.py
More file actions
141 lines (126 loc) · 5.47 KB
/
scraper.py
File metadata and controls
141 lines (126 loc) · 5.47 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
import csv
import json
import time

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.options import Options as EdgeOptions
from selenium.webdriver.chrome.service import Service as Service
from selenium.webdriver.common.by import By
from selenium.webdriver.edge.options import Options as MsEdgeOptions
from selenium.webdriver.edge.service import Service as EdgeService
# ========== Scraper Function ==========
def scrape_pickaboo_product(driver, product_url):
    """Scrape a single Pickaboo product page and return its fields as a dict.

    Parameters
    ----------
    driver : selenium.webdriver
        An already-started driver; this function navigates it to *product_url*.
    product_url : str
        The product page URL to load.

    Returns
    -------
    dict
        Flat mapping of product fields. ``reviews`` is a JSON-encoded string
        so the whole record can be written directly as one CSV row.
    """
    driver.get(product_url)
    time.sleep(3)  # crude wait for client-side rendering — TODO: prefer WebDriverWait

    def safe_get_text(by, selector, default="N/A"):
        # Return the stripped text of the first match, or *default* if absent.
        try:
            return driver.find_element(by, selector).text.strip()
        except NoSuchElementException:
            return default

    def safe_get_element(by, selector):
        # Return the first matching element, or None if absent.
        try:
            return driver.find_element(by, selector)
        except NoSuchElementException:
            return None

    product_name = safe_get_text(By.CLASS_NAME, "TitleH1__StyledTitleH1-sc-3my69t-0")
    brand = safe_get_text(By.XPATH, "//div[contains(text(), 'Brand:')]/span")
    discounted_price = safe_get_text(By.CSS_SELECTOR, 'h2.jyJxBK.title')
    # When no separate listed price exists, fall back to the discounted price.
    listed_price = safe_get_text(By.CSS_SELECTOR, 'h2.cIqnho.title', discounted_price)
    discount_percent = safe_get_text(By.CSS_SELECTOR, 'div.discount-div span', "0%")
    warranty_info = safe_get_text(By.CSS_SELECTOR, "p.p-warranty > span", "No warranty info")
    availability = "Out of stock" if driver.find_elements(By.CLASS_NAME, "out-of-stock") else "In stock"
    seller_name = safe_get_text(By.CSS_SELECTOR, "div.desktop-view > span[style*='cursor: pointer']", "Pickaboo")
    average_rating = safe_get_text(By.CSS_SELECTOR, "div.rating-div span", "No rating")
    rating_count = safe_get_text(By.CSS_SELECTOR, "h2.gcHBkv", "0").strip("()")

    # Record the resolved URL (after any redirects) without shadowing the
    # *product_url* parameter as the original code did.
    final_url = driver.current_url

    image_url = ""
    image_element = safe_get_element(By.CLASS_NAME, "image-clickable-wrapper")
    if image_element:
        try:
            image_url = image_element.find_element(By.TAG_NAME, "img").get_attribute("src")
        except NoSuchElementException:  # narrowed from bare except (kept KeyboardInterrupt alive)
            image_url = ""

    # Build "<heading>:\n- item\n..." sections from the description block.
    product_description = ""
    try:
        desc_block = driver.find_element(By.ID, "description")
        for heading in desc_block.find_elements(By.TAG_NAME, "h3"):
            product_description += f"\n{heading.text.strip()}:\n"
            ul = heading.find_element(By.XPATH, "following-sibling::ul[1]")
            for li in ul.find_elements(By.TAG_NAME, "li"):
                product_description += f"- {li.text.strip()}\n"
    except NoSuchElementException:  # narrowed from bare except
        product_description = "Description not found or failed to parse."

    def _review_field(review, getter):
        # Apply *getter* to a review element, returning None when the
        # sub-element is missing (replaces four copy-pasted try/excepts).
        try:
            return getter(review)
        except NoSuchElementException:
            return None

    reviews = []
    for review in driver.find_elements(By.CLASS_NAME, "ratting-comments"):
        reviews.append({
            "reviewer": _review_field(review, lambda r: r.find_element(By.CSS_SELECTOR, ".head-name .approved p").text.strip()),
            "rating": _review_field(review, lambda r: r.find_element(By.CSS_SELECTOR, ".rating p").text.strip("()")),
            "date": _review_field(review, lambda r: r.find_element(By.TAG_NAME, "h3").text.replace("Posted on", "").strip()),
            "comment": _review_field(review, lambda r: r.find_element(By.CSS_SELECTOR, ".comments p").text.strip()),
        })

    return {
        "product_name": product_name,
        "brand": brand,
        "discounted_price": discounted_price,
        "listed_price": listed_price,
        "discount_percent": discount_percent,
        "warranty_info": warranty_info,
        "availability": availability,
        "seller_name": seller_name,
        "average_rating": average_rating,
        "rating_count": rating_count,
        "image_url": image_url,
        "product_url": final_url,
        "product_description": product_description.strip().replace("\n", " "),
        # Serialized so the whole record fits a single flat CSV row.
        "reviews": json.dumps(reviews, ensure_ascii=False),
    }
# ========== Load Product Links ==========
# Read product URLs (first column) from product_links.csv, skipping the header.
product_urls = []
with open("product_links.csv", "r", encoding="utf-8") as f:
    reader = csv.reader(f)
    # Default of None avoids StopIteration if the file is completely empty.
    next(reader, None)  # skip header row
    for row in reader:
        # Keep only rows whose first cell looks like an absolute URL.
        if row and row[0].startswith("http"):
            product_urls.append(row[0])
print(f"📦 Total product links to scrape: {len(product_urls)}")
# ========== Setup Selenium ==========
# Use the real Edge Options/Service classes (the original passed Chrome's
# Options to webdriver.Edge and referenced an undefined `EdgeService`,
# which raised NameError before any scraping started).
options = MsEdgeOptions()
options.add_argument("--headless")
# NOTE(review): driver path is machine-specific; prefer putting msedgedriver
# on PATH (or using Selenium Manager) so the script is portable.
driver = webdriver.Edge(
    service=EdgeService(
        executable_path=r"C:/Users/User/Desktop/UIU/Spring 25/DS 3885/webscraping selenium/driver/msedgedriver.exe"
    ),
    options=options,
)
# ========== Scrape and Save ==========
# Column order of the output CSV; must match the keys returned by
# scrape_pickaboo_product().
fieldnames = [
    "product_name", "brand", "discounted_price", "listed_price", "discount_percent",
    "warranty_info", "availability", "seller_name", "average_rating", "rating_count",
    "image_url", "product_url", "product_description", "reviews"
]
try:
    with open("pickaboo_products.csv", mode="w", newline="", encoding="utf-8") as file:
        writer = csv.DictWriter(file, fieldnames=fieldnames)
        writer.writeheader()
        for i, url in enumerate(product_urls, 1):
            print(f"\n🔎 Scraping {i}/{len(product_urls)}: {url}")
            try:
                writer.writerow(scrape_pickaboo_product(driver, url))
            except Exception as e:
                # Best-effort run: log and move on so one bad page
                # doesn't abort the whole batch.
                print(f"❌ Failed to scrape {url}: {e}")
finally:
    # Always release the browser process, even if the CSV open/write or
    # the scrape loop raises (the original leaked it on failure).
    driver.quit()
print("\n✅ Done! All product data saved to pickaboo_products.csv")