# Find the <div> element with class "image-wrapper"
image_div = product_element.find_element(By.CLASS_NAME, 'image-wrapper')
# Find the <img> element within that <div> element
img_element = image_div.find_element(By.TAG_NAME, 'img')
# Get the value of the "srcset" attribute
srcset_value = img_element.get_attribute('srcset')
# Extract the first image URL from the srcset value (format: "url1 1x, url2 2x, ...")
image_link = srcset_value.split(',')[0].split(' ')[0]
info['image'] = image_link
except NoSuchElementException:
pass
# If we decide to get extra information
if extra_info:
# sales
try:
# sales_elem = product_element.find_element(By.XPATH, ".//div[@class='styles__StyledQtySold-sc-732h27-2']")
# sales_elem = product_element.find_element(By.CLASS_NAME, 'quantity has-border')
# info['sales'] = sales_elem
# info['sales'] = int(re.sub(r'\D', '', sales_elem.get_attribute('innerHTML')))
# Find the element with class "quantity"
quantity_span = product_element.find_element(By.CLASS_NAME, 'quantity')
# Keep only the digits from the element's text, then convert to int
# (an empty result raises ValueError, handled below)
info['sales'] = int(''.join(c for c in quantity_span.text if c.isdigit()))
except (NoSuchElementException, ValueError):
info['sales'] = 0
# # rating
# try:
# # rating = product_element.find_element(By.XPATH, ".//div[@class='average']").get_attribute('style')
# rating = product_element.find_element(By.CLASS_NAME, 'average').get_attribute('style')
# # info['rating'] = float(re.sub(r'\D','', rating))/100*5 # With regex
# info['rating'] = float(''.join([c for c in rating if c.isdigit()]))/100*5 # Without regex
# except NoSuchElementException:
# info['rating'] = 0
try:
# Try to get discount using class name
discount = product_element.find_element(By.CLASS_NAME, 'price-discount__discount').get_attribute('innerHTML')
info['discount'] = discount.replace('-', '') # Strip the minus sign (e.g. "-20%" -> "20%")
except (NoSuchElementException, ValueError):
try:
# Fall back to the styled-component class name
discount_div = product_element.find_element(By.CLASS_NAME, 'style__DiscountPercentStyled-sc-e9h7mj-1')
info['discount'] = discount_div.text.replace('-', '') # Strip the minus sign
except NoSuchElementException:
# If both attempts fail, set discount to 0
info['discount'] = '0'
# # tiki now
# try:
# info['tiki_now'] = bool(product_element.find_element(By.CLASS_NAME, 'badge-service').find_element(By.CLASS_NAME, 'item'))
# except NoSuchElementException:
# info['tiki_now'] = False
# # freeship, official seller, and/or trusted seller
# try:
# info['freeship'] = False
# info['official'] = False
# info['trusted'] = False
# thumbnail_tag = product_element.find_element(By.CLASS_NAME, 'thumbnail')
# list_img = thumbnail_tag.find_elements(By.TAG_NAME, 'img')
# # list_img = product_element.find_elements(By.XPATH, ".//div[@class='thumbnail']/img")
# for img in list_img:
# if img.get_attribute('src') == 'https://salt.tikicdn.com/ts/upload/dc/0d/49/3251737db2de83b74eba8a9ad6d03338.png':
# info['freeship'] = True
# elif img.get_attribute('src') == 'https://salt.tikicdn.com/ts/upload/b9/1f/4b/557eac9c67a4466ccebfa74cde854215.png':
# info['official'] = True
# elif img.get_attribute('src') == 'https://salt.tikicdn.com/ts/upload/e0/41/da/bb0fc684a838eff5e264ce0534a148f0.png':
# info['trusted'] = True
# except NoSuchElementException:
# pass
# # under price
# try:
# # info['under_price'] = bool(product_element.find_element(By.XPATH, ".//div[@class='badge-under-price']/child::div[@class='item']"))
# info['under_price'] = bool(product_element.find_element(By.CLASS_NAME, 'badge-under-price').find_element(By.CLASS_NAME, 'item'))
# except NoSuchElementException:
# info['under_price'] = False
# # installment
# try:
# # info['installment'] = bool(product_element.find_element(By.XPATH, ".//div[@class='badge-benefits']//child::img[1]"))
# info['installment'] = bool(product_element.find_element(By.CLASS_NAME, 'badge-benefits').find_element(By.TAG_NAME, 'img'))
# except NoSuchElementException:
# info['installment'] = False
# # gift
# try:
# # info['gift'] = bool(product_element.find_element(By.XPATH, ".//div[@class='freegift-list']"))
# info['gift'] = bool(product_element.find_element(By.CLASS_NAME, 'freegift-list'))
# except NoSuchElementException:
# info['gift'] = False
return info
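# A minimal standalone sketch of the srcset parsing used above (pure Python, no
# Selenium). It assumes the common "url descriptor, url descriptor" format of
# the srcset attribute; the helper name is illustrative, not part of the scraper.
def _first_srcset_url(srcset_value):
    """Return the first URL listed in a srcset attribute string."""
    # Each candidate is "url [descriptor]"; take the first candidate,
    # then drop the optional width/density descriptor.
    return srcset_value.split(',')[0].strip().split(' ')[0]
# Example: _first_srcset_url('img-1x.jpg 1x, img-2x.jpg 2x') -> 'img-1x.jpg'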
### Function to scrape all products from a page
def get_tiki_product_info_from_page(page_url, extra_info=False):
"""
Extract info from all products of a specfic page_url on Tiki website
Args:
page_url: (string) url of the page to scrape
Returns:
data: (list) a list of dictionary of products info. If no products
found, return empty list.
"""
global DRIVER
data = []
DRIVER.get(page_url) # Use the driver to load the product page
time.sleep(3) # Wait for the page to render its products
try:
# no_product_found = bool(DRIVER.find_element(By.XPATH, "//div[@class='style__StyledNotFoundProductView-sc-1uz0b49-0']"))
no_product_found = bool(DRIVER.find_element(By.CLASS_NAME, 'style__StyledNotFoundProductView-sc-1uz0b49-0'))
print("EMPTY PAGE")
return data
except NoSuchElementException:
no_product_found = False
# FIND ALL PRODUCT ITEMS
# products = DRIVER.find_elements(By.XPATH, "//a[@class='product-item']")
products = DRIVER.find_elements(By.CLASS_NAME, 'product-item')
print(f'Found {len(products)} products')
if (not no_product_found) and len(products)>0:
for i in products:
product_dict = get_tiki_product_info_single(i, extra_info)
data.append(product_dict)
return data
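# Example usage (illustrative URL; Tiki's search endpoint is assumed to accept
# a "page" query parameter):
#   page_data = get_tiki_product_info_from_page('https://tiki.vn/search?q=laptop&page=1')
#   print(len(page_data))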
### Function to get product info from a main category
def get_tiki_product_info_from_category(cat_url, max_page=0, extra_info=False):
'''
Scrape multiple pages of products of a category.
Uses get_tiki_product_info_from_page().
Args:
cat_url: (string) a url string of a category
max_page: (int) the maximum number of pages to scrape.
Default value is 0 to scrape all pages.
extra_info: (bool) whether to also scrape the extra fields (sales, discount, ...)
Returns:
products: a list in which every element is a dictionary of one product's information
'''
products = []
page_n = 1
cat_page_url = cat_url + f'&page={page_n}'
print(cat_page_url)
product_list = get_tiki_product_info_from_page(cat_page_url, extra_info=extra_info)
while len(product_list)>0:
products.extend(product_list)
page_n += 1
# stop_flag = False if max_page <= 0 else (page_n > max_page)
stop_flag = max_page>0 and page_n>max_page # For stopping the scrape according to max_page
if stop_flag:
break
cat_page_url = cat_url + f'&page={page_n}'
print(cat_page_url)
product_list = get_tiki_product_info_from_page(cat_page_url, extra_info=extra_info)
return products
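# Example usage (illustrative; any Tiki search/category URL that already has
# query parameters should work, since "&page=N" is appended):
#   products = get_tiki_product_info_from_category(
#       'https://tiki.vn/search?sort=default&q=laptop', max_page=2)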
def scrap_tiki(search_product, num_max_page, extra_info):
"""
Scrape Tiki search results for search_product, up to num_max_page pages.
Returns a list of product info dictionaries.
"""
start_driver(force_restart=True)
# Quote the query so Tiki searches for the exact phrase
url = 'https://tiki.vn/search?sort=default&q="' + search_product + '"'
prod_data = [] # STORE YOUR PRODUCT INFO DICTIONARIES IN HERE
# prod_per_cat = get_product_info_from_category(main_cat['URL'], num_max_page, extra_info=extra_info)
prod_per_cat = get_tiki_product_info_from_category(url, num_max_page, extra_info = extra_info)
prod_data.extend(prod_per_cat)
close_driver() # Close driver when we're done
return prod_data
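# A minimal sketch of how the scraper might be run (the keyword and page count
# are illustrative; start_driver(), close_driver() and DRIVER are assumed to be
# defined earlier in this script):
if __name__ == '__main__':
    results = scrap_tiki('laptop', num_max_page=2, extra_info=True)
    print(f'Scraped {len(results)} products')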