File size: 6,785 Bytes
39a482a
 
 
 
6d0cb99
39a482a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6d0cb99
39a482a
 
 
 
 
6d0cb99
39a482a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6d0cb99
39a482a
 
 
 
 
 
 
 
 
 
 
6d0cb99
39a482a
 
 
 
 
 
 
 
 
 
 
 
 
6d0cb99
39a482a
 
6d0cb99
39a482a
 
 
 
 
 
 
 
 
 
6d0cb99
39a482a
 
 
 
 
 
 
 
 
 
 
 
6d0cb99
39a482a
 
 
 
 
 
 
 
 
6d0cb99
39a482a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6d0cb99
39a482a
 
 
 
 
 
6d0cb99
39a482a
 
6d0cb99
39a482a
 
 
 
 
 
6d0cb99
39a482a
 
 
 
 
 
 
 
 
 
 
 
 
6d0cb99
 
 
39a482a
 
 
 
 
 
 
 
 
6d0cb99
 
39a482a
 
 
 
 
 
 
 
 
6d0cb99
39a482a
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
import time

# Module-level WebDriver instance shared by all scraping functions in this
# file; created by start_driver() and torn down by close_driver().
DRIVER = None

# Wrapper to shut down the driver if it was created
def close_driver():
    """Quit the global WebDriver (if any) and reset DRIVER to None.

    Safe to call when no driver is active.
    """
    global DRIVER
    if DRIVER is not None:
        # quit() ends the whole WebDriver session and kills the chromedriver
        # process; the original close() only closed the current window and
        # leaked the browser/driver processes.
        DRIVER.quit()
    DRIVER = None

# Function to (re)start driver
def start_driver(force_restart=False):
    """(Re)create the global headless Chrome WebDriver.

    Args:
        force_restart: (bool) if True, any existing driver is shut down
            before a new one is created.
    """
    global DRIVER

    if force_restart:
        close_driver()

    # Setting up the driver. Chromium switches use the standard "--" prefix
    # (the original single-dash form happened to work but is non-standard).
    service = Service()
    options = Options()
    options.add_argument('--headless')                # run without opening a visible browser window
    options.add_argument('--no-sandbox')              # required in many container/CI environments
    options.add_argument('--disable-dev-shm-usage')   # avoid /dev/shm exhaustion inside Docker

    DRIVER = webdriver.Chrome(service=service, options=options)

### Function to extract product info from the necessary html and json tags
def get_lazada_product_info_single(product_element, extra_info):
    """
    Extract info from a single product element from the driver.

    Args:
        product_element: (WebElement) the product card whose info needs to
            be extracted.
        extra_info: (bool) when True, also scrape the 'sales' and
            'discount' fields.

    Returns:
        info: (dict) a dictionary of info of the product. Every product
            should at least have four pieces of information: name, price,
            link to the product page, and link to the product image
            (plus 'sales' and 'discount' when extra_info is True).
    """
    info = {'source': 'lazada',
            'name': '',
            'price': -1,
            'product_url': '',
            'image': ''}

    # name
    try:
        # BUG FIX: the XPath must start with ".//" so the search is scoped
        # to this product card. The original "//..." searched the whole
        # document, returning the FIRST product's data for every card.
        product_title_element = product_element.find_element(By.XPATH, ".//div[@class='RfADt']/a")
        info['name'] = product_title_element.text
    except NoSuchElementException:
        info['name'] = ""

    # price
    try:
        price_element = product_element.find_element(By.XPATH, ".//div[@class='aBrP0']/span[@class='ooOxS']")
        price_text = price_element.text
        # Price text looks like "123.456 <currency>": take the leading
        # number and strip the "." thousands separators.
        info['price'] = int(price_text.split(" ")[0].replace('.', ''))
    except (NoSuchElementException, ValueError):
        pass

    # link
    try:
        product_link_element = product_element.find_element(By.XPATH, ".//div[@class='RfADt']/a")
        product_link = product_link_element.get_attribute("href")
        # Drop the scheme ("https://") and keep host + path only.
        info['product_url'] = product_link.split("//")[1]
    except NoSuchElementException:
        pass

    # thumbnail
    try:
        image_element = product_element.find_element(By.XPATH, ".//div[@class='_95X4G']/a/div/img")
        info['image'] = image_element.get_attribute("src")
    except NoSuchElementException:
        pass

    # If we decide to get extra information
    if extra_info:
        # sales (the "x sold" counter)
        try:
            sold_element = product_element.find_element(By.XPATH, ".//div[@class='_6uN7R']/span[@class='_1cEkb']/span[1]")
            info['sales'] = sold_element.text
        except (NoSuchElementException, ValueError):
            info['sales'] = 0

        # discount badge
        try:
            discount_element = product_element.find_element(By.XPATH, ".//div[@class='WNoq3']/span[@class='IcOsH']")
            info['discount'] = discount_element.text
        except (NoSuchElementException, ValueError):
            info['discount'] = '0'

    return info

### Function to scrape all products from a page
def get_lazada_product_info_from_page(page_url, extra_info=False):
    """
    Extract info from all products of a specific page_url on the Lazada
    website.

    Args:
        page_url: (string) url of the page to scrape
        extra_info: (bool) forwarded to get_lazada_product_info_single().

    Returns:
        data: (list) a list of dictionaries of products info. If no
            products found, return empty list.
    """
    global DRIVER

    # Lazily create the driver so this function also works if the caller
    # forgot to call start_driver() first (previously: AttributeError).
    if DRIVER is None:
        start_driver()

    data = []
    DRIVER.get(page_url)  # Use the driver to get info from the product page
    time.sleep(3)  # crude wait for the JS-rendered product grid to load

    # FIND ALL PRODUCT ITEMS (each card is a div with class "Bm3ON")
    products = DRIVER.find_elements(By.CLASS_NAME, 'Bm3ON')
    print(f'Found {len(products)} products')

    for product in products:
        data.append(get_lazada_product_info_single(product, extra_info))
    return data

### Function to get product info from a main category
def get_lazada_product_info_from_category(search_product, max_page=0, extra_info=False):
    '''
    Scrape multiple result pages for a search term on Lazada.
    Uses get_lazada_product_info_from_page().

    Args:
        search_product: (string) the search query.
        max_page: (int) an integer denoting the maximum number of pages to
                  scrape. Default value is 0 to scrape all pages (until an
                  empty page is returned).
        extra_info: (bool) forwarded to the per-page scraper.
    Returns:
        products: a list in which every element is a dictionary of one
                  product's information
    '''
    products = []
    page_n = 1
    cat_url = 'https://www.lazada.vn/catalog/?q=' + search_product

    product_list = get_lazada_product_info_from_page(cat_url, extra_info=extra_info)

    while len(product_list) > 0:
        products.extend(product_list)
        page_n += 1

        # Stop according to max_page (max_page <= 0 means "no limit").
        if max_page > 0 and page_n > max_page:
            break

        # BUG FIX: page_n is an int; the original code concatenated it
        # directly onto a str, raising TypeError as soon as page 2 was
        # requested.
        cat_url = f'https://www.lazada.vn/catalog/?page={page_n}&q={search_product}'
        product_list = get_lazada_product_info_from_page(cat_url, extra_info=extra_info)

    return products

def scrap_lazada(search_product, num_max_page, extra_info):
    """
    End-to-end scrape: start a fresh driver, collect all products for the
    search term, and always shut the driver down afterwards.

    Args:
        search_product: (string) the search query.
        num_max_page: (int) maximum number of pages to scrape (0 = all).
        extra_info: (bool) also scrape the sales/discount fields.

    Returns:
        prod_data: (list) product-info dictionaries.
    """
    start_driver(force_restart=True)

    try:
        # STORE YOUR PRODUCT INFO DICTIONARIES IN HERE
        prod_data = list(get_lazada_product_info_from_category(
            search_product, num_max_page, extra_info=extra_info))
    finally:
        # Ensure the browser/chromedriver processes are released even if
        # scraping raises (the original leaked the driver on error).
        close_driver()

    return prod_data