import re
import requests
from lxml import etree


class MeituanSpiders(object):
    """Scrape Walmart search-result pages for a fixed list of queries.

    For each query, fetch the search page, extract per-product fields via
    XPath, and append products whose price and review/sales count both
    exceed 100 to ``wr.txt``.

    NOTE(review): the class name (``MeituanSpiders``) and method name
    ``pares`` look like typos/leftovers, but they are kept unchanged so
    existing callers keep working.
    """

    # Seconds to wait for connect/read before aborting a request.
    # The original code had no timeout, so a stalled server hung forever.
    REQUEST_TIMEOUT = 15

    def __init__(self):
        # Search URL template; ``{}`` is replaced with the query string.
        self.url = 'https://www.walmart.com/search/?query={}'
        # Browser-like headers to reduce the chance of being blocked.
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'
            }
        # Backward-compatible alias for the original misspelled attribute
        # name, in case external code references it.
        self.heaeder = self.headers
        self.querys = ['mobile phone', 'chanel', 'hermes','clothes','Washing machine','savings spotlight','samsung savings','outdoor','electronics','health','personal care','home','beauty','backyard fun','gifting','graduation','tech savings',
                       'computers','cell phones','video games','women','men','shop all home','featured brands',
                       'pool time','garden center','nursery','cooling','tools','toys','shop by age',
                       'shop all food','all pet supplies','pet rs','beauty','bikes','sports','tires',
                       'photo center','gift cards','wedding shop','special occasions','gift wrap','home decor','an electric appliance',
                       'Table','Wrist watch','Bag','Smart speaker']

    def pares(self):
        """Fetch the search page for every query; hand HTTP-200 responses to content().

        A failed request (timeout, connection error) is reported and skipped
        instead of aborting the whole crawl.
        """
        for query in self.querys:
            url = self.url.format(query)
            print(url)
            try:
                res = requests.get(url=url, headers=self.headers,
                                   timeout=self.REQUEST_TIMEOUT)
            except requests.RequestException as exc:
                print('request failed for {}: {}'.format(url, exc))
                continue
            if res.status_code == 200:
                self.content(res)

    def content(self, response):
        """Parse one search-result page; append qualifying products to wr.txt.

        A product qualifies when both its price and its review/sales count
        parse as integers greater than 100.
        """
        items = etree.HTML(response.text).xpath('//*[@id="searchProductResult"]/ul/li')
        for item in items:
            # Product URL (site-relative href).
            urls = item.xpath('./div/div[2]/div[2]/a/@href')
            # Product image srcset.
            imgs = item.xpath('./div/div[2]/div[2]/a/div/img/@srcset')
            # Product title (may be split across several text nodes).
            title = item.xpath('./div/div[2]/div[5]/div/a//text()')
            # Review/sales count.
            enum = item.xpath('./div/div[2]/div[6]/div[1]/div/a/div/span[3]/span/span[1]/text()')
            # Price fragments (whole/decimal parts as separate text nodes).
            price = item.xpath('./div/div[2]/div[7]/div/span/span/span[2]/span/span[2]//text()')
            # NOTE(review): identical XPath to `enum` above — presumably
            # meant to be a different field (discount?); verify against the
            # live page markup.
            preferential = item.xpath('./div/div[2]/div[6]/div[1]/div/a/div/span[3]/span/span[1]/text()')

            # Only the numeric-parse step can legitimately fail on missing
            # or non-numeric fields; the original bare `except: pass` also
            # hid file-write and programming errors.
            try:
                qualifies = int(price[1]) > 100 and int(preferential[0]) > 100
            except (IndexError, ValueError):
                continue
            if not qualifies:
                continue

            good_content = f'''
                                      商品链接: {'https://www.walmart.com' + urls[0]}
                                      商品图片: {imgs[0]}
                                      商品标题: {''.join(title)}
                                      销量：{enum[0]}
                                      价格: {''.join(price)}
                                      评论：{enum[0]}
                                      \n
                                      '''
            print(good_content)
            with open('wr.txt', 'a', encoding='utf-8') as f:
                f.write(good_content)

if __name__ == '__main__':
    # Entry point: crawl every configured query once.
    spider = MeituanSpiders()
    spider.pares()















