from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from lxml import etree
from selenium.webdriver.common.keys import Keys
import time
import re
import requests
from lxml import etree

class Second(object):
    """Selenium-based crawler for walmart.com category and product pages.

    ``downloader`` drives a headless Chrome into one sub-category listing and
    returns its HTML; ``content`` parses that listing, clicks through to a
    product page and extracts image / title / price / review fields.
    """

    def __init__(self):
        # Target site and headless-Chrome configuration.
        self.url = 'https://www.walmart.com/'
        self.chrome_options = Options()
        self.chrome_options.add_argument('--headless')
        self.chrome_options.add_argument('--disable-gpu')

    def downloader(self):
        """Navigate into one second-level category and return the page source.

        Side effects: keeps the live driver on ``self.driver`` (``content``
        keeps clicking with it) and dumps the listing HTML to ``1.html``.
        The ``time.sleep`` calls wait for the dynamically rendered menus.
        """
        # Bug fix: the headless options built in __init__ were never passed
        # to Chrome, so the browser always opened with a visible window.
        self.driver = webdriver.Chrome('./chromedriver.exe',
                                       options=self.chrome_options)
        self.driver.get(self.url)

        # Click the departments icon to reveal the first-level categories.
        time.sleep(3)
        self.driver.find_element_by_xpath(
            "//*[@id='hf-header']/div[2]/div/div[1]/div[1]/button/span/img"
        ).click()

        # Click "see all" to reveal the second-level categories.
        time.sleep(1)
        self.driver.find_element_by_xpath(
            '//*[@id="vh-spark-main-menu"]/div[3]/div[1]/a').click()
        time.sleep(2)

        # Click one hard-coded second-level category to enter its product
        # list.  NOTE(review): the absolute XPath is brittle against any
        # walmart.com layout change -- confirm it still matches.
        self.driver.find_element_by_xpath(
            '/html/body/div[1]/div/div/div[2]/div/div/div[3]/div[3]'
            '/div/div[1]/ul[2]/li[2]/ul/li[6]/a'
        ).click()

        # Persist a debug copy of the listing page and return its source.
        with open('1.html', 'w', encoding='utf-8') as f:
            f.write(self.driver.page_source)
        return self.driver.page_source

    def content(self, response):
        """Parse a category listing page and scrape one product's details.

        :param response: HTML source of the listing page, as returned by
            ``downloader`` (``self.driver`` must still be alive).
        """
        # Keep a copy of the raw listing HTML for offline debugging.
        with open('wr.html', 'w', encoding='utf-8') as f:
            f.write(response)

        # Iterate over the product cells of the listing page.
        cells = etree.HTML(response).xpath(
            '//*[ @ id = "cp-center-module-2"] / div / div[2]')
        for cell in cells:
            # Product URL (relative href inside the cell).
            urls = cell.xpath(
                './ div / div[2] / div / div[1] / ul / li[2] / div / div / a/@href')
            if not urls:
                # Robustness: skip cells whose markup does not match the
                # XPath instead of crashing on urls[0].
                continue
            print('商品url：https://www.walmart.com{}'.format(urls[0]))

            # Click through to the product page and wait for it to render.
            self.driver.find_element_by_xpath(
                '//*[@id="cp-center-module-2"]/div/div[2]/div/div[2]'
                '/div/div[1]/ul/li[1]/div/div/a'
            ).click()
            time.sleep(2)

            # Bug fix: parse the product page just opened, not the stale
            # listing HTML still held in `response`.
            detail_nodes = etree.HTML(self.driver.page_source).xpath(
                '/html/body/div[1]/div[1]/div/div[2]/div/div/div/div/div')

            # Bug fix: the original passed a list of lxml elements straight
            # to f.write() (TypeError); serialize each node instead.
            with open('im.html', 'w', encoding='utf-8') as f:
                for node in detail_nodes:
                    f.write(etree.tostring(node, encoding='unicode'))

            # Bug fix: the inner loop variable shadowed the outer one, so
            # title/price/review were read from whatever node the inner loop
            # left behind.  Extract every field from the same detail node.
            for node in detail_nodes:
                # Product image srcset.
                imgs = node.xpath(
                    './div/div/div/div[3]/div[5]/div/div[2]/div[2]/div/div'
                    '/div/div/div[1]/button/span/div[2]/div[1]/img/@srcset')
                print(imgs)
                # Product title text fragments.
                title = node.xpath('./div[5]/div/div[3]/div/h1//text()')
                print(title)
                # Product price text fragments.
                price = node.xpath(
                    './div[5]/div/div[3]/div/div[2]/div[1]/section'
                    '/div/div[1]/div[1]//text()')
                print(price)
                # Product review count.
                preferential = node.xpath(
                    './div[5]/div/div[3]/div/div[1]/div[4]/div[1]'
                    '/div/div/span[7]/span[1]/text()')
                print(preferential)

    def parse(self, content):
        """Placeholder for a structured-extraction step (not implemented)."""
        pass

    def save(self, cout):
        """Placeholder for a persistence step (not implemented)."""
        pass


#
if __name__ == '__main__':
    # Crawl one category listing, echo its HTML, then run the parse steps.
    crawler = Second()
    page = crawler.downloader()
    print(page)
    crawler.content(page)
    crawler.parse(page)
