import requests
import json
import re
import time
import random
import pymysql as mydb
from lxml import etree
from fake_useragent import UserAgent
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from lxml import etree
from selenium.webdriver.common.keys import Keys
import time

class WalmartSpider():
    """Scrape Walmart category listings into a local MySQL table.

    Flow: parse() pulls the department tree from the all-departments page,
    goods_list() walks every second-level category of each first-level one,
    and com_db() paginates a category's product list with Selenium and
    inserts one row per qualifying product into table `wrm`.
    """

    def __init__(self):
        # Entry page that lists every department/category.
        self.url = 'https://www.walmart.com/all-departments'
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'
        }

    @staticmethod
    def _first_number(text):
        """Return the first integer embedded in *text* (e.g. '$1,234.56' -> 1234), or 0."""
        m = re.search(r'\d[\d,]*', text)
        return int(m.group(0).replace(',', '')) if m else 0

    def com_db(self, department, k, url, page_num):
        """Scrape up to *page_num* listing pages of *url* and store products.

        :param department: first-level category name
        :param k:          second-level category name
        :param url:        second-level category listing URL
        :param page_num:   number of result pages (str or int)
        """
        conn = mydb.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='walmart', charset='utf8')
        cursor = conn.cursor()
        try:
            for page in range(1, int(page_num) + 1):
                # Build the per-page URL; /browse/ links carry a query string
                # that must be replaced rather than appended to.
                if url.startswith('https://www.walmart.com/browse/') and '?_refineresult=true' in url:
                    page_url = url.replace('?_refineresult=true', '?page={}'.format(page))
                else:
                    page_url = url + '?page={}'.format(page)

                driver = webdriver.Chrome('./chromedriver.exe')
                try:
                    driver.get(page_url)
                    items = etree.HTML(driver.page_source).xpath('//*[@id="searchProductResult"]/ul/li')

                    for item in items:
                        # Product URL
                        urls = 'https://www.walmart.com/' + item.xpath('./div/div[2]/div[7]/div/span[2]/a/@href')[0]
                        print(urls)
                        # Product image
                        imgs = item.xpath('./div/div[2]/div[2]/div/div/a/img/@src')[0]
                        print(imgs)
                        # Product title
                        title = item.xpath('./div/div[2]/div[7]/div/span[2]/a/span//text()')[0]
                        print(title)
                        # Product sales figure
                        enum = item.xpath('./div/div[2]/div[8]/div/div[1]/div/a/div/span[3]/span/span[1]/text()')[0]
                        print(enum)
                        # Product price
                        price = item.xpath('./div/div[2]/div[9]/div/span/div/div/div/span/span[1]//text()')[0]
                        print(price)
                        # Promotion figure (same node as the sales figure in this markup)
                        preferential = item.xpath('./div/div[2]/div[8]/div/div[1]/div/a/div/span[3]/span/span[1]/text()')[0]
                        print(preferential)

                        # BUG FIX: the original compared int(price[1]) — a single
                        # digit character, always 0-9 — against 100, so the insert
                        # was unreachable; parse the whole number instead.
                        # NOTE(review): the >100 thresholds are kept from the
                        # original — confirm they match the intended filter.
                        if self._first_number(price) > 100 and self._first_number(preferential) > 100:
                            # BUG FIX: the original format() supplied 3 values for
                            # 8 placeholders (IndexError) and interpolated scraped
                            # text straight into SQL; use a parameterized query.
                            sql = ("insert into wrm(one,two,url,imgs,title,enum,price,preferential) "
                                   "values(%s,%s,%s,%s,%s,%s,%s,%s)")
                            print(sql)
                            cursor.execute(sql, (department, k, urls, imgs, title, enum, price, preferential))
                            conn.commit()
                finally:
                    # BUG FIX: the original called driver.close() inside the item
                    # loop, killing the browser after the first product; quit once
                    # per page so the Chrome process is always released.
                    driver.quit()
        finally:
            # Always release the database resources, even on error.
            cursor.close()
            conn.close()

    def goods_list(self, departments):
        """
        Walk every second-level category under each first-level category.

        :param departments: dict {first-level name: [{second-level name: url}, ...]}
        """
        def goods_page(department, departments_dic):
            # Each dict maps a second-level category name to its URL.
            for k, v in departments_dic.items():
                # Unsupported page templates (/cp/, /home/, anchors, video)
                # are skipped for now; templates can be added later.
                # BUG FIX: the original used `return`, which also dropped every
                # remaining entry of the dict; `continue` skips only this one.
                if v.startswith(('#', '/cp/', 'https://www.walmart.com/cp/', '/home/', 'https://video')):
                    continue
                if v.startswith('/browse/') or v.startswith('/m/'):
                    url = 'https://www.walmart.com{}'.format(v)
                else:
                    # Already absolute (https:...) or left untouched.
                    url = v

                # BUG FIX: the original except-handler recursed into goods_page()
                # on the whole dict, risking unbounded recursion and re-scraping
                # earlier entries; retry just this entry a bounded number of times.
                for _attempt in range(3):
                    try:
                        src = requests.get(url, headers=self.headers, allow_redirects=False)
                        if src.status_code == 200:
                            if 'Sorry, no products matched' in src.text:
                                print('ERROR：该类目木有商品数据：Sorry, no products matched')
                                break
                            # Last page index is embedded in the page's pagination JSON.
                            page_num = re.compile(',{"page":(.*?),"url"').findall(src.text)[-1]
                            self.com_db(department, k, url, page_num)
                            time.sleep(0.5)
                        break
                    except Exception:
                        # Back off briefly before retrying this category.
                        time.sleep(random.randint(1, 5))

        for department in departments:
            # Iterate every second-level category bucket of this first-level one.
            for departments_dic in departments[department]:
                goods_page(department, departments_dic)

    def parse(self):
        """Fetch the all-departments page and dispatch each first-level category."""
        src = requests.get(self.url, headers=self.headers, allow_redirects=False)
        # The category tree is embedded as JSON inside a <script id="home"> tag.
        category_json = re.compile('<script id="home" type="application/json">(.*?)</script>').findall(src.text)[0]
        category_json = json.loads(category_json)
        departments = category_json['home']['home']['quimbyData']['global_header_ny']['headerZone1']['configs'][
            'departments']
        print('=' * 100)
        for i in departments:
            # Map first-level name -> list of {second-level title: click-through url}.
            department = {i['name']: [{j['department']['title']: j['department']['clickThrough']['value']}
                                      for j in i['departments']]}
            # Hand all second-level categories of this first-level one to goods_list.
            self.goods_list(department)
            time.sleep(2)


if __name__ == '__main__':
    # Run the full category scrape when executed as a script.
    spider = WalmartSpider()
    spider.parse()
"""
https://www.walmart.com/browse/5438_5993826?cat_id=5438_5993826&facet=special_offers%3AClearance%7C%7Cspecial_offers%3ARollback%7C%7Cspecial_offers%3ASpecial+Buy%7C%7Cspecial_offers%3AReduced+Price


"""