import requests
from lxml import etree
from lxml import html
from com.cjc import conn_mysql
from com.cjc.dict2 import Dict
from selenium import webdriver
import com.cjc
import time
import re

url = "https://www.smzdm.com"
print("…………正在下载页面：")

# Browser-like request headers; the cookie carries a logged-in session.
header = {
    "user-agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
    "cookie": '__ckguid=Gth5WW8lVkYr8BWiW57kkg6; device_id=2130706433158389929829043560f622d29688a60c72cd2335e4a31769; homepage_sug=b; r_sort_type=score; __jsluid_s=ab548f8e769e802e54690973bcd4cf1e; _ga=GA1.2.139432992.1583899303; shequ_pc_sug=a; sess=MTQwNjF8MTU4ODkwNTEzNXw5OTg1Mjg0OTg2fDY0ODZhZTVjMDAwMDgwZTkxMzYyN2UyOTUzZTNiN2Zl; user=user%3A9985284986%7C9985284986; smzdm_user_source=1D0721FCBDF631797C4863B9ACB70D3E; smzdm_id=9985284986; userId=user:9985284986|9985284986; PHPSESSID=6a3dcdeec8e6ad0d0001a8c0b63b1709; Hm_lvt_9b7ac3d38f30fe89ff0b8a0546904e58=1584951657,1585016686,1585017140,1585121691; _zdmA.uid=ZDMA.FlxkuWQBS.1585121692.2419200; zdm_qd=%7B%22referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DZS7pilFWR3HIb6msGjur1Qj7HYAQzNrS8D3L6MIxI7u%26wd%3D%26eqid%3Dde72e9f60001702c000000035e7b0997%22%7D; _gid=GA1.2.380098399.1585121693; ad_date=25; bannerCounter=%5B%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%2C%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%2C%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%2C%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%2C%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%2C%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%5D; ad_json_feed=%7B%22J_feed_ad4%22%3A%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%7D; wt3_eid=%3B999768690672041%7C2158389975500234536%232158512484900723973; wt3_sid=%3B999768690672041; ad_load_feed4=%7B%22J_feed_ad4%22%3A%7B%22number%22%3A0%2C%22surplus%22%3A1%7D%7D; _zdmA.time=1585128780869.14459.https%3A%2F%2Fwww.smzdm.com%2F; _gat_UA-27058866-1=1; Hm_lpvt_9b7ac3d38f30fe89ff0b8a0546904e58=1585128881'}

# Fetch the landing page and parse the decoded body into an lxml element tree,
# then grab every <li> of the category sidebar.
response = requests.get(url, headers=header)
response_html = etree.HTML(response.content.decode())
li_arr = response_html.xpath('//*[@id="category"]/ul/li')

# State shared with the scraping loop below: running top-level type counter,
# fixed first-level category names, and per-iteration scratch variables.
dict_type_code = 0
dict_type1_names = ['电脑数码','家用电器','日用百货','运动户外','个性化妆','母婴用品','食品生鲜','文化娱乐','汽车消费']
dict_type2_name = dict_type3_name = ""
dict_item2_code = dict_item3_code = ""
dict_sort = 0
dicts = []





# Walk the 3rd..10th sidebar entries (the eight shopping categories matched by
# dict_type1_names) and persist one Dict row per third-level category.
for i in range(2, 10):
    dict_type_code = dict_type_code + 1
    # Second-level category panels under this top-level entry.
    div_arr = li_arr[i].xpath('./div/div/div')
    dict_type1_name = dict_type1_names[i - 2]
    for div in div_arr:
        # Second-level name and link. xpath() returns a (possibly empty) list;
        # index it directly instead of parsing str(list) around quote marks,
        # which truncated any text containing an apostrophe.
        name2_list = div.xpath("./a/text()")
        href2_list = div.xpath("./a/@href")
        dict_type2_name = name2_list[0] if name2_list else ""
        # href looks like https://www.smzdm.com/<...>/<slug>/ — take the slug.
        # Guard the split so a missing href no longer raises IndexError.
        href2_parts = str(href2_list[0]).split("/") if href2_list else []
        dict_item2_code = href2_parts[4] if len(href2_parts) > 4 else ""

        # Third-level names and links; zip pairs them safely even if the two
        # lists differ in length (the old index loop would raise IndexError).
        dict_type3_names = div.xpath("./div/a/text()")
        dict_item3_codes = div.xpath("./div/a/@href")
        for dict_sort, (dict_type3_name, href3) in enumerate(
                zip(dict_type3_names, dict_item3_codes), start=1):
            href3_parts = str(href3).split("/")
            dict_item3_code = href3_parts[4] if len(href3_parts) > 4 else ""
            print(str(dict_type_code), dict_type1_name, dict_type2_name, dict_type3_name, dict_item2_code, dict_item3_code, dict_sort)
            dict1 = Dict(str(dict_type_code), dict_type1_name, dict_type2_name, dict_type3_name, dict_item2_code, dict_item3_code, str(dict_sort))
            dicts.append(dict1)
    # Flush this top-level category's rows to MySQL, then reuse the buffer.
    conn_mysql.save_category(dicts)
    dicts.clear()

