# -*- coding: utf-8 -*-
from time import sleep
from 爬虫练习.jd.connf.redis_config import *

from requests import get
from lxml import etree

# NOTE(review): appears unused — only referenced from commented-out code below; candidate for removal.
url_lists=[]
#获取各大板块下的分类
def gets_page_shouye(all_leifen):
    """Parse the JD homepage HTML and store each category link in Redis.

    Walks the left-hand navigation panel (``#J_cate``), pairing every
    ``<a>`` href with its link text, and hands each (url, title) pair to
    :func:`into_redis`.

    Args:
        all_leifen: Raw HTML text of the JD homepage.
    """
    tree = etree.HTML(all_leifen)
    # Top-level category <li> nodes in the homepage navigation panel.
    category_items = tree.xpath('//*[@id="J_cate"]/ul/li')
    for item in category_items:
        hrefs = item.xpath('./a/@href')
        titles = item.xpath('./a/text()')
        # zip pairs each protocol-relative href with its link text
        # (replaces the fragile index loop; tolerates count mismatches).
        for href, title in zip(hrefs, titles):
            into_redis('https:' + href, title)

def into_redis(url, title):
    """Store one category ``url -> title`` mapping in Redis (best-effort).

    Any failure (connection error, bad key, ...) is printed and swallowed
    so a single bad entry does not abort the whole crawl.

    Args:
        url: Absolute category URL used as the Redis key.
        title: Human-readable category name stored as the value.
    """
    try:
        # into_Redis_url comes from the project's redis_config module.
        reds = into_Redis_url()
        reds.clint_redis_count.set(url, title)
        print(reds.clint_redis_count.keys())
    except Exception as e:
        # Deliberate best-effort: report the error and keep crawling.
        print(e)


#每次发送请求调用此方法
#每次发送请求调用此方法 (called for every outgoing request)
def pares_requests(url):
    """Fetch *url* with browser-like headers and return the response body.

    Args:
        url: Absolute URL to request.

    Returns:
        The decoded response text.

    Raises:
        requests.RequestException: on connection failure or timeout.
    """
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6",
        "cache-control": "max-age=0",
        "cookie": "__jdv=122270672|direct|-|none|-|1621780922061; shshshfpb=x99v56Sm%20pZuZzzpGVVekTw%3D%3D; shshshfpa=4279b4ae-5897-4d3a-aa96-84376dc6f011-1621666746; __jdu=16217809220612103003282; areaId=13; ipLoc-djd=13-1007-0-0; o2State={%22webp%22:true}; PCSYCityID=CN_370000_370200_0; __jda=122270672.16217809220612103003282.1621780922.1621780922.1621780922.1; __jdc=122270672; shshshfp=6095f9a01b3b1d91465ce75f6c1b2bc7; 3AB9D23F7A4B3C9B=6V4FJSGEW3632EB3ZDGCWKVVNHYVQPQ4SFWYG3M7A744QIVJ7RO743PYFAATIAGRJ47YW726GMWF77RBCRONP5PRLM",
        'sec-ch-ua': 'Not A;Brand";v="99", "Chromium";v="90", "Google Chrome";v="90"',
        "sec-ch-ua-mobile": "?0",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "none",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36"
    }

    # timeout prevents the crawler from hanging forever on a dead connection.
    response = get(url=url, headers=headers, timeout=10)
    return response.text



def main():
    """Crawl the JD homepage and push its category links into Redis."""
    homepage_html = pares_requests('https://www.jd.com/')
    gets_page_shouye(homepage_html)

# Script entry point.
if __name__ == '__main__':
    main()
