# -*- coding: utf-8 -*-
from com.zjs.crawer.urlcontent.baseurlcontent import BaseUrlContent
from com.zjs.zjsqueue import zurlpathqueue

from bs4 import BeautifulSoup as BS
from com.zjs.util.taobao_download import request
from com.zjs.zjsqueue import zurlcontentqueue
from queue import Empty
import logging
from com.zjs.crawer.urlcontent.baseurlcontent import BaseUrlContent
import importlib,sys
importlib.reload(sys)
import json

class taobaoPaths(BaseUrlContent):
    """Expand Taobao shop-search seed URLs into per-province, per-page URLs.

    ``run`` pulls seed URLs from ``zurlpathqueue``, asks :meth:`dojob` for
    the total shop count behind each search, then pushes one paginated
    search URL per result page (20 shops per page) onto ``zurlcontentqueue``.
    """

    # Queue/channel name shared with the other "taobao" crawler stages.
    name = "taobao"

    # Shop search for q=数码 ("digital goods"), filtered by province ({0}).
    _SEARCH_URL = ("https://shopsearch.taobao.com/search?app=shopsearch"
                   "&q=%E6%95%B0%E7%A0%81&imgfile=&js=1"
                   "&stats_click=search_radio_all%3A1"
                   "&initiative_id=staobaoz_20170609&ie=utf8&loc={0}")

    # Every province/region value accepted by Taobao's "loc" filter.
    _LOCATIONS = [
        "安徽", "福建", "甘肃", "广东", "广西", "贵州", "海南", "河北",
        "河南", "湖北", "湖南", "江苏", "江西", "吉林", "辽宁", "宁夏",
        "青海", "山东", "山西", "陕西", "云南", "四川", "西藏", "新疆",
        "浙江", "澳门", "香港", "台湾", "内蒙古", "黑龙江", "北京", "上海",
        "海外", "重庆", "天津",
    ]

    def __init__(self):
        # NOTE(review): base-class __init__ intentionally not invoked,
        # matching the original code — confirm BaseUrlContent needs none.
        logging.debug("taobaoPaths方法")

    def run(self):
        """Consume seed URLs forever and emit paginated search URLs.

        Never returns; each iteration is individually guarded so one bad
        URL cannot kill the worker.
        """
        i = 0  # province cursor into _LOCATIONS
        while True:
            url = None  # so the except-handler can log it even if get() raises
            try:
                url = zurlpathqueue.get(self.name)
                # Wrap around instead of running off the end of the list:
                # the original incremented i forever and hit IndexError on
                # every iteration once all provinces were consumed.
                loc = self._LOCATIONS[i % len(self._LOCATIONS)]
                total_count = self.dojob(url)
                if total_count is None or total_count < 20:
                    # Fewer than one full page: a single un-paginated URL.
                    zurlcontentqueue.put(self.name, self._SEARCH_URL.format(loc))
                else:
                    # One URL per 20-result page.  Fixes two original bugs:
                    # the page offset was incremented OUTSIDE the loop (so
                    # every page was requested with s=0), and a count of
                    # exactly 20 emitted nothing at all.
                    for page in range(total_count // 20):
                        page_url = (self._SEARCH_URL + "&s={1}").format(loc, page * 20)
                        zurlcontentqueue.put(self.name, page_url)
                i += 1
            except Empty:
                # NOTE(review): queue.Empty is imported at module level, so
                # get() presumably raises it when idle — keep polling.
                continue
            except Exception as ex:
                logging.error("[path][" + self.name + "]:" + str(url) + "解析失败")
                logging.error(ex)

    def dojob(self, url_b):
        """Fetch a shop-search page and return its total result count.

        The page embeds its data as a JavaScript assignment
        ``g_page_config = {...};`` inside the sixth ``<script>`` tag,
        followed by a ``g_srp_loadCss()`` call; the count lives at
        ``mods.pager.data.totalCount`` in that JSON.

        :param url_b: shop-search URL to fetch.
        :returns: the total count as an ``int``, or ``None`` when the page
            cannot be fetched or parsed (best-effort, never raises).
        """
        try:
            html = request.get(url_b, 1)
            soup = BS(html.text, "lxml")
            scripts = soup.find_all("script")
            raw = scripts[5].text.strip()
            # Everything before the g_srp_loadCss() call is the assignment.
            assignment = raw.split("g_srp_loadCss()")[0].strip().replace("\n", "")
            # Keep only the right-hand side.  maxsplit=1 fixes the original
            # split("=") which truncated JSON containing '=' characters;
            # then drop the trailing ';'.
            json_text = assignment.split("=", 1)[1].strip().rstrip(";")
            page_config = json.loads(json_text)
            # Chained .get() replaces the original four nested item loops.
            total = (page_config.get("mods", {})
                                .get("pager", {})
                                .get("data", {})
                                .get("totalCount"))
            # Coerce so run()'s numeric comparisons are safe even if the
            # JSON carries the count as a string.
            return None if total is None else int(total)
        except Exception as ex:
            # Best-effort: an unfetchable/unparsable page just yields "no
            # count" instead of silently passing (original bare except).
            logging.error("[path][%s]: failed to parse %s", self.name, url_b)
            logging.error(ex)
            return None
