# -*- coding: utf-8 -*-
import ast
import hashlib
import json
import re

import redis
import scrapy
from scrapy.exceptions import CloseSpider
from scrapy.utils.python import to_bytes
from w3lib.url import canonicalize_url

from KeywordSpider.settings import mysql_conf, redis_conf

class BaidusugSpider(scrapy.Spider):
    """Crawl Baidu's search-suggestion (JSONP) API.

    Seed keywords are passed in via the ``kws`` spider argument; each
    suggestion returned is yielded as an item and, up to a fixed depth,
    fed back into the API to discover further suggestions.
    """
    name = 'baidusug'
    allowed_domains = ['baidu.com']
    # Suggestion endpoint; &json=1 makes Baidu return proper JSON inside
    # the JSONP wrapper: window.baidu.sug({"q":...,"p":false,"s":[...]});
    SUG_URL = 'https://sp0.baidu.com/5a1Fazu8AA54nxGko9WTAnF6hhy/su?wd={}&json=1'
    # How many suggestion levels deep to follow from each seed keyword.
    MAX_DEPTH = 3

    def __init__(self, kws=None, *args, **kwargs):
        """
        :param kws: string repr of a list of tuples; element 0 of each
            tuple is the seed keyword (the other elements are ignored
            here — presumably id/parent_id bookkeeping for the caller).

        ``ast.literal_eval`` replaces the original ``eval``: spider
        arguments come from the command line / scheduler and ``eval``
        would execute arbitrary code embedded in them. ``literal_eval``
        only accepts Python literals, which is all the caller sends.
        """
        super(BaidusugSpider, self).__init__(*args, **kwargs)
        self.kws = ast.literal_eval(kws) if kws else []

    def start_requests(self):
        """Open the Redis connection and emit one request per seed keyword."""
        self.red = redis.StrictRedis(host=redis_conf['host'], port=redis_conf['port'],
                                     db=redis_conf['db'], password=redis_conf['passwd'])
        self.logger.info("seed keywords: %s", self.kws)
        for entry in self.kws:
            keyword = entry[0]
            yield scrapy.Request(
                url=self.SUG_URL.format(keyword),
                meta={"keyword": keyword, "num": self.MAX_DEPTH, "level": 1},
                callback=self.parse,
            )

    def parse(self, response):
        """Parse one suggestion response.

        Yields one item dict per suggested phrase, then recurses on each
        suggestion until the depth limit (``meta['num']``) is reached.
        If the response is not the expected JSONP, the request's
        dupefilter fingerprint is removed (so it can be retried on the
        next run) and the spider is closed.
        """
        keyword = response.meta["keyword"]
        num = response.meta["num"]
        level = response.meta.get("level")
        # Expected shape:
        # window.baidu.sug({"q":"...","p":false,"s":["sug1","sug2",...]});
        match = re.search(r"\((.*)\)", response.text)
        if not match:
            # Unexpected payload (likely blocked / captcha page): make the
            # URL re-crawlable, then stop the whole spider.
            self.del_fingerprint(self.SUG_URL.format(keyword))
            raise CloseSpider("unparseable suggestion response")
        payload = json.loads(match.group(1))
        key_list = payload.get("s")
        if not key_list:
            return
        for kw in key_list:
            yield dict(
                keyword=keyword,
                text=kw,
                level=level,
                related_results=None,
            )
        if level < num:
            for kw in key_list:
                yield scrapy.Request(
                    url=self.SUG_URL.format(kw),
                    meta={"keyword": kw, "level": level + 1, "num": num},
                    callback=self.parse,
                )
        else:
            self.logger.info("max suggestion depth reached for %s", keyword)

    def del_fingerprint(self, url):
        """Remove *url*'s dupefilter fingerprint from Redis.

        Reproduces Scrapy's default request fingerprint: sha1 over the
        HTTP method, the canonicalized URL and the (empty) request body,
        so the key matches what the scheduler's dupefilter stored.
        """
        fp = hashlib.sha1()
        fp.update(to_bytes('GET'))
        fp.update(to_bytes(canonicalize_url(url)))
        fp.update(b'')  # empty request body
        # NOTE(review): the key says "sogoukeyword" but this spider is
        # "baidusug" — confirm this matches the dupefilter key this
        # spider's scheduler actually writes to.
        removed = self.red.srem("sogoukeyword:dupefilter", fp.hexdigest())
        self.logger.debug("removed fingerprint %s for %s (count=%s)",
                          fp.hexdigest(), url, removed)
