# -*- coding: utf-8 -*-
import ast
import hashlib
import re

import redis
import scrapy
from pymysql import *
from scrapy.exceptions import CloseSpider
from scrapy.utils.python import to_bytes
from w3lib.url import canonicalize_url

from KeywordSpider.settings import mysql_conf, redis_conf
from KeywordSpider.custom_settings import custom_settings_for_baidukeyword

class BaidukeywordSpider(scrapy.Spider):
    """Crawl Baidu's "related searches" suggestions for a list of seed keywords.

    Each seed keyword is queried on www.baidu.com; the related-search terms
    found on the result page are printed as items and recursively re-queried
    up to a per-keyword depth limit (2 or 6 levels, chosen by the seed flag).
    """
    custom_settings = custom_settings_for_baidukeyword
    name = 'baidukeyword_new'
    allowed_domains = ['www.baidu.com']
    start_urls = []
    # URLs whose result page lacked the expected content. Currently unused
    # except by the commented-out MySQL error-logging code in parse().
    err_urls = []

    def __init__(self, kws=None, *args, **kwargs):
        """Accept the seed keywords via ``scrapy crawl -a kws=...``.

        ``kws`` is the string repr of a list of (keyword, flag, parent_id)
        tuples, e.g. ``"[('黑龙江保险', '0', 1)]"``.
        """
        super(BaidukeywordSpider, self).__init__(*args, **kwargs)
        # ast.literal_eval only accepts Python literals; eval() here would
        # execute arbitrary code supplied on the command line.
        self.kws = ast.literal_eval(kws) if kws else []

    def start_requests(self):
        """Open the Redis/MySQL connections and yield one request per seed."""
        # Redis connection — used to delete request fingerprints on failure.
        self.red = redis.StrictRedis(
            host=redis_conf['host'],
            port=redis_conf['port'],
            db=redis_conf['db'],
            password=redis_conf['passwd'],
        )
        # MySQL connection — kept open for the (currently commented-out)
        # error-URL persistence in parse().
        self.conn = connect(
            host=mysql_conf.get('host'),
            port=mysql_conf.get('port'),
            database=mysql_conf.get('db'),
            user=mysql_conf.get('user'),
            password=mysql_conf.get('passwd'),
            charset=mysql_conf.get('charset'),
        )
        self.cs = self.conn.cursor()
        print(self.kws)
        for seed in self.kws:
            # Seed flag selects the recursion depth: '0' -> 2 levels,
            # '-1' -> 6 levels; any other flag is skipped.
            if seed[1] == '0':
                depth = 2
            elif seed[1] == '-1':
                depth = 6
            else:
                continue
            keyword = seed[0]
            print(keyword)
            url = 'http://www.baidu.com/s?wd={}'.format(keyword)
            yield scrapy.Request(
                url=url,
                meta={"keyword": keyword, "num": depth, "level": 1},
                callback=self.parse,
            )

    def parse(self, response):
        """Extract related-search terms and recurse until depth ``num``.

        Raises:
            CloseSpider: when the result-count element is missing — treated
                as a blocked/abnormal response. The request fingerprint is
                removed from Redis first so the URL can be retried later.
        """
        keyword = response.meta["keyword"]
        num = response.meta["num"]
        level = response.meta["level"]
        # Total result count shown by Baidu on a normal result page.
        related = response.xpath("//div[@class='nums']/span/text()").extract_first()
        if not related:
            err_url = 'http://www.baidu.com/s?wd={}'.format(keyword)
            # Delete the dupefilter fingerprint so a re-run can retry this
            # URL, then stop the spider — the page is likely an anti-bot
            # response rather than real results.
            self.del_fingerprint(err_url)
            raise CloseSpider('missing result count: possible anti-spider page')
        # Keep only the digits, concatenated (the count is rendered with
        # thousands separators, e.g. "1,000,000").
        related_results = ''.join(re.findall(r"\d+", related))
        # Related-search terms live in the <th> cells of this table.
        rows = response.xpath("//div/table[@cellpadding='0']//tr/th")
        if rows:
            next_keywords = []
            for th in rows:
                text = th.xpath("./a/text()").extract_first()
                if text:
                    item = dict(
                        keyword=keyword,
                        text=text,
                        level=level,
                        related_results=related_results,
                    )
                    next_keywords.append(text)
                    # yield item  # item pipeline disabled; printed instead
                    print(item)
            if level < num:
                if next_keywords:
                    level += 1
                    for kw in next_keywords:
                        yield scrapy.Request(
                            url='http://www.baidu.com/s?wd={}'.format(kw),
                            meta={"keyword": kw, "level": level, "num": num},
                            callback=self.parse,
                        )
            else:
                print("采集完成！")

    def del_fingerprint(self, url):
        """Remove the Scrapy dupefilter fingerprint of ``url`` from Redis.

        Recomputes Scrapy's request fingerprint for a body-less GET (sha1 of
        method + canonical URL + empty body) so the failed URL is no longer
        marked as "seen" and can be re-crawled.
        """
        print(url)
        fp = hashlib.sha1()
        fp.update(to_bytes('GET'))
        fp.update(to_bytes(canonicalize_url(url)))
        fp.update(b'')  # empty request body
        print(fp.hexdigest())
        # This Redis set holds the fingerprints of this spider's requests.
        print(self.red.srem("baidukeyword_new:dupefilter", fp.hexdigest()))
