# -*- coding: utf-8 -*-
import ast
import hashlib
import re
import urllib
from urllib.parse import quote

import redis
import scrapy
from pymysql import *
from scrapy.exceptions import CloseSpider
from scrapy.utils.python import to_bytes
from w3lib.url import canonicalize_url

from KeywordSpider.settings import mysql_conf, redis_conf
from KeywordSpider.custom_settings import custom_settings_for_baiduzhidaokeyword

class BaidukeywordSpider(scrapy.Spider):
    """Crawl related-search keywords from Baidu Zhidao.

    Seed keywords arrive as the ``kws`` spider argument — the string
    representation of a list of ``(keyword, level_flag, id)`` tuples.
    Each seed is searched on zhidao.baidu.com, its related keywords are
    pulled from the m.baidu.com recsys API, and the expansion recurses
    until a depth limit decided by the seed's level flag.
    """

    custom_settings = custom_settings_for_baiduzhidaokeyword
    # Let 404 responses reach parse() so their dupefilter fingerprint can
    # be removed (for a retry on a later run) before the spider is closed.
    handle_httpstatus_list = [404]
    name = 'baiduzhidaokeyword_new'
    allowed_domains = ['baidu.com']
    start_urls = []
    err_urls = []

    # Crawl depth keyed by the seed keyword's level flag.
    DEPTH_BY_LEVEL = {'0': 2, '-1': 6}

    def __init__(self, kws=None, *args, **kwargs):
        """Parse the ``kws`` argument into a list of seed tuples.

        Uses ast.literal_eval rather than eval so a crafted spider
        argument cannot execute arbitrary code.
        """
        super(BaidukeywordSpider, self).__init__(*args, **kwargs)
        # Falls back to "" (an empty iterable) when no seeds are given,
        # matching the original behavior.
        self.kws = ast.literal_eval(kws) if kws else ""

    def start_requests(self):
        """Open the redis/mysql connections, then yield one Zhidao search
        request per seed keyword whose level flag is recognized."""
        # Redis connection, used by del_fingerprint() to drop failed
        # requests from the scrapy-redis dupefilter set.
        self.red = redis.StrictRedis(host=redis_conf['host'], port=redis_conf['port'],
                                     db=redis_conf['db'], password=redis_conf['passwd'])

        self.conn = connect(
            host=mysql_conf.get("host"),
            port=mysql_conf.get("port"),
            database=mysql_conf.get("db"),
            user=mysql_conf.get("user"),
            password=mysql_conf.get("passwd"),
            charset=mysql_conf.get("charset"),
        )
        self.cs = self.conn.cursor()
        print(self.kws)
        for seed in self.kws:
            keyword, level_flag = seed[0], seed[1]
            num = self.DEPTH_BY_LEVEL.get(level_flag)
            if num is None:
                # Unknown level flag: skip, as the original branches did.
                continue
            yield scrapy.Request(
                url='https://zhidao.baidu.com/search?word={}'.format(keyword),
                meta={"keyword": keyword, "num": num},
                callback=self.parse
            )

    def parse(self, response):
        """Handle the Zhidao search page.

        On success, extract the "related results" count and forward the
        keyword to the recsys related-search API (parse1). On 404, drop
        the request fingerprint and close the spider.
        """
        keyword = response.meta["keyword"]
        if response.status == 404:
            # Blocked/missing page: remove the fingerprint so the URL can
            # be re-crawled on a later run, then stop the whole spider.
            err_url = 'https://zhidao.baidu.com/search?word={}'.format(keyword)
            self.del_fingerprint(err_url)
            raise CloseSpider

        num = response.meta["num"]
        # First hop has no "level" in meta yet.
        level = response.meta.get("level") or 1
        header = response.xpath("//header[@id='header']//li/a/text()").extract()
        if not header:
            # No page header at all: treat as an empty/invalid page.
            return
        related = response.xpath("//div[@class='picker-header']/span/text()").extract_first()
        if not related:
            # Original code only issued the API request when the related
            # count span was present.
            return
        # "about N results" text -> concatenated digit runs.
        related_results = ''.join(re.findall(r"\d+", related))
        try:
            # The recsys API expects the query GBK percent-encoded.
            kw = urllib.parse.quote(keyword, safe='/', encoding='gbk', errors=None)
        except Exception as e:
            print(e)
        else:
            url = "https://m.baidu.com/recsys/ui/api/rs?query={}&ak=ZQ4m31EXvKem1HPYzaK8Ekq6opqfhKFK&pc=1&charset=gbk&entityNum=9".format(kw)
            yield scrapy.Request(
                url=url,
                meta={"keyword": keyword, "level": level, "num": num, "related_results": related_results},
                callback=self.parse1
            )

    def parse1(self, response):
        """Handle the recsys related-search API response.

        Yields one item per related keyword found, then recurses into a
        new Zhidao search per keyword until the depth limit ``num`` is
        reached. An empty result page is treated as anti-crawl blocking:
        the fingerprint is dropped and the spider is closed.
        """
        keyword = response.meta["keyword"]
        num = response.meta["num"]
        level = response.meta["level"]
        related_results = response.meta["related_results"]

        related_kws = response.xpath("//div[@class='c-row']/span/a/text()").extract()
        if related_kws:
            found = []
            for text in related_kws:
                if text:
                    found.append(text)
                    yield dict(
                        keyword=keyword,
                        text=text,
                        level=level,
                        related_results=related_results,
                    )
            if level < num:
                level += 1
                for k in found:
                    yield scrapy.Request(
                        url='https://zhidao.baidu.com/search?word={}'.format(k),
                        meta={"keyword": k, "level": level, "num": num},
                        callback=self.parse
                    )
            else:
                print("抓取完成！！！")
        else:
            # Empty result list usually means we were anti-crawled: drop
            # the fingerprint so the URL can be retried, then stop.
            kw = urllib.parse.quote(keyword, safe='/', encoding='gbk', errors=None)
            err_url = "https://m.baidu.com/recsys/ui/api/rs?query={}&ak=ZQ4m31EXvKem1HPYzaK8Ekq6opqfhKFK&pc=1&charset=gbk&entityNum=9".format(kw)
            self.del_fingerprint(err_url)
            print("被反扒爬虫结束运行！！！")
            raise CloseSpider

    def del_fingerprint(self, url):
        """Remove the scrapy-redis dupefilter fingerprint for *url*.

        Rebuilds scrapy's request fingerprint (sha1 over method,
        canonicalized URL, and an empty body) and SREMs it from this
        spider's dupefilter set so a failed request can be re-crawled.
        """
        print(url)
        fp = hashlib.sha1()
        fp.update(to_bytes('GET'))
        fp.update(to_bytes(canonicalize_url(url)))
        fp.update(b'')
        print(fp.hexdigest())
        # This set holds this spider's crawled-request fingerprints.
        print(self.red.srem("baiduzhidaokeyword_new:dupefilter", fp.hexdigest()))
