# -*- coding: utf-8 -*-
"""
@author:xieyabin
@file: sogouweixinkeyword.py
@time: 2019/02/21
"""
import ast
import hashlib
import re
import time

import redis
import scrapy
from pymysql import *
from scrapy.exceptions import CloseSpider
from scrapy.utils.python import to_bytes
from w3lib.url import canonicalize_url

from KeywordSpider.settings import mysql_conf, redis_conf
from KeywordSpider.custom_settings import custom_settings_for_sogouweixinkeyword

class SogoukeywordSpider(scrapy.Spider):
    """Breadth-first crawler of Sogou Weixin "related search" keywords.

    ``kws`` is passed from the command line as the string form of a list of
    ``(keyword, level_flag, id)`` tuples.  Each seed keyword is queried on
    weixin.sogou.com and the related-search terms found on the result page
    are fed back as new queries, up to a depth decided by the seed's level
    flag ('0' -> 2 tiers, '-1' -> 6 tiers).  A page missing the expected
    result markup is treated as an anti-bot block: the request fingerprint
    is removed from Redis and the spider shuts down.
    """

    custom_settings = custom_settings_for_sogouweixinkeyword
    name = 'sogouweixinkeyword'
    allowed_domains = ['sogou.com']
    # NOTE(review): class-level mutable lists are shared between instances;
    # kept only for backward compatibility with code that may read them.
    start_urls = []
    err_urls = []

    # Single source of truth for the search URL (was duplicated inline).
    SEARCH_URL = 'https://weixin.sogou.com/weixin?query={}&type=2'

    def __init__(self, kws=None, *args, **kwargs):
        super(SogoukeywordSpider, self).__init__(*args, **kwargs)
        if kws:
            # ast.literal_eval instead of eval(): kws originates outside the
            # process (scrapy -a argument), so it must never be executed as
            # arbitrary code.  literal_eval accepts the same list-of-tuples
            # literals the old eval() was fed.
            self.kws = ast.literal_eval(kws)
        else:
            # Empty list rather than '' -- both iterate to nothing, but a
            # list matches the type literal_eval produces above.
            self.kws = []

    def start_requests(self):
        """Open Redis/MySQL connections and emit one request per seed keyword.

        Connections are opened here (not in ``__init__``) so the spider
        object can be constructed without touching external services.
        """
        self.red = redis.StrictRedis(host=redis_conf['host'], port=redis_conf['port'],
                                     db=redis_conf['db'], password=redis_conf['passwd'])
        self.conn = connect(
            host=mysql_conf.get("host"),
            port=mysql_conf.get("port"),
            database=mysql_conf.get("db"),
            user=mysql_conf.get("user"),
            password=mysql_conf.get("passwd"),
            charset=mysql_conf.get("charset"),
        )
        self.cs = self.conn.cursor()
        print(self.kws)
        for seed in self.kws:
            # The level flag decides how deep the related-keyword crawl goes.
            if seed[1] == '0':
                depth_limit = 2
            elif seed[1] == '-1':
                depth_limit = 6
            else:
                # Unknown flag: skip this seed entirely.
                continue
            keyword = seed[0]
            print(keyword)
            yield scrapy.Request(
                url=self.SEARCH_URL.format(keyword),
                meta={"keyword": keyword, "num": depth_limit},
                callback=self.parse,
            )

    def parse(self, response):
        """Extract related keywords from a result page and recurse.

        Yields one item dict per related keyword found and, while the
        current tier is below the seed's depth limit, one follow-up request
        per discovered keyword.

        Raises:
            CloseSpider: when the page lacks the ``#text`` element,
                i.e. Sogou served a block/captcha page instead of results.
        """
        keyword = response.meta["keyword"]
        depth_limit = response.meta["num"]
        # The '#text' div only appears on a genuine result page; its absence
        # means the spider was blocked by the target site.
        souse = response.xpath("//div[@id='text']/text()").extract()
        if not souse:
            url = self.SEARCH_URL.format(keyword)
            # Drop the fingerprint so this URL is retried on a later run.
            self.del_fingerprint(url)
            print("^" * 50)
            print("被反扒爬虫结束运行！！！")
            raise CloseSpider('blocked by anti-bot page')

        # Total result count as a digit string (e.g. '约1,000条结果' -> '1000'),
        # or None when the counter element is missing.
        related = response.xpath("//div[@class='mun']/text()").extract_first()
        if related:
            related_results = ''.join(re.findall(r"\d+", related))
        else:
            related_results = None

        # Current crawl tier; seed requests carry no 'level' in meta -> tier 1.
        level = response.meta.get("level") or 1

        key_list = response.xpath("//tbody/tr/td/a/text()").extract()
        if key_list:
            found = []
            for text in key_list:
                if not text:
                    continue
                found.append(text)
                yield dict(
                    keyword=keyword,
                    text=text,
                    level=level,
                    related_results=related_results,
                )
            if level < depth_limit:
                for kw in found:
                    yield scrapy.Request(
                        url=self.SEARCH_URL.format(kw),
                        meta={"keyword": kw, "level": level + 1, "num": depth_limit},
                        callback=self.parse,
                    )
            else:
                print("抓取完成！！！")

    def del_fingerprint(self, url):
        """Remove *url*'s fingerprint from the Redis dupefilter.

        Replicates Scrapy's default request fingerprint (SHA1 over the
        method, canonical URL and empty body) so the blocked request is no
        longer considered "seen" and can be retried later.
        """
        print(url)
        fp = hashlib.sha1()
        fp.update(to_bytes('GET'))
        fp.update(to_bytes(canonicalize_url(url)))
        fp.update(b'')
        print(fp.hexdigest())
        # 'sogouweixinkeyword:dupefilter' is this spider's fingerprint set.
        print(self.red.srem("sogouweixinkeyword:dupefilter", fp.hexdigest()))

    def closed(self, reason):
        """Scrapy lifecycle hook: release MySQL resources on shutdown.

        The cursor/connection opened in ``start_requests`` previously
        leaked; Scrapy calls this automatically when the spider stops.
        """
        cursor = getattr(self, 'cs', None)
        if cursor is not None:
            cursor.close()
        conn = getattr(self, 'conn', None)
        if conn is not None:
            conn.close()