# -*- coding: utf-8 -*-
import ast
import hashlib
import re
from copy import deepcopy

import redis
import scrapy
from pymysql import *
from scrapy.exceptions import CloseSpider
from scrapy.utils.python import to_bytes
from w3lib.url import canonicalize_url

from KeywordSpider.settings import mysql_conf, redis_conf
from KeywordSpider.custom_settings import custom_settings_for_baidu_news
from remote_rpc.Client import RpcClient

class BaiduKwdetailSpider(scrapy.Spider):
    """Search Baidu for each keyword (scoped to sohu.com articles via the
    query suffix), follow the matching result links, and yield one item
    per scraped Sohu article.

    Spider argument:
        kws: string literal of a list of (keyword, key_id) tuples,
             e.g. "[('黑龙江保险', 1)]" (passed with ``-a kws=...``).

    Yields dicts with keys: url, site, keyword, key_id, abstract, title,
    img_url, content.
    """
    custom_settings = custom_settings_for_baidu_news
    # NOTE(review): class-level mutable list is shared across instances;
    # it is never appended to at runtime (only in removed dead code) and
    # is kept solely for backward compatibility.
    err_urls = []
    name = 'baidu_kwdetail'
    allowed_domains = ['www.baidu.com']

    # Compiled once.  Dots are escaped: the original r"www.sohu.com/a/"
    # let "." match any character (e.g. "www0sohu_com/a/" would match).
    _SOHU_URL_RE = re.compile(r"www\.sohu\.com/a/")
    _SOHU_NAME_RE = re.compile(r"搜狐网")

    def __init__(self, kws=None, *args, **kwargs):
        super(BaiduKwdetailSpider, self).__init__(*args, **kwargs)
        if kws:
            # ast.literal_eval instead of eval(): `kws` arrives from the
            # command line and must never be executed as arbitrary code.
            self.kws = ast.literal_eval(kws)
        else:
            self.kws = []

    def start_requests(self):
        """Open the Redis and MySQL connections, then issue one Baidu
        search request per (keyword, key_id) pair."""
        # Redis holds the dupefilter fingerprints; see del_fingerprint().
        self.red = redis.StrictRedis(host=redis_conf['host'], port=redis_conf['port'],
                                     db=redis_conf['db'], password=redis_conf['passwd'])
        self.conn = connect(
            host=mysql_conf.get('host'),
            port=mysql_conf.get('port'),
            database=mysql_conf.get('db'),
            user=mysql_conf.get('user'),
            password=mysql_conf.get('passwd'),
            charset=mysql_conf.get('charset'),
        )
        self.cs = self.conn.cursor()
        print(self.kws)
        for a in self.kws:
            if not a[0]:
                continue
            keyword, key_id = a[0], a[1]
            # The "    www.sohu.com/a/" suffix restricts Baidu results to
            # Sohu article URLs.
            url = 'http://www.baidu.com/s?wd={}'.format(keyword + '    www.sohu.com/a/')
            self.start_urls.append((url, key_id))
            yield scrapy.Request(
                url=url,
                meta={"keyword": keyword, "N": 1, "key_id": key_id},
                callback=self.parse,
                errback=self.parse_err,
                dont_filter=True
            )

    def _extract_site(self, div):
        """Return the source marker for one Baidu result div.

        Tries the three places Baidu renders the result source, in order.
        Returns the literal "www.sohu.com/a/" or "搜狐网" when the result
        points at Sohu, otherwise ''.
        """
        text = div.xpath(".//div[@class='f13']/a[1]/b/text()").extract_first()
        if not text:
            text = div.xpath(".//div[@class='f13']/a[1]/text()").extract_first()
        if text:
            m = self._SOHU_URL_RE.match(text)
            return m.group() if m else ''
        text = div.xpath(".//div[@class='f13']/a[1]/span/text()").extract_first()
        if text:
            m = self._SOHU_NAME_RE.match(text)
            return m.group() if m else ''
        return ''

    def parse(self, response):
        """Parse one Baidu result page; follow every result that points
        at a Sohu article."""
        keyword = response.meta["keyword"]
        key_id = response.meta["key_id"]
        # meta["N"] was the page counter for the (disabled) next-page
        # pagination; it is carried in meta but no longer read here.
        #
        # The result-count banner is missing on Baidu's verification
        # (captcha) page — its absence means we were blocked.
        related = response.xpath("//div[@class='nums']/span/text()").extract()
        if not related:
            # Blocked: drop this search URL's fingerprint so it can be
            # retried, then shut the whole spider down.
            url = 'http://www.baidu.com/s?wd={}'.format(keyword + '    www.sohu.com/a/')
            self.del_fingerprint(url)
            raise CloseSpider
        div_list = response.xpath("//div[@id='content_left']/div")
        if not div_list:
            # Banner present but no result divs: allow a later re-crawl.
            self.del_fingerprint(response.url)
            return
        for div in div_list:
            site = self._extract_site(div)
            if not site:
                continue
            url = div.xpath(".//div[@class='f13']/a[1]/@href").extract_first()
            item = dict(
                url=url,
                site=site,
                keyword=keyword,
                key_id=key_id
            )
            yield scrapy.Request(
                url=item["url"],
                meta={"item": deepcopy(item)},
                callback=self.parse1,
                errback=self.parse_err,
            )

    def parse1(self, response):
        """Parse a Sohu article page and yield the finished item."""
        item = response.meta["item"]
        # Sohu uses several page templates; try each title location in turn.
        title = response.xpath("//div[@class='text-title']/h1//text()").extract()
        if not title:
            title = response.xpath("//h3[@class='article-title']//text()").extract()
        if not title:
            title = response.xpath("//h2[@class='title-info']//text()").extract()
        title = "".join(title).strip() if title else ''
        content_txt = '\n'.join(response.xpath("//article//p//text()").extract())
        # The page counts as empty when the body collapses to nothing
        # after stripping newlines and spaces.
        con = content_txt.replace("\n", "").replace(" ", "")
        if con:
            # Remote summarisation service produces the abstract.
            item["abstract"] = RpcClient('GetSummary', content=content_txt)
            img_srcs = response.xpath("//article//@src").extract()
            # NOTE(review): str() of the list is the original behaviour —
            # downstream apparently stores the repr, not a joined string.
            item["img_url"] = str(img_srcs) if img_srcs else ''
            item["title"] = title
            item["content"] = response.xpath("//article").extract_first()
            yield item
        else:
            # Empty article: drop the fingerprint so it can be retried.
            self.del_fingerprint(item["url"])

    def parse_err(self, failure):
        """Errback for failed requests: log the failure and move on."""
        self.logger.error(repr(failure))

    def del_fingerprint(self, url):
        """Remove *url*'s dupefilter fingerprint from Redis so the
        request can be scheduled again after a failure.

        Recomputes the fingerprint by hand as
        sha1(method + canonical URL + empty body), which must stay in
        sync with whatever the dupefilter uses to write the set.
        """
        print(url)
        fp = hashlib.sha1()
        fp.update(to_bytes('GET'))
        fp.update(to_bytes(canonicalize_url(url)))
        fp.update(b'')
        print(fp.hexdigest())
        # The set holding this spider's URL fingerprints.
        print(self.red.srem("baidu_kwdetail:dupefilter", fp.hexdigest()))
