# -*- coding: utf-8 -*-
# stdlib
import ast
import hashlib
import json
import re

# third-party
import redis
import scrapy
from pymysql import *
from scrapy.utils.python import to_bytes
from w3lib.url import canonicalize_url

# project-local
from KeywordSpider.settings import mysql_conf, redis_conf
from KeywordSpider.custom_settings import custom_settings_for_sohu_news
from remote_rpc.Client import RpcClient

class SohuNewsSpider(scrapy.Spider):
    """Search sohu.com news for each configured keyword and scrape the hits.

    The spider is launched with ``-a kws="..."`` where ``kws`` is the Python
    literal of an iterable of ``(keyword, key_id)`` pairs, e.g.
    ``"[('中国保险理财排名第一', 1)]"``.  Each keyword is sent to Sohu's
    meta-search API; every non-type-3 result's detail page is fetched and an
    item with title/url/content (plus an RPC-generated abstract when the RPC
    service cooperates) is yielded per article.
    """

    custom_settings = custom_settings_for_sohu_news
    name = 'sohu_news'
    allowed_domains = ['sohu.com']
    # Sohu meta-search endpoint; {} is filled with the keyword per request.
    url = "http://search.sohu.com/search/meta?keyword={}&terminalType=pc&ip=&spm-pre=&size=30&searchType=news"

    def __init__(self, kws=None, *args, **kwargs):
        super(SohuNewsSpider, self).__init__(*args, **kwargs)
        if kws:
            # literal_eval parses the same "[('kw', 1), ...]" strings that
            # eval() did, but refuses arbitrary code from the command line.
            self.kws = ast.literal_eval(kws)
        else:
            self.kws = []

    def start_requests(self):
        """Open Redis/MySQL handles, then issue one search request per keyword."""
        # Redis holds this spider's request-fingerprint set (see del_fingerprint).
        self.red = redis.StrictRedis(host=redis_conf['host'], port=redis_conf['port'],
                                     db=redis_conf['db'], password=redis_conf['passwd'])
        # MySQL connection kept for bookkeeping; released in closed().
        self.conn = connect(
            host=mysql_conf.get('host'),
            port=mysql_conf.get('port'),
            database=mysql_conf.get('db'),
            user=mysql_conf.get('user'),
            password=mysql_conf.get('passwd'),
            charset=mysql_conf.get('charset'),
        )
        self.cs = self.conn.cursor()
        print(self.kws)
        for kw in self.kws:
            if not kw[0]:
                # Skip empty keywords instead of sending a useless query.
                continue
            keyword, key_id = kw[0], kw[1]
            yield scrapy.Request(
                url=self.url.format(keyword),
                meta={"keyword": keyword, "key_id": key_id},
                callback=self.parse,
                errback=self.parse_err,
                # Search pages must always be refetched: bypass the dupefilter.
                dont_filter=True,
            )

    def parse(self, response):
        """Parse the search API's JSON and request each article's detail page."""
        keyword = response.meta["keyword"]
        key_id = response.meta["key_id"]
        ret = json.loads(response.text)
        # Guard against a missing "data" envelope so .get('news') cannot
        # raise AttributeError on None.
        news = (ret.get('data') or {}).get('news')
        if not news:
            return
        for entry in news:
            # Skip type == 3 entries (non-article results); the name avoids
            # shadowing the builtin `type`.
            if entry.get('type') == 3:
                continue
            item = {
                "title": entry.get('title'),
                "img_url": str(entry.get('imagesList')),
                "url": entry.get('url'),
                "keyword": keyword,
                "key_id": key_id,
            }
            yield scrapy.Request(
                item['url'],
                callback=self.parse_detail,
                errback=self.parse_err,
                meta={'item': item},
            )

    def parse_detail(self, response):
        """Extract the article body and ask the RPC service for an abstract."""
        item = response.meta["item"]
        content_txt = '\n'.join(response.xpath("//article//p//text()").extract())
        # An article with no visible text is treated as a failed fetch: drop
        # its fingerprint so the URL can be retried on a later run.
        if not content_txt.replace("\n", "").replace(" ", ""):
            self.del_fingerprint(item["url"])
            return
        try:
            item["abstract"], code = RpcClient('GetSummary', content=content_txt)
        except Exception:
            # RPC unreachable: ship the raw text so nothing is lost downstream.
            print("rpc报错!!!!!!")
            item["content_txt"] = content_txt
        else:
            if int(code) == 1:
                # RPC answered but flagged an error; fall back to raw text too.
                print("rpc调用结果异常！")
                item["content_txt"] = content_txt
        item["content"] = response.xpath("//article").extract_first()
        yield item

    def parse_err(self, failure):
        """errback for every request: record the failure in the spider log."""
        self.logger.error(repr(failure))

    def del_fingerprint(self, url):
        """Remove *url*'s fingerprint from Redis so it can be re-crawled.

        Recomputes Scrapy's legacy request fingerprint (SHA1 over the HTTP
        method, the canonicalized URL and the empty request body) and removes
        it from this spider's dupefilter set.
        """
        print(url)
        fp = hashlib.sha1()
        fp.update(to_bytes('GET'))
        fp.update(to_bytes(canonicalize_url(url)))
        fp.update(b'')  # empty request body, kept for fingerprint parity
        print(fp.hexdigest())
        # srem returns the number of members removed (0 or 1).
        print(self.red.srem("sohu_news:dupefilter", fp.hexdigest()))

    def closed(self, reason):
        """Scrapy shutdown hook: release the MySQL cursor and connection."""
        try:
            self.cs.close()
            self.conn.close()
        except AttributeError:
            # start_requests never ran, so nothing was opened.
            pass

"""
寻找2019年第一条锦鲤
太康“爱心妈妈”李丹入选2月“中国好人榜”
“地铁色狼”险，这样的保险你买吗？
购买少儿保险的几大理由
美国小伙来中国玩，临走前请中国朋友吃顿小龙虾，看到账单愣了
区住建局开年第一件事：督促推进项目建设
【戏码头那些角儿】京剧第一名丑——朱世慧
流浪地球——反好莱坞的中国好莱坞
现役NBA最强五大扣将，詹皇仅排第五，第一毫无悬念
我们找到了中国人防脱发的10个方法
沈梦辰被爸妈宠成公主，早上起床第一件事就是做这个，细节见教养
美业界的超级社群导师：何沐萱联合发起第一届社群新零售大会
历届S赛拳头官方选手排名：常青树只有一人，Uzi的争议在这
中国古代文化中是否有平等精神？
艺术学院召开第一次综合素质培训会议
志玲姐姐提醒您道路千万条安全第一条
为中华民族伟大复兴中国梦而奋斗
2018年炉石第一卡牌竟然是它……
http://www.sohu.com/a/293675738_100142589
8c5e47b7055b5a6c28460e93b0ef4d9bffad28f7
1

"""
