# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import hashlib
import time

from scrapy.utils.python import to_bytes

import re
from pymysql import *
from snownlp import SnowNLP
from w3lib.url import canonicalize_url

from KeywordSpider.settings import mysql_conf

class KeywordspiderPipeline(object):
    """Persist crawled items into MySQL.

    Two kinds of items flow through this pipeline:

    * related-keyword items (several "...keyword..." spiders) go into
      ``bx_caiji_keywords``, tagged with a per-spider ``from_source`` code;
    * article items (``baidu_kwdetail``, ``sohu_news``) go into
      ``bx_caiji_keywords_news``.

    The MySQL connection is opened in :meth:`open_spider` and closed in
    :meth:`close_spider`.
    """

    # Spider name -> `from_source` code stored with each row in
    # bx_caiji_keywords. Values taken from the original per-branch constants.
    KEYWORD_SOURCES = {
        'baidukeyword_new': 1,
        'sogoukeyword': 3,
        'baiduzhidaokeyword_new': 5,
        'sogouweixinkeyword': 6,
        'baidusug': 7,
    }

    def open_spider(self, spider):
        """Connect to MySQL when the spider starts."""
        self.mysql_conf = mysql_conf
        self.conn = connect(
            host=self.mysql_conf.get('host'),
            port=self.mysql_conf.get('port'),
            database=self.mysql_conf.get('db'),
            user=self.mysql_conf.get('user'),
            password=self.mysql_conf.get('passwd'),
            charset=self.mysql_conf.get('charset'),
        )
        # Cursor used for all reads/writes below.
        self.cs = self.conn.cursor()
        # A redis connection (self.red) used by del_fingerprint was created
        # here historically; restore it before re-enabling that method.
        # self.red = redis.StrictRedis(host='192.168.3.191', port=6379, db=11)

    def close_spider(self, spider):
        """Release the cursor and the connection when the spider finishes."""
        self.cs.close()
        self.conn.close()

    def del_fingerprint(self, url, name):
        """Remove the request fingerprint of *url* from the redis dupefilter
        set *name*, so a failed request can be re-crawled.

        NOTE(review): relies on ``self.red``, which is only created by
        commented-out code in ``open_spider`` — calling this without first
        restoring that redis connection raises AttributeError.
        """
        print(url)
        # Mirrors Scrapy's request fingerprint for a body-less GET:
        # sha1(method + canonical URL + empty body).
        fp = hashlib.sha1()
        fp.update(to_bytes('GET'))
        fp.update(to_bytes(canonicalize_url(url)))
        fp.update(b'')
        print(fp.hexdigest())
        # The set holds this spider's URL fingerprints.
        self.red.srem(name, fp.hexdigest())

    def _lookup_parent_id(self, keyword):
        """Return the id of *keyword* in bx_caiji_keywords, or None.

        Uses a parameterized query: the previous string-formatted SQL was
        open to injection via crawled keywords containing quotes.
        """
        self.conn.ping(reconnect=True)
        self.cs.execute(
            'select id from bx_caiji_keywords where keyword=%s', (keyword,))
        row = self.cs.fetchone()
        return row[0] if row else None

    def _save_keyword(self, item, from_source):
        """Insert one related-keyword row into bx_caiji_keywords.

        ``item["keyword"]`` is the parent keyword that was searched;
        ``item["text"]`` is the related keyword that was discovered.
        """
        parent_keyword = item["keyword"]
        parent_id = self._lookup_parent_id(parent_keyword)
        sql = ('insert ignore into bx_caiji_keywords '
               '(parent_keyword,keyword,parent_id,level,se_results_num,'
               'addtime,from_source) values(%s,%s,%s,%s,%s,%s,%s)')
        self.conn.ping(reconnect=True)
        self.cs.execute(sql, (parent_keyword, item["text"], parent_id,
                              item["level"], item["related_results"],
                              int(time.time()), from_source))
        self.conn.commit()

    def _save_news(self, item, abstract):
        """Insert one article row into bx_caiji_keywords_news.

        *abstract* is passed in because the two news spiders obtain it
        differently (scraped directly vs. SnowNLP summary).
        """
        # Single quotes in the body have always been normalized to double
        # quotes here; kept for compatibility with existing rows.
        content = item["content"].replace("'", '"')
        sql = ('insert ignore into bx_caiji_keywords_news '
               '(key_id,keyword,title,source_url,abstract,content,img_url,'
               'site,addtime,from_source,status) '
               'values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)')
        self.conn.ping(reconnect=True)
        # site / from_source / status were hard-coded in both original news
        # branches ("搜狐网", 1, 0); preserved as-is.
        self.cs.execute(sql, (item["key_id"], item["keyword"], item["title"],
                              item["url"], abstract, content, item["img_url"],
                              "搜狐网", int(time.time()), 1, 0))
        self.conn.commit()

    def process_item(self, item, spider):
        """Route *item* to the appropriate table based on the spider name.

        Returns the item so downstream pipelines still receive it (the
        original version returned None, silently dropping every item for
        any later pipeline in ITEM_PIPELINES).
        """
        from_source = self.KEYWORD_SOURCES.get(spider.name)
        if from_source is not None:
            self._save_keyword(item, from_source)
        elif spider.name == 'baidu_kwdetail':
            # Abstract comes straight from the scraped item.
            self._save_news(item, item["abstract"])
        elif spider.name == 'sohu_news':
            # Summarize the article body into a one-sentence abstract;
            # fall back to the abstract scraped with the item.
            text = item.get("content_txt")
            if text:
                abstract = ''.join(SnowNLP(text).summary(1))
            else:
                abstract = item.get("abstract")
            self._save_news(item, abstract)
        return item

