# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import redis
import pymysql
import time
from . import settings


class BaiduPipeline(object):
    """Dispatch Baidu search-result URLs into per-site Redis start-URL sets.

    Each item's URL is routed to the Redis set that seeds the matching
    downstream news spider (``sinaNews`` or ``neteaseNews``).
    """

    def __init__(self):
        # One connection pool reused for every Redis call in this pipeline.
        redis_pool = redis.ConnectionPool(
            host=settings.REDIS_HOST, port=settings.REDIS_PORT, password='')
        self.r = redis.Redis(connection_pool=redis_pool)

    def open_spider(self, spider):
        # Store the current search keyword in Redis and clear any
        # leftover start-URL queues from a previous run.
        self.r.set('CURRENT_KEY', spider.kw)
        self.r.delete('sinaNews:start_urls')
        self.r.delete('neteaseNews:start_urls')

    def process_item(self, item, spider):
        # Decide which news-site spider should crawl this URL.
        if 'sina' in item['url']:
            spider_name = "sinaNews"
        elif '163' in item['url']:
            spider_name = "neteaseNews"
        else:
            # FIX: previously an unmatched URL fell through with
            # spider_name == '' and was pushed onto the bogus key
            # ':start_urls'; now such items simply pass through.
            return item
        # NOTE(review): the 'finished_news_urls' duplicate check is
        # currently disabled; sadd itself de-duplicates within one run.
        self.r.sadd(spider_name + ':start_urls', item['url'])
        return item


class NewssitePipeline(object):
    """Persist crawled news items into a per-keyword MySQL table.

    A table named ``<keyword>_<spider.name>`` is created lazily on the
    first item (or whenever the keyword stored in Redis changes); every
    stored URL is also recorded in the Redis set ``finished_news_urls``.
    """

    def open_spider(self, spider):
        # Connections are opened per spider run and released in close_spider.
        redis_pool = redis.ConnectionPool(
            host=settings.REDIS_HOST, port=settings.REDIS_PORT, password='')
        self.r = redis.Redis(connection_pool=redis_pool)
        self.conn = pymysql.connect(host=settings.MYSQL_HOST, user=settings.MYSQL_USER,
                                    passwd=settings.MYSQL_PASSWD, db=settings.MYSQL_DBNAME, charset='utf8')
        self.cur = self.conn.cursor()

        # flag forces table (re-)initialisation on the first item or on a
        # keyword change; key caches the keyword currently in use.
        self.flag = True
        self.key = ''

    def close_spider(self, spider):
        # FIX: the cursor and connection were previously never closed,
        # leaking a MySQL connection on every spider run.
        self.cur.close()
        self.conn.close()

    def _table_name(self, spider):
        # Table names cannot be bound as SQL parameters, so they are built
        # from the space-sanitised keyword plus the spider name.
        return self.key.replace(" ", "_") + '_' + spider.name

    def process_item(self, item, spider):
        """Insert one news item into the keyword's table and log its URL."""
        raw = self.r.get('CURRENT_KEY')
        # FIX: guard against a missing Redis key, which previously raised
        # TypeError inside bytes.decode(None).
        current_key = bytes.decode(raw) if raw is not None else ''
        if self.key == '' or self.key != current_key:
            self.flag = True
        # Create the destination table on first use for this keyword.
        if self.flag:
            self.flag = False
            self.key = current_key  # keyword fetched from Redis

            # NOTE(review): `content` is varchar(255) while `source` is
            # varchar(2000) — these look swapped; confirm intended widths.
            sql = '''CREATE TABLE IF NOT EXISTS `zhifou`.`{}`  (
                `newsID` int(10) NOT NULL AUTO_INCREMENT,
                `title` varchar(255) NULL,
                `url` varchar(255) NULL,
                `content` varchar(255) NULL,
                `source` varchar(2000) NULL,
                `sourceTitle` varchar(255) NULL,
                `date` timestamp(0) NULL,
                `crawl_time` timestamp(0) NULL,
                `has_show` int(1) DEFAULT 0,
                PRIMARY KEY (`newsID`)
                );'''.format(self._table_name(spider))
            self.cur.execute(sql)
            self.conn.commit()

        crawl_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())

        # Column values are passed as parameters; only the table name is
        # formatted in (it cannot be parameterised in SQL).
        sql = '''insert ignore into {}(
                            url, title, sourceTitle, content, source, date, crawl_time)
                    VALUES (%s, %s, %s, %s, %s, %s, %s)'''.format(self._table_name(spider))
        self.cur.execute(sql, (item.get("url"), item.get("title"),
                               item.get("sourceTitle"), item.get("content"),
                               item.get("source"), item.get("date"), crawl_time))
        self.conn.commit()

        # Record the URL so future runs can skip already-crawled pages.
        self.r.sadd('finished_news_urls', item['url'])
        return item
