# coding:utf-8

import datetime
import MySQLdb
import MySQLdb.cursors
import pymongo
import json
import codecs
from twisted.enterprise import adbapi
# import scrapy
# from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem


# Json
class JsonWithEncodingPipeline(object):
    """Write each scraped item as one UTF-8 JSON line to people_comment.json."""

    def __init__(self):
        self.file = codecs.open('people_comment.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        # One JSON object per line; ensure_ascii=False keeps non-ASCII text readable.
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item

    def close_spider(self, spider):
        # Bug fix: Scrapy invokes close_spider(spider) on pipelines; the original
        # spider_closed() method was never called, so the file was never closed.
        self.file.close()

    # Kept for backward compatibility with any caller using the old name.
    def spider_closed(self):
        self.file.close()


# Mysql
class MySQLStorePipeline(object):
    """Insert items into MySQL asynchronously via Twisted's adbapi pool."""

    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        # Build the connection pool from the Scrapy project settings.
        dbargs = dict(
            host=settings['MYSQL_HOST'],
            db=settings['MYSQL_DBNAME'],
            user=settings['MYSQL_USER'],
            passwd=settings['MYSQL_PASSWD'],
            charset='utf8',
            cursorclass=MySQLdb.cursors.DictCursor,
            use_unicode=True,
        )
        dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)
        return cls(dbpool)

    # Called by Scrapy for every scraped item.
    def process_item(self, item, spider):
        deferred = self.dbpool.runInteraction(self.__do_insert, item, spider)
        # Bug fix: without an errback, database failures were silently discarded
        # by Twisted ("Unhandled error in Deferred").
        deferred.addErrback(self._handle_error, item, spider)
        return item

    # Log insert failures instead of swallowing them.
    def _handle_error(self, failure, item, spider):
        spider.logger.error('MySQL insert failed for item %r: %s', item, failure)

    # Write one row per item; parameterized query, safe from SQL injection.
    def __do_insert(self, conn, item, spider):
        conn.execute("""insert into test(title, href, time, content, img)values(%s, %s, %s, %s, %s)""",
                     (item['title'], item['href'], item['time'], item['content'], item['img']))


# mongodb
class MongoDBPipeline(object):
    """Store items in MongoDB, skipping recent duplicates by title."""

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        # Pull connection settings from the Scrapy project settings.
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE', 'items')
        )

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]
        self.collection = self.db['news']

    def close_spider(self, spider):
        self.client.close()

    # Return the (stripped) titles of records from the last 30 days for `source`.
    # NOTE(review): 'time' values are compared as strings — assumes they are
    # stored in "YYYY-MM-DD HH:MM:SS" format so lexicographic order matches
    # chronological order.
    def find_title(self, source):
        begin_time = str(datetime.date.today() - datetime.timedelta(30)) + " 00:00:00"
        end_time = str(datetime.datetime.now().date()) + " 23:59:59"
        qres = self.collection.find({'time': {'$gt': begin_time, '$lt': end_time}, 'source': source})
        return [info['title'].strip() for info in qres]

    def process_item(self, item, spider):
        # Bug fix: the original iterated `for data in item`, which yields KEYS,
        # so `if not data` tested key truthiness and never caught empty VALUES.
        # (The old `valid` flag was also dead code — DropItem aborts first.)
        for field, value in dict(item).items():
            if not value:
                raise DropItem("Missing {0}!".format(field))
        # Insert only if no item with the same title was stored recently.
        if item['title'] not in self.find_title(item['source']):
            self.collection.insert_one(dict(item))
        return item

# # Image pipeline (disabled)
# class MyImagesPipeline(ImagesPipeline):
#     def get_media_requests(self, item, info):
#         for image_url in item['image_urls']:
#             yield scrapy.Request(image_url)
#
#     def item_completed(self, results, item, info):
#         image_paths = [x['path'] for ok, x in results if ok]
#         if not image_paths:
#             raise DropItem("Item contains no images")
#         item['image_paths'] = image_paths
#         return item
