# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

from scrapy.exceptions import DropItem
from scrapy.mail import MailSender
from jieba import analyse
from pymongo import MongoClient
from bson import ObjectId
from gensim import corpora, models, similarities
from util import STOP_WORK, tag_mapping
import jieba
import pymongo
from settings import DB_INFO, TFIDF_MODEL, DICTIONARY, SEND_TO
from items import NewsItem

import time
import threading
import demjson
import pickle
import sys
import numpy as np


class NewsPipeline(object):
    """Scrapy pipeline that deduplicates crawled news by TF-IDF cosine
    similarity, extracts keywords with jieba, buffers items and
    bulk-inserts them into MongoDB.  A summary mail is sent on close.
    """

    # Per-run statistics; re-initialised in open_spider so separate
    # pipeline instances never share state through the class object.
    new_count = 0      # items successfully inserted
    drop_count = 0     # items dropped as useless (empty content)
    repeat_count = 0   # items dropped as duplicates
    news_buf = list()  # items buffered for the next bulk insert

    def process_item(self, item, spider):
        """Validate, keyword-tag, deduplicate and buffer one news item.

        Raises DropItem when the item has no content or its TF-IDF
        similarity to an already-seen article exceeds 0.90.
        """
        # BUG FIX: was `len(item["content"]) is 0` — identity comparison
        # with an int literal; use a plain truthiness test instead.
        if not item["content"]:
            self.drop_count += 1
            raise DropItem("The length of 'content' is zero")
        keys = analyse.extract_tags(item["text"], withWeight=True, topK=250)
        # Keep the first five non-numeric keywords as the item's key words.
        item["key_word"] = []
        for word, weight in keys:
            if len(item["key_word"]) >= 5:
                break
            if not word.isdigit():
                item["key_word"].append({"key": word, "value": weight})
        # Normalise the source-specific tag to the project-wide vocabulary.
        # BUG FIX: was guarded by `is not` (string identity) — just assign.
        item["tag"] = tag_mapping(item["tag"])

        # Document vector built from the extracted keywords (full jieba
        # cuts were tried but made similarity search too slow).
        cut = [word for word, _ in keys]
        item["cut"] = cut
        # BUG FIX: reuse the dictionary loaded once in open_spider instead
        # of re-reading it from disk for every single item.
        vec = self.dictionary.doc2bow(cut)
        # The corpus grows with every accepted item, so the similarity
        # index has to be rebuilt per item (num_best keeps the top 20).
        index = similarities.SparseMatrixSimilarity(
            self.corpus_tfidf, num_features=self.dictionary_len, num_best=20
        )
        sim = index[self.tfidf[vec]]

        item["similar"] = []
        for doc_idx, score in sim:
            if score > 0.90:
                # Too similar to a known article: record the URL pair on the
                # spider (for the summary mail) and drop the item.
                pair = [self.news_list[doc_idx]["from_url"], item["from_url"]]
                spider.repeats.append(pair)
                self.repeat_count += 1
                raise DropItem("The news " + pair[0] + " as same as " + pair[1])
            # Store up to five most-similar articles (numpy scalar -> float
            # so the value stays BSON-serialisable).
            item["similar"].append([self.news_list[doc_idx]["_id"], float(score)])
            if len(item["similar"]) >= 5:
                break

        # Buffer the item; flush to MongoDB in batches of 20.
        _id = ObjectId()
        item["_id"] = _id
        self.news_buf.append(item)
        if len(self.news_buf) >= 20:
            self.flush()
        self.new_count += 1
        # Make the new document visible to later similarity checks.
        self.corpus_tfidf.append(self.tfidf[vec])
        self.news_list.append({
            "_id": _id,
            "cut": cut,
            "from_url": item["from_url"],
        })
        spider.logger.info("crawled:    " + item["from_url"])

    def open_spider(self, spider):
        """Connect to MongoDB and build the TF-IDF similarity state from
        up to 12000 of the most recent stored articles.
        """
        self.start_time = time.time()
        # Fresh per-run state: never mutate the shared class attributes.
        self.new_count = 0
        self.drop_count = 0
        self.repeat_count = 0
        self.news_buf = []

        self.mongoclient = MongoClient(DB_INFO["host"], DB_INFO["port"])
        self.db = self.mongoclient[DB_INFO["db_name"]]
        self.col = self.db[DB_INFO["collection"]]
        # Fetch historical news to seed the similarity corpus.
        spider.logger.info("提取数据库数据.......")
        cursor = (
            self.col.find({}, {"_id": 1, "from_url": 1, "cut": 1})
            .limit(12000)
            .sort("time", pymongo.DESCENDING)
        )
        all_news = list(cursor)
        news_docs = [news["cut"] for news in all_news]
        self.news_list = [
            {"_id": news["_id"], "from_url": news["from_url"]}
            for news in all_news
        ]
        spider.logger.info("提取完毕.......")

        # Load the token dictionary, or build it from history and persist.
        try:
            self.dictionary = corpora.Dictionary.load_from_text(DICTIONARY)
        except (FileNotFoundError, EOFError) as e:
            # BUG FIX: was logging str(FileExistsError) — the class object,
            # not the exception actually caught.
            spider.logger.warning(type(e).__name__ + "   " + str(e))
            self.dictionary = corpora.Dictionary(news_docs)
            self.dictionary.save_as_text(DICTIONARY)
        self.dictionary_len = len(self.dictionary.keys())
        # Bag-of-words corpus of the historical documents.
        corpus = [self.dictionary.doc2bow(news) for news in news_docs]

        # Load the TF-IDF model, or train it on the corpus and persist.
        try:
            self.tfidf = models.TfidfModel.load(TFIDF_MODEL)
        except (FileNotFoundError, EOFError) as e:
            spider.logger.warning(type(e).__name__ + "   " + str(e))
            self.tfidf = models.TfidfModel(corpus)
            self.tfidf.save(TFIDF_MODEL)
        # TF-IDF vectors of the historical corpus; grows as new items are
        # accepted in process_item.
        self.corpus_tfidf = list(self.tfidf[corpus])

    def close_spider(self, spider):
        """Flush the remaining buffer, mail a crawl summary, disconnect."""
        if self.news_buf:
            self.flush()
        mail = MailSender.from_settings(spider.settings)
        elapsed_minutes = (time.time() - self.start_time) / 60
        mail_subject = time.strftime("%Y%m%d") + spider.name
        repeats = demjson.encode(spider.repeats)
        # Poor man's pretty-printing of the duplicate-URL pairs.
        repeats = repeats.replace(",", "\n").replace("]", "\n]").replace("[", "[\n")
        summary = "共爬%d条数据，无用数据%d条，重复数据%d条，共耗时%f分\n" % (
            self.new_count, self.drop_count, self.repeat_count, elapsed_minutes
        )
        mail.send(to=[SEND_TO], subject=mail_subject, body=summary + repeats)
        self.mongoclient.close()
        spider.logger.info(summary)

    def flush(self):
        """Bulk-insert the buffered news items and empty the buffer."""
        self.col.insert_many(self.news_buf)
        self.news_buf.clear()


