import traceback
import os
import json
from collections import defaultdict, Counter

import mysql.connector
from scrapy import log
from snownlp import SnowNLP
import jieba
import jieba.analyse

from query_news_info.settings import DB_CONF
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html


class QueryNewsInfoPipeline(object):
    """Scrapy item pipeline for crawled news.

    For every item it scores sentiment with SnowNLP, tallies
    positive/negative counts per site type, appends the raw item (plus
    its sentiment score) to a per-site JSON-lines file, and — when the
    spider closes — writes aggregate statistics and jieba-extracted
    keywords to MySQL.
    """

    def __init__(self):
        # Per site_type tallies: [positive_num, negative_num].
        self.counter = defaultdict(lambda: [0, 0])
        # Per site_type list of unicode "title + body" texts, consumed by
        # write_statistic_info() for keyword extraction.
        self.content_lst = defaultdict(list)
        # Load jieba's dictionary eagerly so the first item is not slow.
        jieba.initialize()

    def news_sentiment_analysis(self, text, threshold):
        """Classify *text* as 'positive' or 'negative' with SnowNLP.

        :param text: news body, a py2 byte string (UTF-8) or unicode.
        :param threshold: minimum SnowNLP sentiment score (0..1) for the
            text to count as positive.
        :returns: (label, score) where label is 'positive'/'negative',
            or (None, -1) when the text is blank.
        """
        if not text.strip():
            return None, -1
        # SnowNLP expects unicode; decode py2 byte strings leniently.
        if isinstance(text, str):
            text = text.decode("utf-8", "ignore").strip()
        result = SnowNLP(text)
        label = 'positive' if result.sentiments >= threshold else 'negative'
        return label, result.sentiments

    def process_item(self, item, spider):
        """Score the item, update per-site counters, and append the item
        (with its sentiment score) to <spider.rdir>/<query>/<site>/<start_time>.
        """
        try:
            r, v = self.news_sentiment_analysis(item["text"], 0.5)
        except Exception:
            # Best effort: a failed analysis must not drop the item.
            r, v = None, -1
            log.msg("nlp analysis error:%s!" % traceback.format_exc(), level=log.WARNING)

        if r == 'positive':
            self.counter[item["site_type"]][0] += 1
        elif r == 'negative':
            self.counter[item["site_type"]][1] += 1

        # Accumulate unicode text for later keyword extraction.
        self.content_lst[item["site_type"]].append(item["title"].decode("utf-8", "ignore")
                                                   + item["text"].decode("utf-8", "ignore"))

        rdir = "/".join((spider.rdir, spider.query, item["site"]))
        if not os.path.isdir(rdir):
            os.makedirs(rdir)
        fname = rdir + "/" + spider.start_time
        with open(fname, "a+") as f:
            tmp = dict(item)
            tmp["sentiments_value"] = v
            line = json.dumps(tmp, ensure_ascii=False) + "\n"
            # ensure_ascii=False yields unicode whenever non-ASCII text is
            # present; encode before writing to the byte-mode file to avoid
            # an implicit-ASCII UnicodeEncodeError.
            if isinstance(line, unicode):
                line = line.encode("utf-8")
            f.write(line)
        return item

    def write_statistic_info(self, spider):
        """Persist per-site sentiment counts and top-30 keywords to MySQL."""
        cnx = mysql.connector.connect(**DB_CONF)
        try:
            cursor = cnx.cursor()
            sql_1 = ("INSERT INTO  news_stat "
                    "(contentid, query, sitetype, fetchdate, result_num, positive_num, negative_num)"
                    "VALUES (%s, %s, %s, %s, %s, %s, %s)")
            sql_2 = ("INSERT INTO  news_keyword "
                    "(contentid, query, sitetype, fetchdate, keywords)"
                    "VALUES (%s, %s, %s, %s, %s)")
            data_lst_1 = []
            data_lst_2 = []

            for site_type, (positive_num, negative_num) in self.counter.items():
                data_1 = (spider.content_id, spider.query, site_type, spider.fetch_time,
                          positive_num + negative_num, positive_num, negative_num)
                data_2 = (spider.content_id, spider.query, site_type, spider.fetch_time,
                          u",".join(jieba.analyse.extract_tags(u"".join(self.content_lst[site_type]),
                                                               topK=30)).encode("utf-8", "ignore").strip())
                data_lst_1.append(data_1)
                data_lst_2.append(data_2)
            if data_lst_1:
                cursor.executemany(sql_1, data_lst_1)
            if data_lst_2:
                cursor.executemany(sql_2, data_lst_2)
            cnx.commit()
            cursor.close()
        finally:
            # Always release the connection, even if an insert/commit fails.
            cnx.close()

    def close_spider(self, spider):
        """On shutdown, persist the seen-URL set and flush stats to MySQL."""
        with open(spider.existed_urls_file_path, "w") as f:
            info = "\n".join(spider.existed_urls_sets)
            f.write(info)
        self.write_statistic_info(spider)

