# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
import json
from collections import defaultdict

from scrapy import log
from snownlp import SnowNLP
import mysql.connector
import jieba
import jieba.analyse

from query_video_info.settings import DB_CONF

class QueryVideoInfoPipeline(object):
    """Pipeline that aggregates per-title video statistics and dumps items.

    For every scraped item it:
      * collects decoded comment texts for later keyword extraction,
      * accumulates play/like/hate/comment counts plus sentiment buckets
        per video title (all UGC items are pooled under the key "ugc"),
      * appends the item as one JSON line to a per-spider output file.
    When the spider closes, the aggregated statistics and the top comment
    keywords are persisted to MySQL (tables video_stat / video_keyword).
    """

    def __init__(self):
        # spider.name -> open output file handle (created in open_spider)
        self.files = {}
        # key -> [play_num, like_num, hate_num, comment_num,
        #         review_num_positive, review_num_negative]
        self.counter = defaultdict(lambda: [0, 0, 0, 0, 0, 0])
        # Every decoded comment text seen so far; fed to jieba at close time.
        self.comments_lst = []

    def comment_sentiment_analysis(self, text, threshold):
        """Classify a comment as 'positive' or 'negative' with SnowNLP.

        :param text: comment text (bytes or unicode); blank text yields None
        :param threshold: minimum SnowNLP sentiment score (0..1) counted
                          as positive
        :returns: 'positive', 'negative', or None for blank input
        """
        if not text.strip():
            return None
        # isinstance instead of `type(text) == str`; checking for bytes also
        # keeps the decode branch correct under Python 3.
        if isinstance(text, bytes):
            text = text.decode("utf-8", "ignore")
        score = SnowNLP(text.strip()).sentiments
        return 'positive' if score >= threshold else 'negative'

    def open_spider(self, spider):
        """Create <rdir>/<query>/<domain>/<start_time> and open it for writing."""
        rdir = "/".join((spider.rdir, spider.query, spider.domain))
        if not os.path.isdir(rdir):
            os.makedirs(rdir)
        fname = rdir + "/" + spider.start_time
        # NOTE(review): opened without an explicit encoding; on Python 2,
        # writing non-ASCII unicode lines can raise UnicodeEncodeError —
        # confirm the items written here are ASCII-safe or pre-encoded.
        self.files[spider.name] = open(fname, "w")

    def process_item(self, item, spider):
        """Accumulate statistics for `item` and append it to the JSON dump.

        Expects `item` to carry play_times / up_times / down_times /
        comment_num integers, a boolean `ugc` flag, a `title`, and a
        `comment_content` list of dicts whose `comments_content` values
        are UTF-8 byte strings.
        """
        self.comments_lst.extend(
            c['comments_content'].decode("utf-8", "ignore")
            for c in item["comment_content"])
        # All user-generated content is pooled under a single counter key.
        key = "ugc" if item["ugc"] else item["title"]
        counts = self.counter[key]
        counts[0] += item["play_times"]
        counts[1] += item["up_times"]
        counts[2] += item["down_times"]
        counts[3] += item["comment_num"]
        for comment in item["comment_content"]:
            try:
                verdict = self.comment_sentiment_analysis(
                    comment["comments_content"], 0.5)
            except Exception:
                # Best effort: a failed NLP analysis must not drop the item.
                verdict = None
                log.msg("nlp analysis error!", level=log.WARNING)
            if verdict == 'positive':
                counts[4] += 1
            elif verdict == 'negative':
                counts[5] += 1
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.files[spider.name].write(line)
        return item

    def write_statistic_info(self, spider):
        """Persist the aggregated statistics and comment keywords to MySQL."""
        sql_stat = ("INSERT INTO  video_stat "
                    "(contentid, query, fetchdate, site, title, play_num, like_num, hate_num,"
                    " review_num_positive, review_num_negative)"
                    "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")

        sql_keyword = ("INSERT INTO  video_keyword "
                       "(contentid, query, site, fetchdate, keywords)"
                       "VALUES (%s, %s, %s, %s, %s)")

        rows = []
        # .items() instead of .iteritems() keeps this working on Python 3 too.
        for title, (play_num, like_num, hate_num, comment_num,
                    review_num_positive, review_num_negative) in self.counter.items():
            if not title.strip():
                continue
            reviewed = review_num_positive + review_num_negative
            if reviewed:
                # Extrapolate the sampled positive/negative ratio to the
                # total comment count reported by the site.
                negative_num = int(comment_num * float(review_num_negative) / reviewed)
            else:
                negative_num = 0
            positive_num = comment_num - negative_num
            rows.append((spider.content_id, spider.query, spider.fetch_time,
                         spider.name, title, play_num, like_num, hate_num,
                         positive_num, negative_num))

        keywords = u",".join(
            jieba.analyse.extract_tags(u"".join(self.comments_lst), topK=30)
        ).encode("utf-8", "ignore").strip()
        keyword_row = (spider.content_id, spider.query, spider.domain,
                       spider.fetch_time, keywords)

        cnx = mysql.connector.connect(**DB_CONF)
        try:
            cursor = cnx.cursor()
            try:
                # Fix: the original attached executemany to a pointless
                # `for ... else:` clause (no `break` in the loop); plain
                # sequential execution is what was intended.
                cursor.executemany(sql_stat, rows)
                cursor.execute(sql_keyword, keyword_row)
                cnx.commit()
            finally:
                cursor.close()
        finally:
            # Always release the connection, even if an INSERT fails.
            cnx.close()

    def close_spider(self, spider):
        """Close every per-spider output file, then flush statistics to MySQL."""
        for handle in self.files.values():
            handle.close()
        self.write_statistic_info(spider)