import math
import re
from datetime import datetime

from dao.mongo_db import MongoDB
from .keyword_extractor import KeywordExtractor


class ContentLabel(object):
    """Builds and maintains content-label documents for recommendation.

    Reads raw scraped articles from the ``content_ori`` collection and
    writes enriched documents (keywords, Chinese word counts, decaying
    "hot heat" scores, engagement counters) into ``content_label``.
    """

    def __init__(self):
        self.mongo_scrapy = MongoDB(db='admin')
        self.mongo_recommendation = MongoDB(db='admin')
        self.scrapy_collection = self.mongo_scrapy.db_scrapy['content_ori']
        self.content_label_collection = self.mongo_recommendation.db_recommendation['content_label']
        self.keyword_extractor = KeywordExtractor()

    def get_data_from_mongodb(self):
        """Return a cursor over all raw scraped article documents."""
        return self.scrapy_collection.find()

    def make_content_labels(self):
        """Create one label document per scraped article and insert it.

        Copies the basic article fields, counts Chinese characters in the
        description, extracts keywords from title and description, and
        initialises the engagement counters to zero with a starting heat
        of 10000.
        """
        for data in self.get_data_from_mongodb():
            title_keywords = self.get_keyword_list(data['title'])
            desc_keywords = self.get_keyword_list(data['desc'])
            content_collection = {
                'describe': data['desc'],
                'type': data['type'],
                'title': data['title'],
                'news_date': data['times'],
                'hot_heat': 10000,
                'likes': 0,
                'read': 0,
                'collections': 0,
                # Naive UTC on purpose: decay_hot_heat/get_decay_value below
                # compare against naive-UTC timestamps as well.
                'create_time': datetime.utcnow(),
                'words_num': self.get_words_nums(data['desc']),
                # De-duplicated union of title and description keywords.
                'keywords': list(set(title_keywords + desc_keywords)),
            }
            self.content_label_collection.insert_one(content_collection)

    def get_keyword_list(self, word_list):
        """Extract keywords from *word_list* (a raw text string).

        Runs both TF-IDF and TextRank extraction over the preprocessed
        text and returns the intersection of the two result lists.
        """
        tokens = self.keyword_extractor.process_text(word_list)
        tfidf_keyword = self.keyword_extractor.get_keyword_list(tokens, param='tfidf')
        textrank_keyword = self.keyword_extractor.get_keyword_list(tokens, param='textrank')

        keyword_interact = self.keyword_extractor.keyword_interact(tfidf_keyword, textrank_keyword)
        print('keyword_interact', keyword_interact)
        return keyword_interact

    def decay_hot_heat(self):
        """Apply time decay to every document's ``hot_heat`` and persist it.

        Documents that have never been decayed fall back to ``news_date``
        as the reference timestamp.
        NOTE(review): ``news_date`` comes from the scraper ('times' field);
        it is assumed to be a datetime, not a string — confirm upstream.
        """
        for data in self.content_label_collection.find():
            new_heat = self.get_decay_value(
                temp=data['hot_heat'],
                last_updated_at=data.get("last_updated_at") or data['news_date'])
            self.content_label_collection.update_one(
                {"_id": data["_id"]},
                {"$set": {"hot_heat": new_heat,
                          "last_updated_at": datetime.utcnow()}})

    @staticmethod
    def get_words_nums(contents):
        """Return the number of Chinese (CJK unified) characters in *contents*."""
        return len(re.findall('[\u4e00-\u9fa5]', contents))

    @staticmethod
    def get_decay_value(alpha=0.01, temp=10000, last_updated_at: datetime = None):
        """Return *temp* decayed by the days elapsed since *last_updated_at*.

        Uses a slow power-law decay: ``temp / (days + 1) ** alpha``.

        Fixes vs the original implementation:
        * compares against naive-UTC ``datetime.utcnow()`` to match the
          timestamps this class writes (the original used local
          ``datetime.now()``, which skewed the decay by the UTC offset and
          could make ``days`` negative — ``days == -1`` then divided by
          ``pow(0, alpha) == 0``, raising ``ZeroDivisionError``);
        * ``last_updated_at=None`` (the declared default) and future
          timestamps no longer raise — the decay is simply skipped.
        """
        if last_updated_at is None:
            return float(temp)
        # Clamp so a future timestamp cannot produce a negative day count.
        day = max((datetime.utcnow() - last_updated_at).days, 0)
        return temp / math.pow(day + 1, alpha)
