from jieba import lcut
from gensim.similarities import SparseMatrixSimilarity
from gensim.corpora import Dictionary
from gensim.models import TfidfModel
import numpy as np
import re
from common.custom_task_data import CustomTaskData

class TFIDFSimilarity:
    """Detect near-duplicate task detail records via TF-IDF cosine similarity.

    For each task, every detail title is indexed with gensim.  Titles whose
    pairwise similarity reaches 0.75 are treated as duplicates; all but the
    record with the largest detail id are flagged through
    ``CustomTaskData.update_task_detail_status``.
    """

    def __init__(self):
        # Data-access layer for task/detail queries and status updates.
        self.custom_task = CustomTaskData()
        # title text -> detail record id.
        # NOTE(review): duplicate title strings collapse to the last id seen —
        # confirm titles are unique within a task.
        self.title_id = {}
        # Ordered title strings; list positions match the gensim corpus rows.
        self.data = []

    def train(self, task_id):
        """Build the TF-IDF similarity index over all detail titles of *task_id*.

        Populates ``self.data`` / ``self.title_id`` and fits
        ``self.dictionary``, ``self.tfidf`` and ``self.sparse_matrix``.
        """
        self.title_id = {}
        self.data = []
        records = self.custom_task.query_task_detail_all(task_id)
        for r in records:
            # r[3] is the title text, r[0] the detail id (per the query's
            # column layout — defined in CustomTaskData).
            self.data.append(r[3])
            self.title_id[r[3]] = r[0]
        # Tokenize with jieba, then fit dictionary + TF-IDF model over the
        # bag-of-words corpus and build the similarity index.
        texts = [lcut(s) for s in self.data]
        self.dictionary = Dictionary(texts)
        corpus = [self.dictionary.doc2bow(text) for text in texts]
        self.tfidf = TfidfModel(corpus)
        tf_texts = self.tfidf[corpus]
        num_features = len(self.dictionary.token2id)
        self.sparse_matrix = SparseMatrixSimilarity(tf_texts, num_features)

    def check(self):
        """Flag duplicates, keeping only the record with the largest detail id."""
        for d in self.data:
            # id of the current "keeper" among the duplicates seen so far;
            # 0 means "none yet" (assumes real detail ids are non-zero).
            index = 0
            for sim in self.get_similarities(d, 6):
                if sim[1] < 0.75:
                    # Results are sorted by descending similarity, so no
                    # later entry can reach the threshold either.
                    break
                current_index = self.title_id[self.data[sim[0]]]
                if index == 0:
                    # First hit is normally the record itself (similarity 1.0).
                    index = current_index
                    continue
                if index < current_index:
                    # Newly seen record has a larger id: flag the old keeper.
                    self.custom_task.update_task_detail_status(index)
                    index = current_index
                else:
                    self.custom_task.update_task_detail_status(current_index)

    def get_similarities(self, string, topN = 20):
        """Return up to *topN* ``(corpus_index, similarity)`` pairs for
        *string*, sorted by descending similarity against the trained corpus.
        """
        text = lcut(string)
        kw_vector = self.dictionary.doc2bow(text)
        tf_kw = self.tfidf[kw_vector]
        similarities = self.sparse_matrix.get_similarities(tf_kw)
        # argsort is ascending; slice takes the top-N entries in reverse
        # (highest similarity first).
        index = np.argsort(similarities)[:-topN-1:-1]
        return [(i, s) for i, s in zip(index, similarities[index])]

    def do(self):
        """Run duplicate detection over every task except fund-news tasks."""
        tasks = self.custom_task.query_task_all()
        for task in tasks:
            # Fund news can contain highly similar yet distinct articles
            # (net-asset-value updates), so skip tasks whose name matches.
            if re.search("基金", task[1]):
                continue
            self.train(task[0])
            self.check()

# Sample titles kept for ad-hoc experimentation with get_similarities
# (not used by the pipeline below).
data = ['霍乱，由于O1群霍乱弧菌，霍乱生物型所致',
        '古典生物型霍乱',
        '霍乱，由于O1群霍乱弧菌，埃尔托生物型所致']

if __name__ == "__main__":
    # Guard the entry point so importing this module does not immediately
    # query the database and update record statuses.
    s_tools = TFIDFSimilarity()
    s_tools.do()
    #print(s_tools.get_similarities('埃尔托生物霍乱',3))