import jieba

from utils.sql_handler import SqlHandler
import re
from config import Config
import json
from utils.triple_extraction import TripleExtractor
from utils.title_info import LtpParser
import datetime
from utils.redis_handler import RedisHandler
from utils.log import Log
import logging
from tqdm import tqdm
import time
import torch
import jieba
import jieba.posseg as pseg


class Correlation:
    """Links a news event to related historical events.

    Candidate recalls are first filtered by lexical overlap between titles
    (a symmetric Tversky coefficient, i.e. the Jaccard index), then scored
    by a TorchScript text-similarity model. The model score is boosted or
    penalised by shared named entities extracted from the titles (people,
    locations, organisations, English tokens) with hand-tuned weights.
    """

    def __init__(self, config):
        self.config = config
        Log(config.log_dir)  # side effect: configures the logging handlers
        self.current = []
        self.parse = LtpParser(config)
        self.red = RedisHandler(config)
        self.COVID = config.COVID_RE  # compiled regex matching COVID-related titles
        self.model = torch.jit.load(config.LTP_DIR + 'textsim.pt')
        self.model.eval()
        # Vocabulary mapping character -> id; id 1 is the OOV id (see getinputs).
        # Context manager fixes the file-handle leak in the original version.
        with open(config.stopword_path + '/word2id.json', 'r', encoding='utf-8') as fh:
            self.word2idx = json.load(fh)
        self.count = 1  # running counter of events that produced results
        with SqlHandler(self.config) as sql:
            self.history = sql.searchhistory()

    def correlation_event(self, group_id):
        """Find up to five historical events related to ``group_id``.

        Writes a human-readable report to ``simchart.txt`` and labelled
        title pairs (1 = related, 0 = rejected) to ``valid.txt``. Both files
        are opened in append mode, so output accumulates across runs.
        Returns None; results are only written to the files / stdout.
        """
        # Context managers fix the file-handle leak of the original code.
        with open('simchart.txt', 'a+', encoding='utf-8') as file, \
                open('valid.txt', 'a+', encoding='utf-8') as valid, \
                SqlHandler(self.config) as sql:
            result = []  # list of (res dict, final score) pairs
            sample = sql.search_by_group_id(group_id)
            # 'event_time' instead of 'time' — avoids shadowing the stdlib module.
            title, event_time, classes = sample.get('title'), sample.get('gtime'), sample.get('category')
            info = self.getinfo(title)
            words = info.get('words')
            history_data = sql.search_by_words(words)
            for recall in history_data:
                r_title, r_time, r_classes = recall.get('title'), recall.get('gtime'), recall.get('category')
                if title == r_title: continue
                # Only consider recalls at least one day older than the event.
                if event_time.timestamp() - r_time.timestamp() < 86400: continue
                r_info = self.getinfo(r_title)
                r_words = r_info.get('words')
                if len(r_words) <= 2: continue  # too few tokens to compare reliably
                score = self.Tversky(words, r_words)             # word-level overlap
                same = self.Tversky(list(title), list(r_title))  # character-level overlap
                if classes == r_classes: score *= 1.2
                # Related (score >= 0.2) but not a near-duplicate title (same < 0.8).
                if score >= 0.2 and same < 0.8:
                    res = {'title': recall.get('title'),
                           'group_id': recall.get('ext0'),
                           'gtime': int(recall.get('gtime').timestamp())}
                    # Skip recalls that near-duplicate an already accepted result.
                    if any(self.Tversky(list(r_title), list(prev[0].get('title'))) >= 0.5
                           for prev in result):
                        valid.write(f"{title}\t{r_title}\t{0}\n")
                        continue
                    t3 = self.getinputs(title, r_title)
                    with torch.no_grad():  # inference only; skip autograd bookkeeping
                        pred = self.model(t3)
                    # Log-odds of the "similar" class vs the "dissimilar" class.
                    sim = torch.log(pred[0][1] / pred[0][0]).item()
                    print(title, r_title, sim, score)
                    if classes == r_classes: sim *= 1.3
                    # COVID titles: only link them when they share a location.
                    if self.COVID.search(title) or self.COVID.search(r_title):
                        if self.COVID.search(title) and self.COVID.search(r_title):
                            if info.get('areas') or r_info.get('areas'):
                                if set(info.get('areas')).intersection(set(r_info.get('areas'))):
                                    result.append((res, sim * 1.6 + 10 * score + 5))
                                    valid.write(f"{title}\t{r_title}\t{1}\n")
                                    continue
                                else:
                                    sim *= 0.1
                            else:
                                sim += 3
                        else:
                            sim *= 0.5
                    # Identical proper-noun ('nz') set is a strong accept signal.
                    if info.get('comp') and r_info.get('comp'):
                        if info.get('comp') == r_info.get('comp'):
                            result.append((res, sim * 1.5 + 5 * score))
                            valid.write(f"{title}\t{r_title}\t{1}\n")
                            continue
                        else:
                            sim *= 0.5
                    # Person names compared at character level; >1 shared char accepts.
                    if info.get('names') or r_info.get('names'):
                        if len(set(''.join(info.get('names'))).intersection(set(''.join(r_info.get('names'))))) > 1:
                            result.append((res, sim * 1.7 + 5 * score))
                            valid.write(f"{title}\t{r_title}\t{1}\n")
                            continue
                        if r_info.get('names'):
                            sim *= 0.3
                    # Shared English tokens accept; disjoint sets penalise.
                    if info.get('eng') and r_info.get('eng'):
                        if set(info.get('eng')).intersection(set(r_info.get('eng'))):
                            result.append((res, sim * 1.5 + 5 * score))
                            valid.write(f"{title}\t{r_title}\t{1}\n")
                            continue
                        else:
                            sim *= 0.3
                    # Shared locations accept only with enough lexical overlap.
                    if info.get('areas') and r_info.get('areas'):
                        if set(info.get('areas')).intersection(set(r_info.get('areas'))):
                            if score > 0.3:
                                result.append((res, sim * 1.2 + 5 * score))
                                valid.write(f"{title}\t{r_title}\t{1}\n")
                                continue
                        else:
                            sim *= 0.3
                    # Very high lexical overlap accepts regardless of the model.
                    if score > 0.5:
                        result.append((res, sim + 15 * score))
                        continue
                    # Model is not confident enough — reject.
                    if sim < 3.5:
                        valid.write(f"{title}\t{r_title}\t{0}\n")
                        print(f"------{title}\t{r_title}\tsim{sim}")
                        continue
                    result.append((res, sim + 5 * score))
                    valid.write(f"{title}\t{r_title}\t{1}\n")
            if not result: return
            # Keep only the five best-scoring related events.
            result = sorted(result, key=lambda x: x[1], reverse=True)
            result = result[:5]
            print(self.count, '-' * 6 + title + '-' * 6)
            file.write('-' * 6 + title + '-' * 6 + '\n')
            self.count += 1
            for i in result:
                print(i[0].get('title'), i[1])
                file.write(i[0].get('title') + '\n')
            file.write('\n')
            print()

    def getinfo(self, title):
        """Tokenise ``title`` with jieba and bucket tokens by LTP POS tag.

        Returns a dict with:
        - words: deduplicated tokens (dict preserves first-seen order);
        - names: person names ('nh'), areas: locations ('ns'),
          comp: other proper nouns ('nz'), abb: abbreviations ('j'),
          eng: non-Chinese tokens ('ws').
        """
        words = jieba.lcut(title)
        psg = self.parse.getpseg(words)
        wp = {w: p for w, p in zip(words, psg)}  # also dedupes repeated tokens
        words = list(wp.keys())
        names = [w for w, p in wp.items() if p.startswith('nh')]
        areas = [w for w, p in wp.items() if p.startswith('ns')]
        comp = [w for w, p in wp.items() if p.startswith('nz')]
        abb = [w for w, p in wp.items() if p.startswith('j')]
        eng = [w for w, p in wp.items() if p.startswith('ws')]
        return {'words': words, 'names': names, 'areas': areas, 'comp': comp, 'abb': abb, 'eng': eng}

    def Tversky(self, list_1, list_2):
        """Set-overlap similarity of two sequences, in [0.0, 1.0].

        With symmetric weighting this is the Jaccard index
        |A ∩ B| / |A ∪ B|. Returns 0.0 when either input is empty or the
        sets are disjoint. (Sets are built once, not four times.)
        """
        if not list_1 or not list_2: return 0.0
        s1, s2 = set(list_1), set(list_2)
        inter = float(len(s1 & s2))
        # |A∩B| + |A\B| + |B\A| == |A∪B|, so divide by the union size directly.
        return inter / len(s1 | s2) if inter else 0.0

    def getinputs(self, t1, t2):
        """Encode two title strings as a (2, 1, 16) integer tensor.

        Each character is mapped through word2idx (1 = out-of-vocabulary),
        truncated to 16 characters and right-padded with 0.
        """
        ids1 = [self.word2idx.get(ch, 1) for ch in t1][:16]
        ids2 = [self.word2idx.get(ch, 1) for ch in t2][:16]
        ids1.extend([0] * (16 - len(ids1)))
        ids2.extend([0] * (16 - len(ids2)))
        return torch.stack((torch.tensor([ids1]), torch.tensor([ids2])), 0)

    def get_correlation(self, group_id):
        """Return whether the title of ``group_id`` is already known in Redis."""
        with SqlHandler(self.config) as sql:
            title = sql.groupid_title(group_id)
            return self.red.title_exist(title)

    def run(self):
        """Correlate every event in the current batch."""
        with SqlHandler(self.config) as sql:
            groupid_list = sql.search_current()
            # 'row' instead of 'id' — avoids shadowing the builtin.
            for row in tqdm(groupid_list):
                if row:
                    self.correlation_event(row[0])

    def search_groupid(self, groupid):
        """Correlate a single event by its group id."""
        self.correlation_event(groupid)

    def valid(self):
        """Replay the 1000 most recent labelled-positive events for inspection."""
        with SqlHandler(self.config) as sql:
            # Added the missing space before 'order by' — the original relied on
            # MySQL's lenient lexing of `'1'order`.
            h = "select ext0 from sina_news_hot_search where ext0 != '' and ext1='1' order by gtime desc limit 1000"
            res = sql.large_search(h)
            data = res.drop_duplicates(keep='first')
            for row in tqdm(data.values):
                if row: self.correlation_event(row[0])
            print('-' * 50)

    def test(self):
        """Replay all labelled-negative events, newest first."""
        with SqlHandler(self.config) as sql:
            h = "select ext0 from sina_news_hot_search where ext0 != '' and ext1='0' order by gtime desc"
            groupid_list = sql.single_search(h)
            for row in tqdm(groupid_list):
                if row:
                    self.correlation_event(row[0])


def _main():
    """Script entry point: replay recent labelled events through the correlator."""
    cfg = Config()
    correlator = Correlation(cfg)
    # Other one-off runs used during development:
    #   correlator.test()
    #   correlator.search_groupid('543196548538662912')
    #   print(correlator.get_correlation('540644900176498688'))
    correlator.valid()


if __name__ == '__main__':
    _main()
