import ast
import os
import string

import numpy as np
punc = string.punctuation + ',.，。：；、》《-=+——！@#￥%…………&啊的吗了就是'
print(punc)
# os._exit(0)
from gensim.models.keyedvectors import KeyedVectors
# This script reads pretrained word embeddings and computes word-to-word similarities.

# Load the word vectors.
def get_vocab_all(file):
    with open(file,encoding= 'utf-8') as vocab_file:
        all_lines = vocab_file.readlines()[1:]
        vocab =dict()
        all_key = set()
        for line in all_lines:
            line_parts = line.strip().split()
            key = line_parts[0]
            all_key.add(key)
            vector = line_parts[1:]
            if len(vector) != 300:
                print(key,"有问题")
            else:
                v = np.array([float(num) for num in vector])
                vocab[key] = v
    if 'unk' in all_key:
        print("又unk")
    print("词典大小为：",len(vocab),)
    return vocab,all_key

def cos_similarity(vec1, vec2):

    norm1 = np.linalg.norm(vec1)
    norm2 = np.linalg.norm(vec2)
    norm = norm1 * norm2
    dot = np.dot(vec1, vec2)
    result = dot / norm if norm > 0 else 0
    return result

# Flatten a doubly-nested list into a single flat list:
def list_duoble_flatten(lsit_of_list):
    words_flatten = []
    for word_list_ans in lsit_of_list:
        words_flatten = words_flatten + word_list_ans
    return words_flatten

# Given all answers (ans_list) under one query, output each answer's most
# "common" keyword and most "peculiar" keyword.
# word_vectors is the pretrained word-embedding lookup (word -> vector).
def key_word_sort(ans_list,word_vectors):
    new_ans_list = []
    ans_len = len(ans_list)
    if ans_len <= 1:
        return None,None,None
    # 把原本的句子按照空格拆分，每一句话就是一个词语的list，然后一个query下的所有ans是所有句子的list
    word_ans_list = [ ans.split() for ans in ans_list]

    # 读取所有ans中的一个ans
    # 所有的similarkey和percurialitykey
    simialrity_keyword_list = []
    peculiarity_keyword_list = []
    for ans_i in range(len(word_ans_list)):
        # word_list_ans是一句话中的词的list，set_this_ans是这个句子中的词去掉了重复
        set_this_ans = set(word_ans_list[ans_i])

        # 接下来想得到该query以下的所有其他ans之中的的词的list：（其实根本不需要去掉重复对吧）
        other_ans_list = word_ans_list[:ans_i]+word_ans_list[ans_i+1:]
        # 把所有的其他ans中的词语铺平
        all_other_words = list_duoble_flatten(other_ans_list)


        # 对这一个句子之中的所有词，进行他与其他句子中的所有的词的attention 的计算并存在某个词典中，因为set没有顺序，怕混了
        all_score_for_an_ans = dict()
        for words_i in set_this_ans:
            if words_i in word_vectors and words_i not in punc:
                # print("词i:",words_i,"在词表中")
                word_i_all_score = []
                for word_j in all_other_words:
                    if word_j in word_vectors and word_j not in punc:
                        # print("词j:", word_j, "在词表中")
                        att_score = cos_similarity(word_vectors[words_i],word_vectors[word_j])
                        word_i_all_score.append(att_score)
                    else:
                        pass
                        # print("词j:", word_j, "不在词表中")
                if len(word_i_all_score) == 0:
                    print("当前的长度为0句子：",set_this_ans)
                    print("当前检查的词为：",words_i,)
                    print("所有的其他ans为：",all_other_words)
                else:
                    all_score_for_an_ans[words_i] = sum(word_i_all_score)/len(word_i_all_score)

            else:
                # print("词i:",words_i,"不在词表中")
                pass
        if len(all_score_for_an_ans) == 0:
            print("当前的长度为0句子：", set_this_ans)
            print("当前的长度为0句子：", set_this_ans)
            print("所有的其他ans为：", all_other_words)
        else:
            similar_key_word = max(all_score_for_an_ans,key = all_score_for_an_ans.get)
            peculiarity_keyword = min(all_score_for_an_ans,key = all_score_for_an_ans.get)

            simialrity_keyword_list.append(similar_key_word)
            peculiarity_keyword_list.append(peculiarity_keyword)

            new_ans_list.append(ans_list[ans_i])
    ans_len_new = len(new_ans_list)
    if len(simialrity_keyword_list) != ans_len_new or len(peculiarity_keyword_list) != ans_len_new:
        print("当前的结果有问题")
    else:
        print("顺利得到当前的所有ans的key_words")
    return simialrity_keyword_list,peculiarity_keyword_list,new_ans_list


# Read every query's answer list from the src_tgtlist file and write each
# query's answers plus their similarity/peculiarity keywords.
def load_ans_all(file_src_tgtlist,out_src_tgtlist_simikeylist_perkeylist,word_vectors):
    with open(file_src_tgtlist) as src_tgt_f:
        with open(out_src_tgtlist_simikeylist_perkeylist,'w') as out_f:
            all_lines = src_tgt_f.readlines()
            for line in all_lines:
                # data = line.strip().split('\t')
                # print(data)
                line_parts = line.strip().split('\t')
                if len(line_parts) != 2:
                    print("miss，长度不为2：",line,"长度为：",len(line_parts))
                else:
                    src = line_parts[0]
                    ans_list = eval(line_parts[1])
                    simialrity_keyword_list, peculiarity_keyword_list, new_ans_list= key_word_sort(ans_list,word_vectors)
                    if simialrity_keyword_list == None:
                        continue
                    data = src +"\t" + str(new_ans_list) +"\t" + str(simialrity_keyword_list) +"\t" + str(peculiarity_keyword_list)+'\n'
                    out_f.writelines(data)

file_vocab = '/Users/tina/Downloads/dcvae-dialog/sgns.weibo.word'
file_src_tgtlist = '/Users/tina/Documents/gitlucky_dcvae/dcvae/multi-gen/data/data_fix/sample_one_src_multiple_tgt'
out_src_tgtlist_simikeylist_perkeylist = '/Users/tina/Documents/gitlucky_dcvae/dcvae/multi-gen/data/data_fix/sample_src_tgtlist,simikeylist,perkeylist'


vocab,all_key = get_vocab_all(file_vocab)
load_ans_all(file_src_tgtlist,out_src_tgtlist_simikeylist_perkeylist,vocab)