import os
import jieba
import re
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
from nltk.corpus import wordnet
# import synonyms
import copy
import pickle
import json
# Select key words for each article (TF-IWF with synonym merging).

def _load_thesaurus():
    """Read the HIT-IRLab synonym thesaurus.

    Returns:
        (syn_dict, extend_dict) where
        syn_dict    maps each word to its representative word (the first
                    word of its synonym group), and
        extend_dict maps each representative word to the full list of
                    group members.
    """
    syn_dict = dict()
    extend_dict = dict()
    # NOTE(review): opened with the platform default encoding, matching the
    # original code; the 2005 HIT file is commonly GBK — confirm if ported.
    with open("HIT-IRLab-同义词词林（扩展版）_full_2005.3.3.txt", "r") as f:
        for line in f:
            words = line.strip('\n').split()
            words = words[1:]  # drop the leading category-code column
            if not words:      # guard against blank/short lines (original crashed)
                continue
            for w in words:
                if w not in syn_dict:
                    syn_dict[w] = words[0]
            extend_dict[words[0]] = words
    return syn_dict, extend_dict


def _load_articles(path):
    """Load pickled articles from *path*.

    Each pickle holds a dict with keys "context" (a list of tokens),
    "url", "time" and "title".

    Returns:
        (text_list, url_list, time_list, title_list) — parallel lists;
        text_list holds each article's tokens joined by single spaces.
    """
    text_list, url_list, time_list, title_list = [], [], [], []
    for name in os.listdir(path):
        # os.path.join instead of a hard-coded '\\' so the script also runs
        # on non-Windows systems; the context manager closes the file
        # (the original leaked every handle).
        with open(os.path.join(path, name), 'rb') as f:
            data = pickle.load(f, encoding='bytes')
        url_list.append(data["url"])
        time_list.append(data["time"])
        title_list.append(data["title"])
        # Join tokens with trailing single spaces, as the original did.
        text_list.append("".join(w + " " for w in data["context"]))
    return text_list, url_list, time_list, title_list


def _merge_synonyms(syn_dict, vocabulary_list, all_list, passage_list):
    """Fold each word's counts into its representative synonym.

    Mutates *vocabulary_list* in place (new representative words are
    appended with fresh vector positions) and returns the possibly-grown
    (all_list, passage_list), since numpy appends rebind the arrays.
    """
    # Snapshot before the loop: representatives appended below must not
    # themselves be iterated.
    vocab_words = list(vocabulary_list.keys())
    vocab_locs = list(vocabulary_list.values())
    for idx in range(len(vocab_words)):
        word = vocab_words[idx]
        loc = vocab_locs[idx]      # this word's position in the vectors
        rep = syn_dict.get(word)   # its representative synonym, if any
        if rep is None or rep == word:
            continue  # not in the thesaurus, or already the representative
        if rep in vocabulary_list:
            # Representative already has a column: add our counts to it.
            rep_loc = vocabulary_list[rep]
            all_list[rep_loc] += all_list[loc]
            all_list[loc] = 0
            for i in range(len(passage_list)):
                passage_list[i][rep_loc] += passage_list[i][loc]
                # BUG FIX: the original zeroed passage_list[i][rep_loc] here,
                # destroying the count it had just merged; the *source*
                # word's slot is the one that must be cleared.
                passage_list[i][loc] = 0
        else:
            # Representative absent from the vocabulary: append a new column
            # holding this word's counts, then clear the old column.
            rep_loc = len(all_list)
            vocabulary_list[rep] = rep_loc
            all_list = np.append(all_list, [all_list[loc]], axis=0)
            all_list[loc] = 0
            new_col = []
            for i in range(len(passage_list)):
                new_col.append(passage_list[i][loc])
                passage_list[i][loc] = 0
            passage_list = np.c_[passage_list, new_col]
    return all_list, passage_list


def key_words():
    """Select key words for each article using TF-IWF with synonym merging.

    Pipeline:
      1. Load the HIT synonym thesaurus and the pickled articles in
         ``data_clean1``.
      2. Build a bag-of-words model over all articles.
      3. Merge every word's counts into its representative synonym.
      4. Score words per article by TF-IWF and take the top 10% of each
         article's word count as its keywords.
      5. Expand the keyword union with full synonym groups and pickle it
         to ``all_key_words_list_add_syn_extend``.

    Side effects: writes ``passage_word_count2``, ``test.txt`` and
    ``all_key_words_list_add_syn_extend`` in the working directory.
    """
    syn_dict, extend_dict = _load_thesaurus()
    text_list, url_list, time_list, title_list = _load_articles('data_clean1')

    # Bag-of-words model over the whole corpus.
    cv = CountVectorizer(stop_words=[])
    cv_fit = cv.fit_transform(text_list)

    # Per-article total word count (sizes each article's keyword list below).
    passage_word_count = cv_fit.toarray().sum(axis=1)
    print(passage_word_count)
    with open("passage_word_count2", "wb") as f1:
        pickle.dump(passage_word_count, f1)

    vocabulary_list = copy.deepcopy(cv.vocabulary_)  # word -> vector position
    all_list = cv_fit.toarray().sum(axis=0).copy()   # corpus-wide frequency per word
    passage_list = copy.deepcopy(cv_fit.toarray())   # per-article frequency per word

    # Synonym merging via the HIT thesaurus.
    all_list, passage_list = _merge_synonyms(
        syn_dict, vocabulary_list, all_list, passage_list)

    # TF: each word's share of its article's (post-merge) word count.
    tf = []
    for passage in passage_list:
        total = np.sum(np.array(passage))
        # Guard empty articles instead of dividing by zero.
        tf.append(passage / total if total else passage.astype(float))
    # IWF: total corpus frequency over per-word frequency; 0 for columns
    # whose counts were merged away.  (log2 variant kept out, as original.)
    corpus_total = np.sum(np.array(all_list))
    iwf = [corpus_total / c if c != 0 else 0 for c in all_list]
    tf_iwf = [passage * iwf for passage in tf]

    # One dict per article: word -> TF-IWF score.
    all_dict = []
    for p in range(len(tf_iwf)):
        all_dict.append({w: tf_iwf[p][loc] for w, loc in vocabulary_list.items()})

    # Debug dump of all scores.
    with open("test.txt", "w") as f12:
        f12.write(json.dumps(all_dict))

    # Per-article word list sorted by descending TF-IWF.
    sort_dict = [sorted(d.items(), key=lambda x: x[1], reverse=True)
                 for d in all_dict]

    key_word = []          # per-article keyword lists (kept for downstream use)
    all_key_words = set()  # union of all keywords
    for p in range(len(all_dict)):
        # Top 10% of the article's pre-merge word count, capped at the
        # vocabulary size so we never over-index sort_dict.
        k = min(int(passage_word_count[p] / 10), len(sort_dict[p]))
        top = [sort_dict[p][j][0] for j in range(k)]
        key_word.append(top)
        all_key_words.update(top)
    all_key_words_list = list(all_key_words)

    # Expand each keyword with its full synonym group, then deduplicate.
    all_key_words_extend = []
    for word in all_key_words_list:
        all_key_words_extend.append(word)
        if word in extend_dict:
            all_key_words_extend += extend_dict[word]
    all_key_words_extend = list(set(all_key_words_extend))

    # BUG FIX: the original pickled the *un-extended* list into the file
    # whose name promises the extended one; dump the extended list.
    with open("all_key_words_list_add_syn_extend", "wb") as fm:
        pickle.dump(all_key_words_extend, fm)

# Guard the entry point so importing this module does not kick off the
# full (slow, file-writing) keyword-extraction pipeline.
if __name__ == "__main__":
    key_words()





