import codecs
from gensim import corpora,models,similarities
import os
import time

def tokenization(filePath):
    """Read a whitespace-pre-tokenized text file and return its words.

    The input file is expected to contain tokens separated by spaces
    (e.g. the output of a word segmenter), one or more per line.

    :param filePath: path of the text file to read
    :return: list of non-empty word strings, in file order
    """
    result = []
    # 'with' guarantees the file handle is closed (the original leaked it).
    # NOTE(review): assumes the corpus files are UTF-8 encoded — confirm.
    with open(filePath, 'r', encoding='utf-8') as fopen:
        for line in fopen:
            # str.split() with no argument splits on any whitespace run and
            # drops empty strings, so the trailing '\n' no longer sticks to
            # the last word of each line (the original split(' ') kept it,
            # which made "word" and "word\n" distinct dictionary entries).
            result.extend(line.split())
    return result

# ---- Load and tokenize every file in the corpus directory ----
startTime = time.time()
corpus = []  # list of token lists, one entry per document
fileRoot = "./cutfile2/"
# fileRoot = "./testCutFile/"
for filename in os.listdir(fileRoot):
    # NOTE(review): plain string concatenation works because fileRoot ends
    # with '/'; os.path.join would be more robust.
    fileSinglePath = fileRoot + filename
    corpus.append(tokenization(fileSinglePath))

# print(corpus)
print("文档总数：" +str(len(corpus)))
endTime = time.time()
print("读取文件时间：",endTime-startTime)

# Build the bag-of-words model.
# `corpus` is a collection of documents, each split into a list of words,
# and `dictionary` maps every unique word to an integer id.
startTime = time.time()
dictionary = corpora.Dictionary(corpus)
# print(dictionary)
print("词语数量：",len(dictionary))

# Convert each document into a sparse (word_id, count) vector.
doc_vectors = [dictionary.doc2bow(text) for text in corpus]
# print(doc_vectors)
endTime = time.time()
print("建立词袋模型时间：",endTime-startTime)

# Build the TF-IDF model from the bag-of-words vectors and re-weight
# every document vector with it.
startTime = time.time()
tfidf = models.TfidfModel(doc_vectors)
tfidf_vectors = tfidf[doc_vectors]
# print("tfidf_vectors : "+str(len(tfidf_vectors)))
# print(len(tfidf_vectors[0]))
endTime = time.time()
print("建立TF-IDF模型时间：",endTime-startTime)

def LoadFile():
    """Query the LSI similarity index for every document and persist all
    highly-similar pairs to ./vector/simsLSI.txt.

    Output format, one line per document:
        <doc_id>:<other_id>,<score> <other_id>,<score> ...
    Only pairs with similarity score >= 0.8 are written, and a document is
    never paired with itself.

    Relies on the module-level `corpus`, `doc_vectors`, `lsi` and `index`
    objects built above; truncates and rewrites the output file.
    """
    sTime = time.time()
    # 'with' guarantees the output file is flushed and closed
    # (the original never closed it).
    with open("./vector/simsLSI.txt", "w") as fSave:
        for item in range(len(corpus)):
            print("加载第 ", item, " 个文件")
            startTime = time.time()
            # Project the document into LSI space, then query the index for
            # its similarity against every document in the corpus.
            que_lsi = lsi[doc_vectors[item]]
            sims = index[que_lsi]
            fSave.write(str(item) + ":")
            for docId, score in enumerate(sims):
                if docId == item:
                    continue  # skip self-similarity (always ~1.0)
                if score >= 0.8:
                    fSave.write(str(docId) + "," + str(score) + " ")
            fSave.write("\n")
            endTime = time.time()
            print("计算相似度时间：", endTime - startTime)
    eTime = time.time()
    print("总时间: ",eTime-sTime)

# ---- Build the LSI (latent semantic indexing) model ----
# NOTE(review): the original `global lsi` / `global index` statements were
# removed — `global` is a no-op at module level, since these assignments
# already create module-level names.
lsi = models.LsiModel(tfidf_vectors,id2word=dictionary,num_topics=500)
lsi_vector = lsi[tfidf_vectors]
# for vet in lsi_vector:
#     print(vet)

# ---- Build the similarity matrix over the LSI document vectors ----
# startTime = time.time()
# index = similarities.MatrixSimilarity(tfidf_vectors)
index = similarities.MatrixSimilarity(lsi_vector)
# endTime = time.time()
# print("计算相似矩阵时间：",endTime-startTime)

LoadFile()

