import fileModel
from gensim.models import Doc2Vec
from gensim.models.doc2vec import TaggedLineDocument
import word2vector

def preprocess(path):
    """Prefix every line of the file at *path* with its 0-based line index.

    Writes the result to a sibling file ``path + ".process"`` (each output
    line is ``"<index> <original line>"``, original newlines preserved).

    :param path: path to a UTF-8 text file.
    :return: path of the generated ``.process`` file.
    """
    out_path = path + ".process"
    # Context managers guarantee both handles are closed even if an
    # exception occurs mid-copy (the original leaked them on error).
    with open(path, 'r', encoding='utf-8') as src, \
         open(out_path, 'w', encoding='utf-8') as dst:
        for index, line in enumerate(src):
            dst.write(str(index) + " " + line)
    return out_path

def generate_doc_vec(path):
    """Write one averaged word-embedding vector per line of *path*.

    Trains (or loads) a word-vector model via ``word2vector.doTrainModel``,
    then for each line of the input file sums the embeddings of its
    whitespace-separated tokens and divides by the token count, writing the
    resulting vector as space-separated floats to ``path + ".theta"``.

    NOTE(review): out-of-vocabulary tokens contribute nothing to the sum
    but ARE counted in the divisor — preserved from the original code;
    confirm this is the intended averaging scheme.

    :param path: path to a UTF-8, whitespace-tokenized corpus file.
    """
    model = word2vector.doTrainModel(path)
    print(model.vocab)
    # Py3 fix: dict views are not subscriptable (`keys()[0]` raised
    # TypeError); take an arbitrary first key to probe the vector length.
    d_l = len(model[next(iter(model.vocab))])
    # Context managers fix the original leak: `fw` was never closed, so
    # buffered output could be lost; `f` was never closed either.
    with open(path, 'r', encoding='utf-8') as f, \
         open(path + ".theta", 'w', encoding='utf-8') as fw:
        for line in f:
            tokens = line.split()
            # Bug fix: original `[0 for item in d_l]` iterated over an int
            # (TypeError). We need a zero vector of length d_l.
            onewrite = [0.0] * d_l
            for item in tokens:
                if item not in model:
                    continue
                vec = model[item]
                for i in range(d_l):
                    onewrite[i] += vec[i]
            # Guard: blank lines would otherwise raise ZeroDivisionError.
            if tokens:
                for i in range(d_l):
                    onewrite[i] /= len(tokens)
            fw.write(' '.join(str(v) for v in onewrite) + "\n")





if __name__ == "__main__":
    # Guarded entry point: importing this module must not kick off a
    # training run against a hard-coded local path.
    generate_doc_vec("G:/intellij/TopicModelForShortText/My_LDA/data/news/news.data")