import fileModel


def doc_revet(path):
    """Invert a corpus file into per-word document-index lists.

    Reads the corpus word map via ``fileModel.read_wordMap(path)``, dumps it
    to ``path + ".words"`` (one "word id" pair per line), then writes
    ``path + ".worddoc"`` where line *i* lists the (0-based) line numbers of
    every document in ``path`` that contains word id *i*.

    :param path: path to the whitespace-tokenized corpus, one document per line.
    """
    wordmap = fileModel.read_wordMap(path)
    with open(path + ".words", 'w', encoding='utf-8') as wordmap_writer:
        for word, word_id in wordmap.items():
            wordmap_writer.write(word)
            wordmap_writer.write(" ")
            # word ids index word_doc below, so they are ints -- convert
            # before writing, since file.write() only accepts str.
            wordmap_writer.write(str(word_id))
            wordmap_writer.write("\n")

    # One bucket per word id; bucket i collects the stringified line numbers
    # of every document containing word i.
    word_doc = [[] for _ in wordmap]
    with open(path, 'r', encoding='utf-8') as f:
        for doc_index, line in enumerate(f):
            for token in line.split():
                word_doc[wordmap[token]].append(str(doc_index))

    with open(path + '.worddoc', 'w', encoding='utf-8') as f:
        for bucket in word_doc:
            f.write(' '.join(bucket))
            f.write("\n")


def recover(phi_path, wordmap_path):
    """Normalize, transpose and reorder a phi matrix.

    Each row of ``phi_path`` is normalized to sum to 1; the matrix is then
    transposed so every output line corresponds to one word (column), and the
    lines are reordered through the id mapping in ``wordmap_path`` (each line
    "a b" maps position ``b`` to output row ``a``). The result is written to
    ``phi_path + ".phi_theta"``.
    """
    # Map: column position (second field) -> destination row (first field).
    id_map = {}
    with open(wordmap_path, 'r', encoding='utf-8') as f:
        for raw in f:
            parts = raw.split()
            id_map[int(parts[1])] = int(parts[0])

    # Row-normalize phi, keeping values as strings for later joining.
    phi_rows = []
    with open(phi_path, 'r', encoding='utf-8') as f:
        for raw in f:
            row = [float(tok) for tok in raw.split()]
            total = sum(row)
            phi_rows.append([str(v / total) for v in row])

    # Transpose: one output line per original column.
    columns = [' '.join(col) for col in zip(*phi_rows)]

    # Scatter columns into their mapped positions.
    reordered = [None] * len(columns)
    for pos, col in enumerate(columns):
        reordered[id_map[pos]] = col

    with open(phi_path + ".phi_theta", 'w', encoding='utf-8') as f:
        for col in reordered:
            f.write(col)
            f.write("\n")


def recover_theta(doc_path, theta_path, word_path):
    """Average per-word topic rows into per-document distributions.

    Reads a word->row mapping from ``word_path`` ("word index" per line) and
    per-word topic rows from ``theta_path``; then, for every document line in
    ``doc_path``, averages the theta rows of its words and writes one
    space-separated distribution per document to ``theta_path + ".theta"``.

    An empty document line yields an all-zero distribution instead of a
    ZeroDivisionError.
    """
    wordmap = {}
    with open(word_path, 'r', encoding='utf-8') as f:
        for line in f:
            parts = line.split()
            wordmap[parts[0]] = int(parts[1])

    theta = []
    with open(theta_path, 'r', encoding='utf-8') as f:
        for line in f:
            theta.append([float(tok) for tok in line.split()])
    # Every mapped word must have exactly one theta row.
    assert len(wordmap) == len(theta)

    # Hoisted loop invariant: number of topics per row.
    num_topics = len(theta[0])
    with open(doc_path, 'r', encoding='utf-8') as f, \
            open(theta_path + ".theta", 'w', encoding='utf-8') as w:
        for line in f:
            words = line.split()
            acc = [0.0] * num_topics
            for word in words:
                # Distinct loop variable (the original shadowed the outer
                # enumerate index here).
                for topic, value in enumerate(theta[wordmap[word]]):
                    acc[topic] += value
            # Guard empty lines: dividing by 1 leaves the all-zero row intact.
            length = len(words) or 1
            w.write(' '.join(str(v / length) for v in acc))
            w.write("\n")


# NOTE(review): hardcoded absolute Windows path -- adjust for your machine.
# This call runs at import time; consider guarding it with
# `if __name__ == "__main__":` so importing this module has no side effects.
root = "G:/experiment/revert_lda/new-tweet/"
recover_theta(root+"new-tweet.data",root+"LDA_0.1_0.01_100__/0/model-final.theta",root+"new-tweet.words")

# doc_revet("G:\\experiment\\revert_lda\\baiduQA\\baiduQA.data")
# recover("G:/experiment/revert_lda/news/LDA_0.1_0.01_100/0/model-final.phi",
#         "G:/experiment/revert_lda/news/LDA_0.1_0.01_100/0/model-final.wordmap")