#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
from gensim.models import word2vec


def read_data(path, splitor=None, encoding='utf-8'):
    """Read a delimited corpus file into a list of token lists.

    Args:
        path: path to a text file, one document per line.
        splitor: token delimiter; None splits on any whitespace run.
        encoding: file encoding. (Fixed: the original accepted this
            parameter but always opened the file as UTF-8.)

    Returns:
        A list with one list of tokens per line of the file.
    """
    corpus = []
    # `with` guarantees the handle is closed even if decoding fails mid-file.
    with open(path, 'r', encoding=encoding) as f:
        for line in f:
            # str.split(None) is identical to str.split(), so a single
            # call covers both the default and explicit-delimiter cases.
            corpus.append(line.strip().split(splitor))
    return corpus


def read_wordMap(path, splitor=None, encoding='utf-8'):
    """Build a word -> integer-id map from a corpus file.

    Ids are assigned in first-appearance order, starting at 0.

    Args:
        path: path to a text file, one document per line.
        splitor: token delimiter; None splits on any whitespace run.
        encoding: file encoding. (Fixed: the original accepted this
            parameter but always opened the file as UTF-8.)

    Returns:
        dict mapping each distinct token to a dense integer id.
    """
    wordmap = {}
    with open(path, 'r', encoding=encoding) as f:
        for line in f:
            # split(None) == split(), so no branch on `splitor` is needed.
            for token in line.strip().split(splitor):
                if token not in wordmap:
                    # len(wordmap) yields the next unused dense id.
                    wordmap[token] = len(wordmap)
    return wordmap


def word2doc(path):
    """Invert a corpus: for every word, collect the sentences containing it.

    Reads `path` (one sentence per line) and writes two files:
      - `path + ".wordmap"`: lines of "<word>\\t<id>", ids in
        first-appearance order;
      - `path + ".worddoc"`: first line is the vocabulary size, then one
        line per word listing every sentence that contains it (a sentence
        repeats once per occurrence of the word in it).

    Args:
        path: path to the UTF-8 corpus file.
    """
    worddocset = {}
    # `with` closes every handle even if an exception interrupts the pass
    # (the original leaked all three handles on error).
    with open(path, 'r', encoding='UTF-8') as f:
        for line in f:
            sentence = line.strip()
            for word in line.split():
                worddocset.setdefault(word, []).append(sentence)
    with open(path + ".wordmap", 'w', encoding='UTF-8') as fwm, \
         open(path + ".worddoc", 'w', encoding='UTF-8') as fwd:
        fwd.write(str(len(worddocset)) + "\n")
        for index, word in enumerate(worddocset):
            fwm.write(word + "\t" + str(index) + "\n")
            for sentence in worddocset[word]:
                fwd.write(sentence + " ")
            fwd.write("\n")


def word2doc_w2v(path, wikipath, binary=False):
    """Expand each document with in-vocabulary word2vec neighbours.

    For every line of `path`, writes to `path + ".w2vDoc"` the document's
    words plus, for each word present in the pretrained model, its top-50
    neighbours that score > 0.5 and also occur in the corpus vocabulary.

    Args:
        path: corpus file, one document per line.
        wikipath: pretrained vectors in word2vec format.
        binary: pass True when `wikipath` is a binary .bin file
            (e.g. GoogleNews-vectors-negative300.bin).
    """
    # NOTE(review): on gensim >= 1.0 this loader moved to
    # KeyedVectors.load_word2vec_format — confirm the installed version.
    model = word2vec.Word2Vec.load_word2vec_format(wikipath, binary=binary)
    wordmap = read_wordMap(path)

    with open(path, 'r', encoding='UTF-8') as f, \
         open(path + ".w2vDoc", 'w', encoding='UTF-8') as fw:
        for index, line in enumerate(f):
            words_to_write = set(line.split())
            for item in line.split():
                if item in model:
                    for neighbour, score in model.most_similar(item, topn=50):
                        # BUG FIX: the original tested `similityWord in wordmap`,
                        # i.e. a (word, score) tuple against the wordmap keys,
                        # which is always False — no neighbour was ever added.
                        if score > 0.5 and neighbour in wordmap:
                            words_to_write.add(neighbour)
            # sorted() makes the output deterministic (set order is not).
            fw.write(' '.join(sorted(words_to_write)) + "\n")
            print(index)  # progress indicator


def word2doc_w2v2(path, wikipath, binary=False):
    """Write one pseudo-document per vocabulary word via word2vec neighbours.

    For every distinct word of `path` (in first-appearance order), writes:
      - to `path + ".wordmap"`: a line "<word> <id>";
      - to `path + ".w2vDoc"`: the word plus its top-100 model neighbours
        that score > 0.2 and occur in the corpus vocabulary.

    Args:
        path: corpus file, one document per line.
        wikipath: pretrained vectors in word2vec format.
        binary: pass True when `wikipath` is a binary .bin file.
    """
    # NOTE(review): on gensim >= 1.0 this loader moved to
    # KeyedVectors.load_word2vec_format — confirm the installed version.
    model = word2vec.Word2Vec.load_word2vec_format(wikipath, binary=binary)
    wordmap = read_wordMap(path)
    # FIX: the original also opened `path` here but never read from it;
    # the pointless handle has been dropped.
    with open(path + ".w2vDoc", 'w', encoding='UTF-8') as fw, \
         open(path + ".wordmap", 'w', encoding='UTF-8') as fm:
        for index, word in enumerate(wordmap):
            fm.write(word + " " + str(index) + "\n")
            expanded = {word}
            if word in model:
                for neighbour, score in model.most_similar(word, topn=100):
                    if score > 0.2 and neighbour in wordmap:
                        expanded.add(neighbour)
            # sorted() makes the output deterministic (set order is not).
            fw.write(' '.join(sorted(expanded)) + "\n")


def generate_theta(datapath, wordmappath, wordthetapath):
    """Aggregate per-word topic distributions into per-document vectors.

    For each document (line of `datapath`), sums the theta rows of its
    words element-wise and writes the result as a space-separated line to
    `datapath + ".theta"`.

    Args:
        datapath: corpus file, one document per line.
        wordmappath: lines of "<word> <id>".
        wordthetapath: per-word topic rows, indexed by word id.

    Raises:
        KeyError: if a corpus word is missing from the word map.
    """
    data = read_data(datapath)

    wordmap = {line[0]: int(line[1]) for line in read_data(wordmappath)}

    # NOTE(review): the last column of every theta row is dropped
    # (line[0:-1], as in the original) — presumably the file carries a
    # trailing token; confirm against the tool that wrote it.
    wordtheta = [[float(item) for item in line[:-1]]
                 for line in read_data(wordthetapath)]

    num_topics = len(wordtheta[0])
    with open(datapath + ".theta", "w", encoding='UTF-8') as ff:
        for doc in data:
            doc_vec = [0] * num_topics
            for word in doc:
                row = wordtheta[wordmap[word]]  # hoist the double index
                for i in range(num_topics):
                    doc_vec[i] += row[i]
            ff.write(" ".join(str(item) for item in doc_vec) + "\n")


root = "data/news/word2Vec+wntm/"

if __name__ == "__main__":
    # Guarded entry point: the original ran this heavy job as a
    # module-level side effect on every import.

    # word2doc(root+"news.data")

    # generate_theta(root+"news.data", root+"news.data.wordmap",root+"LDA_0.1_0.01_100/0/model-final.theta")

    # word2doc_w2v2("data/news/w2d_w2v/news.data", "G:\\我的本地文件\\数据\\w2v_pretrain\\wiki.en.text.vector")

    # NOTE(review): GoogleNews-vectors-negative300.bin is word2vec *binary*
    # format; load_word2vec_format defaults to text — confirm this call
    # actually loads, and pass the binary flag if the loader supports it.
    word2doc_w2v("data/news/doc_w2v/news.data", "G:\\我的本地文件\\数据\\w2v_pretrain\\GoogleNews-vectors-negative300.bin")