__author__ = 'dell-pc'

import logging
from gensim.models import word2vec
import fileModel
import os
import math
import numpy as np
# from sam.corpus.corpus import CorpusWriter


logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

def doTrainModel(path, num_features=200, min_word_count=30, num_workers=4,
                 context=10, downsampling=1e-3):
    """Train (or load a cached) word2vec model for the corpus at ``path``.

    The model is cached next to the corpus as ``<path>.w2v.model`` in the
    plain word2vec text format; if that file already exists, training is
    skipped and the cached model is loaded instead.

    Args:
        path: corpus file readable by ``fileModel.read_data``.
        num_features: word vector dimensionality.
        min_word_count: ignore words with total frequency below this.
        num_workers: number of training worker threads.
        context: context window size.
        downsampling: downsample threshold for frequent words.

    Returns:
        A gensim word2vec model loaded from word2vec text format.
    """
    model_name = path + ".w2v.model"

    if not os.path.exists(model_name):
        sentences = fileModel.read_data(path)
        print("Training model...")
        # NOTE(review): the corpus list is repeated 10x, which effectively
        # gives ten extra passes over the data — confirm this is intentional.
        model = word2vec.Word2Vec(sentences * 10, workers=num_workers,
                                  size=num_features,
                                  min_count=min_word_count,
                                  window=context, sample=downsampling,
                                  negative=0)

        # L2-normalize and discard training state: the model cannot be
        # trained any further, but becomes much more memory-efficient.
        model.init_sims(replace=True)

        model.save_word2vec_format(model_name)
    model = word2vec.Word2Vec.load_word2vec_format(model_name)
    return model
# Guarded so that importing this module does not kick off a long training
# run against a machine-specific hard-coded path.
if __name__ == "__main__":
    doTrainModel("G:/intellij/TopicModelForShortText/My_LDA/data4/new-tweet/new-tweet.data")

def generate_word_net(path):
    """Write a word map and a word "net" (similar-pair list) for ``path``.

    Produces two files next to the corpus:
      * ``<path>.wordmap`` — one ``word<TAB>index`` line per vocabulary word;
      * ``<path>.wordnet`` — one ``word_a word_b`` line for every unordered
        pair of vocabulary words whose word2vec similarity is positive.

    Note: the pair scan is O(V^2) in the vocabulary size V.
    """
    model = doTrainModel(path)

    # Assign each vocabulary word a dense integer id in iteration order.
    wordmap = {}
    for word in model.vocab:
        wordmap[word] = len(wordmap)

    wordmapwriter = fileModel.open_file(path + ".wordmap", "w")
    for word, idx in wordmap.items():
        wordmapwriter.write(word + "\t" + str(idx) + "\n")
    wordmapwriter.close()

    f = fileModel.open_file(path + ".wordnet", "w")
    wordlist = list(wordmap)
    word_num = len(wordlist)
    # Original code enumerated range(word_num), making index == word_one;
    # a single loop variable is equivalent.
    for i in range(word_num):
        print(i)  # progress indicator
        for j in range(i + 1, word_num):
            if model.similarity(wordlist[i], wordlist[j]) > 0:
                f.write(wordlist[i] + " " + wordlist[j] + "\n")
    f.close()


def generate_word_vector(path):
    """Export the word vectors of ``path``'s model as comma-separated lines.

    Saves the model in word2vec text format to ``<path>.w2v``, then rewrites
    that file in place keeping only the whitespace-separated tokens after the
    first one on each line, joined with commas. Note this also strips the
    first token of the "vocab_size dims" header line that the word2vec text
    format starts with.
    """
    format_name = path + ".w2v"
    model = doTrainModel(path)
    model.save_word2vec_format(format_name)

    # Context managers guarantee the handles are closed even on error.
    with open(format_name, 'r', encoding='utf-8') as f:
        lines = f.readlines()

    with open(format_name, 'w', encoding='utf-8') as f:
        for line in lines:
            f.write(",".join(line.split()[1:]) + "\n")


def readwordindex(path):
    """Build a word -> integer-id map from a whitespace-tokenized text file.

    Args:
        path: path to a UTF-8 text file; tokens are split on whitespace.

    Returns:
        dict mapping each distinct word to a 0-based id assigned in
        first-appearance order.
    """
    wordindex = {}
    # ``with`` guarantees the file is closed even if iteration raises.
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            for word in line.split():
                if word not in wordindex:
                    wordindex[word] = len(wordindex)
    return wordindex


# generate_word_net("D:/javaEE/JGibbLDA-v.1.0/data/wntm/news.data")
# most_simility("news5000/news5000.data")


def wordVec_presented_doc(path):
    """Write one document vector per corpus line to ``<path>.docvec``.

    Each document vector is the unweighted sum of the word2vec vectors of
    the document's words; words absent from the model vocabulary are
    skipped, and a document with no known words yields an all-zero vector.
    Output: one line per document, dimensions space-separated.
    """
    model = doTrainModel(path)
    dim = model.vector_size
    with open(path, 'r', encoding='utf-8') as f, \
         open(path + ".docvec", 'w', encoding='utf-8') as w:
        for index, line in enumerate(f):
            if index % 100 == 0:
                print(index)  # progress indicator
            # float32 matches the dtype of gensim word vectors, so the
            # accumulation behaves like the original element-wise loop.
            vec = np.zeros(dim, dtype=np.float32)
            for word in line.split():
                if word in model:
                    vec += model[word]
            w.write(' '.join(str(item) for item in vec))
            w.write("\n")




