# -*- coding: utf-8 -*-

import word2vec as vc
from scipy.spatial.distance import cosine
import numpy as np
import os
#import codecs
#import my_config



class Convert2Vector(object):
    """Wrapper around a trained word2vec model.

    Converts whitespace-segmented sentences/files into fixed-size embedding
    matrices and exposes simple similarity/analogy queries.
    """

    # Loaded word2vec model; class-level default kept so code that inspects
    # the attribute before __init__ still sees None.
    model = None

    def __init__(self, vector_dict_path='../dataset/step2/word2vec.bin'):
        """Load a binary word2vec model from *vector_dict_path*."""
        self.model = vc.load(vector_dict_path)

    @staticmethod
    def train(text_file, model_file, vector_dim=200):
        """Train a word2vec model on *text_file* and write it to *model_file*.

        :param text_file: path to a pre-segmented (space-separated) corpus.
        :param model_file: output path for the binary model.
        :param vector_dim: embedding dimensionality (default 200).
        """
        vc.word2vec(text_file, model_file, size=vector_dim, verbose=True)

    def segmented_sentence_2_vec_list(self, _str="", list_len=1):
        """Convert one segmented sentence into a [list_len, dim] matrix.

        Out-of-vocabulary words are skipped without advancing the row index,
        so known words are packed into the leading rows; unused trailing rows
        stay zero. At most *list_len* words are encoded.
        """
        words = _str.split(" ")
        # Drop a trailing bare-newline token left over from readline()-style
        # input. Guard against an empty list (original indexed blindly).
        if words and words[-1] == u'\n':
            del words[-1]
        vec_list = np.zeros([list_len, len(self.model.vectors[0])])
        i = 0
        for word in words:
            try:
                vec_list[i] = self.model.get_vector(word)
            except KeyError:
                # Unknown word: skip it and reuse this row for the next word.
                continue
            i += 1
            if list_len <= i:
                break
        return vec_list

    def file_with_multi_segmented_lines_2_vec(self, _file, _max_words_in_sentence=50):
        """Vectorize every line of *_file*.

        :returns: array of shape [num_lines, _max_words_in_sentence, dim].
        """
        # BUG FIX: original used the invalid open mode "ra" (ValueError in
        # Python 3), called .decode() on an already-decoded str, and never
        # closed the handle. Open as UTF-8 text under a context manager and
        # stream line by line instead of readlines().
        vecs = []
        with open(_file, "r", encoding="utf-8") as f:
            for line in f:
                vecs.append(self.segmented_sentence_2_vec_list(line, _max_words_in_sentence))
        return np.array(vecs)

    def cosine_dist(self, w1, w2):
        """Return the cosine *distance* (1 - cosine similarity) between two words.

        :raises KeyError: if either word is out of vocabulary.
        """
        a1 = self.model.get_vector(w1)
        a2 = self.model.get_vector(w2)
        return cosine(a1, a2)

    def __getitem__(self, word):
        """Return the embedding vector for *word*.

        BUG FIX: original returned ``self[word]``, which recursed into this
        same method forever (RecursionError on any lookup). Delegate to the
        underlying model instead, preserving KeyError for unknown words.
        """
        return self.model.get_vector(word)

    def __contains__(self, word):
        """True if *word* is in the model vocabulary."""
        return word in self.model

    def get_close_words(self, pos=None, neg=None, num=10):
        """Return up to *num* vocabulary words close to pos and far from neg.

        Unknown words are silently dropped; duplicates are removed. Returns
        an empty list when no known word remains.

        BUG FIX: mutable default arguments ([]) replaced with None sentinels.
        """
        pos = [] if pos is None else pos
        neg = [] if neg is None else neg

        # Keep only in-vocabulary words, deduplicated.
        pos_checked = list({w for w in pos if w in self.model})
        neg_checked = list({w for w in neg if w in self.model})

        if not pos_checked and not neg_checked:
            return []

        index, _metrics = self.model.analogy(pos=pos_checked, neg=neg_checked, n=num)
        return [self.model.vocab[i] for i in index]

    def test(self):
        """Print a few pairwise cosine distances as a manual sanity check."""
        print(self.cosine_dist(u"珠宝", u"珠宝"))
        print(self.cosine_dist(u"联通", u"手机"))
        print(self.cosine_dist(u"话费", u"充值"))
        print(self.cosine_dist(u"话费", u"手机"))
        print(self.cosine_dist(u"充值", u"手机"))
        print(self.cosine_dist(u"充值", u"移动"))


def files_to_one(_input_file="", _output_file=""):
    """Append the full contents of *_input_file* to *_output_file*.

    BUG FIX: the original opened both files without try/finally, leaking the
    handles if any read/write raised; context managers close them
    deterministically. Streaming line by line also avoids materializing the
    whole input with readlines().
    """
    with open(_input_file, "r") as f_input, open(_output_file, "a") as f_output:
        for line in f_input:
            f_output.write(line)


def merge_file_in_dir_to_one(_input_dir="dataset/step1", _output_file=""):
    """Append every file in *_input_dir* (non-recursively) to *_output_file*.

    Files are processed in sorted name order for deterministic output
    (os.listdir order is otherwise platform-dependent). Uses os.path.join
    instead of manual "/" concatenation for portability.
    """
    for filename in sorted(os.listdir(_input_dir)):
        print(filename)
        files_to_one(os.path.join(_input_dir, filename), _output_file)

if __name__ == "__main__":
    # Train a fresh word-vector model on the segmented news corpus, then
    # reload it and print a few sanity-check cosine distances.
    model_path = 'model/news-163_word_vector.bin'
    corpus_path = "dataset/news-163_fencied_utf8.txt"

    Convert2Vector.train(corpus_path, model_path)
    converter = Convert2Vector(model_path)
    converter.test()






