__author__ = 'Administrator'
import numpy


class Preprocessing(object):
    """Build word/index dictionaries and an embedding matrix for a dataset.

    Reads a dataset file whose lines look like ``label@word1@word2@...``,
    assigns every distinct word an integer index (index 0 is reserved for the
    padding token ``</s>``), extracts the pre-trained vectors of those words
    from a GloVe-style embedding file, and fills a
    ``(vocab_size, vector_length)`` matrix (``self.WORDS``) with them.
    Words without a pre-trained vector keep a small random initialisation.
    """

    def __init__(self, filename="data//all.txt",
                 word_embedding="F:\\python_chram_data\\WordEmbedding\\glove.42B.300d.txt",
                 vector_length=300,
                 vector_cache="data//Glove_Vec"):
        """
        :param filename: dataset file, one ``label@word@word...`` record per line
        :param word_embedding: path to a space-separated pre-trained embedding file
        :param vector_length: dimensionality of each word vector
        :param vector_cache: path where the filtered embedding lines are written
                             (new optional parameter; default preserves the old
                             hard-coded "data//Glove_Vec" destination)
        """
        self.vector_length = vector_length
        self.vector_cache = vector_cache

        # init the word <-> index dictionaries
        self.filename = filename
        self.idx_2_words = dict()
        self.words_2_idx = dict()
        self.init_dict()

        # load pre-trained vectors for the in-vocabulary words
        self.word_embedding = word_embedding
        self.word_2_vectors = dict()
        self.init_vectors()

        # random init for every word; rows with a known pre-trained vector
        # are overwritten by init_WORDS()
        self.WORDS = numpy.random.uniform(low=-0.05, high=0.05,
                                          size=(len(self.idx_2_words), self.vector_length))
        self.init_WORDS()

    def init_dict(self):
        """Scan the dataset and fill words_2_idx and idx_2_words."""
        print("start to index to words")
        index = 1
        self.words_2_idx["</s>"] = 0  # index 0 is reserved for the padding token
        # BUG FIX: original used .lstrip().lstrip() (duplicated) and leaked the
        # file handle on exception; use strip() and a context manager.
        with open(self.filename, 'r') as openfile:
            for each in openfile:
                words = each.strip().split("@")
                # the first field is the label, the rest are words
                for word in words[1:]:
                    if word not in self.words_2_idx:
                        self.words_2_idx[word] = index
                        index += 1
        print("word length ----->", len(self.words_2_idx))
        print("start to words to index")
        # invert the mapping; bijective by construction, so no assert loop needed
        for word, idx in self.words_2_idx.items():
            self.idx_2_words[idx] = word

    def init_vectors(self):
        """Extract vectors of in-vocabulary words from the embedding file.

        Fills ``self.word_2_vectors`` (word -> numpy vector) and also writes
        the matching raw lines to ``self.vector_cache`` so later runs can load
        the much smaller filtered file.
        """
        print("start to get the vector from pre-trained word embedding")
        print("And the word embedding path is -----> ", self.word_embedding)
        with open(self.word_embedding, 'r') as openfile, \
                open(self.vector_cache, 'w') as writefile:
            for index, line in enumerate(openfile):
                if index % 100000 == 0:
                    print("load pre trained word embedding -----> ", index)
                line = line.strip()
                parts = line.split(" ")
                if parts[0] in self.words_2_idx:
                    writefile.write(line + "\n")
                    # BUG FIX: the original never populated word_2_vectors,
                    # so init_WORDS() left every row of WORDS random.
                    self.word_2_vectors[parts[0]] = numpy.asarray(parts[1:],
                                                                  dtype=numpy.float64)

    def init_WORDS(self):
        """Overwrite rows of self.WORDS with pre-trained vectors where known."""
        print("init the WORDS, and the WORDS shape is -----> ", self.WORDS.shape)
        missing = 0
        for i in range(self.WORDS.shape[0]):
            word = self.idx_2_words[i]
            if word in self.word_2_vectors:
                self.WORDS[i] = self.word_2_vectors[word]
            else:
                missing += 1  # no pre-trained vector; keeps its random init
        print("at least ", missing, " words not included in the datasets")


# Run the preprocessing pipeline only when executed as a script; importing
# this module should not trigger the heavy file I/O in Preprocessing().
if __name__ == "__main__":
    p = Preprocessing()