﻿__author__ = 'Administrator'
import numpy


class Preprocessing(object):
    """Build word/index vocabularies from a labelled corpus and an embedding
    matrix initialised from pre-trained word vectors.

    The corpus file is expected to contain one example per line in the form
    ``label@word1@word2@...`` (the first '@'-separated field is the label and
    is skipped).  Each pre-trained embedding file is expected in word2vec
    text format: one ``word v1 v2 ... vN`` entry per line.  When several
    embedding files are given, their vectors are concatenated column-wise in
    ``WORDS`` (file k occupies columns ``sum(vector_length[:k])`` onward).
    """

    def __init__(self, filename="data//all.txt", word_embedding=None, vector_length=None):
        """
        :param filename: path to the '@'-separated corpus file.
        :param word_embedding: list of paths to pre-trained embedding files;
            defaults to ``["data//terc_word2vec_vectors"]``.
        :param vector_length: list of embedding dimensionalities, one per
            entry of ``word_embedding``; defaults to ``[300]``.
        """
        # None-default pattern avoids the shared mutable-default-argument trap.
        if not word_embedding:
            word_embedding = ["data//terc_word2vec_vectors"]
        if not vector_length:
            vector_length = [300]
        self.vector_length = vector_length

        # Build the word <-> index dictionaries from the corpus.
        self.filename = filename
        self.idx_2_words = dict()
        self.words_2_idx = dict()
        self.init_dict()

        # Load the pre-trained vectors: one word->vector dict per file.
        self.word_embedding = word_embedding
        self.word_2_vectors = dict()
        self.init_vectors()

        # WORDS[i] holds the concatenated embedding of word index i; words
        # without a pre-trained vector keep this small uniform random init.
        self.WORDS = numpy.random.uniform(
            low=-0.08049235, high=0.08049235,
            size=(len(self.idx_2_words), sum(self.vector_length)))
        self.init_WORDS()
        print(self.WORDS.shape)

    def init_dict(self):
        """Scan the corpus and assign a unique integer index to every word.

        Populates ``words_2_idx`` and its inverse ``idx_2_words``; index 0 is
        reserved for the special padding/boundary symbol ``</s>``.
        """
        print("start to index to words")
        index = 1

        # Reserve index 0 for the special symbol before scanning the corpus.
        self.words_2_idx["</s>"] = 0

        # FIX: the original called .lstrip() twice (second call a no-op) and
        # stripped only "\n"; .strip() also removes trailing spaces and "\r"
        # that would otherwise create spurious vocabulary entries.
        # The with-statement guarantees the file is closed even on error.
        with open(self.filename, 'r') as openfile:
            for each in openfile:
                words = each.strip().split("@")
                # The first field is the label; the remaining fields are words.
                for word in words[1:]:
                    if word not in self.words_2_idx:
                        self.words_2_idx[word] = index
                        index += 1
        print("word length ----->", len(self.words_2_idx))

        print("start to words to index")
        # Build the inverse mapping index -> word.
        for word, idx in self.words_2_idx.items():
            self.idx_2_words[idx] = word

        # Sanity check: the two mappings must be exact inverses.
        for word, idx in self.words_2_idx.items():
            assert self.idx_2_words[idx] == word

    def init_vectors(self):
        """Load each pre-trained embedding file into a word -> vector dict,
        stored in ``self.word_2_vectors`` keyed by the file path."""
        print("start to get the vector from pre-trained word embedding")
        print("And the word embedding path is -----> ", self.word_embedding)

        # --------load the word2vec Vector
        for eachfile in self.word_embedding:
            word_2_vector = dict()
            with open(eachfile, 'r') as openfile:
                for index, line in enumerate(openfile):
                    if index % 5000 == 0:
                        print("load pre trained word embedding from \t", eachfile,
                              "\t and the index is\t", index)
                    # FIX: .strip() instead of .lstrip().lstrip().strip("\n") —
                    # a trailing space would make float("") raise ValueError.
                    words = line.strip().split(" ")
                    # First token is the word; the rest are vector components.
                    word_2_vector[words[0]] = [float(component) for component in words[1:]]
            # FIX: the original re-assigned this inside the per-line loop;
            # assigning once per file is equivalent and cheaper.
            self.word_2_vectors[eachfile] = word_2_vector

    def init_WORDS(self):
        """Overwrite rows of ``WORDS`` with pre-trained vectors where available."""
        print("init the WORDS, and the WORDS shape is -----> ", self.WORDS.shape)
        covered = 0  # number of (word, file) pairs that had a pre-trained vector
        for i in range(self.WORDS.shape[0]):
            word = self.idx_2_words[i]
            # Concatenate the embeddings from every file into row i.
            for file_index, filename in enumerate(self.word_embedding):
                w2v = self.word_2_vectors[filename]
                if word in w2v:
                    covered += 1
                    # Columns [start:end) hold this file's slice of the row.
                    start = sum(self.vector_length[0:file_index])
                    end = start + self.vector_length[file_index]
                    self.WORDS[i][start:end] = w2v[word]
        print("at least ", covered, " words included in the datasets")

