from gensim.models import Word2Vec
import pdb
import pickle as pkl
import numpy as np
import re
from tokenizer import tokenize
import pandas as pd


def get_embeds(data_path='/root/autodl-tmp/fakeddit/train.tsv',
               out_dir='../data', vector_size=32, sample_frac=0.01,
               min_tokens=3):
    """Train Word2Vec embeddings on tokenized post titles and save artifacts.

    Reads the Fakeddit training TSV, keeps the leading ``sample_frac``
    fraction of rows, tokenizes the third column of each row, trains a
    Word2Vec model, and writes four files into ``out_dir``:

    - ``word_index.pkl``            word -> 1-based row index into the matrix
    - ``word_embed.bin``            the trained gensim model
    - ``embedding_matrix.npy``      unit-norm vectors; row 0 is an all-zero pad
    - ``embedding_matrix_norm.npy`` min-max scaled to [0, 1], row 0 re-zeroed

    Args:
        data_path: path to the training TSV (tab-separated, header row,
            indexed by the ``index`` column).
        out_dir: directory where all artifacts are written.
        vector_size: embedding dimensionality.
        sample_frac: fraction of leading rows to keep (default matches the
            original hard-coded 1%).
        min_tokens: minimum token count for a sentence to be kept
            (default matches the original ``len(...) > 2`` filter).
    """
    df_data = pd.read_csv(data_path, sep='\t', header=0, index_col='index')
    df_data = df_data[:int(df_data.shape[0] * sample_frac)]

    # Tokenize the title column (3rd column); drop very short sentences.
    # NOTE(review): assumes column 2 holds the text field — confirm schema.
    tweets = []
    for _, row in df_data.iterrows():
        words = tokenize(row.iloc[2])
        if len(words) >= min_tokens:
            tweets.append(words)

    # Compute lengths once instead of three separate comprehensions.
    lengths = [len(t) for t in tweets]
    print(df_data.shape[0], len(tweets))
    print('Max sentence length:', max(lengths))
    print('Avg sentence length:', sum(lengths) / len(lengths))
    print('Min sentence length:', min(lengths))

    model = Word2Vec(tweets, min_count=1, vector_size=vector_size)

    # Row 0 is an all-zero padding vector; real words start at index 1.
    word_index = {}
    embedding_matrix = [np.zeros(vector_size)]
    for pos, word in enumerate(model.wv.index_to_key, start=1):
        word_index[word] = pos
        embedding_matrix.append(model.wv.get_vector(word, norm=True))
    embedding_matrix = np.array(embedding_matrix)

    print('Vocab Size:', len(word_index))
    # Context manager closes the pickle file (the original leaked the handle).
    with open(f'{out_dir}/word_index.pkl', 'wb') as f:
        pkl.dump(word_index, f)
    model.save(f'{out_dir}/word_embed.bin')
    np.save(f'{out_dir}/embedding_matrix', embedding_matrix)

    # Min-max scale to [0, 1], then re-zero the padding row in one shot.
    min_val = embedding_matrix.min()
    max_val = embedding_matrix.max()
    embedding_matrix = (embedding_matrix - min_val) / (max_val - min_val)
    embedding_matrix[0] = 0

    np.save(f'{out_dir}/embedding_matrix_norm', embedding_matrix)


def main():
    """Script entry point: build and save the embedding artifacts."""
    get_embeds()


if __name__ == '__main__':
    main()
