import pandas as pd
import gensim
import jieba
from gensim.models import Word2Vec
import numpy as np

def read_data(file_path):
    """Load a tab-separated text file.

    Each line is stripped of surrounding whitespace and split on tabs.

    Args:
        file_path: path to a UTF-8 encoded, tab-delimited file.

    Returns:
        A list of field lists, one per input line.
    """
    with open(file_path, "r", encoding="utf8") as fh:
        return [line.strip().split("\t") for line in fh]


def tokenization(text_list):
    """Segment every text with jieba, discarding whitespace-only tokens.

    Args:
        text_list: iterable of texts; each item is coerced with str() before
            segmentation.

    Returns:
        A list of token lists, parallel to ``text_list``.
    """
    return [
        [token for token in jieba.cut(str(text)) if token.strip()]
        for text in text_list
    ]


def get_freq(text_list, word2id):
    """Count token occurrences, keyed by word id.

    Args:
        text_list: list of tokenized sentences (lists of word strings).
        word2id: mapping from word string to integer id. Words missing from
            the mapping are counted under id 0, consistent with how unknown
            words are encoded in the sentence id lists.

    Returns:
        Dict mapping word id -> occurrence count across all sentences.
    """
    word_freq = {}
    for sen in text_list:
        for word in sen:
            # BUG FIX: the original did word_freq.get(word, 0), looking up the
            # *string* in a dict keyed by integer ids, so every count was
            # reset to 1. Accumulate under the id actually used as the key.
            idx = word2id.get(word, 0)
            word_freq[idx] = word_freq.get(idx, 0) + 1
    return word_freq


def get_w2v(file_path, size=150, min_count=1, workers=4):
    """Train Word2Vec embeddings over tab-separated sentence pairs.

    The input file has one instance per line: sentence1 <TAB> sentence2
    <TAB> label. Both sentences of every pair are tokenized and fed to
    Word2Vec as training corpus.

    NOTE(review): uses the gensim < 4.0 API (``size=`` keyword,
    ``model.wv.index2word``, ``model[word]``); gensim 4.x renamed these.

    Args:
        file_path: path to the tab-separated data file.
        size: embedding dimensionality.
        min_count: minimum word frequency kept in the Word2Vec vocabulary.
        workers: number of training worker threads.

    Returns:
        Tuple of (sen1_list, sen2_list, word_emb, word2id, word_freq, label):
        id-encoded first/second sentences (unknown words -> 0), the embedding
        matrix (row 0 reserved for padding/unknown), word->id mapping, id
        frequency counts, and the raw label strings.
    """
    corpus = []
    label = []
    for row in read_data(file_path):
        corpus.append(row[0])
        corpus.append(row[1])
        label.append(row[2])
    corpus = tokenization(corpus)
    model = Word2Vec(corpus, min_count=min_count, size=size, workers=workers)

    # Row 0 of the embedding matrix stays all-zero for padding/unknown words,
    # so vocabulary ids start at 1.
    word2id = {}
    word_emb = np.zeros((1 + len(model.wv.index2word), size))
    for token in model.wv.index2word:
        word2id[token] = len(word2id) + 1
        word_emb[word2id[token]] = model[token]

    word_freq = get_freq(corpus, word2id)

    # Sentences were appended pairwise, so evens are sentence 1, odds sentence 2.
    sen1_list = corpus[0::2]
    sen2_list = corpus[1::2]

    sen1_list = [[word2id.get(word, 0) for word in sen] for sen in sen1_list]
    sen2_list = [[word2id.get(word, 0) for word in sen] for sen in sen2_list]
    print("Total sentence pairs: %d\nTotal words: %d\nword embedding dimension: %d"%(len(sen1_list), len(word2id), size))
    return sen1_list, sen2_list, word_emb, word2id, word_freq, label