from dataLoader import Voc, getPairs, getWordIndex, RawDataPath, data_name, save_dir
import os
import torch
import random
from gensim.models.word2vec import Word2Vec
import gensim.models.word2vec as word2vec
from src.Main.config import embedding_size


# On-disk locations and knobs, hoisted from inline literals so the corpus
# path used for writing and for word2vec training can never drift apart.
CORPUS_PATH = "../../Datasets/MidData/LCCC_word.txt"
WORD2VEC_PATH = "../../Datasets/MidData/LCCC_word2vec.model"
TRAIN_SPLIT = 0.8   # fraction of pairs that becomes the training set
MIN_COUNT = 5       # frequency cutoff shared by Voc.trim and Word2Vec
SPLIT_SEED = 0      # fixed seed so the train/test split is reproducible


def _build_voc(pairs):
    """Build a Voc over both sides of every pair, trimmed to frequent words."""
    voc = Voc(data_name)
    for question, answer in pairs:
        voc.addSentence(question.strip())
        voc.addSentence(answer.strip())
    voc.trim(min_count=MIN_COUNT)
    return voc


def _write_corpus(pairs, path):
    """Dump every sentence into one flat text file for word2vec training.

    The PAD/SOS/EOS specials are written MIN_COUNT times each so they
    survive Word2Vec's min_count frequency cutoff.
    """
    # Fix: the MidData directory may not exist yet; the original open() would crash.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w', encoding='utf-8') as f:
        f.write('PAD PAD PAD PAD PAD SOS SOS SOS SOS SOS EOS EOS EOS EOS EOS ')
        for question, answer in pairs:
            f.write(question.strip() + " ")
            f.write(answer.strip() + " ")


def _split_pairs(pairs, scale=TRAIN_SPLIT):
    """Shuffle `pairs` in place and return (train, test) split at `scale`."""
    random.shuffle(pairs)
    split_index = int(len(pairs) * scale)
    return pairs[:split_index], pairs[split_index:]


def main():
    """Run the full preprocessing pipeline: vocab, corpus, split, save, embed."""
    print("-----loading pairs----")
    pairs = getPairs(RawDataPath)
    print(len(pairs))

    print("-----loading voc------")
    voc = _build_voc(pairs)

    _write_corpus(pairs, CORPUS_PATH)

    print("-----loading index pairs------")
    # Seed before shuffling so repeated runs produce the same train/test split
    # (an unseeded split leaks test data into training across re-runs).
    random.seed(SPLIT_SEED)
    train_pairs, test_pairs = _split_pairs(pairs)
    train_pairs = getWordIndex(voc, train_pairs)
    test_pairs = getWordIndex(voc, test_pairs)

    # Persist the vocabulary and indexed pairs for the training scripts.
    directory = os.path.join(save_dir, 'LCCC_data')
    os.makedirs(directory, exist_ok=True)
    torch.save(voc, os.path.join(directory, 'voc_LCCC.tar'))
    torch.save(train_pairs, os.path.join(directory, 'pairs_train_LCCC.tar'))
    torch.save(test_pairs, os.path.join(directory, 'pairs_test_LCCC.tar'))

    # Train word embeddings on the flat corpus (gensim 4.x keyword names).
    data_set = word2vec.Text8Corpus(CORPUS_PATH)
    model = Word2Vec(data_set, vector_size=embedding_size, window=5,
                     negative=5, min_count=MIN_COUNT, epochs=5)
    model.save(WORD2VEC_PATH)


if __name__ == '__main__':
    main()
