from mydataset import MyDataset
from config import Config
import numpy as np
import torch
from torch.utils.data import DataLoader


def build_word2id(file):
    """
    Build the word2id vocabulary table from the corpus files and save it.

    Scans the train/dev/test corpora (first token of each line is the
    label, the remaining tokens are words) and assigns each unseen word
    the next consecutive id, with '_PAD_' fixed at id 0.

    :param file: path where the word2id table is written (word<TAB>id per line)
    :return: None
    """
    word2id = {'_PAD_': 0}
    paths = [Config.train_path, Config.dev_path, Config.test_path]
    for path in paths:
        with open(path, encoding='utf-8') as f:
            # iterate the file directly instead of materializing readlines()
            for line in f:
                tokens = line.strip().split()
                # tokens[0] is the label; the rest are words
                for word in tokens[1:]:
                    if word not in word2id:  # dict membership, no .keys()
                        word2id[word] = len(word2id)
    with open(file, 'w', encoding='utf-8') as f:
        for word, idx in word2id.items():
            f.write(f'{word}\t{idx}\n')


def build_word2vec(fname, word2id, save_to_path=None):
    """
    Build word2vec vectors for the corpus vocabulary from a pretrained model.

    :param fname: path to the pretrained word2vec file (binary format)
    :param word2id: vocabulary mapping {word: id} for the corpus
    :param save_to_path: optional path to also save the vectors as text,
                         one space-separated vector per line, ordered by id
    :return: ndarray of shape (n_words, vector_size); words missing from the
             pretrained model keep their random uniform(-1, 1) init vectors
    """
    import gensim
    n_words = max(word2id.values()) + 1
    model = gensim.models.KeyedVectors.load_word2vec_format(fname, binary=True)
    # np.random.uniform already returns an ndarray — no extra np.array() wrap
    word_vecs = np.random.uniform(-1., 1., [n_words, model.vector_size])
    for word, idx in word2id.items():
        try:
            word_vecs[idx] = model[word]
        except KeyError:
            pass  # OOV word: keep its random init vector
    if save_to_path:
        with open(save_to_path, 'w', encoding='utf-8') as f:
            for vec in word_vecs:
                f.write(' '.join(str(v) for v in vec))
                f.write('\n')
    return word_vecs


def load_word2id(file):
    """
    Load the word2id vocabulary table from disk.

    :param file: path to the word2id table (one "word<TAB>id" pair per line,
                 as written by build_word2id)
    :return: dict mapping word -> int id
    """
    word2id = {}
    with open(file, 'r', encoding='utf-8') as f:
        # iterate the file directly instead of materializing readlines()
        for line in f:
            parts = line.strip().split()
            if len(parts) < 2:
                # skip blank/malformed lines instead of raising IndexError
                continue
            word2id[parts[0]] = int(parts[1])
    return word2id


def load_word2vec(file):
    """
    Load word2vec vectors from a text file.

    :param file: path to the word2vec file (one space-separated
                 float vector per line)
    :return: list of vectors, each a list of floats
    """
    with open(file, 'r', encoding='utf-8') as f:
        return [[float(value) for value in row.split()] for row in f]


def my_collate_fn(batch_data):
    """
    Collate a batch of (sequence, label) pairs into stacked tensors.

    :param batch_data: list of (token_id_list, label) samples
    :return: (int64 tensor of token-id sequences, tensor of labels)
    """
    sequences = [sample[0] for sample in batch_data]
    labels = [sample[1] for sample in batch_data]
    return torch.tensor(sequences, dtype=torch.int64), torch.tensor(labels)


def preprocess():
    """
    Preprocess the corpora and build the network inputs.

    :return: (train_loader, dev_loader, test_loader) — DataLoaders yielding
             fixed-length padded id sequences and their labels
    """
    # build_word2id(Config.word2id_path)
    word2id = load_word2id(Config.word2id_path)
    # build_word2vec(Config.pre_word2vec_path, word2id, save_to_path=Config.corpus_word2vec_path)
    # word2vec = load_word2vec(Config.corpus_word2vec_path)

    def load_corpus(path):
        samples, targets = [], []
        with open(path, encoding='utf-8') as f:
            for raw in f.readlines():
                tokens = raw.strip().split()
                if not tokens:
                    continue
                # tokens[0] is the label; map the remaining words to ids
                ids = [word2id[tok] for tok in tokens[1:]]
                # truncate, then zero-pad up to the fixed sentence length
                ids = ids[:Config.max_sen_len]
                ids += [0] * (Config.max_sen_len - len(ids))
                assert len(ids) == Config.max_sen_len
                samples.append(ids)
                targets.append(torch.tensor(int(tokens[0]), dtype=torch.int64))
        return samples, targets

    loader_specs = ((Config.train_path, True),
                    (Config.dev_path, True),
                    (Config.test_path, False))
    loaders = []
    for path, shuffle in loader_specs:
        data, labels = load_corpus(path)
        loaders.append(DataLoader(MyDataset(data, labels),
                                  batch_size=Config.batch_size,
                                  shuffle=shuffle, num_workers=2,
                                  collate_fn=my_collate_fn))
    train_loader, dev_loader, test_loader = loaders
    return train_loader, dev_loader, test_loader
