# _*_ coding: utf-8 _*_
# @Time : 2021/8/25 14:00 
# @Author : xupeng
# contact: ipeng_x1029@163.com
# @File : utils.py
import time
from datetime import timedelta
import torch
from tqdm import tqdm
import os
import pickle as pkl

MAX_VOCAB_SIZE = 10000

UNK, PAD = '<UNK>', '<PAD>'

def build_vocab(file_path, tokenizer, max_size, min_freq):
    """Build a token -> index vocabulary from a tab-separated corpus file.

    Each line of ``file_path`` is expected to be ``text<TAB>label``; only the
    text column is tokenized and counted.

    :param file_path: path to the corpus file (UTF-8 text).
    :param tokenizer: callable mapping a string to a list of tokens.
    :param max_size: keep at most this many most-frequent tokens.
    :param min_freq: drop tokens occurring fewer than this many times.
    :return: dict mapping token -> index; UNK and PAD occupy the last two ids.
    """
    vocab_dict = {}
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in tqdm(f):
            line = line.strip()
            # BUGFIX: strip() never returns None — the old `line == None`
            # check was dead code, so blank lines were tokenized. An empty
            # string is the real blank-line marker.
            if not line:
                continue
            content = line.split('\t')[0]
            for word in tokenizer(content):
                vocab_dict[word] = vocab_dict.get(word, 0) + 1
    # Keep tokens meeting min_freq, most frequent first, capped at max_size.
    vocab_list = sorted(
        [item for item in vocab_dict.items() if item[1] >= min_freq],
        key=lambda x: x[1], reverse=True
    )[:max_size]
    vocab = {word[0]: idx for idx, word in enumerate(vocab_list)}
    # Reserve the two trailing ids for the unknown and padding tokens.
    vocab.update({UNK: len(vocab), PAD: len(vocab) + 1})
    return vocab

def load_dataset(path, max_size, tokenizer, vocab):
    """Read a ``text<TAB>label`` file into model-ready examples.

    Every example is truncated or PAD-extended to exactly ``max_size`` tokens.

    :param path: dataset file path (UTF-8 text).
    :param max_size: fixed sequence length after padding/truncation.
    :param tokenizer: callable mapping a string to a list of tokens.
    :param vocab: token -> id mapping; out-of-vocabulary tokens map to the
        UNK id.
    :return: list of ``(token_ids, label, seq_len)`` tuples, where ``seq_len``
        is the pre-padding length (capped at ``max_size``).
    """
    contents = []
    unk_id = vocab.get(UNK)  # hoisted: constant for the whole file
    with open(path, 'r', encoding='utf-8') as f:
        for line in tqdm(f):
            line = line.strip()
            # BUGFIX: strip() never returns None — `line == None` was dead
            # code; test for the empty string to actually skip blank lines
            # (the old code crashed on them at the split/unpack below).
            if not line:
                continue
            content, label = line.split('\t')
            token = tokenizer(content)
            seq_len = len(token)
            if seq_len > max_size:
                token = token[:max_size]
                seq_len = max_size
            else:
                token.extend([PAD] * (max_size - seq_len))

            word_list = [vocab.get(word, unk_id) for word in token]
            contents.append((word_list, int(label), seq_len))
    return contents

def build_dataset(use_word, config):
    """Load (or build and cache) the vocabulary and train/test/dev datasets.

    :param use_word: if True tokenize on spaces (word level), else per
        character.
    :param config: object providing ``vocab_path``, ``dataset_pkl``,
        ``train_path``, ``test_path``, ``dev_path`` and ``max_seq_size``.
    :return: tuple ``(vocab, train, test, dev)`` where each split is a list of
        ``(token_ids, label, seq_len)`` tuples.
    """
    if use_word:
        tokenizer = lambda x: x.split(' ')  # word level: tokens separated by spaces
    else:
        tokenizer = lambda x: [ch for ch in x]  # char level

    if os.path.exists(config.vocab_path):
        # FIX: use context managers so file handles are always closed
        # (the old bare ``pkl.load(open(...))`` leaked them).
        with open(config.vocab_path, 'rb') as f:
            vocab = pkl.load(f)
    else:
        vocab = build_vocab(file_path=config.train_path, tokenizer=tokenizer,
                            max_size=MAX_VOCAB_SIZE, min_freq=1)
        # Human-readable dump of the vocabulary for inspection.
        # NOTE(review): this path is hard-coded while the pickle goes to
        # config.vocab_path — consider deriving it from config so both land
        # in the same directory.
        with open('dataset/data/vocab.txt', 'w', encoding='utf-8') as f:
            for word in vocab:
                f.write(word + '\n')
        with open(config.vocab_path, 'wb') as f:
            pkl.dump(vocab, f)
    print(f"Vocab Size : {len(vocab)}")

    if os.path.exists(config.dataset_pkl):
        # Cached splits; each example is (word_list, label, seq_len).
        with open(config.dataset_pkl, 'rb') as f:
            train, test, dev = pkl.load(f)
    else:
        train = load_dataset(path=config.train_path, max_size=config.max_seq_size,
                             tokenizer=tokenizer, vocab=vocab)
        test = load_dataset(config.test_path, config.max_seq_size, tokenizer, vocab=vocab)
        dev = load_dataset(config.dev_path, config.max_seq_size, tokenizer, vocab=vocab)
        with open(config.dataset_pkl, 'wb') as f:
            pkl.dump([train, test, dev], f)
    return vocab, train, test, dev

class DatssetIterater:
    """Batch iterator over a list of ``(token_ids, label, seq_len)`` examples.

    Yields ``((x, seq_len), y)`` LongTensor batches moved to
    ``config.device``. A trailing partial batch is emitted when the dataset
    size is not a multiple of the batch size. The iterator resets itself on
    exhaustion, so it can be reused across epochs.

    (Class name spelling kept as-is — existing callers import it.)
    """

    def __init__(self, dataset, config):
        self.dataset = dataset
        self.batch_size = config.batch_size
        self.device = config.device
        # Number of *full* batches; any remainder forms one extra batch.
        self.n_batches = len(dataset) // self.batch_size
        # BUGFIX: was ``len(dataset) % self.n_batches``, which raised
        # ZeroDivisionError when len(dataset) < batch_size and, worse,
        # mis-detected the leftover (e.g. 3 items / batch 2 -> 3 % 1 == 0,
        # silently dropping the last sample). The modulus must be batch_size.
        self.residue = len(dataset) % self.batch_size != 0
        self.index = 0

    def _to_tensor(self, data_batch):
        """Convert a list of (word_list, label, seq_len) into device tensors.

        :param data_batch: slice of the dataset list.
        :return: ``((x, seq_len), y)`` LongTensors on ``self.device``.
        """
        x = torch.LongTensor([data[0] for data in data_batch]).to(self.device)
        y = torch.LongTensor([data[1] for data in data_batch]).to(self.device)
        seq_len = torch.LongTensor([data[2] for data in data_batch]).to(self.device)
        return (x, seq_len), y

    def __next__(self):
        if self.index < self.n_batches:
            # Full-sized batch.
            data_batch = self.dataset[self.index * self.batch_size: (self.index + 1) * self.batch_size]
            self.index += 1
            return self._to_tensor(data_batch)
        elif self.index == self.n_batches and self.residue:
            # Trailing partial batch.
            data_batch = self.dataset[self.index * self.batch_size:]
            self.index += 1
            return self._to_tensor(data_batch)
        else:
            self.index = 0  # reset so the next epoch can iterate again
            raise StopIteration

    def __iter__(self):
        return self

    def __len__(self):
        # Count the partial batch when present.
        return self.n_batches + 1 if self.residue else self.n_batches

def build_iterator(dataset, config):
    """Wrap ``dataset`` in a :class:`DatssetIterater` driven by ``config``.

    :param dataset: list of ``(token_ids, label, seq_len)`` examples.
    :param config: object providing ``batch_size`` and ``device``.
    :return: a fresh DatssetIterater over ``dataset``.
    """
    # Return directly instead of binding to a local named ``iter``,
    # which shadowed the builtin of the same name.
    return DatssetIterater(dataset, config)

def get_time_dif(start_time):
    """Return the elapsed wall-clock time since ``start_time``.

    :param start_time: reference timestamp obtained from ``time.time()``.
    :return: ``datetime.timedelta`` rounded to the nearest whole second.
    """
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))



if __name__ == "__main__":
    import numpy as np
    # Extract pretrained word vectors restricted to our vocabulary.
    # Adjust the directories / file names below as needed.
    train_dir = "./dataset/data/train.txt"
    vocab_dir = "./dataset/data/vocab.pkl"
    pretrain_dir = "./dataset/data/sgns.sogou.char"
    emb_dim = 300
    filename_trimmed_dir = "./dataset/data/embedding_SougouNews"
    if os.path.exists(vocab_dir):
        with open(vocab_dir, 'rb') as f:
            word_to_id = pkl.load(f)
    else:
        # tokenizer = lambda x: x.split(' ')  # word-level vocab (space-separated corpus)
        tokenizer = lambda x: [y for y in x]  # char-level vocab
        word_to_id = build_vocab(train_dir, tokenizer=tokenizer, max_size=MAX_VOCAB_SIZE, min_freq=1)
        with open(vocab_dir, 'wb') as f:
            pkl.dump(word_to_id, f)

    # Rows stay random for vocabulary words absent from the pretrained file.
    embeddings = np.random.rand(len(word_to_id), emb_dim)
    # Stream the (potentially large) embedding file line by line instead of
    # materializing it with readlines(); `with` guarantees the handle closes.
    with open(pretrain_dir, "r", encoding='UTF-8') as f:
        for i, line in enumerate(f):
            # if i == 0:  # skip the first line if it is a header
            #     continue
            lin = line.strip().split(" ")
            if lin[0] in word_to_id:
                idx = word_to_id[lin[0]]
                # Slice bound derived from emb_dim (was a hard-coded 301),
                # so changing emb_dim keeps the two consistent.
                emb = [float(x) for x in lin[1:emb_dim + 1]]
                embeddings[idx] = np.asarray(emb, dtype='float32')
    np.savez_compressed(filename_trimmed_dir, embeddings=embeddings)