import os
import pickle
import torch.nn.utils.rnn as rnn_utils
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch import optim
import config
from transformers import CpmTokenizer, GPT2LMHeadModel
from tqdm import tqdm

# SentencePiece-based CPM tokenizer, loaded from the project's vocab directory.
tokenizer = CpmTokenizer(config.VOCAB_DIR + 'chinese_vocab.model')
eos_id = tokenizer.eos_token_id  # appended after every encoded line in load_list_from_file
sep_id = tokenizer.sep_token_id  # reserved; not used in this chunk

logger = config.get_logger()


def load_list_from_file(filename: str = config.DATA_DIR,
                        file_save_path=config.OUTPUT_DIR + '/datalist.pkl',
                        windows: int = 80,
                        step: int = 40):
    """Tokenize every line of every corpus file under *filename* (a directory)
    and slice the token ids into overlapping sliding windows.

    The resulting ``list[list[int]]`` is cached with pickle at
    *file_save_path*; subsequent calls load the cache instead of
    re-tokenizing.  The dataset turns each list into a tensor and the
    DataLoader's collate_fn pads within a batch.

    :param filename: directory of plain-text corpus files (one sample per line)
    :param file_save_path: pickle cache path for the processed token lists
    :param windows: sliding-window length in tokens
    :param step: stride between consecutive windows
    :return: list of token-id windows
    """
    logger.info('开始处理数据')
    if os.path.exists(file_save_path):
        logger.info('数据已处理，从以下路径加载已处理数据：{}'.format(file_save_path))
        with open(file_save_path, 'rb') as f:
            return pickle.load(f)
    tokens_list = []
    tqdm_object = tqdm(os.listdir(filename))
    for file in tqdm_object:
        tqdm_object.set_description('Loading ' + file)
        with open(os.path.join(filename, file), 'r', encoding='utf-8') as f:
            # Stream line by line instead of readlines() to keep memory flat.
            for line in f:
                line = line.strip()
                if not line:
                    continue
                tokens = tokenizer.encode(line, add_special_tokens=False) + [eos_id]
                # Slide a window over the token sequence.  Using <= keeps a
                # sequence whose length is exactly `windows`; the original
                # `end < len(tokens)` dropped that case (off-by-one).
                start, end = 0, windows
                while end <= len(tokens):
                    tokens_list.append(tokens[start:end])
                    start += step
                    end += step
    with open(file_save_path, 'wb') as f:
        logger.info('数据处理完毕，保存至以下路径:{}'.format(file_save_path))
        pickle.dump(tokens_list, f)
    return tokens_list


class CmpDataset(Dataset):
    """Wrap a list of token-id sequences as a map-style torch Dataset.

    Responsibilities:
    1. enforce an optional upper bound on sample length;
    2. convert each python list into a ``torch.long`` tensor.
    """

    def __init__(self, tokens_list, max_len: int = -1):
        # max_len <= 0 means "no truncation".
        self.tokens_list = tokens_list
        self.max_len = max_len

    def __len__(self):
        return len(self.tokens_list)

    def __getitem__(self, idx):
        sample = self.tokens_list[idx]
        sample = sample[:self.max_len] if self.max_len > 0 else sample
        return torch.tensor(sample, dtype=torch.long)


def get_dataset():
    """Build the training dataset from the processed (or cached) corpus."""
    return CmpDataset(load_list_from_file())


def collection_fn(batch):
    """Collate a batch of variable-length token tensors into padded tensors.

    - ``input_ids`` are padded with the tokenizer's pad id.  The original code
      padded with -5, which is not a valid token id and would crash the
      model's embedding lookup on any padded batch.
    - ``labels`` are padded with -100 so padded positions are ignored by the
      cross-entropy loss (PyTorch's default ``ignore_index``).

    :param batch: list of 1-D ``torch.long`` tensors
    :return: tuple ``(input_ids, labels)``, each of shape (batch, max_len)
    """
    pad_id = tokenizer.pad_token_id
    if pad_id is None:
        pad_id = 0  # fall back if the tokenizer defines no pad token
    input_ids = rnn_utils.pad_sequence(batch, batch_first=True, padding_value=pad_id)
    labels = rnn_utils.pad_sequence(batch, batch_first=True, padding_value=-100)
    return input_ids, labels


def get_iter():
    """Return a shuffled DataLoader over the processed corpus dataset."""
    dataset = get_dataset()
    return DataLoader(
        dataset,
        batch_size=config.BATCH_SIZE,
        shuffle=True,
        collate_fn=collection_fn,
    )


def get_optim_lr_scheduler(model: nn.Module):
    """Create an AdamW optimizer and a cosine warm-restart LR scheduler.

    :param model: the model whose parameters will be optimized
    :return: tuple ``(optimizer, lr_scheduler)``
    """
    adamw = optim.AdamW(model.parameters(), lr=config.LEARN_RATE)
    scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(
        adamw, T_0=10, T_mult=2)
    return adamw, scheduler


def get_model():
    """Load the pretrained GPT-2 LM-head model from the 'gpt2' checkpoint.

    NOTE(review): 'gpt2' is the English GPT-2 checkpoint, while this module's
    tokenizer is a Chinese CPM SentencePiece model — their vocabularies likely
    differ in size and ids.  Confirm this is intentional or point this at a
    CPM/Chinese GPT-2 checkpoint.
    """
    return GPT2LMHeadModel.from_pretrained('gpt2')


def print_model_parameter_num(model):
    """Log the total and trainable parameter counts of *model*.

    Delegates the counting to the project helper
    ``config.utils_file.get_model_param_num``.
    """
    total_num, trainable_num = config.utils_file.get_model_param_num(model)
    logger.info(f'总参数数量:{total_num},训练参数数量:{trainable_num}')


if __name__ == '__main__':
    # Smoke test: process the corpus (or load the pickle cache) and dump it.
    data_list = load_list_from_file()
    print(data_list)
