
import torch
from torch.utils.data import Dataset, DataLoader
import random
import numpy as np
from typing import List, Dict, Tuple

import scripts.cmn_eng as cmn_eng
from modules.common import constants
from modules.helper.corpus_helper import CorpusData, CorpusDataset
from modules.helper.model_helper import ModelFactory, ModelTrainer

# Number of attention heads in the multi-head attention layers.
HEAD_NUM = constants.HEAD_NUM_DEFAULT
# Number of encoder/decoder layers.
LAYER_NUM = constants.LAYER_NUM_DEFAULT
# Dimensionality of the token embeddings.
EMBEDDING_SIZE = constants.EMBEDDING_SIZE_DEFAULT
# Hidden dimension of the position-wise feed-forward layers.
HIDDEN_SIZE = constants.HIDDEN_SIZE_DEFAULT

# Dropout probability, used to mitigate overfitting.
MODEL_DROPOUT_PROB = constants.MODEL_DROPOUT_PROB_DEFAULT
# Minimum sequence length (passed to CorpusDataset below).
MIN_SEQUENCE_LENGTH = constants.MIN_SEQUENCE_LENGTH_DEFAULT
# Maximum sequence length (passed to CorpusDataset and ModelFactory below).
MAX_SEQUENCE_LENGTH = constants.MAX_SEQUENCE_LENGTH_DEFAULT
# Path prefix for the preprocessing cache files.
PREPROCESSION_CACHE_PATH_PREFIX = './resources/cache/'
# Path suffix for the preprocessing cache files.
PREPROCESSION_CACHE_PATH_SUFFIX = '.cache'

# Path of the raw parallel-corpus file.
CORPUS_DATA_PATH = './resources/corpus/cmn-eng.txt'
# Path where the trained model's parameters are saved.
MODEL_DICT_PATH = './resources/model/translation-latest.pth'
# Path of the pretrained model used for warm-starting.
PRETRAINED_MODEL_PATH = './resources/model/translation-latest.pth'
# Mini-batch size.
BATCH_SIZE = 32
# Number of training epochs.
EPOCH_NUM = 5
# Learning rate.
LEARNING_RATE = 0.0001


# Ask whether to warm-start from a previously saved checkpoint.
# An empty answer (or anything other than y/Y) means "no".
pretrain_flag_str = input('输入是否加载预处理模型(Y/N): ').strip()
pretrain_flag = (pretrain_flag_str.upper() == 'Y')

# Ask for an early-stopping loss threshold; empty input means "never stop early".
# BUGFIX: the original stripped the string into `expected_loss` but tested the
# UNSTRIPPED string, so whitespace-only input crashed in float(). Strip once
# up front so whitespace-only input correctly falls through to None.
expected_loss_str = input('输入提前终止的损失值: ').strip()
if expected_loss_str:
    # Deliberately let a non-numeric answer raise ValueError: failing fast at
    # startup beats silently training with the wrong stopping criterion.
    expected_loss = float(expected_loss_str)
else:
    expected_loss = None

# Prefer the GPU whenever CUDA is usable, otherwise fall back to the CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Seed every RNG in play (stdlib, NumPy, PyTorch) with the same fixed value
# so that a training run can be reproduced exactly.
for _seed_fn in (random.seed, np.random.seed, torch.manual_seed):
    _seed_fn(1)

# Preprocess the parallel corpus into target/source token sequences, wrap
# them in the dataset abstractions, and expose them as shuffled batches.
target_sequences_list, source_sequences_list = cmn_eng.preprocess_all(CORPUS_DATA_PATH)
corpus_data = CorpusData(
    source_sequences_list=source_sequences_list,
    target_sequences_list=target_sequences_list,
)
corpus_dict = corpus_data.corpus_dict
dataset = CorpusDataset(
    corpus_data=corpus_data,
    min_sequence_length=MIN_SEQUENCE_LENGTH,
    max_sequence_length=MAX_SEQUENCE_LENGTH,
)
data_loader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

# Cache the preprocessed vocabulary and dataset so later runs can skip
# the preprocessing step.
corpus_dict_cache_path = f'{PREPROCESSION_CACHE_PATH_PREFIX}corpus_dict{PREPROCESSION_CACHE_PATH_SUFFIX}'
corpus_dataset_cache_path = f'{PREPROCESSION_CACHE_PATH_PREFIX}corpus_dataset{PREPROCESSION_CACHE_PATH_SUFFIX}'
torch.save(corpus_dict, corpus_dict_cache_path)
torch.save(dataset, corpus_dataset_cache_path)

# Build the Transformer translation model; vocabulary sizes come from the
# dictionaries derived from the corpus, and index 0 is the padding token.
model_factory = ModelFactory(max_sequence_length=MAX_SEQUENCE_LENGTH,
                             pad_index=0)
model = model_factory.create_model(head_num=HEAD_NUM, layer_num=LAYER_NUM,
                                   embedding_size=EMBEDDING_SIZE, hidden_size=HIDDEN_SIZE,
                                   source_vocab_size=len(corpus_dict.source_word2index_dict),
                                   target_vocab_size=len(corpus_dict.target_word2index_dict),
                                   dropout_prob=MODEL_DROPOUT_PROB)

# Optionally warm-start from the pretrained checkpoint.
# BUGFIX: pass map_location so a checkpoint saved on a CUDA machine still
# loads when the current run only has a CPU (torch.load would otherwise
# raise trying to restore tensors onto an unavailable device).
if pretrain_flag:
    state_dict = torch.load(PRETRAINED_MODEL_PATH, map_location=device)
    model.load_state_dict(state_dict)

# Run the training loop; training stops after EPOCH_NUM epochs or earlier
# once the loss drops below expected_loss (when one was given).
trainer = ModelTrainer()
trainer.train_model(model,
                    data_loader=data_loader,
                    epoch_num=EPOCH_NUM,
                    learning_rate=LEARNING_RATE,
                    expected_loss=expected_loss,
                    device=device)

# Persist the learned parameters for later inference or warm-starting.
torch.save(model.state_dict(), MODEL_DICT_PATH)
