
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from typing import List, Dict, Tuple

from ..common import constants
from ..transformer.transformer_model import (PAD_WORD, UNK_WORD, BOS_WORD, EOS_WORD,
                               PositionalEncoding, PositionWiseFeedForward, MultiHeadAttention, 
                               EncoderLayer, Encoder, DecoderLayer, Decoder, Transformer)
from .corpus_helper import CorpusDict

class ModelFactory(object):
    r"""
    Encapsulates the logic for building Transformer models.
    """

    def __init__(self, max_sequence_length=constants.MAX_SEQUENCE_LENGTH_DEFAULT, pad_index=0):
        """
        Args:
            max_sequence_length: maximum sequence length supported by the
                positional encoding
            pad_index: index of the padding token in both vocabularies
        """

        # BUG FIX: the original `super(ModelFactory).__init__()` creates an
        # *unbound* super object and initializes that object itself — the
        # parent class initializer was never actually invoked.
        super().__init__()
        self.max_sequence_length = max_sequence_length
        self.pad_index = pad_index

    def create_model(self, head_num: int, layer_num: int,
                     embedding_size: int, hidden_size: int,
                     source_vocab_size: int, target_vocab_size: int,
                     encoder_word_embedding=None, decoder_word_embedding=None,
                     dropout_prob=constants.MODEL_DROPOUT_PROB_DEFAULT) -> Transformer:
        """
        Build a new Transformer model.

        Args:
            head_num: number of attention heads
            layer_num: number of encoder/decoder layers
            embedding_size: dimension of the word embeddings
            hidden_size: hidden dimension of the position-wise feed-forward network
            source_vocab_size: source-side vocabulary size
            target_vocab_size: target-side vocabulary size
            encoder_word_embedding: optional pre-built encoder embedding table;
                created from `source_vocab_size` when None
            decoder_word_embedding: optional pre-built decoder embedding table;
                created from `target_vocab_size` when None
            dropout_prob: dropout probability, to mitigate over-fitting

        Returns:
            The newly constructed Transformer model.
        """

        assert head_num > 0
        assert layer_num > 0
        assert embedding_size > 0
        assert hidden_size > 0
        assert source_vocab_size > 0
        assert target_vocab_size > 0

        if encoder_word_embedding is None:
            encoder_word_embedding = nn.Embedding(source_vocab_size, embedding_size, padding_idx=self.pad_index)

        if decoder_word_embedding is None:
            decoder_word_embedding = nn.Embedding(target_vocab_size, embedding_size, padding_idx=self.pad_index)

        # Encoder layers: self-attention + position-wise feed-forward, stacked layer_num times.
        encoder_layers = nn.ModuleList()
        for _ in range(layer_num):
            self_attention = MultiHeadAttention(head_num, embedding_size, dropout_prob=dropout_prob)
            feed_forward = PositionWiseFeedForward(embedding_size, hidden_size, dropout_prob=dropout_prob)
            encoder_layers.append(EncoderLayer(self_attention, feed_forward))

        # Decoder layers: masked self-attention + encoder-decoder attention + feed-forward.
        decoder_layers = nn.ModuleList()
        for _ in range(layer_num):
            decoder_self_attention = MultiHeadAttention(head_num, embedding_size, dropout_prob=dropout_prob)
            decoder_encoder_attention = MultiHeadAttention(head_num, embedding_size, dropout_prob=dropout_prob)
            feed_forward = PositionWiseFeedForward(embedding_size, hidden_size, dropout_prob=dropout_prob)
            decoder_layers.append(DecoderLayer(decoder_self_attention, decoder_encoder_attention, feed_forward))

        # NOTE: the positional-encoding module is shared between encoder and decoder.
        positional_encoding = PositionalEncoding(embedding_size, self.max_sequence_length)
        output_projection = nn.Linear(embedding_size, target_vocab_size, bias=False)
        encoder = Encoder(encoder_layers, encoder_word_embedding, positional_encoding, dropout_prob=dropout_prob)
        decoder = Decoder(decoder_layers, decoder_word_embedding, positional_encoding, dropout_prob=dropout_prob)

        return Transformer(encoder, decoder, output_projection, pad_index=self.pad_index)


class ModelTrainer(object):
    r"""
    Encapsulates the model training logic.
    """

    def __init__(self):
        # BUG FIX: the original `super(ModelTrainer).__init__()` initialized an
        # unbound super object instead of calling the parent class initializer.
        super().__init__()

    def train_model(self, model: Transformer, data_loader: DataLoader,
                    epoch_num: int, learning_rate: float, expected_loss=None, device='cpu') -> None:
        """
        Train the model with Adam and cross-entropy loss.

        Args:
            model: the Transformer model to train
            data_loader: yields (encoder_inputs, decoder_inputs, target_labels) batches
            epoch_num: number of training epochs
            learning_rate: Adam learning rate
            expected_loss: optional loss threshold; training stops early once the
                batch loss drops to or below this value
            device: device to run the training on ('cpu', 'cuda', ...)
        """

        assert epoch_num > 0
        assert learning_rate > 0

        batch_num = len(data_loader)
        criterion = nn.CrossEntropyLoss()
        model = model.to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
        model.train()

        for epoch_id in range(epoch_num):
            print(f'Epoch {epoch_id+1}/{epoch_num}')

            for batch_id, (encoder_inputs, decoder_inputs, target_labels) in enumerate(data_loader):
                optimizer.zero_grad()

                encoder_inputs = encoder_inputs.to(device)
                decoder_inputs = decoder_inputs.to(device)
                target_labels = target_labels.to(device)
                outputs, *_ = model(encoder_inputs, decoder_inputs)
                # CrossEntropyLoss expects (batch, classes, seq); move the vocab
                # dimension from axis 2 to axis 1.
                outputs = outputs.transpose(1, 2)
                # Compute the training loss.
                loss = criterion(outputs, target_labels)
                loss_value = loss.item()

                if batch_id % 100 == 0:
                    print(f'loss: {loss_value:>10f} [{batch_id+1:>5d}/{batch_num:>5d}]')

                if (expected_loss is not None) and (loss_value <= expected_loss):
                    # BUG FIX: the original used `break`, which only exited the
                    # batch loop — training then resumed with the next epoch.
                    # Early stopping must terminate training entirely.
                    optimizer.zero_grad()
                    return

                # Back-propagate the loss and update the weights.
                loss.backward()
                optimizer.step()


class ModelRunner(object):
    r"""
    Encapsulates the model inference logic (greedy, step-by-step decoding).
    """

    def __init__(self, model: Transformer, corpus_dict: CorpusDict, device='cpu',
                 min_source_sequence_length=constants.MIN_PREDICT_SOURCE_SEQUENCE_LENGTH_DEFAULT,
                 max_source_sequence_length=constants.MAX_PREDICT_SOURCE_SEQUENCE_LENGTH_DEFAULT,
                 max_result_sequence_length=constants.MAX_PREDICT_RESULT_SEQUENCE_LENGTH_DEFAULT):
        """
        Args:
            model: the trained Transformer model
            corpus_dict: vocabulary lookup tables for source and target sides
            device: device to run inference on
            min_source_sequence_length: minimum source sequence length for prediction
            max_source_sequence_length: maximum source sequence length for prediction
            max_result_sequence_length: maximum length of the predicted result
        """

        assert min_source_sequence_length > 0
        assert max_source_sequence_length > 0
        assert max_result_sequence_length > 0

        # BUG FIX: the original `super(ModelRunner).__init__()` initialized an
        # unbound super object instead of calling the parent class initializer.
        super().__init__()
        self.model = model.to(device)
        self.corpus_dict = corpus_dict
        self.device = device
        self.min_source_sequence_length = min_source_sequence_length
        self.max_source_sequence_length = max_source_sequence_length
        self.max_result_sequence_length = max_result_sequence_length

    def run_model(self, input_sequence: List[str], output_callback) -> None:
        """
        Greedily decode a target sequence for `input_sequence`, reporting each
        predicted token through `output_callback`.

        Args:
            input_sequence: input (source) token sequence
            output_callback: called as `output_callback(input_sequence, word_index,
                word_str)` for every predicted token; the initial call uses
                `(-1, None)` to signal the start of decoding
        """

        source_word2index_dict = self.corpus_dict.source_word2index_dict
        target_word2index_dict = self.corpus_dict.target_word2index_dict
        target_index2word_dict = self.corpus_dict.target_index2word_dict
        SOURCE_PAD_INDEX = source_word2index_dict[PAD_WORD]
        SOURCE_UNK_INDEX = source_word2index_dict[UNK_WORD]
        TARGET_PAD_INDEX = target_word2index_dict[PAD_WORD]
        TARGET_UNK_INDEX = target_word2index_dict[UNK_WORD]

        # Truncate over-long inputs to the supported source length.
        if len(input_sequence) > self.max_source_sequence_length:
            source_sequence = input_sequence[:self.max_source_sequence_length]
        else:
            source_sequence = input_sequence

        target_sequence = [BOS_WORD]
        # Sentinel call signalling the start of decoding.
        output_callback(input_sequence, -1, None)
        self.model.eval()

        with torch.no_grad():
            encoded_source_sequence = [source_word2index_dict.get(word_str, SOURCE_UNK_INDEX)
                                       for word_str in source_sequence]

            while len(target_sequence) < self.max_result_sequence_length:
                encoded_target_sequence = [target_word2index_dict.get(word_str, TARGET_UNK_INDEX)
                                           for word_str in target_sequence]

                # Pad source and target to a common length, clamped to the
                # configured [min_source, max_source] window.
                source_sequence_length = len(source_sequence)
                target_sequence_length = len(target_sequence)
                expected_sequence_length = max(self.min_source_sequence_length, source_sequence_length,
                                               target_sequence_length)
                expected_sequence_length = min(expected_sequence_length, self.max_source_sequence_length)

                self.__fill_pad_word(encoded_source_sequence, expected_sequence_length, SOURCE_PAD_INDEX)
                self.__fill_pad_word(encoded_target_sequence, expected_sequence_length, TARGET_PAD_INDEX)

                encoder_inputs_tensor = torch.tensor([encoded_source_sequence], dtype=torch.long, device=self.device)
                decoder_inputs_tensor = torch.tensor([encoded_target_sequence], dtype=torch.long, device=self.device)
                predict, *_ = self.model(encoder_inputs_tensor, decoder_inputs_tensor)
                # Parse the prediction at the position following the last
                # generated token.
                next_position = len(target_sequence) - 1
                word_index = self.__parse_predict_result(predict, next_position)
                word_str = target_index2word_dict.get(word_index, UNK_WORD)
                # Deliver the result through the callback.
                output_callback(input_sequence, word_index, word_str)

                if word_str == EOS_WORD:
                    break

                target_sequence.append(word_str)

    @staticmethod
    def __fill_pad_word(encoded_sequence: List[int], expected_length: int, pad_index: int) -> None:
        """Pad `encoded_sequence` in place with `pad_index` up to `expected_length`.

        BUG FIX: the annotation was `List[str]`; the sequence holds integer
        token indices.
        """
        pad_num = expected_length - len(encoded_sequence)

        if pad_num > 0:
            encoded_sequence.extend([pad_index] * pad_num)

    @staticmethod
    def __parse_predict_result(predict: torch.Tensor, index: int) -> int:
        """Return the argmax token index at position `index` of the first batch row."""
        # argmax over the vocabulary dimension, then pick batch row 0.
        result = predict.data.max(2, keepdim=False)[1]
        result = result[0].tolist()

        return result[index]

