
import torch
from torch.utils.data import Dataset
from typing import List, Dict, Tuple

from ..common import constants
from ..transformer.transformer_model import PAD_WORD, UNK_WORD, BOS_WORD, EOS_WORD

class CorpusData(object):
    """Parallel corpus container: raw sequences plus derived vocabularies.

    Builds word<->index lookup tables for both the source and the target
    side of the corpus, records the maximum sequence length of each side,
    and bundles the lookup tables into a `CorpusDict`.
    """

    def __init__(self, source_sequences_list: List[List[str]],
                 target_sequences_list: List[List[str]]):
        """
        Args:
            source_sequences_list: source-side sequences (each a list of tokens)
            target_sequences_list: target-side sequences (each a list of tokens)
        """
        # BUG FIX: the original `super(CorpusData).__init__()` built an
        # *unbound* super object and never ran the parent initializer;
        # the zero-argument form performs the intended call.
        super().__init__()

        source_word2count_dict, max_source_sequence_length = \
            self.__count_sequences_list(source_sequences_list)
        source_word2index_dict = self.__get_word2index_dict(source_word2count_dict)
        source_index2word_dict = {word_index: word_str for word_str, word_index
                                  in source_word2index_dict.items()}

        target_word2count_dict, max_target_sequence_length = \
            self.__count_sequences_list(target_sequences_list)
        target_word2index_dict = self.__get_word2index_dict(target_word2count_dict)
        target_index2word_dict = {word_index: word_str for word_str, word_index
                                  in target_word2index_dict.items()}

        self.source_sequences_list = source_sequences_list
        self.target_sequences_list = target_sequences_list
        # Longest source sequence length (in tokens)
        self.max_source_sequence_length = max_source_sequence_length
        # Longest target sequence length (in tokens)
        self.max_target_sequence_length = max_target_sequence_length

        self.corpus_dict = CorpusDict(source_word2index_dict=source_word2index_dict,
                                      source_index2word_dict=source_index2word_dict,
                                      target_word2index_dict=target_word2index_dict,
                                      target_index2word_dict=target_index2word_dict)

    @staticmethod
    def __count_sequences_list(sequences_list: List[List[str]]) -> Tuple[Dict[str, int], int]:
        """Count token occurrences and track the longest sequence.

        Args:
            sequences_list: list of token sequences.

        Returns:
            Tuple of (word -> occurrence count, maximum sequence length).
            An empty corpus yields ({}, 0).
        """
        word2count_dict: Dict[str, int] = dict()
        max_sequence_length = 0

        for words_list in sequences_list:
            # Track the longest sequence seen so far.
            max_sequence_length = max(max_sequence_length, len(words_list))

            # Accumulate per-token occurrence counts.
            for word_str in words_list:
                word2count_dict[word_str] = word2count_dict.get(word_str, 0) + 1

        return word2count_dict, max_sequence_length

    @staticmethod
    def __get_word2index_dict(word2count_dict: Dict[str, int]) -> Dict[str, int]:
        """Build a word -> index vocabulary, most frequent words first.

        Indices 0-3 are reserved for the PAD/UNK/BOS/EOS special tokens;
        corpus words are assigned consecutive indices from 4 upward in
        descending frequency order.
        """
        word2index_dict = {PAD_WORD: 0, UNK_WORD: 1, BOS_WORD: 2, EOS_WORD: 3}

        # Sort by descending frequency; Python's sort is stable, so words
        # with equal counts keep their original (insertion) order.
        vocab_words_list = sorted(word2count_dict, key=word2count_dict.get, reverse=True)

        for next_word_index, word_str in enumerate(vocab_words_list, start=4):
            word2index_dict[word_str] = next_word_index

        return word2index_dict


class CorpusDict(object):
    """Bundle of the four vocabulary lookup tables of a parallel corpus."""

    def __init__(self, source_word2index_dict: Dict[str, int],
                 source_index2word_dict: Dict[int, str],
                 target_word2index_dict: Dict[str, int],
                 target_index2word_dict: Dict[int, str]):
        """
        Args:
            source_word2index_dict: source-side word -> index mapping
            source_index2word_dict: source-side index -> word mapping
            target_word2index_dict: target-side word -> index mapping
            target_index2word_dict: target-side index -> word mapping
        """
        # BUG FIX: the original `super(CorpusDict).__init__()` built an
        # *unbound* super object and silently skipped the parent
        # initializer; the zero-argument form performs the intended call.
        super().__init__()
        self.source_word2index_dict = source_word2index_dict
        self.source_index2word_dict = source_index2word_dict
        self.target_word2index_dict = target_word2index_dict
        self.target_index2word_dict = target_index2word_dict


class CorpusDataset(Dataset):
    """Torch `Dataset` of (source, decoder-input, label) index tensor triples.

    Every item is a tuple of three `LongTensor`s padded to one fixed
    `sequence_length`: the encoded source sequence, the encoded target
    sequence prefixed with BOS (decoder input), and the encoded target
    sequence suffixed with EOS (training labels).
    """

    def __init__(self, corpus_data: CorpusData, min_sequence_length=constants.MIN_SEQUENCE_LENGTH_DEFAULT,
                 max_sequence_length=constants.MAX_SEQUENCE_LENGTH_DEFAULT, max_item_size=None):
        """
        Args:
            corpus_data: corpus data (sequences + vocabularies)
            min_sequence_length: lower bound for the padded sequence length
            max_sequence_length: upper bound for the padded sequence length
            max_item_size: maximum number of items to build (None = no limit)
        """
        super(CorpusDataset, self).__init__()

        # Target sequences need one extra slot for the BOS/EOS marker,
        # hence the +1; the result is clamped to [.., max_sequence_length].
        sequence_length = max(min_sequence_length, corpus_data.max_source_sequence_length,
                              corpus_data.max_target_sequence_length + 1)
        sequence_length = min(sequence_length, max_sequence_length)

        self.items_list = self.__get_items_list(corpus_data, sequence_length, max_item_size)
        self.sequence_length = sequence_length

    def __len__(self) -> int:
        return len(self.items_list)

    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        return self.items_list[index]

    @staticmethod
    def __get_items_list(corpus_data: CorpusData, sequence_length: int,
                         max_item_size=None) -> List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
        """Encode, truncate and pad every sequence pair into tensor triples.

        Args:
            corpus_data: corpus data providing sequences and vocabularies
            sequence_length: fixed length every encoded sequence is padded to
            max_item_size: stop after this many items (None = no limit)

        Returns:
            List of (source, decoder-input, label) LongTensor triples,
            each tensor of length `sequence_length`.
        """
        items_list = list()
        item_count = 0

        corpus_dict = corpus_data.corpus_dict
        source_word2index_dict = corpus_dict.source_word2index_dict
        target_word2index_dict = corpus_dict.target_word2index_dict
        SOURCE_PAD_INDEX = source_word2index_dict[PAD_WORD]
        SOURCE_UNK_INDEX = source_word2index_dict[UNK_WORD]
        TARGET_PAD_INDEX = target_word2index_dict[PAD_WORD]
        TARGET_UNK_INDEX = target_word2index_dict[UNK_WORD]
        TARGET_BOS_INDEX = target_word2index_dict[BOS_WORD]
        TARGET_EOS_INDEX = target_word2index_dict[EOS_WORD]

        source_sequences_list = corpus_data.source_sequences_list
        target_sequences_list = corpus_data.target_sequences_list
        sequence_num = len(source_sequences_list)

        for index in range(sequence_num):
            if (max_item_size is not None) and (item_count >= max_item_size):
                break

            source_sequence = source_sequences_list[index]
            target_sequence = target_sequences_list[index]

            # Truncate: source may fill the whole window; target keeps one
            # slot free for the BOS (input) / EOS (label) marker.
            if len(source_sequence) > sequence_length:
                source_sequence = source_sequence[:sequence_length]

            if len(target_sequence) >= sequence_length:
                target_sequence = target_sequence[:sequence_length-1]

            # Encode the source sequence, mapping OOV words to UNK.
            encoded_source_sequence = [source_word2index_dict.get(word_str, SOURCE_UNK_INDEX)
                                       for word_str in source_sequence]
            # Decoder input starts with the BOS marker.
            encoded_target_sequence = [TARGET_BOS_INDEX]
            encoded_label_sequence = []

            for word_str in target_sequence:
                word_index = target_word2index_dict.get(word_str, TARGET_UNK_INDEX)
                encoded_target_sequence.append(word_index)
                encoded_label_sequence.append(word_index)

            # Label sequence ends with the EOS marker.
            encoded_label_sequence.append(TARGET_EOS_INDEX)
            # Pad all three sequences up to the fixed length.
            CorpusDataset.__fill_pad_word(encoded_source_sequence, sequence_length, SOURCE_PAD_INDEX)
            CorpusDataset.__fill_pad_word(encoded_target_sequence, sequence_length, TARGET_PAD_INDEX)
            CorpusDataset.__fill_pad_word(encoded_label_sequence, sequence_length, TARGET_PAD_INDEX)

            item_count += 1
            items_list.append((torch.LongTensor(encoded_source_sequence),
                               torch.LongTensor(encoded_target_sequence),
                               torch.LongTensor(encoded_label_sequence)))

        return items_list

    @staticmethod
    def __fill_pad_word(encoded_sequence: List[int], expected_length: int, pad_index: int) -> None:
        """Pad `encoded_sequence` in place with `pad_index` up to `expected_length`.

        FIX: the parameter was annotated `List[str]` although it holds int
        word indices; also replaced the append loop with a single `extend`.
        """
        pad_num = expected_length - len(encoded_sequence)

        if pad_num > 0:
            encoded_sequence.extend([pad_index] * pad_num)

