

from time import perf_counter
from typing import Dict, List, Tuple
from word_vec_encoders.word_vec_encoder_base import WordVecEncoderBase
import torch

class MiniSequenceConvEncoder(WordVecEncoderBase):
    """
    Encoder that transcribes each character into a small symbol sequence
    (e.g. pinyin, stroke order, or Wubi codes), extracts features from that
    mini-sequence with a 1-D convolutional network, and produces a fixed-size
    character embedding.
    """
    def __init__(self, name:str, miniseq_symbols:List[str], fixed_miniseq_len:int, embedding_dim:int = 32) -> None:
        """
        :param name: encoder name, forwarded to the base class
        :param miniseq_symbols: alphabet of mini-sequence symbols; a symbol's
            position in this list becomes its one-hot index
        :param fixed_miniseq_len: fixed (padded/truncated) length of every
            character's mini-sequence
        :param embedding_dim: dimension of the final character embedding
        """
        super().__init__(name, embedding_dim)
        # Map each symbol to its one-hot index.
        self.miniseq_symbol_dict = {symbol: i for i, symbol in enumerate(miniseq_symbols)}
        self.one_hot_dim = len(miniseq_symbols)
        self.fixed_miniseq_len = fixed_miniseq_len
        # Each CNN stage halves the per-symbol channel count.
        self.middle_channels = self.one_hot_dim // 2
        self.out_channels = self.one_hot_dim // 4
        self.cnn = torch.nn.Sequential(
            torch.nn.Conv1d(in_channels=self.one_hot_dim, out_channels=self.middle_channels, kernel_size=3, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool1d(kernel_size=2, stride=2),
            torch.nn.Conv1d(in_channels=self.middle_channels, out_channels=self.out_channels, kernel_size=3, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool1d(kernel_size=2, stride=2),
        )
        # A fully-connected layer projects the flattened CNN feature map down
        # to the output embedding dimension. Each MaxPool1d (stride 2) halves
        # the sequence length, hence the two integer halvings below.
        self.cnn_out_dim = (self.fixed_miniseq_len // 2 // 2) * self.out_channels
        self.embedding_dim = embedding_dim
        self.linear = torch.nn.Linear(self.cnn_out_dim, self.embedding_dim)
        # Cache for sentence -> mini-sequence conversions (used by subclasses).
        self._sentence_miniseq_cache = dict()

    def forward(self, input_tensor:torch.Tensor, batch_size:int):
        """
        Run the CNN + projection over a collated mini-sequence batch.

        :param input_tensor: tensor of shape
            (batch_size * sentence_length, one_hot_dim, fixed_miniseq_len),
            as produced by `collate_batch_tensor`
        :param batch_size: number of sentences in the batch
        :return: embeddings of shape (batch_size, sentence_length, embedding_dim)
        """
        out = self.cnn(input_tensor)
        # Conv1d/MaxPool1d output is (N, C, L): channels come BEFORE the
        # pooled length. (The original code had these two names swapped; the
        # flattened size is the same either way, so behavior is unchanged.)
        seq_batch_size, out_channels, out_len = out.size()
        # Flatten the per-character feature map.
        out = out.reshape((seq_batch_size, out_channels * out_len))
        out = self.linear(out)
        # Restore sentence boundaries.
        sentence_length = seq_batch_size // batch_size
        out = out.reshape((batch_size, sentence_length, self.embedding_dim))
        return out

    def collate_batch_tensor(self, batch_sentences:List[str]):
        """
        Convert a batch of sentences into a single tensor ready for `forward`.

        :param batch_sentences: batch of sentences
        :return: tensor of shape
            (batch_size * sentence_length, one_hot_dim, fixed_miniseq_len)
        """
        input_tensor = self.batch_str_to_input_tensor_bert_style_cached(batch_sentences)
        batch_size, sentence_length, _, _ = input_tensor.size()
        # Break sentence boundaries: treat every character's mini-sequence as
        # an independent sample in one large batch.
        input_tensor = input_tensor.reshape((batch_size * sentence_length, self.fixed_miniseq_len, self.one_hot_dim))
        # Conv1d expects (batch, channels, seq_len), so swap the last two dims.
        input_tensor = input_tensor.swapdims(1, 2)
        return input_tensor

    def sentence_to_tensor(self, sentence:str) -> torch.Tensor:
        """
        Encode one sentence as a stack of per-character one-hot mini-sequences.

        :param sentence: input sentence
        :return: int64 tensor of shape
            (num_chars, fixed_miniseq_len, one_hot_dim)
        """
        sentence_miniseqs = self.sentence_to_miniseqs_cached(sentence)
        char_tensors = []
        for char_miniseq in sentence_miniseqs:
            char_seq_indices = torch.zeros(self.fixed_miniseq_len, dtype=torch.int64)
            for isymbol, symbol in enumerate(char_miniseq):
                char_seq_indices[isymbol] = self.miniseq_symbol_dict[symbol]
            # NOTE(review): unfilled (padding) positions keep index 0 and thus
            # one-hot-encode as the FIRST symbol — confirm that index 0 of
            # `miniseq_symbols` is a dedicated padding symbol.
            char_miniseq_onehot = torch.nn.functional.one_hot(char_seq_indices, self.one_hot_dim)
            char_tensors.append(char_miniseq_onehot)
        return torch.stack(char_tensors)

    def empty_chars_tensor(self, num_chars:int) -> torch.Tensor:
        """
        Return an all-zero placeholder for `num_chars` characters, shaped like
        the per-character output of `sentence_to_tensor`.

        NOTE(review): this returns float32 zeros while `sentence_to_tensor`
        returns int64 one-hot tensors — confirm the caller converts dtypes
        before combining the two.
        """
        return torch.zeros((num_chars, self.fixed_miniseq_len, self.one_hot_dim))

    def sentence_to_miniseqs_cached(self, sentence:str) -> List[str]:
        """
        Map a sentence to its per-character mini-sequences (e.g. pinyin or
        stroke codes). Subclasses must override this method.

        :raises NotImplementedError: always; this is an abstract hook.
        """
        # Bug fix: the original signature was missing `self`, so the call
        # `self.sentence_to_miniseqs_cached(sentence)` in `sentence_to_tensor`
        # raised TypeError instead of ever reaching this message.
        raise NotImplementedError('必须实现本方法')