



from collections import defaultdict
import sys
from turtle import forward
from typing import Any, Dict, List, Tuple

import torch
from word_vec_encoders.word_vec_encoder_base import WordVecEncoderBase



class WordVecEncoderSet(torch.nn.Module):
    """Aggregates multiple word-vector encoders and concatenates their outputs.

    Each encoder is registered as a child module (via ``add_module``) so its
    parameters are tracked by PyTorch — included in ``state_dict``, moved by
    ``.to(device)``, and visible to optimizers.
    """

    def __init__(self, *word_vec_encoders: "WordVecEncoderBase") -> None:
        """Create the set from zero or more encoders.

        Args:
            word_vec_encoders: encoders to register. Each must expose
                ``name``, ``embedding_dim``, ``forward`` and
                ``collate_batch_tensor``.
        """
        super().__init__()
        # A *args parameter is always a tuple (possibly empty), never None,
        # so the previous None-guard was dead code and has been removed.
        self.word_vec_encoders: List["WordVecEncoderBase"] = list(word_vec_encoders)
        for encoder in self.word_vec_encoders:
            # Register each encoder as a sub-module of this model.
            self.add_module(encoder.name, encoder)
        # NOTE(review): _current_index is never read anywhere in this file;
        # kept in case external code depends on it — confirm before removing.
        self._current_index = 0

    def add_word_vec_encoder(self, encoder: "WordVecEncoderBase") -> None:
        """Append *encoder* to the set and register it as a sub-module."""
        self.word_vec_encoders.append(encoder)
        self.add_module(encoder.name, encoder)

    def forward(self, input_tensors: List[torch.Tensor], batch_size: int, device: str):
        """Run every encoder on its matching input and concatenate the results.

        Args:
            input_tensors: one tensor per encoder, in registration order.
            batch_size: forwarded unchanged to each encoder.
            device: device the input tensors are moved to before encoding.

        Returns:
            The encoder outputs concatenated along ``dim=2``, or ``None``
            when the set contains no encoders.
        """
        if self.count() == 0:
            return None
        # Invoke encoders via __call__ (not .forward) so nn.Module
        # forward/backward hooks are honored.
        outputs = [
            encoder(tensor.to(device), batch_size)
            for encoder, tensor in zip(self.word_vec_encoders, input_tensors)
        ]
        return torch.cat(outputs, dim=2)

    def collate_batch_tensor(self, batch_sentences: List[str]) -> List[Any]:
        """Collate *batch_sentences* once per encoder, preserving encoder order."""
        return [
            encoder.collate_batch_tensor(batch_sentences)
            for encoder in self.word_vec_encoders
        ]

    def __iter__(self):
        """Iterate over the registered encoders in registration order."""
        return iter(self.word_vec_encoders)

    def count(self) -> int:
        """Return the number of registered encoders."""
        return len(self.word_vec_encoders)

    def embedding_dim_sum(self) -> int:
        """Return the total embedding dimension across all encoders."""
        return sum(encoder.embedding_dim for encoder in self.word_vec_encoders)