from accelerate import Accelerator
# Instantiated at import time, presumably so other modules can share a single
# Accelerator instance.
# NOTE(review): `accelerator` is unused in this file — verify that other
# modules import it from here; otherwise this line can be dropped.
accelerator = Accelerator()
from model_utils.constant import CONFIG_MODEL_TOKENIZER_CLASSES
from model_utils.loss_custom import BCELoss_class_weighted,CrossEntropyLoss_class_weighted
from typing import Iterable, List
import numpy as np
import torch.nn as nn
import torch

class AssembleModel(nn.Module):
    """Pretrained masked-LM wrapped with a class-weighted BCE loss head.

    The underlying language model is selected from
    ``CONFIG_MODEL_TOKENIZER_CLASSES`` by ``config.model_name``.
    ``forward``/``predict`` are currently incomplete stubs (they unpack the
    batch but return ``None``).
    """

    def __init__(self, config):
        """
        :param config: experiment config. Must provide ``model_name``,
            ``max_length``, ``weight_label`` (length >= 2), and ``patterns``
            (an object exposing ``num_lbl`` and ``max_num_lbl_tok``).
        """
        super().__init__()
        # Index [1] of the registry tuple is the model class
        # (tuple layout presumably (config, model, tokenizer) — see
        # model_utils.constant).
        self.lm_model = CONFIG_MODEL_TOKENIZER_CLASSES[config.model_name][1].from_pretrained(config.model_name)

        self.config = config
        # BUG FIX: the original read `self.patterns` before any attribute of
        # that name was assigned, raising AttributeError on every
        # instantiation. The pattern definitions must come in on the config.
        # TODO(review): confirm `config.patterns` is the intended source.
        self.patterns = config.patterns
        self.num_lbl = self.patterns.num_lbl
        self.max_num_lbl_tok = self.patterns.max_num_lbl_tok

        # Identity-matrix embedding: looking up position index i returns the
        # i-th one-hot row, a differentiable-friendly way to select the loss
        # contribution at [MASK] positions, e.g.:
        #   with torch.no_grad():
        #       lkup_lbl = self.lbl_idx_lkup(label_class_id)  # [bs, num_lbl]
        self.mask_idx_lkup = nn.Embedding.from_pretrained(torch.eye(self.config.max_length))  # [max_length, max_length]

        # Class-weighted binary cross-entropy over the two label classes.
        # (Earlier experiments used CrossEntropyLoss_class_weighted / BCELoss.)
        self.loss = BCELoss_class_weighted(weight=torch.tensor([config.weight_label[0], config.weight_label[1]]))

    def forward(self, dev=False, predict=False, **kwargs):
        """Training/eval entry point.

        :param dev: when True, skip the MLM computation; dev batches still
            carry labels, so evaluation remains possible.
        :param predict: when True, delegate to :meth:`predict` (label-free
            inference path).
        :param kwargs: {
            'input_ids': (bs, max_length),
            'attention_mask': (bs, max_length),
            'mask_idx': positions of the [MASK] token(s) per example,
            'label_token': (num_lbl, num_lbl_length)  # sized by the label
                definitions, not by the batch,
            'label_class_id': (bs,),
            }
        :returns: None.
            NOTE(review): implementation is incomplete — the tensors below
            are unpacked but never used; no loss/logits are computed yet.
        """
        if predict:
            return self.predict(**kwargs)

        input_ids = kwargs['input_ids']
        attention_mask = kwargs['attention_mask']
        mask_idx = kwargs['mask_idx']
        label_token = kwargs['label_token']
        label_class_id = kwargs['label_class_id']

        return

    def predict(self, **kwargs):
        """Label-free inference path (no MLM loss; batches carry no labels).

        :param kwargs: {
            'input_ids': (bs, max_length),
            'attention_mask': (bs, max_length),
            'mask_idx': positions of the [MASK] token(s) per example,
            'label_token': (num_lbl, num_lbl_length)  # sized by the label
                definitions, not by the batch,
            }
        :returns: None.
            NOTE(review): implementation is incomplete — the tensors below
            are unpacked but never used.
        """
        input_ids = kwargs['input_ids']
        attention_mask = kwargs['attention_mask']
        mask_idx = kwargs['mask_idx']
        label_token = kwargs['label_token']
        return
        