from distutils.command.config import config  # NOTE(review): appears unused — verify before removing

import torch
import torch.nn as nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers import (
    AutoModelForMaskedLM,
    AutoModelForSequenceClassification,
    BertModel,
    PreTrainedModel,
    RobertaModel,
)
from transformers.modeling_outputs import (
    SequenceClassifierOutput,
)

'''
    A general text classification model, intended to help achieve better results in
    competitions. If you do research on text classification, you can also use it as
    a comparison (baseline) model.
'''

params = ['model_name','bilstm_output','linear_output','concat_last_4_layers','average_pooling']


class ModelForNormalTextClassification(PreTrainedModel):
    """General text classification model over a BERT/RoBERTa backbone.

    Optional tricks (each toggled by a constructor flag; both real booleans
    and the string 'True' are accepted, for backward compatibility):
      * average_pooling      -- use MyBertModel/MyRobertaModel, whose pooler
                                concatenates CLS + avg + max token states.
      * concat_last_4_layers -- concatenate the last 4 hidden layers and
                                project back to hidden_size (callers must
                                pass output_hidden_states=True).
      * linear_output        -- expand the pooled output to 4*hidden_size
                                before the classifier.
      * bilstm_output        -- reserved; not implemented yet.
    """

    def __init__(self, config, n_classes=2, model_name='bert-base-uncased',
                 bilstm_output=False, linear_output=False,
                 concat_last_4_layers=False, average_pooling=False):
        # NOTE(review): the original subclassed AutoModelForSequenceClassification,
        # whose __init__ always raises EnvironmentError (Auto classes must be
        # built via from_pretrained); PreTrainedModel is the instantiable base.
        super().__init__(config)
        # The original wrote `model_name,` / `linear_output,` (trailing comma),
        # silently storing 1-tuples and breaking every later comparison.
        self.model_name = model_name
        self.bilstm_output = bilstm_output
        self.linear_output = linear_output
        self.concat_last_4_layers = concat_last_4_layers
        self.average_pooling = average_pooling
        # Needed by the loss-selection logic in forward(); never set originally.
        self.num_labels = n_classes

        # Always build a backbone. The original only created one when
        # average_pooling was enabled, so forward() crashed otherwise.
        # (Explicit membership test also fixes the original `and`/`or`
        # precedence bug on the bert-name check.)
        if self.model_name in ('bert-base-uncased', 'bert-base-chinese',
                               'hfl/chinese-bert-wwm-ext'):
            self.bert = MyBertModel(config) if self._enabled(average_pooling) else BertModel(config)
        elif 'Robert' in self.model_name:
            self.roberta = MyRobertaModel(config) if self._enabled(average_pooling) else RobertaModel(config)

        # Referenced in forward() but undefined in the original.
        self.dropout = nn.Dropout(getattr(config, 'hidden_dropout_prob', 0.1))
        # Projects the concat of the last 4 hidden layers back to hidden_size
        # (referenced as self.linear in forward(); undefined in the original).
        self.linear = nn.Linear(config.hidden_size * 4, config.hidden_size)
        self.outputLinear = nn.Linear(config.hidden_size, config.hidden_size * 4)
        in_features = config.hidden_size * 4 if self._enabled(linear_output) else config.hidden_size
        self.classifier = nn.Linear(in_features, n_classes)

    @staticmethod
    def _enabled(flag):
        """Interpret a feature flag: True or the string 'True' enables it.

        The original compared every flag to the string 'True', which never
        matched the boolean defaults; this accepts both forms.
        """
        return flag is True or flag == 'True'

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Run the backbone, pool, classify, and optionally compute the loss.

        Returns a SequenceClassifierOutput when return_dict is true, else a
        tuple (loss?, logits, *extras) in the usual transformers layout.
        Returns the string 'ERROR' for an unrecognized model_name (kept for
        backward compatibility with the original behavior).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if 'Robert' in self.model_name:
            encoder = self.roberta
        elif self.model_name in ('bert-base-uncased', 'bert-base-chinese',
                                 'hfl/chinese-bert-wwm-ext'):
            encoder = self.bert
        else:
            print('ERROR')
            return 'ERROR'

        outputs = encoder(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Index 1 is the pooler output for both BertModel and RobertaModel.
        # (The original RoBERTa branch used outputs[0], the per-token
        # sequence output, which has the wrong shape for the classifier.)
        pooled_output = outputs[1]
        if self._enabled(self.concat_last_4_layers) and output_hidden_states:
            # outputs[2] holds the per-layer hidden states when
            # output_hidden_states=True (the original RoBERTa branch read
            # outputs[1], the pooler output, by mistake).
            all_hidden_states = torch.stack(outputs[2])
            concatenate_pooling = torch.cat(
                (all_hidden_states[-1], all_hidden_states[-2],
                 all_hidden_states[-3], all_hidden_states[-4]), -1
            )
            # Keep only the [CLS] position of the concatenated representation.
            concatenate_pooling = concatenate_pooling[:, 0]
            pooled_output = self.linear(concatenate_pooling)

        # The original applied dropout only on the BERT path; apply it
        # uniformly for consistency.
        pooled_output = self.dropout(pooled_output)

        if self._enabled(self.linear_output):
            pooled_output = self.outputLinear(pooled_output)
        if self._enabled(self.bilstm_output):
            # TODO: BiLSTM head not implemented yet.
            pass
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                # Infer the problem type from num_labels / label dtype
                # (standard transformers convention).
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
            
# TODO: to be completed (待完善)
class ModelForPromptTextClassification(AutoModelForMaskedLM):
    """Presumably a prompt-based text classifier built on a masked-LM backbone.

    Placeholder — not implemented yet. NOTE(review): __init__ does not call
    super().__init__, so the class cannot be used in its current state.
    """
    def __init__(self) -> None:
        # Stub: no state is set up yet.
        pass
        
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        masked_lm_labels=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        lm_labels=None,
        
    ):
        # Stub: does nothing (returns None).
        pass

# TODO: to be completed (待完善)
class ModelForNezhaTextClassification():
    """Presumably a NEZHA-backbone text classifier.

    Placeholder — not implemented yet. NOTE(review): unlike the other model
    classes here, this one inherits from nothing (not even nn.Module), so it
    is not yet a usable PyTorch module.
    """
    def __init__(self) -> None:
        # Stub: no state is set up yet.
        pass
        
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        masked_lm_labels=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        lm_labels=None,
        
    ):
        # Stub: does nothing (returns None).
        pass

class MyBertModel(BertModel):
    """BertModel variant whose pooler is replaced by MyBertPooler
    (concatenation of CLS + average-pooled + max-pooled token states)."""
    def __init__(self, config):
        super().__init__(config)
        # Override the default CLS-tanh pooler with the concatenating pooler.
        self.pooler = MyBertPooler(config)
    
class MyRobertaModel(RobertaModel):
    """RobertaModel variant whose pooler is replaced by MyBertPooler
    (the same concatenating pooler is reused for both backbones)."""
    def __init__(self, config):
        super().__init__(config)
        # Override the default pooler with the concatenating pooler.
        self.pooler = MyBertPooler(config)
class MyBertPooler(nn.Module):
    """Pooler that concatenates [CLS], mean-pooled and max-pooled token states.

    Input : hidden_states of shape (batch, seq_len, hidden_size)
            -- TODO confirm this layout matches the encoder's output.
    Output: (batch, hidden_size) after a linear projection and tanh.
    """

    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        # 3 * hidden_size because we concatenate CLS + avg + max pooling.
        self.dense = nn.Linear(config.hidden_size * 3, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # [CLS]: the hidden state of the first token.
        first_token_tensor = hidden_states[:, 0]
        # Mean/max over the sequence dimension. (The original instantiated
        # nn.AvgPool1d/nn.MaxPool1d with a *tensor* as kernel_size instead of
        # actually pooling the tensor — a runtime error.)
        avg_tensor = hidden_states.mean(dim=1)
        max_tensor = hidden_states.max(dim=1).values
        concatenate_pooling = torch.cat(
            (first_token_tensor, avg_tensor, max_tensor), dim=-1
        )
        pooled_output = self.dense(concatenate_pooling)
        pooled_output = self.activation(pooled_output)
        return pooled_output

