import torch

import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class SubClassifierNetwork(nn.Module):
    """Wrapper around a BERT classifier that adds learnable architecture weights.

    Holds two sets of architecture logits — one per attention head and one per
    layer (including the embedding layer) — and forwards them to the wrapped
    ``bert_classifier`` as ``head_mask`` / ``layer_weights``.
    """

    def __init__(self, args, bert_config, bert_classifier):
        """Build the architecture-weight parameters.

        Args:
            args: run configuration object (stored as-is, not read here).
            bert_config: config exposing ``num_hidden_layers`` and
                ``num_attention_heads``.
            bert_classifier: callable classifier accepting the keyword
                arguments passed in :meth:`forward`, returning a
                ``(loss, logits, hidden_states, attentions)`` tuple.
        """
        super().__init__()

        self.args = args
        self.bert_config = bert_config
        self.bert_classifier = bert_classifier

        # Assigning an nn.Parameter attribute on an nn.Module registers it
        # automatically, so no explicit register_parameter() call is needed.
        # One logit per attention head, per layer; initialized to zero.
        self.arch_head_weights = nn.Parameter(
            torch.zeros(bert_config.num_hidden_layers, bert_config.num_attention_heads)
        )
        # One logit per layer; the +1 accounts for the BERT embedding layer.
        self.arch_layer_weights = nn.Parameter(
            torch.zeros(bert_config.num_hidden_layers + 1, 1)
        )
        # Aliases kept for backward compatibility with external callers; they
        # refer to the same Parameter objects (no extra parameters created).
        self.head_weights = self.arch_head_weights
        self.layer_weights = self.arch_layer_weights

    def arch_parameters(self):
        """Return the architecture parameters (head and layer weight logits)."""
        return [self.head_weights, self.layer_weights]

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Run the wrapped classifier with the current architecture weights.

        Returns:
            Tuple of ``(loss, logits, hidden_states, attentions)`` as produced
            by ``self.bert_classifier``.
        """
        # Sharp (near-binary) gating: sigmoid with a large temperature (5e2)
        # pushes each layer weight toward 0 or 1 as its logit moves off zero.
        layer_weights_ = torch.sigmoid(5e2 * self.layer_weights)

        # NOTE(review): the head mask is hard-coded to all zeros (the learned
        # sigmoid variant is disabled below) — presumably an ablation that
        # masks every attention head; confirm this is intentional.
        # head_weights_ = torch.sigmoid(5e2 * self.head_weights)
        head_weights_ = torch.zeros_like(self.head_weights)

        loss, logits, hidden_states, attentions = self.bert_classifier(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_weights_,
            layer_weights=layer_weights_,
            inputs_embeds=inputs_embeds,
            labels=labels,
        )

        return loss, logits, hidden_states, attentions




