import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import os
import numpy as np
from tqdm import tqdm

from transformers import *


class BertOwnModel(nn.Module):
    """BERT-based encoder wrapper.

    Builds a randomly-initialized ``BertModel`` from the sizes carried in
    ``args`` and exposes a forward pass that can optionally run the encoder
    layers under ``torch.utils.checkpoint`` to trade compute for memory.

    ``args`` is expected to provide: ``hidden_size``, ``num_hidden_layers``,
    ``num_attention_heads`` and ``gpu`` (bool).
    """

    def __init__(self, args):
        super(BertOwnModel, self).__init__()
        self.args = args
        configuration = BertConfig(hidden_size=args.hidden_size,
                                   num_hidden_layers=args.num_hidden_layers,
                                   num_attention_heads=args.num_attention_heads,
                                   intermediate_size=4 * args.hidden_size)
        # BUGFIX: the original called ``BertModel(configuration).cuda()``
        # unconditionally, which crashes on CPU-only machines even when
        # args.gpu is False. ``self.cuda()`` below already moves every
        # submodule (including self.bert) when a GPU is requested.
        self.bert = BertModel(configuration)
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        self.gpu = args.gpu
        self.num_hidden_layers = args.num_hidden_layers
        self.vdim = self.args.hidden_size
        if self.gpu:
            self.cuda()

    def load_model(self, sdict):
        """Load ``sdict`` into this model in place; move to GPU when configured."""
        self.load_state_dict(sdict)
        if self.gpu:
            self.cuda()

    # NOTE (translated from the original Chinese comment): forward needs
    # revising -- it currently just returns the raw BERT outputs.
    def forward(self, input_id, seg_id, mask_id, checkpoint=True, bs=None):
        """Run BERT on token / segment / mask tensors.

        checkpoint: when True, run the encoder layers under gradient
            checkpointing instead of the stock BertModel forward.
        bs: unused; kept for interface compatibility with existing callers.
        Returns the BERT output tuple (sequence_output, pooled_output, ...).
        """
        cls_res = self.ff(input_id, seg_id, mask_id, checkpoint)
        return cls_res

    def step_bert_encode(self, module, hidden_states, attention_mask=None, head_mask=None):
        """Run the BERT encoder layer-by-layer under torch checkpointing.

        module: the BertEncoder whose ``.layer`` list is iterated.
        head_mask: must be indexable per layer (list/tensor, one entry per
            layer; entries may be None).
        Returns a 1-tuple ``(hidden_states,)`` like the stock encoder.
        """
        # BUGFIX/cleanup: dropped the dead ``all_hidden_states`` /
        # ``all_attentions`` accumulators the original created but never used.
        for i, layer_module in enumerate(module.layer):
            # ``checkpoint`` here is the torch.utils.checkpoint module
            # imported at file top (not the ``forward`` parameter).
            layer_outputs = checkpoint.checkpoint(layer_module, hidden_states,
                                                  attention_mask, head_mask[i])
            hidden_states = layer_outputs[0]
        return (hidden_states,)

    def step_checkpoint_bert(self, input_ids, attention_mask=None, token_type_ids=None,
                             position_ids=None, head_mask=None):
        """Replicate ``BertModel.forward`` but checkpoint the encoder layers.

        Mirrors the stock mask/head-mask preprocessing, then runs
        embeddings -> checkpointed encoder -> pooler.
        Returns (sequence_output, pooled_output, ...extras).
        """
        # Registration order of BertModel submodules: embeddings, encoder,
        # pooler -- presumably stable across the pinned transformers version;
        # TODO(review): confirm if transformers is upgraded.
        modules = [module for k, module in self.bert._modules.items()]

        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # Broadcastable additive mask: 0 for real tokens, -10000 for padding.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # per-layer head mask
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        else:
            head_mask = [None] * self.num_hidden_layers

        embedding_output = modules[0](input_ids, position_ids=position_ids, token_type_ids=token_type_ids)

        encoder_outputs = self.step_bert_encode(modules[1], embedding_output,
                                                extended_attention_mask, head_mask)
        sequence_output = encoder_outputs[0]

        pooled_output = modules[2](sequence_output)
        # add hidden_states and attentions if present
        outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)

    def ff(self, ids_tensor, types_tensor, masks_tensor, checkpoint):
        """Move inputs to GPU if configured, then run BERT.

        checkpoint: when truthy, use the checkpointed encoder path.
        """
        if self.gpu:
            ids_tensor = ids_tensor.cuda()
            types_tensor = types_tensor.cuda()
            masks_tensor = masks_tensor.cuda()

        if checkpoint:
            cls_vecs = self.step_checkpoint_bert(input_ids=ids_tensor, token_type_ids=types_tensor,
                                                 attention_mask=masks_tensor)
        else:
            cls_vecs = self.bert(input_ids=ids_tensor, token_type_ids=types_tensor,
                                 attention_mask=masks_tensor)
        return cls_vecs

    def save(self, output_path, config_dic=None, acc=None):
        """Save this model's state_dict (or ``config_dic`` when given) under
        ``output_path``; ``acc`` is embedded in the filename when provided."""
        if acc is None:
            model_name = 'nli_model.state_dict'
        else:
            model_name = 'nli_model_acc{}.state_dict'.format(acc)
        opath = os.path.join(output_path, model_name)
        torch.save(self.state_dict() if config_dic is None else config_dic, opath)

    @staticmethod
    def load(input_path, args):
        """Construct a BertOwnModel from ``args`` and load weights from
        ``input_path``. Maps storages to CPU when args.gpu is False."""
        # BUGFIX: the original tested an undefined global ``gpu``
        # (NameError on every call); the flag lives on ``args``.
        if args.gpu:
            sdict = torch.load(input_path)
        else:
            sdict = torch.load(input_path, map_location=lambda storage, loc: storage)
        model = BertOwnModel(args)
        model.load_state_dict(sdict)
        return model