import copy
import torch
from torch import nn
from transformers import AutoModel, AutoModelForMaskedLM

# Pick the compute device once at import time: GPU when present, CPU otherwise.
cuda_available = torch.cuda.is_available()
device = torch.device('cuda' if cuda_available else 'cpu')
print("Device:", device)


class CorrectionNetwork(nn.Module):
    """Correction Network of MDCSpell.

    Predicts the correct token at every position by running BERT over the
    word embeddings and fusing its hidden states with the Detection
    Network's hidden states before the prediction head.
    """

    def __init__(self, tokenizer, method=2, model_name="hfl/chinese-roberta-wwm-ext"):
        """
        :param tokenizer: BERT tokenizer used to encode the Chinese text.
        :param method: 1 = load a bare ``AutoModel`` and build the prediction
                       head by hand; 2 = reuse the pretrained MLM head from
                       ``AutoModelForMaskedLM`` (default; matches the original
                       hard-coded behavior).
        :param model_name: HuggingFace checkpoint to load. The paper does not
                           say which Chinese BERT was used, so a common one is
                           the default.
        """
        super(CorrectionNetwork, self).__init__()
        self.tokenizer = tokenizer
        self.method = method
        if self.method == 1:
            # Method 1: bare encoder plus a hand-written prediction head.
            self.bert = AutoModel.from_pretrained(model_name)
            # hidden_size is the token-vector width, len(tokenizer) the vocab size.
            self.dropout = nn.Dropout(0.2)
            self.layernorm = nn.LayerNorm(self.bert.config.hidden_size)
            self.gelu = nn.GELU()
            self.dense1 = nn.Linear(self.bert.config.hidden_size, self.bert.config.hidden_size)
            self.dense2 = nn.Linear(self.bert.config.hidden_size, len(self.tokenizer))
        elif self.method == 2:
            # Method 2: AutoModelForMaskedLM already ships encoder + MLM head.
            self.model = AutoModelForMaskedLM.from_pretrained(model_name)
            self.bert = self.model.bert
            self.cls = self.model.cls
        else:
            # Previously an unsupported method silently left self.bert unset
            # and failed later with a confusing AttributeError.
            raise ValueError("method must be 1 or 2, got {}".format(method))

        # BERT's word-embedding table (an nn.Embedding under the hood).
        self.word_embedding_table = self.bert.get_input_embeddings()

    def forward(self, inputs, word_embeddings, detect_hidden_states):
        """Forward pass of the Correction Network.

        :param inputs: tokenizer output for the Chinese text
                       (token indices, attention_mask, token_type_ids, ...).
        :param word_embeddings: tokens embedded with BERT's word-embedding table.
        :param detect_hidden_states: hidden states from the Detection Network.
        :return: per-token prediction logits over the vocabulary.
        """
        # 1. Run BERT, feeding embeddings directly instead of input_ids.
        bert_outputs = self.bert(token_type_ids=inputs['token_type_ids'],
                                 attention_mask=inputs['attention_mask'],
                                 inputs_embeds=word_embeddings)
        # 2. Fuse BERT's hidden states with the Detection Network's.
        hidden_states = bert_outputs['last_hidden_state'] + detect_hidden_states

        # 3. Project to vocabulary logits.
        if self.method == 1:
            # Method 1: the custom head defined in __init__.
            out = self.dropout(hidden_states)
            out = self.dense1(out)
            out = self.layernorm(out)
            out = self.gelu(out)
            out = self.dropout(out)
            out = self.dense2(out)
        elif self.method == 2:
            # Method 2: the pretrained MLM cls head.
            out = self.cls(hidden_states)
        return out

    def get_inputs_and_word_embeddings(self, sequences, max_length=128):
        """Tokenize Chinese sequences and embed them with BERT's word embeddings.

        :param sequences: list of Chinese texts, e.g. ["鸡你太美", "哎呦，你干嘛！"].
        :param max_length: pad shorter texts to this length, truncate longer ones.
        :return: (tokenizer outputs, word embeddings). The embeddings contain
                 neither position nor segment embeddings.
        """
        # Place the batch on the same device as the model's parameters instead
        # of relying on the module-level ``device`` global.
        model_device = self.word_embedding_table.weight.device
        inputs = self.tokenizer(sequences, padding='max_length', max_length=max_length, return_tensors='pt',
                                truncation=True).to(model_device)
        word_embeddings = self.word_embedding_table(inputs['input_ids'])
        return inputs, word_embeddings

    def _init_dense_layer(self):
        """Tie the output projection to the word-embedding weights, as the
        original paper prescribes."""
        if self.method == 1:
            self.dense2.weight.data = self.word_embedding_table.weight.data
        elif self.method == 2:
            # Both matrices are (vocab_size, hidden_size).
            self.cls.predictions.decoder.weight.data = self.word_embedding_table.weight.data


class DetectionNetwork(nn.Module):
    """Detection Network of MDCSpell: scores each token as correct/erroneous."""

    def __init__(self, position_embeddings, transformer_blocks, hidden_size, pos_weight=30):
        """
        :param position_embeddings: BERT's position embeddings (an nn.Embedding).
        :param transformer_blocks: BERT's first transformer layers (a ModuleList;
                                   each layer returns a tuple whose first item is
                                   the hidden states).
        :param hidden_size: width of the token vectors.
        :param pos_weight: positive-class weight used by the loss. When it is 1
                           a Sigmoid is applied here (paired with plain BCELoss);
                           otherwise raw logits are returned, to be consumed by
                           BCEWithLogitsLoss.
        """
        super(DetectionNetwork, self).__init__()
        self.position_embeddings = position_embeddings
        self.transformer_blocks = transformer_blocks
        self.pos_weight = pos_weight

        self.dropout = nn.Dropout(0.2)

        # Per-token binary prediction head.
        if self.pos_weight == 1:
            self.dense2 = nn.Sequential(
                nn.Linear(hidden_size, 1),
                nn.Sigmoid()
            )
        else:
            self.dense2 = nn.Sequential(
                nn.Linear(hidden_size, 1)
            )

    def forward(self, word_embeddings):
        """
        :param word_embeddings: (batch, sequence_length, hidden_size) embeddings.
        :return: (hidden_states, out) — the transformer hidden states and the
                 per-token error score of shape (batch, sequence_length, 1).
        """
        sequence_length = word_embeddings.size(1)
        # Build position ids on the input's own device instead of relying on
        # the module-level ``device`` global (fixes CPU/GPU mismatch when the
        # model lives on a different device than the global says).
        position_ids = torch.arange(sequence_length, device=word_embeddings.device)
        # Fuse word embeddings with position embeddings.
        x = word_embeddings + self.position_embeddings(position_ids)
        # Pass through the transformer encoder layers; each returns a tuple.
        for transformer_layer in self.transformer_blocks:
            x = transformer_layer(x)[0]

        # Return both the hidden states (for the Correction Network) and the
        # per-token prediction.
        hidden_states = x
        out = self.dense2(self.dropout(x))
        return hidden_states, out


class MDCSpellModel(nn.Module):
    """Full MDCSpell model: a Correction Network plus a Detection Network
    whose position embeddings and transformer layers come from BERT."""

    def __init__(self, tokenizer, pos_weight=30):
        super(MDCSpellModel, self).__init__()
        self.tokenizer = tokenizer

        # Correction Network, with its output projection tied to the
        # word-embedding weights.
        self.correction_network = CorrectionNetwork(tokenizer)
        self.correction_network._init_dense_layer()

        # As the paper suggests, the Detection Network starts from BERT
        # weights: share BERT's position embeddings and clone its first two
        # transformer layers.
        bert = self.correction_network.bert
        cloned_layers = copy.deepcopy(bert.encoder.layer[:2])
        self.detection_network = DetectionNetwork(
            bert.embeddings.position_embeddings,
            cloned_layers,
            bert.config.hidden_size,
            pos_weight,
        )

    def forward(self, sequences, max_length=128):
        # Tokenize once; both sub-networks consume the same word embeddings.
        inputs, word_embeddings = self.correction_network.get_inputs_and_word_embeddings(sequences, max_length)
        # Detection pass: its hidden states feed the Correction Network.
        hidden_states, detection_outputs = self.detection_network(word_embeddings)
        correction_outputs = self.correction_network(inputs, word_embeddings, hidden_states)
        # Zero the detection scores on `[PAD]` positions so they do not
        # contribute to the loss.
        masked_detection = detection_outputs.squeeze(2) * inputs['attention_mask']
        return correction_outputs, masked_detection, inputs['attention_mask']

    
class MDCSpellLoss(nn.Module):
    """Weighted sum of the Correction loss (cross entropy over the vocabulary)
    and the Detection loss (binary cross entropy)."""

    def __init__(self, coefficient=0.85, pos_weight=30):
        """
        :param coefficient: weight of the correction loss; the detection loss
                            gets (1 - coefficient).
        :param pos_weight: weight of the positive (erroneous-token) class,
                           typically neg/pos, to counter class imbalance.
                           With pos_weight == 1 a plain BCELoss is used (the
                           Detection Network then emits probabilities);
                           otherwise BCEWithLogitsLoss consumes raw logits.
        """
        super(MDCSpellLoss, self).__init__()
        # Register as a buffer so the tensor follows the module across
        # .to(device) calls instead of depending on the module-level
        # ``device`` global.
        self.register_buffer('pos_weight', torch.tensor([pos_weight]))
        print("MDCSpellLoss pos_weight: {}".format(self.pos_weight))
        # ignore_index=0 skips `[PAD]` targets in the correction loss.
        self.correction_criterion = nn.CrossEntropyLoss(ignore_index=0)
        # Compare the plain int, not the 1-element tensor.
        if pos_weight == 1:
            self.detection_criterion = nn.BCELoss()
        else:
            # BCEWithLogitsLoss = Sigmoid + BCELoss in one call, and supports
            # pos_weight to handle the class imbalance.
            self.detection_criterion = nn.BCEWithLogitsLoss(pos_weight=self.pos_weight)
        # Weighting between the two losses.
        self.coefficient = coefficient

    def forward(self, correction_outputs, correction_targets,
                detection_outputs, detection_targets,
                attention_masks):
        """
        :param correction_outputs: Correction Network logits,
                                   (batch_size, sequence_length, vocab_size).
        :param correction_targets: Correction labels, (batch_size, sequence_length).
        :param detection_outputs: Detection Network outputs, (batch_size, sequence_length).
        :param detection_targets: Detection labels, (batch_size, sequence_length).
        :param attention_masks: attention mask, (batch_size, sequence_length);
                                currently unused — `[PAD]` positions are already
                                zeroed by the model and ignore_index=0 handles
                                the correction side.
        :return: scalar weighted loss.
        """
        # Correction loss: flatten batch and sequence dims for CrossEntropyLoss.
        correction_loss = self.correction_criterion(correction_outputs.view(-1, correction_outputs.size(2)),
                                                    correction_targets.view(-1))
        # Detection loss.
        detection_loss = self.detection_criterion(detection_outputs, detection_targets)
        # Weighted combination of the two losses.
        return self.coefficient * correction_loss + (1 - self.coefficient) * detection_loss


if __name__ == '__main__':
    from transformers import BertTokenizer, AutoModelForMaskedLM
    import os
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    data_path = "../data/realise/"
    vocab_path = os.path.join(data_path, 'vocab.txt')
    tokenizer = BertTokenizer(vocab_file=vocab_path)
    model = MDCSpellModel(tokenizer=tokenizer, pos_weight=30).to(device)
    # BUG FIX: MDCSpellModel.forward returns THREE tensors (correction
    # outputs, masked detection outputs, attention mask); unpacking into two
    # variables raised "ValueError: too many values to unpack".
    correction_outputs, detection_outputs, attention_mask = model(["鸡你太美", "哎呦，你干嘛！"])
    print("correction_outputs shape:", correction_outputs.size())
    print("detection_outputs shape:", detection_outputs.size())