# -*- coding:utf8 -*-
# @Time : 2022/10/19 11:29 上午
# @Author : WanJie Wu

import torch
import torch.nn as nn
from transformers import BertModel, BertConfig

class SimCseModel(nn.Module):
    """SimCSE sentence encoder wrapping a pretrained BERT model.

    For the unsupervised SimCSE variant, increase ``dropout`` — dropout noise
    is what produces the positive pairs.
    """

    POOLING_MODES = ("cls", "pooler", "last-avg", "first-last-avg")

    def __init__(self, pretrained_model: str, pooling: str, dropout: float = 0.1):
        """
        Args:
            pretrained_model: name or local path of the pretrained BERT model.
            pooling: sentence-embedding strategy, one of
                ``"cls" | "pooler" | "last-avg" | "first-last-avg"``.
            dropout: probability applied to both hidden-state and attention
                dropout (raise it for unsupervised SimCSE).

        Raises:
            ValueError: if ``pooling`` is not a supported mode.
        """
        super().__init__()
        # ValueError instead of assert: asserts are stripped under `python -O`,
        # so they must not guard constructor input.
        if pooling not in self.POOLING_MODES:
            raise ValueError(f"pooling must be one of {self.POOLING_MODES}, got {pooling!r}")
        cfg = BertConfig.from_pretrained(pretrained_model)
        cfg.hidden_dropout_prob = dropout
        cfg.attention_probs_dropout_prob = dropout
        self.bert = BertModel.from_pretrained(pretrained_model, config=cfg)
        self.pooling = pooling

    def forward(self, input_ids, attention_mask, token_type_ids, output_hidden_states=False):
        """Encode a batch of sentences.

        Returns:
            Tensor of shape ``(batch_size, hidden_size)``.
        """
        # "first-last-avg" reads out.hidden_states, which is None unless the
        # flag is set; force it on so callers cannot crash the model by
        # forgetting to pass output_hidden_states=True.
        if self.pooling == "first-last-avg":
            output_hidden_states = True
        out = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            output_hidden_states=output_hidden_states,
        )

        # [CLS] token of the last hidden layer.
        if self.pooling == "cls":
            return out.last_hidden_state[:, 0]

        # [CLS] after BERT's pooler (dense + tanh).
        if self.pooling == "pooler":
            return out.pooler_output

        # Mean over the sequence positions of the last hidden layer.
        # NOTE(review): padding positions are included in the average;
        # attention-mask-weighted pooling would exclude them — confirm intent.
        if self.pooling == "last-avg":
            last = out.last_hidden_state.transpose(1, 2)  # (batch, hidden, seq)
            return torch.avg_pool1d(last, kernel_size=last.shape[-1]).squeeze(-1)

        # "first-last-avg": average of the first and last transformer layers
        # (hidden_states[0] is the embedding layer, hence index 1 for "first").
        first = out.hidden_states[1].transpose(1, 2)  # (batch, hidden, seq)
        last = out.hidden_states[-1].transpose(1, 2)  # (batch, hidden, seq)
        first_avg = torch.avg_pool1d(first, kernel_size=first.shape[-1]).squeeze(-1)  # (batch, hidden)
        last_avg = torch.avg_pool1d(last, kernel_size=last.shape[-1]).squeeze(-1)
        avg = torch.cat((first_avg.unsqueeze(1), last_avg.unsqueeze(1)), dim=1)  # (batch, 2, hidden)
        # squeeze(-1), not bare squeeze(): a bare squeeze also collapses the
        # batch dimension when batch_size == 1, breaking the output shape.
        return torch.avg_pool1d(avg.transpose(1, 2), kernel_size=2).squeeze(-1)


if __name__ == "__main__":
    from transformers import BertTokenizer

    # Smoke test: embed two similar Chinese sentences with the
    # "first-last-avg" pooling strategy and print the embedding shape.
    model_name_or_path = "/data/sdv1/wuwanjie/datasets/transformers/bert-base-chinese"
    tokenizer = BertTokenizer.from_pretrained(model_name_or_path)
    model = SimCseModel(pretrained_model=model_name_or_path, pooling="first-last-avg", dropout=0.3)
    encoded = tokenizer(
        text=["一个女孩在给她的头发做发型。", "一个女孩在梳头。"],
        max_length=64,
        truncation=True,
        padding="max_length",
        return_tensors="pt",
    )
    embeddings = model(
        encoded["input_ids"],
        encoded["attention_mask"],
        encoded["token_type_ids"],
        output_hidden_states=True,
    )
    print(embeddings.shape)
