import torch
from torch import nn
from transformers import BertModel, BertConfig, BertTokenizer
from model.attention import SelfAttention


class Bert_Attention(nn.Module):
    """Multi-segment BERT classifier.

    Each element of the input is run through a (Chinese) BERT encoder; the
    pooled [CLS] outputs are projected to ``out_dim``, concatenated into a
    single ``d_model``-wide feature vector (zero-padded when fewer than
    ``d_model // out_dim`` segments are given), and classified into
    ``d_label`` classes.

    Args:
        d_model: Width of the concatenated segment features (default 4096).
            Must be an exact multiple of ``out_dim``.
        in_dim: BERT pooler output width (768 for bert-base).
        out_dim: Per-segment projection width (default 512).
        d_label: Number of output classes.
    """

    def __init__(self, d_model=4096, in_dim=768, out_dim=512, d_label=7):
        super(Bert_Attention, self).__init__()
        # NOTE(review): 'bert_base_chinese' looks like a local checkpoint
        # directory; the Hugging Face hub id is 'bert-base-chinese' — confirm.
        self.bert_model = BertModel.from_pretrained('bert_base_chinese')
        self.config = BertConfig.from_pretrained('bert_base_chinese')
        self.tokenizer = BertTokenizer.from_pretrained('bert_base_chinese')
        self.attention = SelfAttention(d_model)
        self.dropout1 = nn.Dropout(p=0.1)
        self.dropout2 = nn.Dropout(p=0.1)
        self.fc1 = nn.Linear(in_dim, out_dim)
        self.fc = nn.Linear(d_model, d_label)
        self.fc2 = nn.Linear(d_model, d_model)
        # d_model holds exactly n_segments chunks of out_dim features each
        # (8 x 512 = 4096 with the defaults).  Previously both numbers were
        # hard-coded in forward().
        self.out_dim = out_dim
        self.n_segments = d_model // out_dim

    def forward(self, x):
        """Encode up to ``self.n_segments`` token-id segments and classify.

        Args:
            x: Sequence of BERT input-id tensors, one per segment; each is
               assumed to be shaped (batch, seq_len) — TODO confirm against
               the caller.  Segments beyond ``self.n_segments`` are ignored
               (the original ``i <= 8`` bound admitted 9 segments, which
               overflowed fc2's 4096-wide input).

        Returns:
            Logits tensor of shape (batch, d_label).

        Raises:
            ValueError: If ``x`` is empty.
        """
        if len(x) == 0:
            raise ValueError("forward() requires at least one input segment")

        segments = []
        for i, seg in enumerate(x):
            if i >= self.n_segments:
                break
            pooled = self.bert_model(seg).pooler_output
            # BUG FIX: the original called self.dropout(...) for i >= 1,
            # an attribute that was never defined (AttributeError at runtime);
            # dropout1 is used for every segment, matching the i == 0 branch.
            segments.append(self.dropout1(self.fc1(pooled)))
        result = torch.cat(segments, dim=1)

        # Zero-pad missing segments so the feature vector is always d_model
        # wide.  Allocate on result's device/dtype (the original created a
        # CPU tensor with a hard-coded batch size of 1).
        if len(segments) < self.n_segments:
            pad = torch.zeros(
                result.size(0),
                self.out_dim * (self.n_segments - len(segments)),
                device=result.device,
                dtype=result.dtype,
            )
            result = torch.cat([result, pad], dim=1)

        result = self.fc2(result)
        result = self.dropout2(result)
        # result = self.attention(result)  # self-attention branch currently disabled
        result = self.fc(result)
        return result