import torch
import torch.nn as nn
from transformers import BertModel, BertTokenizer
from models.modelArgs import ModelArgs

class MainBackbone(nn.Module):
    """BERT-based backbone with cross-channel attention.

    Each sample carries several parallel token channels. Every channel is
    encoded independently by a shared BERT, then a multi-head attention
    layer mixes information *across channels* at each sequence position,
    and a final linear layer projects to the output feature size.
    """

    def __init__(self, args: "ModelArgs"):
        super(MainBackbone, self).__init__()
        self.tokenizer = BertTokenizer.from_pretrained(args.local_path)
        # Bare BertModel has no classification head, so num_labels is
        # meaningless here and has been dropped.
        self.encoder = BertModel.from_pretrained(args.local_path)
        self.channel_attn = nn.MultiheadAttention(args.d_model, args.channel_n_heads, batch_first=True)
        self.mlp = nn.Linear(args.d_model, args.out_features, bias=True)

    def forward(self, input_ids):
        """Encode multi-channel token ids.

        Args:
            input_ids: LongTensor of shape (bsz, ch_num, seq_len) — token
                ids for ch_num parallel channels per sample.

        Returns:
            Tensor of shape (bsz, ch_num, seq_len, out_features).
        """
        bsz, ch_num, seq_len = input_ids.shape

        # BERT expects 2-D (batch, seq) ids: fold channels into the batch.
        flat_ids = input_ids.reshape(bsz * ch_num, seq_len)
        # The encoder returns a model-output object; the token embeddings
        # live in .last_hidden_state: (bsz*ch_num, seq_len, d_model).
        t = self.encoder(flat_ids).last_hidden_state

        # Rearrange so each sequence position attends over its channels:
        # (bsz, ch_num, seq_len, d) -> (bsz, seq_len, ch_num, d)
        # -> (bsz*seq_len, ch_num, d).
        z = torch.swapaxes(t.reshape(bsz, ch_num, seq_len, -1), 1, 2)
        z = z.reshape(bsz * seq_len, ch_num, -1)
        # Self-attention across channels; MultiheadAttention applies its
        # own q/k/v projections internally, so z is passed for all three.
        z, _ = self.channel_attn(z, z, z, need_weights=False)
        # Restore (bsz, ch_num, seq_len, d) layout.
        z = torch.swapaxes(z.reshape(bsz, seq_len, ch_num, -1), 1, 2)

        # Use the attended features (the original discarded z here).
        return self.mlp(z)
