from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertModel, BertConfig
from torch.utils.data import Dataset, DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pandas as pd

class Baseline_Model(nn.Module):
    """BERT-based sequence classifier.

    Feeds token ids through BERT, then builds a 4*hidden_size feature
    vector from [max-pool over tokens ; avg-pool over tokens ; first
    token state ; last token state] and classifies it with a two-layer
    head (Linear -> tanh -> Linear).

    Args:
        model_name: HuggingFace checkpoint to load.
        n_class: number of output classes (logits dimension).
        max_seq_len: fixed token length of every input batch — the 1-d
            pooling kernels are sized to it, so shorter/longer batches
            will break pooling.
        hidden_size: BERT hidden size (768 for bert-base).
        dropout_rate: dropout applied to the feature vector at train time.
        device: device tag stored for save/load bookkeeping.
    """

    def __init__(
        self, model_name='bert-base-uncased', n_class=2, max_seq_len=300, hidden_size=768,
        dropout_rate=0.5, device='cuda'
    ):
        super(Baseline_Model, self).__init__()
        self.bert_model = BertModel.from_pretrained(model_name)
        # Fine-tune the full encoder by default (see set_grad to freeze).
        for param in self.bert_model.parameters():
            param.requires_grad = True
        # Head input is 4*hidden_size: max-pool + avg-pool + CLS + last token.
        self.l1 = nn.Linear(4 * hidden_size, hidden_size)
        self.l2 = nn.Linear(hidden_size, n_class)
        self.max_pool = nn.MaxPool1d(max_seq_len)
        self.avg_pool = nn.AvgPool1d(max_seq_len)
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()  # kept for interface compatibility; unused in forward
        self.dropout = nn.Dropout(dropout_rate)
        self.device = device
        self.bert_config = BertConfig.from_pretrained(model_name)

    def forward(self, x, mask, token_type_ids, istrain=False):
        """Return raw class logits of shape (batch, n_class).

        Args:
            x: token id tensor (batch, max_seq_len).
            mask: attention mask (batch, max_seq_len), 1 = real token.
            token_type_ids: segment ids (batch, max_seq_len).
            istrain: apply dropout to the feature vector when True.
        """
        # BUG FIX: the original called self.bert_model(x) and silently
        # dropped mask / token_type_ids, so padding positions were attended
        # to. Forward them to the encoder.
        bert_out = self.bert_model(
            x, attention_mask=mask, token_type_ids=token_type_ids
        )['last_hidden_state']

        # bert_out: (batch, seq_len, hidden_size)
        sentence_sta = bert_out[:, 0, :]
        sentence_end = bert_out[:, -1, :]
        # Pool over the sequence axis: (b, hidden, seq_len) -> (b, hidden)
        pooled_in = bert_out.permute(0, 2, 1).contiguous()
        maxpool_out = self.max_pool(pooled_in).squeeze(2)
        avgpool_out = self.avg_pool(pooled_in).squeeze(2)

        t1 = torch.cat((maxpool_out, avgpool_out, sentence_sta, sentence_end), 1)
        # BUG FIX: the original applied dropout to bert_out *after* all
        # pooled/sliced features were already computed and never read
        # bert_out again, making the dropout a no-op. Regularize the
        # actual feature vector instead.
        if istrain:
            t1 = self.dropout(t1)
        t2 = self.tanh(self.l1(t1))
        res = self.l2(t2)

        return res

    def save(self, path: str):
        """Serialize state_dict (plus device tag) to `path`."""
        # path : './save/{src_lang}-{tgt_lang}_map.pth'
        self.save_path = path
        print('save mapping model to [%s]'% path)

        params = {
            'state_dict':self.state_dict(),
            'device': self.device,
        }
        torch.save(params, path)

    @staticmethod
    def load(path, device='cuda'):
        """Rebuild a Baseline_Model from a checkpoint written by save().

        NOTE(review): only `device` is restored from the checkpoint; the
        architecture hyperparameters fall back to constructor defaults, so
        loading a non-default model will fail on state_dict mismatch.
        """
        params = torch.load(path, map_location=lambda storage, loc: storage)
        device = torch.device(device)
        model = Baseline_Model(
            device = device
        )
        model.load_state_dict(params['state_dict'])
        print('\nsuccess loading')
        return model

    def set_grad(self, open_grad=True):
        """Freeze (False) or unfreeze (True) every BERT encoder parameter."""
        for param in self.bert_model.parameters():
            param.requires_grad = open_grad

class my_Dataset(Dataset):
    """Dataset pairing tokenizer encodings with labels and raw text.

    Args:
        encodings: dict of per-field lists (e.g. 'input_ids',
            'attention_mask'), one entry per example, as produced by a
            HuggingFace tokenizer.
        labels: label table indexed by a 'labels' column — assumed to be
            a pandas DataFrame/Series based on the .tolist() usage in the
            original; ignored when test=True.
        content: raw text per example, returned alongside the tensors.
        test: when True, items carry no 'labels' entry.
    """

    def __init__(self, encodings, labels, content, test=False):
        self.encodings = encodings
        self.len = len(self.encodings['input_ids'])
        self.labels = labels
        self.test = test
        self.content = content

    def __getitem__(self, idx):
        """Return (tensor dict, raw text) for example `idx`."""
        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
        if not self.test:
            # PERF FIX: the original did self.labels['labels'].tolist()[idx],
            # converting the ENTIRE column to a Python list on every item
            # fetch (O(n) per item, O(n^2) per epoch). Positional indexing
            # yields the same value in O(1).
            item['labels'] = torch.tensor(self.labels['labels'].iloc[idx])

        return item, self.content[idx]

    def __len__(self):
        return self.len