import numpy as np
from sklearn.metrics import accuracy_score,recall_score,precision_score
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from transformers import BertTokenizer, BertModel, RobertaTokenizer, RobertaModel
import pytorch_lightning as pl
import torch
class FocalLoss(nn.Module):
    """Binary focal loss on raw logits (Lin et al., 2017, "Focal Loss for
    Dense Object Detection").

    Computes ``-alpha_t * (1 - p_t) ** gamma * log(p_t)`` where ``p_t`` is the
    predicted probability of the *true* class and ``alpha_t`` is ``alpha`` for
    positives and ``1 - alpha`` for negatives.

    Args:
        alpha: class-balance weight for the positive class. NOTE(review): the
            default of 1 gives zero weight to negative samples; a value such
            as 0.25 is typical.
        gamma: focusing parameter; ``gamma=0`` reduces to alpha-weighted BCE.
        reduction: 'mean' | 'sum' | anything else returns the per-element loss.
    """

    def __init__(self, alpha=1, gamma=2, reduction='mean'):
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction

    def forward(self, logits, targets):
        """Return the focal loss of `logits` against binary (0/1) `targets`.

        Args:
            logits: raw (pre-sigmoid) scores, same shape as `targets`.
            targets: float tensor of 0.0 / 1.0 labels.
        """
        # Convert logits to probabilities
        probs = torch.sigmoid(logits)

        # p_t: probability assigned to the true class.
        # BUG FIX: the original used log(probs) for BOTH classes (instead of
        # log(1 - probs) for negatives) and dropped the leading minus sign,
        # producing a negative "loss" that rewards confident mistakes.
        p_t = probs * targets + (1 - probs) * (1 - targets)
        alpha_t = self.alpha * targets + (1 - self.alpha) * (1 - targets)

        # clamp avoids log(0) -> -inf when the sigmoid saturates
        focal_loss = -alpha_t * (1 - p_t) ** self.gamma * torch.log(p_t.clamp(min=1e-12))

        if self.reduction == 'mean':
            return torch.mean(focal_loss)
        elif self.reduction == 'sum':
            return torch.sum(focal_loss)
        else:
            return focal_loss

class CrossAttentionModel(pl.LightningModule):
    """Two-branch classifier fusing a SMILES (molecule) embedding and a
    protein embedding for binary interaction prediction.

    NOTE(review): despite the class name, no attention is applied — the two
    projected embeddings are simply concatenated and fed to an MLP head.

    Args:
        simle_dim: dimensionality of the incoming SMILES embedding (sic).
        prot_dim: dimensionality of the incoming protein embedding.
        input_dim_embedding: unused; kept for hparam/checkpoint compatibility.
        hidden_dim: width of the projection and classifier layers.
        num_filters: unused; kept for hparam/checkpoint compatibility.
        output_dim: number of classes (2 => binary classification with CE).
        lr: AdamW learning rate.
        weight_decay: AdamW weight decay.
        batch_size: DataLoader batch size.
        num_workers: DataLoader worker count.
        device: unused (Lightning manages device placement); kept for
            backward compatibility.
        train_dataset: dataset yielding (smile_emb, prot_emb, label) triples;
            may expose resample() for per-epoch rebalancing.
        valid_dataset: validation dataset with the same item layout.
    """

    def __init__(self, simle_dim=768, prot_dim=1024, input_dim_embedding=37,
                 hidden_dim=128, num_filters=32, output_dim=2, lr=1e-3,
                 weight_decay=1e-5, batch_size=1024, num_workers=4,
                 device='cuda', train_dataset=None, valid_dataset=None):
        super(CrossAttentionModel, self).__init__()
        self.train_dataset = train_dataset
        self.val_dataset = valid_dataset
        self.save_hyperparameters()
        self.num_classes = output_dim
        # Project both modalities into a shared hidden space.
        self.smile_linear = nn.Linear(simle_dim, self.hparams.hidden_dim)
        self.prot_linear = nn.Linear(prot_dim, self.hparams.hidden_dim)
        # Fuse the concatenated (2 * hidden_dim) representation back to hidden_dim.
        self.embedding = nn.Linear(hidden_dim * 2, self.hparams.hidden_dim)
        self.classifier = nn.Sequential(
            nn.SiLU(),
            nn.Linear(self.hparams.hidden_dim, self.hparams.hidden_dim * 2),
            nn.BatchNorm1d(self.hparams.hidden_dim * 2),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(self.hparams.hidden_dim * 2, self.hparams.hidden_dim),
            nn.BatchNorm1d(self.hparams.hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(self.hparams.hidden_dim, self.hparams.output_dim),
        )
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, smile_emb, prot_emb):
        """Project, concatenate and classify; returns (batch, output_dim) logits."""
        smile_emb = self.smile_linear(smile_emb)
        prot_emb = self.prot_linear(prot_emb)
        x = torch.concat((smile_emb, prot_emb), dim=1)
        x = self.embedding(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

    def _compute_metrics(self, logits, y):
        """Return (accuracy, recall, precision) for one batch as floats."""
        pred_labels = torch.argmax(logits, dim=1).detach().cpu().numpy()
        y_true = y.detach().cpu().numpy()
        # zero_division=0 avoids sklearn's undefined-metric warning when a
        # batch happens to contain no positive predictions or labels.
        acc = accuracy_score(y_true, pred_labels)
        recall = recall_score(y_true, pred_labels, zero_division=0)
        precision = precision_score(y_true, pred_labels, zero_division=0)
        return acc, recall, precision

    def training_step(self, batch, batch_idx):
        """One training step over a (smile_emb, prot_emb, label) batch."""
        smile_emb, prot_emb, y = batch
        # Cast inputs to the model's default dtype.
        prot_emb = prot_emb.to(torch.float32)
        smile_emb = smile_emb.to(torch.float32)
        logits = self(smile_emb, prot_emb)
        loss = self.criterion(logits, y)
        acc, recall, precision = self._compute_metrics(logits, y)
        self.log('train_loss', loss, on_epoch=True)
        self.log('train_acc', acc, on_epoch=True)
        self.log('train_recall', recall, on_epoch=True)
        self.log('train_precision', precision, on_epoch=True)
        return loss

    def validation_step(self, batch, batch_idx):
        """One validation step; mirrors training_step.

        BUG FIX: the original re-encoded raw SMILES strings through
        self.mol_encoder / self.mol_tokenizer, attributes that are never
        created anywhere in this class, so validation crashed with an
        AttributeError on the first batch. Assumes the validation dataset
        yields precomputed embeddings like the training dataset —
        TODO(review): confirm against the dataset implementation.
        """
        smile_emb, prot_emb, y = batch
        prot_emb = prot_emb.to(torch.float32)
        smile_emb = smile_emb.to(torch.float32)
        logits = self.forward(smile_emb, prot_emb)
        loss = self.criterion(logits, y)
        acc, recall, precision = self._compute_metrics(logits, y)
        self.log('val_loss', loss, on_epoch=True)
        self.log('val_acc', acc, on_epoch=True)
        self.log('val_recall', recall, on_epoch=True)
        self.log('val_precision', precision, on_epoch=True)
        return loss

    def configure_optimizers(self):
        """AdamW with the lr / weight_decay stored in hparams."""
        optimizer = torch.optim.AdamW(self.parameters(), lr=self.hparams.lr,
                                      weight_decay=self.hparams.weight_decay)
        return optimizer

    @torch.no_grad()
    def inference(self, X, batch_size=4096):
        """Batched prediction of class labels.

        BUG FIX: the original called self.forward(batch_X) with a single
        argument (forward requires two), then thresholded raw 2-class
        cross-entropy logits at 0.5; predictions are now taken with argmax.

        Args:
            X: pair (smile_emb, prot_emb) of equal-length array-likes holding
               the precomputed embeddings.
            batch_size: number of samples per forward pass.

        Returns:
            np.ndarray of predicted class indices, one per sample.
        """
        self.eval()
        smile_X, prot_X = X
        pred = []
        for i in range(0, len(smile_X), batch_size):
            # Slice one batch and move it to the model's device.
            s = torch.as_tensor(smile_X[i:i + batch_size],
                                dtype=torch.float32, device=self.device)
            p = torch.as_tensor(prot_X[i:i + batch_size],
                                dtype=torch.float32, device=self.device)
            logits = self.forward(s, p)
            # Store the batch's predicted labels
            pred.append(torch.argmax(logits, dim=1).cpu().numpy())
        # Concatenate the per-batch predictions into one array
        return np.concatenate(pred)

    def train_dataloader(self):
        """Shuffled training loader built from the stored dataset."""
        return DataLoader(self.train_dataset, batch_size=self.hparams.batch_size,
                          shuffle=True, num_workers=self.hparams.num_workers)

    def on_train_epoch_start(self):
        """Rebalance the training set each epoch when the dataset supports it."""
        # Guarded so a plain Dataset without resample() no longer crashes.
        resample = getattr(self.train_dataset, 'resample', None)
        if callable(resample):
            resample()

    def val_dataloader(self):
        """Validation loader (no shuffling)."""
        return DataLoader(self.val_dataset, batch_size=self.hparams.batch_size,
                          num_workers=self.hparams.num_workers)