import sys
sys.path.append('.')
from datas import *
from models import *
import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import RobertaTokenizer, RobertaModel
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
def valid_epoch(model, valid_loader, device):
    """Run one full validation pass.

    Relies on the module-level globals ``mol_encoder`` (frozen SMILES encoder)
    and ``criterion`` (loss), which are set up in the ``__main__`` block.

    Args:
        model: classifier taking (smile_embs, prot_embs) and returning logits.
        valid_loader: DataLoader yielding (smile_token, prot_embs, labels).
        device: torch device to run on.

    Returns:
        tuple: (mean loss per batch, accuracy over the whole validation set).
    """
    model.eval()
    total_loss = 0.0
    total_correct = 0
    # A single no-grad context covers both the encoder and the classifier;
    # the original nested a second, redundant torch.no_grad() inside it.
    with torch.no_grad():
        for smile_token, prot_embs, labels in valid_loader:
            smile_embs = mol_encoder(**smile_token.to(device)).pooler_output
            prot_embs, labels = prot_embs.to(device), labels.to(device)
            outputs = model(smile_embs.float(), prot_embs.float())
            total_loss += criterion(outputs, labels).item()
            predicted = torch.argmax(outputs, dim=1)
            total_correct += (predicted == labels).sum().item()
    return total_loss / len(valid_loader), total_correct / len(valid_loader.dataset)

# collate_fn: convert SMILES strings to token ids and protein embeddings/labels to Tensors
def collate_fn(batch):
    """Assemble a batch: tokenize SMILES strings (via the module-level
    ``mol_tokenizer``) and turn protein embeddings / labels into tensors.

    Args:
        batch: iterable of (smile, prot_emb, label) triples.

    Returns:
        tuple: (tokenized SMILES batch, protein embedding Tensor, label LongTensor).
    """
    smiles, prot_features, targets = zip(*batch)
    tokenized = mol_tokenizer(smiles, padding=True, truncation=True, return_tensors="pt")
    feature_tensor = torch.Tensor(np.array(prot_features))
    target_tensor = torch.LongTensor(np.array(targets))
    return tokenized, feature_tensor, target_tensor
def early_stopping(train_loss, validation_loss, min_delta, tolerance):
    """Signal early stopping once the validation/train loss gap stays large.

    Bug fix: the original re-initialized ``counter = 0`` on every call, so the
    count of consecutive over-gap epochs never accumulated and the condition
    ``counter >= tolerance`` could not fire for any tolerance > 1. The counter
    now persists across calls via a function attribute and resets whenever the
    gap closes.

    Args:
        train_loss: training loss of the current epoch.
        validation_loss: validation loss of the current epoch.
        min_delta: minimum (validation - train) gap treated as overfitting.
        tolerance: number of consecutive over-gap epochs before stopping.

    Returns:
        bool: True when the gap exceeded ``min_delta`` for ``tolerance``
        consecutive calls (the original implicitly returned None otherwise).
    """
    if (validation_loss - train_loss) > min_delta:
        early_stopping._counter = getattr(early_stopping, "_counter", 0) + 1
    else:
        early_stopping._counter = 0  # gap closed: restart the streak
    return early_stopping._counter >= tolerance


def train(model, mol_tokenizer, mol_encoder, train_loader, valid_loader, optimizer, lr_scheduler, criterion, device, writer, total_epoch):
    """Train ``model`` for up to ``total_epoch`` epochs, validating each epoch.

    Fixes vs. the original:
      * early stopping now receives the mean train loss per batch (the original
        passed the summed ``running_loss``, which is on a different scale than
        the mean ``valid_loss`` it is compared against);
      * the early-stop message now reports the epoch (the original printed the
        inner-loop batch index ``i``);
      * ``best_model.pth`` is only overwritten when validation accuracy
        improves (the original re-saved on any accuracy above 0.9, so a worse
        model could clobber a better checkpoint).

    Args:
        model: classifier (possibly wrapped in nn.DataParallel).
        mol_tokenizer: SMILES tokenizer (unused here; tokenization happens in
            the DataLoader's collate_fn — kept for interface compatibility).
        mol_encoder: frozen SMILES encoder producing pooler_output embeddings.
        train_loader / valid_loader: DataLoaders yielding
            (smile_token, prot_embs, labels); the train dataset must expose
            a ``resample()`` method.
        optimizer / lr_scheduler / criterion: standard training components.
        device: torch device.
        writer: tensorboardX SummaryWriter.
        total_epoch: maximum number of epochs.

    Side effects: writes 'best_model.pth' and 'final_model.pth', logs scalars.
    """
    best_valid_acc = 0.9  # checkpoint threshold, same floor as the original
    for epoch in range(total_epoch):
        model.train()
        running_loss = 0.0
        train_loader.dataset.resample()
        for smile_token, prot_embs, labels in train_loader:
            # the encoder stays frozen, so no gradients are needed for it
            with torch.no_grad():
                smile_embs = mol_encoder(**smile_token.to(device)).pooler_output
            smile_embs, prot_embs, labels = smile_embs.to(device), prot_embs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(smile_embs.float(), prot_embs.float())
            loss = criterion(outputs, labels)
            loss = loss.mean()  # average the per-GPU losses returned by DataParallel
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        train_loss = running_loss / len(train_loader)
        print('Train-Epoch[%d] loss: %.3f' % (epoch + 1, train_loss))
        valid_loss, valid_acc = valid_epoch(model, valid_loader, device)
        # log the training curves with tensorboardX
        writer.add_scalar('train_loss', train_loss, epoch)
        writer.add_scalar('valid_loss', valid_loss, epoch)
        writer.add_scalar('valid_acc', valid_acc, epoch)
        # save only genuine improvements so the best checkpoint really is the best
        if valid_acc > best_valid_acc:
            best_valid_acc = valid_acc
            torch.save(model.state_dict(), 'best_model.pth')
            print('Save best model')
        lr_scheduler.step()
        if early_stopping(train_loss, valid_loss, min_delta=0.01, tolerance=20):
            print("We are at epoch:", epoch + 1)  # was `i`, the batch index
            break
    # save the final checkpoint
    torch.save(model.state_dict(), 'final_model.pth')

    
if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Bug fix: device_count() is 0 on CPU-only machines, which made
    # batch_size = 256 * cuda_count evaluate to 0 and crash the DataLoader.
    cuda_count = max(1, torch.cuda.device_count())
    model = nn.DataParallel(M3Net()).to(device)
    optimizer = optim.AdamW(model.parameters(), lr=0.01)
    warmup_steps = 3
    total_epoch = 100

    def warmup(current_step: int):
        """Per-epoch LR multiplier: linear warmup, then linear decay to 0."""
        if current_step < warmup_steps:  # warmup: current_step / warmup_steps * base_lr
            return float(current_step / warmup_steps)
        # decay: (total_epoch - current_step) / (total_epoch - warmup_steps) * base_lr
        # Bug fix: the original used `training_steps`, an undefined name that
        # raised NameError on the first post-warmup scheduler step.
        return max(0.0, float(total_epoch - current_step) / float(max(1, total_epoch - warmup_steps)))

    lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=warmup)
    mol_tokenizer = RobertaTokenizer.from_pretrained("models/seyonec/ChemBERTa-zinc-base-v1")
    mol_encoder = RobertaModel.from_pretrained("models/seyonec/ChemBERTa-zinc-base-v1").to(device)
    mol_encoder.eval()  # the SMILES encoder stays frozen during training
    criterion = nn.CrossEntropyLoss()
    train_path = 'datas/leash-BELKA/train_split.parquet'
    valid_path = 'datas/leash-BELKA/valid_split.parquet'
    protein_embedding_path = 'datas/leash-BELKA/protein_features.parquet'
    train_dataset = BalancedEmbedDataset(train_path, protein_embedding_path, data_size=100000)
    valid_dataset = BalancedEmbedDataset(valid_path, protein_embedding_path, data_size=100000)
    train_loader = DataLoader(train_dataset, collate_fn=collate_fn, batch_size=256 * cuda_count, shuffle=True, num_workers=10)
    valid_loader = DataLoader(valid_dataset, collate_fn=collate_fn, batch_size=1024 * cuda_count, shuffle=True, num_workers=10)
    writer = SummaryWriter('runs/m3net')
    train(model, mol_tokenizer, mol_encoder, train_loader, valid_loader, optimizer, lr_scheduler, criterion, device, writer, total_epoch)
