import sys
sys.path.append('.')
from datas import *
from models import *

import argparse
import os
import random

import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
import tqdm
from torch.distributed import init_process_group, destroy_process_group
from torch.distributed.elastic.multiprocessing.errors import record
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tensorboardX import SummaryWriter
from transformers import RobertaTokenizer, RobertaModel

def valid_epoch(model, valid_loader, device):
    model.eval()
    total_loss = 0.0
    total_acc = 0.0
    with torch.no_grad():
        for i, data in enumerate(valid_loader, 0):
            smile_token,prot_embs, labels = data
            with torch.no_grad():
                smile_embs = mol_encoder(**smile_token.to(device)).pooler_output
            smile_embs,prot_embs,labels = smile_embs.to(device),prot_embs.to(device),labels.to(device)
            outputs = model(smile_embs.float(),prot_embs.float())
            loss = criterion(outputs, labels)
            total_loss += loss.item()
            predicted = torch.argmax(outputs, dim=1)
            total_acc += (predicted == labels).sum().item()
    return total_loss / len(valid_loader), total_acc / len(valid_loader.dataset)

# collate_fn: turn SMILES strings into token ids and protein embeddings into a Tensor
def collate_fn(batch):
    """Collate ``(smile, prot_emb, label)`` samples into batched model inputs.

    Tokenizes the SMILES strings with the module-level ``mol_tokenizer``
    (padded/truncated to a common length, returned as PyTorch tensors) and
    stacks protein embeddings and labels into tensors.
    """
    smiles, prots, ys = zip(*batch)
    token_batch = mol_tokenizer(
        smiles,
        padding=True,
        truncation=True,
        return_tensors="pt",
    )
    return (
        token_batch,
        torch.Tensor(np.array(prots)),
        torch.LongTensor(np.array(ys)),
    )

def train(model, mol_tokenizer, train_loader, valid_loader, optimizer, criterion, device, writer):
    """Train ``model`` for 100 epochs, validating and checkpointing each epoch.

    Args:
        model: DDP-wrapped classifier (``model.module`` is saved for the best epoch).
        mol_tokenizer: kept for interface compatibility; unused here (tokenizing
            happens in ``collate_fn``).
        train_loader / valid_loader: DataLoaders yielding
            ``(smile_token, prot_embs, labels)``.
        optimizer: optimizer over the trainable parameters.
        criterion: loss function.
        device: this rank's device.
        writer: tensorboardX ``SummaryWriter``.
    """
    # valid_epoch reads `mol_encoder` and `criterion` as module globals; publish
    # them so the original 3-argument valid_epoch call actually works.
    global mol_encoder
    best_acc = 0.0
    mol_encoder = RobertaModel.from_pretrained("models/seyonec/ChemBERTa-zinc-base-v1").to(device)
    mol_encoder.eval()  # frozen feature extractor — never trained
    globals()["criterion"] = criterion
    for epoch in range(100):
        running_loss = 0.0
        # Without set_epoch, DistributedSampler yields the same shuffle order
        # every epoch on every rank.
        sampler = getattr(train_loader, "sampler", None)
        if isinstance(sampler, DistributedSampler):
            sampler.set_epoch(epoch)
        train_loader.dataset.resample()
        for smile_token, prot_embs, labels in train_loader:
            with torch.no_grad():  # encoder is frozen; don't build its graph
                smile_embs = mol_encoder(**smile_token.to(device)).pooler_output
            smile_embs = smile_embs.to(device)
            prot_embs = prot_embs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            outputs = model(smile_embs.float(), prot_embs.float())
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        train_loss = running_loss / len(train_loader)
        print('Train-Epoch[%d] loss: %.3f' % (epoch + 1, train_loss))
        valid_loss, valid_acc = valid_epoch(model, valid_loader, device)
        print('Valid-Epoch[%d] loss: %.3f acc: %.3f' % (epoch + 1, valid_loss, valid_acc))
        # Log the training curves with tensorboardX.
        writer.add_scalar('train_loss', train_loss, epoch)
        writer.add_scalar('valid_loss', valid_loss, epoch)
        writer.add_scalar('valid_acc', valid_acc, epoch)
        # Save the best checkpoint (rank 0 only, unwrapped module weights).
        if torch.distributed.get_rank() == 0 and valid_acc > best_acc:
            best_acc = valid_acc
            # NOTE(review): the original referenced `args.output_dir`, but `args`
            # is never defined anywhere in this file (argparse is imported but
            # unused) — fall back to the current directory when it is missing.
            out_dir = getattr(globals().get("args"), "output_dir", ".")
            torch.save(model.module.state_dict(), os.path.join(out_dir, 'best_model.pth'))
            print('Save best model')
        
def ddp_setup(rank: int, world_size: int) -> None:
    """Seed all RNGs and join the NCCL process group as rank ``rank``.

    Args:
        rank: this process's rank in ``[0, world_size)``.
        world_size: total number of spawned processes (one per GPU).
    """
    # Fix the random seed so every rank starts from identical initial state.
    # (BUG FIX: the original used `random` without importing it — NameError.)
    seed = 42
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "54321"  # select any idle port on your machine
    init_process_group(backend="nccl", rank=rank, world_size=world_size)
@record
def main_ddp(
    rank: int,
    world_size: int,
):
    """Per-process DDP entry point — spawned once per GPU by ``mp.spawn``.

    Builds the model, dataloaders and optimizer, runs ``train``, then tears
    down the process group.
    """
    ddp_setup(rank, world_size)  # initialize ddp
    # BUG FIX: `device` was never defined in the original (NameError). Each
    # spawned process owns exactly one GPU, indexed by its rank.
    torch.cuda.set_device(rank)
    device = torch.device(f"cuda:{rank}")
    model = M3Net().to(device)
    model = nn.parallel.DistributedDataParallel(model, device_ids=[rank])
    # Freeze the model_encoder part of the model.
    for param in model.module.model_encoder.parameters():
        param.requires_grad = False
    # Optimizer only sees the parameters that remain trainable.
    optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.01, momentum=0.9)

    mol_tokenizer = RobertaTokenizer.from_pretrained("models/seyonec/ChemBERTa-zinc-base-v1")
    # collate_fn reads `mol_tokenizer` as a module global; publish it so the
    # DataLoader (and fork-started workers, which inherit module state —
    # TODO confirm the worker start method) can see it.
    globals()["mol_tokenizer"] = mol_tokenizer

    criterion = nn.CrossEntropyLoss()
    train_path = 'datas/leash-BELKA/train_split.parquet'
    valid_path = 'datas/leash-BELKA/valid_split.parquet'
    protein_embedding_path = 'datas/leash-BELKA/protein_features.parquet'
    train_dataset = BalancedEmbedDataset(train_path, protein_embedding_path, data_size=100)
    valid_dataset = BalancedEmbedDataset(valid_path, protein_embedding_path, data_size=100)
    train_sampler = DistributedSampler(train_dataset)
    train_loader = DataLoader(train_dataset, collate_fn=collate_fn, batch_size=64,
                              shuffle=False, num_workers=4, sampler=train_sampler)
    valid_sampler = DistributedSampler(valid_dataset, shuffle=False)
    valid_loader = DataLoader(valid_dataset, collate_fn=collate_fn, batch_size=128,
                              shuffle=False, num_workers=4, sampler=valid_sampler)
    writer = SummaryWriter('runs/m3net')
    train(model, mol_tokenizer, train_loader, valid_loader, optimizer, criterion, device, writer)
    destroy_process_group()  # clean up
    
    
    
if __name__ == '__main__':
    # One process per visible GPU.
    world_size = torch.cuda.device_count()
    # BUG FIX: the original passed `args=(world_size)` — parentheses without a
    # trailing comma are just grouping, so mp.spawn received a bare int and
    # `fn(i, *args)` would fail. `(world_size,)` is the required 1-tuple.
    mp.spawn(
        main_ddp,
        args=(world_size,),
        nprocs=world_size,
    )
    

    
    