import datetime
import os
import sys
import time

import pandas as pd
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from sklearn.model_selection import train_test_split
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DistributedSampler
from torch_geometric.loader import DataLoader

from models import *
from dataset import STLGraphDataset, NormalizeAugment
from utils import *


def init_distributed_mode(rank, world_size):
    """Join the default process group for single-node distributed training.

    Args:
        rank: index of this worker in [0, world_size).
        world_size: total number of spawned worker processes.
    """
    # Rendezvous address for all ranks on this machine.
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'
    # gloo on Windows (no NCCL build there); NCCL elsewhere for GPU collectives.
    backend = 'gloo' if os.name == "nt" else "nccl"
    dist.init_process_group(backend=backend, rank=rank, world_size=world_size)

def save_checkpoint(model, optimizer, epoch, scheduler, filename='checkpoint.pth'):
    """Serialize the full training state to `filename` via torch.save.

    Stores the model/optimizer/scheduler state dicts plus the epoch index
    and the scheduler's last learning rate (the latter for inspection only).
    """
    state = {'epoch': epoch}
    state['model_state_dict'] = model.state_dict()
    state['optimizer_state_dict'] = optimizer.state_dict()
    state['scheduler_state_dict'] = scheduler.state_dict()
    state['lr'] = scheduler.get_last_lr()
    torch.save(state, filename)

def load_checkpoint(model, optimizer, scheduler, filename='checkpoint.pth'):
    """Restore training state written by `save_checkpoint`.

    Args:
        model/optimizer/scheduler: objects to restore in place.
        filename: path to the checkpoint file.

    Returns:
        (model, optimizer, epoch, scheduler) — `epoch` is the epoch index
        recorded at save time (i.e. the last completed epoch).
    """
    # fix: map_location='cpu' keeps the load device-agnostic — tensors are
    # deserialized on CPU and then copied onto each parameter's own device by
    # load_state_dict, instead of being forced onto the GPU they were saved
    # from (which breaks multi-rank resume and CPU-only inspection).
    checkpoint = torch.load(filename, map_location='cpu')
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
    epoch = checkpoint['epoch']
    return model, optimizer, epoch, scheduler


def train(rank, world_size):
    """DDP training worker for one GPU.

    Spawned once per device by `main` via `mp.spawn`; `rank` is both this
    process's index in [0, world_size) and its local CUDA device id. Trains a
    graph model to regress the spreadsheet's 'Average Cd' label from DrivAer
    STL meshes, checkpointing and logging from rank 0 only.
    """
    # ---- run configuration ----
    stl_dir = '/shared-public/data/DrivAer_model_TrainingData_3900/DrivAer_model_TrainingData_3900/'
    model_name = 'HGPAttention'
    weighted_sampler = 0  # NOTE(review): unused in this function — kept for config parity
    # Setting
    input_dim, hidden_dim, layers, output_dim = 6, 256, 3, 1
    num_epochs = 250
    num_cpus = 14            # DataLoader worker processes per rank
    LoadCheckpoint = False   # set True to resume from checkpoint.pth
    train_num = 3600
    test_num = 300
    batch_size = 1
    lr = 5e-5
    seed = 42

    checkpoint_path = os.path.join("./logs/", model_name)
    # fix: exist_ok=True makes creation race-free across ranks, and the
    # directory must exist before ANY rank opens its log file below,
    # so this is no longer gated on rank 0.
    os.makedirs(checkpoint_path, exist_ok=True)

    fileName = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    sys.stdout = Logger(os.path.join(checkpoint_path, fileName+".log"))  # capture regular print output
    sys.stderr = Logger(os.path.join(checkpoint_path, fileName+".log"))  # capture traceback output

    print("==============================")
    print("CPUs:",num_cpus,"Epoch:",num_epochs)
    print( "batch:",batch_size, "lr:", lr)
    print("Traindata:",train_num,"Testdata:", test_num)
    print("==============================")

    set_seed(seed=seed)  # fix: was hard-coded 42, ignoring the `seed` setting above
    init_distributed_mode(rank, world_size)
    torch.cuda.set_device(rank)

    # data: one STL mesh per spreadsheet row, labelled by its mean drag coefficient
    xlsx = pd.read_excel(os.path.join(stl_dir, 'DrivAer_model_TrainingData_3900.xlsx'))
    stl_files = [os.path.join(stl_dir,'DrivAer_model_TrainingData_' + str(i+1).zfill(4)+'.stl') for i in range(len(xlsx))]
    labels = [xlsx['Average Cd'][i] for i in range(len(stl_files))]

    X_train, X_val, y_train, y_val = train_test_split(stl_files[:train_num], labels[:train_num], test_size=0.1, random_state=seed)
    print("Trainset, Valset:",len(X_train), len(X_val))

    # model: resolve the class exported by `from models import *` by name
    # instead of eval-ing an assembled code string.
    model = globals()[model_name](input_dim=input_dim, hidden_dim=hidden_dim, out_dim=output_dim,
                                  n_layers=layers, n_head=8, slice_num=32)
    model = model.cuda(rank)
    model = DDP(model, device_ids=[rank])

    print(f"Total number of parameters: {count_parameters(model)}")

    # dataset (augmentation only on the training split)
    train_dataset = STLGraphDataset(X_train, y_train, input_dim, NormalizeAugment(augment=True))
    val_dataset = STLGraphDataset(X_val, y_val, input_dim, NormalizeAugment(augment=False))

    train_sampler = DistributedSampler(train_dataset, num_replicas=dist.get_world_size(), rank=rank, shuffle=True)
    val_sampler = DistributedSampler(val_dataset, num_replicas=dist.get_world_size(), rank=rank, shuffle=False)

    train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=num_cpus, sampler=train_sampler, pin_memory=True, prefetch_factor=4)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, num_workers=num_cpus, sampler=val_sampler, pin_memory=True, prefetch_factor=4)

    # optimizer: halve the lr every 20 epochs; milestones are expressed in
    # scheduler steps because the scheduler is stepped once per batch below.
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20*i*len(train_loader) for i in range(1,9)], gamma=0.5)

    start_epoch = 0  # fix: must be initialized BEFORE the resume branch, not after it
    if LoadCheckpoint:
        # fix: every rank must load the checkpoint — restoring weights only on
        # rank 0 after DDP construction leaves the other replicas with the
        # initial broadcast weights, silently diverging from rank 0.
        model, optimizer, last_epoch, lr_scheduler = load_checkpoint(model, optimizer, lr_scheduler, os.path.join(checkpoint_path, 'checkpoint.pth'))
        start_epoch = last_epoch + 1  # checkpoint records the last *completed* epoch
        if rank == 0:
            print(f"Resuming from epoch {start_epoch}...")

    Train_loss, Val_loss = [], []
    best = 1e6

    print("Begin Training....")
    for epoch in range(start_epoch, num_epochs):
        t0 = time.time()

        # train
        train_sampler.set_epoch(epoch)  # reshuffle the per-rank shards each epoch
        model.train()
        train_loss = 0.0
        for data in train_loader:
            optimizer.zero_grad()
            data = data.cuda(rank)
            outputs = model(data)
            loss = criterion(outputs, data.y.unsqueeze(1))
            loss.backward()
            optimizer.step()
            lr_scheduler.step()  # per-batch stepping, matching the milestone units above
            train_loss += loss.item()
        train_loss /= len(train_loader)

        # evaluate
        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for data in val_loader:
                data = data.cuda(rank)
                outputs = model(data)
                loss = criterion(outputs, data.y.unsqueeze(1))
                val_loss += loss.item()
            val_loss /= len(val_loader)
            if rank == 0:
                # periodic resumable checkpoint
                if epoch > 5 and (epoch+1) % 10 == 0:
                    save_checkpoint(model, optimizer, epoch, lr_scheduler, os.path.join(checkpoint_path, 'checkpoint.pth'))

                # best-so-far weights by validation loss
                if epoch > 5 and val_loss < best:
                    best = val_loss
                    torch.save(model.state_dict(), os.path.join(checkpoint_path, 'model_best.pt'))
                    print(f"Epoch: {epoch+1} found Best Valid loss: {val_loss:.3e} !!!")
                # fix: get_last_lr() — get_lr() is deprecated and may report a
                # transient value when called outside scheduler.step().
                print("[{}/{}]  Train Loss:{:.3e} Val Loss:{:.3e} valpred:{} label:{} lr:{:3e} time:{:2f}s".format(
                    epoch+1,num_epochs,train_loss,val_loss,outputs.float().view(-1).cpu().numpy()[:2],data.y.float().view(-1).cpu().numpy()[:2],lr_scheduler.get_last_lr()[0],time.time()-t0))

                Train_loss.append(train_loss)
                Val_loss.append(val_loss)
                plot_loss(Train_loss, Val_loss, checkpoint_path)

    dist.destroy_process_group()

def main():
    """Launch one `train` worker process per device (single node, 4 workers)."""
    n_procs = 4
    # mp.spawn prepends the rank, so each worker receives (rank, world_size).
    mp.spawn(train, args=(n_procs,), nprocs=n_procs, join=True)

if __name__ == "__main__":
    main()