import argparse
# import yaml
import json
from types import SimpleNamespace
import time
from torch.utils.data import DataLoader
import torch.nn as nn
from torch.nn import init
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
from torch.optim.lr_scheduler import _LRScheduler, CosineAnnealingLR, ReduceLROnPlateau
import torch
import numpy as np
import os
import random
from tensorboardX import SummaryWriter

# load
from dataset import Car_Dataset
from model import Model
from train import train, validate
from utils import set_seed, parse_args, init_weights, gather_tensor

device = 'cuda' if torch.cuda.is_available() else 'cpu'

def main(rank, world_size, args):
    """Per-process DDP training entry point (one process per GPU).

    Spawned by ``torch.multiprocessing.spawn`` in ``__main__``. Builds the
    model, wraps it in DistributedDataParallel, trains for
    ``args.train["epoch"]`` epochs, validates every 10 epochs, and
    checkpoints every 100 epochs (rank 0 only).

    Args:
        rank: index of this process / its GPU (0 .. world_size-1).
        world_size: total number of spawned processes.
        args: parsed configuration namespace (see utils.parse_args); fields
            used here: model, dataset, train, save_path, name, if_save.
    """
    # NOTE(review): relies on MASTER_ADDR / MASTER_PORT being present in the
    # environment before this call (env:// rendezvous) — the launcher must
    # set them.
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    torch.cuda.set_device(rank)

    model = Model().to(rank)

    # Optional custom weight initialization (see utils.init_weights).
    if args.model["if_init"]:
        model.apply(init_weights)

    model = DDP(model, device_ids=[rank])

    # Count trainable parameters once; the original computed this into an
    # unused variable — report it on rank 0 instead.
    n_params = sum(int(np.prod(p.size())) for p in model.parameters() if p.requires_grad)
    if rank == 0:
        print(f"trainable parameters: {n_params}")

    # load data
    train_dataset = Car_Dataset(mode="train")
    test_dataset = Car_Dataset(mode="test")

    # The DistributedSampler shards each dataset across ranks and (for the
    # train split) owns the shuffling, so the DataLoaders below must NOT
    # also receive shuffle=True.
    train_sampler = DistributedSampler(train_dataset, num_replicas=world_size,
                                       shuffle=True,
                                       seed=args.dataset["shuffle_seed"], rank=rank)
    test_sampler = DistributedSampler(test_dataset, num_replicas=world_size,
                                      shuffle=False, rank=rank)

    # Fix: the original also passed shuffle=args.dataset[...]["shuffle"];
    # DataLoader raises ValueError when shuffle=True is combined with a
    # sampler, so the flag is dropped here (sampler controls shuffling).
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.dataset["train"]["batchsize"],
                                  sampler=train_sampler,
                                  num_workers=args.dataset["train"]["num_workers"])

    test_dataloader = DataLoader(test_dataset,
                                 batch_size=args.dataset["test"]["batchsize"],
                                 sampler=test_sampler,
                                 num_workers=args.dataset["test"]["num_workers"])

    EPOCH = args.train["epoch"]

    # Only rank 0 logs to TensorBoard; opening writers on every rank would
    # create redundant event files.
    writer = SummaryWriter(f"{args.save_path}/log/{args.name}") if rank == 0 else None

    real_lr = float(args.train["lr"])
    optim = torch.optim.AdamW(model.parameters(), lr=real_lr)
    scheduler = CosineAnnealingLR(optim, T_max=EPOCH, eta_min=float(args.train["eta_min"]))

    for epoch in range(EPOCH):
        # Fix: without set_epoch every epoch reuses the same shuffle order.
        train_sampler.set_epoch(epoch)

        start_time = time.time()
        train_error = train(args, model, train_dataloader, optim, rank)
        end_time = time.time()

        # Fix: read the LR actually used this epoch BEFORE stepping the
        # scheduler (the original stepped first, logging next epoch's LR).
        current_lr = scheduler.get_last_lr()[0]
        scheduler.step()

        training_time = end_time - start_time

        # Move the per-rank scalars onto the GPU and reduce across ranks
        # (gather_tensor is a project util — presumably an all-reduce mean;
        # TODO confirm).
        current_lr = gather_tensor(torch.tensor(current_lr, device=device), world_size)
        train_loss = gather_tensor(torch.tensor(train_error['loss'], device=device), world_size)
        L2_p = gather_tensor(torch.tensor(train_error['L2_p'], device=device), world_size)
        L2_p_norm = gather_tensor(torch.tensor(train_error['L2_p_norm'], device=device), world_size)
        training_time = gather_tensor(torch.tensor(training_time, device=device), world_size)

        if rank == 0:
            writer.add_scalar('lr/lr', current_lr, epoch)
            writer.add_scalar('Loss/train', train_loss, epoch)
            writer.add_scalar('L2/train_L2_p', L2_p, epoch)
            writer.add_scalar('L2/train_L2_p_norm', L2_p_norm, epoch)

            with open(f"{args.save_path}/record/{args.name}_training_log.txt", "a") as file:
                file.write(f"Epoch: {epoch + 1}/{EPOCH}, Train Loss: {train_loss:.4f}\n")
                file.write(f"L2_p_norm: {L2_p_norm:.4f}, L2_p: {L2_p:.4f}\n")
                file.write(f"time pre train epoch/s:{training_time:.2f}, current_lr:{current_lr:.4e}\n")

            if (epoch+1) % 1 == 0 or epoch == 0 or (epoch+1) == EPOCH:
                print(f"Epoch: {epoch + 1}/{EPOCH}, Train Loss: {train_loss:.4f}")
                print(f"L2_p_norm: {L2_p_norm:.4f}, L2_p: {L2_p:.4f}")
                print(f"time pre train epoch/s:{training_time:.2f}, current_lr:{current_lr:.4e}")
                print("#################")

        # Periodic validation: epochs 1, 10, 20, ... and the final epoch.
        if (epoch+1) % 10 == 0 or epoch == 0 or (epoch+1) == EPOCH:
            start_time = time.time()
            test_error = validate(args, model, test_dataloader, rank)
            training_time1 = time.time() - start_time

            test_L2_p = gather_tensor(torch.tensor(test_error['L2_p'], device=device), world_size)
            test_L2_p_norm = gather_tensor(torch.tensor(test_error['L2_p_norm'], device=device), world_size)

            if rank == 0:
                print(f"Epoch: {epoch + 1}/{EPOCH}, test_L2_p_norm: {test_L2_p_norm:.4f}, test_L2_p: {test_L2_p:.4f}")
                print(f"time pre test epoch/s:{training_time1:.2f}")
                print("#################")

                writer.add_scalar('L2/test_L2_p', test_L2_p, epoch)
                writer.add_scalar('L2/test_L2_p_norm', test_L2_p_norm, epoch)

                with open(f"{args.save_path}/record/{args.name}_training_log.txt", "a") as file:
                    file.write(f"Epoch: {epoch + 1}/{EPOCH}, test_L2_p_norm: {test_L2_p_norm:.4f}, test_L2_p: {test_L2_p:.4f}\n")
                    file.write(f"time pre test epoch/s:{training_time1:.2f}\n")

        # Periodic checkpointing: epochs 1, 100, 200, ... and the final epoch.
        # Fix: only rank 0 saves — the original wrote the same file from every
        # rank concurrently.
        if rank == 0 and ((epoch+1) % 100 == 0 or epoch == 0 or (epoch+1) == EPOCH):
            if args.if_save:
                checkpoint = {
                    'epoch': epoch + 1,
                    # model is always DDP-wrapped inside main, so unwrap via
                    # .module when configured for multi-GPU.
                    'state_dict': model.module.state_dict() if args.train["if_multi_gpu"] else model.state_dict(),
                    'optimizer': optim.state_dict(),
                    'learning_rate': scheduler.get_last_lr()[0]
                }
                nn_save_path = os.path.join(args.save_path, "nn")
                os.makedirs(nn_save_path, exist_ok=True)
                torch.save(checkpoint, f"{nn_save_path}/{args.name}_{epoch+1}.nn")

    if rank == 0:
        writer.close()

    # Fix: release NCCL resources; the original never tore down the group.
    dist.destroy_process_group()
        
if __name__ == "__main__":
    # Script entry point: parse config, log it, seed, and spawn one DDP
    # training process per visible GPU.
    args = parse_args()
    print(args)

    # Fix: the record directory was assumed to exist; create it so the first
    # run does not crash on the log append below.
    os.makedirs(f"{args.save_path}/record", exist_ok=True)
    log_path = f"{args.save_path}/record/{args.name}_training_log.txt"

    with open(log_path, "a") as file:
        file.write(str(args) + "\n")
        file.write(f"time is {time.asctime(time.localtime(time.time()))}\n")

    if args.seed is not None:
        set_seed(args.seed)

    if args.train["if_multi_gpu"]:
        # Fix: dist.init_process_group("nccl") uses env:// rendezvous and
        # needs MASTER_ADDR / MASTER_PORT. setdefault keeps any values the
        # launcher already exported.
        os.environ.setdefault("MASTER_ADDR", "localhost")
        os.environ.setdefault("MASTER_PORT", "29500")
        world_size = torch.cuda.device_count()
        print(f"Let's use {world_size} GPUs!")
        torch.multiprocessing.spawn(main, args=(world_size, args), nprocs=world_size)
    # NOTE(review): with if_multi_gpu False nothing trains — confirm a
    # single-GPU path is intentionally unsupported.

    with open(log_path, "a") as file:
        file.write(f"time is {time.asctime(time.localtime(time.time()))}\n")