import torch
import os
import time
from train import Train
from config import Parser
from dataset import ByteDance
from utils import Loader
from metric import Metric
import numpy as np
from torch.utils.data import BufferedShuffleDataset, DataLoader
from torch.optim import Adam
from models.MHT import MHT

from loguru import logger
from pathlib import Path
import torch.backends.cudnn as cudnn

from torch.utils.tensorboard import SummaryWriter

import timm
assert timm.__version__ == "0.3.2"  # version check
import timm.optim.optim_factory as optim_factory

import warnings
warnings.filterwarnings("ignore")


def train():
    """Entry point: train the MHT model on the ByteDance dataset.

    Reads hyper-parameters from ``./config.yml`` (section ``Train``), seeds
    all RNGs for reproducibility, builds shuffled train/valid DataLoaders,
    and runs ``epoch`` training epochs through the ``Train`` helper.
    """
    config = Loader.load_yaml("./config.yml")
    args = config.get("Train")

    # Fix the seed for reproducibility on CPU, NumPy and ALL CUDA devices.
    # (Previously the CUDA seed was hard-coded to 0, so GPU-side randomness
    # ignored the configured seed.)
    seed = args.get("seed")
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Deterministic cuDNN kernels: trades speed for reproducible convolutions.
    # (cudnn.benchmark = True would instead auto-tune for speed.)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Release cached GPU memory from any previous run.
    # (Full reset from shell: nvidia-smi --gpu-reset -i [gpu_id])
    torch.cuda.empty_cache()

    # Surface NaN/Inf-producing ops during backward — debugging aid, slow.
    torch.autograd.set_detect_anomaly(True)

    # Date tag (YYYYMMDD) used to name this run's checkpoint directory.
    date_name = str(time.strftime('%Y%m%d'))

    # Iterable datasets shuffled through a bounded in-memory buffer.
    train_datasets = ByteDance(config)
    valid_datasets = ByteDance(config, mode="valid")
    train_dt = BufferedShuffleDataset(train_datasets, buffer_size=204800)
    valid_dt = BufferedShuffleDataset(valid_datasets, buffer_size=100000)

    train_loader = DataLoader(dataset=train_dt,
                        batch_size=args.get("batch_size"),
                        num_workers=args.get("num_workers"),
                        pin_memory=args.get("pin_mem"),
                        drop_last=True)

    valid_loader = DataLoader(dataset=valid_dt,
                        batch_size=args.get("batch_size"),
                        num_workers=args.get("num_workers"),
                        pin_memory=args.get("pin_mem"),
                        drop_last=True)

    # Device selection: configured CUDA device if available, else CPU.
    device = torch.device("cuda:"+str(args.get("device_id")) if torch.cuda.is_available() else "cpu")

    # Log / TensorBoard summary directories.
    os.makedirs(args.get("log_dir"), exist_ok=True)
    log_writer = SummaryWriter(log_dir=args.get("tensorboard_dir"))

    # TODO: load weights to resume training from a checkpoint.

    # Model.
    model = MHT(cfg=config, device=device)
    model.to(device)
    # print("Model = %s" % str(model))

    # Learning rate: fall back to 1e-4 when not configured.
    # (The None check must happen BEFORE float() — float(None) raises TypeError.)
    lr = args.get("lr")
    if lr is None:  # only base_lr is specified
        lr = 1e-4
    else:
        lr = float(lr)
    # TODO scheduler lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) import torch.optim.lr_scheduler as lr_scheduler
    # Following timm: set wd as 0 for bias and norm layers.
    # NOTE(review): param_groups is currently UNUSED — the optimizer below is
    # built from model.get_params() instead, so weight decay is never applied.
    # TODO: pass param_groups (with weight_decay) to the optimizer once verified.
    param_groups = optim_factory.add_weight_decay(model, args.get("weight_decay"))
    optimizer = torch.optim.Adam(model.get_params(), lr=lr, betas=(0.9, 0.95))
    print(optimizer)

    metric = Metric()
    # TODO: automatic mixed precision — amp.GradScaler(enabled=cuda)
    train = Train(config, model=model, optimizer=optimizer, train_loader=train_loader, valid_loader=valid_loader, metric=metric, log_writer=log_writer, device=device)
    train.set_weight_path(date_name)
    # train.valid_one_epoch()
    for e in range(args.get("epoch")):
        train.train_one_epoch(e)
    # TODO: save the final model

    # TODO wandb_logger

    # TODO Warmup

    # TODO from tqdm import tqdm

# Script entry point: run a full training session when executed directly.
if __name__ == "__main__":
    train()
    