import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'loss'))
sys.path.append(os.path.join(ROOT_DIR, 'data_loader'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))

import numpy as np
import torch
import datetime
import logging
from pathlib import Path
from tqdm import tqdm
import importlib
import shutil
from tensorboardX import SummaryWriter
from sklearn import metrics as skm
import time
from utils.cfg_util import load_param
from utils import provider

yaml_path = 'cfgs/cfg_cls.yaml'

def main():
    """Train and evaluate a point-cloud classifier driven by the YAML config.

    Workflow: create run/checkpoint directories, set up file logging and a
    TensorBoard writer, snapshot the source files used for this run, build
    the dataloaders / model / loss / optimizer named in the config,
    optionally resume or load pretrained weights, then run the epoch loop,
    saving periodic resume states and the best-accuracy checkpoint.
    """
    def log_string(msg):
        # Mirror every message to the run's log file and to stdout.
        logger.info(msg)
        print(msg)

    args = load_param(yaml_path)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y%m%d-%H%M%S-'))
    runs_dir = Path('./%s/' % args.runs_dir)
    runs_dir.mkdir(exist_ok=True)
    trained_ckpt_dir = runs_dir.joinpath('trained_ckpt/')
    trained_ckpt_dir.mkdir(exist_ok=True)
    # One timestamped directory per run, named after the model.
    run_once_dir = runs_dir.joinpath(timestr + args.model)
    run_once_dir.mkdir(exist_ok=True)
    ckpts_dir = run_once_dir.joinpath('save_ckpts/')
    ckpts_dir.mkdir(exist_ok=True)
    resume_dir = run_once_dir.joinpath('resume/')
    resume_dir.mkdir(exist_ok=True)
    model_file_dir = run_once_dir.joinpath('model_file/')
    model_file_dir.mkdir(exist_ok=True)

    '''LOG'''
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (run_once_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(str(args))
    writer = SummaryWriter(log_dir=str(run_once_dir), comment='point')
    # View with: tensorboard --logdir runs_cls --samples_per_plugin=images=100 --port 6031
    writer.add_text(timestr, "parameters:\n" + str(args))

    # Snapshot the exact source files used by this run, for reproducibility.
    shutil.copy('./%s.py' % args.run_file, str(model_file_dir))
    shutil.copy('./data_loader/%s.py' % args.data_loader, str(model_file_dir))
    shutil.copy('./models/%s.py' % args.model, str(model_file_dir))
    shutil.copy('./models/pointnet2_utils.py', str(model_file_dir))
    shutil.copy('./loss/%s.py' % args.loss, str(model_file_dir))
    shutil.copy(yaml_path, str(model_file_dir))

    '''DATA LOADING'''
    log_string('Load dataset ...')

    LOADER = importlib.import_module(args.data_loader)
    all_loader = LOADER.get_loader(args)
    train_loader, test_loader = all_loader.create_dataloader()

    '''MODEL LOADING'''
    print("CUDA Available: ", torch.cuda.is_available())
    if torch.cuda.is_available() and args.use_gpu:
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    MODEL = importlib.import_module(args.model)
    LOSS = importlib.import_module(args.loss)
    classifier = MODEL.get_model(args)
    if args.use_gpu and args.use_parallel:
        classifier = torch.nn.DataParallel(classifier).to(device)
    else:
        classifier = classifier.to(device)
    criterion = LOSS.get_loss(args).to(device)

    # TODO: support custom optimizers/schedulers beyond the two below.
    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.lr,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate
        )
    elif args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(
            classifier.parameters(),
            lr=args.lr,
            momentum=args.momentum,
            weight_decay=args.decay_rate
        )
    else:
        # Fail fast: previously this only logged, then crashed later with a
        # NameError when `optimizer` was first used.
        raise ValueError('wrong optimizer param: %s' % args.optimizer)

    # TODO: make the learning-rate schedule configurable.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_step, args.lr_gamma)
    start_epoch = args.start_epoch

    # is_resume: 1 = resume an interrupted run (net + optimizer + scheduler),
    #            2 = load pretrained weights only.
    if args.is_resume == 1:
        checkpoint = torch.load(args.ckpt)
        classifier.load_state_dict(checkpoint['net'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch'] + 1
        scheduler.load_state_dict(checkpoint['lr_schedule'])
        log_string('Resume training')
    if args.is_resume == 2:
        checkpoint = torch.load(args.ckpt)
        classifier.load_state_dict(checkpoint['net'])
        log_string('Load pretrained model')

    '''TRAINING'''
    log_string('Start training...')
    global_epoch = 1
    best_res = 0.0
    for epoch in range(start_epoch, args.epochs + 1):
        log_string('Epoch %d (%d/%s):' % (global_epoch, epoch, args.epochs))
        log_string('current learning rate:{:f}'.format(scheduler.get_last_lr()[0]))
        global_epoch += 1

        start = time.time()
        train_loss, train_pred, train_true = train_func(classifier, device, train_loader, optimizer, criterion, args)
        time1 = time.time() - start
        test_loss, test_pred, test_true = test_func(classifier, device, test_loader, criterion, args)
        time2 = time.time() - start - time1
        scheduler.step()

        train_acc = skm.accuracy_score(train_true, train_pred)
        test_acc = skm.accuracy_score(test_true, test_pred)

        all_loss = {'train_loss': train_loss, 'test_loss': test_loss}
        accuracy = {'train_accuracy': train_acc, 'test_accuracy': test_acc}
        writer.add_scalars('./loss', all_loss, epoch)
        writer.add_scalars('./accuracy', accuracy, epoch)

        state = {
            'epoch': epoch,
            'net': classifier.state_dict(),
            'loss': criterion.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lr_schedule': scheduler.state_dict(),
            'accuracy': accuracy,
        }
        # Periodically save a full resume checkpoint. Zero-pad the epoch so
        # filenames sort correctly ('{:3d}' put literal spaces in the name).
        if epoch % args.resume_step == 0 and epoch >= args.resume_start_epoch:
            torch.save(state, str(resume_dir.joinpath('state_resume_{:03d}.pth'.format(epoch))))

        # Only overwrite best.pth when test accuracy actually improves
        # (best_res was never updated before, so every epoch clobbered it).
        if test_acc > best_res:
            best_res = test_acc
            torch.save(state, str(ckpts_dir.joinpath('best.pth')))

        log_string('Train Acc: {:f}, Train Loss: {:f}, Train Time: {:f}'.format(train_acc, train_loss, time1))
        log_string('Test  Acc: {:f}, Test  Loss: {:f}, Test  Time: {:f}'.format(test_acc, test_loss, time2))
    writer.close()
    log_string('End of training...')

def train_func(model, device, loader, optimizer, criterion, args):
    """Run one training epoch.

    Each batch is augmented on the CPU (random point dropout, random
    scaling and shifting of the xyz channels) before the forward/backward
    pass on `device`.

    Args:
        model: classifier returning per-class scores with classes on the last axis.
        device: torch device for the forward/backward pass.
        loader: iterable of (inputs, targets) batches.
        optimizer: optimizer stepping `model`'s parameters.
        criterion: loss taking (outputs, long targets).
        args: config namespace; uses `p_iter` (batch-print interval).

    Returns:
        (mean_loss, predictions, ground_truth) — predictions and
        ground_truth are 1-D numpy arrays covering the whole epoch.
    """
    model.train()
    batch_losses = []
    pred_chunks, true_chunks = [], []
    for b_idx, (inputs, targets) in enumerate(loader):
        # Augment as numpy on CPU, then move the batch to the device.
        inputs = inputs.data.numpy()
        inputs = provider.random_point_dropout(inputs)
        inputs[:, :, 0:3] = provider.random_scale_point_cloud(inputs[:, :, 0:3])
        inputs[:, :, 0:3] = provider.shift_point_cloud(inputs[:, :, 0:3])
        inputs = torch.Tensor(inputs)
        inputs, targets = inputs.to(device), targets.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets.long())
        loss.backward()
        optimizer.step()

        batch_losses.append(loss.detach().item())
        # argmax over the class axis gives a 1-D batch of predicted labels.
        # (The old code appended this 1-D array to a (0, num_class) buffer,
        # which raised ValueError on the first batch; we also collect chunks
        # and concatenate once instead of quadratic np.append in the loop.)
        pred_chunks.append(np.argmax(outputs.cpu().detach().numpy(), axis=-1))
        true_chunks.append(targets.cpu().numpy())
        if b_idx % args.p_iter == 0:
            print('Train Batch {:3d}, Loss: {:f}'.format(b_idx, np.mean(batch_losses[-args.p_iter:])))

    mean_loss = np.mean(batch_losses) if batch_losses else float('nan')
    y_out = np.concatenate(pred_chunks) if pred_chunks else np.empty(0)
    y_true = np.concatenate(true_chunks) if true_chunks else np.empty(0)
    return mean_loss, y_out, y_true


def test_func(model, device, loader, criterion,args):
    loss_avg,y_out,y_true = [],np.empty([0,args.num_class]),np.empty(0)
    with torch.no_grad():
        model.eval()
        for b_idx, [inputs, targets] in enumerate(loader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, targets.long())
            loss_avg.append(loss.detach().item())
            pred = np.argmax(outputs.cpu().detach().numpy(), axis=-1)
            y_out=np.append(y_out,pred,axis=0)
            y_true=np.append(y_true,targets.cpu().numpy(),axis=0)
            if b_idx % args.p_iter == 0 :
                print('Test  Batch {:3d}, Loss: {:f}'.format(b_idx,np.mean(loss_avg[-args.p_iter:])))

    return np.mean(loss_avg), y_out, y_true


# Script entry point: run the full training loop, then print a completion marker.
if __name__ == '__main__':
    main()
    print("ok")
