'''
Description: 
Author: suyunzheng
Date: 2021-11-23 22:31:28
LastEditTime: 2021-11-29 17:19:07
LastEditors: maple
'''
import os
from re import L
from unicodedata import numeric
import numpy as np
import glob
from numpy.core.arrayprint import _void_scalar_repr
from pandas.core.accessor import register_index_accessor
from scipy.sparse import coo
import torch
from torch import nn
import torch.optim
from torch.utils.data.dataset import Dataset
import tqdm
import datetime
from pathlib import Path
import sys
import argparse
import logging
import shutil
import importlib
import matplotlib.pyplot as plt

from core.models.spvcnn import SPVCNN
from core.models.minkunet import MinkUNet

from data_utils.semantickitti.SemanticKittiDataLoader import SemanticKittiDataset
from data_utils.s3dis.s3disDataLoader import S3DIS_sparse_Dataset
from lib.utils import Timer, checkpoint, plotCurve
from lib.common.compute_matrix import IoUCalculator
# NOTE(review): torch is already imported above, so this env var only takes
# effect because CUDA is initialized lazily — confirm.  The original comment
# said "use only one card" but '0,1' exposes two GPUs; confirm intent.
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'                # expose GPUs 0 and 1

# log
from lib.log import LOG

# DIR: make the local models/ directory importable.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))

device = torch.device('cuda:0')     # primary device used by train()/evaluate()
device1 = torch.device('cuda:1')    # secondary device (not referenced in this file)
ignore_index = 255                  # label value to exclude from loss/IoU

# SGD hyper-parameters consumed by train()
lr = 2.4e-1
weight_decay = 1.0e-4
momentum = 0.9
nesterov = True

# S3DIS (Stanford) class names (13 classes)
Stanford_CLASSES = [
        'clutter', 'beam', 'board', 'bookcase', 'ceiling', 'chair', 'column', 'door', 'floor', 'sofa',
        'table', 'wall', 'window'
        ]

# best validation mIoU (train() shadows this with a local of the same name)
best_iou = 0

# per-step [loss, global_step] pairs, dumped to .npy after training
train_loss = []

# per-step [mean_acc, global_step] pairs, dumped to .npy after training
train_acc =[]

# per-epoch [miou, epoch] pairs, dumped to .npy after training
test_miou = []

def train(train_loader, test_loader, net, optimizer, loss_func, start_epoch, epoch_num, config_dict):
    """Run the full training loop.

    Trains ``net`` on ``train_loader``, evaluates on ``test_loader`` after
    every epoch, writes TensorBoard scalars, checkpoints every 5 epochs and
    whenever the validation mIoU improves, then dumps the collected
    loss/acc/miou curves as ``.npy`` files and pictures.

    Args:
        train_loader, test_loader: DataLoaders yielding feed dicts with
            'lidar' (sparse input) and 'targets' (sparse labels with ``.F``).
        net: the model, already on ``device``.
        optimizer: optimizer name; only 'sgd' is supported.
        loss_func: criterion taking ``(output, target_label)``.
        start_epoch, epoch_num: first epoch index and total epoch count.
        config_dict: dict with 'checkpoints_dir', 'tensorboard', 'logger',
            'num_class'.
    """
    checkpoints_dir = config_dict['checkpoints_dir']
    tensorboard = config_dict['tensorboard']
    logger = config_dict['logger']
    num_class = config_dict['num_class']

    # One SummaryWriter each for the train and test curves.
    from torch.utils.tensorboard import SummaryWriter

    log_dir_train = os.path.join(tensorboard, 'train')
    os.makedirs(log_dir_train, exist_ok=True)   # race-safe; also creates parents
    train_writer = SummaryWriter(log_dir=log_dir_train)

    log_dir_test = os.path.join(tensorboard, 'test')
    os.makedirs(log_dir_test, exist_ok=True)
    test_writer = SummaryWriter(log_dir=log_dir_test)

    if optimizer == 'sgd':
        logger.log_string("===> Optimizer: sgd")
        optimizer = torch.optim.SGD(net.parameters(),
                                    lr=lr,
                                    momentum=momentum,
                                    weight_decay=weight_decay,
                                    nesterov=nesterov)
    else:
        # Previously an unknown name fell through and crashed much later with
        # a confusing AttributeError on optimizer.zero_grad(); fail fast.
        raise ValueError("Unsupported optimizer: {!r} (only 'sgd' is supported)".format(optimizer))

    logger.log_string("===> Start training...")
    epoch_time = Timer()
    global_step = 0
    best_iou = 0            # best mIoU seen so far (intentionally local)
    for epoch_iter in tqdm.tqdm(range(start_epoch, epoch_num)):

        epoch_time.tic()        # time one epoch
        tmp_loss = 0
        print_freq = 50         # how often (in iterations) to log the running loss

        for i, feed_dict in enumerate(train_loader):
            input = feed_dict['lidar'].to(device)
            target = feed_dict['targets']
            # NOTE(review): unlike evaluate(), the label is not cast with
            # .long() here — presumably targets are already int64; confirm.
            target_label = target.F.to(device)

            optimizer.zero_grad()

            output = net(input)
            loss = loss_func(output, target_label)
            loss.backward()
            optimizer.step()

            # Curves: per-step loss and accuracy.
            loss_numpy = loss.detach().cpu().numpy()
            train_writer.add_scalar("Loss", loss_numpy, global_step)
            # A fresh calculator per step: the reported Acc is per-batch,
            # not a running average over the epoch.
            iou_cal = IoUCalculator(class_num=num_class)
            output_numpy = output.detach().cpu().numpy()
            target_numpy = target.F.numpy()
            iou_cal.add_data(pred_labels=output_numpy, gt_labels=target_numpy)
            mean_acc, _ = iou_cal.compute_acc()
            train_writer.add_scalar("Acc", mean_acc, global_step)

            # Accumulate for the .npy dumps written after training.
            train_loss.append([loss_numpy, global_step])
            train_acc.append([mean_acc, global_step])

            # Periodic console log: loss averaged over the last print_freq steps.
            tmp_loss += loss_numpy
            if i % print_freq == 0 and i != 0:
                logger.log_string("===> Epoch:{}/{},{}/{}, Loss: {}, mAcc: {}".format(epoch_iter, epoch_num, i, len(train_loader), tmp_loss / print_freq, mean_acc))
                tmp_loss = 0
            global_step += 1

        logger.log_string("===> Epoch:{}/{} done, spend time:{} <===".format(epoch_iter, epoch_num, epoch_time.toc()))

        # Periodic checkpoint every 5 epochs.
        if epoch_iter % 5 == 0:
            logger.log_string("===> Save model...")
            savepath = str(checkpoints_dir) + '/model_{}.pth'.format(epoch_iter)
            logger.log_string('===> Saving at %s' % savepath)
            state = {
                'epoch': epoch_iter,
                'model_state_dict': net.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, savepath)
            logger.log_string('===> Saving model....')

        # Validate after every epoch.
        miou = evaluate(DataLoader=test_loader, model=net, loss_func=loss_func, current_epoch=epoch_iter, config_dict=config_dict)

        test_writer.add_scalar('miou', miou, epoch_iter)
        test_miou.append([miou, epoch_iter])

        # Keep a separate checkpoint for the best validation mIoU.
        if miou >= best_iou:
            best_iou = miou
            logger.log_string('===> Save model...')
            savepath = str(checkpoints_dir) + '/best_model.pth'
            logger.log_string('===> Saving at %s' % savepath)
            state = {
                'epoch': epoch_iter,
                'class_avg_iou': miou,
                'model_state_dict': net.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, savepath)
            logger.log_string('===> Saving model....')
            logger.log_string('===> Best mIoU: %f' % best_iou)

        logger.log_string("\n\n")

    # Persist train_loss / train_acc / test_miou for later plotting.
    train_loss_file = os.path.join(tensorboard, 'train_loss.npy')
    np.save(train_loss_file, np.asarray(train_loss).reshape((-1, 2)))
    train_acc_file = os.path.join(tensorboard, "train_acc.npy")
    np.save(train_acc_file, np.asarray(train_acc).reshape((-1, 2)))
    test_miou_file = os.path.join(tensorboard, 'test_miou.npy')
    np.save(test_miou_file, np.asarray(test_miou).reshape((-1, 2)))
    logger.log_string("train_loss, train_acc, test_miou Saved!")

    # Render the saved .npy curves as pictures.
    from lib.utils import save_pic
    save_pic(train_loss_file, x_label='step', y_label='loss')
    save_pic(train_acc_file, x_label='step', y_label='acc')
    save_pic(test_miou_file, x_label='step', y_label='miou')



def evaluate(DataLoader, model, loss_func, current_epoch, config_dict):
    """Evaluate ``model`` on ``DataLoader`` and return the mean IoU.

    Puts the model into eval mode, accumulates loss and per-class IoU over
    the whole loader under ``torch.no_grad()``, logs a summary and the
    per-class IoU table, then restores train mode.

    Args:
        DataLoader: loader yielding feed dicts with 'lidar' and 'targets'.
        model: the network to evaluate (restored to train mode on return).
        loss_func: criterion taking ``(output, target_label)``.
        current_epoch: epoch index, used only for logging.
        config_dict: dict with 'logger' and 'num_class'.

    Returns:
        float: mean IoU over all classes.
    """
    logger = config_dict['logger']
    num_class = config_dict['num_class']

    iou_cal = IoUCalculator(class_num=num_class)           # classes 0..num_class-1; 255 ignored upstream
    logger.log_string('===> Start testing...')
    global_timer = Timer()

    global_timer.tic()
    max_iter = len(DataLoader)

    model.eval()
    torch.cuda.empty_cache()

    loss_sum = 0
    with torch.no_grad():
        # Iterate the loader directly: the previous `iterator.next()` call is
        # Python-2 style and no longer exists on modern DataLoader iterators.
        for feed_dict in tqdm.tqdm(DataLoader, total=max_iter):
            # forward
            input = feed_dict['lidar'].to(device)
            target = feed_dict['targets']
            target_label = target.F.long().to(device)
            output = model(input)

            # loss
            loss = loss_func(output, target_label)
            loss_sum += loss.detach().cpu().numpy()

            # iou
            output_numpy = output.cpu().numpy()
            target_numpy = target.F.numpy()
            iou_cal.add_data(pred_labels=output_numpy, gt_labels=target_numpy)

    miou, iou_list = iou_cal.compute_iou()
    logger.log_string("===> Epoch:{}, loss:{}, miou:{}".format(current_epoch, loss_sum / max_iter, miou))

    # Per-class IoU table, named by the dataset's kept labels.
    from data_utils.semantickitti.SemanticKittiDataLoader import kept_labels
    iou_cal.print_iou(kept_labels)

    model.train()
    logger.log_string('===> End testing..., spend time:{}'.format(global_timer.toc()))

    return miou

def parse_args():
    """Build the training CLI and return the parsed argparse.Namespace."""
    ap = argparse.ArgumentParser('Model')
    ap.add_argument('--model', type=str, default='spvcnn', help='model name [default: spvcnn]')
    ap.add_argument('--batch_size', type=int, default=1, help='Batch Size during training [default: 1]')
    ap.add_argument('--epoch', default=300, type=int, help='Epoch to run [default: 300]')
    ap.add_argument('--learning_rate', default=2.4e-1, type=float, help='Initial learning rate [default: 2.4e-1]')
    ap.add_argument('--optimizer', type=str, default='sgd', help='Adam or sgd [default: sgd]')
    ap.add_argument('--log_dir', type=str, default=None, help='Log path [default: None]')
    ap.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay [default: 1e-4]')
    ap.add_argument('--voxel_size', type=float, default=0.05, help='voxel size [default: 0.05]')
    ap.add_argument('--momentum', type=float, default=0.9, help='momentum [default: 0.9]')
    ap.add_argument('--ignore_index', type=int, default=255, help='ignore_index [default: 255]')
    ap.add_argument('--gpu_num', type=int, default=1, help='gpu nums [default: 1]')
    ap.add_argument('--dataset', type=str, default='s3dis', help='dataset:s3dis or kitti [default: s3dis]')
    return ap.parse_args()
    
def main():
    """Entry point: build run directories, logger, datasets, loaders and the
    model, optionally resume from the best checkpoint, then train."""
    args = parse_args()

    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-')) + args.dataset
    experiment_dir = Path('./log/')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('kitti')
    experiment_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        experiment_dir = experiment_dir.joinpath(args.log_dir)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')   # model checkpoints
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')                  # text logs
    log_dir.mkdir(exist_ok=True)
    tensorboard = experiment_dir.joinpath('tensorboard/')       # loss/miou curves
    tensorboard.mkdir(exist_ok=True)
    backup_dir = experiment_dir.joinpath('file_backup/')        # per-run snapshot of model/train files
    backup_dir.mkdir(exist_ok=True)

    optimizer = args.optimizer          # default sgd
    epoch = args.epoch                  # default 300

    # Back up the model definition and this training script for this run.
    print("===> Copying files to %s..." % backup_dir)
    shutil.copy('core/models/%s.py' % args.model, str(backup_dir))
    shutil.copy('core/models/utils.py', str(backup_dir))
    shutil.copy('%s' % os.path.basename(__file__), str(backup_dir))

    # Build the logger object.
    log_config = {
        'level': logging.INFO,
        'log_dir': log_dir,
        'model_name': args.model,
        'args': args
    }
    logger = LOG(log_config=log_config)

    dataset_train = SemanticKittiDataset(
        root="/media/ubuntu/数据/suyunzheng_dataset/polarnet/data/sequences",
        voxel_size=args.voxel_size,
        num_points=80000,
        split='train',
        sample_stride=1
    )
    dataset_test = SemanticKittiDataset(
        root="/media/ubuntu/数据/suyunzheng_dataset/polarnet/data/sequences",
        voxel_size=args.voxel_size,
        # NOTE(review): 8e5 is a float (800000.0) while the train split uses
        # the int 80000 — kept as-is; confirm the loader accepts a float.
        num_points=8e5,
        split='val',                # validation split
        sample_stride=1
    )

    # Seed everything for reproducibility.
    import random
    manual_seed = 123
    random.seed(manual_seed)
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)

    def worker_init_fn(worker_id):
        # Give every DataLoader worker its own deterministic seed.  The old
        # call random.seed(manual_seed, worker_id) passed worker_id as the
        # `version` argument of random.seed, so all workers shared one seed.
        random.seed(manual_seed + worker_id)

    # dataloader
    train_loader = torch.utils.data.DataLoader(
        dataset_train, batch_size=(1 if args.gpu_num == 1 else 2) * args.batch_size, shuffle=True,
        num_workers=8, pin_memory=True,
        worker_init_fn=worker_init_fn, collate_fn=dataset_train.collate_fn
    )

    test_loader = torch.utils.data.DataLoader(
        dataset_test, batch_size=2, shuffle=False,
        num_workers=8, pin_memory=True,
        worker_init_fn=worker_init_fn, collate_fn=dataset_test.collate_fn
    )

    # network
    if args.model == 'spvcnn':
        net = SPVCNN(num_classes=dataset_train.num_class, init_dim=dataset_train.init_dim, pres=1, vres=1).to(device)
    elif args.model == 'minkunet':
        net = MinkUNet(num_classes=dataset_train.num_class, init_dim=dataset_train.init_dim).to(device)

    # Xavier initialization for conv/linear layers (used when training from scratch).
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)
        elif classname.find('Linear') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)

    # Resume from the best checkpoint if one exists; otherwise start fresh.
    checkpoint_path = str(experiment_dir) + '/checkpoints/best_model.pth'
    try:
        checkpoint = torch.load(checkpoint_path)
        start_epoch = checkpoint['epoch']
        net.load_state_dict(checkpoint['model_state_dict'])
        print('===> Use pretrain model at %s' % checkpoint_path)
    except (OSError, KeyError, RuntimeError):
        # Narrowed from a bare `except:` so unrelated bugs still surface.
        print('===> No existing model, starting training from scratch...')
        start_epoch = 0
        net = net.apply(weights_init)

    if args.gpu_num == 2:
        print("===> Let's use {} GPUs!".format(torch.cuda.device_count()))
        net = nn.DataParallel(net)

    # loss
    criterion = nn.CrossEntropyLoss(
            ignore_index=dataset_train.ignored_class)

    # Everything train() needs beyond the loaders/net/criterion.
    config_dict = {
        'checkpoints_dir': checkpoints_dir,     # checkpoint directory
        'log_dir': log_dir,                     # log directory
        'voxel_size': args.voxel_size,          # voxel size
        'tensorboard': tensorboard,             # curve-plotting directory
        'logger': logger,                       # logger
        'num_class': dataset_train.num_class    # number of classes of the dataset
    }
    # train
    train(train_loader, test_loader, net, optimizer=optimizer, loss_func=criterion, start_epoch=start_epoch, epoch_num=epoch, config_dict=config_dict)


# Script entry point.
if __name__ == '__main__':
    main()
