import argparse
import logging
import os
import pprint
import random

import warnings
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from torch.utils.data import DataLoader, ConcatDataset, random_split
from torch.optim import AdamW
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter

from dataset.base_dataset import BaseDataset
from dataset.real_state_dataset import RealStateDataset
from depth_anything_v2.dpt import DepthAnythingV2
from depth_anything_v2.control import ControlDepthAnythingV2
from depth_anything_v2.util.transform import align_depth_least_square
from util.dist_helper import setup_distributed
from util.loss import SiLogLoss
from util.metric import eval_depth
from util.utils import init_log
import pdb
import matplotlib.pyplot as plt
from PIL import Image


# Command-line interface for the control-branch fine-tuning script.
parser = argparse.ArgumentParser(description='Depth Anything V2 for Metric Depth Estimation')

# Model / data configuration.
# NOTE(review): --dataset, --img-size, --min-depth and --max-depth appear
# unused in this script (dataset paths are hard-coded below) — confirm.
parser.add_argument('--encoder', default='vitl', choices=['vits', 'vitb', 'vitl', 'vitg'])
parser.add_argument('--dataset', default='hypersim', choices=['hypersim', 'vkitti'])
parser.add_argument('--img-size', default=518, type=int)
parser.add_argument('--min-depth', default=0.001, type=float)
parser.add_argument('--max-depth', default=20, type=float)
# Optimization hyper-parameters.
parser.add_argument('--epochs', default=40, type=int)
parser.add_argument('--bs', default=2, type=int)          # per-GPU batch size
parser.add_argument('--lr', default=0.000005, type=float)
# Checkpointing and distributed-launch options.
parser.add_argument('--pretrained-from', type=str)        # path to main-model weights
parser.add_argument('--save-path', type=str, required=True)
parser.add_argument('--local-rank', default=0, type=int)
parser.add_argument('--port', default=None, type=int)     # rendezvous port for setup_distributed


def main():
    """Fine-tune the control branch of DepthAnythingV2 on disparity data.

    One process per GPU (launched externally; rank/world size come from
    ``setup_distributed``). Trains only the control parameters with a
    SiLog loss, evaluates standard depth metrics every epoch, and writes
    TensorBoard logs, per-epoch visualizations and a ``latest.pth``
    checkpoint under ``--save-path``.
    """
    args = parser.parse_args()

    # warnings.simplefilter('ignore', np.RankWarning)

    logger = init_log('global', logging.INFO)
    logger.propagate = 0

    rank, world_size = setup_distributed(port=args.port)

    # Only rank 0 logs the config and owns the TensorBoard writer.
    if rank == 0:
        all_args = {**vars(args), 'ngpus': world_size}
        logger.info('{}\n'.format(pprint.pformat(all_args)))
        writer = SummaryWriter(args.save_path)

    cudnn.enabled = True
    cudnn.benchmark = True

    # size = (args.img_size, args.img_size)
    # if args.dataset == 'hypersim':
    #     trainset = Hypersim('dataset/splits/hypersim/train.txt', 'train', size=size)
    # elif args.dataset == 'vkitti':
    #     trainset = VKITTI2('dataset/splits/vkitti2/train.txt', 'train', size=size)
    # else:
    #     raise NotImplementedError

    # Use these datasets for the real training run.
    # dl3dv_train_dataset = BaseDataset(
    #     filename_ls_path="/mnt/new/liufenglin/DL3DV-ALL-960P/split_dataset/train_data.csv",
    #     dataset_dir="/mnt/new/liufenglin/DL3DV-ALL-960P",
    #     random_mask_prob=0.5,
    # )
    # dl3dv_val_dataset = BaseDataset(
    #     filename_ls_path="/mnt/new/liufenglin/DL3DV-ALL-960P/split_dataset/test_data.csv",
    #     dataset_dir="/mnt/new/liufenglin/DL3DV-ALL-960P",
    #     random_mask_prob=0.5,
    #     validate=True,
    # )
    # real_state_train_dataset = RealStateDataset(
    #     filename_ls_path="/mnt/new/liufenglin/Real-state/split_dataset/train_data.csv",
    #     dataset_dir="/mnt/new/liufenglin/Real-state",
    #     H=336,
    #     W=512,
    #     random_mask_prob=0.5,
    # )
    # real_state_val_dataset = RealStateDataset(
    #     filename_ls_path="/mnt/new/liufenglin/Real-state/split_dataset/test_data.csv",
    #     dataset_dir="/mnt/new/liufenglin/Real-state",
    #     random_mask_prob=0.5,
    #     validate=True,
    # )
    # val_dataset = ConcatDataset([dl3dv_val_dataset, real_state_val_dataset])
    # train_dataset = ConcatDataset([dl3dv_train_dataset, real_state_train_dataset])

    # NOTE(review): both train and val currently read the *test* split with
    # validate=True — presumably a small debugging configuration; confirm
    # before a real training run.
    train_dataset = BaseDataset(
        filename_ls_path="/mnt/new/liufenglin/DL3DV-ALL-960P/split_dataset/test_data.csv",
        dataset_dir="/mnt/new/liufenglin/DL3DV-ALL-960P",
        random_mask_prob=0.5,
        validate=True,
    )
    val_dataset = BaseDataset(
        filename_ls_path="/mnt/new/liufenglin/DL3DV-ALL-960P/split_dataset/test_data.csv",
        dataset_dir="/mnt/new/liufenglin/DL3DV-ALL-960P",
        random_mask_prob=0.5,
        validate=True,
    )
    # Use a fixed seed so every distributed rank draws the SAME subset;
    # without it, each process splits differently and the
    # DistributedSampler would partition inconsistent datasets.
    split_gen = torch.Generator().manual_seed(42)
    train_dataset, _ = random_split(train_dataset, [128, len(train_dataset) - 128], generator=split_gen)
    val_dataset, _ = random_split(val_dataset, [64, len(val_dataset) - 64], generator=split_gen)
    trainsampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    trainloader = DataLoader(train_dataset, batch_size=args.bs, pin_memory=True, num_workers=4, drop_last=True, sampler=trainsampler)

    valsampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
    valloader = DataLoader(val_dataset, batch_size=1, pin_memory=True, num_workers=4, drop_last=True, sampler=valsampler)

    local_rank = int(os.environ["LOCAL_RANK"])
    print(f"local_rank: {local_rank}")
    # Report dataset sizes.
    print(f"train_dataset: {len(train_dataset)}, val_dataset: {len(val_dataset)}")

    # Per-encoder DPT head configuration (features / out_channels).
    model_configs = {
        'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
        'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
        'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
        'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}
    }
    model = ControlDepthAnythingV2(**model_configs[args.encoder])

    # Initialize the control branch from the main model's weights.
    if args.pretrained_from:
        model.init_by_main_state_dict(torch.load(args.pretrained_from, map_location='cpu'))

    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda(local_rank)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], broadcast_buffers=False,
                                                      output_device=local_rank, find_unused_parameters=True)

    criterion = SiLogLoss().cuda(local_rank)

    # Original optimizer setup; differs here because only the control
    # branch is trained.
    # optimizer = AdamW([{'params': [param for name, param in model.named_parameters() if 'pretrained' in name], 'lr': args.lr},
    #                    {'params': [param for name, param in model.named_parameters() if 'pretrained' not in name], 'lr': args.lr * 10.0}],
    #                   lr=args.lr, betas=(0.9, 0.999), weight_decay=0.01)

    # Freeze everything except the control parameters, then optimize only those.
    model.module.set_grad()
    optimizer = AdamW([{'params': model.module.get_control_params(), 'lr': args.lr}],
                    lr=args.lr, betas=(0.9, 0.999), weight_decay=0.01)

    total_iters = args.epochs * len(trainloader)

    # Best-so-far metrics: higher is better for d1/d2/d3, lower for the rest.
    previous_best = {'d1': 0, 'd2': 0, 'd3': 0, 'abs_rel': 100, 'sq_rel': 100, 'rmse': 100, 'rmse_log': 100, 'log10': 100, 'silog': 100}

    for epoch in range(args.epochs):
        if rank == 0:
            logger.info('===========> Epoch: {:}/{:}, d1: {:.3f}, d2: {:.3f}, d3: {:.3f}'.format(epoch, args.epochs, previous_best['d1'], previous_best['d2'], previous_best['d3']))
            logger.info('===========> Epoch: {:}/{:}, abs_rel: {:.3f}, sq_rel: {:.3f}, rmse: {:.3f}, rmse_log: {:.3f}, '
                        'log10: {:.3f}, silog: {:.3f}'.format(
                            epoch, args.epochs, previous_best['abs_rel'], previous_best['sq_rel'], previous_best['rmse'],
                            previous_best['rmse_log'], previous_best['log10'], previous_best['silog']))

        # Reshuffle the sampler's partition each epoch.
        trainloader.sampler.set_epoch(epoch + 1)

        model.train()
        total_loss = 0

        for i, sample in enumerate(trainloader):
            optimizer.zero_grad()
            img, condition, disparity, masked_disparity, valid_mask = sample['rgb_handled'].cuda(), sample['condition'], \
                    sample['disparity_handled'].cuda(), sample['masked_disparity_handled'].cuda(), sample['valid_mask'].cuda()
            # Keep only the middle channel of the disparity target.
            disparity = disparity[:, 1, ...]
            # Random horizontal-flip augmentation applied consistently to
            # image, condition, target and mask.
            if random.random() < 0.5:
                img = img.flip(-1)
                condition = condition.flip(-1)
                disparity = disparity.flip(-1)
                valid_mask = valid_mask.flip(-1)

            pred = model(img, condition)

            # print('prednan', torch.isnan(pred).sum(), 'disparitynan', torch.isnan(disparity).sum())
            # Exclude near-zero predictions/targets from the loss.
            valid_mask = (pred > 1e-3) & (disparity > 1e-3) & valid_mask
            loss = criterion(pred, disparity, valid_mask)

            loss.backward()
            optimizer.step()

            total_loss += loss.item()

            iters = epoch * len(trainloader) + i

            # Polynomial (power 0.9) learning-rate decay over all iterations.
            lr = args.lr * (1 - iters / total_iters) ** 0.9

            optimizer.param_groups[0]["lr"] = lr

            if rank == 0:
                writer.add_scalar('train/loss', loss.item(), iters)
                logger.info('Iter: {}/{}, LR: {:.7f}, Loss: {:.3f}'.format(i, len(trainloader), optimizer.param_groups[0]['lr'], loss.item()))

        model.eval()

        # Per-rank metric accumulators; summed across ranks via dist.reduce below.
        results = {'d1': torch.tensor([0.0]).cuda(), 'd2': torch.tensor([0.0]).cuda(), 'd3': torch.tensor([0.0]).cuda(),
                   'abs_rel': torch.tensor([0.0]).cuda(), 'sq_rel': torch.tensor([0.0]).cuda(), 'rmse': torch.tensor([0.0]).cuda(),
                   'rmse_log': torch.tensor([0.0]).cuda(), 'log10': torch.tensor([0.0]).cuda(), 'silog': torch.tensor([0.0]).cuda()}
        nsamples = torch.tensor([0.0]).cuda()

        for i, sample in enumerate(valloader):

            img, disparity, masked_disparity, mask, valid_mask = sample['rgb'].cuda().float()[0], sample['disparity'].cuda()[0], sample['masked_disparity'].cuda()[0] \
                ,sample['mask'].cuda()[0], sample['valid_mask'].cuda()[0]

            # Skip samples with too few valid pixels to evaluate.
            if valid_mask.sum() < 10:
                continue

            with torch.no_grad():
                pred = model.module.infer_image(img, masked_disparity, mask)
                pred = torch.tensor(pred).cuda()
                # Least-squares scale/shift alignment against ground truth,
                # restricted to unmasked valid pixels.
                pred, _, _ = align_depth_least_square(pred.cpu().numpy(), disparity.cpu().numpy(), ((mask[..., 0] < 125) & valid_mask).cpu().numpy())
                pred = torch.tensor(pred).cuda()

            valid_mask = (pred > 1e-3) & (disparity > 1e-3) & valid_mask
            # np.bool was removed in NumPy 1.24; the builtin bool is the
            # correct dtype argument.
            valid_mask = valid_mask.cpu().numpy().astype(bool)
            cur_results = eval_depth(pred[valid_mask], disparity[valid_mask])
            if rank == 0 and i % 10 == 0:
                # print('cur_results', cur_results)
                # Visualize rgb / mask / prediction / ground-truth disparity.
                plt.figure(figsize=(12, 3))
                plt.subplot(1, 4, 1)
                plt.imshow(img.cpu().numpy().astype(np.uint8))
                plt.subplot(1, 4, 2)
                plt.imshow(mask.cpu().numpy())
                plt.subplot(1, 4, 3)
                plt.imshow(pred.cpu().numpy())
                plt.subplot(1, 4, 4)
                plt.imshow(disparity.cpu().numpy())
                os.makedirs(f'{args.save_path}/{epoch}', exist_ok=True)
                plt.savefig(f'{args.save_path}/{epoch}/{i}.png')
                # Save raw arrays for offline inspection.
                np.save(f'{args.save_path}/{epoch}/{i}_pred.npy', pred.cpu().numpy())
                np.save(f'{args.save_path}/{epoch}/{i}_disparity.npy', disparity.cpu().numpy())
                # Save image and mask as JPEG; use fresh names so the loop
                # variables img/mask are not clobbered by PIL objects.
                img_pil = Image.fromarray(img.cpu().numpy().astype(np.uint8))
                img_pil.save(f'{args.save_path}/{epoch}/{i}_img.jpg')
                mask_pil = Image.fromarray(mask.cpu().numpy().astype(np.uint8))
                mask_pil.save(f'{args.save_path}/{epoch}/{i}_mask.jpg')

            for k in results.keys():
                results[k] += cur_results[k]
            nsamples += 1

        torch.distributed.barrier()

        # Sum metrics and counts onto rank 0; rank 0 then reports global means.
        for k in results.keys():
            dist.reduce(results[k], dst=0)
        dist.reduce(nsamples, dst=0)

        # NOTE(review): if every sample is skipped, nsamples stays 0 and the
        # divisions below produce NaN — acceptable for a debug run, but worth
        # guarding in production.
        if rank == 0:
            logger.info('==========================================================================================')
            logger.info('{:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}'.format(*tuple(results.keys())))
            logger.info('{:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}'.format(*tuple([(v / nsamples).item() for v in results.values()])))
            logger.info('==========================================================================================')
            print()

            for name, metric in results.items():
                writer.add_scalar(f'eval/{name}', (metric / nsamples).item(), epoch)

        # Track best-so-far on every rank (only rank 0's values are reduced
        # global means; non-zero ranks keep local values, used only locally).
        for k in results.keys():
            if k in ['d1', 'd2', 'd3']:
                previous_best[k] = max(previous_best[k], (results[k] / nsamples).item())
            else:
                previous_best[k] = min(previous_best[k], (results[k] / nsamples).item())

        if rank == 0:
            # NOTE(review): this saves the DDP-wrapped state_dict ('module.'
            # prefixed keys) — confirm loaders expect that format.
            checkpoint = {
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch,
                'previous_best': previous_best,
            }
            print('save model, in path:', os.path.join(args.save_path, 'latest.pth'))
            torch.save(checkpoint, os.path.join(args.save_path, 'latest.pth'))

    if rank == 0:
        writer.close()


# Standard script entry point: run training only when executed directly
# (e.g. under torchrun), not when imported as a module.
if __name__ == '__main__':
    main()