import argparse, os, sys, time, gc, datetime

# import torch
# import torch.nn as nn
# import torch.nn.parallel
# import torch.backends.cudnn as cudnn
# import torch.optim as optim
# from torch.utils.data import DataLoader
# import torch.distributed as dist
# cudnn.benchmark = True

import mindspore as ms
from mindspore import context, nn, Tensor,save_checkpoint,ops
from mindspore.nn import AdamWeightDecay
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.train.callback import Callback
from mindspore.dataset import GeneratorDataset
from mindspore.communication import init, get_rank, get_group_size
from mindspore.train.callback import SummaryCollector
from datasets import find_dataset_def
from models import *
from utils import *
from datasets.data_io import read_pfm, save_pfm
import datetime
import matplotlib.pyplot as plt
import cv2
def _str2bool(value):
    """Parse a textual boolean CLI value ("true"/"false", "1"/"0", ...).

    argparse's ``type=bool`` is a well-known pitfall: ``bool("False")`` is
    True because any non-empty string is truthy, so ``--resume False`` used
    to silently enable resuming.  This converter interprets the usual
    spellings and rejects anything else.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if value.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("boolean value expected, got {!r}".format(value))


parser = argparse.ArgumentParser(description='A MindSpore Implementation of Cascade Cost Volume MVSNet')
parser.add_argument('--mode', help='train or test')
parser.add_argument('--model', default='mvsnet', help='select model')
parser.add_argument('--device', default='cuda', help='select model')

# Dataset / file-list locations.
parser.add_argument('--dataset', default='dtu_yao', help='select dataset')
parser.add_argument('--trainpath', help='train datapath')
parser.add_argument('--testpath', help='test datapath')
parser.add_argument('--trainlist', help='train list')
parser.add_argument('--testlist', help='test list')

# Optimization schedule.
parser.add_argument('--epochs', type=int, default=16, help='number of epochs to train')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
parser.add_argument('--lrepochs', type=str, default="10,12,14:2", help='epoch ids to downscale lr and the downscale rate')
parser.add_argument('--wd', type=float, default=0.001, help='weight decay')

parser.add_argument('--batch_size', type=int, default=4, help='train batch size')
parser.add_argument('--numdepth', type=int, default=384, help='the number of depth values')
parser.add_argument('--interval_scale', type=float, default=1.06, help='the number of depth values')

# Checkpointing.
parser.add_argument('--loadckpt', default=None, help='load a specific checkpoint')
parser.add_argument('--logdir', default='./checkpoints/debug/refine', help='the directory to save checkpoints/logs')
# BUG FIX: was type=bool, which made any non-empty value (incl. "False") truthy.
parser.add_argument('--resume', type=_str2bool, default=False, help='continue to train the model')

parser.add_argument('--summary_freq', type=int, default=50, help='print and summary frequency')
parser.add_argument('--save_freq', type=int, default=1, help='save checkpoint frequency')
parser.add_argument('--eval_freq', type=int, default=1, help='eval freq')

parser.add_argument('--seed', type=int, default=10, metavar='S', help='random seed')
parser.add_argument('--pin_m', action='store_true', help='data loader pin memory')
parser.add_argument("--local_rank", type=int, default=0)
# BUG FIX: the option was spelled "--train loss" (with a space), which made the
# resulting attribute unreachable as a normal identifier.
parser.add_argument("--train_loss", type=str, default="0.25,0.5,1", help='last_stage_name')
parser.add_argument('--lossrate', type=float, default=0.9, help='the number of depth values')
parser.add_argument('--last_stage', type=str, default="stage4", help='last_stage_name')
parser.add_argument('--share_cr', action='store_true', help='whether share the cost volume regularization')
parser.add_argument('--ndepths', type=str, default="48,8,8", help='ndepths')
parser.add_argument('--depth_inter_r', type=str, default="1", help='depth_intervals_ratio')
# parser.add_argument('--dlossw', type=str, default="0.25,0.5,1.0,2.0", help='depth loss weight for different stage')
parser.add_argument('--dlossw', type=str, default="1,1,1,1,2,2,2,3,3,3,4", help='depth loss weight for different stage')
parser.add_argument('--cr_base_chs', type=str, default="4", help='cost regularization base channels')
parser.add_argument('--grad_method', type=str, default="detach", choices=["detach", "undetach"], help='grad method')
parser.add_argument('--GRUiters', type=str, default="3,3,3",  help='iters')
parser.add_argument('--iters', type=int, default=12,  help='iters')
parser.add_argument('--maskupmode', type=str, default="laststage",  help='iters')
parser.add_argument('--CostNum', type=int, default=1,  help='CostNum')
parser.add_argument('--initloss', type=str, default='initloss',  help='initloss')
parser.add_argument('--trainviews', type=int, default=3,  help='trainviews')
parser.add_argument('--testviews', type=int, default=3,  help='testviews')
parser.add_argument('--dispmaxfirst', type=str, default='last',  help='testviews')

parser.add_argument('--maskupsample', type=str, default="last",  help='maskupsample')
parser.add_argument('--hiddenstate', type=str, default="init",  help='hiddenstate')


# Mixed-precision / apex-era flags (kept for CLI compatibility; not used by
# the MindSpore code path).
parser.add_argument('--using_apex', action='store_true', help='using apex, need to install apex')
parser.add_argument('--sync_bn', action='store_true',help='enabling apex sync BN.')
parser.add_argument('--opt-level', type=str, default="O0")
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
parser.add_argument('--usingrefine', action='store_true')

parser.add_argument('--logdirX', default='./checkpoints/from_old_retrain/log/', help='the directory to save checkpoints/logs')
parser.add_argument('--outdir', default='./eval_training_log', help='output dir for eval')
parser.add_argument('--evalpath', default="/home2/dataset/jack/Documents_home1/Database/DTU/DTU/dtu_testing/",  help='testing data path')
parser.add_argument('--evallist', default="lists/dtu/val.txt",  help='testing scan list')

# Single-process by default; flip when launched under mindspore.communication.
is_distributed = False

# def train(model, model_loss, optimizer, TrainImgLoader, TestImgLoader, EvalImgLoader, lr_scheduler, start_epoch, args):
#     gru_loss = {f"l{i}": 0.0 for i in range(args.iters + 1)}
#     gru_loss_refine = {f"l{i}": 0.0 for i in range(args.iters - 3)} if args.usingrefine else {}
#     print("gru_loss_refine:",gru_loss_refine,"\n")
#     for epoch_idx in range(start_epoch, args.epochs):
#         print(f"\nEpoch {epoch_idx + 1}/{args.epochs}")
#         print_loss_interval = 2000 // args.batch_size
#         batch_idx = 0
#         for sample in TrainImgLoader.create_dict_iterator():
#             start_time = time.time()
#             print("start_time:",start_time)
#             global_step = len(TrainImgLoader) * epoch_idx + batch_idx
#             do_summary = global_step % args.summary_freq == 0
#             loss, scalar_outputs, image_outputs = train_sample(model, model_loss, optimizer, sample, args)
#             for i in range(args.iters + 1):
#                 gru_loss[f"l{i}"] += scalar_outputs[f"l{i}"]
#             if args.usingrefine:
#                 for i in range(args.iters - 3):
#                     gru_loss_refine[f"l{i}"] += scalar_outputs[f"l_refine{i}"]
#             if lr_scheduler is not None:
#                 lr_scheduler.step()
#             if batch_idx % print_loss_interval == 0 and batch_idx > 0:
#                 cur_lr = optimizer.learning_rate.asnumpy() if hasattr(optimizer, 'learning_rate') else optimizer.get_lr().asnumpy()
#                 print("Iter {}/{}, lr {:.6f}, train loss = {:.3f}, depth loss = {:.3f}, time = {:.3f}".format(
#                     batch_idx, args.batch_size,
#                     cur_lr if isinstance(cur_lr, float) else cur_lr[0],
#                     loss,
#                     scalar_outputs['depth_loss'],
#                     time.time() - start_time
#                 ))
#                 print(['{}: {:.4f}'.format(k, gru_loss[k] / batch_idx) for k in gru_loss])
#                 if args.usingrefine:
#                     print(['{}: {:.4f}'.format(k, gru_loss_refine[k] / batch_idx) for k in gru_loss_refine])
#             batch_idx += 1
#             #     print(
#             #        "Epoch {}/{}, Iter {}/{}, lr {:.6f}, train loss = {:.3f}, depth loss = {:.3f}, time = {:.3f}".format(
#             #            epoch_idx, args.epochs, batch_idx, len(TrainImgLoader),
#             #            optimizer.param_groups[0]["lr"], loss,
#             #            scalar_outputs['depth_loss'],
#             #            time.time() - start_time))
#             #     print(optimizer.state_dict()['param_groups'][0]['lr'])
#             #     print(optimizer.param_groups[0]["lr"])
#             #     print(['{}:{}'.format(key,gru_loss[key]/batch_idx) for key in gru_loss.keys()])
#             #     if args.usingrefine:
#             #         print(['{}:{}'.format(key, gru_loss_refine[key] / batch_idx) for key in gru_loss_refine.keys()])
#         # checkpoint
#         if (not is_distributed) or (get_rank() == 0):
#             if (epoch_idx + 1) % args.save_freq == 0:
#                 save_path = os.path.join(args.logdir, f"model_{epoch_idx:06d}.ckpt")
#                 save_checkpoint(model, save_path)
#                 print(f"Checkpoint saved to {save_path}")
#         # testing
#         if (epoch_idx % args.eval_freq == 0) or (epoch_idx == args.epochs - 1):
#             avg_test_scalars = DictAverageMeter()
#             for batch_idx, sample in enumerate(TestImgLoader.create_dict_iterator()):
#                 start_time = time.time()
#                 loss, scalar_outputs_test, _ = test_sample_depth(model, model_loss, sample, args)
#                 scalar_outputs_test['time'] = time.time() - start_time
#                 avg_test_scalars.update(scalar_outputs_test)
#             print("Eval Result:", avg_test_scalars.mean())
#     for i in gru_loss.keys():
#         gru_loss[i] = 0
#     for i in gru_loss_refine.keys():
#         gru_loss_refine[i] = 0
class WithLossCell(nn.Cell):
    """Wrap the backbone and loss function into a single forward cell.

    Used with nn.TrainOneStepCell so one call computes the loss for a batch.
    NOTE(review): construct returns a 3-tuple (loss first); confirm the
    surrounding TrainOneStepCell setup differentiates only the loss term.
    """

    def __init__(self, backbone, loss_fn, args):
        """
        Args:
            backbone: the MVS network; called as backbone(imgs, proj_matrices, depth_values).
            loss_fn: loss callable (outputs_depth, depth_gt, mask, dlossw_list,
                depth_values, loss_rate=...) -> (loss, depth_loss_dict).
            args: parsed CLI arguments (uses GRUiters and lossrate).
        """
        super(WithLossCell, self).__init__()
        self.backbone = backbone
        self.loss_fn = loss_fn
        self.args = args
        # PERF/correctness: the per-iteration weight list depends only on
        # args.GRUiters, so parse it once here instead of re-splitting the
        # string on every construct() call (Python string ops inside
        # construct also hinder graph-mode compilation).
        iter_list = [int(e) for e in args.GRUiters.split(",")]
        self.dlossw_list = (
            [1] * (iter_list[0] + 1) + [2] * (iter_list[1] + 1) + [3] * (iter_list[2] + 1) + [4]
        )
        self.loss_rate = args.lossrate

    def construct(self, imgs, proj_matrices, depth_values, depth_gt, mask):
        """Forward the backbone and compute the weighted multi-iteration loss."""
        outputs = self.backbone(imgs, proj_matrices, depth_values)
        loss, depth_loss_dict = self.loss_fn(
            outputs["depth"], depth_gt, mask, self.dlossw_list, depth_values, loss_rate=self.loss_rate
        )
        return loss, outputs, depth_loss_dict
def train(model, model_loss, optimizer, TrainImgLoader, TestImgLoader, EvalImgLoader, lr_scheduler, start_epoch, args):
    """Main training loop: per-epoch training, checkpointing and evaluation.

    Args:
        model: Effi_MVS_plus network (nn.Cell).
        model_loss: loss callable (see WithLossCell for the call contract).
        optimizer: MindSpore optimizer over model.trainable_params().
        TrainImgLoader / TestImgLoader: batched GeneratorDataset pipelines.
        EvalImgLoader: unused placeholder, kept for interface compatibility.
        lr_scheduler: unused placeholder (no LR schedule is applied).
        start_epoch: first epoch index (supports checkpoint resume).
        args: parsed CLI arguments.
    """
    # 1. Wrap loss computation and one optimization step.
    net_with_loss = WithLossCell(model, model_loss, args)
    train_step = nn.TrainOneStepCell(net_with_loss, optimizer)
    train_step.set_train()
    total_iters = TrainImgLoader.get_dataset_size()
    for epoch_idx in range(start_epoch, args.epochs):
        print(f"\nEpoch {epoch_idx + 1}/{args.epochs}")
        batch_idx = 0
        # Running sums of the per-GRU-iteration losses for periodic reporting.
        gru_loss = {f"l{i}": 0.0 for i in range(args.iters + 1)}
        gru_loss_refine = {f"l{i}": 0.0 for i in range(args.iters - 3)} if args.usingrefine else {}
        print_loss_interval = 2000 // args.batch_size
        for sample in TrainImgLoader.create_dict_iterator():
            start_time = time.time()
            imgs = sample["imgs"]
            proj_matrices = sample["proj_matrices"]
            depth_values = sample["depth_values"]
            depth_gt = sample["depth"]
            mask = sample["mask"]
            # 2. Forward + backward + one optimizer update in a single call.
            loss, outputs, depth_loss_dict = train_step(imgs, proj_matrices, depth_values, depth_gt, mask)
            # 3. Accumulate per-iteration loss statistics.
            for i in range(args.iters + 1):
                gru_loss[f"l{i}"] += depth_loss_dict[f"l{i}"].asnumpy().item()
            if args.usingrefine:
                for i in range(args.iters - 3):
                    gru_loss_refine[f"l{i}"] += depth_loss_dict[f"l_refine{i}"].asnumpy().item()
            if batch_idx % print_loss_interval == 0 and batch_idx > 0:
                cur_lr = optimizer.learning_rate.asnumpy() if hasattr(optimizer, 'learning_rate') else optimizer.get_lr().asnumpy()
                # BUG FIX: the progress line used to print args.batch_size as
                # the iteration total; report the dataset size instead.
                print("Iter {}/{}, lr {:.6f}, train loss = {:.3f}, time = {:.3f}".format(
                    batch_idx, total_iters,
                    cur_lr if isinstance(cur_lr, float) else cur_lr[0],
                    loss.asnumpy().item(),
                    time.time() - start_time
                ))
                # BUG FIX: batch_idx + 1 batches (indices 0..batch_idx) have
                # been accumulated by now; the old code divided by batch_idx.
                seen = batch_idx + 1
                print(['{}: {:.4f}'.format(k, gru_loss[k] / seen) for k in gru_loss])
                if args.usingrefine:
                    print(['{}: {:.4f}'.format(k, gru_loss_refine[k] / seen) for k in gru_loss_refine])
            batch_idx += 1
        # 4. Save checkpoint (only on rank 0 when distributed).
        if (not is_distributed) or (get_rank() == 0):
            if (epoch_idx + 1) % args.save_freq == 0:
                save_path = os.path.join(args.logdir, f"model_{epoch_idx:06d}.ckpt")
                save_checkpoint(model, save_path)
                print(f"Checkpoint saved to {save_path}")
        # 5. Evaluate on the test split.
        if (epoch_idx % args.eval_freq == 0) or (epoch_idx == args.epochs - 1):
            avg_test_scalars = DictAverageMeter()
            for batch_idx, sample in enumerate(TestImgLoader.create_dict_iterator()):
                start_time = time.time()
                loss, scalar_outputs_test, _ = test_sample_depth(model, model_loss, sample, args)
                scalar_outputs_test['time'] = time.time() - start_time
                avg_test_scalars.update(scalar_outputs_test)
            print("Eval Result:", avg_test_scalars.mean())

def test(model, model_loss, TestImgLoader, args):
    """Run the full test split once and print the averaged depth metrics.

    Args:
        model: trained MVS network.
        model_loss: loss callable forwarded to test_sample_depth.
        TestImgLoader: batched GeneratorDataset pipeline of test samples.
        args: parsed CLI arguments.
    """
    avg_test_scalars = DictAverageMeter()
    print(TestImgLoader.get_dataset_size())
    # BUG FIX: iterating a GeneratorDataset directly yields per-column tuples,
    # but test_sample_depth indexes the sample by column name. Use the dict
    # iterator (as train() does) so sample["imgs"] etc. resolve correctly.
    for batch_idx, sample in enumerate(TestImgLoader.create_dict_iterator()):
        start_time = time.time()
        loss, scalar_outputs, image_outputs = test_sample_depth(model, model_loss, sample, args)
        scalar_outputs['time'] = time.time() - start_time
        avg_test_scalars.update(scalar_outputs)
    print("final", avg_test_scalars.mean())


def train_sample(model, model_loss, optimizer, sample, args):
    """Run one training step (forward, backward, optimizer update) on a batch.

    Args:
        model: Effi_MVS_plus network.
        model_loss: loss callable (depth_preds, depth_gt, mask, dlossw_list,
            depth_values, loss_rate=...) -> (loss, depth_loss_dict).
        optimizer: MindSpore optimizer; its `parameters` are differentiated.
        sample: dict batch with "imgs", "proj_matrices", "depth",
            "depth_values", "mask".
        args: parsed CLI arguments (uses GRUiters, lossrate, iters, last_stage).

    Returns:
        (loss_float, scalar_outputs, image_outputs): loss as a Python float,
        scalar metrics converted to floats, image tensors converted to numpy.
    """
    model.set_train()
    imgs = sample["imgs"]
    proj_matrices = sample["proj_matrices"]
    depth_values = sample["depth_values"]
    depth_gt = sample["depth"]
    mask = sample["mask"]
    print("In train_sample:sample_cuda[imgs]:",imgs.shape)
    print("In train_sample:sample_cuda[proj_matrices]:",proj_matrices["stage1"].shape)
    print("In train_sample:sample_cuda[depth_values]:",depth_values.shape)
    # Per-iteration loss weights: 1 for the first GRU stage's iterations,
    # 2 and 3 for the next stages, 4 for the final prediction.
    iter_list = [int(e) for e in args.GRUiters.split(",")]
    dlossw_list = (
        [1] * (iter_list[0] + 1) + [2] * (iter_list[1] + 1) + [3] * (iter_list[2] + 1) + [4]
    )

    def forward_fn():
        # Forward pass returning (loss, aux) so value_and_grad(has_aux=True)
        # differentiates the loss only.
        outputs = model(imgs, proj_matrices, depth_values)
        loss, depth_loss_dict = model_loss(
            outputs["depth"], depth_gt, mask, dlossw_list, depth_values, loss_rate=args.lossrate
        )
        return loss, (outputs, depth_loss_dict)

    # BUG FIX: the old code called `ops.grad(loss, optimizer.parameters)`, but
    # ops.grad expects a *callable*, not an already-computed loss Tensor, so
    # the backward pass was broken. Differentiate a forward closure w.r.t.
    # the optimizer's weights instead.
    grad_fn = ms.value_and_grad(forward_fn, grad_position=None,
                                weights=optimizer.parameters, has_aux=True)
    (loss, (outputs, depth_loss_dict)), grads = grad_fn()
    optimizer(grads)
    print("In train sample loss is ",loss)
    print(type(loss))
    # Metrics on the final (last-iteration) depth estimate.
    outputs_depth = outputs["depth"]
    depth_est = outputs_depth[-1]
    depth_loss = depth_loss_dict["l{}".format(args.iters)]
    last_stage = args.last_stage
    scalar_outputs = {
        "loss": loss,
        "depth_loss": depth_loss,
        "abs_depth_error": AbsDepthError_metrics(depth_est, depth_gt[last_stage], mask[last_stage] > 0.5),
        "thres2mm_error": Thres_metrics(depth_est, depth_gt[last_stage], mask[last_stage] > 0.5, 2),
        "thres4mm_error": Thres_metrics(depth_est, depth_gt[last_stage], mask[last_stage] > 0.5, 4),
        "thres8mm_error": Thres_metrics(depth_est, depth_gt[last_stage], mask[last_stage] > 0.5, 8),
    }
    for i in range(args.iters + 1):
        scalar_outputs[f"l{i}"] = depth_loss_dict[f"l{i}"]
    # if args.usingrefine:
    #     for i in range(args.iters - 3):
    #         scalar_outputs[f"l_refine{i}"] = depth_loss_dict_refine[f"l{i}"]  # TODO: depth_loss_dict_refine does not exist yet
    # Image outputs for visualization/logging.
    image_outputs = {
        "depth_est": depth_est * mask[last_stage],
        "depth_est_nomask": depth_est,
        "depth_gt": depth_gt,
        "ref_img": imgs[:, 0],
        "mask": mask,
        "errormap": ops.abs(depth_est - depth_gt[last_stage]) * mask[last_stage],
    }
    return loss.asnumpy().item(), tensor2float(scalar_outputs), tensor2numpy(image_outputs)


@make_nograd_func
def test_sample_depth(model, model_loss, sample, args):
    """Evaluate one batch without gradients.

    Returns (loss_float, scalar_outputs, image_outputs): the scalar metrics
    as floats and the visualization tensors as numpy arrays.
    """
    model.set_train(False)  # eval mode
    imgs = sample["imgs"]
    proj_matrices = sample["proj_matrices"]
    depth_values = sample["depth_values"]
    depth_gt = sample["depth"]
    mask = sample["mask"]

    # Network inference.
    outputs = model(imgs, proj_matrices, depth_values)
    pred_depths = outputs["depth"]

    # Per-iteration loss weights: 1/2/3 across the three GRU stages, 4 for
    # the final prediction.
    gru_iters = [int(v) for v in args.GRUiters.split(",")]
    weights = []
    for w, n in zip((1, 2, 3), gru_iters):
        weights.extend([w] * (n + 1))
    weights.append(4)
    loss, depth_loss_dict = model_loss(pred_depths, depth_gt, mask, weights, depth_values, loss_rate=args.lossrate)

    # Final depth estimate and the validity mask of the last stage.
    depth_est = pred_depths[-1]
    stage = args.last_stage
    valid = mask[stage] > 0.5
    gt = depth_gt[stage]

    # Depth-error metrics.
    scalar_outputs = {
        "loss": loss,
        "depth_loss": depth_loss_dict[f"l{args.iters}"],
        "abs_depth_error": AbsDepthError_metrics(depth_est, gt, valid),
    }
    for key, th in (("thres2mm_error", 0.125), ("thres4mm_error", 0.25),
                    ("thres8mm_error", 0.5), ("thres14mm_error", 1),
                    ("thres20mm_error", 20)):
        scalar_outputs[key] = Thres_metrics(depth_est, gt, valid, th)
    for key, rng in (("thres2mm_abserror", [0.0, 2.0]),
                     ("thres4mm_abserror", [2.0, 4.0]),
                     ("thres8mm_abserror", [4.0, 8.0]),
                     ("thres14mm_abserror", [8.0, 14.0]),
                     ("thres20mm_abserror", [14.0, 20.0]),
                     ("thres>20mm_abserror", [20.0, 1e5])):
        scalar_outputs[key] = AbsDepthError_metrics(depth_est, gt, valid, rng)
    for i in range(args.iters + 1):
        scalar_outputs[f"l{i}"] = depth_loss_dict[f"l{i}"]

    # Image outputs for visualization.
    image_outputs = {
        "depth_est": depth_est * valid,
        "depth_est_nomask": depth_est,
        "depth_gt": depth_gt,
        "ref_img": imgs[:, 0],
        "mask": mask,
        "errormap": ops.abs(depth_est - gt) * valid,
    }

    return (
        scalar_outputs["loss"].asnumpy().item(),
        tensor2float(scalar_outputs),
        tensor2numpy(image_outputs)
    )

if __name__ == '__main__':
    # parse arguments and check
    args = parser.parse_args()
    context.set_context(device_target="Ascend")
    # context.set_context(mode=context.GRAPH_MODE)
    # ms.set_context(device_target="CPU")
    # ms.set_context(debug_level=ms.context.DEBUG)
    # ms.set_context(print_file_path = './save_print_data')
    
    # If debugging is needed, printing parameters inside the network requires
    # dynamic-graph (PyNative) mode.
    ms.set_context(mode=ms.PYNATIVE_MODE)
    # ms.set_context(pynative_synchronize=True)
    if args.resume:
        # Resume picks the latest checkpoint from logdir, so an explicit
        # --loadckpt would be ambiguous; it must be train mode without one.
        assert args.mode == "train"
        assert args.loadckpt is None
    if args.testpath is None:
        args.testpath = args.trainpath
    # Single-process run: rank 0 handles all logging/checkpointing.
    rank_id = 0
    if args.mode == "train":
        if not os.path.isdir(args.logdir):
            os.makedirs(args.logdir)
        current_time_str = str(datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
        print("current time", current_time_str)
        print("creating new summary file")
    print("argv:", sys.argv[1:])
    print_args(args)   
    set_random_seed(args.seed)
    

    # model, optimizer
    model = Effi_MVS_plus(args, refine=False)
    # model_loss = cas_raft_1to8_loss_smooth_dispmode
    model_loss = mvs_loss

    # optimizer = optim.AdamW(model.parameters(), lr=args.lr,weight_decay=args.wd, eps=1e-8)
    optimizer = AdamWeightDecay(params=model.trainable_params(),learning_rate=args.lr,weight_decay=args.wd)

    # load parameters
    start_epoch = 0
    if (args.mode == "train" and args.resume) or (args.mode == "test" and not args.loadckpt):
        # use the latest checkpoint file
        # NOTE(review): assumes logdir contains at least one "*_<epoch>.ckpt"
        # file named model_<epoch>.ckpt; an empty dir raises IndexError below.
        ckpts = [fn for fn in os.listdir(args.logdir) if fn.endswith(".ckpt")]
        ckpts = sorted(ckpts, key=lambda x: int(x.split('_')[-1].split('.')[0]))
        latest_ckpt = os.path.join(args.logdir, ckpts[-1])
        print("resuming", latest_ckpt)
        param_dict = load_checkpoint(latest_ckpt)
        load_param_into_net(model, param_dict)
        # The epoch index is encoded in the checkpoint filename.
        start_epoch = int(ckpts[-1].split('_')[-1].split('.')[0]) + 1
    elif args.loadckpt:
        # load checkpoint file specified by args.loadckpt
        print("loading model {}".format(args.loadckpt))
        param_dict = load_checkpoint(args.loadckpt)
        load_param_into_net(model, param_dict)

    if rank_id == 0:
        total_params = sum([p.size for p in model.get_parameters()])
        print(f"start at epoch {start_epoch}")
        print(f'Number of model parameters: {total_params}')
    # dataset, dataloader
    MVSDataset = find_dataset_def(args.dataset)
    train_dataset = MVSDataset(args.trainpath, args.trainlist, "train", args.trainviews, args.numdepth, args.interval_scale, args.dispmaxfirst)
    test_dataset = MVSDataset(args.testpath, args.testlist, "test", args.testviews, args.numdepth, args.interval_scale, args.dispmaxfirst)

    # Training pipeline: shuffled fixed-size batches (sharded when distributed).
    TrainImgLoader = GeneratorDataset(
        train_dataset,
        column_names=["imgs", "proj_matrices", "depth", "depth_values", "mask","viewid","scanid"],
        shuffle=True,
        num_shards=get_group_size() if is_distributed else None,
        shard_id=get_rank() if is_distributed else None
    ).batch(args.batch_size, drop_remainder=True)

    # Test pipeline: deterministic order, keep the trailing partial batch.
    TestImgLoader = GeneratorDataset(
        test_dataset,
        column_names=["imgs", "proj_matrices", "depth", "depth_values", "mask","viewid","scanid"],
        shuffle=False,
        num_shards=get_group_size() if is_distributed else None,
        shard_id=get_rank() if is_distributed else None
    ).batch(args.batch_size, drop_remainder=False)
    
    EvalImgLoader = None  # could be built from a val_dataset the same way

    # Learning-rate scheduler (MindSpore has no OneCycleLR; customize if needed)
    lr_scheduler = None

    # Summary logger (optional)
    if args.mode == "train" and rank_id == 0:
        summary_collector = SummaryCollector(summary_dir=args.logdir)
    else:
        summary_collector = None
    if args.mode in ["train", "finetune"]:
        train(model, model_loss, optimizer, TrainImgLoader, TestImgLoader, EvalImgLoader, lr_scheduler, start_epoch, args)
    elif args.mode == "test":
        test(model, model_loss, TestImgLoader, args)
    else:
        raise NotImplementedError