import argparse
import logging
import os
from glob import glob

import numpy as np
from omegaconf import OmegaConf
import pandas as pd
import torch
from PIL import Image
from tqdm.auto import tqdm

from marigold_inpaint.controlnet import ControlNetMarigoldPipeline
from marigold_inpaint.utils import aligin_and_erase
from marigold.marigold_pipeline import MarigoldDepthOutput
from marigold_inpaint.base_dataset import BaseDataset
from src.util.metric import MetricTracker
from src.util import metric

# Image file extensions accepted as inputs.
# NOTE(review): not referenced anywhere in this chunk — possibly dead, or used elsewhere.
EXTENSION_LIST = [".jpg", ".jpeg", ".png"]
import sys
from safetensors.torch import load_file
import trimesh
from torch.utils.data import ConcatDataset, DataLoader, Subset
from src.util.logging_util import config_logging

# Simulated command-line arguments so the script can be run/debugged without a shell.
# NOTE(review): this overrides any real CLI invocation; remove before production use.
_FAKE_ARGV_PAIRS = (
    ("--checkpoint", "checkpoint/marigold-inpaint"),
    ("--denoise_steps", "50"),
    ("--ensemble_size", "10"),
    ("--data_dir", "/mnt/new/liufenglin/DL3DV-ALL-960P"),
    ("--data_info", "/mnt/new/liufenglin/lishiyang/Marigold_Inpaint/test.csv"),
    ("--output_dir", "./test_output"),
)
sys.argv = ["test_controlnet.py"]  # simulated script name
for _flag, _value in _FAKE_ARGV_PAIRS:
    sys.argv.append(_flag)
    sys.argv.append(_value)

if "__main__" == __name__:
    # -------------------- Arguments --------------------
    parser = argparse.ArgumentParser(description="Infer depth using ControlNetMarigold.")
    parser.add_argument(
        "--checkpoint",
        type=str,
        required=True,
        help="Path to the checkpoint.",
    )
    parser.add_argument(
        "--denoise_steps",
        type=int,
        default=50,
        help="Number of denoising steps.",
    )
    parser.add_argument(
        "--ensemble_size",
        type=int,
        default=10,
        help="Ensemble size.",
    )
    parser.add_argument(
        "--data_dir",
        type=str,
        required=True,
        help="Path to the data directory.",
    )
    parser.add_argument(
        "--data_info",
        type=str,
        required=True,
        help="Path to the data info file.",
    )
    parser.add_argument(
        "--half_precision",
        "--fp16",
        action="store_true",
        help="Run with half-precision (16-bit float), might lead to suboptimal result.",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=None,
        help="Reproducibility seed. Set to `None` for unseeded inference.",
    )
    
    # resolution setting
    parser.add_argument(
        "--processing_res",
        type=int,
        default=512,
        help="Maximum resolution of processing. 0 for using input image resolution. Default: 768.",
    )
    parser.add_argument(
        "--output_processing_res",
        action="store_true",
        help="When input is resized, out put depth at resized operating resolution. Default: False.",
    )
    parser.add_argument(
        "--resample_method",
        choices=["bilinear", "bicubic", "nearest"],
        default="bilinear",
        help="Resampling method used to resize images and depth predictions. This can be one of `bilinear`, `bicubic` or `nearest`. Default: `bilinear`",
    )
    parser.add_argument(
        "--test_num",
        type=int,
        default=100,
        help="Number of cases to show.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="Output directory.",
    )
    # parser.add_argument(
    #     "--batch_size",
    #     type=int,
    #     default=0,
    #     help="Inference batch size. Default: 0 (will be set automatically).",
    # )
    
    
    args = parser.parse_args()

    cfg = OmegaConf.load("config/logging.yaml")
    config_logging(cfg.logging, out_dir='eval')

    checkpoint_path = args.checkpoint
    output_dir = args.output_dir
    denoising_steps = args.denoise_steps
    ensemble_size = args.ensemble_size
    if ensemble_size > 15:
        logging.warning("Running with large ensemble size will be slow.")
    resample_method = args.resample_method
    processing_res = args.processing_res
    match_input_res = not args.output_processing_res
    if 0 == processing_res and match_input_res is False:
        logging.warning(
            "Processing at native resolution without resizing output might NOT lead to exactly the same resolution, due to the padding and pooling properties of conv layers."
        )
    seed = args.seed
    # batch_size = args.batch_size
    batch_size = 1
    half_precision = args.half_precision
    test_num = args.test_num
    if half_precision:
        dtype = torch.float16
        variant = "fp16"
        logging.info(
            f"Running with half precision ({dtype}), might lead to suboptimal result."
        )
    else:
        dtype = torch.float32
        variant = None
    
    # -------------------- Preparation --------------------
    # Output directories
    output_dir_tif = os.path.join(output_dir, "depth_bw")
    output_dir_depth_pred = os.path.join(output_dir, "depth_pred")
    output_dir_depth_gt = os.path.join(output_dir, "depth_gt")
    output_dir_ply = os.path.join(output_dir, "depth_ply")
    output_dir_mask = os.path.join(output_dir, "mask")
    output_dir_rgb = os.path.join(output_dir, "rgb")
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(output_dir_tif, exist_ok=True)
    os.makedirs(output_dir_depth_pred, exist_ok=True)
    os.makedirs(output_dir_depth_gt, exist_ok=True)
    os.makedirs(output_dir_ply, exist_ok=True)
    os.makedirs(output_dir_mask, exist_ok=True)
    os.makedirs(output_dir_rgb, exist_ok=True)
    logging.info(f"output dir = {output_dir}")

    # -------------------- Device --------------------
    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
        logging.warning("CUDA is not available. Running on CPU will be slow.")
    logging.info(f"device = {device}")

    # -------------------- Data --------------------
    test_dataset = BaseDataset(
        filename_ls_path=args.data_info,
        dataset_dir=args.data_dir,
        H=336,
        W=512,
        random_mask_prob=0,
    )
    sub_test_dataset = Subset(test_dataset, list(range(test_num)))
    test_loader = DataLoader(
        dataset=sub_test_dataset,
        batch_size=batch_size,
        num_workers=1,
        shuffle=False,
    )
    # -------------------- Model --------------------

    pipe: ControlNetMarigoldPipeline = ControlNetMarigoldPipeline.from_pretrained(
        checkpoint_path, variant=variant, torch_dtype=dtype
    )
    try:
        pipe.enable_xformers_memory_efficient_attention()
    except ImportError:
        pass  # run without xformers
    
    pipe = pipe.to(device)
    logging.info(
        f"scale_invariant: {pipe.scale_invariant}, shift_invariant: {pipe.shift_invariant}"
    )

    # Print out config
    logging.info(
        f"Inference settings: checkpoint = `{checkpoint_path}`, "
        f"with denoise_steps = {denoising_steps or pipe.default_denoising_steps}, "
        f"ensemble_size = {ensemble_size}, "
        f"processing resolution = {processing_res or pipe.default_processing_resolution}, "
        f"seed = {seed}; "
    )

    # -------------------- Inference and saving --------------------
    intrinsics = {
                'fx': 500,  # 焦距x
                'fy': 500,  # 焦距y
                'cx': 320,  # 光心x
                'cy': 240   # 光心y
    }
    metric_keys = ['rmse_linear','rmse_log','squared_relative_difference','delta1_acc']
    metric_funs = [getattr(metric, key) for key in metric_keys]
    test_metric = MetricTracker(*metric_keys)
    eval_list = []
    eval_data_dir = "/mnt/new/liufenglin/lishiyang/Marigold_Inpaint/eval_data"
    
    with torch.no_grad():
        assert 1 == test_loader.batch_size
        for index, batch in enumerate(tqdm(test_loader, desc="inpaint depth", leave=True)):
            print(f'Processing {batch["rgb_name"][0]}...')
            
            # Read input image
            rgb_int = batch["rgb_int"]  # [B, 3, H, W]
            mask_in = batch["mask"]
            # GT depth
            depth_raw_ts = batch["depth"].squeeze()
            depth_raw = depth_raw_ts.cpu().numpy()
            depth_raw_ts = depth_raw_ts.to(device)
            depth_in = batch["masked_depth"]
            
            valid_mask = np.ones_like(depth_raw).squeeze().astype(int)
            valid_mask_ts = torch.from_numpy(valid_mask).to(device)
            if seed is None:
                generator = None
            else:
                generator = torch.Generator(device)
                generator.manual_seed(seed)

            # Predict depth
            pipe_out: MarigoldDepthOutput = pipe(
                rgb_int,
                depth_in,
                mask_in,
                denoising_steps=denoising_steps,
                ensemble_size=ensemble_size,
                processing_res=processing_res,
                match_input_res=match_input_res,
                generator=generator,
                batch_size=0,  # use batch size 1 to increase reproducibility
                color_map="Spectral",
                show_progress_bar=True,
                resample_method=resample_method,
            )

            depth_pred: np.ndarray = pipe_out.depth_np
            
            # clip to d > 0 for evaluation
            depth_pred = np.clip(depth_pred, a_min=1e-6, a_max=None)

            depth_pred_aligin = aligin_and_erase(batch["depth"].squeeze(), depth_pred.squeeze(), batch["mask"].squeeze())
            
            for i in range(len(metric_keys)):
                test_metric.update(metric_keys[i], metric_funs[i](torch.tensor(depth_pred_aligin), batch["depth"].squeeze()))
            
            logging.info(f"Index {index} Test metric: {test_metric.result()}")
            test_save_dir = os.path.join(eval_data_dir, batch['rgb_name'][0])
            os.makedirs(test_save_dir, exist_ok=True)
            np.save(os.path.join(test_save_dir, "batch.npy"),batch["depth"].squeeze().cpu().numpy())
            Image.fromarray(batch["rgb_int"].squeeze().cpu().numpy().astype(np.uint8)).save(os.path.join(test_save_dir, "rgb.jpg"))
            Image.fromarray(batch["mask"].squeeze().cpu().numpy().astype(np.uint8)).save(os.path.join(test_save_dir, "mask.jpg"))
            eval_list.append(test_save_dir)
    
    csv_save_path = os.path.join(eval_data_dir, "eval_list.csv")
    #save csv, and col of csv is 'path'
    pd.DataFrame(eval_list, columns=['path']).to_csv(csv_save_path, index=False)
            
    