
# coding: utf-8

from WeConvene.eval_wave_residual_two_entropy_modified_y_downsample_8 import *
from WeConvene_with_Channel import WeConveneWithChannel
from config import Config
from compressai.zoo import models

def parse_args():
    """Build and parse the command-line arguments for the evaluation script.

    Bug fixed: the original code created a *second* ``ArgumentParser`` near
    the end of this function, which silently discarded every option
    registered on the first one (channel type, SNR, training options, ...).
    All options now live on a single parser.  ``--dataset`` is therefore no
    longer ``required`` so the evaluation-only invocation keeps working.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Example testing script.")
    # --- wireless channel options ---
    parser.add_argument(
        "--channel-type", type=str, default="awgn",
        choices=["awgn", "rayleigh"], help="wireless channel model, awgn or rayleigh."
    )
    parser.add_argument(
        "--multiple-snr", type=str, default='5',
        help="random or fixed snr (dB)."
    )
    # --- model / training options (previously discarded, now honoured) ---
    parser.add_argument(
        "-m",
        "--model",
        default="bmshj2018-factorized",
        choices=models.keys(),
        help="Model architecture (default: %(default)s)",
    )
    parser.add_argument(
        "-d", "--dataset", type=str, default=None,
        help="Training dataset path (optional when only evaluating).",
    )
    parser.add_argument(
        "-e",
        "--epochs",
        default=50,
        type=int,
        help="Number of epochs (default: %(default)s)",
    )
    parser.add_argument(
        "-lr",
        "--learning-rate",
        default=1e-4,
        type=float,
        help="Learning rate (default: %(default)s)",
    )
    parser.add_argument(
        "-n",
        "--num-workers",
        type=int,
        default=20,
        help="Dataloaders threads (default: %(default)s)",
    )
    parser.add_argument(
        "--lambda",
        dest="lmbda",  # "lambda" is a Python keyword, so store under "lmbda"
        type=float,
        default=3,
        help="Bit-rate distortion parameter (default: %(default)s)",
    )
    parser.add_argument(
        "--batch-size", type=int, default=8, help="Batch size (default: %(default)s)"
    )
    parser.add_argument(
        "--test-batch-size",
        type=int,
        default=8,
        help="Test batch size (default: %(default)s)",
    )
    parser.add_argument(
        "--aux-learning-rate",
        type=float,  # was parsed as str before; this value is used as a learning rate
        default=1e-3,
        help="Auxiliary loss learning rate (default: %(default)s)",
    )
    parser.add_argument(
        "--patch-size",
        type=int,
        nargs=2,
        default=(256, 256),
        help="Size of the patches to be cropped (default: %(default)s)",
    )
    parser.add_argument("--cuda", action="store_true", help="Use cuda")
    parser.add_argument(
        "--save", action="store_true", default=True, help="Save model to disk"
    )
    parser.add_argument(
        "--seed", type=float, default=100, help="Set random seed for reproducibility"
    )
    parser.add_argument(
        "--clip_max_norm",
        default=1.0,
        type=float,
        help="gradient clipping max norm (default: %(default)s)",  # fixed unbalanced paren
    )
    # --- evaluation options ---
    parser.add_argument("--checkpoint", type=str, help="Path to a checkpoint")
    parser.add_argument("--N", type=int, default=128, help="The number filters")
    parser.add_argument("--data", type=str, help="Path to dataset")
    # NOTE(review): default=True means --real is effectively always on
    # (see the commented-out set_defaults below) -- kept for compatibility.
    parser.add_argument(
        "--real", action="store_true", default=True
    )
    # parser.set_defaults(real=False)
    args = parser.parse_args()
    return args


def _main():
    """Evaluate a WeConvene compression model over a folder of images.

    Builds the network (optionally restoring a checkpoint), then for every
    jpg/png/jpeg image in ``--data`` either performs a real entropy-coding
    round trip (``--real``: compress/decompress) or a plain forward pass,
    printing and averaging PSNR, (MS-)SSIM, bit-rate and timings.
    Per-image results are also dumped to an Excel sheet.

    Fixes vs. the original: timing labels now say seconds (``time.time()``
    differences are seconds, not ms), an empty image folder fails fast
    instead of raising ZeroDivisionError, and per-image metrics are computed
    once instead of three times.
    """
    args = parse_args()
    configs = Config()
    configs.CUDA = args.cuda
    configs.device = "cuda" if args.cuda else "cpu"
    # NOTE(review): these hard-coded values override whatever was passed on
    # the command line -- kept as-is for reproducibility of the experiment.
    args.channel_type = "awgn"
    args.multiple_snr = 1
    for arg in vars(args):
        print(arg, ":", getattr(args, arg))

    data = []
    columns = ["image", "PSNR", "SSIM", "Bpp"]
    p = 128  # pad images to a multiple of this (network downsampling factor)
    path = args.data
    # Same filter as the original: match on the last three characters
    # ("peg" covers .jpeg).
    img_list = [f for f in os.listdir(path) if f.endswith(("jpg", "png", "peg"))]
    if not img_list:
        # Fail fast instead of dividing by zero when averaging below.
        raise SystemExit(f"No images (jpg/png/jpeg) found in: {path}")
    device = 'cuda:0' if args.cuda else 'cpu'
    net = WeConveneWithChannel(args, configs,
                               # model_args
                               config=[2, 2, 2, 2, 2, 2],
                               head_dim=[8, 16, 32, 32, 16, 8],
                               drop_path_rate=0.0,
                               N=args.N,
                               M=320)
    net = net.to(device)
    net.eval()
    count = 0
    PSNR = 0
    Bit_rate = 0
    MS_SSIM = 0
    total_time = 0
    total_enc = 0
    total_dec = 0
    if args.checkpoint:  # load from previous checkpoint
        print("Loading", args.checkpoint)
        checkpoint = torch.load(args.checkpoint, map_location=device)
        # Strip a possible DataParallel/DistributedDataParallel "module." prefix.
        state_dict = {k.replace("module.", ""): v
                      for k, v in checkpoint["state_dict"].items()}
        net.load_state_dict(state_dict)
    if args.real:
        # Real entropy coding: measure an actual compress/decompress round trip.
        net.update()
        for img_name in img_list:
            print(img_name)
            img_path = os.path.join(path, img_name)
            img = transforms.ToTensor()(Image.open(img_path).convert('RGB')).to(device)
            x = img.unsqueeze(0)
            x_padded, padding = pad(x, p)
            count += 1
            with torch.no_grad():
                if args.cuda:
                    torch.cuda.synchronize()  # accurate GPU timing
                s = time.time()
                out_enc = net.compress(x_padded)
                enc_time = time.time() - s
                dec_start = time.time()
                out_dec = net.decompress(out_enc["strings"], out_enc["shape"])
                if args.cuda:
                    torch.cuda.synchronize()
                e = time.time()
                dec_time = e - dec_start
                total_time += (e - s)
                total_enc += enc_time
                total_dec += dec_time
                out_dec["x_hat"] = crop(out_dec["x_hat"], padding)

                num_pixels = x.size(0) * x.size(2) * x.size(3)

                # NOTE(review): decoded images go to a hard-coded,
                # machine-specific folder -- TODO make this configurable.
                dec_img = toPilImage(out_dec["x_hat"])
                folder = r"F:\PCL月报\月报\2025年\25-06月\实验记录\WeConvene\CLIC\0.05_snr1"
                os.makedirs(folder, exist_ok=True)
                img_save_path = os.path.join(folder, img_name.replace(".png", "_dec.png"))
                dec_img.save(img_save_path)

                # Compute each metric once (was recomputed three times).
                bpp = sum(len(st[0]) for st in out_enc["strings"]) * 8.0 / num_pixels
                ssim_val = compute_ssim(x, out_dec["x_hat"])
                psnr_val = compute_psnr(x, out_dec["x_hat"])
                print(f'Bitrate: {bpp:.3f}bpp')
                print(f'SSIM: {ssim_val:.2f}')
                print(f'PSNR: {psnr_val:.2f}dB')
                Bit_rate += bpp
                PSNR += psnr_val
                MS_SSIM += ssim_val

                # Per-image row for the Excel export below.
                data.append([img_name, psnr_val, ssim_val, bpp])

    else:
        # No entropy coding: forward pass, bit-rate estimated from likelihoods.
        for img_name in img_list:
            print(img_name)
            img_path = os.path.join(path, img_name)
            img = Image.open(img_path).convert('RGB')
            x = transforms.ToTensor()(img).unsqueeze(0).to(device)
            x_padded, padding = pad(x, p)
            count += 1
            with torch.no_grad():
                if args.cuda:
                    torch.cuda.synchronize()
                s = time.time()
                out_net = net.forward(x_padded)
                if args.cuda:
                    torch.cuda.synchronize()
                e = time.time()
                total_time += (e - s)
                out_net['x_hat'].clamp_(0, 1)
                out_net["x_hat"] = crop(out_net["x_hat"], padding)
                psnr_val = compute_psnr(x, out_net["x_hat"])
                ssim_val = compute_ssim(x, out_net["x_hat"])
                bpp = compute_bpp(out_net)
                print(f'PSNR: {psnr_val:.2f}dB')
                print(f'SSIM: {ssim_val:.2f}')
                print(f'Bit-rate: {bpp:.3f}bpp')
                PSNR += psnr_val
                MS_SSIM += ssim_val
                Bit_rate += bpp
    PSNR = PSNR / count
    MS_SSIM = MS_SSIM / count
    Bit_rate = Bit_rate / count
    total_time = total_time / count
    ave_enc = total_enc / count  # stays 0 in the non --real branch
    ave_dec = total_dec / count
    print(f'average_PSNR: {PSNR:.2f}dB')
    print(f'average_SSIM: {MS_SSIM:.4f}')
    print(f'average_Bit-rate: {Bit_rate:.3f} bpp')
    # time.time() deltas are seconds -- labels fixed (previously "ms").
    print(f'average_time: {total_time:.3f} s')
    print(f'encode average_time: {ave_enc:.3f} s')
    print(f'decode average_time: {ave_dec:.3f} s')

    # NOTE(review): hard-coded, machine-specific output path -- TODO make
    # this configurable.
    save_path = r"F:\PCL月报\月报\2025年\25-06月\实验记录\WeConvene\CLIC\MSE_0.05_snr1_clic.xlsx"
    save_experiments_to_excel(data=data, columns=columns, save_path=save_path)


"""
python eval.py --data D:/video-communication-dataset/图像编解码测试/CLIC/professional_valid/ --checkpoint "D:/0.05checkpoint_best.pth.tar" --cuda --real
"""

# Script entry point: run the evaluation when executed directly.
if __name__ == '__main__':
    _main()
