#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
@File : test.py 
@Author : ljt
@Description: xx
@Time : 2021/7/7 15:54 
"""


import argparse
import os

# os.chdir(r"/mnt/4t/ljt/project/pet_ct_")
import math
from tqdm import tqdm
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
import numpy as np
import paddle.vision.transforms as transforms

# Project-local modules
print(os.getcwd())
from models.model import *
from utils.datasets import *
# from models.feature import *
import paddle
import piq
from patchify import patchify, unpatchify
from matplotlib import image as mpimg
import cv2 as cv
from patchfly import patchfly, unpatchfly
import glob
from PIL import Image

parser = argparse.ArgumentParser()

# (flag, type, default, help) for every command-line option.
_ARG_SPECS = [
    ("--batch_size", int, 1, "size of the batches"),
    ("--n_cpu", int, 12, "number of cpu threads to use during batch generation"),
    ("--img_height", int, 256, "size of image height"),
    ("--img_width", int, 256, "size of image width"),
    ("--channels", int, 1, "number of image channels"),
]
for _flag, _type, _default, _help in _ARG_SPECS:
    parser.add_argument(_flag, type=_type, default=_default, help=_help)
opt = parser.parse_args()

# cuda = True if torch.cuda.is_available() else False
# cuda = False

# generator = TransUnet(in_channels=1, img_dim=256, vit_blocks=1, vit_dim_linear_mhsa_block=512, classes=1)

# generator = R2AttU_Net()
# Network under test; GeneratorUNet is imported from the project-local
# models.model module (via `from models.model import *` above).
generator = GeneratorUNet()
# generator = TransUnet(in_channels=1, img_dim=256, vit_blocks=4, vit_dim_linear_mhsa_block=512, classes=1)


# if cuda:
#     generator = generator.cuda()

# Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

# Data-loader transforms: resize to (img_height, img_width) with bicubic
# interpolation, convert to tensor, then Normalize(mean=0.5, std=0.5),
# which maps pixel values into [-1, 1] (undone by `(x + 1) * 128` below).
transforms_ = [
    transforms.Resize((opt.img_height, opt.img_width), transforms.InterpolationMode.BICUBIC),
    transforms.ToTensor(),
    transforms.Normalize(0.5, 0.5),
]



# Pre-trained generator weights (paddle checkpoint, hard-coded path).
model_path = r"/mnt/4t/ljt/project/pet_ct_/generator_1500.pth"
# model_path = r"/mnt/4t/ljt/project/pet2ct/saved_models/atten_unet/generator_40.pth"
generator.load_state_dict(paddle.load(model_path))
# generator.eval()
# NOTE(review): output_path is re-assigned a few lines below, so this
# directory is created but never written to -- likely leftover code.
output_path = "output/test_images"
os.makedirs(output_path, exist_ok=True)



# Input test images and the directory the stitched outputs are saved to.
test_path = r"/mnt/4t/ljt/datasets/ai_challenge/moire/moire_testA_dataset/images"
output_path = r"work/pet2ct_paddle/output"
os.makedirs(output_path, exist_ok=True)

# Collect every test image, sorted by filename for a deterministic order.
im_list = sorted(glob.glob(os.path.join(test_path) + "/*.*"))
# The moire testA set is expected to hold exactly 200 images.
# NOTE(review): assert is stripped under `python -O`; raise instead if this
# check must always run.
assert len(im_list) == 200


def choose_step(num1, num2):
    """Choose the largest patching step, to accelerate the algorithm.

    Finds the largest common divisor of (num1 - 256) and (num2 - 256)
    that is smaller than 256; that divisor is the largest usable step.

    Args:
        num1 (int): image height; must exceed 256 for a step > 1.
        num2 (int): image width; must exceed 256 for a step > 1.

    Returns:
        int: the largest common divisor below 256, or 1 when no common
        divisor exists (e.g. either dimension is <= 256).
    """
    num1 -= 256
    num2 -= 256
    # A dimension at or below the 256-pixel patch size has no positive
    # divisors here (the original divisor lists came out empty), so fall
    # back to the minimal step, as the original did.
    if num1 <= 0 or num2 <= 0:
        print("The max cd is 1!")
        return 1
    # Every common divisor of num1 and num2 divides gcd(num1, num2), so a
    # single scan over at most 255 candidates replaces the original O(n)
    # full divisor-list construction for each dimension.
    g = math.gcd(num1, num2)
    for step in range(min(g, 255), 0, -1):
        if g % step == 0:
            return step
    # Unreachable (1 always divides g); kept to mirror the original fallback.
    print("The max cd is 1!")
    return 1

# print(im_list[0])



### test block 
# im = cv.imread(im_list[0])
# im = im[:,:,[2,1,0]]
# max_step = choose_step(im.shape[0], im.shape[1])
# im_patches = patchify(im, patch_size=(256, 256, 3), step=1)
# print(im_patches.shape)
# tmp = np.ones_like(im_patches)
# recon = unpatchify(tmp, im.shape)


# ----------------------
#  Using a large step makes the program hang inside the loop:
#  the official patchify library has a bug -- try to fix it later.
# ----------------------


# Patch-based inference: split each test image into 256x256 patches, run
# every patch through the generator, then stitch the outputs back together
# and save the full-size result.
# NOTE(review): `id` shadows the builtin of the same name.
for id in range(len(im_list)):
    # im = Image.open(im_list[id]).convert('RGB')
    im = mpimg.imread(im_list[id])
    # Swap H and W axes before patching; swapped back before saving below.
    im = np.array(im).swapaxes(0, 1)
    f_name = os.path.basename(im_list[id])
    # patchfly is a project-local helper (imported above); presumably it
    # returns a grid of (256, 256, 3) patches -- TODO confirm its contract.
    im_patches = patchfly(im, patch_size=(256, 256, 3))
    trans = transforms.Compose(transforms_)
    for i in tqdm(range(im_patches.shape[0])):
        for j in range(im_patches.shape[1]):
            im_patch = im_patches[i][j][0]
            im_patch = trans(Image.fromarray((im_patch)))
            im_patch = im_patch.unsqueeze(0)  # add batch dimension
            im_patch = generator(im_patch.cuda())
            im_patch = im_patch.squeeze(0).cpu().detach().numpy()
            # CHW -> HWC
            im_patch = im_patch.swapaxes(0, 2).swapaxes(0, 1)
            # Undo Normalize(0.5, 0.5): map [-1, 1] back to [0, 256)
            im_patch = (im_patch + 1) * 128
            im_patch = im_patch.astype('uint8')
            # Write the generated patch back over the input patch in place.
            im_patches[i][j][0] = im_patch   
    out = unpatchfly(im_patches, img_size=im.shape)
    print(out.min(), out.max())
    # Undo the H/W swap applied before patching.
    out = out.swapaxes(0, 1)
    mpimg.imsave(output_path + "/" + f_name, out)
    print("已保存第{}张测试图片".format(id))


# ----------------------
#  Inspect individual patches without stitching
# ----------------------

# for id in tqdm(range(len(im_list))):
#     # im = Image.open(im_list[id]).convert('RGB')
#     im = mpimg.imread(im_list[id])
#     im = np.array(im).swapaxes(0, 1)
#     f_name = os.path.basename(im_list[id])
#     max_step = choose_step(im.shape[0], im.shape[1])
#     # print(im.shape, max_step)
#     im_patches = patchify(im, patch_size=(256, 256, 3), step=256)    
#     trans = transforms.Compose(transforms_)
#     for i in range(im_patches.shape[0]):
#         for j in range(im_patches.shape[1]):
#             im_patch = im_patches[i][j][0]
#             im_patch = trans(Image.fromarray((im_patch)))
#             im_patch = im_patch.unsqueeze(0)
#             im_patch = generator(im_patch.cuda())
#             im_patch = im_patch.squeeze(0).cpu().detach().numpy()
#             im_patch = im_patch.swapaxes(0, 2)
#             im_patch = (im_patch + 1) * 128
#             im_patch = im_patch.astype('uint8')
#             mpimg.imsave(output_path + "/no_patch/{}_{}_{}.jpg".format(id, i, j), im_patch)
#             print(i, j)
