# @Time: 2023/6/2 15:43
# @Author: liyuhan
# @File: test.py
import torch
import cv2
import numpy as np
import imageio
from huggingface_hub import snapshot_download
from transformers import CLIPImageProcessor, Blip2Processor, Blip2ForConditionalGeneration
from PIL import Image
import os

# Pick the compute device once at import time: prefer the GPU when CUDA is
# available, otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

if __name__ == '__main__':
    # Scratch/experiment script: every section below except the final "data"
    # section is commented-out one-off code kept as a reference log.
    # print(torch.load('dpt_weights/dpt_hybrid-midas-501f0c75.pt', map_location=torch.device('cpu')))
    # snapshot_download(repo_id="openai/clip-vit-large-patch14",
    #                   # allow_patterns=["*.json", "pytorch_model.bin", "vocab.txt"],
    #                   local_dir="./my_model/")

    # ------------ CLIPImageProcessor -----------------
    # prompt_img = Image.open('D:\Make-It-3D-master\img\img.png')
    # processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14",
    #                                                allow_patterns=["*.json", "pytorch_model.bin"],
    #                                                cache_dir='./pretrained_model')
    # img_input = processor(images=prompt_img, return_tensors='pt')  # input is a PIL.Image.Image; yields the preprocessed image tensors
    # print(img_input)
    # ------------ CLIPImageProcessor -----------------

    # ------------ BLIP-2 --------------------
    # print('loading!')
    # processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b", cache_dir='/mnt/Make-It-3D-master/transformer_model')
    # blip_model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b",
    #                                                            # torch_dtype=torch.float16,
    #                                                            cache_dir='/mnt/Make-It-3D-master/transformer_model').to(device)
    # print("successful!")
    # ------------ text2img --------------------

    # ------------- count H -----------
    # target_poses = np.array([[
    #     [1, 2, 8, 2],
    #     [4, 5, 7, 3],
    #     [2, 3, 5, 9],
    #     [0, 0, 0, 1]
    # ]])
    # front_poses = np.array([[
    #     [4, 1, 8, 2],
    #     [3, 5, 9, 3],
    #     [5, 3, 5, 9],
    #     [0, 0, 0, 1]
    # ]])
    # target_t = np.array([[0., 0.86638254, 0.14148566],
    #                      [-0.86638254, 0., 0.30224228],
    #                      [-0.14148566, -0.30224228, 0.]],
    #                     )
    # target_r = np.array([[0.9441948, -0.05019601, 0.32554033],
    #                      [0., -0.9883202, -0.15239194],
    #                      [-0.32938752, -0.1438877, 0.93316674]]
    #                     )
    # target_pose = np.array([[
    #     [0.9441948, -0.05019601, 0.32554033, -0.30224228],
    #     [0., -0.9883202, -0.15239194, 0.14148566],
    #     [-0.32938752, -0.1438877, 0.93316674, -0.86638254],
    #     [0, 0, 0, 1]
    # ]])
    # H = target_t @ target_r
    # H = np.where(np.abs(H) > 1e-5, H, 0)
    # # target_t = np.array([0.14148566, 0.30224228, 0.86638254])
    # # H = np.cross(target_t, target_r)
    # co1 = [[i, 512 - j, 1] for i in range(512) for j in range(512)]
    # co1 = np.array(co1).T
    # # apply the mapping
    # co2 = (H @ co1).T  # mapped homogeneous xyz coordinates
    # # normalize by the z component
    # print(co1.shape)
    # print(co2.shape)
    # # append co1 after co2
    # co2 = np.concatenate((co2, co1.T), axis=-1)
    # normalized_co2 = [[int(p[0] / p[2]), 512 - int(p[1] / p[2]), p[3], 512 - p[4]] for p in co2 if
    #                   p[2] != 0 and p[0] / p[2] >= 0 and p[0] / p[2] < 512 and p[1] / p[2] >= 0 and p[1] / p[2] < 512]
    # print(normalized_co2)
    # H = np.array(target_pose[:, :3, :])
    # H = np.delete(H, obj=2, axis=-1)
    # print(H)
    # front_poses = np.linalg.inv(front_poses)
    # print(front_poses)
    # H = target_poses @ front_poses
    # print(H)
    # ------------- count H -----------

    # ------------- depth ---------------
    # np.set_printoptions(threshold=np.inf)
    # im_depth = (imageio.imread('D:\Make-It-3D-master\\results\\test3\depth.png') / 65535.) * 255
    # depth = np.array(im_depth).astype(np.float32)
    # depth = torch.from_numpy(depth)
    # print(depth, torch.max(depth), torch.min(depth), depth[255, 255])

    # ------------- image ------------------
    # ref_path = 'D:\Make-It-3D-master\img\img.png'
    #
    # ref_imgs = cv2.imread(ref_path, cv2.IMREAD_UNCHANGED)  # [H, W, 3] or [H, W, 4]; IMREAD_UNCHANGED also loads the alpha channel
    # image_pil = Image.open(ref_path).convert("RGB")
    # imgs = cv2.cvtColor(ref_imgs, cv2.COLOR_BGRA2RGBA)
    # imgs = cv2.resize(imgs, (512, 512), interpolation=cv2.INTER_AREA)
    # # print(imgs.shape)
    # ref_imgs = (torch.from_numpy(imgs) / 255.).unsqueeze(0).permute(0, 3, 1, 2).to(device)  # RGBA image normalized to [0, 1]
    # ori_imgs = ref_imgs[:, :3, :, :] * ref_imgs[:, 3:, :, :] + (1 - ref_imgs[:, 3:, :, :])  # composite onto a white background (value 1, i.e. 255)
    # # print(ori_imgs.shape)
    # mask = imgs[:, :, 3:]  # take the alpha channel of imgs
    # kernel = np.ones(((5, 5)), np.uint8)
    # mask = cv2.erode(mask, kernel, iterations=1)
    # mask = (mask == 0)
    # mask = (torch.from_numpy(np.uint8(mask))).unsqueeze(0).unsqueeze(0).to(device)
    # depth_mask = mask

    # ndarr = ref_imgs.mul(255).add_(0.5).clamp_(0, 255).squeeze(0).permute(1, 2, 0).to("cpu", torch.uint8).numpy()
    # im = Image.fromarray(ndarr)
    # im.save('./ref_img.png')
    # ndarr = ori_imgs.mul(255).add_(0.5).clamp_(0, 255).squeeze(0).permute(1, 2, 0).to("cpu", torch.uint8).numpy()
    # im = Image.fromarray(ndarr)
    # im.save('./ori_img.png')
    # ndarr = depth_mask.mul(255).squeeze(0).squeeze(0).to("cpu").numpy()  # a grayscale image must be saved as [512, 512]; an RGB image as [512, 512, 3]
    # im = Image.fromarray(np.uint8(ndarr))
    # im.save('./depth_mask.png')

    # ------------ depth ----------------
    # disparity = imageio.imread(os.path.join('D:\\Make-It-3D-master\\results\\test3', 'depth.png')) / 65535.
    # depth = 1. / np.maximum(disparity, 1e-2)
    #
    # depth_prediction = torch.tensor(depth, device=device)
    # depth_mask = torch.tensor(depth_mask, device=device)
    # im = Image.fromarray(np.uint8(depth_mask * 255).squeeze(0).squeeze(0))
    # im.save('./depth_mask.png')
    # # normalize estimated depth
    # depth_prediction = depth_prediction * (~depth_mask) + torch.ones_like(depth_prediction) * (depth_mask)
    # depth_prediction = ((depth_prediction - 1.0) / (depth_prediction.max() - 1.0)) * 0.9 + 0.1  # rescale: masked value 1.0 -> 0.1, max -> 1.0
    # im = Image.fromarray(np.uint8(depth_prediction * 255).squeeze(0).squeeze(0))
    # im.save('./depth_predict.png')

    # data — the only section that actually executes.
    bg_color = torch.ones(3)  # [3]; constant all-ones (white) background — NOTE(review): the original comment said "frame-wise random", but this value never varies
    print(bg_color)
