from transformers.image_transforms import convert_to_rgb, to_channel_dimension_format
from transformers.image_utils import to_numpy_array, infer_channel_dimension_format, ChannelDimension, \
    make_flat_list_of_images, get_image_size
from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VisionTransformerPretrainedModel
import numpy as np
import torch
import numpy as np
import matplotlib.pyplot as plt
import cv2
from PIL import Image
from transformers import AutoModelForVision2Seq, AutoProcessor
import os

# # 设置中文字体
# plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]
#
# model_path = "/home/dengyunfei/.cache/modelscope/hub/models/Qwen/Qwen2.5-VL-7B-Instruct"
# model = AutoModelForVision2Seq.from_pretrained(
#     model_path,
#     trust_remote_code=True,
#     device_map="auto",
#     torch_dtype=torch.float16  # 使用float16节省显存
# )
# processor = AutoProcessor.from_pretrained(
#     model_path,
#     trust_remote_code=True
# )
# image_path = "/data/stock_data/train4/600785.SH_20220516_20221108.png"
# image = Image.open(image_path).convert('RGB')
# img_w, img_h = image.size
# # 预处理：Qwen2.5-VL需要特定的图文格式，使用<image>标记
# # 构造输入：[{'image': image, 'text': text}]
# # inputs = processor(
# #     text=["aaaa"],
# #     images=image,
# #     return_tensors="pt",
# #     padding=True
# # ).to(model.device, dtype=torch.float16)
# # pixel_values = inputs['pixel_values']

def image_pixel_values(model, processor, image):
    """Convert an image into flattened Qwen2.5-VL vision patches and grid sizes.

    Re-implements (for inspection) the patching done by the Qwen2.5-VL image
    processor: RGB convert -> rescale -> normalize -> channels-first -> split
    into (temporal, h, w) patches and flatten.

    Args:
        model: unused; kept for caller compatibility.
        processor: a Qwen2.5-VL AutoProcessor; its ``image_processor`` supplies
            rescale_factor, image_mean, image_std, merge_size and the
            rescale/normalize helpers.
        image: a PIL image or array (or a nested list of them) accepted by
            ``make_flat_list_of_images``.

    Returns:
        (pixel_values, grid_thw): a float Tensor of shape
        (sum of grid_t*grid_h*grid_w, channel*temporal_patch_size*patch_size**2)
        and an IntTensor of per-image [grid_t, grid_h, grid_w] rows.

    NOTE(review): assumes each image's height/width are already multiples of
    patch_size * merge_size (= 28) — no resize step is performed here; confirm
    inputs are pre-sized, otherwise the reshape below will fail.
    """
    images = make_flat_list_of_images([image])
    pixel_values, vision_grid_thws = [], []
    patch_size = 14
    scale = processor.image_processor.rescale_factor  # e.g. 1/255
    mean = processor.image_processor.image_mean  # e.g. [0.48145466, 0.4578275, 0.40821073]
    std = processor.image_processor.image_std  # e.g. [0.26862954, 0.26130258, 0.27577711]
    data_format = ChannelDimension.FIRST  # model expects channels-first input
    temporal_patch_size = 2  # temporal block size; single images get padded up to it
    merge_size = processor.image_processor.merge_size  # e.g. 2
    for img in images:  # renamed: the original loop variable shadowed the parameter
        rgb = convert_to_rgb(img)  # only converts PIL images; arrays pass through
        array = to_numpy_array(rgb)
        # Detect whether channels are first or last in the raw array.
        input_data_format = infer_channel_dimension_format(array)
        height, width = get_image_size(array, channel_dim=input_data_format)
        rescaled = processor.image_processor.rescale(
            array, scale=scale, input_data_format=input_data_format
        )
        normalized = processor.image_processor.normalize(
            image=rescaled, mean=mean, std=std, input_data_format=input_data_format
        )
        right_dimension = to_channel_dimension_format(
            normalized, data_format, input_channel_dim=input_data_format
        )
        patches = np.array([right_dimension])  # shape (1, C, H, W)
        # Pad the temporal axis up to a multiple of temporal_patch_size by
        # repeating the last frame. The original repeated unconditionally,
        # which would double a frame count already divisible by
        # temporal_patch_size; upstream HF code guards it like this.
        remainder = patches.shape[0] % temporal_patch_size
        if remainder != 0:
            repeats = np.repeat(
                patches[-1][np.newaxis], temporal_patch_size - remainder, axis=0
            )
            patches = np.concatenate([patches, repeats], axis=0)
        channel = patches.shape[1]
        grid_t = patches.shape[0] // temporal_patch_size
        grid_h, grid_w = height // patch_size, width // patch_size
        patches = patches.reshape(
            grid_t,
            temporal_patch_size,
            channel,
            grid_h // merge_size,
            merge_size,
            patch_size,
            grid_w // merge_size,
            merge_size,
            patch_size,
        )
        # Reorder so spatially-merged patch groups are contiguous, matching the
        # layout the vision transformer expects.
        patches = patches.transpose(0, 3, 6, 4, 7, 2, 1, 5, 8)
        flatten_patches = patches.reshape(
            grid_t * grid_h * grid_w,
            channel * temporal_patch_size * patch_size * patch_size,
        )
        pixel_values.extend(flatten_patches)
        vision_grid_thws.append([grid_t, grid_h, grid_w])
    # Build the tensors AFTER the loop. The original returned inside the loop,
    # so only the first image was ever processed and the accumulators were moot.
    pv = torch.Tensor(np.asarray(pixel_values))  # asarray first: far faster than Tensor(list)
    thw = torch.IntTensor(vision_grid_thws)
    return pv, thw

# t,h,w=1,16,16
# # print(t, h, w, 3333333333333333)
# hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
# print(hpos_ids)

