import torch
import io
import base64
import os

from IPython.display import display
from PIL import Image
from openai import OpenAI
from dotenv import load_dotenv, find_dotenv

from ram.models import ram_plus, ram
from ram import inference_ram as inference
from ram import get_transform
from GroundingDINO.groundingdino.util.inference import load_model, load_image, predict, annotate, Model
import cv2

# Pick the compute device: prefer GPU, fall back to CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print(device)

# Generally, a larger input resolution lets the tagger resolve finer image
# detail, but it also raises the risk of spurious recognitions.
image_size = 384
transform = get_transform(image_size=image_size)

# Plain RAM variant, kept for reference:
# model = ram(pretrained="models/ram_swin_large_14m.pth",
#             image_size=image_size,
#             vit='swin_l')

# Build RAM++, move it to the selected device, and switch to eval mode
# (.to() and .eval() both return the module, so the calls chain).
model = ram_plus(
    pretrained="models/ram_plus_swin_large_14m.pth",
    image_size=image_size,
    vit='swin_l',
).to(device).eval()

image_path = "data_examples/test.jpg"
# Force 3-channel RGB: the RAM transform expects RGB input, and images on
# disk may decode as grayscale, palette, or RGBA otherwise.
image_pil = Image.open(image_path).convert("RGB")
image = transform(image_pil).unsqueeze(0).to(device)

# Pure inference — disable autograd to cut memory use and speed up the pass.
with torch.no_grad():
    recog_res = inference(image, model)

display(image_pil)
# recog_res is (english_tags, chinese_tags) as produced by RAM's inference.
print("Image Tags: ", recog_res[0])
print("图像标签: ", recog_res[1])

# CONFIG_PATH = "GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py"
# CHECKPOINT_PATH = "models/groundingdino_swint_ogc.pth"
# model = load_model(CONFIG_PATH, CHECKPOINT_PATH)
#
# image_path = "data_examples/test.jpg"
# image_source, image = load_image(image_path)
# # "bicycle. man. passenger train. railroad. ride. rural. track. train. train track"
# TEXT_PROMPT = recog_res[0].replace(" | ", ". ")
# BOX_TRESHOLD = 0.25
# TEXT_TRESHOLD = 0.25
# boxes, logits, phrases = predict(
#     model=model,
#     image=image,
#     caption=TEXT_PROMPT,
#     box_threshold=BOX_TRESHOLD,
#     text_threshold=TEXT_TRESHOLD,
#     device=device,
# )
# annotated_frame = annotate(image_source=image_source,
#                            boxes=boxes, logits=logits, phrases=phrases)
# annotated_frame = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
# annotated_frame = Image.fromarray(annotated_frame)
# print(TEXT_PROMPT)
# print(boxes, logits, phrases)
#
# display(annotated_frame)
#
#
# def convertDINO2GPT(boxes, phrases):
#     return ", ".join(f"{phrases[i]}: {boxes[i].numpy()}" for i in range(len(phrases)))
#
#
# bbox_query = convertDINO2GPT(boxes, phrases)
# print(bbox_query)
#
# # 加载 .env 文件中定义的环境变量
# _ = load_dotenv(find_dotenv())
#
#
# # Function to encode the image to base64
# def encode_image_to_base64(image):
#     buffered = io.BytesIO()
#     image.save(buffered, format="JPEG")
#     return base64.b64encode(buffered.getvalue()).decode('utf-8')
#
#
# # Function to query GPT-4 Vision
# def query_gpt4_vision(messages, api_key=os.getenv('OPENAI_API_KEY')):
#     client = OpenAI(api_key=api_key)
#
#     response = client.chat.completions.create(
#         model="gpt-4o",
#         messages=messages,
#         max_tokens=4096,
#     )
#     return response.choices[0].message.content
#
#
# # Function to query GPT-4 Vision
# def query_gpt4_text(messages, api_key=os.getenv('OPENAI_API_KEY')):
#     client = OpenAI(api_key=api_key)
#
#     response = client.chat.completions.create(
#         model="gpt-4",
#         messages=messages,
#         max_tokens=2048,
#     )
#     return response.choices[0].message.content
#
#
# from IPython.display import display
#
# system_message_description = f"""你是一个功能强大的中文图像描述器。请创建详细的描述，阐述给定图片的内容。包括物体的类型和颜色、计算物体的数量、物体的动作、物体的精确位置、图片中的文字、核对物体之间的相对位置等。不要描述虚构的内容，只描述从图片中可以确定的内容。在描述内容时，不要以清单形式逐项列出。尽可能最小化审美描述。"""
#
# image_path = "data_examples/test.jpg"
# image = Image.open(image_path)
# base64_image = encode_image_to_base64(image)
# messages = [{"role": "system", "content": system_message_description}, {"role": "user", "content": [
#     {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}]}]
# gpt4v_description = query_gpt4_vision(messages)
#
# display(image)
#
# print(gpt4v_description)
