# -*- coding: utf-8 -*-
"""
Created on Thu Sep 18 10:36:24 2025

@author: m00819658
"""
import os
import ssl
import time
# SECURITY NOTE(review): this globally disables TLS certificate verification
# for every HTTPS connection in the process — presumably to work around an
# intercepting corporate proxy. Do not ship this outside a trusted dev box.
ssl._create_default_https_context = ssl._create_unverified_context
# Route all Hugging Face Hub downloads through the hf-mirror.com mirror.
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

def test_transformers():
    """Smoke-test loading a DeepSeek-R1 distilled model and generating text.

    Downloads the model through the configured HF endpoint, then samples a
    short reply to a Chinese self-introduction prompt and prints it.
    """
    import torch
    from transformers import AutoTokenizer, AutoModelForCausalLM

    model_name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"  # official org is deepseek-ai
    # model_name = 'deepseek-ai/DeepSeek-R1-Distill-Llama-8B'

    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.bfloat16,   # or "auto"
        device_map="auto",            # spread across available GPUs / CPU automatically
        trust_remote_code=True,
    )

    prompt = "你好，请介绍一下你自己。"
    encoded = tokenizer(prompt, return_tensors="pt").to(model.device)
    generated = model.generate(**encoded, max_new_tokens=128, do_sample=True, top_p=0.9)
    print(tokenizer.decode(generated[0], skip_special_tokens=True))

def test_yolo():
    """Detect UI icons with OmniParser's YOLO model, then caption every crop
    with the Florence-2 based icon-caption model.

    Requires the local weight folders ``weights/icon_detect`` and
    ``weights/icon_caption_florence`` plus the test image at ``D:\\db\\4.jpg``.
    Results (annotated image, captions) are shown/printed; nothing is returned.
    """
    import torch
    from PIL import Image
    from transformers import AutoProcessor, AutoModelForCausalLM
    from ultralytics import YOLO

    # ---------- 1. Icon detection with OmniParser's YOLO weights ----------
    yolo_model = YOLO("weights/icon_detect/model.pt")

    img_path = r"D:\db\4.jpg"
    img = Image.open(img_path)
    results = yolo_model.predict(
        source=img,
        conf=0.01,  # very low confidence threshold: keep nearly all candidates
        iou=0.01,   # default is 0.7; a low IoU suppresses overlapping boxes aggressively
    )

    crop_imgs = []
    for result in results:
        # result.boxes also exposes xywh/xywhn/xyxyn variants if needed.
        xyxy = result.boxes.xyxy  # top-left-x, top-left-y, bottom-right-x, bottom-right-y
        im_bgr = result.plot()    # annotated image as a BGR numpy array
        im_rgb = Image.fromarray(im_bgr[..., ::-1])  # convert to RGB for PIL
        im_rgb.show()
        # Crop each detected icon from the ORIGINAL image and normalize to 64x64.
        crop_imgs.extend([img.crop(tuple(box)).resize((64, 64)) for box in xyxy.tolist()])

    # ---------- 2. Caption model ----------
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    # FIX: the original picked float16 on BOTH branches of this conditional.
    # float16 inference is poorly supported on CPU, so fall back to float32 there.
    torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

    caption_model = AutoModelForCausalLM.from_pretrained(
        "weights/icon_caption_florence", torch_dtype=torch_dtype, trust_remote_code=True
    ).to(device)
    processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
    image = img

    def run_example(task_prompt, text_input=None, images=None):
        """Caption *images* (defaults to the full screenshot) with *task_prompt*.

        Florence-2 selects its task via special prompt tokens such as
        "<CAPTION>"; *text_input* is appended for grounding-style tasks.
        """
        prompt = task_prompt if text_input is None else task_prompt + text_input
        if not images:
            images = [image]
        prompts = [prompt] * len(images)
        inputs = processor(text=prompts, images=images, return_tensors="pt", do_resize=False).to(device, torch_dtype)
        generated_ids = caption_model.generate(
            input_ids=inputs["input_ids"],
            pixel_values=inputs["pixel_values"],
            max_new_tokens=20,
            num_beams=1,
            do_sample=False,
        )
        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)
        print(generated_text)
        # Other supported task prompts: <DETAILED_CAPTION>, <MORE_DETAILED_CAPTION>,
        # <OD>, <DENSE_REGION_CAPTION>, <REGION_PROPOSAL>, <OCR>, <OCR_WITH_REGION>.

    run_example("<CAPTION>", None, crop_imgs)


def test_omni_yolo():
    """Run the full OmniParser pipeline (YOLO detection + Florence captioning)
    via the project's ``util.utils`` helpers on a fixed test image.

    Returns:
        tuple[PIL.Image.Image, str]: the labeled image decoded from the
        base64 payload, and a newline-joined "icon i: ..." description list.
    """
    # FIX: dropped unused imports (typing.Optional, gradio, numpy, os) —
    # gradio in particular is a heavy import with real startup cost.
    import base64
    import io
    import torch
    from PIL import Image
    from util.utils import check_ocr_box, get_yolo_model, get_caption_model_processor, get_som_labeled_img

    yolo_model = get_yolo_model(model_path='weights/icon_detect/model.pt')
    device = 'cuda' if torch.cuda.is_available() else ('xpu' if torch.xpu.is_available() else 'cpu')
    caption_model_processor = get_caption_model_processor(model_name="florence2", model_name_or_path="weights/icon_caption_florence", device=device)

    img_path = r"D:\db\4.jpg"
    image_input = Image.open(img_path)
    box_threshold = 0.01   # keep nearly all YOLO detections
    iou_threshold = 0.01
    use_paddleocr = True
    imgsz = 640
    # Scale label drawing with image width (tuned for ~3200 px wide screenshots).
    box_overlay_ratio = image_input.size[0] / 3200
    draw_bbox_config = {
        'text_scale': 0.8 * box_overlay_ratio,
        'text_thickness': max(int(2 * box_overlay_ratio), 1),
        'text_padding': max(int(3 * box_overlay_ratio), 1),
        'thickness': max(int(3 * box_overlay_ratio), 1),
    }

    # OCR is stubbed out here with one hard-coded box to skip the slow
    # check_ocr_box() call during this test; re-enable for real runs:
    # ocr_bbox_rslt, is_goal_filtered = check_ocr_box(image_input, display_img = False, output_bb_format='xyxy', goal_filtering=None,
    #     easyocr_args={'paragraph': False, 'text_threshold':0.9}, use_paddleocr=use_paddleocr)
    # text, ocr_bbox = ocr_bbox_rslt
    text, ocr_bbox = [''], [(56, 31, 173, 85)]

    dino_labeled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image_input, yolo_model, BOX_TRESHOLD = box_threshold,
        output_coord_in_ratio=True, ocr_bbox=ocr_bbox,draw_bbox_config=draw_bbox_config, caption_model_processor=caption_model_processor,
        ocr_text=text,iou_threshold=iou_threshold, imgsz=imgsz,)

    # The helper returns the annotated image base64-encoded; decode and save it.
    image = Image.open(io.BytesIO(base64.b64decode(dino_labeled_img)))
    image.save(r'D:\db\4_out.png')
    print('finish processing')
    parsed_content_list = '\n'.join([f'icon {i}: ' + str(v) for i, v in enumerate(parsed_content_list)])
    print(parsed_content_list)
    return image, parsed_content_list

if __name__ == '__main__':
    # Time the selected test end to end (includes model download/load).
    t0 = time.time()
    # test_transformers()
    test_yolo()
    # test_omni_yolo()
    t1 = time.time()
    print(t1 - t0)