'''
Image-to-text demos: remote inference with Qwen2.5-VL through the DashScope
OpenAI-compatible API — document parsing, OCR field extraction, spatial
grounding (bounding boxes), and video understanding.
'''
import glob
import sys

from openai import OpenAI
import os
import base64

sys.path.append(r'D:\code\other\LLMs\my_langchain')
# import keys


# Base64 encoding helper
def encode_image(image_path):
    """Read the file at *image_path* and return its bytes as a base64 UTF-8 string."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")

def encode_frames(frames_path):
    """Base64-encode every ``*.jpg`` frame in *frames_path* as data URLs.

    Frames are returned in deterministic order — numeric filename order when
    the stems are digits (so ``2.jpg`` sorts before ``10.jpg``, matching how
    the frame extractor numbers them), lexicographic otherwise. The original
    code relied on bare ``glob`` ordering, which is lexicographic at best, and
    required *frames_path* to end with a path separator.

    :param frames_path: directory containing the extracted video frames
    :return: list of ``data:image/jpeg;base64,...`` strings, one per frame
    """
    def _order(path):
        # glob alone yields "10.jpg" before "2.jpg"; sort frame numbers numerically.
        stem = os.path.splitext(os.path.basename(path))[0]
        return (0, int(stem), "") if stem.isdigit() else (1, 0, stem)

    data_urls = []
    for frame_file in sorted(glob.glob(os.path.join(frames_path, "*.jpg")), key=_order):
        with open(frame_file, "rb") as image_file:
            b64 = base64.b64encode(image_file.read()).decode("utf-8")
        # "image/jpeg" is the registered MIME subtype; "image/jpg" is non-standard.
        data_urls.append(f"data:image/jpeg;base64,{b64}")
    return data_urls

# @title inference function with API
def inference_with_api(image_path, prompt, sys_prompt="You are a helpful assistant.",
                       model_id="qwen2.5-vl-72b-instruct", min_pixels=512 * 28 * 28, max_pixels=2048 * 28 * 28):
    """Send one local image plus a text prompt to the DashScope OpenAI-compatible API.

    :param image_path: local path of the image (read and sent inline as base64 JPEG)
    :param prompt: user prompt describing the task
    :param sys_prompt: system prompt text
    :param model_id: DashScope model name
    :param min_pixels: lower pixel bound passed to the server-side image resizer
    :param max_pixels: upper pixel bound passed to the server-side image resizer
    :return: assistant message content of the first completion choice
    """
    base64_image = encode_image(image_path)
    client = OpenAI(
        # NOTE(review): a live API key was hardcoded here. It is kept only as a
        # backward-compatible fallback and should be revoked/rotated; configure
        # the DASHSCOPE_API_KEY environment variable instead.
        api_key=os.getenv("DASHSCOPE_API_KEY", "sk-38bdbf76aba641dfb1a671c7259d6dd5"),
        # International endpoint: "https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )

    messages = [
        {
            "role": "system",
            "content": [{"type": "text", "text": sys_prompt}]},
        {
            "role": "user",
            "content": [
                {
                    "type": "image_url",
                    "min_pixels": min_pixels,
                    "max_pixels": max_pixels,
                    # The MIME subtype in the data URL must match the actual image
                    # format, e.g. image/png, image/jpeg, image/webp.
                    "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
                },
                {"type": "text", "text": prompt},
            ],
        }
    ]
    completion = client.chat.completions.create(
        model=model_id,
        messages=messages,
    )
    return completion.choices[0].message.content

import os
from PIL import Image, ImageDraw, ImageFont
import requests
from io import BytesIO
from bs4 import BeautifulSoup, Tag
from pathlib import Path
import re


# Function to draw bounding boxes and text on images based on HTML content
def draw_bbox(image_path, resized_width, resized_height, full_predict):
    """Overlay the ``data-bbox`` boxes from model HTML output onto the image.

    Box coordinates in *full_predict* are expressed at the model's input
    resolution (*resized_width* x *resized_height*); they are mapped back to
    the original image resolution before drawing. Displays the image via
    ``Image.show()``.

    :param image_path: local path or http(s) URL of the source image
    :param resized_width: width of the model-input (resized) image
    :param resized_height: height of the model-input (resized) image
    :param full_predict: HTML string whose tags carry ``data-bbox="x1 y1 x2 y2"``
    """
    # Load from a URL or a local file.
    if image_path.startswith("http"):
        response = requests.get(image_path)
        image = Image.open(BytesIO(response.content))
    else:
        image = Image.open(image_path)
    original_width = image.width
    original_height = image.height

    # Parse the provided HTML content and pull out every annotated element.
    soup = BeautifulSoup(full_predict, 'html.parser')
    elements_with_bbox = soup.find_all(attrs={'data-bbox': True})

    # Keep every annotated element except bare <ol> containers; their <li>
    # children carry their own boxes. (The original if/elif/else appended in
    # both non-<ol> branches, so this single filter is equivalent.)
    filtered_elements = [el for el in elements_with_bbox if el.name != 'ol']

    try:
        # Preferred CJK-capable font; fall back to PIL's built-in bitmap font
        # so the function still works on machines without this exact path.
        font = ImageFont.truetype(
            r"C:\Users\Dell\Downloads\Noto_Sans_SC,Roboto\Noto_Sans_SC\static\NotoSansSC-Regular.ttf", 10)
    except OSError:
        font = ImageFont.load_default()
    draw = ImageDraw.Draw(image)

    # Scaling factors from original resolution to model-input resolution;
    # loop-invariant, so computed once.
    scale_x = resized_width / original_width
    scale_y = resized_height / original_height

    # Draw bounding boxes and text for each element.
    for element in filtered_elements:
        bbox_str = element['data-bbox']
        text = element.get_text(strip=True)
        x1, y1, x2, y2 = map(int, bbox_str.split())

        # Map model-input coordinates back to original-image pixels.
        x1_resized = int(x1 / scale_x)
        y1_resized = int(y1 / scale_y)
        x2_resized = int(x2 / scale_x)
        y2_resized = int(y2 / scale_y)

        # Normalize corner order so PIL gets (left, top, right, bottom).
        if x1_resized > x2_resized:
            x1_resized, x2_resized = x2_resized, x1_resized
        if y1_resized > y2_resized:
            y1_resized, y2_resized = y2_resized, y1_resized

        # Draw bounding box, then the element text just below it.
        draw.rectangle([x1_resized, y1_resized, x2_resized, y2_resized], outline='red', width=2)
        draw.text((x1_resized, y2_resized), text, fill='red', font=font)

    # Display the image.
    image.show()


# Function to clean and format HTML content
def clean_and_format_html(full_predict):
    """Strip model-specific markup from QwenVL document-parser HTML.

    Removes inline ``color`` styles, ``data-bbox``/``data-polygon``
    attributes, collapses formula subclasses to a plain ``formula`` class,
    empties non-text containers (image captions, music sheets, chemical
    formulas, charts), and returns the result wrapped in a fenced
    ```` ```html ```` markdown block.

    :param full_predict: raw HTML string produced by the model
    :return: cleaned HTML inside a markdown ```html fence
    """
    soup = BeautifulSoup(full_predict, 'html.parser')

    # Regular expression pattern to match 'color' declarations in style attributes.
    color_pattern = re.compile(r'\bcolor:[^;]+;?')

    # Remove 'color' styles; drop the style attribute entirely if nothing remains.
    for tag in soup.find_all(style=True):
        original_style = tag.get('style', '')
        new_style = color_pattern.sub('', original_style)
        if not new_style.strip():
            del tag['style']
        else:
            tag['style'] = new_style.rstrip(';')

    # Remove 'data-bbox' and 'data-polygon' attributes from all tags.
    for attr in ["data-bbox", "data-polygon"]:
        for tag in soup.find_all(attrs={attr: True}):
            del tag[attr]

    classes_to_update = ['formula.machine_printed', 'formula.handwritten']
    # Collapse formula subclasses to a single 'formula' class, deduplicated
    # while preserving order.
    for tag in soup.find_all(class_=True):
        if isinstance(tag, Tag) and 'class' in tag.attrs:
            new_classes = [cls if cls not in classes_to_update else 'formula' for cls in tag.get('class', [])]
            tag['class'] = list(dict.fromkeys(new_classes))

    # Clear contents of image-caption divs and rename their class to 'image'.
    for div in soup.find_all('div', class_='image caption'):
        div.clear()
        div['class'] = ['image']

    classes_to_clean = ['music sheet', 'chemical formula', 'chart']
    # Clear contents and remove 'format' attributes of tags with these classes.
    for class_name in classes_to_clean:
        for tag in soup.find_all(class_=class_name):
            if isinstance(tag, Tag):
                tag.clear()
                if 'format' in tag.attrs:
                    del tag['format']

    # Manually rebuild the output string. html.parser does not synthesize a
    # <body> tag, so fall back to the document root when the model output
    # lacks one (the original crashed with AttributeError on soup.body.children).
    root = soup.body if soup.body is not None else soup
    output = []
    for child in root.children:
        if isinstance(child, Tag):
            output.append(str(child))
            output.append('\n')  # Add newline after each top-level element
        elif isinstance(child, str) and not child.strip():
            continue  # Ignore whitespace text nodes
    complete_html = f"""```html\n<html><body>\n{" ".join(output)}</body></html>\n```"""
    return complete_html


# document_parsing
# ocr
def test_():
    """Demo: OCR field extraction from an invoice image via the remote API.

    Loads a local image, computes the model-input resolution with
    ``smart_resize`` (so bbox output could be mapped back), queries the model,
    prints the raw response, and draws any returned boxes on the image.
    """
    # Use an API-based approach to inference. Apply API key here: https://bailian.console.alibabacloud.com/?apiKey=1
    from qwen_vl_utils import smart_resize
    from PIL import Image

    # Previously tested inputs kept for reference:
    # img_url = r'D:\code\other\LLMs\algorithms\remote_infer\test_bxd_20250402135314.png'
    # img_url = r'D:\code\other\LLMs\algorithms\remote_infer\test_doc_20250402104302.png'
    # img_url = r'D:\code\other\LLMs\algorithms\remote_infer\test_doc_wzh_20250402154026.png'
    img_url = r'D:\code\other\LLMs\algorithms\remote_infer\test_fapiao_20250402155650.png'

    # Document-parsing variant (QwenVL Document Parser HTML output):
    # system_prompt = "You are an AI specialized in recognizing and extracting text from images. Your mission is to analyze the image document and generate the result in QwenVL Document Parser HTML format using specified tags while maintaining user privacy and data integrity."
    # prompt = "QwenVL HTML "

    # OCR extraction: ask for specific invoice fields (buyer/seller tax IDs,
    # total amount, issue date) as JSON.
    system_prompt = 'You are a helpful assistant.'
    prompt = "提取图中的：['购买方纳税人识别号','销售方纳税人识别号','总金额','开票日期']，并且按照json格式输出。"
    # os.environ['DASHSCOPE_API_KEY'] = 'your_api_key_here'
    min_pixels = 512 * 28 * 28
    max_pixels = 2048 * 28 * 28

    # Resolution the server-side resizer will feed the model; needed to map
    # any bbox output back onto the original image.
    image = Image.open(img_url)
    width, height = image.size
    input_height, input_width = smart_resize(height, width, min_pixels=min_pixels, max_pixels=max_pixels)
    output = inference_with_api(img_url, prompt, sys_prompt=system_prompt, min_pixels=min_pixels, max_pixels=max_pixels)

    # Visualization
    print(input_height, input_width)
    print(output)
    draw_bbox(img_url, input_width, input_height, output)




# universal_recognition
# spatial_understanding
def test_spatial_understanding():
    """Demo: spatial grounding — ask the model for object bounding boxes as
    JSON, then plot them (with labels) on the source image."""
    import json
    import random
    import io
    import ast
    from PIL import Image, ImageDraw, ImageFont
    from PIL import ImageColor
    import xml.etree.ElementTree as ET
    # Extra named colors beyond the hand-picked list below.
    additional_colors = [colorname for (colorname, colorcode) in ImageColor.colormap.items()]
    def parse_json(json_output):
        """Strip a ```json ... ``` markdown fence from the model output, if present."""
        # Parsing out the markdown fencing
        lines = json_output.splitlines()
        for i, line in enumerate(lines):
            if line == "```json":
                json_output = "\n".join(lines[i + 1:])  # Remove everything before "```json"
                json_output = json_output.split("```")[0]  # Remove everything after the closing "```"
                break  # Exit the loop once "```json" is found
        return json_output
    def plot_bounding_boxes(im, bounding_boxes, input_width, input_height):
        """
        Plots bounding boxes on an image with markers for each a name, using PIL, normalized coordinates, and different colors.

        Args:
            img_path: The path to the image file.
            bounding_boxes: A list of bounding boxes containing the name of the object
             and their positions in normalized [y1 x1 y2 x2] format.
        """

        # Load the image
        img = im
        width, height = img.size
        print(img.size)
        # Create a drawing object
        draw = ImageDraw.Draw(img)

        # Define a list of colors
        colors = [
                     'red',
                     'green',
                     'blue',
                     'yellow',
                     'orange',
                     'pink',
                     'purple',
                     'brown',
                     'gray',
                     'beige',
                     'turquoise',
                     'cyan',
                     'magenta',
                     'lime',
                     'navy',
                     'maroon',
                     'teal',
                     'olive',
                     'coral',
                     'lavender',
                     'violet',
                     'gold',
                     'silver',
                 ] + additional_colors

        # Parsing out the markdown fencing
        bounding_boxes = parse_json(bounding_boxes)

        # NOTE(review): hardcoded absolute font path — fails on any machine
        # without this exact file; consider a try/except fallback to
        # ImageFont.load_default().
        font = ImageFont.truetype(r"C:\Users\Dell\Downloads\Noto_Sans_SC,Roboto\Noto_Sans_SC\static\NotoSansSC-Regular.ttf", size=14)

        try:
            json_output = ast.literal_eval(bounding_boxes)
        except Exception as e:
            # Model output was truncated mid-list: cut at the last complete
            # `"}` entry and close the list so it parses.
            end_idx = bounding_boxes.rfind('"}') + len('"}')
            truncated_text = bounding_boxes[:end_idx] + "]"
            json_output = ast.literal_eval(truncated_text)

        # Iterate over the bounding boxes
        for i, bounding_box in enumerate(json_output):
            # Select a color from the list (cycled when there are more boxes than colors)
            color = colors[i % len(colors)]

            # Map coordinates from the model-input resolution back to the
            # displayed image's absolute pixel coordinates.
            abs_y1 = int(bounding_box["bbox_2d"][1] / input_height * height)
            abs_x1 = int(bounding_box["bbox_2d"][0] / input_width * width)
            abs_y2 = int(bounding_box["bbox_2d"][3] / input_height * height)
            abs_x2 = int(bounding_box["bbox_2d"][2] / input_width * width)

            # Normalize corner order so PIL gets (left, top) before (right, bottom).
            if abs_x1 > abs_x2:
                abs_x1, abs_x2 = abs_x2, abs_x1

            if abs_y1 > abs_y2:
                abs_y1, abs_y2 = abs_y2, abs_y1

            # Draw the bounding box
            draw.rectangle(
                ((abs_x1, abs_y1), (abs_x2, abs_y2)), outline=color, width=4
            )

            # Draw the text
            if "label" in bounding_box:
                draw.text((abs_x1 + 8, abs_y1 + 6), bounding_box["label"], fill=color, font=font)

        # Display the image
        img.show()

    # Previously tested inputs kept for reference:
    # image_path = r"D:\code\other\LLMs\algorithms\remote_infer\test_cakes_20250402163314.png"
    # image_path = r"D:\code\other\LLMs\algorithms\remote_infer\test_luogan_20250402164601.png"
    # image_path = r'D:\code\other\LLMs\algorithms\remote_infer\test_yaopian_20250402165446.png'
    # image_path = r'D:\code\other\LLMs\algorithms\remote_infer\test_xianxv_20250403102247.jpg'
    # image_path = r'D:\code\other\LLMs\algorithms\remote_infer\test_xx_20250403103211.png'
    image_path = r'D:\DATA\20250519RENBAO\trainV8Pose_closePeople\add_imgs\front_cam_dataset0_100_t23721-2025-05-13\000100.jpg'

    ## Use a local HuggingFace model to inference.
    # Alternate grounding prompts (Chinese/English) kept for reference:
    # # prompt in chinese
    # prompt = "框出每一个小蛋糕的位置，以json格式输出所有的坐标"
    # # prompt in english
    # prompt = "Outline the position of each small cake and output all the coordinates in JSON format."
    # prompt = "框出擀面杖的位置，以json格式输出坐标"
    # prompt = "框出图中凸起的螺杆的位置，再框出每个螺杆里的划痕,磕伤缺陷，以json格式输出坐标"
    # prompt = "框出图中电线的颜色和位置，以json格式输出坐标。"
    prompt = "这是人员过安检场景，乘客将各自的行李放置在灰色框里，每个灰色框上贴有编号，以json格式输出每个框的坐标和编号"
    # prompt = "这是一板药的图片，图片中有些药片框出图中，以json格式输出坐标"


    # prompt0 = r"这是一板药的图片，以下是图片中的一些特征和物体：1. **药品名称**：图片上显示药品名称为“依达拉奉右莰醇舌下片”（Edaravone and Dexborneol Sublingual Tablets）。2. **成分含量**：每片含依达拉奉30mg和右莰醇6mg。3. **品牌标志**：左上角有“先必新”的标志，右下角有“Simcere 先声药业”的标志。4. **包装形式**：药品采用铝塑泡罩包装，每片药片单独密封在泡罩中。5. **药片形状**：药片呈圆形，表面有放射状纹理。6. **切割线**：泡罩包装上有红色的切割线，方便撕开取药。这些特征表明这是一种用于特定医疗用途的药物，具体使用方法和适应症需要参考药品说明书或咨询医生。"
    # prompt = prompt0 + "框出图中没有药片的泡罩，以json格式输出坐标"
    # Use an API-based approach to inference. Apply API key here: https://bailian.console.alibabacloud.com/?apiKey=1
    from qwen_vl_utils import smart_resize
    # NOTE(review): this placeholder overwrites any real DASHSCOPE_API_KEY set
    # in the environment — confirm whether it should be removed.
    os.environ['DASHSCOPE_API_KEY'] = 'your_api_key_here'
    min_pixels = 512*28*28
    max_pixels = 2048*28*28
    image = Image.open(image_path)
    width, height = image.size
    # Resolution the server-side resizer feeds the model; used to map bbox
    # output back onto the original image.
    input_height,input_width = smart_resize(height,width,min_pixels=min_pixels, max_pixels=max_pixels)
    response = inference_with_api(image_path, prompt, min_pixels=min_pixels, max_pixels=max_pixels)

    # sys.path.append(r'D:\CODE\ZXC\LLMs\algorithms')
    # from infer_vl_qwen2_5vl import get_vlm_infer
    # vlm_infer = get_vlm_infer()

    print(response)
    plot_bounding_boxes(image, response, input_width, input_height)




def test_video_understanding():
    """Demo: video understanding — sample frames from a local video with
    OpenCV, base64-encode them, and send them to the model as a frame list."""
    import os
    from openai import OpenAI

    def inference_with_api(
            save_frames_path,
            prompt,
            sys_prompt="You are a helpful assistant.",
            # model_id="qwen-vl-max-latest",
            model_id="qwen2.5-vl-72b-instruct",
    ):
        """Send the frames under *save_frames_path* plus a text prompt to the
        DashScope OpenAI-compatible API and return the first choice's content.

        Shadows the module-level ``inference_with_api`` — this variant sends a
        frame list ("video" content type) instead of a single image.
        """
        client = OpenAI(
            # NOTE(review): hardcoded API key committed here — should be
            # rotated and read from the DASHSCOPE_API_KEY environment variable.
            api_key='sk-38bdbf76aba641dfb1a671c7259d6dd5',
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )
        bs64s = encode_frames(save_frames_path)
        messages = [
            {
                "role": "system",
                "content": [{"type": "text", "text": sys_prompt}]
            },
            {
                "role": "user",
                "content": [
                    # {"type": "video_url", "video_url": {"url": video_path}},
                    {"type": "video", "video": bs64s},
                    {"type": "text", "text": prompt},
                ]
            }
        ]
        completion = client.chat.completions.create(
            model=model_id,
            messages=messages,
        )
        print(completion)
        return completion.choices[0].message.content

    import os
    import hashlib
    import requests


    import numpy as np
    from PIL import Image
    from IPython.display import Markdown, display
    import decord
    from decord import VideoReader, cpu  # video reading
    # D:\code\other\LLMs\llm_py310\python.exe -m pip install decord

    def download_video(url, dest_path):
        """Stream *url* to *dest_path* in 8 KB-ish chunks."""
        response = requests.get(url, stream=True)
        with open(dest_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8096):
                f.write(chunk)
        print(f"Video downloaded to {dest_path}")
    def get_video_frames(video_path, num_frames=128, cache_dir='.cache'):
        """Sample *num_frames* evenly-spaced frames (plus timestamps) from a
        video via decord, caching downloads and decoded arrays under
        *cache_dir* keyed by an MD5 of the path string."""
        os.makedirs(cache_dir, exist_ok=True)

        video_hash = hashlib.md5(video_path.encode('utf-8')).hexdigest()
        if video_path.startswith('http://') or video_path.startswith('https://'):
            video_file_path = os.path.join(cache_dir, f'{video_hash}.mp4')
            if not os.path.exists(video_file_path):
                download_video(video_path, video_file_path)
        else:
            video_file_path = video_path

        frames_cache_file = os.path.join(cache_dir, f'{video_hash}_{num_frames}_frames.npy')
        timestamps_cache_file = os.path.join(cache_dir, f'{video_hash}_{num_frames}_timestamps.npy')

        # Cache hit: reuse previously decoded frames/timestamps.
        if os.path.exists(frames_cache_file) and os.path.exists(timestamps_cache_file):
            frames = np.load(frames_cache_file)
            timestamps = np.load(timestamps_cache_file)
            return video_file_path, frames, timestamps

        vr = VideoReader(video_file_path, ctx=cpu(0))
        total_frames = len(vr)

        # Evenly spaced frame indices across the whole video.
        indices = np.linspace(0, total_frames - 1, num=num_frames, dtype=int)
        frames = vr.get_batch(indices).asnumpy()
        timestamps = np.array([vr.get_frame_timestamp(idx) for idx in indices])

        np.save(frames_cache_file, frames)
        np.save(timestamps_cache_file, timestamps)

        return video_file_path, frames, timestamps

    def create_image_grid(images, num_columns=8):
        """Paste the frame arrays into a single grid image, *num_columns* wide."""
        pil_images = [Image.fromarray(image) for image in images]
        num_rows = (len(images) + num_columns - 1) // num_columns

        img_width, img_height = pil_images[0].size
        grid_width = num_columns * img_width
        grid_height = num_rows * img_height
        grid_image = Image.new('RGB', (grid_width, grid_height))

        for idx, image in enumerate(pil_images):
            row_idx = idx // num_columns
            col_idx = idx % num_columns
            position = (col_idx * img_width, row_idx * img_height)
            grid_image.paste(image, position)

        return grid_image

    def get_video_frames_cv2(
                       video_path='/data1/xiancai/FACE_ANGLE_DATA/other/test/face_angle_test1.mp4',
                       save_path='/data1/xiancai/BABY_DATA/other/test/Video2DeepCam/res_10A4BE72856C_monitoringOff_1618593172930.mp4'
                       ):
        '''
        Walk through a video with OpenCV, keep every `down_scale`-th frame
        (resized so the longer side is 640 px) and save each kept frame as a
        JPEG under `<video_path>_frames/`. Returns early if that directory
        already exists.
        :param video_path: input video file
        :param save_path: path for a downsampled output mp4 (the writer is
            opened and released, but frame writing is commented out below)
        :return: directory containing the extracted frames
        '''
        save_frames_path = f'{video_path}_frames/'
        if os.path.exists(save_frames_path):
            return save_frames_path
        import cv2
        # Set up video capture and writer
        cap = cv2.VideoCapture(video_path)
        fps, total = cap.get(cv2.CAP_PROP_FPS), int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # frame rate, total frame count
        w, h = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # frame width, frame height
        mp4 = cv2.VideoWriter_fourcc(*'mp4v')
        # res = cv2.VideoWriter(save_path, mp4, 20.0, (1280, 720), True)  # WH
        down_scale = 20  # temporal downsampling: keep every 20th frame
        res = cv2.VideoWriter(save_path, mp4, fps / down_scale, (w, h), True)  # WH
        numb = 0

        print(f'fps: {fps}, total: {total}, w: {w}, h: {h}')
        # Frame loop: the end-of-stream break only fires on sampled
        # iterations, so up to down_scale-1 empty reads happen after EOF.
        while (cap.isOpened()):
            numb += 1
            ret, frame = cap.read()
            if numb % down_scale == 0:
                if ret:
                    print(f'{numb}/{total},frame.shape:{frame.shape}')

                    # Shrink the frame so its longer side is 640 px
                    sc = 640 / max(frame.shape)
                    frame = cv2.resize(frame, dsize=(int(frame.shape[1] * sc), int(frame.shape[0] * sc)))

                    # # detect
                    # pre = self.engine.detect_onnx(frame)
                    # # draw
                    # res_img = self.engine.draw(frame, pre)
                    print('')
                    # cv2.imwrite(f'{out_imgs}/{numb}_{name[:-4]}.jpg', res_img)  # save one frame as an image
                    if True:
                        # cv2.imwrite(f'/data1/xiancai/FACE_ANGLE_DATA/other/test/frames/{numb}.jpg', res_img)
                        # save_frames_path = f'{video_path}_frames/'
                        if not os.path.exists(save_frames_path):
                            os.makedirs(save_frames_path)
                        cv2.imwrite(f'{video_path}_frames/{numb}.jpg', frame)
                    # res.write(res_img)  # write one frame to the mp4
                else:
                    break

        cap.release()
        res.release()
        print('Done.')
        return save_frames_path

    # video_url = "https://duguang-labelling.oss-cn-shanghai.aliyuncs.com/qiansun/video_ocr/videos/50221078283.mp4"

    # video_url = r"D:\wechat_file\WeChat Files\wxid_ig5xjsfld7cp22\FileStorage\File\2025-04\1.mp4"
    video_url = r"D:\CODE\ZXC\hk-airport-poc\data\input\n2_q2_away.mp4"

    prompt = "请总结一下视频中的内容, 提出3个关于视频的问题"


    # Alternative decord-based sampling with a preview grid, kept for reference:
    # video_path, frames, timestamps = get_video_frames(video_url, num_frames=64)  # fetch video frames and timestamps
    # image_grid = create_image_grid(frames, num_columns=8)
    # # display(image_grid.resize((640, 640)))
    # image_grid.resize((1280, 1280)).show()

    save_frames_path = get_video_frames_cv2(video_url)


    # response = inference(video_path, prompt)
    response = inference_with_api(save_frames_path, prompt)
    print(response)


if __name__ == '__main__':
    # Script entry point — uncomment the demo you want to run.
    # test_()
    # test_spatial_understanding()
    test_video_understanding()