# coding:utf-8
import os
import sys

project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
if project_root not in sys.path:
    sys.path.append(project_root)
from config.config import Config
from src.chat import generate_response
from PIL import Image
import torch
import time
def image_understand(image_path, config: Config):
    """Describe an image with the locally loaded image-to-text model.

    Args:
        image_path: Path to the image file to describe.
        config: Config providing ``image_to_text_model`` (a
            ``(model, processor)`` pair) and ``max_new_tokens``.

    Returns:
        The model's newly generated description text (prompt tokens
        excluded).
    """
    query = '请描述这张图片,有可能是有名的景点。'

    # Message formatting follows the official sample:
    # https://huggingface.co/microsoft/Phi-3-vision-128k-instruct#sample-inference-code
    messages = [
        {"role": "user", "content": "<|image_1|>\n{prompt}".format(prompt=query)},
    ]
    model, processor = config.image_to_text_model
    prompt = processor.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True)

    # Context-manage the image so the file handle is released promptly,
    # and generate under inference mode (no autograd bookkeeping).
    with Image.open(image_path) as image, torch.inference_mode():
        # NOTE(review): device 'xpu' is hard-coded — assumes an Intel XPU
        # backend is available; confirm against the deployment target.
        inputs = processor(prompt, [image], return_tensors="pt").to('xpu')
        st = time.time()
        # temperature is intentionally omitted: with do_sample=False it is
        # ignored (greedy decoding) and only triggers a transformers warning.
        output = model.generate(**inputs,
                                eos_token_id=processor.tokenizer.eos_token_id,
                                num_beams=1,
                                do_sample=False,
                                max_new_tokens=config.max_new_tokens,
                                )
        end = time.time()
        print(f'Inference time: {end - st} s')
        # Strip the prompt tokens so only newly generated text is decoded
        # (matches the upstream Phi-3-vision sample inference code).
        generated = output[:, inputs['input_ids'].shape[1]:]
        output_str = processor.decode(generated[0],
                                      skip_special_tokens=True,
                                      clean_up_tokenization_spaces=False)
        print('-' * 20, 'Prompt', '-' * 20)
        print(f'Message: {messages}')
        print(output_str)

    # Drop the local references; the objects themselves remain owned by
    # ``config.image_to_text_model``.
    del model
    del processor

    return output_str




def image_understand_api(image_path, config: Config):
    """Describe an image via the SiliconFlow OpenAI-compatible vision API.

    Reads the image from disk, inlines it as a base64 data URL, and asks
    the Qwen2-VL model to describe it in a single non-streaming request.

    Args:
        image_path: Path to the local image file.
        config: Config providing ``api_token`` for SiliconFlow.

    Returns:
        The generated description text.
    """
    import base64
    from openai import OpenAI

    def encode_image(path: str) -> str:
        """Read a local image file and return its base64-encoded contents."""
        with open(path, "rb") as fh:
            raw = fh.read()
        return base64.b64encode(raw).decode('utf-8')

    data_url = f"data:image/jpeg;base64,{encode_image(image_path)}"

    # API token obtainable from https://cloud.siliconflow.cn/account/ak
    client = OpenAI(
        api_key=f"{config.api_token}",
        base_url="https://api.siliconflow.cn/v1"
    )

    image_part = {
        "type": "image_url",
        "image_url": {
            "url": data_url,
            "detail": "low"
        }
    }
    text_part = {
        "type": "text",
        "text": "请描述这张图片"
    }
    response = client.chat.completions.create(
        model="Qwen/Qwen2-VL-72B-Instruct",
        messages=[{"role": "user", "content": [image_part, text_part]}],
        stream=False
    )

    return response.choices[0].message.content


# Main entry point: generate styled caption text from an image
def generate_text_from_image(image, style, config: Config, mode):
    """Produce styled caption text for an image.

    First obtains a textual description of the image — via the remote API
    or the local model, depending on ``config.use_image_to_text_api`` —
    then asks the chat model to render it in the requested style.

    Args:
        image: Path to the image file.
        style: Desired writing style for the generated text.
        config: Shared Config object; its mode is set from ``mode``.
        mode: Run mode forwarded to ``config.set_mode`` and the chat model.

    Returns:
        The generated caption text.
    """
    config.set_mode(mode)

    if config.use_image_to_text_api:
        image_description = image_understand_api(image, config)
        print("*" * 20, "图片理解", "*" * 20, "\n")
        print(image_description, "\n")
    else:
        image_description = image_understand(image, config)

    question = f"根据图片描述：{image_description}, 用{style}风格生成一段简洁的文字。"
    return generate_response(question, config, mode)
