# ==========================
#         导入依赖包
# ==========================
import os
os.environ['GRADIO_TEMP_DIR'] = '/mnt/nas/shengjie/tmp'

import gradio as gr
import time
import requests
import json
import uuid
import asyncio
from copy import deepcopy
from PIL import Image
import re

from aiooss2 import AioBucket
from oss2.auth import Auth
import io

# ==========================
#   OSS配置与工具类导入
# ==========================

class OssConfig:
    """Credentials and location settings for one Aliyun OSS bucket."""

    def __init__(self, access_key_id, access_key_secret, endpoint, bucket_name):
        self.access_key_id = access_key_id
        self.access_key_secret = access_key_secret
        self.endpoint = endpoint
        self.bucket_name = bucket_name

    @classmethod
    def from_dict(cls, d):
        """Build an OssConfig from a plain dict; absent keys become None."""
        field_names = ('access_key_id', 'access_key_secret', 'endpoint', 'bucket_name')
        return cls(**{name: d.get(name) for name in field_names})

class Config:
    """Top-level application configuration; currently wraps only the OSS section."""

    def __init__(self, oss_config):
        self.oss_config = oss_config

    @classmethod
    def from_json(cls, json_str):
        """Parse a JSON string and build a Config from its 'oss_config' section."""
        parsed = json.loads(json_str)
        oss_section = parsed.get('oss_config', {})
        return cls(oss_config=OssConfig.from_dict(oss_section))

class OSSHandler:
    """
    Uploads in-memory PIL images to an Aliyun OSS bucket (aiooss2 + oss2.auth).
    """

    def __init__(self, access_key_id, access_key_secret, endpoint, bucket_name):
        self.access_key_id = access_key_id
        self.access_key_secret = access_key_secret
        self.endpoint = endpoint
        self.bucket_name = bucket_name
        # Credentials object reused for every bucket connection.
        self.auth = Auth(self.access_key_id, self.access_key_secret)

    async def upload_images(self, filenames, images, imtype="PNG"):
        """
        Asynchronously upload images to OSS and return their public URLs.

        filenames: object keys, e.g. ["a.png", ...]
        images:    PIL.Image instances, parallel to `filenames`
        imtype:    encoding format passed to PIL.Image.save (default "PNG")
        """
        urls = []
        async with AioBucket(self.auth, self.endpoint, self.bucket_name) as bucket:
            for key, image in zip(filenames, images):
                # Encode the image into an in-memory byte stream before upload.
                stream = io.BytesIO()
                image.save(stream, format=imtype)
                await bucket.put_object(key, stream.getvalue())
                urls.append(f"http://{self.bucket_name}.{self.endpoint}/{key}")
        return urls

def get_oss(config_path='/home/zhangshu/project/algo_service/configs/config.json'):
    """
    Build an OSSHandler from a JSON config file.

    config_path: path to the config JSON. The default is the deployment
                 path this service historically hard-coded, kept for
                 backward compatibility; callers may now override it.

    Returns an OSSHandler ready to upload images.
    """
    # Read and parse the OSS credentials section of the config file.
    with open(config_path, 'r') as f:
        config = Config.from_json(f.read())

    oss_cfg = config.oss_config
    return OSSHandler(
        access_key_id=oss_cfg.access_key_id,
        access_key_secret=oss_cfg.access_key_secret,
        endpoint=oss_cfg.endpoint,
        bucket_name=oss_cfg.bucket_name,
    )

# ==========================
#  封装LLM请求的类
# ==========================
class NanoBanana:
    """
    Thin client for the remote multimodal chat-completions service.

    Calling the instance posts an OpenAI-style `messages` list and returns
    the assistant message content (text and/or markdown image links, e.g.
    "![image](https://.../x.png)").
    """

    def __init__(self):
        # SECURITY: the API key was hard-coded in this demo. Prefer setting
        # NANO_BANANA_API_KEY in the environment; the old literal remains
        # only as a backward-compatible fallback and should be rotated.
        api_key = os.environ.get(
            'NANO_BANANA_API_KEY',
            'sk-VVJU25qYb5Bm3NHiIjYGevGfvQBZIj838sNC1D73FM0rqFpT',
        )
        self.headers = {
            'Authorization': f'Bearer {api_key}',
            'Content-Type': 'application/json'
        }
        self.url = "https://ismaque.org/v1/chat/completions"

    def __call__(self, messages):
        """
        POST `messages` to the service and return choices[0].message.content.

        Raises requests.HTTPError on a non-2xx response, and
        KeyError/IndexError if the response JSON lacks the expected
        {'choices': [{'message': {'content': ...}}]} shape.
        """
        payload = {
            "model": "gemini-2.5-flash-image-vip",
            "stream": False,
            "messages": messages
        }
        # `json=` lets requests serialize the body; the Authorization header
        # still carries the bearer token.
        response = requests.post(self.url, headers=self.headers, json=payload)
        # Fail loudly on HTTP errors instead of KeyError-ing on an error body.
        response.raise_for_status()
        return response.json()['choices'][0]['message']['content']

# ==========================
#   封装聊天历史数据转LLM输入
# ==========================
# Lazily-initialized singletons: created on first use (nano in fn(),
# oss in upload_image()) so importing this module does no network/config I/O.
nano = None  # NanoBanana client instance
oss = None  # OSSHandler instance


# ==========================
#      图片异步上传到OSS
# ==========================
async def upload_image(ref_image):
    """
    Upload a local PIL image to OSS and return its public URL.

    ref_image: a PIL.Image instance.
    """
    # NOTE: the original placed this docstring after executable statements,
    # which makes it a no-op string literal; it now sits first, as a docstring.
    global oss
    # Lazily build the OSS handler on first call.
    if oss is None:
        oss = get_oss()
    # Unique object key: millisecond timestamp + uuid4 avoids collisions.
    filenames = [f'test_tmp/{int(time.time() * 1000)}-{uuid.uuid4()}.png']
    ref_image_urls = await oss.upload_images(filenames, [ref_image], imtype="PNG")
    print(ref_image_urls)
    return ref_image_urls[0]



def _content_part(content):
    """
    Convert one gradio-history `content` value into a message part dict.

    A tuple is treated as an uploaded image (first element is the file
    path; the image is uploaded to OSS first); a str becomes a text part.

    Raises ValueError for any other content type.
    """
    if isinstance(content, tuple):
        image = Image.open(content[0])
        image_url = asyncio.run(upload_image(image))
        return {"image_url": {"url": image_url}, "type": "image_url"}
    if isinstance(content, str):
        return {"text": content, "type": "text"}
    raise ValueError("未知的content字段类型")


def fn(history):
    """
    Convert the gradio chat history into the remote service's Message
    structure (uploading images to OSS along the way) and return the
    model's reply content.
    """
    global nano
    if nano is None:
        nano = NanoBanana()
    # Leading prompt: prefer Chinese for any text answer. It has role
    # "user", so the first user turn in `history` merges into it below.
    messages = [{
        "role": "user",
        "content": [
            {"text": "如果有文本回答，请使用中文", "type": "text"},
        ]
    }]
    for item in history:
        part = _content_part(item['content'])
        if messages and item["role"] == messages[-1]['role']:
            # Same speaker as the previous message: merge multimodal parts.
            messages[-1]['content'].append(part)
        else:
            # Role changed (or message list empty): start a new message.
            messages.append({"role": item["role"], "content": [part]})

    print("messages:", messages)
    return nano(messages)

# ==========================
#      Gradio回调/处理函数
# ==========================

def print_like_dislike(x: gr.LikeData):
    """ Log a user's like/dislike event: message index, message value, liked flag. """
    print(x.index, x.value, x.liked)

def bot0(history: list):
    """
    Demo handler: treat the whole LLM output as an image path and append
    it to the history as one assistant message.
    """
    reply = fn(history)
    history.append({"role": "assistant", "content": {"path": reply}})
    return history

def bot_(history: list):
    """
    Demo handler: the LLM output is iterable; each element is appended to
    the history as its own assistant image-path message.
    """
    for item in fn(history):
        history.append({"role": "assistant", "content": {"path": item}})
    return history

# Markdown image link produced by the model, e.g. "![image](https://.../x.png)".
_IMG_MD_RE = re.compile(r'!\[image]\(https?://[^)]+\)')

def bot(history: list):
    """
    Append the LLM reply to the chat history, splitting markdown image
    links (``![image](url)``) into separate image messages and keeping
    the remaining fragments as plain text.
    """
    res = fn(history)  # full reply string; may interleave text and ![image](url)

    # Split into image-markdown and plain-text fragments; the capturing
    # group keeps the image links themselves in the result list.
    parts = [p for p in re.split(f'({_IMG_MD_RE.pattern})', res) if p]
    print('parts>> ', parts)

    current_role = "assistant"

    for part in parts:
        # BUGFIX: the old check was startswith("![image](https://"), which
        # silently demoted plain-http image links to text — and the OSS
        # URLs built in this file are http://. Match the same pattern the
        # splitter used, so both schemes are recognized.
        if _IMG_MD_RE.fullmatch(part):
            # Markdown image: strip "![image](" prefix and ")" suffix.
            img_url = part[len("![image]("):-1].strip()
            history.append({"role": current_role, "content": {"path": img_url}})
        else:
            # Plain text fragment.
            history.append({"role": current_role, "content": part})

    return history

def add_message(history, message):
    """
    Merge a multimodal submission (uploaded files + text) into the chat
    history, then yield the updated history and a cleared, disabled input
    box (re-enabled after the bot responds).

    Raises ValueError — before touching `history` — when the text is
    missing, so a failed submit leaves the caller's history unchanged.
    """
    # BUGFIX: validate before mutating. The old code appended the files
    # first and then raised; rebinding the local name to a deepcopy could
    # not undo appends already made to the caller's list.
    if message["text"] is None:
        raise ValueError("文本不能为空")
    for path in message["files"]:
        history.append({"role": "user", "content": {"path": path}})
    history.append({"role": "user", "content": message["text"]})
    yield history, gr.MultimodalTextbox(value=None, interactive=False)

# ==========================
#      Gradio主界面搭建
# ==========================
def start_gradio(port=20023):
    """
    Build and launch the Gradio chat UI, listening on 0.0.0.0:`port`.
    """
    with gr.Blocks() as demo:
        # Message-style chat display; bubbles shrink to fit their content.
        history_view = gr.Chatbot(elem_id="chatbot", bubble_full_width=False, type="messages")
        # Input box accepting free text plus multiple uploaded files.
        user_box = gr.MultimodalTextbox(
            interactive=True,
            file_count="multiple",
            placeholder="Enter message or upload file...",
            show_label=False,
            sources=["upload"],
        )

        # Pipeline on submit: merge the user message into the history,
        # run the bot, then re-enable the (cleared) input box.
        submitted = user_box.submit(
            add_message, [history_view, user_box], [history_view, user_box]
        )
        answered = submitted.then(bot, history_view, history_view, api_name="bot_response")
        answered.then(lambda: gr.MultimodalTextbox(interactive=True), None, [user_box])
        # like() wires thumbs-up/down feedback buttons onto chat messages.
        history_view.like(print_like_dislike, None, None, like_user_message=True)

    demo.launch(server_name="0.0.0.0", server_port=port)

# ==========================
#  并发API测试示例（可选演示用）
# ==========================
def demo_async():
    """
    Fire a small concurrent batch of chat-completion requests with aiohttp.
    Demo / smoke test only; each reply (or error) is printed.
    """
    import asyncio
    import aiohttp

    # SECURITY: demo key hard-coded; rotate it and load from an env var
    # before any real use.
    API_KEY = 'sk-VVJU25qYb5Bm3NHiIjYGevGfvQBZIj838sNC1D73FM0rqFpT'
    BASE_URL = "https://api.apicore.ai/v1/"
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json",
    }

    async def create_completion(session):
        """Send one chat-completion request and print the reply or the error."""
        try:
            async with session.post(
                url=f"{BASE_URL}chat/completions",
                json={
                    "model": "gpt-4o",
                    "max_tokens": 4000,
                    "temperature": 1,
                    "messages": [{"role": "user", "content": "你是谁"}],
                },
                headers=headers
            ) as response:
                if response.status == 200:
                    result = await response.json()
                    print(result['choices'][0]['message']['content'])
                else:
                    print(f"请求失败，状态码: {response.status}")
        except Exception as e:
            print(f"请求发生异常: {e}")

    async def run_batch(concurrency=2):
        """Run `concurrency` requests in parallel and wait for all to finish."""
        async with aiohttp.ClientSession() as session:
            await asyncio.gather(
                *(create_completion(session) for _ in range(concurrency))
            )

    # BUGFIX: the original only *defined* its coroutines (test_main/main)
    # and never ran them, so calling demo_async() was a silent no-op.
    # Run one bounded batch here; the old `while True` loop variant is
    # intentionally dropped — loop over demo_async() externally if needed.
    asyncio.run(run_batch())

def test_sync():
    """
    Synchronous smoke test: send one image-edit request to the remote
    service and return the assistant's reply content.
    """
    messages = [
        {'role': 'user',
         'content': [
             {'text': '如果有文本回答，请使用中文',
              'type': 'text'},
             {'image_url': {'url': 'http://huayiyi-ai.oss-cn-shanghai.aliyuncs.com/test_tmp/1762407601188-0275a518-f76d-4b9a-8891-5c5046a7695a.png'},
              'type': 'image_url'},
             {'text': '请把上面的衣服颜色变成红色', 'type': 'text'},
         ]},
    ]
    url = "https://ismaque.org/v1/chat/completions"
    # SECURITY: hard-coded demo key; rotate it and load from the environment.
    headers = {
        'Authorization': 'Bearer sk-VVJU25qYb5Bm3NHiIjYGevGfvQBZIj838sNC1D73FM0rqFpT',
        'Content-Type': 'application/json'
    }
    payload = {
        "model": "gemini-2.5-flash-image-vip",
        "stream": False,
        "messages": messages
    }

    # `json=` serializes the payload; equivalent to the old data=json.dumps(...).
    response = requests.post(url, headers=headers, json=payload)
    # BUGFIX: the original discarded the response and ended in `pass`;
    # surface HTTP errors and return the model content instead.
    response.raise_for_status()
    return response.json()['choices'][0]['message']['content']

# In-memory task registry: task_id -> {"status": ..., "response_content": ...};
# written by test_async(), polled by poll_task_result().
tasks = {}
async def test_async(payload=None, url='', task_id=-1):
    """
    POST `payload` to `url` with aiohttp, record the outcome in the global
    `tasks` registry under `task_id`, and return the assistant content.

    payload: JSON-serializable chat-completions request body.
    url:     chat-completions endpoint.
    task_id: key under which status/result are stored in `tasks`.
    """
    # SECURITY: hard-coded demo key; rotate it and load from the environment.
    headers = {
        'Authorization': 'Bearer sk-VVJU25qYb5Bm3NHiIjYGevGfvQBZIj838sNC1D73FM0rqFpT',
        'Content-Type': 'application/json'
    }
    import aiohttp
    timeout_cfg = aiohttp.ClientTimeout(total=600)
    global tasks
    try:
        async with aiohttp.ClientSession(timeout=timeout_cfg) as session:
            async with session.post(url, headers=headers, json=payload) as response:
                # Raises aiohttp.ClientResponseError for 4xx/5xx, so the old
                # `if status == 200` / `return []` branch was dead code.
                response.raise_for_status()
                result = await response.json()
                response_content = result['choices'][0]['message']['content']
    except Exception:
        # BUGFIX: the original never updated `tasks` on failure, leaving
        # poll_task_result() to spin until its timeout. Record the failure,
        # then propagate the exception.
        tasks[task_id] = {"status": "failed", "response_content": None}
        raise

    tasks[task_id] = {
        "status": "completed",
        'response_content': response_content
    }
    return response_content

async def test_async_task():
    """
    End-to-end demo: launch test_async() as a background task, poll the
    shared `tasks` registry for its result, and return the content
    (None when polling times out).
    """
    messages = [
        {'role': 'user',
         'content': [
             {'text': '如果有文本回答，请使用中文',
              'type': 'text'},
             {'image_url': {'url': 'http://huayiyi-ai.oss-cn-shanghai.aliyuncs.com/test_tmp/1762407601188-0275a518-f76d-4b9a-8891-5c5046a7695a.png'},
              'type': 'image_url'},
             {'text': '请把上面的衣服颜色变成红色',
              'type': 'text'},
         ]},
    ]
    url = "https://ismaque.org/v1/chat/completions"
    payload = {
        "model": "gemini-2.5-flash-image-vip",
        "stream": False,
        "messages": messages
    }
    task_id = str(uuid.uuid4())  # uuid is already imported at module level
    task = asyncio.create_task(test_async(payload=payload, url=url, task_id=task_id))

    response_content = await poll_task_result(task_id)
    if response_content is None:
        # BUGFIX: on poll timeout, cancel the still-running background
        # request instead of leaking the task (and discarding the result
        # via the old trailing `pass`).
        task.cancel()
    return response_content

import time
async def poll_task_result(task_id, poll_interval=1, timeout=30):
    """
    Poll the global `tasks` registry until `task_id` reaches "completed".

    :param task_id: key in the `tasks` registry
    :param poll_interval: seconds to sleep between polls
    :param timeout: give up after this many seconds
    :return: the task's response content, or None on timeout
    """
    # BUGFIX: use a monotonic clock for elapsed-time measurement —
    # time.time() is wall-clock and can jump (NTP, DST), breaking the timeout.
    start = time.monotonic()
    while True:
        task_info = tasks.get(task_id, {})
        if task_info.get("status", "") == "completed":
            return task_info.get("response_content")
        if time.monotonic() - start > timeout:
            return None
        await asyncio.sleep(poll_interval)
        # NOTE: status shown here was read before the sleep, so it may lag
        # one interval behind the registry.
        print(f"Polling for task_id: {task_id}, elapsed: {int(time.monotonic() - start)}s, status: {task_info.get('status', 'unknown')}")



# ==========================
#       启动gradio服务
# ==========================
if __name__ == "__main__":
    # Alternative entry points — uncomment exactly one to switch modes:
    # start_gradio(20024)
    # demo_async()
    # test_sync()
    
    # Current mode: run the async task demo end-to-end.
    import asyncio
    asyncio.run(test_async_task())