      
from binhex import RUNCHAR
import hmac
import time
import requests
from datetime import datetime
import hashlib
import uuid
import base64
import time
import uuid

import os
from openai import OpenAI,AsyncOpenAI
from PIL import Image
import asyncio
import time
import uuid

# Async client for use inside coroutines (`await ...create`).
# NOTE(review): the original code bound AsyncOpenAI to `client` and then
# immediately overwrote it with the sync client (dead store); the async
# client now gets its own name so both are usable.
# NOTE(review): hard-coded API key kept only as a fallback — prefer setting
# the DASHSCOPE_API_KEY environment variable (as the original comment hinted).
async_client = AsyncOpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY", "sk-68651914ca9a4dd6b10ad2bd2fcc20af"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)
# Synchronous client used by `get_prompt` further down in this file.
client = OpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY", "sk-68651914ca9a4dd6b10ad2bd2fcc20af"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)


class SmartImg1:
    """Signed client for the LiblibAI "Kontext" image-generation HTTP API.

    Builds HMAC-SHA1-signed URLs for the text2img / img2img submit endpoints
    and the task-status endpoint, submits generation jobs, and polls until
    image URLs are returned.

    NOTE(review): the default ak/sk credentials are hard-coded — they should
    come from configuration or the environment in production.
    """

    # def __init__(self, ak='', sk='', interval=5):
    def __init__(self, ak='ZB4FqSMFuuGCg4pgRzMMOg', sk='S_uCEk4-CSJ63LSJeZiv_2WDxlJxLZ4x', interval=5):
        """
        :param ak: API access key (sent as the ``AccessKey`` query parameter)
        :param sk: API secret key used to sign each endpoint URI
        :param interval: polling interval in seconds while waiting for images
        """
        self.ak = ak
        self.sk = sk
        self.time_stamp = int(datetime.now().timestamp() * 1000)  # millisecond timestamp
        self.signature_nonce = uuid.uuid1()  # per-instance random nonce
        self.signature_kontext_text2img = self._hash_kontext_text2img_sk(self.sk, self.time_stamp, self.signature_nonce)
        self.signature_kontext_img2img = self._hash_kontext_img2img_sk(self.sk, self.time_stamp, self.signature_nonce)
        self.signature_task_status = self._hash_task_status_sk(self.sk, self.time_stamp, self.signature_nonce)
        self.interval = interval
        self.headers = {'Content-Type': 'application/json'}
        # NOTE(review): these assignments deliberately shadow the same-named
        # methods with the pre-built URL strings; kept for backward
        # compatibility with callers reading e.g. `instance.kontext_img2img`.
        self.kontext_text2img = self.kontext_text2img(self.ak, self.signature_kontext_text2img, self.time_stamp,
                                                  self.signature_nonce)
        self.kontext_img2img = self.kontext_img2img(self.ak, self.signature_kontext_img2img, self.time_stamp,
                                                  self.signature_nonce)
        self.get_task_status = self.get_task_status_url(self.ak, self.signature_task_status, self.time_stamp,
                                                               self.signature_nonce)

    def hmac_sha1(self, key, code):
        """Return the raw HMAC-SHA1 digest of *code* keyed with *key* (both str)."""
        return hmac.new(key.encode(), code.encode(), hashlib.sha1).digest()

    def _sign(self, key, uri, s_time, ro):
        """Sign ``"<uri>&<timestamp>&<nonce>"`` with HMAC-SHA1 and return it
        URL-safe base64-encoded with the ``=`` padding stripped.

        Shared helper — the three ``_hash_*_sk`` methods previously
        duplicated this logic verbatim.
        """
        data = uri + "&" + str(s_time) + "&" + str(ro)
        return base64.urlsafe_b64encode(self.hmac_sha1(key, data)).rstrip(b'=').decode()

    def _hash_kontext_text2img_sk(self, key, s_time, ro):
        """Signature for the text2img submit endpoint."""
        return self._sign(key, "/api/generate/kontext/text2img", s_time, ro)

    def _hash_kontext_img2img_sk(self, key, s_time, ro):
        """Signature for the img2img submit endpoint."""
        return self._sign(key, "/api/generate/kontext/img2img", s_time, ro)

    def _hash_task_status_sk(self, key, s_time, ro):
        """Signature for the task-status endpoint."""
        return self._sign(key, "/api/generate/status", s_time, ro)

    def kontext_text2img(self, ak, signature, time_stamp, signature_nonce):
        """Build the signed text2img submit URL."""
        return f"https://openapi.liblibai.cloud/api/generate/kontext/text2img?AccessKey={ak}&Signature={signature}&Timestamp={time_stamp}&SignatureNonce={signature_nonce}"

    def kontext_img2img(self, ak, signature, time_stamp, signature_nonce):
        """Build the signed img2img submit URL."""
        return f"https://openapi.liblibai.cloud/api/generate/kontext/img2img?AccessKey={ak}&Signature={signature}&Timestamp={time_stamp}&SignatureNonce={signature_nonce}"

    def get_task_status_url(self, ak, signature, time_stamp, signature_nonce):
        """Build the signed task-status URL."""
        return f"https://openapi.liblibai.cloud/api/generate/status?AccessKey={ak}&Signature={signature}&Timestamp={time_stamp}&SignatureNonce={signature_nonce}"

    def kontext_img2img_edit(self, url, prompt, model):
        """Submit an img2img edit job for the image at *url* and wait for the result.

        :param url: source image URL
        :param prompt: edit instruction
        :param model: currently unused — the template UUID fixes the model
        :return: whatever :meth:`run` returns (status payload / None / error str)
        """
        base_json = {
            "templateUuid": "1c0a9712b3d84e1b8a9f49514a46d88c",
            "generateParams": {
                # "model":model,
                "prompt": prompt,
                "aspectRatio": "2:3",
                "guidance_scale": 3.5,
                "imgCount": 1,
                "image_list": [
                    url]
            }
        }
        return self.run(base_json, self.kontext_img2img)

    def run(self, data, url, timeout=300):
        """Submit *data* to the generation endpoint *url*, then poll the status
        endpoint every ``self.interval`` seconds until images are available.

        :return: final status payload dict on success, ``None`` on timeout, or
            an error string when submission is rejected — callers must check
            the type (inconsistent return kept for backward compatibility).
        """
        start_time = time.time()  # poll deadline reference
        # Submit the job; code == 0 means accepted and we get a task uuid.
        print(url)
        response = requests.post(url=url, headers=self.headers, json=data)
        response.raise_for_status()
        progress = response.json()
        print(progress)
        if progress['code'] == 0:
            while True:
                current_time = time.time()
                if (current_time - start_time) > timeout:
                    print(f"{timeout}s任务超时，已退出轮询。")
                    return None

                generate_uuid = progress["data"]['generateUuid']
                data = {"generateUuid": generate_uuid}
                response = requests.post(url=self.get_task_status, headers=self.headers, json=data)
                response.raise_for_status()
                progress = response.json()
                print(progress)

                # Done once at least one non-null image entry is present.
                if progress['data'].get('images') and any(
                        image for image in progress['data']['images'] if image is not None):
                    print("任务完成，获取到图像数据。")
                    return progress

                print(f"任务尚未完成，等待 {self.interval} 秒...")
                time.sleep(self.interval)
        else:
            return f'任务失败,原因：code {progress["msg"]}'

import asyncio
def get_prompt(image_url, cap):
    """Ask the qwen-vl-plus model about the image at *image_url* using the
    instruction *cap*, and return the model's text reply.

    Uses the module-level synchronous ``client``.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_url}},
                {"type": "text", "text": cap},
            ],
        }
    ]
    completion = client.chat.completions.create(model="qwen-vl-plus", messages=messages)
    dumped = completion.model_dump()
    return dumped['choices'][0]['message']['content']


async def test_qwen_async():
    """Ask qwen-vl-plus what type of garment is in a fixed sample image and
    return the raw response payload as a dict.

    NOTE(review): this awaits ``client.responses.create``, which requires the
    module-level ``client`` to be an async client — confirm before use.
    """
    clothes_type = "上衣" #["上衣", "裤子/半身裙", "连衣裙", "套装"]
    image_url = "http://huayiyi-ai.oss-cn-shanghai.aliyuncs.com/test_tmp%2F1756694451605%20-%2038f8a920-aeb6-4ac9-b223-699541f5191c.jpg"
    prompt = f"图片中的{clothes_type}的具体服装类型是什么，你的回复只需要服装类型的英文单词就好了，其他什么都不要回复"
    # Inlined the former nested `run_chat` helper — one call, one place.
    completion = await client.responses.create(
        model="qwen-vl-plus",
        input=[
            {
                "role": "user",
                "content": [
                    {"type": "input_image", "image_url": f"{image_url}"},
                    {"type": "input_text", "text": prompt},
                ],
            }
        ],
    )
    return completion.model_dump()

if __name__ == '__main__':
    import asyncio
    from openai import DefaultAioHttpClient
    from openai import AsyncOpenAI

    # Demo: ask the VLM what kind of garment is shown in the sample image.
    clothes_type = "上衣" #["上衣", "裤子/半身裙", "连衣裙", "套装"]
    image_url = "http://huayiyi-ai.oss-cn-shanghai.aliyuncs.com/test_tmp%2F1756694451605%20-%2038f8a920-aeb6-4ac9-b223-699541f5191c.jpg"

    prompt = f"图片中的{clothes_type}的具体服装类型是什么，你的回复只需要服装类型的英文单词就好了，其他什么都不要回复"

    async def main(image_url, prompt) -> None:
        """Send one vision chat request with a dedicated aiohttp-backed async
        client; print the raw completion and the extracted answer text
        (e.g. ``Jacket``)."""
        async with AsyncOpenAI(
            api_key="sk-68651914ca9a4dd6b10ad2bd2fcc20af",
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
            http_client=DefaultAioHttpClient(),
        ) as client:
            chat_completion = await client.chat.completions.create(
                model="qwen-vl-plus",
                messages=[{"role": "user","content": [
                            {"type": "image_url",
                            "image_url": {"url": image_url}},
                            {"type": "text", "text": prompt},
                            ]}]
            )
            print(chat_completion)
            res = chat_completion.model_dump()
            # Renamed from `prompt` to avoid shadowing the parameter.
            answer = res['choices'][0]['message']['content']
            print( answer )

    asyncio.run(main(image_url, prompt))