import json
import traceback
import aiohttp
import asyncio
from tqdm import tqdm
import pandas as pd
import base64
from PIL import Image
from io import BytesIO
import yaml
from openai import OpenAI
from loguru import logger
import copy
import sys,os

# Make sibling modules importable when this file is run directly as a script.
sys.path.append(os.path.abspath(os.path.dirname(__file__)))

# Replace loguru's default handler: log INFO and above to stderr.
logger.remove()
logger.add(sink=sys.stderr, level="INFO")

class AsyncLLMClient:
    """Async client for a token-authenticated chat-completions gateway.

    Wraps login (app_id/app_secret -> userId/token), retried chat requests
    over a shared aiohttp session, and single/batch helpers for text and
    image+text generation.
    """

    def __init__(self, app_id, app_secret, url, chat_url, model, timeout=60, Semaphore=20):
        """
        Store the common connection parameters.

        Args:
            app_id (str): application id
            app_secret (str): application secret
            url (str): authentication endpoint URL
            chat_url (str): chat-completions endpoint URL
            model (str): model name sent with every request
            timeout (int): per-request timeout in seconds, default 60
            Semaphore (int): max concurrent in-flight requests used by the
                batch helpers (capitalized name kept for backward
                compatibility with existing keyword callers)
        """
        self.app_id = app_id
        self.app_secret = app_secret
        self.url = url
        self.chat_url = chat_url
        self.model = model
        self.timeout = timeout
        self.Semaphore = Semaphore
        self.authority = None   # last successful auth payload (dict) or None
        self.session = None     # lazily created aiohttp.ClientSession

    async def initialize(self):
        """Create the shared aiohttp session on first use; returns self."""
        if self.session is None:
            self.session = aiohttp.ClientSession()
        return self

    async def close(self):
        """Close and forget the shared session (safe to call repeatedly)."""
        if self.session:
            await self.session.close()
            self.session = None

    @staticmethod
    def _encode_image(image_path):
        """
        Encode an image file as base64.

        Args:
            image_path (str): path of the image file

        Returns:
            str: base64-encoded image bytes, re-serialized in the image's
                own format
        """
        pil_image = Image.open(image_path)
        img_byte_arr = BytesIO()
        # NOTE(review): pil_image.format is None for images not loaded from
        # disk; callers are expected to pass real image files here.
        pil_image.save(img_byte_arr, format=pil_image.format)
        return base64.b64encode(img_byte_arr.getvalue()).decode('utf-8')

    async def refresh_authority(self):
        """Log in again and cache the auth payload in self.authority.

        Raises:
            Exception: when the auth endpoint does not report success.
        """
        if self.session is None:
            await self.initialize()

        payload = {"app_id": self.app_id, "app_secret": self.app_secret}
        headers = {"Content-Type": "application/json"}

        async with self.session.post(self.url, json=payload, headers=headers, timeout=self.timeout) as response:
            data = await response.json()
            if data.get("success"):
                self.authority = data
            else:
                raise Exception(f"Failed to authenticate: {data}")

    @staticmethod
    def find_key_in_nested_dict(nested_dict, target_key):
        """
        Find a key anywhere inside a nested dict (iterative DFS).

        Args:
            nested_dict (dict): the (possibly nested) dict to search
            target_key (str): key to look for
        Returns:
            The value of the first occurrence found, otherwise None.
        """
        if not isinstance(nested_dict, dict):
            return None

        stack = [nested_dict]
        while stack:
            current_dict = stack.pop()
            if target_key in current_dict:
                return current_dict[target_key]
            for value in current_dict.values():
                if isinstance(value, dict):
                    stack.append(value)
        return None

    @staticmethod
    def save_file(path, instruction, predict_result):
        """Append one JSONL record (instruction + prediction) to `path`.

        Writes a shallow copy of `instruction` so the caller's dict is not
        polluted with result keys (the original aliased and mutated it).
        """
        record = dict(instruction)  # bug fix: do not mutate the caller's dict
        if isinstance(predict_result, dict):
            record['reasoning_content'] = predict_result.get('reasoning_content', "")
            # when the dict carries no 'content', store the whole dict as-is
            record['predict_result'] = predict_result.get('content', predict_result)
        else:
            record['reasoning_content'] = ""
            record['predict_result'] = predict_result
        with open(path, 'a', encoding='utf-8') as f:
            f.write(json.dumps(record, ensure_ascii=False) + "\n")

    async def get_gpt_response(self, messages, chat_url, temperature=0, try_num=3, pbar=None):
        """
        Call the model with authentication, retrying on failure.

        Args:
            messages (list): chat messages in OpenAI messages format
            chat_url (str): chat endpoint URL
            temperature (float): sampling temperature
            try_num (int): maximum number of attempts
            pbar (tqdm): optional progress bar, advanced on success
        Returns:
            The message dict (when the reply carries reasoning_content),
            the plain content string otherwise, or an "error: ..." string
            after all attempts fail.
        """
        if self.session is None:
            await self.initialize()

        error_messages = ""
        try:
            # validate roles up front; AssertionError is caught below and
            # surfaced as an "error: ..." return, same as any setup failure
            assert isinstance(messages, list) and not [x for x in messages if x["role"] not in {"system", "user", "assistant"}]
            chat_d = {
                "model": self.model,
                "messages": messages,
                "temperature": temperature
            }
            if not self.authority:
                await self.refresh_authority()
        except Exception:
            error_messages = traceback.format_exc()
            logger.error(error_messages)
            return "error: " + error_messages

        for try_id in range(try_num):
            try:
                # Bug fix: build headers inside the loop so a token refreshed
                # after a failed attempt is actually used on the next attempt
                # (previously the stale token was re-sent on every retry).
                chat_h = {
                    "Content-Type": "application/json",
                    "userId": self.authority["data"]["user_id"],
                    "token": self.authority["data"]["token"]
                }
                async with self.session.post(chat_url, json=chat_d, headers=chat_h, timeout=self.timeout) as response:
                    response_data = await response.json()
                    choices = self.find_key_in_nested_dict(response_data, 'choices')
                    if 'success' in response_data and not response_data.get('success'):
                        error_messages = str(response_data)  # keep last failure reason
                        logger.error("Retry time:{}\n{}".format(try_id, response_data))
                        continue
                    if choices is None:
                        error_messages = str(response_data)
                        logger.error("Retry time:{}\n{}".format(try_id, response_data))
                        continue
                    if pbar:
                        pbar.update(1)
                    logger.debug(response_data)
                    message = choices[0]["message"]
                    # reasoning models return the whole message dict so the
                    # caller can keep the reasoning trace
                    return message if "reasoning_content" in message else message["content"]
            except Exception:
                error_messages = traceback.format_exc()
                logger.error("Retry time:{}\n{}".format(try_id, error_messages))
                try:
                    # token may have expired; try to re-authenticate before
                    # the next attempt, but don't let a failing auth endpoint
                    # abort the remaining retries
                    await self.refresh_authority()
                except Exception:
                    logger.error(traceback.format_exc())
        return "error: " + str(error_messages)

    async def text2text(self, instruction, temperature=0, pbar=None, output_file=None):
        """
        Generate text from a text prompt.

        Args:
            instruction (str|dict): prompt text, or a {"messages": [...]} dict
            temperature (float): sampling temperature
            pbar (tqdm): optional progress bar forwarded to get_gpt_response
            output_file (str): optional JSONL path to append the record to
        Returns:
            Model output (str/dict) or an "error: ..." string.
        """
        if isinstance(instruction, str):
            messages = [
                {"role": "user", "content": instruction}
            ]
            # normalize to the dict shape expected by save_file
            instruction = {'messages': messages}
        else:
            messages = instruction['messages']

        predict_result = await self.get_gpt_response(messages, self.chat_url, temperature=temperature, pbar=pbar)
        if output_file:
            self.save_file(output_file, instruction, predict_result)
        return predict_result

    async def image2text(self, instruction, image=None, temperature=0, pbar=None, output_file=None):
        """
        Generate text from an image+text prompt.

        Args:
            instruction (str|dict): prompt text, or a {"messages": [...]} dict
            image (str): image path; may be None when `instruction` already
                carries image_url parts
            temperature (float): sampling temperature
            pbar (tqdm): optional progress bar forwarded to get_gpt_response
            output_file (str): optional JSONL path to append the record to
        Returns:
            Model output (str/dict) or an "error: ..." string.
        """
        if isinstance(instruction, str):
            messages = [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": instruction
                        },
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{self._encode_image(image)}"
                            }
                        }
                    ]
                }
            ]
            # The record saved to disk keeps the raw image path instead of
            # the base64 payload, keeping the output file small.
            instruction = {
                "messages": [
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "text",
                                "text": instruction
                            },
                            {
                                "type": "image_url",
                                "image_url": {
                                    "url": image
                                }
                            }
                        ]
                    }
                ]
            }
        else:
            # deep-copy so the caller's messages keep their original image
            # paths; only the outgoing copy gets base64 payloads
            messages = copy.deepcopy(instruction['messages'])
            for i, item in enumerate(messages):
                # assumes every message "content" is a list of typed parts;
                # a plain-string content would raise here — TODO confirm
                for j, part in enumerate(item["content"]):
                    if part.get("type") == "image_url":
                        messages[i]["content"][j]["image_url"] = {
                            "url": f"data:image/jpeg;base64,{self._encode_image(part['image_url']['url'])}"
                        }

        predict_result = await self.get_gpt_response(messages, self.chat_url, temperature, pbar=pbar)
        if output_file:
            self.save_file(output_file, instruction, predict_result)
        return predict_result

    async def images2texts(self, instructions: list, temperature=0, images: list=None, output_file=None):
        """
        Batch image+text generation.

        Args:
            instructions (list): list of prompts (str or messages-format dict)
            temperature (float): sampling temperature
            images (list): optional image paths, zipped with `instructions`
            output_file (str): optional JSONL path to append records to
        Returns:
            list: results in the same order as `instructions`.
        """
        await self.initialize()  # make sure the session exists

        pbar = tqdm(total=len(instructions), desc="Processing Image-Text Pairs")

        # bound concurrency so the gateway is not flooded
        semaphore = asyncio.Semaphore(self.Semaphore)

        async def process_with_semaphore(instruction, image=None):
            async with semaphore:
                return await self.image2text(instruction, image, temperature, pbar=pbar, output_file=output_file)

        try:
            if images:
                tasks = [process_with_semaphore(instruction, image)
                        for instruction, image in zip(instructions, images)]
            else:
                tasks = [process_with_semaphore(instruction)
                        for instruction in instructions]

            # gather preserves input order in its result list
            results = await asyncio.gather(*tasks)
        finally:
            pbar.close()

        return results

    async def texts2texts(self, instructions: list, temperature=0, output_file=None):
        """
        Batch text generation.

        Flattens any multimodal "content" lists into plain text in place
        (callers observe the rewritten `instructions[i]['messages']`).

        Args:
            instructions (list): list of prompts (str or messages-format dict)
            temperature (float): sampling temperature
            output_file (str): optional JSONL path to append records to
        Returns:
            list: results in the same order as `instructions`.
        """
        await self.initialize()  # make sure the session exists
        # assumes the list is homogeneous: inspecting element 0 decides the
        # handling for all items — TODO confirm with callers
        if not isinstance(instructions[0], str):
            for i, item in enumerate(instructions):
                if "messages" in item:
                    item = item['messages']
                flattened = []
                for text_item in item:
                    flattened.append({
                        "role": text_item["role"],
                        # join typed text parts into one plain string
                        "content": "\n".join([x["text"] for x in text_item["content"]]) if isinstance(text_item["content"], list) else text_item["content"]
                    })
                instructions[i]['messages'] = flattened

        pbar = tqdm(total=len(instructions), desc="Processing Text-to-Text")

        # bound concurrency so the gateway is not flooded
        semaphore = asyncio.Semaphore(self.Semaphore)

        async def process_with_semaphore(message):
            async with semaphore:
                return await self.text2text(message, temperature=temperature, pbar=pbar, output_file=output_file)

        try:
            tasks = [process_with_semaphore(message) for message in instructions]
            # gather preserves input order in its result list
            results = await asyncio.gather(*tasks)
        finally:
            pbar.close()

        return results

class GPT4OClient(AsyncLLMClient):
    """AsyncLLMClient preconfigured for the GPT-4o chat endpoint."""

    # NOTE(review): shipping credentials as default arguments exposes them in
    # source control; consider loading them from the environment instead.
    def __init__(
        self,
        app_id="5cafd46a3b2342b5a903afafc38d4aef",
        app_secret="iv+JZxHTQKrpYmxx1U9HNyXEjJNZcUhlBCkmT/lSYIE=",
        url="https://arsenal-openai.10jqka.com.cn:8443/vtuber/auth/api/oauth/v1/login",
        chat_url="https://arsenal-openai.10jqka.com.cn:8443/vtuber/ai_access/chatgpt/v3/chat/completions",
        model="gpt-4o",
        timeout=60,
    ):
        super().__init__(
            app_id=app_id,
            app_secret=app_secret,
            url=url,
            chat_url=chat_url,
            model=model,
            timeout=timeout,
        )

class KIMIClient(AsyncLLMClient):
    # Preconfigured client for the KIMI (Moonshot) endpoint.
    def __init__(self, app_id="5cafd46a3b2342b5a903afafc38d4aef",
        app_secret="iv+JZxHTQKrpYmxx1U9HNyXEjJNZcUhlBCkmT/lSYIE=",
        url="https://arsenal-openai.10jqka.com.cn:8443/vtuber/auth/api/oauth/v1/login",
        chat_url="https://arsenal-openai.10jqka.com.cn:8443/vtuber/ai_access/kimi_moonshot/v1/chat/completions",
        model="moonshot-v1-8k",timeout=60):
        """
        KIMI initialization parameters. Note that KIMI does not support
        multimodal (image) inputs.

        Args:
            app_id (str): application id
            app_secret (str): application secret
            url (str): authentication endpoint URL
            chat_url (str): chat endpoint URL
            model (str): model name
            timeout (int): request timeout in seconds; the signature default
                is 60 (the previous docstring's claim of 800 did not match
                the code)
        """
        super().__init__(app_id, app_secret, url, chat_url, model, timeout)

class CLAUDEClient(AsyncLLMClient):
    """AsyncLLMClient preconfigured for the Claude 3.5 Sonnet endpoint."""

    # NOTE(review): credentials are hard-coded defaults; consider reading
    # them from the environment instead.
    def __init__(
        self,
        app_id="5cafd46a3b2342b5a903afafc38d4aef",
        app_secret="iv+JZxHTQKrpYmxx1U9HNyXEjJNZcUhlBCkmT/lSYIE=",
        url="https://arsenal-openai.10jqka.com.cn:8443/vtuber/auth/api/oauth/v1/login",
        chat_url="https://arsenal-openai.10jqka.com.cn:8443/vtuber/ai_access/claude/v1/chat/completions",
        model="us.anthropic.claude-3-5-sonnet-20241022-v2:0",
        timeout=60,
    ):
        super().__init__(
            app_id=app_id,
            app_secret=app_secret,
            url=url,
            chat_url=chat_url,
            model=model,
            timeout=timeout,
        )

class GEMINIClient(AsyncLLMClient):
    """AsyncLLMClient preconfigured for the Gemini 2.0 Flash endpoint.

    Uses a lower concurrency cap (5) than the base default.
    """

    def __init__(
        self,
        app_id="5cafd46a3b2342b5a903afafc38d4aef",
        app_secret="iv+JZxHTQKrpYmxx1U9HNyXEjJNZcUhlBCkmT/lSYIE=",
        url="https://arsenal-openai.10jqka.com.cn:8443/vtuber/auth/api/oauth/v1/login",
        chat_url="https://arsenal-openai.10jqka.com.cn:8443/vtuber/ai_access/gemini/v1/chat/completions",
        model="gemini-2.0-flash",
        timeout=60,
    ):
        super().__init__(
            app_id=app_id,
            app_secret=app_secret,
            url=url,
            chat_url=chat_url,
            model=model,
            timeout=timeout,
            Semaphore=5,
        )
    
class DEEPSEEKClient(AsyncLLMClient):
    """AsyncLLMClient preconfigured with a longer timeout and low concurrency.

    NOTE(review): despite the class name, the chat_url points at the "doubao"
    route and the model is an "ep-..." endpoint id — presumably a DeepSeek
    deployment behind that route; confirm with the gateway owner.
    """

    def __init__(
        self,
        app_id="5cafd46a3b2342b5a903afafc38d4aef",
        app_secret="iv+JZxHTQKrpYmxx1U9HNyXEjJNZcUhlBCkmT/lSYIE=",
        url="https://arsenal-openai.10jqka.com.cn:8443/vtuber/auth/api/oauth/v1/login",
        chat_url="https://arsenal-openai.10jqka.com.cn:8443/vtuber/ai_access/doubao/v3/chat/completions",
        model="ep-20250204210426-gclbn",
        timeout=200,
    ):
        super().__init__(
            app_id=app_id,
            app_secret=app_secret,
            url=url,
            chat_url=chat_url,
            model=model,
            timeout=timeout,
            Semaphore=5,
        )

class OtherGPTClient:
    """Synchronous client for any OpenAI-compatible chat endpoint."""

    def __init__(self, api_key, base_url, model):
        self.api_key = api_key
        self.base_url = base_url
        self.model = model
        self.client = OpenAI(api_key=api_key, base_url=base_url)

    def text2text(self, instruction, temperature=0.0):
        """
        Generate text from a text prompt.

        Args:
            instruction (str): input text
            temperature (float): sampling temperature
        Returns:
            str | None: the model reply, or None when the call fails
                (the error is logged).
        """
        try:
            completion = self.client.chat.completions.create(
                model=self.model,
                messages=[{"role": "user", "content": instruction}],
                stream=False,
                temperature=temperature,
            )
            return completion.choices[0].message.content
        except Exception as e:
            logger.error(f"{e}")
            return None

async def debug():
    """Manual smoke test: run one plain text2text call at DEBUG verbosity."""
    logger.remove()
    logger.add(sink=sys.stderr, level="DEBUG")

    gpt4o = GPT4OClient()
    ds = DEEPSEEKClient()
    claude = CLAUDEClient()
    gemini = GEMINIClient()

    # Sample multi-turn conversation, kept around for ad-hoc experiments.
    instruction = {
        "messages": [
            {"role": "user", "content": "你好你是谁nenene"},
            {"role": "assistant", "content": "你好！我是一个人工智能助手，可以帮助回答问题、提供信息或协助解决问题。如果你有任何问题或需要帮助，请随时告诉我！"},
            {"role": "user", "content": "明天天气如何"},
        ]
    }

    answer = await gpt4o.text2text("是否可以用世界上最大的湖填满世界上最大的沙漠？")
    print(answer)

async def debug2():
    # Manual experiment: iteratively ask GPT-4o to generate UI test cases from
    # two screenshots, appending each round's output to a local report file.
    logger.remove()
    logger.add(sink=sys.stderr, level="DEBUG")

    gpt4o = GPT4OClient()
    gemini = GEMINIClient()
    # with open('/mnt/data/users/fyb/mywork/GUI-data/inp.jsonl','r') as f:
        # data = [json.loads(l) for l in f][670:]
    prompt="""
    Human: 你是一个测试用例生成Agent，你的职责是根据第一张图片的UI设计图，第二张图片的当前操作界面，和<Demand>中的产品需求，生成全面且合理的测试用例。

    使用如下格式:
    <Demand>: 你需要测试验证的产品需求。
    <Observation>:你上次生成的<Action>中测试用例的测试结果。
    <Thought>: 你需要输出<FINISHED>或<UNFINISHED>。你必须总是根据<Observation>中已经执行的测试用例结果，来判断测试用例是否已覆盖<Demand>中的需求。如果已满足，则输出<FINISHED>；如果不满足，则输出<UNIFINISHED>，并思考应如何生成新用例，并在<Action>中输出：
    <Action>:你生成的测试用例，按下面json格式输出：
        <TestCase>
        {
            "操作名称":(该测试用例名称),
            "测试点":(测试动作，仅描述目标),
            "预期测试结果":（该测试应达到的结果）,
        }

    ... (如此 <Observation>/<Thought>/<Action> 可以重复 N 次)

    
    你务必遵守以下原则：
    1. 最重要的是：你的思考一定要深入全面，仔细分析UI界面内容、需求和已经测试的用例，这很大程度上决定了新测试用例的合理性。
    2. 你接下来的每一次思考都要基于已经执行过的测试用例，客观地判断是继续生成测试用例还是结束。
    3. 你每次<Action>仅需要生成一个测试用例，每个测试用例仅含一个测试动作/逻辑。
    4. 在每个测试点中，如果涉及新建、新建、查询等需要输入内容的任务时，请你结合需求和测试目标给出示例内容输入，以便我可以直接填入UI界面。
    5. 如果同一个测试点连续两次失败，请转向下一个测试点。
    6. 你只需要生成<Thought>和<Action>部分内容。
    7. 你在<Thought>中必须先判断生成<FINISHED>或是<UNFINISHED>。

    Begin!

    <Demand>：
    参考所提供的UI界面旁标注的需求，或者UI界面旁的文字需求说明。

    <Observation>:


"""
    max_step=30
    with open ("/mnt/data/users/fyb/mywork/GUI-data/gpt4o_test_case_newprompt.txt",'w') as f:
            f.write("")# truncate the report file before the run
    for i in range(max_step):
        instruction ={"messages": [{"role": "user", "content": [{"type": "text", "text": "描述图片内容"}, {"type": "image_url", "image_url": {"url": "test_pic.jpg"}},{"type": "image_url", "image_url": {"url": "new.jpg"}}]}]}
        result = await gpt4o.image2text(instruction)
        # result = await gemini.image2text(instruction,output_file="/mnt/data/users/fyb/output.txt")
        print(result)
        # NOTE(review): image2text can return a message dict (reasoning
        # models) or an "error: ..." string; result+"\n" assumes a plain
        # string reply — confirm the configured model never emits
        # reasoning_content.
        res=result+"\n"
        # print(gemini_result)
        prompt=prompt+res+"""
        <Observation>
        - Conclusion：OK.
        - Reason：指令是验证同花顺功能。通过点击同花顺图标，手机界面从手机桌面界面<图1>成功跳转到了显示了同花顺主界面<图2>。符合预期。
        """
        with open("/mnt/data/users/fyb/mywork/GUI-data/gpt4o_test_case_newprompt.txt",'a') as f:
            f.write(f"------------------第{i+1}次问询：------------------\n"+res)
        if not (result.find('<UNFINISHED>')>=0 or result.find('<TestCase>')>=0) :
            break

async def _send(prompt):
    """Run a single text2text request through the Gemini client."""
    agent = GEMINIClient()
    result = await agent.text2text(prompt)
    return result

def send(prompt):
    """Blocking wrapper: run _send(prompt) on a fresh event loop."""
    coroutine = _send(prompt)
    return asyncio.run(coroutine)


# Example usage
if __name__ == "__main__":
    # asyncio.run(debug())
    # Bug fix: the previous call was `send(1)`; text2text() expects a str
    # prompt or a {"messages": [...]} dict, so passing the int 1 raised
    # TypeError ('int' object is not subscriptable) before any request.
    print(send("hello"))