import json
import sys
import time
from typing import List, Optional, Dict, Callable,Any, Union,Type
import typing
from openai import OpenAI, pydantic_function_tool #type: ignore
import jieba #type: ignore
import jieba.analyse #type: ignore
from pydantic import BaseModel
from termcolor import colored
from functools import wraps
import typing
import base64
import os
from operation import *
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
def escapeLn(string: Union[str, dict]) -> str:
    """Render a value for display, turning literal ``\\n`` sequences into real newlines.

    Dicts are serialized to JSON first (non-ASCII characters preserved);
    strings are converted in place; any other type falls back to ``str()``
    after logging a warning.
    """
    if isinstance(string, dict):
        serialized = json.dumps(string, ensure_ascii=False)
        return serialized.replace('\\n', '\n')
    if isinstance(string, str):
        return string.replace('\\n', '\n')
    print(f"Invalid input type: {type(string)}")
    return str(string)
# SECURITY(review): an API key was previously hard-coded here in plain text.
# Prefer the OPENAI_API_KEY / OPENAI_BASE_URL environment variables; the
# hard-coded values remain only as a backward-compatible fallback and the
# leaked key should be rotated, then the fallback removed.
client = OpenAI(
    api_key=os.environ.get(
        'OPENAI_API_KEY',
        'sk-jo2vXbIRMaUshbqZ05812d5cCe634d818180A41e22E02273',
    ),
    base_url=os.environ.get('OPENAI_BASE_URL', 'https://api.openai-next.com/v1'),
)
# client = OpenAI(
#     api_key='sk-11ffe74a9d494bc3bb2d5ca3b27eedbd',
#     base_url='https://dashscope.aliyuncs.com/compatible-mode/v1'
# )
import tiktoken

def num_tokens_from_string(string: str, encoding_name: str) -> int:
    """Return how many tokens *string* occupies under the named tiktoken encoding."""
    codec = tiktoken.get_encoding(encoding_name)
    return len(codec.encode(string))

class PrettyMessages(list):
    """
    A ``list`` subclass whose ``str()`` pretty-prints ONLY the last message item.

    Supports system / user / assistant / tool messages, supplied either as
    plain dicts (OpenAI wire format) or as SDK message objects.
    """

    @staticmethod
    def _tool_call_parts(tool_call):
        # Accept both dict-shaped and SDK-object tool calls; the old code
        # assumed attribute access and crashed on dict messages.
        if isinstance(tool_call, dict):
            fn = tool_call.get("function", {})
            return fn.get("name", ""), fn.get("arguments", "")
        return tool_call.function.name, tool_call.function.arguments

    def __str__(self):
        if not self:
            return ""
        msg = self[-1]
        if isinstance(msg, dict):
            role = msg.get("role", "")
            content = msg.get("content", "")
            tool_calls = msg.get("tool_calls", [])
        else:
            role = msg.role.lower()
            content = msg.content
            tool_calls = msg.tool_calls
        rtnstr = ""
        color = "light_blue"
        if role == "system":
            rtnstr = f"[System]\n{content}\n"
            color = "light_blue"
        elif role == "user":
            if isinstance(content, list):
                rtnstr = "[User]\n"
                for item in content:
                    if item['type'] == 'text':
                        rtnstr += f"{escapeLn(item['text'])}\n"
                    elif item['type'] == 'image_url':
                        # Data-URL images can be huge; show only a 100-char preview.
                        rtnstr += f"{escapeLn(item['image_url']['url'][0:100])}\n"
            else:
                rtnstr = f"[User]\n{escapeLn(content)}\n"
            color = "light_blue"
        elif role == "assistant":
            if content:
                rtnstr += f"[Assistant]\n{escapeLn(content)}\n"
            for tool_call in tool_calls or []:
                name, arguments = self._tool_call_parts(tool_call)
                rtnstr += f"[{name}]\n{escapeLn(arguments)}\n"
            color = "light_cyan"
        elif role == "tool":
            # BUGFIX: use the `content` already extracted above (the old code
            # called msg.get(...) again, which crashed for non-dict messages),
            # and the non-str branch no longer references undefined names
            # (previously a guaranteed NameError).
            if isinstance(content, str):
                try:
                    tool_result = json.loads(content)
                except json.JSONDecodeError:
                    # Non-JSON tool output: display it verbatim.
                    tool_result = content
                tool_name = (
                    tool_result.get('tool_name', 'N/A')
                    if isinstance(tool_result, dict) else 'N/A'
                )
                rtnstr = f"[{tool_name} Return]\n{escapeLn(tool_result)}\n"
            else:
                rtnstr = f"[Tool Return(not str)]\n{escapeLn(content)}\n"
            color = "light_green"
        else:
            rtnstr = f"{escapeLn(msg)}\n"
        return colored(rtnstr, color)

class LLMService:
    """
    Thin wrapper around the module-level OpenAI ``client``.

    - embedding(content: List[str]) -> List[List[float]]
      Batch text embedding with automatic chunking so each request stays
      within the API's per-request limits.
    - initContent(...) -> None
      Builds a user message (text and/or base64 image) and appends it to
      ``messages``.
    - funcCallLLM(...)
      Runs one chat-completion round with function-calling tools enabled.
    """
    def embedding(self, content: List[str]) -> List[List[float]]:
        """
        Embed a list of texts with "text-embedding-3-small".

        The input is split into batches of at most 2047 items / 8191 tokens
        (just under the API caps of 2048 / 8192) and the per-batch results
        are concatenated, so the output aligns 1:1 with ``content``.
        """
        def tokens_in_string(s: str) -> int:
            # Token count under cl100k_base, the tokenizer used by the
            # embedding endpoint.
            return num_tokens_from_string(s, "cl100k_base")
        def _get_embedding_batch(batch: List[str]) -> List[List[float]]:
            # Debug trace: batch size, total character count, content preview.
            print(
                "batch list length",
                len(batch),
                ", word length",
                len("".join(batch)),
                ", content",
                str(batch)[0:30])
            # print(colored(f"_get_embedding_batch: {len(batch)} {str(batch)[:100]}", "magenta"))
            response = client.embeddings.create(
                model="text-embedding-3-small",
                input=batch
            )
            # print(colored(  f"RETURN len:{len(response.data)} {response.data[0].embedding[0:2]}", "magenta"))
            return [emb.embedding for emb in response.data]
        def _chunk_and_embed(all_texts: List[str]) -> List[List[float]]:

            all_embeddings: List[List[float]] = []
            current_batch: List[str] = []
            current_batch_token_sum = 0
            for text in all_texts:
                text_token_count = tokens_in_string(text)
                # If adding this text would push the batch over the item or
                # token budget, flush the accumulated batch first.
                if (
                    len(current_batch) + 1 > 2047  # item-count limit exceeded
                    or current_batch_token_sum + text_token_count > 8191  # token limit exceeded
                ):
                    print(f"current_batch_token_sum: {current_batch_token_sum}, text_token_count: {text_token_count} ,total: {current_batch_token_sum+text_token_count}")
                    print(f"current_batch_len: {len(current_batch)}")
                    # Fetch embeddings for the batch accumulated so far.
                    if current_batch:
                        batch_embeddings = _get_embedding_batch(current_batch)
                        all_embeddings.extend(batch_embeddings)
                    # Start a fresh batch seeded with the current text.
                    # NOTE(review): a single text longer than 8191 tokens is
                    # still sent alone and would be rejected by the API.
                    current_batch = [text]
                    current_batch_token_sum = text_token_count
                else:
                    # Otherwise accumulate into the current batch.
                    current_batch.append(text)
                    current_batch_token_sum += text_token_count

            # Flush the final (possibly partial) batch.
            if current_batch:
                # print(f"last batch: current_batch_token_sum: {current_batch_token_sum}, text_token_count: {text_token_count} ,total: {current_batch_token_sum+text_token_count}")
                # print(f"last batch: current_batch_len: {len(current_batch)}")
                batch_embeddings = _get_embedding_batch(current_batch)
                all_embeddings.extend(batch_embeddings)

            return all_embeddings


        # Run the batching pipeline and collect the results.
        embs = _chunk_and_embed(content)
        # Debug dump only when this module is executed as a script.
        if __name__ == "__main__":
            print("\n".join(
                [
                    f"[{i}] {content[i]}->{str(emb[:2])}"
                    for i, emb in enumerate(embs)
                ]
            ))
        return embs
    def initContent(self,content:str,imagePath:str,messages:list[typing.Any]) -> None:
        """
        Build a multimodal user message from ``content`` (text) and/or
        ``imagePath`` (local image file, embedded as a base64 data URL) and
        append it to ``messages``; does nothing if both are empty.
        """
        contentList:List[Dict[str,Any]]=[]
        if content!="":
            contentList.append({"type":"text","text":content})
        if imagePath!="":
            if os.path.exists(imagePath):
                # Determine the mime type based on file extension
                mime_type = "image/jpeg"  # Default
                if imagePath.lower().endswith('.png'):
                    mime_type = "image/png"
                elif imagePath.lower().endswith('.gif'):
                    mime_type = "image/gif"
                elif imagePath.lower().endswith('.webp'):
                    mime_type = "image/webp"
                with open(imagePath, "rb") as f:
                    image_content = f.read()
                    base64_image = base64.b64encode(image_content).decode("utf-8")
                    contentList.append({"type":"image_url","image_url":{"url":f"data:{mime_type};base64,{base64_image}"}})
        if contentList:
            messages.append(
                {
                    "role":"user",
                    "content":contentList
                }
            )
            print(PrettyMessages(messages))#user
            sys.stdout.flush()
    @timeit('red')
    def funcCallLLM(self, content: str,imagePath:str, tools:list[Any],messages:list[typing.Any]):
        """
        Append a user message (via ``initContent``), call gpt-4o with the
        given pydantic tool schemas, append the assistant reply to
        ``messages`` and return it.

        ``tools`` are pydantic BaseModel classes converted with
        ``pydantic_function_tool``; ``messages`` is mutated in place.
        """
        self.initContent(content,imagePath,messages)
        tool_params = [pydantic_function_tool(tool) for tool in tools]
        completion = client.chat.completions.create(
            model="gpt-4o",
            messages=messages,
            tools=tool_params,
        )
            # model="gpt-4.5-preview",

        messages.append(completion.choices[0].message)
        print(PrettyMessages(messages))#assistant/toolcall
        sys.stdout.flush()
        return completion.choices[0].message
def test_embedding(test_input=None):
    """Smoke-test ``LLMService.embedding`` and return the embeddings.

    FIX: the previous signature used a mutable default argument
    (``test_input=["Hello, world!"]``); replaced with the ``None``-sentinel
    idiom. Callers that passed a list explicitly are unaffected.
    """
    if test_input is None:
        test_input = ["Hello, world!"]
    service = LLMService()
    return service.embedding(test_input)
def test_generateSum():
    """Smoke-test summary generation on a fixed Chinese paragraph.

    NOTE(review): ``generateSum`` and ``generateSumByKeyWord`` are NOT
    defined on the ``LLMService`` class visible in this file; unless they
    are added elsewhere, these calls raise AttributeError — confirm where
    those methods live before relying on this test.
    """
    service = LLMService()
    # Fixed test input (runtime string, kept verbatim).
    test_input = '''PostgreSQL，又称为Postgres，是一个强大的开源对象关系数据库系统。
    它使用并扩展了SQL语言，并结合了许多功能，可以安全地存储和扩展最复杂的数据工作负载
    。PostgreSQL以其经过验证的架构、可靠性、数据完整性、强大的功能集、可扩展性以及背后的开源社区的奉献精神而赢得了良好的声誉，
    这些社区始终提供高性能和创新的解决方案。PostgreSQL支持所有主要操作系统，自2001年以来一直符合ACID规范，
    并具有强大的附加组件，如广受欢迎的PostGIS地理空间数据库扩展器。'''
    result = service.generateSum(test_input)
    result2 = service.generateSumByKeyWord(test_input)
    print("LLM Summary Result    :", result)
    print("Keyword Summary Result:", result2)
def cos_sim(v1: List[float], v2: List[float]) -> float:
    """Return the cosine similarity of *v1* and *v2*.

    Returns 0.0 when either vector is empty or has zero magnitude.
    Note: the dot product runs over the zipped (shorter) length, while each
    norm is taken over its full vector — same contract as before.
    """
    if not (v1 and v2):
        return 0.0
    magnitude1 = sum(x * x for x in v1) ** 0.5
    magnitude2 = sum(y * y for y in v2) ** 0.5
    if magnitude1 == 0 or magnitude2 == 0:
        return 0.0
    inner = sum(x * y for x, y in zip(v1, v2))
    return inner / (magnitude1 * magnitude2)
if __name__ == '__main__':
#   vecs=test_embedding(["简介 PostgreSQL,PostgreSQL,Postgres,SQL,数据库系统,功能强大,特点,PostgreSQL,Linux,Windows,MacOS,兼容性,扩展性,非常适合,数据库,查询,高度,开源,PostgreSQL,社区,改进,活跃,PostgreSQL,互联网服务,应用,应用程序,存储,学习资源,https,www,postgresql,org,docs,在线教程,https,www,postgresqltutorial,com,PostgreSQL,开源,数据库,受欢迎,灵活性",
#                        "简介 PostgreSQL,Postgres,SQL,数据库系统,功能强大,特点,Linux,Windows,MacOS,兼容性,扩展性,非常适合,数据库,查询,高度,开源,社区,改进,活跃,互联网服务,应用,应用程序,存储,学习资源,https,www,org,docs,在线教程,https,www,com,开源,数据库,受欢迎,灵活性",
#                        "PostgreSQL"])
#   print(cos_sim(vecs[0],vecs[2]),cos_sim(vecs[1],vecs[2]))
  # Demo: one function-calling round asking the model to click a UI button.
  # NOTE(review): resolution(), xmlshotTraverse(), screenshotDrawline() and
  # AGENT_TOOLS come from `from operation import *`; their semantics are not
  # visible in this file — verify in operation.py.
  service = LLMService()
  service.funcCallLLM(f"call tool,click the button 交我办. screen resolution is {resolution()}.XML is {(xmlshotTraverse(True))}", screenshotDrawline(True,200,200,True), AGENT_TOOLS,[])


