import json
import os
import sys
import typing
import warnings
from typing import List, Optional, Type

import jieba  # type: ignore
import jieba.analyse  # type: ignore
from openai import OpenAI, pydantic_function_tool  # type: ignore
from pydantic import BaseModel
from termcolor import colored

from classDef import timeit, trace_info, escapeLn

warnings.filterwarnings("ignore", category=DeprecationWarning)


# SECURITY: an API key (and a second one in commented-out code) was hard-coded
# here and is therefore leaked in version control — it should be rotated.
# Prefer supplying credentials via environment variables; the literal fallback
# is kept only so existing deployments keep working until the key is rotated.
client = OpenAI(
    api_key=os.environ.get(
        "OPENAI_API_KEY",
        'sk-jo2vXbIRMaUshbqZ05812d5cCe634d818180A41e22E02273',
    ),
    base_url=os.environ.get("OPENAI_BASE_URL", 'https://api.openai-next.com/v1'),
)
import tiktoken

def num_tokens_from_string(string: str, encoding_name: str) -> int:
    """Return how many tokens *string* produces under the named tiktoken encoding."""
    tokenizer = tiktoken.get_encoding(encoding_name)
    return len(tokenizer.encode(string))

class PrettyMessages(list):
    """
    A list of chat messages that pretty-prints its LAST entry when str()-ed.

    Handles system / user / assistant / tool roles. Each message may be either
    a plain dict (request style) or an OpenAI response object exposing
    .role / .content / .tool_calls attributes.
    """

    @staticmethod
    def _tool_call_parts(tool_call):
        """Return (function_name, arguments_json) for a dict OR object tool call."""
        if isinstance(tool_call, dict):
            # BUG FIX: dict-style tool calls used to be read with attribute
            # access (tool_call.function.name), which raised AttributeError.
            fn = tool_call.get("function", {}) or {}
            return fn.get("name", ""), fn.get("arguments", "")
        return tool_call.function.name, tool_call.function.arguments

    def __str__(self):
        if not self:
            return ""
        msg = self[-1]
        if isinstance(msg, dict):
            role = msg.get("role", "")
            content = msg.get("content", "")
            tool_calls = msg.get("tool_calls", []) or []
        else:
            role = msg.role.lower()
            content = msg.content
            # .tool_calls can be None on response objects; normalize to a list.
            tool_calls = msg.tool_calls or []
        rtnstr = ""
        color = "light_blue"
        if role == "system":
            rtnstr = f"[System]\n{content}\n"
        elif role == "user":
            rtnstr = f"[User]\n{escapeLn(content)}\n"
        elif role == "assistant" and content:
            rtnstr = f"[Assistant]\n{escapeLn(content)}\n"
            color = "yellow"
        elif role == "assistant" and not content:
            for tool_call in tool_calls:
                name, args = self._tool_call_parts(tool_call)
                rtnstr += f"[{name}]\n{escapeLn(args)}\n"
            color = "light_cyan"
        elif role == "tool":
            # BUG FIX: the old code called msg.get(...) here, which crashed for
            # object-style messages; reuse the content extracted above, and do
            # not let malformed / non-JSON tool output raise out of __str__.
            try:
                tool_result = json.loads(content or "")
            except (TypeError, ValueError):
                tool_result = {}
            if not isinstance(tool_result, dict):
                tool_result = {"content": tool_result}
            rtnstr = f"[{tool_result.get('tool_name', 'N/A')} Return]\n{escapeLn(tool_result)}\n"
            color = "light_green"
        else:
            rtnstr = f"{escapeLn(msg)}\n"
        return colored(rtnstr, color)

class LLMService:
    """
    Wraps calls to a large language model (LLM): summary generation, keyword
    extraction, structured-output parsing, function calling and embeddings.
    - generateSum(content: str) -> Optional[str]
      Ask the chat model for a short comma-separated summary.
    - embedding(content: List[str]) -> List[List[float]]
      Vectorize texts for similarity search and the like.
    """
    @timeit("light_green")
    def embedding(self, content: List[str]) -> List[List[float]]:
        """
        Embed each text in `content` via the OpenAI embeddings endpoint.

        Inputs are automatically split into batches so that no single request
        exceeds 2048 items or 8192 tokens; the returned vectors keep the same
        order and length as the input list.
        """
        def tokens_in_string(s: str) -> int:
            # Token count under cl100k_base (the encoding used by this model family).
            return num_tokens_from_string(s, "cl100k_base")
        def _get_embedding_batch(batch: List[str]) -> List[List[float]]:
            # Issue one embeddings request and return its vectors in order.
            print(
                "batch list length",
                len(batch),
                ", word length",
                len("".join(batch)),
                ", content",
                str(batch)[0:30])
            # print(colored(f"_get_embedding_batch: {len(batch)} {str(batch)[:100]}", "magenta"))
            response = client.embeddings.create(
                model="text-embedding-3-small",
                input=batch
            )
            # print(colored(  f"RETURN len:{len(response.data)} {response.data[0].embedding[0:2]}", "magenta"))
            return [emb.embedding for emb in response.data]
        def _chunk_and_embed(all_texts: List[str]) -> List[List[float]]:
            # Greedy batching: accumulate texts until the next one would break a limit.
            all_embeddings: List[List[float]] = []
            current_batch: List[str] = []
            current_batch_token_sum = 0
            for text in all_texts:
                text_token_count = tokens_in_string(text)
                # If adding this text would exceed the item-count or token
                # budget, flush the accumulated batch first.
                if (
                    len(current_batch) + 1 > 2047  # item-count limit
                    or current_batch_token_sum + text_token_count > 8191  # token limit
                ):
                    print(f"current_batch_token_sum: {current_batch_token_sum}, text_token_count: {text_token_count} ,total: {current_batch_token_sum+text_token_count}")
                    print(f"current_batch_len: {len(current_batch)}")
                    # Request embeddings for the accumulated batch.
                    if current_batch:
                        batch_embeddings = _get_embedding_batch(current_batch)
                        all_embeddings.extend(batch_embeddings)
                    # Start a fresh batch containing only the current text.
                    current_batch = [text]
                    current_batch_token_sum = text_token_count
                else:
                    # Otherwise keep accumulating into the current batch.
                    current_batch.append(text)
                    current_batch_token_sum += text_token_count

            # Flush the final batch if it is non-empty.
            if current_batch:
                # print(f"last batch: current_batch_token_sum: {current_batch_token_sum}, text_token_count: {text_token_count} ,total: {current_batch_token_sum+text_token_count}")
                # print(f"last batch: current_batch_len: {len(current_batch)}")
                batch_embeddings = _get_embedding_batch(current_batch)
                all_embeddings.extend(batch_embeddings)

            return all_embeddings


        # Run the batched embedding and collect the results.
        embs = _chunk_and_embed(content)
        if __name__ == "__main__":
            # When run as a script, print a short preview of every embedding.
            print("\n".join(
                [
                    f"[{i}] {content[i]}->{str(emb[:2])}"
                    for i, emb in enumerate(embs)
                ]
            ))
        return embs
    @timeit("light_green")
    def generateSum(self, content: str) -> Optional[str]:
        """
        Ask gpt-4o for a <=30-word, comma-separated summary of `content`.

        Empty input short-circuits to " " (mirroring the prompt's rule that
        empty/nonsense text yields a space) without calling the API.
        """
        print(f"generateSum: {content[:100]}")
        if content=="":
            return " "
        completion = client.chat.completions.create(
                model="gpt-4o",
                messages=[
                    {"role": "system", "content":
                    '''You are a summary generation assistant.
                    generate a summary of the following text withIn 30 words.The less the better.
                    You shall give me many keywords or short sentences,devided by comma.No ANY space around each comma!!!
                    If the text is empty or nonsense, return an space.The text is bracketed by < and >.
                    But your summary should not contain < and >.'''},
                    {
                        "role": "user",
                        "content": "the text for summary is: <"+content+">"
                    }
                ]
            )
        return completion.choices[0].message.content
    def generateSumByKeyWord(self, content: str,topK:int=5) -> List[str]:
      # Offline alternative to generateSum: jieba TF-IDF keyword extraction.
      keywords = jieba.analyse.extract_tags(content, topK=topK)
      return keywords

    @timeit("light_green")
    def structureOutput(self, content: str, event_info_format: Type[BaseModel])->Optional[BaseModel]:
        """
        Extract structured information from `content` via OpenAI structured output.

        `event_info_format` is the pydantic model describing the expected
        schema; returns the parsed model instance (None if parsing failed).
        """

        completion = client.beta.chat.completions.parse(
            model="gpt-4o-mini",
            messages=[
                {"role": "user", "content": content},
            ],
            response_format=event_info_format,
        )

        return completion.choices[0].message.parsed
    def initContent(self,content:str,messages:list[typing.Any]):
        # Append `content` as a user message (when non-empty), mutating
        # `messages` in place, and echo it to stdout.
        if content!="":
            messages.append(
                {
                    "role":"user",
                    "content":content
                }
            )
            print(PrettyMessages(messages))#user
            sys.stdout.flush()
    @timeit("light_green")
    def funcCallLLMS1(self, content: str, tools: list[type[BaseModel]],messages:list[typing.Any]):
        """
        Non-streaming function-calling request: append `content` as a user
        message, expose `tools` (pydantic models) as callable functions, and
        return the assistant message (which may carry tool_calls).
        """
        self.initContent(content,messages)
        tool_params = [pydantic_function_tool(tool) for tool in tools]
        completion = client.chat.completions.create(
            model="gpt-4o",
            messages=messages,
            tools=tool_params,
        )
        # messages.append(completion.choices[0].message)
        # print(PrettyCompletion(completion.choices[0].message))
        # sys.stdout.flush()
        return completion.choices[0].message
    @timeit("light_green")
    def funcCallLLMS2(self, content: str, tools: list[type[BaseModel]], messages:list[typing.Any]):
        """
        Streaming variant of funcCallLLMS1: returns the raw response stream
        (stream=True) instead of a completed message.
        """
        self.initContent(content,messages)
        tool_params = [pydantic_function_tool(tool) for tool in tools]
        response_stream = client.chat.completions.create(
            model="gpt-4o",
            messages=messages,
            tools=tool_params,
            stream=True
        )
        return response_stream
def test_embedding(test_input=None):
    """
    Smoke-test LLMService.embedding and return the resulting vectors.

    BUG FIX: the default used to be a mutable list literal (`["Hello, world!"]`),
    the classic shared-default-argument pitfall; use None and build the list
    per call instead. Callers passing their own list are unaffected.
    """
    if test_input is None:
        test_input = ["Hello, world!"]
    service = LLMService()
    return service.embedding(test_input)
def test_generateSum():
    """Exercise generateSum and generateSumByKeyWord on a sample PostgreSQL blurb."""
    sample_text = '''PostgreSQL，又称为Postgres，是一个强大的开源对象关系数据库系统。
    它使用并扩展了SQL语言，并结合了许多功能，可以安全地存储和扩展最复杂的数据工作负载
    。PostgreSQL以其经过验证的架构、可靠性、数据完整性、强大的功能集、可扩展性以及背后的开源社区的奉献精神而赢得了良好的声誉，
    这些社区始终提供高性能和创新的解决方案。PostgreSQL支持所有主要操作系统，自2001年以来一直符合ACID规范，
    并具有强大的附加组件，如广受欢迎的PostGIS地理空间数据库扩展器。'''
    svc = LLMService()
    llm_summary = svc.generateSum(sample_text)
    keyword_summary = svc.generateSumByKeyWord(sample_text)
    print("LLM Summary Result    :", llm_summary)
    print("Keyword Summary Result:", keyword_summary)
def cos_sim(v1: List[float], v2: List[float]) -> float:
  """
  计算余弦相似度的辅助函数。
  """
  if not v1 or not v2:
      return 0.0
  dot = sum(a * b for a, b in zip(v1, v2))
  norm1 = sum(a * a for a in v1) ** 0.5
  norm2 = sum(b * b for b in v2) ** 0.5
  if norm1 == 0 or norm2 == 0:
      return 0.0
  return dot / (norm1 * norm2)
if __name__ == '__main__':
  # Smoke test: embed two keyword-summary strings plus the bare term
  # "PostgreSQL", then compare each summary against the term by cosine
  # similarity (requires network access to the embeddings API).
  vecs=test_embedding(["简介 PostgreSQL,PostgreSQL,Postgres,SQL,数据库系统,功能强大,特点,PostgreSQL,Linux,Windows,MacOS,兼容性,扩展性,非常适合,数据库,查询,高度,开源,PostgreSQL,社区,改进,活跃,PostgreSQL,互联网服务,应用,应用程序,存储,学习资源,https,www,postgresql,org,docs,在线教程,https,www,postgresqltutorial,com,PostgreSQL,开源,数据库,受欢迎,灵活性",
                       "简介 PostgreSQL,Postgres,SQL,数据库系统,功能强大,特点,Linux,Windows,MacOS,兼容性,扩展性,非常适合,数据库,查询,高度,开源,社区,改进,活跃,互联网服务,应用,应用程序,存储,学习资源,https,www,org,docs,在线教程,https,www,com,开源,数据库,受欢迎,灵活性",
                       "PostgreSQL"])
  print(cos_sim(vecs[0],vecs[2]),cos_sim(vecs[1],vecs[2]))
  # test_generateSum()

