import json

from langchain_openai import ChatOpenAI

from ai_configs import default_ai_configs
import requests
from langchain_core.embeddings import Embeddings
from langchain_core.runnables import RunnableLambda

def create_embeddings(platformCode="huoshan-embedding-250615"):
  """Build a CustomEmbeddings client for the platform identified by platformCode.

  Looks up the platform's url/key/model in default_ai_configs and strips the
  trailing "/embeddings" path so CustomEmbeddings can re-append it per request.

  Raises:
    ValueError: if platformCode is not present in default_ai_configs.
  """
  _ai_config = default_ai_configs.get(platformCode)
  if _ai_config is None:
    # ValueError (not bare Exception) so callers can catch it precisely;
    # anyone catching Exception still catches this.
    raise ValueError("无法找到模型服务：" + platformCode)
  return CustomEmbeddings(
    base_url=_ai_config["url"].replace("/embeddings", ""),
    api_key=_ai_config["key"],
    model=_ai_config["model"],
  )


class CustomEmbeddings(Embeddings):
  """Minimal OpenAI-compatible embeddings client talking to a /embeddings REST endpoint."""

  def __init__(self, base_url, api_key, model, timeout=60):
    # base_url is expected WITHOUT the trailing /embeddings segment;
    # embed_documents appends it per request.
    self.base_url = base_url
    self.api_key = api_key
    self.model = model
    # requests has no default timeout — without one a stuck server hangs
    # the caller forever. 60s default keeps existing callers working.
    self.timeout = timeout

  def embed_documents(self, texts):
    """Embed a batch of texts; returns one list-of-floats vector per input text.

    Raises:
      requests.HTTPError: if the endpoint returns a non-2xx status.
      requests.Timeout: if the request exceeds self.timeout seconds.
    """
    headers = {
      "Content-Type": "application/json",
      "Authorization": f"Bearer {self.api_key}"
    }
    payload = {"input": texts, "model": self.model, "encoding_format": "float"}
    response = requests.post(
      f"{self.base_url}/embeddings",
      headers=headers,
      json=payload,  # requests serializes to JSON itself; no manual json.dumps needed
      timeout=self.timeout,
    )
    response.raise_for_status()
    # OpenAI-style response shape: {"data": [{"embedding": [...]}, ...]}
    return [item["embedding"] for item in response.json()["data"]]

  def embed_query(self, text):
    """Embed a single query string by delegating to embed_documents."""
    return self.embed_documents([text])[0]

def create_llm(platformCode = "huoshan-doubao", temperature=0.5):
  """Build a ChatOpenAI client for the platform identified by platformCode.

  Looks up url/key/model in default_ai_configs and strips the trailing
  "/chat/completions" path, since ChatOpenAI appends it itself.

  Raises:
    ValueError: if platformCode is not present in default_ai_configs.
  """
  _ai_config = default_ai_configs.get(platformCode)
  if _ai_config is None:
    # ValueError (not bare Exception) for precise catching; backward-compatible
    # with callers that catch Exception.
    raise ValueError("无法找到模型服务：" + platformCode)
  return ChatOpenAI(
    base_url=_ai_config["url"].replace("/chat/completions", ""),
    api_key=_ai_config["key"],
    model=_ai_config["model"],
    temperature=temperature,
  )

def chain_log(format_func=None):
  """Return a pass-through logger: prints the value (optionally rendered by
  format_func) in blue, then returns the value unchanged."""
  def _log(value):
    rendered = format_func(value) if format_func else value
    print('\33[34m chain_log ==>>', rendered, '\033[0m')
    return value
  return _log

# Wrap the chain_log pass-through logger in a RunnableLambda so it can be
# composed as a step inside a LangChain pipeline.
def runnable_chain_log(format_func=None):
  """Return chain_log(format_func) wrapped as a LangChain RunnableLambda."""
  logger = chain_log(format_func)
  return RunnableLambda(logger)


