from vanna.chromadb.chromadb_vector import ChromaDB_VectorStore

from ..openai.openai_chat import OpenAI_Chat


class Telchina_Chat(OpenAI_Chat):
  """OpenAI-backed chat assistant that translates natural-language
  lamp-post requests (in Chinese) into JSON control commands of the
  form ``{"command": ..., "lampPostCode": ..., "dimmingValue": ...}``.
  """

  def __init__(self, client=None, config=None):
    super().__init__(client, config)

  def generate_command(self, question: str, **kwargs) -> str:
    """Ask the LLM to turn *question* into a JSON lamp-control command.

    The system prompt may be overridden through the
    ``initial_prompt_command`` key of ``self.config``; otherwise a
    built-in Chinese prompt is used that instructs the model to answer
    with a JSON object (or ``{"error": ...}`` when the lamp-post code,
    action, or dimming value cannot be extracted).

    Args:
      question: the user's natural-language request.
      **kwargs: forwarded to :meth:`submit_prompt`.

    Returns:
      The raw LLM response text, expected to be ``json.loads``-parseable.
    """
    if self.config is not None:
      initial_prompt = self.config.get("initial_prompt_command", None)
    else:
      initial_prompt = None

    if initial_prompt is None:
      # All few-shot examples use the same key casing ("lampPostCode")
      # as the format mandated in 步骤3, so the model does not learn to
      # emit mixed-case keys.
      initial_prompt = """
        给定一个问题，请你按步骤要求工作，直接回答问题，不要输出任何解释。
          步骤1：识别问题中的灯杆编号和指令动作以及调光值信息
          步骤2：如果问题中无法提取灯杆编号、指令动作、调光值信息，请直接回复：{"error":"请补充灯杆编号、开关灯指令、调光值信息"}
          步骤3：用Json格式回答问题，格式为：{"command":指令动作,"lampPostCode":灯杆编号,"dimmingValue":调光值}
          步骤4：只要dimmingValue不等于0，command就不能为"off"
        样例：
          样例1：
          问题：打开灯杆编号是灯杆100001的灯
          回答：{"command":"on","lampPostCode":"灯杆100001","dimmingValue":100}
          错误回答：{"error":"无法解析"}
          样例2：
          问题：关闭灯杆编号是灯杆100001的灯
          正确回答：{"command":"off","lampPostCode":"灯杆100001","dimmingValue":0}
          错误回答：{"error":"无法解析"}
          样例3：
          问题：灯杆编号是灯杆100001的灯调光到30
          正确回答：{"command":"on","lampPostCode":"灯杆100001","dimmingValue":30}
        约束：
          指令动作只能是"on"或者"off"
          开灯指令的调光默认值：100
          关灯指令的调光默认值：0
          回复的内容可以使用json.loads()函数解析
        直接回答问题，不要输出任何解释。
      """
    prompt = [
      self.system_message(initial_prompt),
      self.user_message(question),
    ]
    self.log(prompt)
    llm_response = self.submit_prompt(prompt, **kwargs)
    self.log(llm_response)
    return llm_response

  def system_message(self, message: str) -> dict:
    """Wrap *message* as an OpenAI chat ``system`` message."""
    return {"role": "system", "content": message}

  def user_message(self, message: str) -> dict:
    """Wrap *message* as an OpenAI chat ``user`` message."""
    return {"role": "user", "content": message}

  def assistant_message(self, message: str) -> dict:
    """Wrap *message* as an OpenAI chat ``assistant`` message."""
    return {"role": "assistant", "content": message}

  def submit_prompt(self, prompt, **kwargs) -> str:
    """Send *prompt* (a list of chat messages) to the OpenAI client.

    Model selection order: ``config["engine"]`` (Azure-style), then
    ``config["model"]``, then a gpt-3.5-turbo variant chosen by the
    approximate prompt size.

    Raises:
      Exception: if *prompt* is ``None`` or empty.
    """
    if prompt is None:
      raise Exception("Prompt is None")

    if len(prompt) == 0:
      raise Exception("Prompt is empty")

    # Approximate token count: ~4 characters per token.
    num_tokens = sum(len(message["content"]) / 4 for message in prompt)

    # Pick the single kwarg that selects the model/engine, then issue
    # one create() call instead of duplicating it per branch.
    if self.config is not None and "engine" in self.config:
      print(
        f"Using engine {self.config['engine']} for {num_tokens} tokens (approx)"
      )
      model_selector = {"engine": self.config["engine"]}
    elif self.config is not None and "model" in self.config:
      print(
        f"Using model {self.config['model']} for {num_tokens} tokens (approx)"
      )
      model_selector = {"model": self.config["model"]}
    else:
      # No explicit model configured: fall back on a size-based default.
      model = "gpt-3.5-turbo-16k" if num_tokens > 3500 else "gpt-3.5-turbo"
      print(f"Using model {model} for {num_tokens} tokens (approx)")
      model_selector = {"model": model}

    response = self.client.chat.completions.create(
      messages=prompt,
      max_tokens=self.max_tokens,
      stop=None,
      temperature=self.temperature,
      **model_selector,
    )

    # Find the first response from the chatbot that has text in it
    # (some responses may not have text).
    # NOTE(review): membership tests on new-SDK Choice objects likely
    # never match; kept for compatibility with legacy response shapes.
    for choice in response.choices:
      if "text" in choice:
        return choice.text

    # If no response with text is found, return the first response's
    # content (which may be empty).
    return response.choices[0].message.content


class Telchina_Command_LocalContext_OpenAI(ChromaDB_VectorStore, Telchina_Chat):
  """Vanna assistant combining a local ChromaDB vector store with
  :class:`Telchina_Chat` for lamp-post command generation.
  """

  def __init__(self, client=None, config=None):
    # The two bases take different constructor arguments, so each is
    # initialized explicitly rather than via cooperative super().
    ChromaDB_VectorStore.__init__(self, config=config)
    # Keyword arguments for consistency with Telchina_SQL_LocalContext_OpenAI.
    Telchina_Chat.__init__(self, client=client, config=config)

class Telchina_SQL_LocalContext_OpenAI(ChromaDB_VectorStore, OpenAI_Chat):
  """Vanna assistant combining a local ChromaDB vector store with
  OpenAI chat for SQL generation.
  """

  def __init__(self, client=None, config=None):
    # The two bases take different constructor arguments (the vector
    # store needs only config; the chat layer also needs the client),
    # so each is initialized explicitly rather than via super().
    ChromaDB_VectorStore.__init__(self, config=config)
    OpenAI_Chat.__init__(self, client=client, config=config)
