from dotenv import load_dotenv

load_dotenv()

import os
import json
import time
import logging
from collections import deque
from typing import Dict, List
from openai import OpenAI
import chromadb
import tiktoken as tiktoken
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
import re
import utils

from chromadb.config import Settings


# NOTE(review): this module-level Chroma client appears unused here —
# DefaultResultsStorage below builds its own PersistentClient. Kept in case an
# unseen part of the project imports it; confirm before removing.
client = chromadb.Client(Settings(anonymized_telemetry=False))

# Model used for all LLM calls; LLM_MODEL takes precedence over the legacy
# OPENAI_API_MODEL variable, defaulting to gpt-3.5-turbo. Lower-cased so the
# gpt-4 check below is case-insensitive.
LLM_MODEL = os.getenv("LLM_MODEL", os.getenv("OPENAI_API_MODEL", "gpt-3.5-turbo")).lower()


# API keys (required)
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
assert OPENAI_API_KEY, "\033[91m\033[1m" + ".env 中未设置 OPENAI_API_KEY " + "\033[0m\033[0m"

# API base URL, used to route requests through a relay/proxy endpoint
OPENAI_API_BASE = os.getenv("OPENAI_API_BASE", "")

# Name of the Chroma collection that stores tasks and their execution results;
# falls back to the legacy TABLE_NAME variable.
RESULTS_STORE_NAME = os.getenv("RESULTS_STORE_NAME", os.getenv("TABLE_NAME", ""))
assert RESULTS_STORE_NAME, "\033[91m\033[1m" + ".env 中未设置 RESULTS_STORE_NAME" + "\033[0m\033[0m"

# Runtime configuration
INSTANCE_NAME = os.getenv("INSTANCE_NAME",  "Blueos")

# LLM sampling temperature (0.0 = most deterministic)
OPENAI_TEMPERATURE = float(os.getenv("OPENAI_TEMPERATURE", 0.0))

# Warn loudly when a GPT-4 family model is configured: it is expensive.
if LLM_MODEL.startswith("gpt-4"):
    print(
        "\033[91m\033[1m"
        + "\n*****正在使用 GPT-4. 代价昂贵，请注意 token 用量*****"
        + "\033[0m\033[0m"
    )

# Persist task execution results in ChromaDB.
class DefaultResultsStorage:
    def __init__(self):
        """Open (or create) the persistent Chroma collection for results."""
        # Silence chromadb's own log output below ERROR level.
        logging.getLogger('chromadb').setLevel(logging.ERROR)
        persist_dir = "chroma"
        db_client = chromadb.PersistentClient(
            settings=chromadb.config.Settings(
                persist_directory=persist_dir,
            )
        )

        # Cosine similarity for the HNSW index; embeddings come from OpenAI.
        self.collection = db_client.get_or_create_collection(
            name=RESULTS_STORE_NAME,
            metadata={"hnsw:space": "cosine"},
            embedding_function=OpenAIEmbeddingFunction(
                api_key=OPENAI_API_KEY, api_base=OPENAI_API_BASE
            ),
        )

    def add(self, task: Dict, result: str, result_id: str):
        """Store *result* for *task* under *result_id*, updating if it already exists."""
        # embeddings=None lets the collection's embedding function compute vectors.
        payload = dict(
            ids=result_id,
            embeddings=None,
            documents=result,
            metadatas={"task": task["task_name"], "result": result},
        )
        existing_ids = self.collection.get(ids=[result_id], include=[])["ids"]
        if existing_ids:
            self.collection.update(**payload)
        else:
            self.collection.add(**payload)

    def query(self, query: str, top_results_num: int) -> List[dict]:
        """Return the task names of stored results most similar to *query*."""
        total: int = self.collection.count()
        if not total:
            return []
        hits = self.collection.query(
            query_texts=query,
            n_results=min(top_results_num, total),
            include=["metadatas"]
        )
        return [meta["task"] for meta in hits["metadatas"][0]]

def use_chroma():
    """Announce the vector store in use and return a Chroma-backed storage."""
    banner = "\n使用向量数据库: " + "\033[93m\033[1m" + "Chroma (Default)" + "\033[0m\033[0m"
    print(banner)
    return DefaultResultsStorage()


# Module-global results store shared by the agent functions below.
results_storage = use_chroma()

# In-memory storage for the pending task list.
class SingleTaskListStorage:
    """FIFO queue of pending tasks with monotonically increasing task ids."""

    def __init__(self):
        # deque gives O(1) popleft for FIFO consumption.
        self.tasks = deque([])
        self.task_id_counter = 0

    def append(self, task: Dict):
        """Add *task* to the back of the queue."""
        self.tasks.append(task)

    def replace(self, tasks: List[Dict]):
        """Discard the current queue and replace it with *tasks* (order preserved)."""
        self.tasks = deque(tasks)

    def popleft(self):
        """Remove and return the task at the front of the queue."""
        return self.tasks.popleft()

    def is_empty(self):
        """Return True when no tasks are queued."""
        # Idiom fix: was `return False if self.tasks else True`.
        return not self.tasks

    def next_task_id(self):
        """Return the next unused task id (1-based, monotonically increasing)."""
        self.task_id_counter += 1
        return self.task_id_counter

    def get_task_names(self):
        """Return the queued tasks' names in queue order."""
        return [t["task_name"] for t in self.tasks]

# Initialize the module-global pending task list.
tasks_storage = SingleTaskListStorage()

def limit_tokens_from_string(string: str, model: str, limit=None) -> str:
    """Truncate *string* to at most *limit* tokens using *model*'s tokenizer.

    Args:
        string: Text to (optionally) truncate.
        model: Model name used to select the tiktoken encoding.
        limit: Maximum number of tokens to keep. None keeps the full string,
            which matches the historical behavior of this function.

    Returns:
        The string re-assembled from at most *limit* tokens.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model name: fall back to the generic gpt2 tokenizer.
        # (Narrowed from a bare `except:` that swallowed every error.)
        encoding = tiktoken.encoding_for_model('gpt2')

    encoded = encoding.encode(string)
    if limit is not None:
        # BUG FIX: the original encoded and decoded without ever truncating,
        # so the limit promised by the name/docstring never happened.
        encoded = encoded[:limit]

    return encoding.decode(encoded)


def openai_call(
    prompt: str,
    model: str = LLM_MODEL,
    temperature: float = OPENAI_TEMPERATURE,
    max_tokens: int = 100,
):
    """Call the chat-completions API and return the stripped message text.

    Args:
        prompt: Text sent as the system message.
        model: Chat model name.
        temperature: Sampling temperature.
        max_tokens: Completion token cap.

    Retries forever on any error (preserving the original contract), but now
    backs off between attempts instead of hammering the API in a tight loop.
    """
    # Hoisted out of the retry loop: neither the client nor the trimmed
    # prompt changes between attempts.
    client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
    trimmed_prompt = limit_tokens_from_string(prompt, model)
    messages = [{"role": "system", "content": trimmed_prompt}]
    while True:
        try:
            response = client.chat.completions.create(
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
                n=1,
                stop=None,
            )
            return response.choices[0].message.content.strip()
        except Exception as e:
            # Log and back off before retrying; the original retried
            # immediately, which burns rate limits on persistent errors.
            print(e)
            time.sleep(10)

def task_creation_agent(
        objective: str, result: Dict, task_description: str, task_list: List[str]
):
    """Ask the LLM for follow-up tasks based on the last completed task.

    Returns a list of {"task_name": ...} dicts parsed from the model's
    numbered-list response.
    """
    prompt = f"""
You are to use the result from an execution agent to create new tasks with the following objective: {objective}.
The last completed task has the result: \n{result["data"]}
This result was based on this task description: {task_description}.\n"""

    if task_list:
        prompt += f"These are incomplete tasks: {', '.join(task_list)}\n"
    prompt += "Based on the result, return a list of tasks to be completed in order to meet the objective. "
    if task_list:
        prompt += "These new tasks must not overlap with incomplete tasks. "

    prompt += """
Return one task per line in your response. The result must be a numbered list in the format:

#. First task
#. Second task

The number of each entry must be followed by a period. If your list is empty, write "There are no tasks to add at this time."
Unless your list is empty, do not include any headers before your numbered list or follow your numbered list with any other output."""

    response = openai_call(prompt, max_tokens=4000)

    # Parse lines shaped like "<number>. <task name>" into bare task names.
    parsed_names = []
    for line in response.split('\n'):
        head, sep, tail = line.strip().partition(".")
        if not sep:
            continue
        number = ''.join(ch for ch in head if ch.isnumeric())
        name = re.sub(r'[^\w\s_]+', '', tail).strip()
        if name and number.isnumeric():
            parsed_names.append(name)

    return [{"task_name": name} for name in parsed_names]


def prioritization_agent(OBJECTIVE):
    """Re-rank the queued tasks by priority toward OBJECTIVE via the LLM.

    Returns:
        A list of {"task_id", "task_name"} dicts in priority order, or None
        when the LLM gives no response.
    """
    task_names = tasks_storage.get_task_names()
    bullet_string = '\n'

    prompt = f"""
You are tasked with prioritizing the following tasks: {bullet_string + bullet_string.join(task_names)}
Consider the ultimate objective of your team: {OBJECTIVE}.
Tasks should be sorted from highest to lowest priority, where higher-priority tasks are those that act as pre-requisites or are more essential for meeting the objective.
Do not remove any tasks. Return the ranked tasks as a numbered list in the format:

#. First task
#. Second task

The entries must be consecutively numbered, starting with 1. The number of each entry must be followed by a period.
Do not include any headers before your ranked list or follow your list with any other output."""

    response = openai_call(prompt, max_tokens=4000)
    if not response:
        print('priotritization agent 无响应.')
        return
    new_tasks = response.split("\n") if "\n" in response else [response]
    new_tasks_list = []
    for task_string in new_tasks:
        task_parts = task_string.strip().split(".", 1)
        if len(task_parts) == 2:
            task_id = ''.join(s for s in task_parts[0] if s.isnumeric())
            task_name = re.sub(r'[^\w\s_]+', '', task_parts[1]).strip()
            # CONSISTENCY FIX: require a numeric id, matching
            # task_creation_agent's parser; prose lines ("Note. ...") used to
            # slip through with an empty task_id, which later produces
            # colliding "result_" ids in the results store.
            if task_name and task_id.isnumeric():
                new_tasks_list.append({"task_id": task_id, "task_name": task_name})

    return new_tasks_list

def get_is_need_blueos_context(task):
    """Ask the LLM whether *task* matches the H5-to-Quickapp conversion task.

    Returns a boolean. ROBUSTNESS FIX: the original fed the raw reply to
    json.loads, which raised JSONDecodeError whenever the model returned
    anything other than a bare "true"/"false" (e.g. "True." or extra prose).
    """
    prompt = f"""
    A new task will be given below, please determine if it is similar or exactly the same as the original task

    New task: {task}, the original task: Convert the H5 code to Quickapp code
    
    - Only returns True or False, indicating whether the new task is similar or exactly the same as the original task.
    """
    reply = openai_call(prompt, max_tokens=4000).lower().strip()
    try:
        # Fast path: a clean JSON boolean, as the prompt requests.
        return json.loads(reply)
    except ValueError:
        # Fallback: tolerate replies like "true." or "the answer is true".
        return "true" in reply

def direct_prompting(task):
    """First PE stage: run the direct conversion prompt for *task* through the LLM."""
    conversion_prompt = utils.get_direct_prompt(task)
    output = openai_call(conversion_prompt, max_tokens=4000)
    print("\033[93m\033[1m" + "\ndirect_prompting result:\n" + "\033[0m\033[0m" + f" {output}")
    return output

def review_prompting(direct_prompt_code_result):
    """Second PE stage: ask the LLM to review/refine the first-stage code."""
    review_prompt = utils.get_review_prompt(direct_prompt_code_result)
    output = openai_call(review_prompt, max_tokens=4000)
    print("\033[93m\033[1m" + "\nreview_prompting result:\n" + "\033[0m\033[0m" + f" {output}")
    return output

# Execute a task using the objective and the five most relevant past results.
def execution_agent(objective: str, task: str) -> str:
    """Execute *task* in the context of *objective* and previously saved results.

    Args:
        objective: The overall goal the agent is working toward.
        task: The concrete task to perform now.

    Returns:
        str: The generated response for the given task.
    """
    pieces = [f'Perform one task based on the following objective: {objective}.\n']

    # Pull up to five previously completed tasks as context for the prompt.
    task_context = task_context_agent(query=objective, top_results_num=5)
    if task_context:
        pieces.append('Take into account these previously completed tasks: ' + '\n'.join(task_context))
    pieces.append(f'\nYour task: {task}\n')
    prompt = ''.join(pieces)

    # If the task is a BlueOS code conversion, enrich the prompt with BlueOS
    # development context and run the two-stage PE pipeline.
    is_matched = get_is_need_blueos_context(task)
    print("\033[93m\033[1m" + "\n是否需要额外蓝河标准及编译ERROR提示:" + "\033[0m\033[0m" + f" {is_matched}")
    if not is_matched:
        return openai_call(prompt, max_tokens=4000)
    return review_prompting(direct_prompting(prompt))

# Get the top n completed tasks for the objective.
def task_context_agent(query: str, top_results_num: int):
    """Retrieve context for *query* from the results index.

    Args:
        query: The query or objective used for similarity search.
        top_results_num: Maximum number of top results to retrieve.

    Returns:
        list: Task names for the query, ordered by relevance.
    """
    return results_storage.query(query=query, top_results_num=top_results_num)

def get_code_by_task(OBJECTIVE, INITIAL_TASK, source_file_path):
    """Drive the task loop for OBJECTIVE, seeded with INITIAL_TASK.

    Each iteration pops one task, executes it, writes Quickapp-looking output
    to the .ux file derived from source_file_path, persists the result, spawns
    follow-up tasks and re-prioritizes the queue. Returns the most recent
    result after 10 iterations; returns None if the queue drains first.
    """
    initial_task = {
    "task_id": tasks_storage.next_task_id(),
    "task_name": INITIAL_TASK
    }
    tasks_storage.append(initial_task)

    loop = 0
    while not tasks_storage.is_empty():
        loop = loop + 1
        # Print the current task list for visibility.
        print("\033[95m\033[1m" + f"\n*****第 {loop} 轮循环*****\n" + "\033[0m\033[0m")
        print("\033[95m\033[1m" + "\n*****任务列表*****\n" + "\033[0m\033[0m")
        for t in tasks_storage.get_task_names():
            print(" • " + str(t))

        # Step 1: pull the first pending task off the queue.
        task = tasks_storage.popleft()
        print("\033[92m\033[1m" + "\n*****下个任务*****\n" + "\033[0m\033[0m")
        print(str(task["task_name"]))

        # Execute the task against the objective, using stored context.
        result = execution_agent(OBJECTIVE, str(task["task_name"]))
        print("\033[93m\033[1m" + "\n*****任务执行结果*****\n" + "\033[0m\033[0m")
        print(result)
        target_file_path = utils.get_target_file_path(source_file_path)
        # Only persist output that actually looks like Quickapp code.
        if utils.is_quickapp_code(result):
            print("\033[93m\033[1m" + "\n写入到ux文件:" + "\033[0m\033[0m" + f" {target_file_path}")
            utils.write_result_to_ux(target_file_path, result)
        # Hard stop after 10 iterations: return the latest result as the answer.
        if loop >= 10:
            print(f"任务完成，输出的代码为：\n -> {result}")
            return result
        # Step 2: wrap the result for the task-creation agent.
        enriched_result = {
            "data": result
        }

        result_id = f"result_{task['task_id']}"

        results_storage.add(task, result, result_id)

        # Step 3: create new tasks from the result, then re-prioritize.
        new_tasks = task_creation_agent(
            OBJECTIVE,
            enriched_result,
            task["task_name"],
            tasks_storage.get_task_names(),
        )

        # Assign fresh ids and enqueue the newly created tasks.
        for new_task in new_tasks:
            new_task.update({"task_id": tasks_storage.next_task_id()})
            tasks_storage.append(new_task)

        prioritized_tasks = prioritization_agent(OBJECTIVE)
        if prioritized_tasks:
            tasks_storage.replace(prioritized_tasks)

        # Throttle between iterations to be gentle on the API.
        time.sleep(5)

def get_code_result(h5_code, source_file_path):
    """Convert *h5_code* to Quickapp code via the task-driven agent loop.

    Args:
        h5_code: The H5 (web) source code to convert.
        source_file_path: Path of the H5 source file; the task loop derives
            the output .ux path from it.

    Returns:
        The final converted code produced by get_code_by_task. BUG FIX: the
        original discarded that value, so this "get result" function always
        returned None.
    """
    OBJECTIVE = f"""
Convert the entered H5 web page code into Quickapp code. The code for H5 is：
{h5_code}
- Only return the Quickapp code. Do not give any other explanation.
- If converted code remains the same, copy directly, instead of writing /* The code remains the same */
"""
    INITIAL_TASK = "Develop a task list"

    print("\033[94m\033[1m" + "\n*****任务目标OBJECTIVE*****\n" + "\033[0m\033[0m")
    print(f"{OBJECTIVE}")

    print("\033[93m\033[1m" + "\n初始任务:" + "\033[0m\033[0m" + f" {INITIAL_TASK}")
    return get_code_by_task(OBJECTIVE, INITIAL_TASK, source_file_path)
