import os
from config import config_file_or_env, config_list, llm_config
from autogen import ConversableAgent, register_function, UserProxyAgent, GroupChat, GroupChatManager
from autogen.agentchat.contrib.capabilities import transform_messages, transforms
from autogen.agentchat.contrib.agent_builder import AgentBuilder
from tool import *
import autogen
import re


def code_agent(field1, field2):
    """Score the semantic match between two fields via a tool-calling agent.

    Sets up a two-agent chat in which ``CODE_Agent`` suggests calls to the
    ``SemanticMatch`` tool and a user-proxy agent executes them, then scans
    the chat history for the first tool response.

    Args:
        field1: First field to compare.
        field2: Second field to compare.

    Returns:
        The content of the first tool message (the match score), or ``None``
        when the chat produced no tool message.
    """
    prompt = (f"###Perform semantic matching"
              f"###Field 1:{field1}"
              f"###Field 2:{field2}"
              f"###Response Template:"
              f"####score:[]")

    CODE_agent = ConversableAgent(
        name="CODE_Agent",
        system_message="You can solve the problem by calling a function.Return 'TERMINATE' when the task is done.",
        llm_config={"config_list": config_list},
    )
    user_proxy = ConversableAgent(
        name="User",
        llm_config=False,
        # Stop the conversation once the assistant signals completion.
        is_termination_msg=lambda msg: msg.get("content") is not None and "TERMINATE" in msg["content"],
        human_input_mode="NEVER",
    )
    register_function(
        SemanticMatch,
        caller=CODE_agent,  # The assistant agent can suggest calls to the tool.
        executor=user_proxy,  # The user proxy agent executes the tool calls.
        name="SemanticMatch",  # By default, the function name is used as the tool name.
        description="A simple Semantic Match",  # A description of the tool.
    )
    chat = user_proxy.initiate_chat(CODE_agent, message=prompt)
    # Use .get() so history entries without a "role" key cannot raise KeyError.
    for message in chat.chat_history:
        if message.get("role") == "tool":
            return message["content"]
    return None  # No tool message was produced.


def cot(field1, field2):
    """Judge whether a query and a product match, via chain-of-thought prompting.

    Sends a step-by-step reasoning prompt to a single assistant agent and
    returns the content of the first reply attributed to ``COT_Agent``.

    Args:
        field1: The search query.
        field2: The product description.

    Returns:
        The COT_Agent reply content (ending in the ``RESULT:[0,0.00]``
        template), or ``None`` when no such message exists in the history.
    """
    prompt = (f"###query：{field1}"
              f"###product：{field2}"
              f"###Task: Check if query and product match, return 1 if they match, and return Confidence level (range 0.00~1.00)，otherwise return 0 ，and return Confidence level (range 0.00~1.00)"
              f"###Response Template:"
              f"####STEP1.Understand the Query and Product:"
              f"Query: Identify the user's intent behind the search query and understand "
              f"what specific product or feature they are looking for."
              f"Product: Examine the product description, focusing on its key features, "
              f"specifications, and characteristics."
              f"####STEP2.Extract Key Terms:"
              f"Extract the most important terms or concepts from both the query and the product description."
              "Look for synonyms, abbreviations, or domain-specific terms that may match between the two."
              f"####STEP3.Compare for Direct Matches:"
              f"####STEP4.Apply Chain of Thought (COT) Reasoning:"
              f"Step-by-step reasoning: Consider how each key term in the query relates to the product features, one at a time."
              "Multi-step connections: If the connection isn’t obvious, use reasoning to link intermediate "
              "concepts that might bridge the query and product description."
              f"####STEP5.Assess Match Quality:"
              f"Determine if the query and product description are a perfect match, partial match, or not a match."
              "For partial matches, analyze whether the missing elements can be inferred through reasoning."
              f"####STEP6.Generate Conclusion:"
              f"####RESULT:[0,0.00]")

    COT_agent = ConversableAgent(
        name="COT_Agent",
        system_message="You are a very helpful assistant who can solve problems through a step-by-step thinking approach.Return 'TERMINATE' when the task is done.",
        llm_config={"config_list": config_list},
    )

    user_proxy = ConversableAgent(
        name="User",
        llm_config=False,
        # Stop the conversation once the assistant signals completion.
        is_termination_msg=lambda msg: msg.get("content") is not None and "TERMINATE" in msg["content"],
        human_input_mode="NEVER",
    )
    chat = user_proxy.initiate_chat(COT_agent, message=prompt)

    # Not every chat-history entry carries a "name" key (e.g. plain user
    # turns), so indexing with i["name"] could raise KeyError; .get() is safe.
    for message in chat.chat_history:
        if message.get("name") == "COT_Agent":
            return message["content"]
    return None  # The agent never replied under its own name.


def advisor1(title, Historical_fields):
    """Ask an advisor agent to propose a field UNRELATED to *title*.

    The generated field must not repeat any entry in *Historical_fields*.

    Args:
        title: The reference field to diverge from.
        Historical_fields: Previously generated fields to avoid.

    Returns:
        The ChatResult of the advisor conversation.
    """
    request = "".join([
        "###Generate another unrelated field based on the given field.The generated field cannot be the same as the historical field.",
        f"###Historical fields:{Historical_fields}",
        f"###field:{title}",
        "###Response Template:",
        "####newField:''",
    ])

    def _advisor_done(msg):
        # Terminate once the advisor's reply contains the TERMINATE sentinel.
        content = msg.get("content")
        return content is not None and "TERMINATE" in content

    requester = ConversableAgent(
        name="User",
        llm_config=False,
        is_termination_msg=_advisor_done,
        human_input_mode="NEVER",
    )
    advisor_agent = ConversableAgent(
        name="Advisor",
        system_message=(
            "You are an Advisor, and you can Generate another unrelated field based on the given field."
            "The generated field cannot be the same as the historical field.Return 'TERMINATE' when the task is done."
        ),
        llm_config={"config_list": config_list},
    )

    return requester.initiate_chat(advisor_agent, message=request)


def advisor2(title, Historical_fields):
    """Ask an advisor agent to propose a field RELATED to *title*.

    The generated field must not repeat any entry in *Historical_fields*.

    Args:
        title: The reference field to stay close to.
        Historical_fields: Previously generated fields to avoid.

    Returns:
        The ChatResult of the advisor conversation.
    """
    request = "".join([
        "###Generate another related field based on the given field.The generated field cannot be the same as the historical field.",
        f"###Historical fields：{Historical_fields}",
        f"###field:{title}",
        "###Response Template:",
        "####newField:''",
    ])

    def _advisor_done(msg):
        # Terminate once the advisor's reply contains the TERMINATE sentinel.
        content = msg.get("content")
        return content is not None and "TERMINATE" in content

    requester = ConversableAgent(
        name="User",
        llm_config=False,
        is_termination_msg=_advisor_done,
        human_input_mode="NEVER",
    )
    advisor_agent = ConversableAgent(
        name="Advisor",
        system_message=(
            "You are an Advisor, and you can Generate another related field based on the given field."
            "The generated field cannot be the same as the historical field.Return 'TERMINATE' when the task is done."
        ),
        llm_config={"config_list": config_list},
    )

    return requester.initiate_chat(advisor_agent, message=request)


def start_task(execution_task: str, agent_list: list, coding=True):
    """Run *execution_task* as a group chat over *agent_list*.

    Args:
        execution_task: The task message that opens the group chat.
        agent_list: Participating agents; the first agent initiates the chat.
        coding: When truthy, the last agent is excluded from repeat-speaker
            selection (it is assumed to be the code executor appended by the
            agent builder).

    Returns:
        The chat history of the group conversation.
    """
    group_chat = autogen.GroupChat(
        agents=agent_list,
        messages=[],
        max_round=12,
        # Truthiness test instead of the unidiomatic `coding is True`,
        # so any truthy flag value behaves consistently.
        allow_repeat_speaker=agent_list[:-1] if coding else agent_list,
    )
    manager = autogen.GroupChatManager(
        groupchat=group_chat,
        llm_config={"config_list": config_list},
    )
    result = agent_list[0].initiate_chat(manager, message=execution_task)
    return result.chat_history


def debate(field1, field2):
    """Stage a two-debater group chat on whether *field1* matches *field2*.

    Uses ``AgentBuilder`` to construct two debater agents plus a summarizer,
    then runs the debate through :func:`start_task`.

    Args:
        field1: First field to compare.
        field2: Second field to compare.

    Returns:
        The chat history of the debate (the summarizer's verdict follows the
        response template: 1 for a match, 0 for a mismatch).
    """
    prompt = (f"###Debate"
              f"###field_1: {field1}"
              f"###field_2: {field2}"
              f"determine whether the field_1 and field_2 match"
              f"###1 is a match, 0 is a mismatch")

    final_prompt = """
        ###You must reply according to the response template.
        ###Response Template:
        ####Conclusion:reson step by step
        ####debate_result:[]"""

    building_task = (f"#Create three agents: "
                     f"#1. Debater_1 (Debate Agent) "
                     f"#2. Debater_2 (Debate Agent) "
                     f"#3. SUMMARY_AGENT (Summarization Agent) "
                     f"#The SUMMARY_AGENT will summarize the debates between Debater_1 and  Debater_2 based on the following criteria: {final_prompt}")

    # Build the debate team from the task description above.
    agent_builder = AgentBuilder(
        config_file_or_env=config_file_or_env,
        builder_model=["gpt-4o"],
        agent_model=["gpt-4o"],
    )
    built_agents, built_configs = agent_builder.build(building_task, llm_config)

    return start_task(
        execution_task=prompt,
        agent_list=built_agents,
        coding=built_configs["coding"],
    )


#field1 = "sports nutrition, hydration beverages, energy drinks"
#field2 = "vitaminwater zero power c, electrolyte enhanced water w/ vitamins, dragonfruit drink, 20 fl oz"
#print(debate(field1, field2))
