import os
import time
import json
import copy
import pickle
import dotenv
import logging
import threading
import traceback
import subprocess
from multiprocessing import Pool
import multiprocessing as mp

from hey.utils.misc import set_log
from hey.agents.base import NaiveAgent
from hey.agents.basic.critic import BasicCritic
from hey.agents.basic.planner import BasicPlanner
from hey.agents.basic.const import END, INPUT_REQUIRED
from hey.mcp_tools.sync_client import get_mcp_client

# Absolute directory containing this module (for locating bundled resources).
dir_path = os.path.dirname(os.path.realpath(__file__))


class BasicAgent(NaiveAgent):
    """Agent that plans a query into a task graph and runs tasks in a process pool.

    ``BasicPlanner`` turns the query into tasks; each ready task that has
    dependencies is re-planned first (retain / remove / replace) before being
    dispatched to a worker process.  Shared state (task results, relayed user
    input) flows through ``self.environment``.
    """

    # Seconds to wait for terminal input before giving up with an empty answer.
    INPUT_TIMEOUT = 120

    def __init__(self, config, environment):
        """Build planner/critic and initialize task-scheduling bookkeeping.

        :param config: agent configuration (planner/critic sub-configs,
            ``max_workers``, ``max_num_tasks_launched``, ...).
        :param environment: shared environment used for messaging and results.
        """
        self.environment = environment
        self.planner = BasicPlanner(
            config.planner,
            environment,
            config.mcp_server_script_file
        )
        self.critic = BasicCritic(config.critic, environment)
        self.config = config

        # Task bookkeeping: all task dicts keyed by name plus the three queues
        # that drive the scheduling loop in serve().
        self.all_tasks = {}
        self.completed_task_names = []
        self.running_task_names = []
        self.pending_task_names = []

    def get_toolkits(self, log_path, server_script_file):  # mcp
        """Return a synchronous MCP client for the given server script."""
        return get_mcp_client(
            log_path=log_path,
            server_script_file=server_script_file
        )

    def call_toolkit(self, toolkits, tool_name, tool_args):  # mcp
        """Call an MCP tool and join all returned content parts into one string."""
        result = toolkits.call_tool(tool_name, tool_args)
        res_list = []
        for con in result.content:
            res_list.append(con.text)
        result = "\n".join(res_list)
        return result

    def relay_input_for_workers(self):  # in a separate thread of the main process
        """Relay terminal input to worker sub-processes.

        Runs in a separate thread of the main process: under the "spawn"
        start method sub-processes cannot read the terminal, so they publish
        an INPUT_REQUIRED message and this thread answers on their behalf.
        An END message terminates the loop.
        """
        subscriber = self.environment.get_message_subscriber(
            channels=[INPUT_REQUIRED, END]
        )
        for message in subscriber.listen():
            raw_data = message['data']
            if not isinstance(raw_data, bytes):
                continue

            channel = message["channel"].decode()
            try:
                # NOTE(security): pickle.loads is only acceptable here because
                # the message bus is fed by our own worker processes; never
                # expose this channel to untrusted input.
                data = pickle.loads(raw_data)
            except Exception as e:
                logging.error(f'Unable to load data from channel {channel} due to {e}')
                # BUGFIX: skip this message; previously execution fell through
                # and raised NameError on the undefined `data` below.
                continue
            if channel == INPUT_REQUIRED:
                from inputimeout import inputimeout, TimeoutOccurred
                try:
                    user_input = inputimeout(
                        prompt=data["prompt"],
                        timeout=self.INPUT_TIMEOUT
                    )
                    logging.info(f"User input got (length: {len(user_input)}).")
                except TimeoutOccurred:
                    user_input = ""
                    logging.error("Time's up! No user input received.")

                self.environment.set_data_for_subprocess(
                    data=user_input,
                    target_pid=data["target_pid"]
                )
            elif channel == END:
                logging.info(f"Asked to quit")
                break

    def _over_launch_limit(self, num_tasks_launched):
        """Log the launch count; return True when it exceeds the configured cap."""
        logging.info(f"Num tasks launched: {num_tasks_launched}")
        if num_tasks_launched > self.config.max_num_tasks_launched:
            logging.info(f"Exceeded max number of tasks. Aborting...")
            return True
        return False

    def serve(self, query):
        """Plan ``query`` into tasks and execute them with a pool of workers.

        Blocks until every task finishes, the launch cap is exceeded, or an
        unrecoverable error occurs.  Always publishes END so helper threads
        stop, then kills any leftover MCP server processes.
        """
        start_time = time.perf_counter()
        logging.info(f"Starting serving the query: {query}")

        num_tasks_launched = 0
        relay_thread = None  # created only after planning succeeds
        try:
            all_tasks = self.planner.plan(query)
            self.set_tasks(all_tasks)
            async_results = {}

            # Because under the "spawn" start method, sub-processes cannot access the terminal input
            relay_thread = threading.Thread(target=self.relay_input_for_workers)
            relay_thread.start()

            abort = False
            mp.set_start_method("spawn", force=True)
            with Pool(processes=self.config.max_workers) as pool:
                # Loop until no pending tasks remain and all running tasks have finished
                while self.pending_tasks_exist() or self.running_tasks_exist():

                    # Step 1: Check which running tasks have completed
                    newly_finished_tasks = self.get_newly_finished_tasks(async_results)
                    for task_name in newly_finished_tasks:
                        try:
                            self.propagate_exception(task_name, async_results)
                        except Exception as e:  # TODO: if necessary, deal with it
                            logging.error(f"Task {task_name} not finished due to {e}\n"
                                          f"{traceback.format_exc()}")
                        self.mark_completed_task(task_name, async_results)

                    # Step 2: Schedule any tasks whose dependencies are met
                    while True:
                        ready_task = self.get_a_ready_task()
                        if not ready_task:
                            break

                        # Only try to replan for those tasks having dependencies
                        if ready_task["dependencies"]:
                            adjust_result = self.planner.replan(
                                query=query,
                                pending_task=ready_task
                            )
                            if adjust_result["choice"] == "retain":
                                num_tasks_launched += 1
                                if self._over_launch_limit(num_tasks_launched):
                                    abort = True
                                    break
                                result = pool.apply_async(self.process_a_task, (query, ready_task))
                                self.mark_running_task(ready_task["name"], result, async_results)
                            elif adjust_result["choice"] == "remove":
                                self.mark_completed_task(ready_task["name"], async_results)

                                dummy_result = ("Skipped as the task is already done before, "
                                                "or deemed as irrelevant")
                                self.environment.set_task_result(
                                    task=ready_task,
                                    result=dummy_result
                                )  # TODO: if the task is running, the task_state may be later overwritten
                            else:  # choice == replace
                                derived_ready_tasks = self.replace_a_task(
                                    original_task=ready_task,
                                    new_task_list=adjust_result["detail"]
                                )
                                for derived_ready_task in derived_ready_tasks:
                                    num_tasks_launched += 1
                                    if self._over_launch_limit(num_tasks_launched):
                                        abort = True
                                        break
                                    result = pool.apply_async(self.process_a_task, (query, derived_ready_task))
                                    self.mark_running_task(derived_ready_task["name"], result, async_results)
                                if abort:
                                    break
                        else:
                            num_tasks_launched += 1
                            if self._over_launch_limit(num_tasks_launched):
                                abort = True
                                break
                            result = pool.apply_async(self.process_a_task, (query, ready_task))
                            self.mark_running_task(ready_task["name"], result, async_results)
                    if abort:
                        break

                    # Avoid busy waiting when nothing was scheduled or finished
                    if not ready_task and not newly_finished_tasks:
                        time.sleep(self.config.task_waiting_time_in_sec)
                        continue

        except Exception as e:
            print(f"Failed to serve query due to {e}")
            print(traceback.format_exc())
        finally:
            # to notify other threads to stop
            self.environment.publish_a_message(channel=END, message="done")
            # BUGFIX: wait for the relay thread instead of letting it outlive
            # serve(); bounded join in case the subscriber misses the END message.
            if relay_thread is not None:
                relay_thread.join(timeout=5)

            # only need to do this when using owl's tools through mcp
            cmd = "ps aux | grep python | grep server.py | grep -v grep | awk '{print $2}' | xargs kill -9"
            try:
                subprocess.run(cmd, shell=True, check=True)
                print("Kill command executed successfully.")
            except subprocess.CalledProcessError as e:
                print(f"Error executing kill command: {e}")

        end_time = time.perf_counter()
        duration = end_time - start_time
        print(f"Query served in {round(duration, 3)}s")
        logging.info(f"Query served in {round(duration, 3)}s")

class Agent_single_thread(BasicAgent):
    """Single-process variant of ``BasicAgent``.

    Tasks run sequentially in the calling process, so there is no worker
    pool and no input-relay thread; ``ask_user`` reads the terminal directly.
    """

    def call_toolkit(self, toolkits, tool_name, tool_args):
        """Call an MCP tool; ``ask_user`` is answered directly via stdin."""
        # work around for ask user
        if tool_name == 'ask_user':
            result = input(tool_args['question']+' > ')
        else:
            result = toolkits.call_tool(tool_name, tool_args)
            res_list = []
            for con in result.content:
                res_list.append(con.text)
            result = "\n".join(res_list)
        return result

    def serve(self, query):
        """Plan ``query`` and execute its tasks one at a time in this process.

        :return: the result of the last executed task, or None when no task
            produced a result (planning failed, or every task was removed).
        """
        start_time = time.perf_counter()
        logging.info(f"Starting serving the query: {query}")

        num_tasks_launched = 0
        # BUGFIX: initialize so the final `return result` cannot raise
        # NameError when planning raises or no task branch ever runs.
        result = None
        try:
            all_tasks = self.planner.plan(query)
            self.set_tasks(all_tasks)
            async_results = {}

            abort = False

            # Loop until no pending tasks remain and all running tasks have finished
            while self.pending_tasks_exist() or self.running_tasks_exist():

                # Step 1: Check which running tasks have completed
                newly_finished_tasks = self.get_newly_finished_tasks(async_results)
                for task_name in newly_finished_tasks:
                    try:
                        self.propagate_exception(task_name, async_results)
                    except Exception as e:  # TODO: if necessary, deal with it
                        logging.error(f"Task {task_name} not finished due to {e}\n"
                                        f"{traceback.format_exc()}")
                    self.mark_completed_task(task_name, async_results)

                # Step 2: Schedule any tasks whose dependencies are met
                while True:
                    ready_task = self.get_a_ready_task()
                    if not ready_task:
                        break

                    # Only try to replan for those tasks having dependencies
                    if ready_task["dependencies"]:
                        adjust_result = self.planner.replan(
                            query=query,
                            pending_task=ready_task
                        )
                        if adjust_result["choice"] == "retain":
                            num_tasks_launched += 1
                            logging.info(f"Num tasks launched: {num_tasks_launched}")
                            if num_tasks_launched > self.config.max_num_tasks_launched:
                                logging.info(f"Exceeded max number of tasks. Aborting...")
                                abort = True
                                break
                            # Run synchronously, then mark done immediately.
                            result = self.process_a_task(query, ready_task)
                            self.mark_completed_task(ready_task["name"], async_results)
                        elif adjust_result["choice"] == "remove":
                            self.mark_completed_task(ready_task["name"], async_results)

                            dummy_result = ("Skipped as the task is already done before, "
                                            "or deemed as irrelevant")
                            self.environment.set_task_result(
                                task=ready_task,
                                result=dummy_result
                            )  # TODO: if the task is running, the task_state may be later overwritten
                        else:  # choice == replace
                            derived_ready_tasks = self.replace_a_task(
                                original_task=ready_task,
                                new_task_list=adjust_result["detail"]
                            )
                            for derived_ready_task in derived_ready_tasks:
                                num_tasks_launched += 1
                                logging.info(f"Num tasks launched: {num_tasks_launched}")
                                if num_tasks_launched > self.config.max_num_tasks_launched:
                                    logging.info(f"Exceeded max number of tasks. Aborting...")
                                    abort = True
                                    break
                                result = self.process_a_task(query, derived_ready_task)
                                self.mark_completed_task(derived_ready_task["name"], async_results)
                            if abort:
                                break
                    else:
                        num_tasks_launched += 1
                        logging.info(f"Num tasks launched: {num_tasks_launched}")
                        if num_tasks_launched > self.config.max_num_tasks_launched:
                            logging.info(f"Exceeded max number of tasks. Aborting...")
                            abort = True
                            break
                        result = self.process_a_task(query, ready_task)
                        self.mark_completed_task(ready_task["name"], async_results)
                if abort:
                    break

                # Avoid busy waiting
                if not ready_task and not newly_finished_tasks:
                    time.sleep(self.config.task_waiting_time_in_sec)
                    continue

        except Exception as e:
            print(f"Failed to serve query due to {e}")
            print(traceback.format_exc())

        end_time = time.perf_counter()
        duration = end_time - start_time
        print(f"Query served in {round(duration, 3)}s")
        logging.info(f"Query served in {round(duration, 3)}s")

        return result # in case result is needed