import json
import time
import random
import threading
import requests
from typing import Optional, Dict, Tuple, List
from task import Task
from protocol import MessageType, serialize_task_with_path, deserialize_result
from fastapi import FastAPI, HTTPException
import uvicorn
from config.base_config import CLIENT_CONFIG, TOPO_CONFIG_FILE
import sys
import logging
import numpy as np
from concurrent.futures import ThreadPoolExecutor

# Configure logging
# NOTE(review): both handlers write under "logs/" -- the directory must
# exist before startup, otherwise FileHandler creation fails at import time.
logging.basicConfig(
    level=logging.INFO,
    filename="logs/client.log",
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Configure result logging: a dedicated logger that records one JSON line
# per finished task (success/timeout/failure) into logs/result.log.
result_logger = logging.getLogger("result_logger")
result_logger.setLevel(logging.INFO)
result_handler = logging.FileHandler("logs/result.log")
result_handler.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
result_logger.addHandler(result_handler)
result_logger.propagate = False  # Prevent result logs from being output to main logs


class TaskStats:
    """Thread-safe bookkeeping of submitted tasks and their outcomes.

    Maintains two sets of counters:
      * cumulative ones (``total_tasks``, ``completed_tasks``, ...) that are
        never reset, reported by ``get_all_stats()``;
      * windowed ``tmp_*`` ones, reported by ``get_stats()`` and cleared by
        ``reset_stats()``, intended for periodic reporting.

    Every finished task is additionally written as one JSON line via the
    module-level ``result_logger``.
    """

    def __init__(self):
        # Cumulative counters (lifetime of the process)
        self.total_tasks = 0
        self.completed_tasks = 0
        self.timeout_failed_tasks = 0
        self.other_failed_tasks = 0
        # Windowed counters, cleared by reset_stats()
        self.tmp_completed_tasks = 0
        self.tmp_timeout_failed_tasks = 0
        self.tmp_other_failed_tasks = 0
        self.tmp_completed_time: List[float] = []
        # Tasks that were submitted but have not reported a result yet
        self.pending_tasks: Dict[str, Task] = {}
        self.lock = threading.Lock()

    def add_task(self, task: Task):
        """Register a newly submitted task as pending."""
        with self.lock:
            self.total_tasks += 1
            self.pending_tasks[task.id] = task
            logger.debug(f"Added task {task.id} to stats. Total tasks: {self.total_tasks}")

    def complete_task(self, task_id: str, success: bool, result: Dict):
        """Record the outcome of a pending task and log it as a JSON line.

        Args:
            task_id: id used when the task was registered via add_task().
            success: True when the task finished within its deadline.
            result: outcome details; recognised keys are 'status',
                'completion_time', 'max_delay', 'details' and 'error'.
        """
        with self.lock:
            if task_id not in self.pending_tasks:
                logger.warning(f"Task {task_id} not found in pending tasks when completing")
                return

            task = self.pending_tasks.pop(task_id)
            # Read outcome fields with .get(): a missing key must not raise
            # while the lock is held -- the task has already been popped, so
            # a KeyError here would lose it from the statistics entirely and
            # propagate into the caller (previously only the failure branch
            # was defensive).
            completion_time = result.get('completion_time', 0)
            max_delay = result.get('max_delay', 0)

            if success:
                self.completed_tasks += 1
                self.tmp_completed_tasks += 1
                self.tmp_completed_time.append(completion_time)
                logger.debug(f"Task {task_id} completed successfully. Total completed: {self.completed_tasks}")

                # Record successful task details to result log
                result_logger.info(json.dumps({
                    "task_id": task_id,
                    "task_type": task.task_type,
                    "submit_time": task.submit_time,
                    "compute_load": task.compute_load,
                    "data_size": task.data_size,
                    "status": "success",
                    "completion_time": completion_time,
                    "max_delay": max_delay,
                    "response_time": completion_time,
                    "details": result.get('details', {})
                }))
            elif result.get('status') == 'timeout':
                self.timeout_failed_tasks += 1
                self.tmp_timeout_failed_tasks += 1
                logger.debug(f"Task {task_id} timed out. Total timeouts: {self.timeout_failed_tasks}")

                # Record timed out task details to result log
                result_logger.info(json.dumps({
                    "task_id": task_id,
                    "task_type": task.task_type,
                    "submit_time": task.submit_time,
                    "compute_load": task.compute_load,
                    "data_size": task.data_size,
                    "status": "timeout",
                    "completion_time": completion_time,
                    "max_delay": max_delay,
                    "response_time": completion_time,
                    "reason": "Task execution exceeded maximum delay"
                }))
            else:
                self.other_failed_tasks += 1
                self.tmp_other_failed_tasks += 1
                logger.debug(
                    f"Task {task_id} failed with status {result.get('status')}. Total other failures: {self.other_failed_tasks}")

                # Record other failed task details to result log
                result_logger.info(json.dumps({
                    "task_id": task_id,
                    "status": "failed",
                    "task_type": task.task_type,
                    "submit_time": task.submit_time,
                    "compute_load": task.compute_load,
                    "data_size": task.data_size,
                    "completion_time": completion_time,
                    "max_delay": max_delay,
                    "reason": result.get('error', "Unknown error"),
                    "details": result
                }))

    def get_all_stats(self) -> Dict:
        """Return the cumulative statistics (sweeping timed-out tasks first)."""
        self._check_timeouts()

        with self.lock:
            return {
                "total_tasks": self.total_tasks,
                "completed_tasks": self.completed_tasks,
                "timeout_failed_tasks": self.timeout_failed_tasks,
                "other_failed_tasks": self.other_failed_tasks,
                "pending_tasks": len(self.pending_tasks),
                "success_rate": (self.completed_tasks / self.total_tasks * 100
                                 if self.total_tasks > 0 else 0)
            }

    def _check_timeouts(self):
        """Move pending tasks whose deadline has passed to the timeout bucket."""
        current_time = time.time()
        expired = []

        # Collect under the lock, then complete outside it: complete_task()
        # re-acquires the (non-reentrant) lock itself.
        with self.lock:
            for task_id, task in self.pending_tasks.items():
                elapsed = current_time - task.submit_time
                if elapsed > task.max_delay:
                    expired.append((task_id, elapsed, task.max_delay))

        for task_id, elapsed, max_delay in expired:
            self.complete_task(task_id, False,
                               {"status": "timeout", "completion_time": elapsed, "max_delay": max_delay})

    def get_stats(self) -> Dict:
        """Return the windowed statistics since the last reset (sweeps timeouts first)."""
        self._check_timeouts()

        with self.lock:
            finished = (self.tmp_completed_tasks + self.tmp_timeout_failed_tasks
                        + self.tmp_other_failed_tasks)
            return {
                "total_tasks": self.total_tasks,  # cumulative, unlike the other fields
                "completed_tasks": self.tmp_completed_tasks,
                "timeout_failed_tasks": self.tmp_timeout_failed_tasks,
                "other_failed_tasks": self.tmp_other_failed_tasks,
                "avg_completed_time": np.mean(self.tmp_completed_time) if self.tmp_completed_time else 0,
                "pending_tasks": len(self.pending_tasks),
                "success_rate": (self.tmp_completed_tasks / finished * 100 if finished > 0 else 0)
            }

    def reset_stats(self):
        """Clear the windowed ``tmp_*`` counters."""
        with self.lock:
            self.tmp_completed_tasks = 0
            self.tmp_timeout_failed_tasks = 0
            self.tmp_other_failed_tasks = 0
            self.tmp_completed_time = []
            logger.info("Statistics reset")


class TaskClient:
    """HTTP task client: submits tasks to compute nodes and receives results.

    An embedded FastAPI application (served by uvicorn in a daemon thread)
    exposes stats endpoints and the /result callback that remote nodes POST
    task results back to. Task submission itself happens on a bounded
    ThreadPoolExecutor with retries.
    """

    def __init__(self, host: str, port: int):
        """Create the client, start the pool monitor and the API server.

        Args:
            host: address advertised to remote nodes inside the task message.
            port: port the embedded API server binds to (on 0.0.0.0).
        """
        # self.results = {}
        self.app = FastAPI()
        self.request_interval = CLIENT_CONFIG["request_interval"]
        self.max_concurrent_tasks = CLIENT_CONFIG["max_concurrent_tasks"]
        self.host = host
        self.port = port
        self.is_running = True
        self.stats = TaskStats()
        # Bounded thread pool with a reasonable worker count
        self.executor = ThreadPoolExecutor(max_workers=self.max_concurrent_tasks, thread_name_prefix="task_worker")

        # Background monitoring of the thread-pool state
        self._monitor_thread = threading.Thread(target=self._monitor_thread_pool, daemon=True)
        self._monitor_thread.start()

        # self.session = requests.Session()  # use requests.Session instead of aiohttp

        # Windowed statistics endpoint (counters are reset after each read)
        @self.app.get("/stats")
        async def get_stats():
            stats = self.stats.get_stats()
            logger.info(f"Current statistics: {stats}")
            self.stats.reset_stats()
            return stats

        # Cumulative statistics endpoint
        @self.app.get("/all_stats")
        async def get_all_stats():
            stats = self.stats.get_all_stats()
            logger.info(f"Current all statistics: {stats}")
            return stats

        # Endpoint to reset the windowed statistics
        @self.app.post("/reset_stats")
        async def reset_stats():
            self.stats.reset_stats()
            return {"status": "success"}

        # Result-receiving route: compute nodes POST finished-task payloads here
        @self.app.post("/result")
        async def receive_result(result: Dict):
            try:
                r = deserialize_result(result["data"])
                # self.results[r.task_id] = r
                logger.info(f"Received result for task {r.task_id}")

                # Classify as timeout or success based on total elapsed time
                current_time = time.time()
                if current_time - r.submit_time > r.max_delay:
                    logger.warning(
                        f"Task {r.task_id} timed out. Completion time: {current_time - r.submit_time}, max_delay:{r.max_delay}")
                    self.stats.complete_task(r.task_id, False, {
                        "status": "timeout",
                        "completion_time": current_time - r.submit_time,
                        "max_delay": r.max_delay
                    })
                else:
                    logger.info(f"Task {r.task_id} completed successfully")
                    self.stats.complete_task(r.task_id, True, {
                        "status": "success",
                        "completion_time": current_time - r.submit_time,
                        "max_delay": r.max_delay
                    })

                return {"status": "success"}
            except Exception as e:
                logger.exception(f"Error processing result: {e}")
                raise HTTPException(status_code=500, detail=str(e))

        # Launch the API server in a background daemon thread
        threading.Thread(target=self._start_api_server, daemon=True).start()
        logger.info(f"TaskClient initialized on 0.0.0.0:{port}")

    def _start_api_server(self):
        """Starts the FastAPI server (blocking; run from a daemon thread)."""
        try:
            uvicorn.run(self.app, host="0.0.0.0", port=self.port)
        except Exception as e:
            logger.exception(f"Error starting API server: {e}")
            sys.exit(1)  # Exit if the API server fails to start

    def submit_task(self, task: Task, target_node: str):
        """Queue *task* for asynchronous submission to *target_node*.

        NOTE(review): when the worker queue is full the task is silently
        dropped after a 1s wait -- it is never registered in the stats and
        the caller is not informed. Confirm this is the intended back-pressure
        behavior.
        """
        try:
            # Check thread-pool backlog (relies on the private _work_queue
            # attribute of ThreadPoolExecutor -- not a public API)
            if self.executor._work_queue.qsize() >= self.max_concurrent_tasks:
                logger.warning("Thread pool queue is full, waiting...")
                time.sleep(1)
                return

            self.stats.add_task(task)
            message = {
                'type': MessageType.SUBMIT_TASK,
                'data': serialize_task_with_path(
                    task, '', [],
                    (self.host, self.port)
                )
            }

            future = self.executor.submit(self._submit_task_thread, task, target_node, message)
            future.add_done_callback(self._handle_task_completion)

        except Exception as e:
            logger.exception(f"Error submitting task {task.id}: {e}")
            self.stats.complete_task(task.id, False, {
                "status": "submit_error",
                "error": str(e),
                "completion_time": time.time() - task.submit_time,
                "max_delay": task.max_delay
            })

    def _handle_task_completion(self, future):
        """Done-callback for submission futures: surface worker exceptions."""
        try:
            future.result()  # re-raises any exception from the worker thread
        except Exception as e:
            logger.exception(f"Task execution failed: {e}")

    def _submit_task_thread(self, task: Task, target_node: str, message: Dict):
        """Worker: POST the task to the target node with retries and
        exponential backoff; mark the task failed after the final attempt."""
        max_retries = 3
        retry_delay = 1

        for attempt in range(max_retries):
            try:
                logger.info(f"Submitting task {task.id} to {target_node} (attempt {attempt + 1})")

                response = requests.post(
                    f"http://{target_node}/submit_task",
                    json=message,
                    timeout=(5, 30)  # 5s connect timeout, 30s read timeout
                )

                if response.status_code == 200:
                    logger.info(f"Task {task.id} submitted successfully")
                    return

                logger.warning(f"Task {task.id} submission failed: {response.text}")

            except requests.exceptions.Timeout:
                logger.warning(f"Timeout submitting task {task.id} (attempt {attempt + 1})")
            except Exception as e:
                logger.exception(f"Error submitting task {task.id} (attempt {attempt + 1}): {e}")

            if attempt < max_retries - 1:
                time.sleep(retry_delay)
                retry_delay *= 2  # exponential backoff

        # All retries exhausted
        self.stats.complete_task(task.id, False, {
            "status": "submit_failed",
            "error": "Max retries exceeded",
            "completion_time": time.time() - task.submit_time,
            "max_delay": task.max_delay
        })

    def _monitor_thread_pool(self):
        """Periodically log thread count and pending queue size."""
        while self.is_running:
            active_threads = len([t for t in threading.enumerate() if t.is_alive()])
            logger.info(f"Active threads: {active_threads}, "
                       f"Thread pool tasks: {self.executor._work_queue.qsize()}")
            time.sleep(30)  # log every 30 seconds

    def close(self):
        """Stop the monitor loop and shut down the thread pool."""
        self.is_running = False
        logger.info("Shutting down thread pool...")
        self.executor.shutdown(wait=True, cancel_futures=True)  # Python 3.9+
        logger.info("Thread pool shut down complete")


def generate_random_task(topo_config) -> Tuple[Task, str]:
    """Build a random Task from the service configs in *topo_config*.

    A task type is drawn uniformly from ``topo_config["service_configs"]``;
    its delay/compute/data parameters are each sampled from a normal
    distribution whose (mean, std) pair comes from the chosen config.

    Args:
        topo_config: parsed topology config with a "service_configs" dict.

    Returns:
        (task, target_nodes) where target_nodes is taken verbatim from the
        selected service config.

    Raises:
        Re-raises any error (missing keys, malformed ranges) after logging.
    """
    try:
        service_config = topo_config["service_configs"]
        task_type_name = random.choice(list(service_config.keys()))
        task_config = service_config[task_type_name]

        # NOTE(review): millisecond timestamps can collide when tasks are
        # generated faster than one per ms -- consider a counter/uuid suffix.
        task_id = f"task_{int(time.time() * 1000)}"

        # Samples are intentionally not clamped, so a draw can be negative
        # (e.g. a negative max_delay makes the task time out immediately)
        # -- confirm this matches the experiment design.
        max_delay = np.random.normal(*task_config["max_delay_range"])
        compute_load = np.random.normal(*task_config["compute_load_range"])
        data_size = np.random.normal(*task_config["data_size_range"])

        task = Task.create_random_task(
            task_id=task_id,
            task_type=task_type_name,
            max_delay=max_delay,
            compute_load=compute_load,
            data_size=data_size
        )
        logger.info(f"Generated random task {task_id} of type {task_type_name}")

        return task, task_config["target_nodes"]

    except Exception as e:
        logger.exception(f"Error generating random task: {e}")
        raise


def get_topo_config():
    """Load and return the topology configuration as a dict.

    The path comes from TOPO_CONFIG_FILE. On a missing or syntactically
    invalid file the error is logged and the process exits with status 1.
    """
    try:
        with open(TOPO_CONFIG_FILE, "r") as fh:
            return json.load(fh)
    except FileNotFoundError:
        logger.error(f"Config file {TOPO_CONFIG_FILE} not found.")
        sys.exit(1)  # cannot continue without a topology
    except json.JSONDecodeError:
        logger.error(f"Config file {TOPO_CONFIG_FILE} is not valid JSON.")
        sys.exit(1)  # corrupted configuration file


def main():
    """Entry point: look up this node's address, start a TaskClient and
    submit randomly generated tasks until the configured maximum."""
    # Node identity is currently hard-coded; the usage comments at the
    # bottom of the file suggest it was once a CLI argument.
    node_id = "cpNode1"

    topo_config = get_topo_config()
    host = None
    port = None
    for node in topo_config["cpn_nodes"]:
        if node["name"] == node_id:
            host = node["ip"]
            port = node["port"]
            break

    if not host or not port:
        logger.error(f"Node {node_id} not found in config")
        sys.exit(1)

    logger.info(f"Node Configuration: host={host}, port={port}")

    # Create the task client (starts its API server and monitor threads)
    try:
        client = TaskClient(host, port)
    except Exception as e:
        logger.error(f"Failed to initialize TaskClient: {e}")
        sys.exit(1)

    # Simulate users submitting tasks
    task_count = 0
    max_tasks = CLIENT_CONFIG["max_task"]  # upper bound on submitted tasks

    try:
        while task_count < max_tasks:
            try:
                task, target_node = generate_random_task(topo_config)
                logger.info(f"Submitting task: {task.id} to node: {target_node}")

                client.submit_task(task, target_node)
                task_count += 1

                # Dynamically adjust the submission interval: double it when
                # the worker queue is close to full.  (A leftover debug line
                # used to overwrite sleep_time unconditionally, which made
                # this adjustment dead code; it has been removed.)
                queue_size = client.executor._work_queue.qsize()
                if queue_size > client.max_concurrent_tasks * 0.8:
                    sleep_time = client.request_interval * 2
                else:
                    sleep_time = client.request_interval
                time.sleep(sleep_time)

            except Exception as e:
                logger.exception(f"Error in task submission loop: {e}")
                time.sleep(5)  # back off longer after an error

    except KeyboardInterrupt:
        logger.info("Program interrupted by user.")
    except Exception as e:
        logger.exception(f"Unhandled exception in main loop: {e}")
    finally:
        logger.info("Shutting down client...")
        client.close()
        logger.info("Client shutdown complete.")


if __name__ == "__main__":
    # Usage examples:
    # python client.py cpNode1
    # docker run -d --name client -p 6000:6000 -v $(pwd)/config:/app/config task-client cpNode1
    main()
