import asyncio
import base64
import os
import time
import uuid
from typing import Optional

from fastapi import HTTPException

from config.config_loader import ConfigLoader
from models import *
from services.http_client import HTTPClient
from services.task_begin import Task


class APIHandler:
    """Dispatches dashboard/API requests to the model and inference backends.

    Owns one HTTP client per configured server and keeps rolling histories
    (last 5 samples) of GPU, CPU and network metrics for the two edge nodes,
    plus a synthetic "cloud" series blended from them (0.4*edge1 + 0.6*edge2).
    """

    def __init__(self):
        """Build the backend HTTP clients from the loaded configuration.

        Raises:
            ValueError: if the configuration lacks a 'servers' section.
            KeyError: if a required server url/timeout entry is missing.
        """
        config = ConfigLoader().get_config()
        if not config.get("servers"):
            raise ValueError("Missing 'servers' configuration")

        servers = config["servers"]
        self.model_server = HTTPClient(
            base_url=servers["model_server"]["url"],
            timeout=servers["model_server"]["timeout"],
        )
        self.inference_server = HTTPClient(
            base_url=servers["inference_server"]["url"],
            timeout=servers["inference_server"]["timeout"],
        )
        self.inference_server_2 = HTTPClient(
            base_url=servers["inference_server_2"]["url"],
            timeout=servers["inference_server_2"]["timeout"],
        )
        # The single currently-running evolution task, if any.
        self.task = None
        # Rolling metric histories, keyed by node name.
        self.gpudata = {"edge1": [], "edge2": [], "cloud": []}
        self.cpudata = {"edge1": [], "edge2": [], "cloud": []}
        self.networkdata = {"edge1": [], "edge2": [], "cloud": []}

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    @staticmethod
    async def _fetch_or_none(client, endpoint):
        """GET ``endpoint`` via ``client``; return None on any failure.

        Status polling is best-effort: an unreachable backend must not break
        the dashboard, so callers substitute a fallback value for None.
        """
        try:
            return await client.get(endpoint=endpoint, timeout=1)
        except Exception:
            return None

    @staticmethod
    def _mean_gpu_usage(report):
        """Mean ``usage_percent`` over a /gpustatus report.

        ``report`` is a list of per-GPU dicts such as
        ``{"gpu_id": 0, "memory_used": "4 MiB", "memory_total": "40960 MiB",
        "usage_percent": 0.01}``.  Returns 50 when the report is missing or
        empty (the previous code divided by zero on an empty list).
        """
        if not report:
            return 50
        return sum(item["usage_percent"] for item in report) / len(report)

    @staticmethod
    def _bandwidth_to_kb(bandwidth):
        """Convert a bandwidth string such as '29.74 KB/s' to KB/s as float.

        Raises:
            ValueError: for malformed input or an unsupported unit (the old
                code silently kept the raw string in that case, corrupting
                the numeric history).
        """
        value, unit = bandwidth.split(" ")
        factors = {"KB/s": 1.0, "MB/s": 1024.0, "GB/s": 1024.0 * 1024.0}
        if unit not in factors:
            raise ValueError(f"Unsupported bandwidth unit: {unit}")
        return float(value) * factors[unit]

    @staticmethod
    def _push_history(store, edge1_value, edge2_value):
        """Record one sample per edge node and rebuild the derived series.

        Keeps only the 5 most recent samples per node; the "cloud" series is
        recomputed element-wise as 0.4*edge1 + 0.6*edge2.
        """
        store["edge1"] = (store["edge1"] + [edge1_value])[-5:]
        store["edge2"] = (store["edge2"] + [edge2_value])[-5:]
        store["cloud"] = [
            e1 * 0.4 + e2 * 0.6
            for e1, e2 in zip(store["edge1"], store["edge2"])
        ]

    def _launch_task(self, model, request, task_type):
        """Terminate any running task, start a new one, return its ID.

        Only one evolution task may exist at a time, so the previous task is
        terminated first.  NOTE(review): ``is_fake=True`` presumably makes
        the Task simulate training — confirm in services.task_begin.
        """
        if self.task is not None:
            self.task.terminate()
        task_id = str(uuid.uuid4())
        self.task = Task(
            task_id,
            model,
            request.task,
            task_type,
            request.method,
            request.lr,
            request.epoch,
            request.batchSize,
            request.dataStartTime,
            request.dataEndTime,
        )
        self.task.start(is_fake=True)
        return task_id

    # ------------------------------------------------------------------
    # Endpoint handlers
    # ------------------------------------------------------------------

    async def update_samples(self, request: UpdateSamplesRequest) -> BaseResponse:
        """Pretend to re-sample the dataset for the given threshold.

        TODO: implement the real business logic.  Placeholder behavior:
        threshold > 0.8 -> 12 samples, threshold > 0.5 -> 32, else 87.
        """
        try:
            # Test the stricter threshold first: the previous ordering made
            # the > 0.8 branch unreachable because > 0.5 matched before it.
            if request.threshold > 0.8:
                num, delay = 12, 2
            elif request.threshold > 0.5:
                num, delay = 32, 5
            else:
                num, delay = 87, 8
            # Non-blocking sleep: time.sleep() would stall the event loop.
            await asyncio.sleep(delay)
            response = UpdateSamplesResponse(
                num=num,
                sampleID=["sample1", "sample2", "sample3"],
            )
            return BaseResponse(
                status="success",
                message="Samples updated successfully",
                data=response.dict(),
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    async def could_evolution(self, request: CouldEvolutionRequest) -> BaseResponse:
        """Start (or restart) a cloud-evolution training task."""
        try:
            taskID = self._launch_task(request.model, request, TaskType.CLOUD_EVOLUTION)
            response = TaskResponse(
                taskID=taskID,
                taskType=TaskType.CLOUD_EVOLUTION,
                status=TaskStatus.RUNNING,
                sampleID=["sample1", "sample2"],
            )
            return BaseResponse(
                status="success",
                message="Cloud evolution task started successfully",
                data=response.dict(),
            )
        except Exception as e:
            import traceback
            # Include the traceback to ease debugging of task start-up failures.
            raise HTTPException(
                status_code=500,
                detail={"error": str(e), "traceback": traceback.format_exc()},
            )

    async def collaborate_evolution(self, request: CollaborateEvolutionRequest) -> BaseResponse:
        """Start (or restart) a collaborative-evolution training task."""
        try:
            taskID = self._launch_task(request.Smodel, request, TaskType.COLLABORATIVE_EVOLUTION)
            response = TaskResponse(
                taskID=taskID,
                taskType=TaskType.COLLABORATIVE_EVOLUTION,
                status=TaskStatus.RUNNING,
                sampleID=["sample3", "sample4"],
            )
            return BaseResponse(
                status="success",
                message="Collaborative evolution task started successfully",
                data=response.dict(),
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    async def task_progress(self, request: TaskProgressRequest) -> BaseResponse:
        """Report metrics of the current task; 404 when none is running."""
        try:
            if self.task is None:
                raise HTTPException(status_code=404, detail="Task not found")
            losses, accuracies = self.task.get_metrics()
            response = TaskProgressResponse(
                taskID=self.task.taskID,
                strategyType=self.task.strategyType,
                status=self.task.status,
                loss=losses,
                acc=accuracies,
                # Placeholder completion rate and elapsed time (ISO-8601 duration).
                rate=0.83,
                time="PT1H30M15S",
            )
            return BaseResponse(
                status="success",
                message="Task progress retrieved successfully",
                data=response.dict(),
            )
        except HTTPException:
            # Preserve the deliberate 404 above instead of re-wrapping as 500.
            raise
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    async def set_strategy(self, request: StrategyRequest) -> BaseResponse:
        """Echo the selected strategy back to the caller.

        The class previously defined set_strategy twice; Python kept only the
        later definition, which is the one preserved here.
        """
        try:
            response = StrategyResponse(strategyType=request.strategyType)
            return BaseResponse(
                status="success",
                message="Strategy set successfully",
                data=response.dict(),
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    async def inference(self, request: InferenceRequest) -> BaseResponse:
        """Run single-item inference via the remote /damo endpoint.

        NOTE(review): assumes InferenceRequest exposes ``filePath`` and the
        server response contains ``ImagePath`` — confirm against models.py
        and the inference server.
        """
        try:
            payload = {"File_path": request.filePath}
            started = time.time()
            response_data = await self.inference_server.post(
                endpoint="/damo",
                data=payload,
            )
            elapsed = time.time() - started
            response = InferenceResponse(
                resultFilePath=response_data["ImagePath"],
                time=str(elapsed),  # API expects the duration as a string
            )
            return BaseResponse(
                status="success",
                message="Inference completed successfully",
                data=response.dict(),
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    async def inference_batch(self, request: BatchInferenceRequest) -> BaseResponse:
        """Run batch inference via the remote /damobatch endpoint."""
        try:
            payload = {
                "Flag": "01",
                "File_path": request.file,  # request.file carries the path
            }
            started = time.time()
            response_data = await self.inference_server.post(
                endpoint="/damobatch",
                data=payload,
            )
            elapsed = time.time() - started

            # The remote server signals success through the "results" field.
            if response_data.get("results") != "success":
                raise HTTPException(status_code=500, detail="Inference server error")

            response = BatchInferenceResponse(
                resultFilePath=response_data["ImagePath"],
                time=str(elapsed),
            )
            return BaseResponse(
                status="success",
                message="Batch inference completed successfully",
                data=response.dict(),
            )
        except HTTPException:
            # Keep the explicit server-error response raised above.
            raise
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    async def gpu_status(self) -> BaseResponse:
        """Poll both edge servers' GET /gpustatus and report usage history.

        Each server returns a JSON list of per-GPU dicts
        (``gpu_id``, ``memory_used``, ``memory_total``, ``usage_percent``);
        the mean usage across GPUs is recorded per node, with 50% substituted
        for an unreachable or empty report.
        """
        try:
            edge1_report = await self._fetch_or_none(self.inference_server, "/gpustatus")
            edge2_report = await self._fetch_or_none(self.inference_server_2, "/gpustatus")

            self._push_history(
                self.gpudata,
                self._mean_gpu_usage(edge1_report),
                self._mean_gpu_usage(edge2_report),
            )

            response = GPUStatusResponse(data=self.gpudata)
            return BaseResponse(
                status="success",
                message="GPU status retrieved successfully",
                data=response.dict(),
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    async def cpu_status(self) -> BaseResponse:
        """Poll both edge servers' GET /cpustatus and report usage history.

        Each server returns a dict including ``cpu_usage_percent`` (plus
        memory fields that are ignored here).  An unreachable server counts
        as 50% usage.
        """
        try:
            edge1_report = await self._fetch_or_none(self.inference_server, "/cpustatus")
            edge2_report = await self._fetch_or_none(self.inference_server_2, "/cpustatus")

            edge1_usage = edge1_report["cpu_usage_percent"] if edge1_report else 50
            edge2_usage = edge2_report["cpu_usage_percent"] if edge2_report else 50
            self._push_history(self.cpudata, edge1_usage, edge2_usage)

            response = CPUStatusResponse(data=self.cpudata)
            return BaseResponse(
                status="success",
                message="CPU status retrieved successfully",
                data=response.dict(),
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    async def network_status(self) -> BaseResponse:
        """Poll both edge servers' GET /networkstatus and report bandwidth.

        ``sent_bandwidth`` strings like '29.74 KB/s' are normalized to KB/s;
        an unreachable server counts as a nominal '100 KB/s'.
        """
        try:
            edge1_report = await self._fetch_or_none(self.inference_server, "/networkstatus")
            edge2_report = await self._fetch_or_none(self.inference_server_2, "/networkstatus")

            if edge1_report is None:
                edge1_report = {"sent_bandwidth": "100 KB/s"}
            if edge2_report is None:
                edge2_report = {"sent_bandwidth": "100 KB/s"}

            self._push_history(
                self.networkdata,
                self._bandwidth_to_kb(edge1_report["sent_bandwidth"]),
                self._bandwidth_to_kb(edge2_report["sent_bandwidth"]),
            )

            response = NetworkStatusResponse(data=self.networkdata)
            return BaseResponse(
                status="success",
                message="Network status retrieved successfully",
                data=response.dict(),
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    async def get_file_by_path(self, request: GetFileRequest) -> BaseResponse:
        """Download a file from the inference server and mirror it locally.

        Fetches base64-encoded content from POST /getFileByPath, writes it to
        the same path on the local filesystem, and returns name, size and the
        (still base64-encoded) content.

        Raises:
            HTTPException: 503 when the server is unreachable, 502 for an
                empty/invalid/undecodable response, 403 for permission
                problems, 500 for other local I/O failures.
        """
        try:
            # Normalize the path and use forward slashes for the remote side.
            normalized_path = os.path.normpath(request.filePath).replace('\\', '/')
            file_request = {"File_path": normalized_path}

            try:
                response_data = await self.inference_server.post(
                    endpoint="/getFileByPath",
                    data=file_request,
                )
            except Exception as e:
                raise HTTPException(
                    status_code=503,
                    detail=f"Failed to connect to inference server: {str(e)}\n"
                           f"Request path: {normalized_path}",
                )

            if not response_data:
                raise HTTPException(
                    status_code=502,
                    detail=f"Empty response from inference server for path: {normalized_path}",
                )

            if "file_content" not in response_data:
                raise HTTPException(
                    status_code=502,
                    detail=f"Invalid response from inference server: missing file_content. "
                           f"Response: {str(response_data)[:200]}",  # cap detail size
                )

            target_dir = os.path.dirname(request.filePath)
            # A bare filename has no directory component; makedirs("") raises.
            if target_dir:
                try:
                    os.makedirs(target_dir, exist_ok=True)
                except PermissionError as e:
                    raise HTTPException(
                        status_code=403,
                        detail=f"Permission denied when creating directory: {str(e)}",
                    )
                except OSError as e:
                    raise HTTPException(
                        status_code=500,
                        detail=f"Failed to create directory: {str(e)}",
                    )

            try:
                # binascii.Error is a ValueError subclass, so this also covers
                # the base64.binascii.Error the previous code caught.
                file_content = base64.b64decode(response_data["file_content"])
            except ValueError as e:
                raise HTTPException(
                    status_code=502,
                    detail=f"Invalid base64 content from server: {str(e)}",
                )

            try:
                with open(request.filePath, "wb") as f:
                    f.write(file_content)
            except PermissionError as e:
                raise HTTPException(
                    status_code=403,
                    detail=f"Permission denied when writing file: {str(e)}",
                )
            except IOError as e:
                raise HTTPException(
                    status_code=500,
                    detail=f"Failed to write file: {str(e)}",
                )

            try:
                file_size = os.path.getsize(request.filePath)
                file_name = os.path.basename(request.filePath)
            except OSError as e:
                raise HTTPException(
                    status_code=500,
                    detail=f"Failed to get file info: {str(e)}",
                )

            response = FileResponse(
                fileName=file_name,
                fileSize=file_size,
                fileContent=response_data["file_content"],
            )
            return BaseResponse(
                status="success",
                message="File retrieved successfully",
                data=response.dict(),
            )
        except HTTPException:
            # Re-raise as-is to keep the status codes and details set above.
            raise
        except Exception as e:
            raise HTTPException(
                status_code=500,
                detail=f"Unexpected error: {str(e)}",
            )

    async def close(self):
        """Close every backend HTTP client.

        Replaces the former ``async def __del__``: the garbage collector
        never awaits a coroutine returned by __del__, so the clients were
        never actually closed (and inference_server_2 was not closed at all).
        Call this explicitly, e.g. from a FastAPI shutdown event handler.
        """
        await self.model_server.close()
        await self.inference_server.close()
        await self.inference_server_2.close()