from fastapi import FastAPI, Depends, HTTPException, APIRouter
from fastapi.responses import JSONResponse
from sqlalchemy.orm import Session
import asyncio
from typing import List

from . import dao, schemas, models
from .models import get_db, init_db
from .data_collection import SSHManager, scheduler
from .. import LinuxTune_service

# Initialize Database on startup.
# NOTE(review): runs at import time, before the app starts serving — assumes
# init_db() is idempotent (e.g. CREATE TABLE IF NOT EXISTS); confirm in models.
init_db()

app = FastAPI(
    title="OS Performance Monitor API",
    description="API for OS performance monitoring and tuning.",
    version="1.0.0"
)

# All endpoints below are registered on this router and served under /api/v1.
router_v1 = APIRouter(prefix="/api/v1")

class BusinessException(Exception):
    """
    Custom exception for business logic errors.

    Carries a string error ``code``, a human-readable ``message`` and an
    optional ``details`` dict; the registered exception handler renders all
    three as a uniform 400 JSON error envelope.
    """
    def __init__(self, code: str, message: str, details: dict = None):
        # Fix: forward the message to Exception so exc.args is populated and
        # generic logging / repr of the exception shows the message instead of
        # an empty tuple.
        super().__init__(message)
        self.code = code
        self.message = message
        self.details = details

    def __str__(self):
        return f"BusinessException(code={self.code}, message={self.message}, details={self.details})"

# --- Exception Handler ---
@app.exception_handler(BusinessException)
async def business_exception_handler(request, exc):
    """Render a BusinessException as the project's uniform JSON error envelope."""
    detail = schemas.ErrorDetail(code=str(exc.code), message=exc.message, details=exc.details)
    body = schemas.ErrorResponse(error=detail).dict()
    # Business-rule failures are always reported with HTTP 400.
    return JSONResponse(status_code=400, content=body)

@app.exception_handler(HTTPException)
async def http_exception_handler(request, exc):
    """Render FastAPI HTTPExceptions with the same envelope as business errors."""
    detail = schemas.ErrorDetail(code=str(exc.status_code), message=exc.detail)
    body = schemas.ErrorResponse(error=detail).dict()
    return JSONResponse(status_code=exc.status_code, content=body)

# --- 0. Server Management ---
@router_v1.post("/servers", response_model=schemas.ServerAddResponse, tags=["Server Management"])
async def add_server(server: schemas.ServerCreate, db: Session = Depends(get_db)):
    """Register a new server: reject duplicate names, probe it over SSH, persist it."""
    try:
        if dao.get_server_by_name(db, servername=server.servername):
            raise BusinessException(code="1000", message="Server with this name already exists")

        # Normalize authMethod to a strict 0/1 flag.
        server.authMethod = 0 if server.authMethod == 0 else 1

        # Probe the host before persisting; a failed probe aborts the add.
        info = await SSHManager(server.__dict__).get_server_info()
        if not info:
            raise BusinessException(code="1001", message="Failed to retrieve server info")

        created = dao.create_server(db=db, server=server, info=info)
        return schemas.ServerAddResponse(data={"id": created.id, "status": "added"})
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="1002", message=f"Failed to add server: {str(e)}")

@router_v1.get("/servers", response_model=List[schemas.ServerInfo], tags=["Server Management"])
def get_servers(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    """Return the configured servers, paginated via skip/limit."""
    try:
        return dao.get_servers(db, skip=skip, limit=limit)
    except Exception as e:
        raise BusinessException(code="1003", message=f"Failed to retrieve servers: {str(e)}")

@router_v1.delete("/servers/{server_id}", response_model=schemas.SuccessResponse, tags=["Server Management"])
async def delete_server(server_id: str, db: Session = Depends(get_db)):
    """Delete a server by id; if it was the active one, stop metric collection.

    Raises BusinessException 1004 when the server does not exist and 1005 on
    any unexpected failure.
    """
    try:
        if not dao.delete_server(db, server_id):
            raise BusinessException(code="1004", message="Server not found")
        # If the deleted server was active, stop the scheduler.
        # Fix: the endpoint is now async and awaits scheduler.stop() on the
        # application's running event loop. The previous sync version used
        # asyncio.run(), which creates a *new* event loop and therefore cannot
        # reliably cancel tasks that scheduler.start() created on the app's
        # loop (see the async activate_server endpoint).
        if scheduler.server_id == server_id:
            await scheduler.stop()
        return schemas.SuccessResponse(message="服务器删除成功")
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="1005", message=f"Failed to delete server: {str(e)}")

@router_v1.post("/servers/{server_id}/activate", response_model=schemas.ActivateServerResponse, tags=["Server Management"])
async def activate_server(server_id: str, db: Session = Depends(get_db)):
    """Mark a server as active and (re)start metric collection against it."""
    try:
        srv = dao.set_active_server(db, server_id)
        if not srv:
            raise BusinessException(code="1006", message="Server not found")

        # Point the data-collection scheduler at the newly activated server.
        await scheduler.start(srv.id)

        return schemas.ActivateServerResponse(data={"currentServerId": srv.id})
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="1007", message=f"Failed to activate server: {str(e)}")

# --- 1. System Info ---
# This endpoint might need to be re-evaluated as we don't have a single 'system_info' metric anymore.
# For now, we can construct it from other metrics or create a static info collector.
@router_v1.get("/system/info", response_model=schemas.SystemInfoResponse, tags=["System Info"])
def get_system_info(db: Session = Depends(get_db)):
    """Return static info (OS, hostname, uptime, architecture) for the active server."""
    try:
        srv = dao.get_active_server(db)
        if not srv:
            raise BusinessException(code="2001", message="No active server selected")

        info = schemas.SystemInfoData(
            os=srv.os,
            hostname=srv.hostname,
            uptime=srv.uptime,
            architecture=srv.architecture,
        )
        return schemas.SystemInfoResponse(data=info)
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="2002", message=f"Failed to retrieve system info: {str(e)}")

@router_v1.get("/system/overview", response_model=schemas.SystemOverviewResponse, tags=["System overview"])
def get_system_overview(db: Session = Depends(get_db)):
    """Aggregate the latest CPU/memory/disk/network samples into a single overview."""
    try:
        srv = dao.get_active_server(db)
        if not srv:
            raise BusinessException(code="2001", message="No active server selected")

        cpu = dao.get_latest_metric(db, srv.id, models.CpuMetric)
        mem = dao.get_latest_metric(db, srv.id, models.MemoryMetric)
        disk = dao.get_latest_metric(db, srv.id, models.IoMetric)
        net = dao.get_latest_metric(db, srv.id, models.NetworkMetric)

        if not all([cpu, mem, disk, net]):
            raise BusinessException(code="2003", message="Not all system metrics available yet")

        # '-1' is the aggregate-across-all-cores entry in cpu_usage.
        agg = cpu.cpu_usage['-1']
        mem_total = mem.info['total']
        mem_used = mem.info['used']
        dev_stats = disk.device_stats.values()
        ifaces = net.net_stats.values()

        overview = schemas.SystemOverviewData(
            cpu=schemas.CPUOverview(
                cores=cpu.nproc,
                usage=agg['user'] + agg['system'],
                load1m=cpu.loadavg['1m'],
                load5m=cpu.loadavg['5m'],
                load15m=cpu.loadavg['15m'],
            ),
            memory=schemas.MemoryOverview(
                total=mem_total,
                used=mem_used,
                usage=(mem_used / mem_total) * 100 if mem_total else 0,
                oomKills=mem.oom_kill,
            ),
            disk=schemas.DiskOverview(
                totalSpace=sum(d['size'] for d in disk.device_size.values()),
                totalIOPS=sum(s['r/s'] + s['w/s'] for s in dev_stats),
                totalThroughput=sum(s['rKB/s'] + s['wKB/s'] for s in dev_stats),
            ),
            network=schemas.NetworkOverview(
                interfaces=len(net.net_stats),
                totalRxThroughput=sum(i['rxkB/s'] for i in ifaces),
                totalTxThroughput=sum(i['txkB/s'] for i in ifaces),
            ),
        )
        return schemas.SystemOverviewResponse(data=overview)
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="2004", message=f"Failed to retrieve system overview: {str(e)}")

# --- Realtime data endpoints ---

@router_v1.get("/cpu/realtime", response_model=schemas.CPURealtimeResponse, tags=["Realtime Data"])
def get_cpu_realtime(db: Session = Depends(get_db)):
    """Return the most recent CPU sample for the active server."""
    try:
        srv = dao.get_active_server(db)
        if not srv:
            raise BusinessException(code="2001", message="No active server selected")

        m = dao.get_latest_metric(db, srv.id, models.CpuMetric)
        if not m:
            raise BusinessException(code="3001", message="No CPU data available yet")

        # '-1' holds the aggregate across all cores; other keys are core indices.
        per_core = []
        for core, usage in m.cpu_usage.items():
            if core == '-1':
                continue
            per_core.append({
                'core': int(core),
                'user': usage['user'],
                'system': usage['system'],
                'idle': usage['idle'],
                'iowait': usage['iowait'],
            })

        payload = schemas.CPURealtimeData(
            cores=m.nproc,
            usage=m.cpu_usage['-1']['user'] + m.cpu_usage['-1']['system'],
            load1m=m.loadavg['1m'],
            load5m=m.loadavg['5m'],
            load15m=m.loadavg['15m'],
            processCount=m.process_count,
            contextSwitchesPerSec=m.context_switches_interrupts['contextSwitchesPerSec'],
            interruptsPerSec=m.context_switches_interrupts['interruptsPerSec'],
            syscallsPerSec=m.syscalls_per_sec,
            perCoreUsage=per_core,
        )
        return schemas.CPURealtimeResponse(
            data=payload,
            timestamp=int(m.timestamp.timestamp() * 1000),
        )
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="3002", message=f"Failed to retrieve CPU realtime data: {str(e)}")

@router_v1.get("/memory/realtime", response_model=schemas.MemoryRealtimeResponse, tags=["Realtime Data"])
def get_memory_realtime(db: Session = Depends(get_db)):
    """Return the most recent memory/swap/vm sample for the active server."""
    try:
        srv = dao.get_active_server(db)
        if not srv:
            raise BusinessException(code="2001", message="No active server selected")

        m = dao.get_latest_metric(db, srv.id, models.MemoryMetric)
        if not m:
            raise BusinessException(code="4001", message="No Memory data available yet")

        info = m.info
        swap = info['swap']
        swap_info = schemas.SwapInfo(
            total=swap['total'],
            used=swap['used'],
            usage=(swap['used'] / swap['total']) * 100 if swap['total'] else 0,
        )
        vm = schemas.VmstatInfo(
            swappiness=m.swappiness,
            vfsCachePressure=m.vfs_cache_pressure,
            dirtyRatio=m.dirty_ratio,
            dirtyBackgroundRatio=m.dirty_background_ratio,
            transparentHugepage=m.transparent_hugepage,
            pageScanRate=m.page_scan['pageScanRate'],
            pgscanKswapd=m.page_scan['kswapd'],
            pgscanDirect=m.page_scan['direct'],
        )
        payload = schemas.MemoryRealtimeData(
            total=info['total'],
            used=info['used'],
            free=info['free'],
            available=info['available'],
            buffers=info['buffers'],
            usage=(info['used'] / info['total']) * 100 if info['total'] else 0,
            swap=swap_info,
            oomKills=m.oom_kill,
            vmstat=vm,
        )
        return schemas.MemoryRealtimeResponse(
            data=payload,
            timestamp=int(m.timestamp.timestamp() * 1000),
        )
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="4002", message=f"Failed to retrieve memory realtime data: {str(e)}")

@router_v1.get("/disk/realtime", response_model=schemas.DiskRealtimeResponse, tags=["Realtime Data"])
def get_disk_realtime(db: Session = Depends(get_db)):
    """Return the most recent block-device and filesystem sample for the active server."""
    try:
        srv = dao.get_active_server(db)
        if not srv:
            raise BusinessException(code="2001", message="No active server selected")

        m = dao.get_latest_metric(db, srv.id, models.IoMetric)
        if not m:
            raise BusinessException(code="5001", message="No I/O data available yet")

        # The per-device dicts are assumed to share key order; they are paired
        # positionally, as in the rest of this module.
        devices = []
        for (name, stat), (_, size), (_, tuning) in zip(
            m.device_stats.items(),
            m.device_size.items(),
            m.device_sched_queDepth_rdAhead.items(),
        ):
            devices.append({
                'name': name,
                'size': size['size'],
                'used': size['used'],
                'usage': size['usage'],
                'readIOPS': stat['r/s'],
                'writeIOPS': stat['w/s'],
                'readThroughput': stat['rKB/s'],
                'writeThroughput': stat['wKB/s'],
                'await': stat['await'],
                'util': stat['utilization'],
                'scheduler': tuning['scheduler'],
                'queueDepth': tuning['queue_depth'],
                'readAhead': tuning['read_ahead_kB'],
            })

        filesystems = []
        for (name, space), (_, inode) in zip(m.fs_space.items(), m.inode_usage.items()):
            filesystems.append({
                'mountPoint': space['mountPoint'],
                'device': name,
                'type': space['type'],
                'size': space['size'],
                'used': space['used'],
                'available': space['available'],
                'usage': space['usage'],
                'inodes': inode['total'],
                'inodesUsed': inode['used'],
                'inodesAvailable': inode['available'],
                'inodeUsage': inode['usage'],
            })

        return schemas.DiskRealtimeResponse(
            data=schemas.DiskRealtimeData(devices=devices, filesystems=filesystems),
            timestamp=int(m.timestamp.timestamp() * 1000),
        )
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="5002", message=f"Failed to retrieve disk realtime data: {str(e)}")

@router_v1.get("/network/realtime", response_model=schemas.NetworkRealtimeResponse, tags=["Realtime Data"])
def get_network_realtime(db: Session = Depends(get_db)):
    """Return the most recent per-interface and TCP-stack sample for the active server."""
    try:
        srv = dao.get_active_server(db)
        if not srv:
            raise BusinessException(code="2001", message="No active server selected")

        m = dao.get_latest_metric(db, srv.id, models.NetworkMetric)
        if not m:
            raise BusinessException(code="6001", message="No Network data available yet")

        interfaces = []
        for (name, stats), (_, errs), (_, link) in zip(
            m.net_stats.items(),
            m.net_err_stats.items(),
            m.net_speed_duplex_mtu.items(),
        ):
            interfaces.append({
                'name': name,
                'rxPacketsPerSec': stats['rxpck/s'],
                'txPacketsPerSec': stats['txpck/s'],
                # Source values are kB/s; expose bytes/s.
                'rxBytesPerSec': stats['rxkB/s'] * 1024,
                'txBytesPerSec': stats['txkB/s'] * 1024,
                'rxDroppedPerSec': errs['rxdrop/s'],
                'txDroppedPerSec': errs['txdrop/s'],
                'rxErrorsPerSec': errs['rxerr/s'],
                'txErrorsPerSec': errs['txerr/s'],
                'speed': link['speed'],
                'duplex': link['duplex'],
                'mtu': link['mtu'],
            })

        tcp = schemas.TCPInfo(
            listenOverflows=m.tcp_drops['tcp_listen_overflows'],
            syncookiesSent=m.tcp_drops['tcp_syncookies_sent'],
            listenDrops=m.tcp_drops['tcp_listen_drops'],
            retransSegs=m.tcp_retrans['tcp_retrans_segs/s'],
            congestionControl=m.tcp_congestion_control,
            windowScaling=m.tcp_window_scaling == '1',
            maxSynBacklog=m.tcp_max_syn_backlog,
            somaxconn=m.somaxconn,
            tcpRmem=[int(v) for v in m.tcp_rmem.split()],
            tcpWmem=[int(v) for v in m.tcp_wmem.split()],
        )

        return schemas.NetworkRealtimeResponse(
            data=schemas.NetworkRealtimeData(interfaces=interfaces, tcp=tcp),
            timestamp=int(m.timestamp.timestamp() * 1000),
        )
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="6002", message=f"Failed to retrieve network realtime data: {str(e)}")

@router_v1.get("/mysql/realtime", response_model=schemas.MysqlRealtimeResponse, tags=["Realtime Data"])
def get_mysql_realtime(db: Session = Depends(get_db)):
    """Return the most recent MySQL sample; requires MySQL to be configured."""
    try:
        srv = dao.get_active_server(db)
        if not srv:
            raise BusinessException(code="2001", message="No active server selected")

        mysql_app = dao.get_app_by_type(db, srv.id, "mysql")
        if not mysql_app:
            raise BusinessException(code="7000", message="MySQL is not configured for the active server")
        # Lazily hand the stored MySQL credentials to the running collector.
        if not scheduler.ssh_manager.mysql_config:
            scheduler.ssh_manager.mysql_config = mysql_app.config

        m = dao.get_latest_metric(db, srv.id, models.MysqlMetric)
        if not m:
            raise BusinessException(code="7001", message="No MySQL data available yet")

        payload = schemas.MysqlRealtimeData(
            qps=m.qps,
            tps=m.tps,
            slowQueries=m.slow_queries,
            threadsConnected=m.threads_connected,
            threadsRunning=m.threads_running,
            abortedConnects=m.aborted_connects,
            innodbBufferPoolReads=m.innodb_buffer_pool_reads,
            innodbBufferPoolReadRequests=m.innodb_buffer_pool_read_requests,
            innodbRowLocksWaits=m.innodb_row_locks_waits,
            innodbRowLockTime=m.innodb_row_lock_time,
            innodbLogWaits=m.innodb_log_waits,
        )
        return schemas.MysqlRealtimeResponse(
            data=payload,
            timestamp=int(m.timestamp.timestamp() * 1000),
        )
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="7002", message=f"Failed to retrieve MySQL realtime data: {str(e)}")
# --- Historical data endpoints ---

@router_v1.get("/cpu/history", response_model=schemas.CPUHistoryResponse, tags=["Historical Data"])
def get_cpu_history(timeRange: str = "1h", db: Session = Depends(get_db)):
    """Return CPU samples for the active server over timeRange (default "1h")."""
    try:
        srv = dao.get_active_server(db)
        if not srv:
            raise BusinessException(code="2001", message="No active server selected")

        metrics = dao.get_historical_metrics(db, srv.id, models.CpuMetric, timeRange)
        if not metrics:
            raise BusinessException(code="3003", message="No historical CPU data available")

        def convert(m):
            # Same shape as the /cpu/realtime payload; '-1' is the all-cores aggregate.
            return schemas.CPURealtimeData(
                cores=m.nproc,
                usage=m.cpu_usage['-1']['user'] + m.cpu_usage['-1']['system'],
                load1m=m.loadavg['1m'],
                load5m=m.loadavg['5m'],
                load15m=m.loadavg['15m'],
                processCount=m.process_count,
                contextSwitchesPerSec=m.context_switches_interrupts['contextSwitchesPerSec'],
                interruptsPerSec=m.context_switches_interrupts['interruptsPerSec'],
                syscallsPerSec=m.syscalls_per_sec,
                perCoreUsage=[
                    {
                        'core': int(core),
                        'user': usage['user'],
                        'system': usage['system'],
                        'idle': usage['idle'],
                        'iowait': usage['iowait'],
                    }
                    for core, usage in m.cpu_usage.items() if core != '-1'
                ],
            )

        return schemas.CPUHistoryResponse(
            data=schemas.CPUHistoryData(
                timestamps=[int(m.timestamp.timestamp() * 1000) for m in metrics],
                datas=[convert(m) for m in metrics],
            )
        )
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="3004", message=f"Failed to retrieve CPU history data: {str(e)}")

@router_v1.get("/memory/history", response_model=schemas.MemoryHistoryResponse, tags=["Historical Data"])
def get_memory_history(timeRange: str = "1h", db: Session = Depends(get_db)):
    """Return memory samples for the active server over timeRange (default "1h")."""
    try:
        srv = dao.get_active_server(db)
        if not srv:
            raise BusinessException(code="2001", message="No active server selected")

        metrics = dao.get_historical_metrics(db, srv.id, models.MemoryMetric, timeRange)
        if not metrics:
            raise BusinessException(code="4003", message="No historical memory data available")

        def convert(m):
            # Same shape as the /memory/realtime payload.
            info = m.info
            swap = info['swap']
            return schemas.MemoryRealtimeData(
                total=info['total'],
                used=info['used'],
                free=info['free'],
                available=info['available'],
                buffers=info['buffers'],
                usage=(info['used'] / info['total']) * 100 if info['total'] else 0,
                swap=schemas.SwapInfo(
                    total=swap['total'],
                    used=swap['used'],
                    usage=(swap['used'] / swap['total']) * 100 if swap['total'] else 0,
                ),
                oomKills=m.oom_kill,
                vmstat=schemas.VmstatInfo(
                    swappiness=m.swappiness,
                    vfsCachePressure=m.vfs_cache_pressure,
                    dirtyRatio=m.dirty_ratio,
                    dirtyBackgroundRatio=m.dirty_background_ratio,
                    transparentHugepage=m.transparent_hugepage,
                    pageScanRate=m.page_scan['pageScanRate'],
                    pgscanKswapd=m.page_scan['kswapd'],
                    pgscanDirect=m.page_scan['direct'],
                ),
            )

        return schemas.MemoryHistoryResponse(
            data=schemas.MemoryHistoryData(
                timestamps=[int(m.timestamp.timestamp() * 1000) for m in metrics],
                datas=[convert(m) for m in metrics],
            )
        )
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="4004", message=f"Failed to retrieve memory history data: {str(e)}")

@router_v1.get("/disk/history", response_model=schemas.DiskHistoryResponse, tags=["Historical Data"])
def get_disk_history(timeRange: str = "1h", db: Session = Depends(get_db)):
    """Return disk samples for the active server over timeRange (default "1h")."""
    try:
        srv = dao.get_active_server(db)
        if not srv:
            raise BusinessException(code="2001", message="No active server selected")

        metrics = dao.get_historical_metrics(db, srv.id, models.IoMetric, timeRange)
        if not metrics:
            raise BusinessException(code="5003", message="No historical disk data available")

        def convert(m):
            # Same shape as the /disk/realtime payload; the per-device dicts are
            # paired positionally, as in the realtime endpoint.
            devices = []
            for (name, stat), (_, size), (_, tuning) in zip(
                m.device_stats.items(),
                m.device_size.items(),
                m.device_sched_queDepth_rdAhead.items(),
            ):
                devices.append({
                    'name': name,
                    'size': size['size'],
                    'used': size['used'],
                    'usage': size['usage'],
                    'readIOPS': stat['r/s'],
                    'writeIOPS': stat['w/s'],
                    'readThroughput': stat['rKB/s'],
                    'writeThroughput': stat['wKB/s'],
                    'await': stat['await'],
                    'util': stat['utilization'],
                    'scheduler': tuning['scheduler'],
                    'queueDepth': tuning['queue_depth'],
                    'readAhead': tuning['read_ahead_kB'],
                })
            filesystems = []
            for (name, space), (_, inode) in zip(m.fs_space.items(), m.inode_usage.items()):
                filesystems.append({
                    'mountPoint': space['mountPoint'],
                    'device': name,
                    'type': space['type'],
                    'size': space['size'],
                    'used': space['used'],
                    'available': space['available'],
                    'usage': space['usage'],
                    'inodes': inode['total'],
                    'inodesUsed': inode['used'],
                    'inodesAvailable': inode['available'],
                    'inodeUsage': inode['usage'],
                })
            return schemas.DiskRealtimeData(devices=devices, filesystems=filesystems)

        return schemas.DiskHistoryResponse(
            data=schemas.DiskHistoryData(
                timestamps=[int(m.timestamp.timestamp() * 1000) for m in metrics],
                datas=[convert(m) for m in metrics],
            )
        )
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="5004", message=f"Failed to retrieve disk history data: {str(e)}")

@router_v1.get("/network/history", response_model=schemas.NetworkHistoryResponse, tags=["Historical Data"])
def get_network_history(timeRange: str = "1h", db: Session = Depends(get_db)):
    """Return network samples for the active server over timeRange (default "1h")."""
    try:
        srv = dao.get_active_server(db)
        if not srv:
            raise BusinessException(code="2001", message="No active server selected")

        metrics = dao.get_historical_metrics(db, srv.id, models.NetworkMetric, timeRange)
        if not metrics:
            raise BusinessException(code="6003", message="No historical network data available")

        def convert(m):
            # Same shape as the /network/realtime payload.
            interfaces = []
            for (name, stats), (_, errs), (_, link) in zip(
                m.net_stats.items(),
                m.net_err_stats.items(),
                m.net_speed_duplex_mtu.items(),
            ):
                interfaces.append({
                    'name': name,
                    'rxPacketsPerSec': stats['rxpck/s'],
                    'txPacketsPerSec': stats['txpck/s'],
                    # Source values are kB/s; expose bytes/s.
                    'rxBytesPerSec': stats['rxkB/s'] * 1024,
                    'txBytesPerSec': stats['txkB/s'] * 1024,
                    'rxDroppedPerSec': errs['rxdrop/s'],
                    'txDroppedPerSec': errs['txdrop/s'],
                    'rxErrorsPerSec': errs['rxerr/s'],
                    'txErrorsPerSec': errs['txerr/s'],
                    'speed': link['speed'],
                    'duplex': link['duplex'],
                    'mtu': link['mtu'],
                })
            return schemas.NetworkRealtimeData(
                interfaces=interfaces,
                tcp=schemas.TCPInfo(
                    listenOverflows=m.tcp_drops['tcp_listen_overflows'],
                    syncookiesSent=m.tcp_drops['tcp_syncookies_sent'],
                    listenDrops=m.tcp_drops['tcp_listen_drops'],
                    retransSegs=m.tcp_retrans['tcp_retrans_segs/s'],
                    congestionControl=m.tcp_congestion_control,
                    windowScaling=m.tcp_window_scaling == '1',
                    maxSynBacklog=m.tcp_max_syn_backlog,
                    somaxconn=m.somaxconn,
                    tcpRmem=[int(v) for v in m.tcp_rmem.split()],
                    tcpWmem=[int(v) for v in m.tcp_wmem.split()],
                ),
            )

        return schemas.NetworkHistoryResponse(
            data=schemas.NetworkHistoryData(
                timestamps=[int(m.timestamp.timestamp() * 1000) for m in metrics],
                datas=[convert(m) for m in metrics],
            )
        )
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="6004", message=f"Failed to retrieve network history data: {str(e)}")

@router_v1.get("/mysql/history", response_model=schemas.MysqlHistoryResponse, tags=["Historical Data"])
def get_mysql_history(timeRange: str = "1h", db: Session = Depends(get_db)):
    """Return MySQL samples for the active server over timeRange (default "1h")."""
    try:
        srv = dao.get_active_server(db)
        if not srv:
            raise BusinessException(code="2001", message="No active server selected")

        mysql_app = dao.get_app_by_type(db, srv.id, "mysql")
        if not mysql_app:
            raise BusinessException(code="7000", message="MySQL is not configured for the active server")
        # Lazily hand the stored MySQL credentials to the running collector.
        if not scheduler.ssh_manager.mysql_config:
            scheduler.ssh_manager.mysql_config = mysql_app.config

        metrics = dao.get_historical_metrics(db, srv.id, models.MysqlMetric, timeRange)
        if not metrics:
            raise BusinessException(code="7003", message="No historical MySQL data available")

        def convert(m):
            # Same shape as the /mysql/realtime payload.
            return schemas.MysqlRealtimeData(
                qps=m.qps,
                tps=m.tps,
                slowQueries=m.slow_queries,
                threadsConnected=m.threads_connected,
                threadsRunning=m.threads_running,
                abortedConnects=m.aborted_connects,
                innodbBufferPoolReads=m.innodb_buffer_pool_reads,
                innodbBufferPoolReadRequests=m.innodb_buffer_pool_read_requests,
                innodbRowLocksWaits=m.innodb_row_locks_waits,
                innodbRowLockTime=m.innodb_row_lock_time,
                innodbLogWaits=m.innodb_log_waits,
            )

        return schemas.MysqlHistoryResponse(
            data=schemas.MysqlHistoryData(
                timestamps=[int(m.timestamp.timestamp() * 1000) for m in metrics],
                datas=[convert(m) for m in metrics],
            )
        )
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="7004", message=f"Failed to retrieve MySQL history data: {str(e)}")

# --- 2. Application Management ---
@router_v1.get("/payload/apps", response_model=schemas.AppRunningResponse, tags=["Application Management"])
async def get_current_apps(db: Session = Depends(get_db)):
    """List supported applications ('mysql', 'nginx') currently running on the active server."""
    try:
        srv = dao.get_active_server(db)
        if not srv:
            raise BusinessException(code="2001", message="No active server selected")

        ssh = SSHManager(srv.__dict__)
        running = await ssh.get_running_processes(['mysql', 'nginx'])
        return schemas.AppRunningResponse(data=running)
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="2005", message=f"Failed to retrieve running applications: {str(e)}")

@router_v1.post("/payload/config", response_model=schemas.AppCreateResponse, tags=["Application Management"])
def create_app(app: schemas.AppCreate, db: Session = Depends(get_db)):
    try:
        active_server = dao.get_active_server(db)
        if not active_server:
            raise BusinessException(code="2001", message="No active server selected")
        
        if app.serverId != active_server.id:
            raise BusinessException(code="2002", message="Application server ID does not match active server")
        
        new_app = dao.create_app(db, app)
        return schemas.AppCreateResponse(data=schemas.AppRetrieve(
            appid=new_app.id,
            appType=new_app.type,
            name=new_app.name,
            description=new_app.description,
        ))
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="2006", message=f"Failed to create application: {str(e)}")

@router_v1.get("/payload/config", response_model=schemas.AppListResponse, tags=["Application Management"])
def get_configed_apps(db: Session = Depends(get_db)):
    try:
        active_server = dao.get_active_server(db)
        if not active_server:
            raise BusinessException(code="2001", message="No active server selected")
        
        apps = dao.get_apps(db, server_id=active_server.id)
        return schemas.AppListResponse(data=[
            schemas.AppRetrieve(
                appid=app.id,
                appType=app.type,
                name=app.name,
                description=app.description,
            ) for app in apps
        ])
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="2007", message=f"Failed to retrieve configured applications: {str(e)}")

@router_v1.get("/payload/suggestions", response_model=schemas.SuggestionResponse, tags=["Application Management"])
def get_app_suggestions(appid: str, db: Session = Depends(get_db)):
    try:
        active_server = dao.get_active_server(db)
        if not active_server:
            raise BusinessException(code="2001", message="No active server selected")
        
        if not appid:
            raise BusinessException(code="2008", message="Application ID is required")

        suggestions = dao.get_latest_suggestion(db=db, server_id=active_server.id, app_id=appid)
        if not suggestions or suggestions.applied:
            raise BusinessException(code="2008", message="No suggestions available for this application")
        
        return schemas.SuggestionResponse(data=schemas.SuggestionData(
            appid=appid,
            content=[
                {
                    'type': tp,
                    'report': suggestions.suggestion['report'][f"{tp}_report"],
                    'param': suggestions.suggestion['param'][tp],
                }
                for tp in suggestions.suggestion['param'].keys()
            ],
            bottleneck=suggestions.suggestion['bottleneck'],
            suggestionId=suggestions.id,
        ))
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="2009", message=f"Failed to retrieve application suggestions: {str(e)}")

@router_v1.post("/payload/analyze", response_model=schemas.SuggestionResponse, tags=["Application Management"])
def create_app_suggestions(_app: schemas.AppAnalyzeRequest, mock: bool = False, db: Session = Depends(get_db)):
    try:
        active_server = dao.get_active_server(db)
        if not active_server:
            raise BusinessException(code="2001", message="No active server selected")

        if not _app.appid:
            raise BusinessException(code="2010", message="Application ID is required")

        app = dao.get_app(db, _app.appid)
        result, stage_result = LinuxTune_service.RecommenderService(
            host_ip=active_server.ip,
            host_port=active_server.port,
            host_user=active_server.user,
            host_password=active_server.password,
            app=app.type,
            appconfig=app.config,
            mock=mock
        )
        if not result:
            raise BusinessException(code="2011", message="No suggestions available for this application")
        
        suggestions = dao.create_suggestion(db=db, result=result, stage_result=stage_result, server_id=active_server.id, app_id=app.id)
        
        return schemas.SuggestionResponse(data=schemas.SuggestionData(
            appid=app.id,
            content=[
                {
                    'type': tp,
                    'report': suggestions.suggestion['report'][f"{tp}_report"],
                    'param': suggestions.suggestion['param'][tp],
                }
                for tp in suggestions.suggestion['param'].keys()
            ],
            bottleneck=suggestions.suggestion['bottleneck'],
            suggestionId=suggestions.id,
        ))
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="2012", message=f"Failed to analyze application: {str(e)}")

@router_v1.post("/payload/optimize", response_model=schemas.SuggestionApplyResponse, tags=["Application Management"])
def apply_suggestion(request: schemas.AppAnalyzeApplyRequest, mock: bool = False, db: Session = Depends(get_db)):
    try:
        active_server = dao.get_active_server(db)
        if not active_server:
            raise BusinessException(code="2001", message="No active server selected")

        if not request.appid:
            raise BusinessException(code="2013", message="Application ID is required")
        
        if not request.suggestionId:
            raise BusinessException(code="2013", message="Suggestion ID is required")

        suggestion = dao.get_suggestions(db=db, suggestion_id=request.suggestionId, server_id=active_server.id, app_id=request.appid)
        if not suggestion:
            raise BusinessException(code="2014", message="Suggestion not found")

        app = dao.get_app(db, request.appid)
        # TODO: apply the suggestion rather than recommending again
        result = LinuxTune_service.OptimizerService(
            host_ip=active_server.ip,
            host_port=active_server.port,
            host_user=active_server.user,
            host_password=active_server.password,
            app=app.type,
            appconfig=app.config,
            mock=mock,
            original_result=suggestion.stage_result
        )

        if not result:
            raise BusinessException(code="2015", message="Failed to apply suggestion")
        
        result['improvement'] = (result['bestresult'] - result['baseline']) / result['baseline'] * 100 if result['baseline'] else 0
        # TODO: calculate health score based on the result
        result['health'] = 75
        dao.create_optimization_history(db=db, suggestion_id=suggestion.id, result=result, app_id=request.appid)

        return schemas.SuggestionApplyResponse(data=schemas.SuggestionApplyData(
            before=result['baseline'],
            after=result['bestresult'],
            improvement=result['improvement'],
            health=result['health'],
        ))
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="2016", message=f"Failed to apply suggestion: {str(e)}")

@router_v1.get("/payload/history", response_model=schemas.SuggestionHistoryResponse, tags=["Application Management"])
def get_app_history(db: Session = Depends(get_db)):
    try:
        active_server = dao.get_active_server(db)
        if not active_server:
            raise BusinessException(code="2001", message="No active server selected")

        history = dao.get_optimization_history(db=db, server_id=active_server.id, isApp=True)

        suggestion_items = []
        for item in history:
            suggestion = dao.get_suggestions(db=db, suggestion_id=item.suggestion_id, server_id=active_server.id, isApplied=True)
            if suggestion:
                suggestion_items.append({**suggestion.__dict__, 'improvement': item.improvement})

        return schemas.SuggestionHistoryResponse(data=schemas.SuggestionHistoryData(
            timestamps=[int(item.timestamp.timestamp() * 1000) for item in history],
            datas=[schemas.SuggHistoryData(
                appid=item['app_id'],
                content=[
                    {
                        'type': tp,
                        'report': item['suggestion']['report'][f"{tp}_report"],
                        'param': item['suggestion']['param'][tp],
                    }
                    for tp in item['suggestion']['param'].keys()
                ],
                bottleneck=item['suggestion']['bottleneck'],
                suggestionId=item['id'],
                improvement=item['improvement'],
            ) for item in suggestion_items]
        ))
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="2003", message=f"Failed to retrieve application history: {str(e)}")

# --- 7. Performance Optimization ---
# Get latest optimization suggestions
@router_v1.get("/optimize/suggestions", response_model=schemas.SuggestionResponse, tags=["Performance Optimization"])
def get_optimization_suggestions(serverId: str, db: Session = Depends(get_db)):
    try:
        suggestions = dao.get_latest_suggestion(db=db, server_id=serverId)
        if not suggestions or suggestions.applied:
            raise BusinessException(code="2008", message="No suggestions available for this application")
        print(suggestions.suggestion)
        return schemas.SuggestionResponse(data=schemas.SuggestionData(
            appid=None,  # No specific app ID for system-wide suggestions
            content=[
                {
                    'type': tp,
                    'report': suggestions.suggestion['report'][f"{tp}_report"],
                    'param': suggestions.suggestion['param'][tp],
                }
                for tp in suggestions.suggestion['param'].keys()
            ],
            bottleneck=suggestions.suggestion['bottleneck'],
            suggestionId=suggestions.id,
        ))
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="2009", message=f"Failed to retrieve application suggestions: {str(e)}")

@router_v1.post("/optimize/analyze", response_model=schemas.SuggestionResponse, tags=["Performance Optimization"])
def create_suggestions(mock: bool = False, db: Session = Depends(get_db)):
    try:
        active_server = dao.get_active_server(db)
        if not active_server:
            raise BusinessException(code="2001", message="No active server selected")

        result, stage_result = LinuxTune_service.RecommenderService(
            host_ip=active_server.ip,
            host_port=active_server.port,
            host_user=active_server.user,
            host_password=active_server.password,
            mock=mock
        )
        if not result:
            raise BusinessException(code="2011", message="No suggestions available for this application")

        suggestions = dao.create_suggestion(db=db, result=result, stage_result=stage_result, server_id=active_server.id)

        return schemas.SuggestionResponse(data=schemas.SuggestionData(
            appid=None,
            content=[
                {
                    'type': tp,
                    'report': suggestions.suggestion['report'][f"{tp}_report"],
                    'param': suggestions.suggestion['param'][tp],
                }
                for tp in suggestions.suggestion['param'].keys()
            ],
            bottleneck=suggestions.suggestion['bottleneck'],
            suggestionId=suggestions.id,
        ))
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="2012", message=f"Failed to analyze application: {str(e)}")

@router_v1.post("/optimize/apply", response_model=schemas.SuggestionApplyResponse, tags=["Performance Optimization"])
def apply_suggestion(req: schemas.SuggestionApplyRequest, mock: bool = False, db: Session = Depends(get_db)):
    try:
        active_server = dao.get_active_server(db)
        if not active_server:
            raise BusinessException(code="2001", message="No active server selected")

        if not req.suggestionId:
            raise BusinessException(code="2013", message="Suggestion ID is required")

        suggestion = dao.get_suggestions(db=db, suggestion_id=req.suggestionId, server_id=active_server.id)
        if not suggestion:
            raise BusinessException(code="2014", message="Suggestion not found or already applied")

        result = LinuxTune_service.OptimizerService(
            host_ip=active_server.ip,
            host_port=active_server.port,
            host_user=active_server.user,
            host_password=active_server.password,
            mock=mock,
            original_result=suggestion.stage_result
        )

        if not result:
            raise BusinessException(code="2015", message="Failed to apply suggestion")
        
        result['improvement'] = (result['bestresult'] - result['baseline']) / result['baseline'] * 100 if result['baseline'] else 0
        # TODO: calculate health score based on the result
        result['health'] = 90
        dao.create_optimization_history(db=db, suggestion_id=suggestion.id, result=result)

        return schemas.SuggestionApplyResponse(data=schemas.SuggestionApplyData(
            before=result['baseline'],
            after=result['bestresult'],
            improvement=result['improvement'],
            health=result['health'],
        ))
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="2016", message=f"Failed to apply suggestion: {str(e)}")

@router_v1.get("/optimize/history", response_model=schemas.SuggestionHistoryResponse, tags=["Performance Optimization"])
def get_optimization_history(db: Session = Depends(get_db)):
    try:
        active_server = dao.get_active_server(db)
        if not active_server:
            raise BusinessException(code="2001", message="No active server selected")

        history = dao.get_optimization_history(db=db, server_id=active_server.id)

        suggestion_items = []
        for item in history:
            suggestion = dao.get_suggestions(db=db, suggestion_id=item.suggestion_id, server_id=active_server.id, isApplied=True)
            if suggestion:
                suggestion_items.append({**suggestion.__dict__, 'improvement': item.improvement})

        return schemas.SuggestionHistoryResponse(data=schemas.SuggestionHistoryData(
            timestamps=[int(item.timestamp.timestamp() * 1000) for item in history],
            datas=[schemas.SuggHistoryData(
                appid=None,
                content=[
                    {
                        'type': tp,
                        'report': item['suggestion']['report'][f"{tp}_report"],
                        'param': item['suggestion']['param'][tp],
                    }
                    for tp in item['suggestion']['param'].keys()
                ],
                bottleneck=item['suggestion']['bottleneck'],
                suggestionId=item['id'],
                improvement=item['improvement'],
            ) for item in suggestion_items]
        ))
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="2003", message=f"Failed to retrieve application history: {str(e)}")

@router_v1.post("/optimize/param", response_model=schemas.SuccessResponse, tags=["Performance Optimization"])
def set_optimization_parameters(changes: schemas.ParameterChange, db: Session = Depends(get_db)):
    try:
        active_server = dao.get_active_server(db)
        if not active_server:
            raise BusinessException(code="2001", message="No active server selected")

        if not changes or not changes.name or not changes.value:
            raise BusinessException(code="2017", message="No parameters or value to change")

        # Apply the parameter changes
        client = SSHManager(active_server.__dict__)
        result = client.set_system_param(changes.name, changes.value)

        if not result:
            raise BusinessException(code="2018", message="Failed to apply parameter changes")
        
        return schemas.SuccessResponse(message="Parameters updated successfully")
    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="2019", message=f"Failed to set optimization parameters: {str(e)}")

# --- Realtime data endpoints (mirroring the spec) ---

@router_v1.get("/cpu/processes", response_model=schemas.ProcessListResponse, tags=["Realtime Data"])
def get_cpu_processes(db: Session = Depends(get_db)):
    try:
        active_server = dao.get_active_server(db)
        if not active_server:
            raise BusinessException(code="2001", message="No active server selected")
        
        metric = dao.get_latest_metric(db, active_server.id, models.CpuMetric)
        if not metric:
            raise BusinessException(code="3005", message="No CPU data available yet")
        
        processes = metric.ps_state if metric.ps_state else []

        return schemas.ProcessListResponse(data=processes)

    except BusinessException:
        raise
    except Exception as e:
        raise BusinessException(code="3006", message=f"Failed to retrieve process data: {str(e)}")

# Mount all versioned endpoints defined above under the /api/v1 prefix.
app.include_router(router_v1)

@app.on_event("startup")
async def startup_event():
    # On startup, check if there's an active server and start the scheduler
    db = next(get_db())
    active_server = dao.get_active_server(db)
    db.close()
    if active_server:
        print(f"Found active server on startup: {active_server.servername}. Starting data collection.")
        await scheduler.start(active_server.id)

@app.on_event("shutdown")
async def shutdown_event():
    print("Shutting down. Stopping data collection scheduler.")
    await scheduler.stop()
