import hashlib
import os
from typing import Any, Dict, List, Optional

import numpy as np
import pandas as pd
import torch
import uvicorn
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

from src.config_loader import ConfigLoader
from src.logger import Logger
from src.data_processor import DataProcessor
from src.model_manager import ModelManager
from src.model_registry import ModelRegistry

app = FastAPI(title="手机价格预测服务")

# Module-level service state, populated by init_service() before serving.
config = None            # ConfigLoader for pipeline_config.yaml
logger = None            # service-wide logger
data_processor = None    # DataProcessor: owns the fitted scaler and augmentation
models = {}              # model version name -> loaded ModelManager
traffic_split = []       # per-version traffic fractions; must sum to 1.0


class PredictionRequest(BaseModel):
    """Request payload for /predict.

    Attributes:
        features: one dict per sample, mapping feature name -> value.
        user_id: optional identifier used to route A/B-test traffic.
    """
    features: List[Dict[str, float]]
    # FIX: declared Optional — pydantic v2 rejects a plain `str` field
    # whose default is None; Optional keeps the old behavior explicit.
    user_id: Optional[str] = None


class PredictionResponse(BaseModel):
    """Response payload for /predict."""
    price_range: int           # predicted class index for the first sample
    probabilities: List[float] # per-class softmax probabilities (first sample)
    model_version: str         # model version that served this request
    request_id: str            # deterministic id derived from user + payload


def init_service():
    """Initialize the prediction service.

    Loads configuration, logging and the data processor, infers the model
    input/output dimensions from the sample data, validates the A/B traffic
    split, and loads every configured model version into `models`.

    Raises:
        ValueError: if model versions and traffic split lengths differ, or
            the traffic split does not sum to 1.0.
    """
    global config, logger, data_processor, models, traffic_split

    # Load pipeline configuration.
    config = ConfigLoader('../config/pipeline_config.yaml')

    # Set up logging.
    logger = Logger.quick_init(name="prediction_service")
    logger.info("===== 启动预测服务 =====")

    # Data processor owns the scaler and augmentation used at serving time.
    data_processor = DataProcessor(config, logger)

    # FIX: load the sample data ONCE to infer both input and output
    # dimensions (the original called load_data() twice, doubling the I/O).
    _, x, y = data_processor.load_data()
    input_dim = x.shape[1]

    # NOTE(review): every task gets the same class count, derived from the
    # single label vector `y` — confirm this is intended for multi-task runs.
    output_dims = {
        task: len(np.unique(y))
        for task in config.get('data.tasks', ["price_range"])
    }

    # Model versions and their traffic fractions.
    model_versions = config.get('deployment.model_versions', ["v1"])
    traffic_split = config.get('deployment.traffic_split', [1.0])

    # Validate the traffic configuration before loading anything heavy.
    if len(model_versions) != len(traffic_split):
        raise ValueError("模型版本数量与流量分配比例数量不匹配")

    if not np.isclose(sum(traffic_split), 1.0):
        raise ValueError("流量分配比例总和必须为1.0")

    # Load each model version; the version name selects its model file.
    for version in model_versions:
        model_manager = ModelManager(config, data_processor, logger)
        model_manager.config.set('model.name', f"price_predictor_{version}")
        model_manager.load_model(input_dim, output_dims)
        model_manager.model.eval()  # inference mode: freezes dropout/batchnorm
        models[version] = model_manager
        logger.info(f"已加载模型版本: {version}")

    logger.info(f"服务初始化完成，模型版本: {model_versions}, 流量分配: {traffic_split}")


def get_model_version(user_id: str) -> str:
    """Pick a model version for this user (A/B traffic routing).

    Hashes the user id to a float in [0, 1] and walks the cumulative
    traffic split; users without an id deterministically get the first
    version.

    Args:
        user_id: user identifier, or None/empty for the default route.

    Returns:
        The chosen model version key from `models`.
    """
    model_versions = list(models.keys())

    if not user_id:
        # No user id: route to the first (default) version.
        return model_versions[0]

    hash_val = hashlib.md5(user_id.encode()).hexdigest()
    # BUG FIX: the full 128-bit MD5 value divided by 2**64-1 is almost
    # always far greater than 1, so every user fell through to the LAST
    # version and the traffic split never applied. Use only the top
    # 64 bits (16 hex chars) so the ratio lies in [0, 1].
    hash_float = int(hash_val[:16], 16) / float(0xFFFFFFFFFFFFFFFF)

    # Walk the cumulative distribution to find this user's bucket.
    cumulative = 0.0
    for version, share in zip(model_versions, traffic_split):
        cumulative += share
        if hash_float <= cumulative:
            return version

    # Floating-point slack in the split: fall back to the last version.
    return model_versions[-1]


@app.post("/predict", response_model=PredictionResponse)
async def predict(request: PredictionRequest):
    """Predict the price range for the records in the request.

    Routes the request to a model version via the A/B traffic split,
    builds the feature matrix in the configured column order, applies
    the training-time augmentation and scaling, and returns the
    predicted class plus per-class probabilities for the first sample.

    Raises:
        HTTPException: 400 when a required feature is missing,
            500 on any other failure.
    """
    try:
        # Deterministic request id derived from the user and payload.
        request_id = hashlib.md5(
            f"{request.user_id}_{str(request.features)}".encode()
        ).hexdigest()[:16]

        # A/B routing: pick the serving model for this user.
        model_version = get_model_version(request.user_id)
        model_manager = models[model_version]

        feature_names = config.get('data.features')

        # Build the feature matrix in the configured column order.
        x = []
        for item in request.features:
            try:
                x.append([item[name] for name in feature_names])
            except KeyError as e:
                raise HTTPException(status_code=400, detail=f"缺少特征: {str(e)}")

        x = np.array(x)

        # Apply the same augmentation used at training time, if enabled.
        if config.get('data.preprocess.augmentation.enable', False):
            x_tensor = torch.tensor(x, dtype=torch.float32)
            x_augmented = data_processor.data_augmentation(x_tensor).numpy()
        else:
            x_augmented = x

        # BUG FIX: scale the (possibly augmented) features. The original
        # passed the raw `x`, which mismatches the scaler's expected
        # feature count whenever augmentation is enabled.
        x_proc = data_processor.scaler.transform(x_augmented)

        x_tensor = torch.tensor(x_proc, dtype=torch.float32).to(model_manager.device)

        with torch.no_grad():
            outputs = model_manager.model(x_tensor)

            # Multi-task models return a dict; use the first task's logits.
            if isinstance(outputs, dict):
                primary_task = next(iter(outputs.keys()))
                logits = outputs[primary_task]
            else:
                logits = outputs

            probs = torch.softmax(logits, dim=1).cpu().numpy()
            # Native int, not np.int64, so pydantic serializes it cleanly.
            price_range = int(np.argmax(probs, axis=1)[0])

        logger.info(f"预测完成 - 请求ID: {request_id}, 模型版本: {model_version}, 结果: {price_range}")

        return PredictionResponse(
            price_range=price_range,
            probabilities=probs[0].tolist(),
            model_version=model_version,
            request_id=request_id
        )
    except HTTPException:
        # BUG FIX: let deliberate HTTP errors (e.g. the 400 for missing
        # features) propagate instead of being rewrapped as 500 below.
        raise
    except Exception as e:
        logger.error(f"预测错误: {str(e)}")
        raise HTTPException(status_code=500, detail=f"预测失败: {str(e)}")


@app.get("/health")
async def health_check():
    """Liveness probe: report status, loaded model versions, and the time."""
    loaded_versions = [version for version in models]
    now_iso = pd.Timestamp.now().isoformat()
    return {
        "status": "healthy",
        "models": loaded_versions,
        "timestamp": now_iso,
    }


if __name__ == "__main__":
    # Load config, data processor, and all model versions before serving;
    # the listening port comes from deployment config (default 8000).
    init_service()
    port = config.get('deployment.api_port', 8000)
    uvicorn.run(app, host="0.0.0.0", port=port)