from flask import Flask, request, jsonify
import psycopg2  # postgresql
import psycopg2.extras
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import pickle
from datetime import datetime, timedelta
import warnings

# Silence all warnings globally (pandas/torch deprecation noise).
# NOTE(review): blanket suppression also hides genuinely useful warnings.
warnings.filterwarnings("ignore")

# Load application configuration (DB credentials, model paths/hyperparams, API settings, SQL)
from config import DB_CONFIG, MODEL_CONFIG, API_CONFIG, QUERY_SQL


# ----------- Kronos model definition (must match the training-time architecture exactly) -----------
class PositionalEncoding(nn.Module):
    """Add sinusoidal position information to a (seq_len, batch, d_model) tensor."""

    def __init__(self, d_model, max_len=5000, dropout=0.1):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Classic "Attention Is All You Need" table: even dims get sine,
        # odd dims get cosine, with geometrically spaced frequencies.
        positions = torch.arange(max_len).unsqueeze(1)
        freqs = torch.exp(torch.arange(0, d_model, 2) * (-np.log(10000.0) / d_model))
        angles = positions * freqs
        table = torch.zeros(max_len, 1, d_model)
        table[:, 0, 0::2] = torch.sin(angles)
        table[:, 0, 1::2] = torch.cos(angles)
        # Buffer (not a parameter): moves with .to(device) but is never trained.
        self.register_buffer('pe', table)

    def forward(self, x):
        """Add encodings for the first x.size(0) positions, then apply dropout."""
        return self.dropout(x + self.pe[:x.size(0)])


class MultiHeadAttention(nn.Module):
    """Post-norm multi-head attention with a residual connection.

    NOTE(review): Kronos.generate_target_mask supplies an additive-style
    mask (0.0 allowed / -inf blocked), but this layer interprets it with
    ``mask == 0`` marking positions to suppress — the two conventions look
    inverted relative to each other.  Preserved as-is because the shipped
    weights were trained with exactly this behavior; confirm against the
    training code before changing either side.
    """

    def __init__(self, d_model, n_heads=4, dropout=0.1):
        super().__init__()
        assert d_model % n_heads == 0, "d_model must be divisible by n_heads"
        self.d_k = d_model // n_heads
        self.n_heads = n_heads
        # Layer creation order preserved so seeded initialization stays reproducible.
        self.w_q = nn.Linear(d_model, d_model)
        self.w_k = nn.Linear(d_model, d_model)
        self.w_v = nn.Linear(d_model, d_model)
        self.w_o = nn.Linear(d_model, d_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)

    def forward(self, q, k, v, mask=None):
        """Return (normalized output, attention weights) for batch-first q/k/v."""
        shortcut = q
        bsz = q.size(0)

        # Project and reshape to (batch, heads, seq, d_k).
        qh = self.w_q(q).view(bsz, -1, self.n_heads, self.d_k).transpose(1, 2)
        kh = self.w_k(k).view(bsz, -1, self.n_heads, self.d_k).transpose(1, 2)
        vh = self.w_v(v).view(bsz, -1, self.n_heads, self.d_k).transpose(1, 2)

        # Scaled dot-product attention.
        logits = torch.matmul(qh, kh.transpose(-2, -1)) / np.sqrt(self.d_k)
        if mask is not None:
            logits = logits.masked_fill(mask == 0, -1e9)
        weights = self.dropout(F.softmax(logits, dim=-1))
        context = torch.matmul(weights, vh)

        # Merge heads, project, then residual + post-layer-norm.
        merged = context.transpose(1, 2).contiguous().view(bsz, -1, self.n_heads * self.d_k)
        out = self.dropout(self.w_o(merged)) + shortcut
        return self.layer_norm(out), weights


class FeedForwardNetwork(nn.Module):
    """Position-wise two-layer MLP with residual connection and post-layer-norm."""

    def __init__(self, d_model, d_ff=128, activation='relu', dropout=0.1):
        super().__init__()
        # Creation order (w_1, w_2, dropout, layer_norm) preserved so seeded
        # weight initialization stays reproducible.
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        self.activation = self._get_activation(activation)

    def _get_activation(self, activation):
        """Map an activation name to its functional form; unknown names fall back to ReLU."""
        return {'relu': F.relu, 'gelu': F.gelu}.get(activation, F.relu)

    def forward(self, x):
        shortcut = x
        hidden = self.dropout(self.activation(self.w_1(x)))
        out = self.dropout(self.w_2(hidden)) + shortcut
        return self.layer_norm(out)


class EncoderLayer(nn.Module):
    """One encoder block: self-attention followed by a feed-forward sublayer."""

    def __init__(self, d_model, n_heads=4, d_ff=128, activation='relu', dropout=0.1):
        super().__init__()
        self.self_attn = MultiHeadAttention(d_model, n_heads, dropout)
        self.ffn = FeedForwardNetwork(d_model, d_ff, activation, dropout)

    def forward(self, x, mask=None):
        """Return (transformed sequence, self-attention weights)."""
        attended, attn_weights = self.self_attn(x, x, x, mask)
        return self.ffn(attended), attn_weights


class DecoderLayer(nn.Module):
    """One decoder block: masked self-attention, encoder-decoder cross-attention, then FFN."""

    def __init__(self, d_model, n_heads=4, d_ff=128, activation='relu', dropout=0.1):
        super().__init__()
        self.self_attn = MultiHeadAttention(d_model, n_heads, dropout)
        self.cross_attn = MultiHeadAttention(d_model, n_heads, dropout)
        self.ffn = FeedForwardNetwork(d_model, d_ff, activation, dropout)

    def forward(self, x, enc_output, tgt_mask=None, src_mask=None):
        """Return (output, self-attention weights, cross-attention weights)."""
        hidden, self_w = self.self_attn(x, x, x, tgt_mask)
        hidden, cross_w = self.cross_attn(hidden, enc_output, enc_output, src_mask)
        return self.ffn(hidden), self_w, cross_w


class Kronos(nn.Module):
    """Encoder-decoder Transformer for multi-step electricity forecasting.

    Input:  (batch, history_len, input_dim) scaled feature sequence.
    Output: (batch, pred_len, 2) — column 0 is predicted electricity amount,
    column 1 the fee, both still in the scaler's normalized space.

    The module layout must stay identical to the training script's, since
    load_state_dict() restores weights by module name and shape.
    """

    def __init__(self, input_dim, history_len, pred_len, n_layers=2, d_model=32,
                 n_heads=4, d_ff=128, dropout=0.1, activation='relu', use_norm=True, device='cpu'):
        super().__init__()
        self.input_dim = input_dim
        self.history_len = history_len
        self.pred_len = pred_len
        self.d_model = d_model
        self.device = device

        # Project raw features into model space; optional LayerNorm on top.
        self.input_projection = nn.Linear(input_dim, d_model)
        self.use_norm = use_norm
        if use_norm:
            self.input_norm = nn.LayerNorm(d_model)

        # Sized to cover both the encoder history and the decoder horizon.
        self.positional_encoding = PositionalEncoding(d_model, max_len=history_len + pred_len, dropout=dropout)

        self.encoder_layers = nn.ModuleList([
            EncoderLayer(d_model, n_heads, d_ff, activation, dropout)
            for _ in range(n_layers)
        ])

        self.decoder_layers = nn.ModuleList([
            DecoderLayer(d_model, n_heads, d_ff, activation, dropout)
            for _ in range(n_layers)
        ])

        # Two output channels per step: electricity amount and fee.
        self.output_projection = nn.Linear(d_model, 2)
        self._init_weights()

    def _init_weights(self):
        # Xavier-initialize every weight matrix; 1-D params (biases, norms) keep defaults.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def generate_target_mask(self, seq_len):
        """Build a (seq_len, seq_len) additive-style causal mask: 0.0 on/below
        the diagonal, -inf above it.

        NOTE(review): MultiHeadAttention consumes this via
        ``masked_fill(mask == 0, -1e9)``, which suppresses exactly the
        positions this mask marks as allowed — the conventions look inverted.
        Left untouched because the shipped weights were trained with this
        exact behavior; verify against the training code before changing.
        """
        mask = (torch.triu(torch.ones(seq_len, seq_len, device=self.device)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask

    def forward(self, x):
        """Encode the history and decode all pred_len future steps in one pass.

        Args:
            x: (batch, history_len, input_dim) float tensor.

        Returns:
            (batch, pred_len, 2) tensor of scaled predictions.
        """
        batch_size = x.size(0)
        enc_input = self.input_projection(x)
        if self.use_norm:
            enc_input = self.input_norm(enc_input)

        # PositionalEncoding expects (seq, batch, d_model); transpose in and out.
        enc_input = self.positional_encoding(enc_input.transpose(0, 1)).transpose(0, 1)
        enc_output = enc_input
        for enc_layer in self.encoder_layers:
            enc_output, _ = enc_layer(enc_output)

        # Seed the decoder by repeating the last encoder state pred_len times
        # (non-autoregressive: all horizon steps are produced in parallel).
        dec_input = enc_output[:, -1:, :].repeat(1, self.pred_len, 1)
        dec_input = self.positional_encoding(dec_input.transpose(0, 1)).transpose(0, 1)
        tgt_mask = self.generate_target_mask(self.pred_len)

        dec_output = dec_input
        for dec_layer in self.decoder_layers:
            dec_output, _, _ = dec_layer(dec_output, enc_output, tgt_mask=tgt_mask)

        output = self.output_projection(dec_output)
        return output


# -------------------------- Utility functions --------------------------
def get_db_connection():
    """Open a PostgreSQL connection whose cursors return dict-shaped rows.

    Returns:
        A psycopg2 connection with RealDictCursor as the default cursor
        factory, so every fetched row is a dict keyed by column-name string
        (downstream DataFrame construction depends on this).

    Raises:
        Exception: wrapping the underlying driver error when the connection
        cannot be established.
    """
    try:
        connection = psycopg2.connect(
            host=DB_CONFIG["host"],
            port=DB_CONFIG["port"],
            user=DB_CONFIG["user"],
            password=DB_CONFIG["password"],
            dbname=DB_CONFIG["dbname"],
        )
        # Dict rows: keys are real column-name strings, not tuple indices.
        connection.cursor_factory = psycopg2.extras.RealDictCursor
        return connection
    except Exception as e:
        raise Exception(f"PostgreSQL 数据库连接失败：{str(e)}")


# Fixed (month, day) holiday calendar matching the training-time features.
# NOTE(review): lunar-calendar holidays (Spring Festival, Dragon Boat,
# Mid-Autumn) shift every year — these entries are only correct for the
# year the model was trained on; confirm before reuse across years.
_LEGAL_HOLIDAYS = frozenset([
    (1, 1), (1, 2), (1, 3), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7),
    (4, 5), (4, 6), (4, 7), (5, 1), (5, 2), (5, 3), (6, 10), (6, 11), (6, 12),
    (9, 15), (9, 16), (9, 17), (10, 1), (10, 2), (10, 3), (10, 4), (10, 5), (10, 6), (10, 7),
])


def get_holiday_flag(date: pd.Timestamp) -> int:
    """Return 1 if *date* is a weekend or a listed legal holiday, else 0.

    Must stay consistent with the feature engineering used at training time.
    This is called once per history row on every API request, so the holiday
    lookup uses an O(1) frozenset instead of scanning a list.
    """
    if date.weekday() >= 5:  # Saturday (5) or Sunday (6)
        return 1
    return 1 if (date.month, date.day) in _LEGAL_HOLIDAYS else 0


def preprocess_input_data(df: pd.DataFrame, scaler, history_len: int) -> torch.Tensor:
    """Turn the raw history DataFrame into a (1, history_len, n_features) model input.

    Mirrors the training-time pipeline: derive calendar features, fill gaps,
    scale with the fitted scaler, and keep only the most recent history_len rows.

    Args:
        df: history rows with a lower-cased ``create_time`` column plus the
            raw electricity feature columns.
        scaler: fitted scaler from training (must expose ``transform``).
        history_len: number of most-recent rows fed to the model.

    Returns:
        Float32 tensor of shape (1, history_len, n_features) on DEVICE.

    Raises:
        Exception: if ``create_time`` or any expected feature column is missing.
    """
    if "create_time" not in df.columns:
        raise Exception("数据中缺少 create_time 字段（已强制转为小写）")

    # Work on a copy so the caller's DataFrame is not mutated by the
    # derived columns added below.
    df = df.copy()

    # 1. Derive calendar features exactly as at training time.
    df["create_time"] = pd.to_datetime(df["create_time"])
    df["hour"] = df["create_time"].dt.hour
    df["weekday"] = df["create_time"].dt.weekday
    df["month"] = df["create_time"].dt.month
    df["holiday"] = df["create_time"].apply(get_holiday_flag)

    # 2. Feature columns, in the exact order the scaler was fitted on.
    feature_cols = [
        "electricity_num", "electricity_fee", "power_factor",
        "sharp_electricity", "peak_electricity", "flat_electricity",
        "valley_electricity", "deep_valley_electricity",
        "hour", "weekday", "month", "holiday"
    ]

    # 3. Fail fast if any expected column is absent.
    missing_cols = [col for col in feature_cols if col not in df.columns]
    if missing_cols:
        raise Exception(f"缺失特征列：{missing_cols}")

    # 4. Forward- then backward-fill gaps.  fillna(method=...) is deprecated
    #    since pandas 2.1 (removed in 3.0); ffill()/bfill() are the supported
    #    equivalents with identical semantics.
    df_features = df[feature_cols].ffill().bfill()

    # 5. Scale with the training-time scaler.
    data_scaled = scaler.transform(df_features)

    # 6. Keep the latest history_len rows and add the batch dimension.
    input_seq = data_scaled[-history_len:].reshape(1, history_len, -1)
    return torch.tensor(input_seq, dtype=torch.float32).to(DEVICE)


def generate_prediction_times(last_time: datetime, pred_len: int, freq="30T") -> list:
    """Generate ``pred_len`` future timestamps at a fixed 15-minute step.

    The model's 96-step horizon covers exactly 24 hours, so the step is
    24h / 96 = 15 minutes.  The original implementation reassigned and then
    never used ``freq``; the parameter is kept only for backward
    compatibility and is intentionally ignored.

    Args:
        last_time: timestamp of the most recent history row.
        pred_len: number of future points to generate.
        freq: ignored (legacy parameter; the step is always 15 minutes).

    Returns:
        List of "YYYY-MM-DD HH:MM:SS" strings, starting 15 minutes after
        ``last_time``.
    """
    step = timedelta(minutes=15)
    return [(last_time + step * (i + 1)).strftime("%Y-%m-%d %H:%M:%S") for i in range(pred_len)]


# -------------------------- Initialize model and device --------------------------
# Device selection: use GPU when available, otherwise CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备：{DEVICE}")

# Load the scaler fitted at training time (used for transform / inverse_transform).
# NOTE(review): pickle.load executes arbitrary code from the file — the scaler
# file must come from a trusted source.
try:
    with open(MODEL_CONFIG["scaler_path"], "rb") as f:
        SCALER = pickle.load(f)
    print(f"归一化器加载成功：{MODEL_CONFIG['scaler_path']}")
except Exception as e:
    raise Exception(f"归一化器加载失败：{str(e)}")

# Build the Kronos model with the training-time hyperparameters and restore weights.
try:
    MODEL = Kronos(
        input_dim=MODEL_CONFIG["input_dim"],
        history_len=MODEL_CONFIG["history_len"],
        pred_len=MODEL_CONFIG["pred_len"],
        n_layers=MODEL_CONFIG["n_layers"],
        d_model=MODEL_CONFIG["d_model"],
        n_heads=MODEL_CONFIG["n_heads"],
        d_ff=MODEL_CONFIG["d_ff"],
        dropout=MODEL_CONFIG["dropout"],
        activation=MODEL_CONFIG["activation"],
        device=DEVICE
    ).to(DEVICE)

    # map_location keeps CPU-only hosts working even for GPU-trained checkpoints.
    MODEL.load_state_dict(torch.load(MODEL_CONFIG["model_path"], map_location=DEVICE))
    MODEL.eval()  # inference mode: disables dropout
    print(f"模型加载成功：{MODEL_CONFIG['model_path']}")
except Exception as e:
    raise Exception(f"模型加载失败：{str(e)}")

# -------------------------- Flask API initialization --------------------------
app = Flask(__name__)


# -------------------------- API endpoints --------------------------
@app.route("/api/predict/electricity", methods=["GET"])
def predict_electricity():
    """Electricity amount/fee forecasting endpoint.

    Method: GET
    Query params: device_id (required) — device to forecast for.
    Returns: JSON envelope with 96 future points (15-minute interval,
    covering 24 hours) of predicted electricity amount and fee, or an
    error envelope (code 400/404/500) on failure.
    """
    try:
        # 1. Validate request parameters.
        device_id = request.args.get("device_id")
        if not device_id:
            return jsonify({
                "code": 400,
                "msg": "参数错误：device_id不能为空",
                "data": None
            }), 400

        # 2. Fetch the device's recent history from the database.
        conn = get_db_connection()
        try:
            with conn.cursor() as cursor:
                cursor.execute(QUERY_SQL, (device_id, MODEL_CONFIG["history_len"]))
                results = cursor.fetchall()
                if len(results) < MODEL_CONFIG["history_len"]:
                    return jsonify({
                        "code": 404,
                        "msg": f"设备{device_id}历史数据不足（需{MODEL_CONFIG['history_len']}条，实际{len(results)}条）",
                        "data": None
                    }), 404

                # Accept both dict rows (RealDictCursor) and plain tuples,
                # normalizing column names to lower-case strings either way.
                if isinstance(results[0], dict):
                    # Case 1: dict rows — column names are already strings.
                    df = pd.DataFrame(results)
                    # Force string, lower-case column names.
                    df.columns = [str(col).lower() for col in df.columns]
                else:
                    # Case 2: tuple rows — map columns positionally.
                    # This list must match the column order of QUERY_SQL exactly!
                    sql_columns = [
                        "create_time", "electricity_num", "electricity_fee",
                        "power_factor", "sharp_electricity", "peak_electricity",
                        "flat_electricity", "valley_electricity", "deep_valley_electricity"
                    ]
                    df = pd.DataFrame(results, columns=sql_columns)
                    # Force lower-case column names.
                    df.columns = [col.lower() for col in df.columns]

                # Sort chronologically; the model expects oldest-first input.
                df = df.sort_values("create_time").reset_index(drop=True)
                last_time = pd.to_datetime(df["create_time"].iloc[-1])
        finally:
            conn.close()

        # 3. Build the model input tensor (scaling + calendar features).
        input_tensor = preprocess_input_data(df, SCALER, MODEL_CONFIG["history_len"])

        # 4. Run inference without gradient tracking.
        with torch.no_grad():
            pred_scaled = MODEL(input_tensor)  # (1, 96, 2): amount, fee
            pred_np = pred_scaled.cpu().numpy().squeeze()  # (96, 2)

        # 5. Inverse-transform back to original units.  The scaler was fitted
        #    on 12 feature columns, so pad with zeros and keep only the first
        #    two columns of the result.
        dummy_data = np.zeros((MODEL_CONFIG["pred_len"], 12))  # (96, 12)
        dummy_data[:, :2] = pred_np  # first two columns carry the predictions
        pred_original = SCALER.inverse_transform(dummy_data)[:, :2]

        # 6. Timestamps for each predicted step.
        prediction_times = generate_prediction_times(last_time, MODEL_CONFIG["pred_len"])

        # 7. Assemble the response payload.
        result = {
            "device_id": device_id,
            "prediction_start_time": prediction_times[0],
            "prediction_end_time": prediction_times[-1],
            "time_interval": "15分钟",  # 96 points x 15 min = 24 hours
            "predictions": [
                {
                    "time": prediction_times[i],
                    "electricity_num": round(float(pred_original[i][0]), 4),  # amount, 4 decimals
                    "electricity_fee": round(float(pred_original[i][1]), 6)  # fee, 6 decimals
                }
                for i in range(MODEL_CONFIG["pred_len"])
            ],
            "metrics": {
                "prediction_count": MODEL_CONFIG["pred_len"],
                "data_source": "数据库历史数据",
                "model_version": "kronos_electric_fast_v1"
            }
        }

        return jsonify({
            "code": 200,
            "msg": "预测成功",
            "data": result
        }), 200

    except Exception as e:
        # NOTE(review): str(e) is returned to the client and may leak internal
        # details (SQL, file paths); consider a generic message in production.
        error_msg = f"预测失败：{str(e)}"
        print(f"[{datetime.now()}] {error_msg}")
        return jsonify({
            "code": 500,
            "msg": error_msg,
            "data": None
        }), 500


# -------------------------- Health check endpoint --------------------------
@app.route("/api/health", methods=["GET"])
def health_check():
    """Liveness probe: report that the service and loaded model are up."""
    payload = {
        "model_loaded": True,
        "device": str(DEVICE),
        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    }
    return jsonify({"code": 200, "msg": "API服务正常运行", "data": payload}), 200


# -------------------------- Entry point --------------------------
if __name__ == "__main__":
    # Flask development server; host/port/debug come from config.API_CONFIG.
    # NOTE(review): debug=True must never be enabled in production — the
    # Werkzeug debugger allows arbitrary code execution.
    app.run(
        host=API_CONFIG["host"],
        port=API_CONFIG["port"],
        debug=API_CONFIG["debug"]
    )