import csv
import io
import json
import math
import os
import time
from datetime import datetime
from pathlib import Path
from typing import List
import struct

import numpy as np
import pandas as pd
import requests
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from starlette.responses import StreamingResponse

from . import output_pb2

from config import settings
from .main_api import simulate_pipeline_network
from utils.logger import create_logger
from utils.pressure import process_pressure_data

# Module-level logger for this router.
logger = create_logger(__name__)
# Two spellings of the same directory (two levels above this file):
# BASE_DIR as a plain str (os.path style), PROJECT_ROOT as a pathlib.Path.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = Path(__file__).parent.parent

# Router for the surrogate-model prediction endpoints.
router = APIRouter(prefix="/predict", tags=["代理模型预测"])
# Request body model definition
class ProcessCSVRequest(BaseModel):
    """Request body: a CSV table split into a header row plus data rows."""
    headers: List[str]
    data: List[List[str]]

    def validate_data(self):
        """Optional consistency check: every row must match the header width.

        Raises:
            ValueError: if any row's length differs from ``len(self.headers)``.
        """
        width = len(self.headers)
        for idx, row in enumerate(self.data, start=1):
            if len(row) != width:
                raise ValueError(f"第 {idx} 行数据长度 {len(row)} 与 headers 长度 {width} 不匹配")

class SafeJSONEncoder(json.JSONEncoder):
    """JSON encoder that serializes float NaN values as ``null``.

    The stock encoder emits the non-standard token ``NaN``, which strict
    JSON parsers reject.  This encoder recursively replaces every float
    NaN inside dicts/lists with ``None`` before serialization.

    Bug fix: the original only overrode ``encode()``, but ``json.dump()``
    (and other streaming callers) invoke ``iterencode()`` directly, which
    bypassed the cleanup.  Both entry points now clean the object first.
    """

    @staticmethod
    def _replace_nan(obj):
        """Return a copy of *obj* with every float NaN replaced by None."""
        if isinstance(obj, dict):
            return {k: SafeJSONEncoder._replace_nan(v) for k, v in obj.items()}
        if isinstance(obj, list):
            return [SafeJSONEncoder._replace_nan(item) for item in obj]
        if isinstance(obj, float) and math.isnan(obj):
            return None
        return obj

    def encode(self, obj):
        return super().encode(self._replace_nan(obj))

    def iterencode(self, obj, _one_shot=False):
        # json.dump() calls iterencode() directly, skipping encode().
        return super().iterencode(self._replace_nan(obj), _one_shot)

@router.post("/process_json/", summary="数据预测接口，接收JSON并流式返回Protobuf")
async def process_json_endpoint(request: ProcessCSVRequest):
    """Receive CSV data as JSON, run the prediction pipeline, and stream back
    length-prefixed Protobuf messages — one ``PredictionOutput`` per prediction
    output file (B_predictions.csv, etc.).

    Wire format: each message is a 4-byte big-endian length prefix followed by
    the serialized Protobuf payload.

    Raises:
        HTTPException 400: empty headers/data, ragged rows, or bad encoding.
        HTTPException 500: pipeline failure or unexpected internal error.
    """
    try:
        start_time = time.time()
        # ========== 1. Parse and validate the JSON payload ==========
        # Persist the raw request for debugging/replay. Ensure the log dir
        # exists and force UTF-8: the payload may contain non-ASCII text and
        # ensure_ascii=False would fail under a non-UTF-8 default encoding.
        log_dir = PROJECT_ROOT / "log"
        log_dir.mkdir(parents=True, exist_ok=True)
        with open(log_dir / "c.json", 'w', encoding='utf-8') as f:
            json.dump(request.model_dump(), f, ensure_ascii=False)
        logger.info(f"Received request with headers: {request.headers}")
        headers = request.headers
        rows = request.data

        if not headers:
            raise HTTPException(status_code=400, detail="headers 不能为空")
        if not rows:
            raise HTTPException(status_code=400, detail="data 至少需要一行")

        # Every data row must have exactly as many columns as the header row.
        for i, row in enumerate(rows):
            if len(row) != len(headers):
                raise HTTPException(
                    status_code=400,
                    detail=f"第 {i + 1} 行数据列数不匹配（期望 {len(headers)}，实际 {len(row)}）"
                )

        # ========== 2. Re-assemble the payload into a CSV string ==========
        csv_buffer = io.StringIO()
        writer = csv.writer(csv_buffer)
        writer.writerow(headers)
        writer.writerows(rows)
        csv_content = csv_buffer.getvalue()
        csv_buffer.close()

        logger.info(f"Converted to CSV, length: {len(csv_content)}")

        # ========== 3. Run the prediction pipeline on the CSV data ==========
        result = simulate_pipeline_network(yc_data=csv_content)
        if isinstance(result, dict) and result.get('status') == 'failed':
            err = result.get('error')
            details = result.get('details', '')
            logger.error(f"Prediction failed: {err}, details: {details}")
            # Keep only the tail of very long tracebacks so the HTTP detail
            # stays readable.
            short_details = str(details)[-1000:] if len(str(details)) > 1000 else str(details)
            raise HTTPException(status_code=500, detail=f"预测失败: {err}. 详情: {short_details}")

        # ========== 4. Locate the prediction output files ==========
        predict_dir = os.path.join(BASE_DIR, settings["filepath"]["predict_result_dir"])
        prediction_files = [
            "B_predictions.csv",
            "E_predictions.csv",
            "H_predictions.csv",
            "KC_predictions.csv",
            "NO_predictions.csv",
            "RE_predictions.csv",
            "RG_predictions.csv",
            "T_predictions.csv",
        ]

        # ========== 5. Generator: read CSV -> Protobuf -> length prefix -> yield ==========
        def protobuf_stream():
            for fname in prediction_files:
                fpath = os.path.join(predict_dir, fname)
                pb_msg = output_pb2.PredictionOutput()
                pb_msg.filename = fname

                if not os.path.exists(fpath):
                    # Missing file: still emit a message so the client sees
                    # one message per expected file.
                    logger.warning(f"预测文件不存在，跳过: {fpath}")
                    pb_msg.headers.append("error")
                    err_row = pb_msg.data.add()
                    err_row.values.append("File not generated")
                else:
                    try:
                        with open(fpath, 'r', encoding='utf-8') as f:
                            reader = csv.reader(f)
                            all_rows = list(reader)

                        if not all_rows:
                            logger.info(f"空文件，跳过数据行: {fname}")
                            pb_msg.headers.append("warning")
                            err_row = pb_msg.data.add()
                            err_row.values.append("Empty file")
                        else:
                            # First row is the header, the rest is data.
                            headers = all_rows[0]
                            data_rows = all_rows[1:]

                            pb_msg.headers.extend(headers)
                            for row in data_rows:
                                pb_row = pb_msg.data.add()
                                pb_row.values.extend(row)

                    except Exception as e:
                        logger.error(f"读取或解析 CSV 失败 {fpath}: {e}")
                        pb_msg.headers.append("error")
                        err_row = pb_msg.data.add()
                        err_row.values.append(f"Parse error: {str(e)}")

                # Serialize and prepend the 4-byte big-endian length prefix.
                try:
                    serialized = pb_msg.SerializeToString()  # bytes
                    prefix = struct.pack('>I', len(serialized))
                    yield prefix + serialized
                    logger.info(f"已发送 Protobuf 消息: {fname}")
                except Exception as e:
                    logger.error(f"序列化 Protobuf 失败 {fname}: {e}")
                    # Fall back to an error message so the stream framing
                    # stays intact for the client.
                    error_msg = output_pb2.PredictionOutput()
                    error_msg.filename = fname
                    error_msg.headers.append("error")
                    err_row = error_msg.data.add()
                    err_row.values.append(f"Serialization failed: {str(e)}")
                    serialized = error_msg.SerializeToString()
                    prefix = struct.pack('>I', len(serialized))
                    yield prefix + serialized

        # NOTE(review): this measures setup + pipeline time only; the
        # generator above runs lazily while the response is streamed,
        # after this point.
        end_time = time.time()
        logger.info(f"接口 process_json 调用时长：{(end_time - start_time):.2f} s")
        # ========== 6. Return the streaming response ==========
        return StreamingResponse(
            content=protobuf_stream(),
            media_type="application/x-protobuf"
        )
    except UnicodeDecodeError:
        raise HTTPException(status_code=400, detail="Invalid encoding. Please ensure the CSV is UTF-8 encoded.")
    except HTTPException:
        # Bug fix: the generic handler below used to swallow our own
        # 400-level HTTPExceptions and re-wrap them as 500s. Re-raise
        # them untouched so clients get the intended status code.
        raise
    except Exception as e:
        logger.exception(f"Internal server error: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")

@router.get("/trigger_data/", summary="触发数据生成")
async def trigger_data():
    """Manually trigger the station-data generation and push job.

    NOTE(review): the job runs synchronously inside this async handler, so
    it blocks the event loop until it finishes — consider offloading to a
    thread pool if the job is long-running.
    """
    # Plain string: the original used an f-string with no placeholders.
    logger.info("触发成功")
    generate_station_data_scheduler()

def generate_station_data_scheduler():
    """Run the full station-data job: predict, aggregate, and push upstream.

    Steps:
      1. Run the pipeline (fetches current data and writes the CSV outputs).
      2. Read NO_predictions.csv and the station pressure mapping table.
      3. Aggregate pressure data to hourly values.
      4. Build the JSON payload (station predictions + the model input rows).
      5. POST the payload to the configured receiver endpoint.

    Raises:
        HTTPException 400/500: raised on failure for parity with the HTTP
            endpoints, since the /trigger_data/ route calls this directly.
    """
    try:
        start_time = time.time()
        # 1. Fetch current data from the upstream interface and save it as CSV.
        simulate_pipeline_network()

        # ========== 2. Resolve file paths ==========
        predict_dir = PROJECT_ROOT / "predict_result" / "zhangjiacheng"
        no_predictions_path = predict_dir / "NO_predictions.csv"
        logger.info(f"predict_dir: {predict_dir}")
        logger.info(f"no_predictions_path: {no_predictions_path}")
        mapping_excel_path = PROJECT_ROOT / "data" / "reflect_files" / "station_pressure_reflection.xlsx"

        # Fail fast if the required input files are missing.
        if not os.path.exists(no_predictions_path):
            raise FileNotFoundError(f"未找到预测文件: {no_predictions_path}")
        if not os.path.exists(mapping_excel_path):
            raise FileNotFoundError(f"未找到映射表文件: {mapping_excel_path}")

        # ========== 3. Load inputs ==========
        logger.info(f"正在读取映射表: {mapping_excel_path}")
        df1 = pd.read_excel(mapping_excel_path, engine="openpyxl", dtype={'station': str})

        logger.info(f"正在读取预测数据: {no_predictions_path}")
        df2 = pd.read_csv(no_predictions_path)
        df2 = df2.round(2)

        # ========== 4. Process the pressure data (hourly filter + average) ==========
        logger.info("正在处理压力数据（整点筛选 + 平均）...")
        processed_df = process_pressure_data(
            df1,
            df2,
            type_column='压力对比type值',  # adjust if the mapping sheet's column name differs
            time_column='TIME'
        )

        if processed_df.empty:
            raise ValueError("处理后的压力数据为空，无法发送")
        logger.info(f" 数据处理完成，共 {len(processed_df)} 条记录")

        # ========== 5. Convert to JSON (rename columns, time string -> epoch) ==========
        df_renamed = processed_df.rename(columns={
            '场站编号': 'stationId',
            '进口压力': 'inputPress',
            '出口压力': 'outputPress',
            '时间': 'time'
        })

        # Convert the time string to an on-the-hour UNIX timestamp.
        df_renamed['time'] = df_renamed['time'].apply(lambda dt_str: int(
            datetime.strptime(dt_str, settings["project"]["time_format"])
            .replace(minute=0, second=0, microsecond=0)
            .timestamp()
        ))

        # Replace NaN with None so it serializes as JSON null.
        df_cleaned = df_renamed.replace({np.nan: None})
        station_predict_dtos = df_cleaned.to_dict(orient="records")

        # ========== 6. Build modelInputDTO from the processed yc CSV ==========
        yc_csv_path = predict_dir / "after_processing_yc_data.csv"
        logger.info(f" 正在读取 yc.csv 文件: {yc_csv_path}")

        if not os.path.exists(yc_csv_path):
            raise FileNotFoundError(f"未找到 yc.csv 文件: {yc_csv_path}")

        try:
            df_yc = pd.read_csv(yc_csv_path)
            logger.info(f" 成功读取 yc.csv，共 {len(df_yc)} 行，{len(df_yc.columns)} 列")

            # Only the first and last rows are sent. Stringify the table once
            # (the original materialized df_yc.astype(str).values.tolist()
            # twice) and drop the previously computed-but-unused df_yc_clean.
            yc_rows = df_yc.astype(str).values.tolist()
            model_input_dto = {
                "headers": df_yc.columns.tolist(),
                "data": [yc_rows[0], yc_rows[-1]]
            }
            logger.info(f"📊 modelInputDTO 构建完成，列数: {len(df_yc.columns)}，行数: {len(df_yc)}")

        except Exception as e:
            logger.error(f"❌ 处理 yc.csv 时出错: {e}")
            raise

        # ========== 7. Merge into the final request body ==========
        final_payload = {
            "modelInputDTO": model_input_dto,
            "stationPredictDTOS": station_predict_dtos
        }

        try:
            json_data = json.dumps(final_payload, ensure_ascii=False, separators=(',', ':'))
            # Persist a copy of the outgoing payload for debugging/replay.
            with open(PROJECT_ROOT / "log" / "b.json", 'w', encoding='utf-8') as f:
                f.write(json_data)
            logger.info(f"📦 请求体构建完成，总长度: {len(json_data)} 字符")
        except Exception as e:
            logger.error(f"❌ 序列化最终 JSON 失败: {e}")
            raise

        # ========== 8. Send the merged payload to the receiver endpoint ==========
        url = settings["api_url"]["receive_station_predict_data_url"]
        headers = {
            'accept': '*/*',
            'Authorization': 'auth',
            'Tetproj': '05c911b5efbc4f53a659ac27fbc16614',
            'Content-Type': 'application/json'
        }

        logger.info(f"📤 正在发送合并数据到接口: {url}")
        try:
            response = requests.post(
                url=url,
                headers=headers,
                data=json_data.encode('utf-8'),  # send explicit UTF-8 bytes
                timeout=30
            )

            if response.status_code == 200:
                logger.info(" 数据发送成功！")
            else:
                logger.error(f"❌ 数据发送失败，状态码: {response.status_code}, 响应: {response.text[:1000]}")
        except requests.exceptions.RequestException as e:
            # Best-effort delivery: log the failure but do not crash the job.
            logger.error(f"❌ 请求异常: {e}")

        end_time = time.time()
        logger.info(f"接口 trigger_data 调用时长：{(end_time - start_time):.2f} s")
    except UnicodeDecodeError:
        raise HTTPException(status_code=400, detail="Invalid encoding. Please ensure the CSV is UTF-8 encoded.")
    except Exception as e:
        logger.exception(f"Internal server error: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


if __name__ == "__main__":
    # Allow running the scheduler job directly for ad-hoc/manual testing.
    generate_station_data_scheduler()
