import os
import csv
import requests #用于发送HTTP请求，调用模型API
import json
import numpy as np
import ast  #用于安全解析字符串维python对象
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score #用于计算评估指标

# Configure matplotlib for Chinese text rendering (requires one of these fonts installed).
plt.rcParams["font.family"] = ["Microsoft YaHei", "SimHei", "FangSong"]
plt.rcParams["axes.unicode_minus"] = False  # render minus signs correctly with CJK fonts


def read_csv_with_true(file_path):
    """Read a CSV file and return feature rows plus ground-truth aerodynamic residuals.

    Args:
        file_path: Path to a CSV with columns 'v', 'q', 'pwm', 'fa_true', each
            holding a bracketed vector string (e.g. "[0.1, 0.2, 0.3]").

    Returns:
        (features, fa_true_list): `features` is a list of dicts with keys
        'v' (3 floats), 'q' (4 floats), 'pwm' (4 floats); `fa_true_list` holds
        the matching 3-float ground-truth residuals, aligned by index.

    Raises:
        ValueError: if a required column is missing or no row parses cleanly.
    """
    features = []      # parsed feature rows awaiting prediction
    fa_true_list = []  # ground-truth residuals used for evaluation
    # newline='' + utf-8 per csv-module docs; consistent with save_results().
    with open(file_path, 'r', newline='', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        required_columns = ['v', 'q', 'pwm', 'fa_true']
        for col in required_columns:
            if col not in reader.fieldnames:
                raise ValueError(f"CSV文件 {file_path} 缺少必要列：{col}（需包含fa_true）")

        # Parse row by row; malformed rows are skipped (best-effort).
        for row_num, row in enumerate(reader, start=1):
            try:
                # Parse feature vectors (velocity, attitude quaternion, motor PWM).
                v = _parse_vector(row['v'], 'v', row_num)
                q = _parse_vector(row['q'], 'q', row_num)
                pwm = _parse_vector(row['pwm'], 'pwm', row_num)

                # Parse the ground-truth aerodynamic residual.
                fa_true = _parse_vector(row['fa_true'], 'fa_true', row_num)

                # Enforce expected dimensions: v=3, q=4, pwm=4, fa_true=3.
                if len(v) != 3 or len(q) != 4 or len(pwm) != 4 or len(fa_true) != 3:
                    raise ValueError(
                        f"维度错误：v={len(v)}, q={len(q)}, pwm={len(pwm)}, fa_true={len(fa_true)}"
                    )

                features.append({'v': v, 'q': q, 'pwm': pwm})
                fa_true_list.append(fa_true)
            except Exception as e:
                # Skip the bad row but keep processing the rest of the file.
                print(f"⚠️ 行 {row_num} 解析失败：{str(e)}")
                continue

    if not features or not fa_true_list:
        raise ValueError(f"文件 {file_path} 中无有效数据或真实值（fa_true）")

    return features, fa_true_list


def _parse_vector(vector_str, field_name, row_num):
    """安全解析向量字符串，处理np.float64等复杂格式"""
    try:
        # 常规格式：[0.1, 0.2, 0.3]
        vector = [float(x.strip()) for x in vector_str.strip('[]').split(',') if x.strip()]
        return vector
    except ValueError:
        # 复杂格式：[np.float64(0.1), ...]
        try:
            clean_str = vector_str.replace('np.float64(', '').replace(')', '')
            vector = ast.literal_eval(clean_str)
            return [float(x) for x in vector]
        except Exception as e:
            print(f"🚫 行 {row_num} - 字段 '{field_name}' 解析失败: {vector_str}")
            raise ValueError(f"字段格式错误（行 {row_num}）") from e


def read_multi_adapt_data(adapt_dir):
    """Read every CSV under adapt_dir as the adaptation set and merge the rows.

    Args:
        adapt_dir: Directory containing one or more CSVs with columns
            'v', 'q', 'pwm', 'fa' (bracketed vector strings).

    Returns:
        (adaptinput, adaptlabel): feature dicts {'v','q','pwm'} and the
        matching measured aerodynamic-force labels, aligned by index.

    Raises:
        ValueError: if any adaptation CSV is missing a required column.
    """
    adaptinput = []
    adaptlabel = []
    # Walk every CSV file in the directory and merge their rows.
    for filename in os.listdir(adapt_dir):
        if filename.endswith('.csv'):
            file_path = os.path.join(adapt_dir, filename)
            # newline='' + utf-8 per csv-module docs; consistent with save_results().
            with open(file_path, 'r', newline='', encoding='utf-8') as f:
                reader = csv.DictReader(f)
                required_columns = ['v', 'q', 'pwm', 'fa']
                # Validate the header up front so the error names the file.
                for col in required_columns:
                    if col not in reader.fieldnames:
                        raise ValueError(f"适应集文件 {filename} 缺少必要列：{col}")
                # Parse row by row; malformed rows are skipped (best-effort).
                for row in reader:
                    try:
                        v = _parse_vector(row['v'], 'v', reader.line_num)
                        q = _parse_vector(row['q'], 'q', reader.line_num)
                        pwm = _parse_vector(row['pwm'], 'pwm', reader.line_num)
                        fa = _parse_vector(row['fa'], 'fa', reader.line_num)

                        # Enforce expected dimensions: v=3, q=4, pwm=4, fa=3.
                        if len(v) != 3 or len(q) != 4 or len(pwm) != 4 or len(fa) != 3:
                            raise ValueError(f"文件 {filename} 行 {reader.line_num} 维度错误")

                        adaptinput.append({'v': v, 'q': q, 'pwm': pwm})
                        adaptlabel.append(fa)
                    except Exception as e:
                        print(f"文件 {filename} 行 {reader.line_num} 解析失败：{str(e)}")
                        continue
    return adaptinput, adaptlabel


def batch_predict(features, adaptinput, adaptlabel, batch_size=100, lam=0, timeout=30):
    """Predict aerodynamic residuals for all samples by calling the model service.

    Args:
        features: list of {'v', 'q', 'pwm'} dicts to predict.
        adaptinput: adaptation-set features forwarded to the service.
        adaptlabel: adaptation-set labels forwarded to the service.
        batch_size: samples per HTTP request (keeps payloads manageable).
        lam: regularization weight forwarded to the service.
        timeout: per-request timeout in seconds — without it a stalled
            service would hang this client forever.

    Returns:
        List of 3-element predictions aligned with `features`; samples in a
        failed batch are filled with [None, None, None] placeholders.
    """
    api_url = "http://localhost:5000/predict_batch"
    results = []            # accumulated predictions for all batches
    total = len(features)   # number of samples to predict
    print(f"开始批量预测，共{total}个样本，每批处理{batch_size}个")

    # Process in batches to avoid oversized single requests.
    for i in range(0, total, batch_size):
        batch = features[i:i + batch_size]
        try:
            request_data = {
                'batch_data': batch,
                'adaptinput': adaptinput,
                'adaptlabel': adaptlabel,
                'lam': lam
            }

            # json= serializes the payload and sets Content-Type for us.
            response = requests.post(api_url, json=request_data, timeout=timeout)
            response.raise_for_status()
            # Parse the response: predicted residuals for this batch.
            batch_results = response.json()['fa_list']
            results.extend(batch_results)

            # Progress report.
            processed = min(i + batch_size, total)
            print(f"已处理 {processed}/{total} 个样本")
        except Exception as e:
            print(f"处理第{i}-{i + batch_size}个样本失败：{str(e)}")
            # Pad with placeholders so output stays aligned with input.
            results.extend([[None, None, None]] * len(batch))

    return results


def compute_metrics(y_true, y_pred):
    """Compute evaluation metrics (MSE, MAE, R²) overall and per component.

    Samples whose prediction contains None (from failed batches) are excluded.

    Args:
        y_true: list of 3-element ground-truth residual vectors.
        y_pred: list of 3-element predicted vectors (may contain None entries).

    Returns:
        Dict with an "整体" (overall) entry plus one entry per component
        ("x分量", "y分量", "z分量"), or {"error": ...} if no sample is valid.
    """
    # Keep only samples with a fully valid (None-free) prediction.
    valid_idx = [i for i, pred in enumerate(y_pred) if None not in pred]
    if not valid_idx:
        # Bail out before doing any array work.
        return {"error": "无有效预测值"}

    y_true_valid = np.array([y_true[i] for i in valid_idx])
    y_pred_valid = np.array([y_pred[i] for i in valid_idx])

    # Overall metrics (averaged across all components).
    metrics = {
        "整体": {
            "样本数": len(valid_idx),
            "MSE": round(mean_squared_error(y_true_valid, y_pred_valid), 6),
            "MAE": round(mean_absolute_error(y_true_valid, y_pred_valid), 6),
            "R²": round(r2_score(y_true_valid, y_pred_valid), 6)
        }
    }
    # Per-component metrics — one loop instead of three copy-pasted blocks.
    for axis, label in enumerate(["x分量", "y分量", "z分量"]):
        t = y_true_valid[:, axis]
        p = y_pred_valid[:, axis]
        metrics[label] = {
            "MSE": round(mean_squared_error(t, p), 6),
            "MAE": round(mean_absolute_error(t, p), 6),
            "R²": round(r2_score(t, p), 6)
        }
    return metrics


def plot_comparison(y_true, y_pred, output_path, title="预测值与真实值对比"):
    """Plot predicted vs. ground-truth time series (x/y/z components) to a file.

    Args:
        y_true: list of 3-element ground-truth vectors.
        y_pred: list of 3-element predicted vectors (None entries are skipped).
        output_path: file path for the saved PNG.
        title: overall figure title.
    """
    # Drop samples whose prediction failed (contains None).
    valid_idx = [i for i, pred in enumerate(y_pred) if None not in pred]
    if len(valid_idx) == 0:
        print("⚠️ 无有效数据用于绘图")
        return

    y_true_valid = np.array([y_true[i] for i in valid_idx])
    y_pred_valid = np.array([y_pred[i] for i in valid_idx])
    timesteps = np.arange(len(valid_idx))

    # Three stacked subplots sharing the time axis.
    fig, axes = plt.subplots(3, 1, figsize=(12, 10), sharex=True)
    fig.suptitle(title, fontsize=14)

    # One subplot per force component — same styling, driven by a loop
    # instead of three copy-pasted blocks.
    for k, comp in enumerate(['x', 'y', 'z']):
        axes[k].plot(timesteps, y_true_valid[:, k],
                     label=f"真实值 (fa_true_{comp})", color='blue')
        axes[k].plot(timesteps, y_pred_valid[:, k],
                     label=f"预测值 (fa_pred_{comp})", color='red', linestyle='--')
        axes[k].set_ylabel(f"fa_{comp}")
        axes[k].legend()
        axes[k].grid(True)
    axes[2].set_xlabel("时间步")  # x label only on the bottom subplot

    # Save and close to free the figure.
    plt.tight_layout()
    plt.savefig(output_path, dpi=300)
    plt.close()
    print(f"📊 对比图已保存：{output_path}")


def save_results(input_file, output_file, results, fa_true_list, metrics):
    """Save predictions, ground truth, and evaluation metrics to disk.

    Writes `output_file` (all input columns plus fa_pred_*/fa_true_* columns)
    and a sibling "<output stem>_metrics.txt" with the metrics dict.

    NOTE(review): `results`/`fa_true_list` are aligned with the *successfully
    parsed* rows of `input_file`; if any input row failed parsing upstream,
    zipping against the raw rows misaligns. We warn when counts differ.
    """
    # UTF-8 everywhere to avoid mojibake; newline='' per csv-module docs.
    with open(input_file, 'r', newline='', encoding='utf-8') as in_f, \
         open(output_file, 'w', newline='', encoding='utf-8') as out_f:
        reader = csv.DictReader(in_f)
        # Extend the schema with prediction and ground-truth columns.
        fieldnames = reader.fieldnames + ['fa_pred_x', 'fa_pred_y', 'fa_pred_z',
                                          'fa_true_x', 'fa_true_y', 'fa_true_z']
        writer = csv.DictWriter(out_f, fieldnames=fieldnames)
        writer.writeheader()

        rows = list(reader)
        if len(rows) != len(results):
            # Counts differ — some input rows were likely skipped upstream,
            # so the pairing below may be shifted. Surface it instead of
            # silently truncating via zip.
            print(f"⚠️ 行数不匹配：输入{len(rows)}行，预测{len(results)}条，输出可能错位")

        for row, fa_pred, fa_true in zip(rows, results, fa_true_list):
            # Failed predictions (None) become empty cells.
            row['fa_pred_x'] = fa_pred[0] if fa_pred[0] is not None else ''
            row['fa_pred_y'] = fa_pred[1] if fa_pred[1] is not None else ''
            row['fa_pred_z'] = fa_pred[2] if fa_pred[2] is not None else ''
            row['fa_true_x'] = fa_true[0]
            row['fa_true_y'] = fa_true[1]
            row['fa_true_z'] = fa_true[2]
            writer.writerow(row)
    print(f"📄 结果已保存到：{output_file}")

    # Dump the metrics dict as a human-readable text file next to the CSV.
    metrics_file = os.path.splitext(output_file)[0] + "_metrics.txt"
    with open(metrics_file, 'w', encoding='utf-8') as f:
        f.write("预测评估指标\n")
        f.write("=" * 30 + "\n")
        for key, val in metrics.items():
            f.write(f"{key}:\n")
            for k, v in val.items():
                f.write(f"  {k}: {v}\n")
            f.write("-" * 30 + "\n")
    print(f"📈 评估指标已保存到：{metrics_file}")


def main():
    """Pipeline: load data -> predict -> evaluate -> visualize -> save results."""
    INPUT_DIR = "data/processed"       # input CSVs to predict (must contain fa_true)
    OUTPUT_DIR = "data/predictions"    # where result CSVs are written
    ADAPT_DIR = "data/adapt_files"     # adaptation-set directory (multiple CSVs)
    BATCH_SIZE = 100                   # samples per prediction request
    PLOT_DIR = os.path.join(OUTPUT_DIR, "plots")  # comparison-plot directory
    os.makedirs(PLOT_DIR, exist_ok=True)  # also creates OUTPUT_DIR (its parent)

    # Load the multi-file adaptation set once, shared by every input file.
    try:
        adaptinput, adaptlabel = read_multi_adapt_data(ADAPT_DIR)
        print(f"适应集加载完成，共 {len(adaptinput)} 个样本（来自 {len(os.listdir(ADAPT_DIR))} 个文件）")
    except Exception as e:
        print(f"❌ 适应集加载失败：{str(e)}")
        return

    # Process each input CSV independently; one failure doesn't stop the rest.
    for filename in os.listdir(INPUT_DIR):
        if filename.endswith('.csv'):
            input_path = os.path.join(INPUT_DIR, filename)
            # Derive per-file output names from the source filename so
            # results from different inputs don't overwrite each other.
            output_file = f"predicted_{filename}"
            output_path = os.path.join(OUTPUT_DIR, output_file)
            stem = os.path.splitext(filename)[0]
            plot_path = os.path.join(PLOT_DIR, f"comparison_{stem}.png")
            print(f"\n处理文件：{filename}")

            try:
                # Read features and ground truth.
                features, fa_true_list = read_csv_with_true(input_path)
                print(f"读取到 {len(features)} 个样本，包含真实气动力残差（fa_true）")

                # Batch prediction via the model service.
                results = batch_predict(features, adaptinput, adaptlabel, batch_size=BATCH_SIZE)

                # Evaluate against ground truth.
                metrics = compute_metrics(fa_true_list, results)
                print("评估指标：")
                for key, val in metrics.items():
                    print(f"  {key}: {val}")

                # Visualize predicted vs. true residuals.
                plot_comparison(fa_true_list, results, plot_path,
                                title=f"{filename} 预测值与真实值对比")

                # Persist predictions and metrics.
                save_results(input_path, output_path, results, fa_true_list, metrics)
                print(f"✅ 文件{filename}处理完成")
            except Exception as e:
                print(f"❌ 文件{filename}处理失败：{str(e)}")
                continue


# Script entry point: run the full prediction/evaluation pipeline.
if __name__ == '__main__':
    main()