# backend_combined_optimized.py - 优化后的完整后端服务
import flask_sqlalchemy
from flask import Flask, request, jsonify, send_from_directory
from flask_cors import CORS
import geopandas as gpd
import json
import os
from sqlalchemy import create_engine, text, cast, String, or_
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import warnings
import pathlib
from esda import Moran, Moran_Local, G_Local
from libpysal.weights import KNN
import pymysql
import traceback
import hashlib
import decimal
from flask_cors import CORS

warnings.filterwarnings('ignore')

# NOTE: the Flask application (with CORS enabled) is created once in the
# configuration section below. A duplicate `Flask(__name__)` / `CORS(app)`
# pair that used to live here was removed — it was immediately shadowed by
# the second creation and never served a request.

# ==================== Configuration ====================
# Database credentials are read from the environment when present so secrets
# need not live in source control; the hard-coded values remain as
# development defaults for backward compatibility.
MYSQL_USER = os.environ.get('MYSQL_USER', 'root')
MYSQL_PASSWORD = os.environ.get('MYSQL_PASSWORD', '123456')
MYSQL_HOST = os.environ.get('MYSQL_HOST', 'localhost')
MYSQL_PORT = int(os.environ.get('MYSQL_PORT', '3306'))
MYSQL_DB_NAME = os.environ.get('MYSQL_DB', '工程实践3')

MYSQL_URI = f'mysql+pymysql://{MYSQL_USER}:{MYSQL_PASSWORD}@{MYSQL_HOST}:{MYSQL_PORT}/{MYSQL_DB_NAME}'

# Shared settings for the spatial-analysis pipeline. The connection details
# reuse the MYSQL_* constants above so credentials are defined in one place
# (previously they were duplicated literally in this dict).
CFG = {
    "shp_file": "china_province.geojson",  # province boundary GeoJSON
    "db_config": {
        'host': MYSQL_HOST,
        'user': MYSQL_USER,
        'database': MYSQL_DB_NAME,
        'port': MYSQL_PORT,
        'charset': 'utf8mb4',
        'password': MYSQL_PASSWORD,
    },
    "db_table": "年度数据",     # annual GDP table
    "join_key_shp": "name",    # join key on the GeoJSON side
    "join_key_db": "地区",      # join key on the database side
    "out_dir": "output",
    "perm": 99,  # permutation count for significance tests (was 999; lowered for speed)
}

# Ensure the output directory exists before any result is written.
pathlib.Path(CFG["out_dir"]).mkdir(exist_ok=True)

# ==================== Flask application setup ====================
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = MYSQL_URI
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_POOL_RECYCLE'] = 280
app.config['JSON_AS_ASCII'] = False  # allow non-ASCII (Chinese) text in JSON responses
CORS(app)
db = flask_sqlalchemy.SQLAlchemy(app)

# Tables exposed through the generic /api/<table_name> endpoints.
TARGET_TABLES = [
    '人口数据', '地方财政支出数据', '季度指数数据',
    '季度数据', '年度数据', '消费品数据'
]

# Filled by reflect_tables() with SQLAlchemy Table objects keyed by name.
TableMap = {}

def reflect_tables():
    """Reflect every target MySQL table into TableMap for later querying."""
    with app.app_context():
        metadata = db.MetaData()
        metadata.reflect(bind=db.engine)
        TableMap.update({
            name: db.Table(name, metadata, autoload_with=db.engine)
            for name in TARGET_TABLES
        })


# Display unit for each table's numeric columns (consumed by the frontend).
UNIT_MAP = {
    '人口数据': '万',
    '地方财政支出数据': '亿',
    '季度指数数据': '',  # index values carry no unit
    '季度数据': '亿',
    '年度数据': '亿',
    '消费品数据': '亿',
}
# Columns holding plain text — never formatted as numbers, no unit applied.
TEXT_COLS = ['地区', '省份', '城市', '名称', '描述', '指标名称']


def _format_float(float_value, text):
    """Format *float_value* using the decimal places visible in *text*.

    Keeps the number of significant (trailing-zero-stripped) decimals found
    after the '.' in *text*, capped at 4. Integral values render without a
    decimal point; a non-integral float whose textual form lacks a '.'
    (e.g. scientific notation '1e-07') is truncated to int, mirroring the
    historical behaviour.
    """
    if float_value.is_integer():
        return str(int(float_value))
    if '.' in text:
        significant = len(text.split('.')[1].rstrip('0'))
        return f"{float_value:.{min(significant, 4)}f}"
    return str(int(float_value))


def format_numeric_value(value):
    """Smartly format a value for display, preserving up to 4 significant decimals.

    Returns a string for anything numeric (integral floats lose the '.0'),
    None for None, and the original input unchanged for blank or
    unparseable strings. Decimal values from SQLAlchemy are handled.
    """
    if value is None:
        return None

    # DECIMAL columns come back from the driver as decimal.Decimal.
    if isinstance(value, decimal.Decimal):
        value = float(value)

    if isinstance(value, int):
        return str(value)

    if isinstance(value, float):
        return _format_float(value, str(value))

    if isinstance(value, str):
        stripped = value.strip()
        if not stripped:
            return value  # blank strings pass through untouched

        try:
            if '.' not in stripped:
                try:
                    return str(int(stripped))
                except ValueError:
                    pass  # may still parse as a float, e.g. '1e3'
            return _format_float(float(stripped), stripped)
        except (ValueError, TypeError):
            return value  # not a number: return the original, unstripped

    return str(value)


# Converts result rows using the UNIT_MAP / TEXT_COLS conventions above.
def row2dict(row, columns):
    """Convert a result row to a dict, formatting non-text columns as numbers."""
    return {
        col: (getattr(row, col) if col in TEXT_COLS
              else format_numeric_value(getattr(row, col)))
        for col in columns
    }


def fake_pk(row, columns):
    """Derive a deterministic surrogate key by hashing all column values."""
    parts = [str(getattr(row, name)) for name in columns]
    digest = hashlib.md5('|'.join(parts).encode())
    return digest.hexdigest()


# ==================== Coupling-coordination analysis ====================
class CouplingCoordinationAnalysis:
    """Province-level population/GDP coupling-coordination computation.

    Loads the population and annual-GDP tables from MySQL, min-max
    normalises both series, derives the coupling degree (C), comprehensive
    development index (T) and coordination degree (D = sqrt(C*T)),
    classifies each province, and joins the results onto province
    geometries. The most recently processed year is cached on
    ``self.result_gdf``.
    """

    def __init__(self):
        self.engine = None       # SQLAlchemy engine, created lazily by init_engine()
        self.result_gdf = None   # GeoDataFrame with the last processed year's results

    def init_engine(self):
        """Create the database engine on first use; return True on success."""
        try:
            if self.engine is None:
                # NOTE(review): this connection string duplicates MYSQL_URI
                # defined at module level — keep the two in sync.
                self.engine = create_engine(
                    'mysql+pymysql://root:123456@localhost:3306/工程实践3',
                    pool_size=5,
                    max_overflow=10,
                    pool_recycle=3600,
                    pool_pre_ping=True
                )
            return True
        except Exception as e:
            print(f"数据库引擎初始化失败: {e}")
            return False

    def get_available_years(self):
        """Return the supported year range (2005-2024) as strings."""
        return [str(year) for year in range(2005, 2025)]

    def load_and_process_data(self, target_year='2024'):
        """Load population/GDP data for *target_year*, compute the coupling
        metrics and merge them with province geometries into self.result_gdf.

        Returns True on success, False on any failure (missing year column,
        missing GeoJSON file, database error).
        """
        connection = None
        try:
            print(f"开始处理 {target_year} 年数据...")

            if not self.init_engine():
                return False

            connection = self.engine.connect()

            print("加载人口数据...")
            population_df = pd.read_sql("SELECT * FROM `人口数据`", connection)
            print("人口数据加载完成")

            print("加载GDP数据...")
            gdp_df = pd.read_sql("SELECT * FROM `年度数据`", connection)
            print("GDP数据加载完成")

            print(f"人口数据形状: {population_df.shape}")
            print(f"GDP数据形状: {gdp_df.shape}")

            # Locate the column for the requested year in each table.
            # Assumes year columns embed the year in their name (e.g. '2024年')
            # — TODO confirm against the actual schema.
            pop_cols = [col for col in population_df.columns if str(target_year) in str(col)]
            gdp_cols = [col for col in gdp_df.columns if str(target_year) in str(col)]

            if not pop_cols:
                print(f"未找到 {target_year} 年的人口数据")
                return False
            if not gdp_cols:
                print(f"未找到 {target_year} 年的GDP数据")
                return False

            # Keep only region + the first matching year column from each table.
            pop_data = population_df[['地区', pop_cols[0]]].copy()
            pop_data.columns = ['region', 'population']
            gdp_data = gdp_df[['地区', gdp_cols[0]]].copy()
            gdp_data.columns = ['region', 'gdp']

            # Inner join: only provinces present in both tables survive.
            merged_data = pd.merge(pop_data, gdp_data, on='region', how='inner')
            print(f"数据合并完成，包含 {len(merged_data)} 个地区")

            # Clean: drop NAs, coerce to numeric, drop rows that fail coercion.
            merged_data = merged_data.dropna()
            merged_data['population'] = pd.to_numeric(merged_data['population'], errors='coerce')
            merged_data['gdp'] = pd.to_numeric(merged_data['gdp'], errors='coerce')
            merged_data = merged_data.dropna()

            print(f"数据清洗后，包含 {len(merged_data)} 个地区")

            # Compute C/T/D metrics and classification labels (mutates merged_data).
            result_data = self.calculate_coupling_coordination(merged_data)

            # Load province geometries.
            # NOTE(review): hard-coded filename duplicates CFG["shp_file"].
            if not os.path.exists('china_province.geojson'):
                print("错误: 找不到 china_province.geojson 文件")
                return False

            geo_data = gpd.read_file('china_province.geojson')
            print(f"原始地理数据包含 {len(geo_data)} 个区域")

            # Restrict to the 31 mainland province-level regions.
            if 'name' in geo_data.columns:
                geo_data = geo_data[geo_data['name'] != '']
                mainland_provinces = [
                    '北京市', '天津市', '河北省', '山西省', '内蒙古自治区', '辽宁省', '吉林省', '黑龙江省',
                    '上海市', '江苏省', '浙江省', '安徽省', '福建省', '江西省', '山东省', '河南省', '湖北省',
                    '湖南省', '广东省', '广西壮族自治区', '海南省', '重庆市', '四川省', '贵州省', '云南省',
                    '西藏自治区', '陕西省', '甘肃省', '青海省', '宁夏回族自治区', '新疆维吾尔自治区'
                ]
                geo_data = geo_data[geo_data['name'].isin(mainland_provinces)]

            print(f"清理后地理数据包含 {len(geo_data)} 个区域")

            # Attach metrics to geometries; provinces without data are dropped.
            self.result_gdf = geo_data.merge(result_data, left_on='name', right_on='region', how='inner')

            print(f"{target_year} 年数据处理完成，包含 {len(self.result_gdf)} 个地区")
            return True

        except Exception as e:
            print(f"{target_year} 年数据处理错误: {e}")
            import traceback
            traceback.print_exc()
            return False
        finally:
            if connection:
                connection.close()

    def calculate_coupling_coordination(self, data):
        """Compute C, T, D and classification labels; mutates and returns *data*."""
        print("计算耦合协调度...")

        # Min-max normalise population and GDP to [0, 1].
        scaler = MinMaxScaler()
        data[['population_norm', 'gdp_norm']] = scaler.fit_transform(data[['population', 'gdp']])

        U_p = data['population_norm'].values
        U_e = data['gdp_norm'].values

        # Coupling degree C: closeness of the two subsystems' levels.
        # epsilon guards against division by zero when both values are 0.
        epsilon = 1e-8
        C_numerator = U_p * U_e
        C_denominator = ((U_p + U_e) / 2) ** 2 + epsilon
        C = np.sqrt(C_numerator / C_denominator)

        # Comprehensive index T (equal weights) and coordination degree D.
        alpha, beta = 0.5, 0.5
        T = alpha * U_p + beta * U_e
        D = np.sqrt(C * T)

        # Classify each province: coordination level by D-band, development
        # type by which subsystem lags (0.15 tolerance band).
        coordination_level = []
        development_type = []

        for i in range(len(D)):
            if D[i] >= 0.8:
                level = "优质协调"
            elif D[i] >= 0.7:
                level = "中级协调"
            elif D[i] >= 0.6:
                level = "初级协调"
            elif D[i] >= 0.5:
                level = "勉强协调"
            elif D[i] >= 0.3:
                level = "中度失调"
            else:
                level = "严重失调"

            if U_p[i] > U_e[i] + 0.15:
                dev_type = "经济滞后型"
            elif U_e[i] > U_p[i] + 0.15:
                dev_type = "人口滞后型"
            else:
                dev_type = "同步发展型"

            coordination_level.append(level)
            development_type.append(dev_type)

        # Store all derived columns back onto the frame.
        data['U_p'] = U_p
        data['U_e'] = U_e
        data['coupling_degree'] = C
        data['comprehensive_index'] = T
        data['coordination_degree'] = D
        data['coordination_level'] = coordination_level
        data['development_type'] = development_type
        data['final_type'] = [f"{level}-{dtype}" for level, dtype in zip(coordination_level, development_type)]

        print("耦合协调度计算完成")
        return data

    def get_all_trend_data(self):
        """Collect per-province records for every year 2005-2024.

        Re-runs load_and_process_data for each year (expensive: 20 full DB +
        GeoJSON loads); years that fail are skipped. Returns a list of dicts.
        """
        try:
            trend_data = []
            years = [str(year) for year in range(2005, 2025)]

            print("开始加载所有年份的趋势数据...")

            for i, year in enumerate(years):
                print(f"进度: {i + 1}/{len(years)} - 处理 {year} 年数据")

                if self.load_and_process_data(year):
                    for idx, row in self.result_gdf.iterrows():
                        trend_data.append({
                            'year': int(year),
                            'region': row['region'],
                            'coordination_degree': float(row['coordination_degree']),
                            'coordination_level': row['coordination_level'],
                            'development_type': row['development_type'],
                            # NOTE(review): int() truncates fractional population
                            # values — confirm this is intended.
                            'population': int(row['population']),
                            'gdp': float(row['gdp'])
                        })
                else:
                    print(f"跳过 {year} 年数据（处理失败）")

            print(f"趋势数据加载完成，共 {len(trend_data)} 条记录")
            return trend_data
        except Exception as e:
            print(f"获取趋势数据错误: {e}")
            return []


# ==================== GDP空间分析功能（优化版） ====================
def get_database_data():
    """Load the configured annual-data table from MySQL into a DataFrame.

    Returns None when the table is missing or the connection fails.
    The connection (and cursor) are always released, even when an exception
    is raised mid-way — the original leaked both on any error.
    """
    print("连接数据库获取真实数据...")
    conn = None
    try:
        conn = pymysql.connect(**CFG["db_config"])

        # Cursor is closed by the context manager even if a query fails.
        with conn.cursor() as cursor:
            cursor.execute(f"SHOW TABLES LIKE '{CFG['db_table']}'")
            if cursor.fetchone() is None:
                print(f"表 {CFG['db_table']} 不存在")
                return None

            cursor.execute(f"DESCRIBE {CFG['db_table']}")
            columns = [col[0] for col in cursor.fetchall()]
            print(f"表字段: {columns}")

        # Table name comes from trusted local config, not user input.
        df = pd.read_sql_query(f"SELECT * FROM {CFG['db_table']}", conn)

        print(f"成功加载 {len(df)} 条记录")
        print("数据样例:")
        print(df.head())

        return df

    except Exception as e:
        print(f"数据库连接失败: {e}")
        return None
    finally:
        if conn is not None:
            conn.close()


def detect_year_columns(df):
    """Return the DataFrame columns that look like per-year value columns.

    A column qualifies when it contains '年', is a bare 4-digit year between
    2000 and 2030, or mentions 'year' (or is exactly 'y'). Known key/id
    columns are skipped.
    """
    year_columns = []
    for col in df.columns:
        if col in [CFG['join_key_db'], 'id', 'ID', '序号']:
            continue

        name = str(col)
        if '年' in name:
            year_columns.append(col)
        elif name.isdigit() and len(name) == 4 and 2000 <= int(name) <= 2030:
            year_columns.append(col)
        # Fix: the previous check matched any column merely *containing* the
        # letter 'y' (e.g. 'money', 'county'), producing false positives.
        elif 'year' in name.lower() or name.lower() == 'y':
            year_columns.append(col)

    print(f"检测到年份列: {year_columns}")
    return year_columns


def perform_real_spatial_analysis():
    """Run the spatial-autocorrelation pipeline on real database data.

    For every detected year column: joins GDP values onto province
    geometries, builds a row-standardised KNN(4) spatial weights matrix,
    computes global Moran's I, local Moran (LISA) and Getis-Ord Gi*
    statistics, and writes per-year Shapefile/CSV output plus an
    available_years.json index. Returns True when the pipeline completed
    (individual years may still have failed), False on fatal errors.
    """
    print("开始真实数据空间分析（优化版）...")

    try:
        # 1. Load the province boundary file.
        if not os.path.exists(CFG["shp_file"]):
            print(f"矢量文件不存在: {CFG['shp_file']}")
            return False

        gdf_base = gpd.read_file(CFG["shp_file"])
        print(f"加载矢量数据: {len(gdf_base)} 个省份")
        print(f"省份列表: {list(gdf_base[CFG['join_key_shp']].values)}")

        # 2. Load the GDP table from MySQL.
        df_db = get_database_data()
        if df_db is None or len(df_db) == 0:
            print("无法获取数据库数据")
            return False

        # 3. Detect which columns hold per-year values.
        year_columns = detect_year_columns(df_db)
        if not year_columns:
            print("未找到有效的年份列")
            return False

        # 4. Analyse each year independently; a failure skips only that year.
        successful_years = []

        for year_col in year_columns:
            try:
                print(f"处理年份: {year_col}")

                # Join GDP values onto geometries by province name.
                gdf_merged = gdf_base.merge(
                    df_db[[CFG["join_key_db"], year_col]],
                    left_on=CFG["join_key_shp"],
                    right_on=CFG["join_key_db"],
                    how='inner'
                )

                print(f"合并后数据量: {len(gdf_merged)} 个省份")

                # Coerce GDP to numeric and drop provinces without data.
                gdf_merged["gdp"] = pd.to_numeric(gdf_merged[year_col], errors="coerce")
                gdf_merged = gdf_merged[gdf_merged["gdp"].notna()].copy()

                print(f"有效GDP数据: {len(gdf_merged)} 个省份")
                print(
                    f"GDP统计: 均值={gdf_merged['gdp'].mean():.0f}, 最大值={gdf_merged['gdp'].max():.0f}, 最小值={gdf_merged['gdp'].min():.0f}")

                # Too few observations makes the statistics meaningless.
                if len(gdf_merged) < 5:
                    print(f"{year_col} 有效数据不足，跳过")
                    continue

                # Project to Web Mercator so nearest-neighbour distances are
                # computed in metres rather than degrees.
                gdf_projected = gdf_merged.to_crs(epsg=3857)

                # Spatial weights: fixed K=4 nearest neighbours (speed
                # optimisation), row-standardised.
                print("计算空间权重...")
                k = min(4, len(gdf_projected) - 1)
                w = KNN.from_dataframe(gdf_projected, k=k)
                w.transform = "r"

                # The variable under analysis.
                y = gdf_projected["gdp"].values

                # Global Moran's I; permutations lowered to 99 (see CFG["perm"]).
                print("计算全局Moran's I...")
                moran = Moran(y, w, permutations=CFG["perm"])
                print(f"全局Moran's I: {moran.I:.3f}, p-value: {moran.p_sim:.3f}")

                # Local Moran (LISA) cluster statistics per province.
                print("进行LISA分析...")
                lisa = Moran_Local(y, w, permutations=CFG["perm"])
                gdf_projected["lisa_I"] = lisa.Is
                gdf_projected["lisa_p"] = lisa.p_sim
                gdf_projected["lisa_q"] = lisa.q

                # Getis-Ord Gi* hot/cold-spot statistics.
                print("进行Gi*热点分析...")
                gi = G_Local(y, w, permutations=CFG["perm"])
                gdf_projected["gi_z"] = gi.Zs
                gdf_projected["gi_p"] = gi.p_sim

                # Back to WGS84 for web mapping.
                gdf_result = gdf_projected.to_crs(epsg=4326)

                # Human-readable labels for the LISA quadrant codes.
                lisa_labels = {1: "高-高聚类", 2: "低-低聚类", 3: "高-低异常", 4: "低-高异常"}
                gdf_result["lisa_type"] = gdf_result["lisa_q"].map(lisa_labels)

                def classify_gi(row):
                    # Hot/cold spot only when significant at the 5% level.
                    if pd.isna(row["gi_p"]) or row["gi_p"] >= 0.05:
                        return "不显著"
                    elif row["gi_z"] > 0:
                        return "热点"
                    else:
                        return "冷点"

                gdf_result["gi_type"] = gdf_result.apply(classify_gi, axis=1)
                gdf_result["analysis_year"] = year_col
                # Global statistics are replicated onto every row for the output.
                gdf_result["moran_I"] = moran.I
                gdf_result["moran_p"] = moran.p_sim

                # Persist Shapefile + CSV for this year.
                if save_analysis_result(gdf_result, year_col):
                    successful_years.append(year_col)
                    print(f"{year_col} 分析完成")

                    # Log the distribution of cluster/hot-spot classes.
                    lisa_counts = gdf_result["lisa_type"].value_counts()
                    gi_counts = gdf_result["gi_type"].value_counts()
                    print(f"LISA分布: {lisa_counts.to_dict()}")
                    print(f"Gi*分布: {gi_counts.to_dict()}")

            except Exception as e:
                print(f"{year_col} 处理失败: {e}")
                continue

        # 5. Write the index of successfully processed years.
        years_data = {
            "available_years": successful_years,
            "total_count": len(successful_years),
            "status": "success",
            "message": f"成功处理 {len(successful_years)} 个年份"
        }

        with open(f"{CFG['out_dir']}/available_years.json", 'w', encoding='utf-8') as f:
            json.dump(years_data, f, ensure_ascii=False, indent=2)

        print(f"真实数据空间分析完成!")
        print(f"输出目录: {CFG['out_dir']}")
        print(f"可用年份: {successful_years}")

        return True

    except Exception as e:
        print(f"空间分析整体失败: {e}")
        return False


def save_analysis_result(gdf, year):
    """Write one year's spatial-analysis results to Shapefile and CSV.

    Returns True on success, False (after logging the error) on failure.
    """
    try:
        wanted = [
            CFG["join_key_shp"], 'gdp', 'lisa_I', 'lisa_p', 'lisa_q',
            'lisa_type', 'gi_z', 'gi_p', 'gi_type', 'analysis_year',
            'moran_I', 'moran_p', 'geometry'
        ]
        present = [name for name in wanted if name in gdf.columns]
        gdf_output = gdf[present].copy()

        clean_year = year.replace(' ', '_').replace('/', '_')

        shp_path = f"{CFG['out_dir']}/{clean_year}_spatial_analysis.shp"
        gdf_output.to_file(shp_path, encoding='utf-8')
        print(f"保存结果: {shp_path}")

        # Chinese column headers for the CSV export.
        headers = {
            CFG["join_key_shp"]: "省份名称",
            'gdp': "GDP数值",
            'lisa_I': "LISA统计量",
            'lisa_p': "LISA_p值",
            'lisa_q': "LISA类型编码",
            'lisa_type': "LISA聚类类型",
            'gi_z': "Gi*_Z值",
            'gi_p': "Gi*_p值",
            'gi_type': "Gi*热点类型",
            'analysis_year': "分析年份",
            'moran_I': "全局Moran_I",
            'moran_p': "全局Moran_p值"
        }

        csv_path = f"{CFG['out_dir']}/{clean_year}_analysis.csv"
        csv_table = gdf_output.drop(columns=['geometry']).rename(columns=headers)
        # utf-8-sig so Excel opens the Chinese headers correctly.
        csv_table.to_csv(csv_path, index=False, encoding='utf-8-sig')
        print(f"保存CSV: {csv_path}")

        return True

    except Exception as e:
        print(f"保存失败 {year}: {e}")
        return False


# ==================== Analyzer singleton ====================
# Single shared instance reused by all routes below; its database engine is
# created lazily on first use.
analyzer = CouplingCoordinationAnalysis()


# ==================== 路由定义 ====================


# ==================== 耦合协调度分析路由 ====================

@app.route('/api/data', methods=['GET'])
def get_analysis_data():
    """Return the coordination GeoJSON plus summary statistics for one year."""
    year = request.args.get('year', '2024')
    print(f"收到API数据请求，年份: {year}")

    if not analyzer.load_and_process_data(year):
        return jsonify({
            "success": False,
            "message": f"{year}年数据处理失败",
            "year": year
        })

    gdf = analyzer.result_gdf.copy()

    # Coerce numpy scalars to native Python types so they JSON-serialise.
    for col in gdf.select_dtypes(include=[np.number]).columns:
        gdf[col] = gdf[col].apply(lambda v: None if pd.isna(v) else float(v))
    for col in gdf.select_dtypes(include=['object']).columns:
        gdf[col] = gdf[col].apply(lambda v: None if v is None else str(v))

    geojson_data = json.loads(gdf.to_json())

    degrees = gdf['coordination_degree']
    stats = {
        "total_regions": len(gdf),
        "coordination_range": {
            "min": float(degrees.min()),
            "max": float(degrees.max())
        },
        "level_distribution": gdf['coordination_level'].value_counts().to_dict(),
        "type_distribution": gdf['development_type'].value_counts().to_dict(),
        "average_coordination": float(degrees.mean())
    }

    print(f"返回 {year} 年数据，包含 {len(gdf)} 个地区")
    return jsonify({
        "success": True,
        "year": year,
        "geojson": geojson_data,
        "statistics": stats
    })


@app.route('/api/regions', methods=['GET'])
def get_regions_list():
    """Return per-region coordination metrics for a year, ranked descending."""
    year = request.args.get('year', '2024')
    print(f"收到地区列表请求，年份: {year}")

    if not analyzer.load_and_process_data(year):
        return jsonify({
            "success": False,
            "message": f"{year}年数据加载失败",
            "year": year
        })

    regions = [
        {
            "name": row['region'],
            "coordination_degree": float(row['coordination_degree']),
            "final_type": row['final_type'],
            "coordination_level": row['coordination_level'],
            "development_type": row['development_type'],
            "population": int(row['population']),
            "gdp": float(row['gdp']),
            "U_p": float(row['U_p']),
            "U_e": float(row['U_e']),
            "coupling_degree": float(row['coupling_degree'])
        }
        for _, row in analyzer.result_gdf.iterrows()
    ]
    # Best-coordinated regions first.
    regions.sort(key=lambda r: r['coordination_degree'], reverse=True)

    print(f"返回 {year} 年地区列表，共 {len(regions)} 个地区")
    return jsonify({
        "success": True,
        "year": year,
        "regions": regions
    })


# ==================== 基础路由 ====================#
@app.route('/api/tables')
def list_tables():
    """List every target table with its API base path and display unit."""
    tables_info = [
        {
            'name': name,
            'api_base_path': f'/api/{name}',
            'unit': UNIT_MAP.get(name, ''),
        }
        for name in TARGET_TABLES
    ]
    return jsonify(code=0, msg='ok', data=tables_info)


@app.route('/api/<table_name>')
def list_data(table_name):
    """Paginated listing of a reflected table with optional substring search.

    Query args: page (1-based), size, search (matched against all string
    columns). Each row carries a synthetic `_pk` hash for detail lookups.
    """
    if table_name not in TableMap:
        return jsonify(code=404, msg='表不存在'), 404
    tbl = TableMap[table_name]
    cols = [c.name for c in tbl.columns]
    page = int(request.args.get('page', 1))
    size = int(request.args.get('size', 20))
    search = request.args.get('search', '').strip()

    stmt = tbl.select()
    if search:
        like_str = f'%{search}%'
        # Fix: the previous exact comparison `str(c.type) == 'VARCHAR'` never
        # matched parameterised types such as VARCHAR(255), so the search
        # filter silently applied to no columns at all.
        str_cols = [c for c in tbl.columns
                    if str(c.type).upper().startswith(('VARCHAR', 'CHAR', 'TEXT'))]
        if str_cols:  # or_() with zero clauses is invalid/deprecated
            stmt = stmt.where(
                or_(*[cast(c, String).like(like_str) for c in str_cols]))

    total = db.session.execute(
        stmt.with_only_columns(db.func.count()).order_by(None)).scalar()
    rows = db.session.execute(
        stmt.offset((page - 1) * size).limit(size)).all()

    data = []
    for r in rows:
        d = row2dict(r, cols)
        d['_pk'] = fake_pk(r, cols)
        data.append(d)

    return jsonify(code=0, msg='ok', data=data, total=total)


@app.route('/api/<table_name>/<pk>')
def one_data(table_name, pk):
    """Fetch a single row identified by its synthetic `_pk` hash.

    NOTE: scans the whole table because the rows have no real primary key.
    """
    if table_name not in TableMap:
        return jsonify(code=404, msg='表不存在'), 404
    tbl = TableMap[table_name]
    cols = [c.name for c in tbl.columns]
    match = next(
        (row for row in db.session.execute(tbl.select()).all()
         if fake_pk(row, cols) == pk),
        None,
    )
    if match is not None:
        return jsonify(code=0, msg='ok', data=row2dict(match, cols))
    return jsonify(code=404, msg='记录不存在'), 404


# Frontend page routes
@app.route('/')
def admin():
    """Serve the admin single-page frontend from the working directory."""
    return send_from_directory('.', 'admin.html')


@app.route('/<path:path>')
def static_files(path):
    """Serve any other static asset (send_from_directory guards traversal)."""
    return send_from_directory('.', path)


# ================= 基础路由 ==================#

@app.route('/api/all_trend', methods=['GET'])
def get_all_trend_data():
    """Return coordination trend records for every supported year."""
    print("收到全部趋势数据请求")
    trend_data = analyzer.get_all_trend_data()

    if not trend_data:
        return jsonify({
            "success": False,
            "message": "趋势数据加载失败"
        })

    distinct_years = {item['year'] for item in trend_data}
    return jsonify({
        "success": True,
        "total_records": len(trend_data),
        "years_covered": len(distinct_years),
        "trend_data": trend_data
    })


@app.route('/api/years', methods=['GET'])
def get_available_years():
    """List the years the coupling-coordination analysis supports."""
    years = analyzer.get_available_years()
    return jsonify({
        "success": True,
        "years": years,
        "total_years": len(years),
    })


# ==================== GDP空间分析路由 ====================

@app.route('/api/spatial/test', methods=['GET'])
def test_spatial_api():
    """Health-check endpoint for the spatial-analysis API."""
    info = {
        "status": "success",
        "message": "GDP空间分析API服务（优化版）",
        "version": "1.0",
        "data_source": "MySQL数据库"
    }
    return jsonify(info)


@app.route('/api/spatial/available-years', methods=['GET'])
def get_spatial_available_years():
    """Report which years have precomputed spatial-analysis output."""
    try:
        file_path = f"{CFG['out_dir']}/available_years.json"
        if not os.path.exists(file_path):
            # No index file yet: analysis has never been run.
            return jsonify({
                "available_years": [],
                "total_count": 0,
                "status": "no_data",
                "message": "请先运行数据刷新"
            })
        with open(file_path, 'r', encoding='utf-8') as f:
            return jsonify(json.load(f))

    except Exception as e:
        return jsonify({
            "available_years": [],
            "total_count": 0,
            "status": "error",
            "message": str(e)
        })


@app.route('/api/spatial/data/<year>', methods=['GET'])
def get_spatial_data_api(year):
    """Return the precomputed spatial-analysis GeoJSON for *year*."""
    try:
        clean_year = year.replace(' ', '_').replace('/', '_')
        shp_path = f"{CFG['out_dir']}/{clean_year}_spatial_analysis.shp"

        if not os.path.exists(shp_path):
            # No precomputed output: tell the client how to generate it.
            return jsonify({
                "type": "FeatureCollection",
                "features": [],
                "message": f"未找到 {year} 的预处理数据，请先访问 /api/spatial/refresh 生成数据",
                "action_required": True
            }), 404

        geojson = gpd.read_file(shp_path).to_json()
        print(f"从文件读取 {year} 的空间分析数据")
        return geojson, 200, {'Content-Type': 'application/json; charset=utf-8'}

    except Exception as e:
        return jsonify({"error": str(e)}), 500


@app.route('/api/spatial/stats/<year>', methods=['GET'])
def get_spatial_year_stats_api(year):
    """Summary statistics for one year's spatial-analysis output."""
    try:
        clean_year = year.replace(' ', '_').replace('/', '_')
        shp_path = f"{CFG['out_dir']}/{clean_year}_spatial_analysis.shp"

        if not os.path.exists(shp_path):
            return jsonify({
                "error": f"未找到 {year} 的数据",
                "solution": "请先访问 /api/spatial/refresh 生成空间分析数据"
            }), 404

        gdf = gpd.read_file(shp_path)
        gdp = gdf['gdp']
        return jsonify({
            "year": year,
            "province_count": len(gdf),
            "gdp_total": float(gdp.sum()),
            "gdp_avg": float(gdp.mean()),
            "gdp_max": float(gdp.max()),
            "gdp_min": float(gdp.min()),
            # Global Moran's I is replicated per row; read it off the first row.
            "moran_I": float(gdf['moran_I'].iloc[0]) if 'moran_I' in gdf.columns else None,
            "moran_p": float(gdf['moran_p'].iloc[0]) if 'moran_p' in gdf.columns else None,
            "lisa_distribution": gdf['lisa_type'].value_counts().to_dict(),
            "gi_distribution": gdf['gi_type'].value_counts().to_dict()
        })

    except Exception as e:
        return jsonify({"error": str(e)}), 500


@app.route('/api/spatial/refresh', methods=['POST'])
def refresh_spatial_data():
    """Re-run the full spatial analysis pipeline and report the outcome."""
    try:
        print("手动刷新空间分析数据（优化版）...")
        ok = perform_real_spatial_analysis()
        return jsonify({
            "status": "success" if ok else "error",
            "message": "空间分析数据刷新完成" if ok else "数据刷新失败",
            "timestamp": pd.Timestamp.now().isoformat()
        })
    except Exception as e:
        return jsonify({"status": "error", "message": str(e)})


# ==================== Entry point ====================

if __name__ == '__main__':
    # Reflect the MySQL tables before serving any /api/<table_name> request.
    reflect_tables()
    print("=" * 60)
    print("人口经济分析与空间分析后端服务（优化版）")
    print("支持功能:")
    print("  - 人口与经济耦合协调度分析")
    print("  - GDP空间自相关分析 (Moran's I, LISA, Gi*) - 优化参数")
    print("优化特性:")
    print("  - 排列检验次数: 99次（原999次）")
    print("  - 近邻数量: K=4（固定）")
    print("  - 计算速度: 大幅提升")
    print("支持年份: 2005-2024")
    print("API服务地址: http://localhost:5000")
    print("=" * 60)

    # Only initialise the database engine at startup.
    if analyzer.init_engine():
        print("数据库连接初始化成功")
    else:
        print("数据库连接初始化失败")

    # The expensive spatial analysis is NOT run at startup; it is triggered
    # on demand via POST /api/spatial/refresh.
    print("启动完成，空间分析需要手动触发")
    print("使用: POST /api/spatial/refresh 来生成空间分析数据")

    app.run(debug=False)