import asyncio
import datetime
import json
import threading
from profit import *
from sqlalchemy import inspect
from sqlalchemy import text
import aiohttp
import requests
from flask import render_template
from optimization import *
from utils.util import pao_pai, zeng_ya, lx_qi_ju, jd_qi_ju
from sqlalchemy import text, MetaData, Table
import uuid
import pandas as pd
from EUR_predict import build_ensemble_model, process_data, cross_validate_and_predict, eur_function
from sqlalchemy import create_engine
from sklearn.ensemble import RandomForestRegressor, StackingRegressor, GradientBoostingRegressor
from sklearn.linear_model import LinearRegression
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn.neural_network import MLPRegressor
import math
from math import sqrt
from datetime import datetime
from datetime import timedelta
from sklearn.metrics import mean_absolute_error, mean_squared_error
from utils.tables import *
import warnings
from ctypes.wintypes import DOUBLE
from sqlalchemy import create_engine, Column, Float, String, Integer, MetaData, Table, Double, VARCHAR, DECIMAL
from flask import Flask, request, jsonify
from profit import add_first_positive_year, calculate_annual_total_cost, calculate_cash_income, \
    calculate_net_present_value, calculate_present_cashoutflow, combine_data, generate_cashflow_out
# from optimization import encode_categorical_variables
import random
import hashlib
from data_process import *
from revise_irr import *
import numpy_financial as npf
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.kernel_ridge import KernelRidge

warnings.filterwarnings("ignore")  # silence noisy library warnings globally

# Database connection settings
# NOTE(review): credentials are hard-coded in source — consider moving them to
# environment variables or a config file before deployment.
username = 'root'
password = '123456'
host = '192.168.0.189'
port = '3306'
database = 'shale-gas'

# URL of the Java forecast-update endpoint
url = f'http://{host}:8080/zyw-aps/gas_production_platform_pre/gasProductionPlatformPre/updateForecast'


def call_java_api_in_thread(javaUrl):
    """Fire a GET request at the Java forecast-update API.

    The response body is not inspected — this is a fire-and-forget call made
    from a worker thread.

    Returns a small status dict on success, or None when the request raises
    (callers should treat None as failure).
    """
    print("Calling Java API")
    try:
        # A timeout prevents the worker thread from hanging forever when the
        # Java service is unreachable (the original call had none).
        requests.get(javaUrl, timeout=30)
        return {
            "success": True,
            "message": "Java API",
            "code": 200
        }
    except Exception as e:
        print(f"Error calling Java API: {e}")
        # Make the previously-implicit None return explicit.
        return None


# Establish DB connection
def get_db_connection():
    """Create and return a SQLAlchemy engine for the shale-gas MySQL database."""
    dsn = f'mysql+pymysql://{username}:{password}@{host}:{port}/{database}'
    return create_engine(dsn)


# Module-level engine shared by code below (see also get_db_connection)
engine = create_engine(f'mysql+pymysql://{username}:{password}@{host}:{port}/{database}')

# Base-model mapping: shorthand key -> model class name + constructor params
model_mapping = {
    "rf": {"model": "RandomForestRegressor", "params": {"n_estimators": 100, "random_state": 42}},
    # NOTE(review): the key "svr" actually maps to XGBRegressor, not an SVR —
    # confirm this is intentional.
    "svr": {"model": "XGBRegressor", "params": {"n_estimators": 100, "random_state": 42}},
    "lgb": {"model": "LGBMRegressor",
            "params": {"n_estimators": 100, 'objective': 'regression', "metric": 'mae', "verbose": -1,
                       "random_state": 42}},
}

# Final-model mapping — presumably the meta-estimator choices for a stacking
# ensemble (the file imports StackingRegressor); confirm against the caller.
final_model_mapping = {
    "mlp": {"model": "MLPRegressor",
            "params": {"hidden_layer_sizes": [200, 100, 50], "activation": "relu", "solver": "adam", "max_iter": 500,
                       "random_state": 42}},
    "lasso": {"model": "Lasso", "params": {"alpha": 0.01}},
    "ridge": {"model": "Ridge", "params": {"alpha": 0.01}},
    "gb": {"model": "GradientBoostingRegressor", "params": {"n_estimators": 300, "random_state": 42, "loss": "huber"}}
}


@app.route('/')
def index():
    """Render the input form page.

    Flask resolves template names relative to the app's configured templates
    folder; a '../templates/' prefix is rejected by the Jinja loader, so the
    bare template name must be used.
    """
    return render_template('form.html')


@app.route('/deal', methods=['POST'])  # compute incremental gas production (增产气量)
def process_form():
    """Compute per-well incremental gas production for the selected measure.

    Reads daily production from the DB, applies the measure-specific helper
    (foam drainage / pressurization / continuous / intermittent gas lift) to
    the uploaded ledger file, rewrites the `gas_production_increase` table and
    returns a status JSON.
    """
    # pao_pai('alldata-2024.1.2.csv', '☆泡排台账-2023.12.31（川庆）.csv', 0.030, 30, '泡排增产气量.csv')
    # Run DB reads inside the application context.
    with app.app_context():
        results = GasProductionWell.query.all()
        # ORM rows -> dicts keyed by the Chinese column names the helpers expect.
        data = [
            {'id': result.id, '井号': result.well_no, '日期': result.collect_date, '日产气': result.production_gas_day}
            for result in results]
        df_rcq = pd.DataFrame(data)

    selectOption = request.form.get('selectOption')  # measure type
    pppath = request.files.get('pppath')  # uploaded ledger file
    dr = float(request.form.get('dr'))
    d = int(request.form.get('d'))
    countdays = int(request.form.get('countdays'))
    # price = float(request.form.get('price', '1.37'))

    print(selectOption, pppath, dr, d, countdays)

    if selectOption == '泡排':  # foam drainage
        df_pp = pao_pai(df_rcq, pppath, dr, d, countdays)
        df = df_pp[['井号', '平台', '施工日期', '停止施工日期或当前日期',
                    '统计天数', '措施前平均日产', '措施后平均日产', '增幅',
                    '绝对增加量', '增产气量']]

    elif selectOption == '增压':  # pressurization
        df_zy = zeng_ya(df_rcq, pppath, dr, d, countdays)
        df = df_zy[['井号', '平台', '增压日期', '增压停止日期',
                    '增压天数\n（天）', '措施前平均日产', '措施后平均日产', '增幅',
                    '绝对增加量', '增产气量']]

    elif selectOption == '连续气举':  # continuous gas lift
        df_lx_qi = lx_qi_ju(df_rcq, pppath, dr, d, countdays)
        df = df_lx_qi[['井号', '平台', '最初施工日期', '最后施工日期',
                       '施工天数', '措施前平均日产', '措施后平均日产', '增幅',
                       '绝对增加量', '增产气量']]

    elif selectOption == '间断气举':  # intermittent gas lift
        df_jd_qi = jd_qi_ju(df_rcq, pppath, dr, d, countdays)
        df = df_jd_qi[['井号', '平台', '最初施工日期', '最后施工日期',
                       '施工天数', '措施前平均日产', '措施后平均日产', '增幅',
                       '绝对增加量', '增产气量']]

    else:
        # Previously an unrecognized option fell through and crashed with a
        # NameError on `df`; fail fast with an explicit client error instead.
        return {
            "success": False,
            "message": f"Unknown selectOption: {selectOption}",
            "code": 400,
            "result": {}
        }

    # df['inc_output'] = df['production_inc'] * price
    df['id'] = df.index.map(lambda _: str(uuid.uuid4()))
    df['type'] = df.index.map(lambda _: selectOption)
    # Rename the Chinese helper columns to the DB schema's English names.
    df.columns = ['well_no', 'platform_no', 'begin_time', 'end_time',
                  'days', 'before_pro', 'after_pro', 'amplify',
                  'absolute_inc', 'production_inc', 'id', 'type']
    # Normalize legacy platform numbers to their current designations.
    df.loc[df['platform_no'] == '威208', 'platform_no'] = '威204H43'
    df.loc[df['platform_no'] == '威209', 'platform_no'] = '威204H62'
    # Clear the target table, then append the freshly computed rows.
    with app.app_context():
        db.session.execute(text("DELETE FROM gas_production_increase;"))
        db.session.commit()
    df.to_sql('gas_production_increase', con=db.engine, index=False, if_exists='append')

    # df.to_csv(savepath, encoding='utf-8-sig', index=False)

    json_response = {
        "success": True,
        "message": "",
        "code": 200,
        "result": {
        }
    }
    return json_response


# 确保分页参数有默认值
def get_pagination_args():
    """Return (page_number, page_size) from the query string, defaulting to 1 / 10."""
    page_number = request.args.get('page', 1, type=int)
    page_size = request.args.get('per_page', 10, type=int)
    return page_number, page_size


@app.route('/getIncreaseList', methods=['GET'])  # list incremental production records
def getIncreaseList():
    """Return one page of `gas_production_increase` records as JSON."""
    try:
        pageNo = int(request.args.get('pageNo'))
        pageSize = int(request.args.get('pageSize'))
    except (TypeError, ValueError):
        # Missing params make int(None) raise TypeError; junk raises ValueError.
        # Narrowed from a bare `except:` so real bugs are no longer swallowed.
        pageNo, pageSize = get_pagination_args()
    with app.app_context():
        # Paginate with Flask-SQLAlchemy's paginate helper.
        pagination = GasProductionIncrease.query.paginate(page=pageNo, per_page=pageSize, error_out=False)
        items = pagination.items

        # ORM rows -> JSON-friendly dicts (camelCase keys for the frontend).
        data = [
            {'id': result.id, 'wellNo': result.well_no, 'platformNo': result.platform_no,
             'beginTime': result.begin_time.strftime('%Y-%m-%d')
                , 'endTime': result.end_time.strftime('%Y-%m-%d'), 'days': result.days, 'beforePro': result.before_pro,
             'afterPro': result.after_pro, 'amplify': result.amplify, 'absoluteInc': result.absolute_inc,
             'productionInc': result.production_inc
             }
            for result in items]
    json_response = {
        "success": True,
        "message": "",
        "code": 200,
        "result": {
            "records": data,
            # Pagination metadata for the frontend.
            'total': pagination.total,
            'size': pageSize,
            'current': pageNo,
            "orders": [],
            "optimizeCountsql": True,
            "searchCount": "",
            "countId": None,
            "maxLimit": None,
            "pages": pagination.pages
        }
    }
    return json_response


def get_platform_info(platform_no):
    """Return (center, wells) coordinates for a platform.

    center: [lng, lat] of the platform, or [] when its coordinates are
    missing or non-numeric.
    wells: [lng, lat] pairs of the platform's wells, sorted by polar angle
    around the center so they trace the platform outline.

    Returns ([], []) when the platform does not exist. (Previously this
    returned `(jsonify(...), 404)`, which every caller then unpacked as
    `(center, wells)` and embedded a Flask Response object in its JSON.)
    """
    platform = GasBasePlatform.query.filter_by(platform_no=platform_no).first()
    if not platform:
        return [], []

    wells = []
    center = []
    for connect in platform.wells:
        try:
            well_info = [float(connect.well.lng), float(connect.well.lat)]
            wells.append(well_info)
        except (TypeError, ValueError, AttributeError):
            # Skip wells with missing or non-numeric coordinates.
            pass
    try:
        center = [float(platform.lng), float(platform.lat)]
        # Polar angle of each well around the platform center...
        polar_angles = [(calculate_polar_angle(coord[0], coord[1], center[0], center[1]), coord) for coord in wells]
        # ...then sort the wells by that angle.
        wells = [coord for angle, coord in sorted(polar_angles, key=lambda x: x[0])]
    except (TypeError, ValueError):
        # Platform coordinates missing/non-numeric: leave wells unsorted, center [].
        pass
    return center, wells


# Bearing (polar angle) from a center point to another point
def calculate_polar_angle(lon, lat, center_lon, center_lat):
    """Return the bearing from (center_lon, center_lat) to (lon, lat).

    Uses the great-circle initial-bearing formula; the result is in radians,
    normalized to [0, 2*pi).
    """
    # Work in radians throughout.
    lon_r, lat_r = math.radians(lon), math.radians(lat)
    clon_r, clat_r = math.radians(center_lon), math.radians(center_lat)

    # atan2(y, x) gives the bearing in (-pi, pi].
    dlon = lon_r - clon_r
    y = math.sin(dlon) * math.cos(lat_r)
    x = (math.cos(clat_r) * math.sin(lat_r)
         - math.sin(clat_r) * math.cos(lat_r) * math.cos(dlon))
    angle = math.atan2(y, x)

    # Normalize into [0, 2*pi).
    return angle if angle >= 0 else angle + 2 * math.pi


# Compute and return the per-platform input/output (investment/return) ratio
@app.route('/getIncreasePlatformList', methods=['GET', 'POST'])
def getIncreasePlatformList():
    """Aggregate incremental production per platform, join with measure costs,
    bucket ratios and volumes into four labelled intervals, persist the table
    to `gas_production_input_output`, and return records plus coordinates.
    """
    # selectOption = request.args.get('selectOption', '泡排')
    with app.app_context():
        # Fetch every incremental-production record.
        results = GasProductionIncrease.query.all()
        # ORM rows -> list of dicts.
        data = [
            {'platform_no': result.platform_no, 'production_inc': result.production_inc}
            for result in results]
        # All rows share the measure type written by /deal; take it from the first row.
        # NOTE(review): raises IndexError when the table is empty — confirm callers guarantee data.
        selectOption = results[0].type
        data = pd.DataFrame(data)
        production_sums = data.groupby('platform_no')['production_inc'].sum().reset_index()

        results = GasMeasuresCostManagement.query.all()
        # Per-platform cost of each production measure.
        data = [
            {'platform_no': result.platform_no, 'pp_cost': result.bubble_row_cost,
             'lxqj_cost': result.continuous_gas_lift_cost
                , 'jdqj_cost': result.intermittent_gas_lift_cost, 'zy_cost': result.pressurization_costs}
            for result in results]
        gas_cost = pd.DataFrame(data)
        # All platform numbers (used later to emit platforms that have no data).
        platforms = GasBasePlatform.query.all()
        platform_no_list = [platform.platform_no for platform in platforms]
        gas_sites = GasBaseSite.query.all()
    # Keep only the cost column matching the measure type:
    # 泡排 = foam drainage, 增压 = pressurization,
    # 连续气举 = continuous gas lift, 间断气举 = intermittent gas lift.
    if selectOption == '泡排':
        gas_cost = gas_cost[['platform_no', 'pp_cost']]

    elif selectOption == '增压':
        gas_cost = gas_cost[['platform_no', 'zy_cost']]

    elif selectOption == '连续气举':
        gas_cost = gas_cost[['platform_no', 'lxqj_cost']]

    elif selectOption == '间断气举':
        gas_cost = gas_cost[['platform_no', 'jdqj_cost']]

    # Revenue = incremental volume * 1.37 * 10000 — presumably gas price times
    # a unit-conversion factor; confirm against the pricing spec.
    production_sums['output'] = production_sums['production_inc'] * 1.37 * 10000

    input_output = pd.merge(production_sums, gas_cost, on=['platform_no'], how='left')
    input_output.columns = ['platform_no', 'production_inc', 'output', 'input']
    # Guard against division by zero when a platform has no recorded cost.
    input_output['production_ratio'] = np.where(input_output['input'] == 0, 0,
                                                input_output['output'] / input_output['input'])

    # Floor/ceil the production_ratio min/max to integers.
    min_val = np.floor(input_output['production_ratio'].min()).astype(int)
    max_val = np.ceil(input_output['production_ratio'].max()).astype(int)

    # Split [min_val, max_val] into four integer intervals.
    # NOTE(review): both branches of this conditional are identical — the else
    # branch was probably meant to round up (ceil division); confirm intent.
    interval_size = (max_val - min_val) // 4 if (max_val - min_val) % 4 == 0 else (max_val - min_val) // 4
    interval_ends = [min_val + i * interval_size for i in range(1, 5)]
    intervals = [f"{min_val}-{interval_ends[0]}",
                 f"{interval_ends[0]}-{interval_ends[1]}",
                 f"{interval_ends[1]}-{interval_ends[2]}",
                 f"{interval_ends[2]}-{max_val}"]

    # Map a ratio to the labelled interval it falls in.
    def assign_section(ratio):
        """Return the 'start-end' label containing ratio, or None if out of range."""
        for i, interval in enumerate(intervals):
            start, end = map(int, interval.split('-'))
            if start <= ratio < end:
                return interval
        return None  # should not happen unless the data is abnormal

    # Add a 'section' column.
    input_output['section'] = input_output['production_ratio'].apply(assign_section)

    # Floor/ceil the production_inc min/max to integers.
    min_val1 = np.floor(input_output['production_inc'].min()).astype(int)
    max_val1 = np.ceil(input_output['production_inc'].max()).astype(int)
    min_val1 = (min_val1 // 100) * 100  # round down to the nearest hundred

    # Split [min_val1, max_val1] into four integer intervals.
    # NOTE(review): identical branches again — same suspected ceil-division intent as above.
    interval_size1 = (max_val1 - min_val1) // 4 if (max_val1 - min_val1) % 4 == 0 else (max_val1 - min_val1) // 4
    interval_ends1 = [min_val1 + i * interval_size1 for i in range(1, 5)]
    intervals1 = [f"{min_val1}-{interval_ends1[0]}",
                  f"{interval_ends1[0]}-{interval_ends1[1]}",
                  f"{interval_ends1[1]}-{interval_ends1[2]}",
                  f"{interval_ends1[2]}-{max_val1}"]

    # Round a number up to the nearest hundred.
    def ceil_to_nearest_hundred(num):
        """Return num (str or numeric) rounded up to the nearest multiple of 100."""
        # Coerce the (possibly string) value to int first.
        num = int(num)
        # Round up to the nearest hundred.
        return math.ceil(num / 100.0) * 100

        # Process each range string in the array.

    # Re-label the production intervals with hundred-aligned bounds.
    new_ranges = []
    for range_str in intervals1:
        start, end = range_str.split('-')
        # Round both the start and end up to the nearest hundred.
        start_ceil = ceil_to_nearest_hundred(start)
        end_ceil = ceil_to_nearest_hundred(end)
        # Collect the adjusted range.
        new_ranges.append(f'{start_ceil}-{end_ceil}')
    intervals1 = new_ranges

    def assign_section1(ratio):
        """Return the production-volume interval label containing ratio, or None."""
        for i, interval in enumerate(intervals1):
            start, end = map(int, interval.split('-'))
            if start <= ratio < end:
                return interval
        return None  # should not happen unless the data is abnormal

    # Add a 'section1' column.
    input_output['section1'] = input_output['production_inc'].apply(assign_section1)

    # Build JSON records for platforms with data; remove each from the full list
    # so the remainder can be emitted below with null metrics.
    dataJsons = []
    for row in input_output.itertuples(index=False):
        platform_no_list = [x for x in platform_no_list if x != row.platform_no]
        center, wells = get_platform_info(row.platform_no)
        dataJson = {'platformNo': row.platform_no,
                    'production_inc': float(row.production_inc),
                    'output': float(row.output), 'input': float(row.input), 'section': row.section,
                    'section1': row.section1,
                    'production_ratio': float(row.production_ratio),
                    'center': center, 'lnglat': wells}
        dataJsons.append(dataJson)
    # Platforms without computed metrics still appear, with null values.
    for platform_no in platform_no_list:
        center, wells = get_platform_info(platform_no)
        dataJson = {'platformNo': platform_no, 'production_inc': None,
                    'output': None, 'input': None, 'section': None, 'section1': None, 'center': center,
                    'lnglat': wells}
        dataJsons.append(dataJson)
    siteJsons = []
    for gas_site in gas_sites:
        center = [gas_site.lng, gas_site.lat]
        dataJson = {'siteName': gas_site.site_name, 'center': center}
        siteJsons.append(dataJson)
    # Clear the table, then persist the freshly computed rows.
    with app.app_context():
        db.session.execute(text("DELETE FROM gas_production_input_output;"))
        db.session.commit()  # commit the delete
    input_output.to_sql('gas_production_input_output', con=db.engine, index=False, if_exists='append')

    json_response = {
        "success": True,
        "message": "",
        "code": 200,
        'sections': intervals,
        'sections1': intervals1,
        "result": {
            "records": dataJsons,
            "gasSites": siteJsons
        }
    }
    return json_response


def get_param_value(param_name, default_value=0, target_type=int):
    """Fetch a request parameter from form, JSON body, or query args.

    Parameters:
    param_name (str): name of the parameter to fetch.
    default_value: value returned when the parameter is missing or cannot be
        converted (default 0).
    target_type: type to convert the value to (default int).

    Returns:
    The converted value, or default_value on any lookup/conversion failure.
    """
    # Lookup priority: form -> JSON body -> query string.
    value = request.form.get(param_name)
    if value is None:
        json_data = request.get_json(silent=True)
        if json_data and param_name in json_data:
            value = json_data[param_name]
        else:
            value = request.args.get(param_name)

    if value is None:
        return default_value
    try:
        return target_type(value)
    except (TypeError, ValueError):
        # JSON bodies can hold non-string types (dict/list/None) for which the
        # conversion raises TypeError, not ValueError — catch both.
        return default_value


@app.route('/getIncreasePlatformList1', methods=['GET', 'POST'])
def getIncreasePlatformList1():
    """Variant of getIncreasePlatformList with a configurable number of
    colour buckets (`colorNum`, default 12): aggregate incremental production
    per platform, join with measure costs, bucket ratios/volumes into
    colorNum labelled intervals, persist, and return JSON records.
    """
    colorNum = get_param_value('colorNum', 12, int)
    with app.app_context():
        # Fetch every incremental-production record.
        results = GasProductionIncrease.query.all()
        # ORM rows -> list of dicts.
        data = [
            {'platform_no': result.platform_no, 'production_inc': result.production_inc}
            for result in results]
        # All rows share the measure type written by /deal; take it from the first row.
        # NOTE(review): raises IndexError when the table is empty — confirm callers guarantee data.
        selectOption = results[0].type
        data = pd.DataFrame(data)
        production_sums = data.groupby('platform_no')['production_inc'].sum().reset_index()

        results = GasMeasuresCostManagement.query.all()
        # Per-platform cost of each production measure.
        data = [
            {'platform_no': result.platform_no, 'pp_cost': result.bubble_row_cost,
             'lxqj_cost': result.continuous_gas_lift_cost
                , 'jdqj_cost': result.intermittent_gas_lift_cost, 'zy_cost': result.pressurization_costs}
            for result in results]
        gas_cost = pd.DataFrame(data)
        # All platform numbers (used later to emit platforms that have no data).
        platforms = GasBasePlatform.query.all()
        platform_no_list = [platform.platform_no for platform in platforms]
        gas_sites = GasBaseSite.query.all()
    # Keep only the cost column matching the measure type:
    # 泡排 = foam drainage, 增压 = pressurization,
    # 连续气举 = continuous gas lift, 间断气举 = intermittent gas lift.
    if selectOption == '泡排':
        gas_cost = gas_cost[['platform_no', 'pp_cost']]

    elif selectOption == '增压':
        gas_cost = gas_cost[['platform_no', 'zy_cost']]

    elif selectOption == '连续气举':
        gas_cost = gas_cost[['platform_no', 'lxqj_cost']]

    elif selectOption == '间断气举':
        gas_cost = gas_cost[['platform_no', 'jdqj_cost']]

    # Revenue = incremental volume * 1.37 * 10000 — presumably gas price times
    # a unit-conversion factor; confirm against the pricing spec.
    production_sums['output'] = production_sums['production_inc'] * 1.37 * 10000

    input_output = pd.merge(production_sums, gas_cost, on=['platform_no'], how='left')
    input_output.columns = ['platform_no', 'production_inc', 'output', 'input']
    # Guard against division by zero when a platform has no recorded cost.
    input_output['production_ratio'] = np.where(input_output['input'] == 0, 0,
                                                input_output['output'] / input_output['input'])

    # Floor/ceil the production_ratio min/max.
    min_val = np.floor(input_output['production_ratio'].min())
    max_val = np.ceil(input_output['production_ratio'].max())

    # Split [min_val, max_val] into colorNum equal-width intervals.
    interval_size = (max_val - min_val) / colorNum
    interval_ends = [min_val + i * interval_size for i in range(1, colorNum + 1)]

    # intervals = [f"{min_val:.2f}-{interval_ends[0]:.2f}",
    #              f"{interval_ends[0]:.2f}-{interval_ends[1]:.2f}",
    #              f"{interval_ends[1]:.2f}-{interval_ends[2]:.2f}",
    #              f"{interval_ends[2]:.2f}-{interval_ends[3]:.2f}",
    #              f"{interval_ends[3]:.2f}-{interval_ends[4]:.2f}",
    #              f"{interval_ends[4]:.2f}-{interval_ends[5]:.2f}",
    #              f"{interval_ends[5]:.2f}-{interval_ends[6]:.2f}",
    #              f"{interval_ends[6]:.2f}-{interval_ends[7]:.2f}",
    #              f"{interval_ends[7]:.2f}-{interval_ends[8]:.2f}",
    #              f"{interval_ends[8]:.2f}-{interval_ends[9]:.2f}",
    #              f"{interval_ends[9]:.2f}-{interval_ends[10]:.2f}",
    #              f"{interval_ends[10]:.2f}-{max_val:.2f}"]

    # Build the interval labels. Note the control flow: when i == 0 the first
    # `if` branch does NOT skip the trailing append, so iteration 0 appends two
    # labels — this yields exactly colorNum labels in total.
    intervals = []
    for i in range(0, colorNum - 1):
        if i == 0:
            intervals.append(f"{min_val:.2f}-{interval_ends[0]:.2f}")
        elif i == colorNum - 2:
            intervals.append(f"{interval_ends[i]:.2f}-{max_val:.2f}")
            break
        intervals.append(f"{interval_ends[i]:.2f}-{interval_ends[i + 1]:.2f}")

    # Map a ratio to the labelled interval it falls in.
    def assign_section(ratio):
        """Return the 'start-end' label containing ratio, or None if out of range."""
        for i, interval in enumerate(intervals):
            start, end = map(float, interval.split('-'))
            if start <= ratio < end:
                return interval
        return None  # should not happen unless the data is abnormal

    # Add a 'section' column.
    input_output['section'] = input_output['production_ratio'].apply(assign_section)

    # Floor/ceil the production_inc min/max.
    min_val1 = np.floor(input_output['production_inc'].min())
    max_val1 = np.ceil(input_output['production_inc'].max())
    min_val1 = (min_val1 // 100) * 100  # round min down to the nearest hundred
    # Coerce to int first...
    max_val1 = int(max_val1)
    # ...then round max up to the nearest hundred.
    max_val1 = math.ceil(max_val1 / 100.0) * 100

    # Split [min_val1, max_val1] into 12 equal-width intervals.
    # NOTE(review): the divisor here is a fixed 12 while the label loop below
    # uses colorNum — these disagree whenever colorNum != 12; confirm intent.
    interval_size1 = (max_val1 - min_val1) / 12
    interval_ends1 = [min_val1 + i * interval_size1 for i in range(1, 13)]

    # intervals1 = [f"{min_val1:.0f}-{interval_ends1[0]:.0f}",
    #               f"{interval_ends1[0]:.0f}-{interval_ends1[1]:.0f}",
    #               f"{interval_ends1[1]:.0f}-{interval_ends1[2]:.0f}",
    #               f"{interval_ends1[2]:.0f}-{interval_ends1[3]:.0f}",
    #               f"{interval_ends1[3]:.0f}-{interval_ends1[4]:.0f}",
    #               f"{interval_ends1[4]:.0f}-{interval_ends1[5]:.0f}",
    #               f"{interval_ends1[5]:.0f}-{interval_ends1[6]:.0f}",
    #               f"{interval_ends1[6]:.0f}-{interval_ends1[7]:.0f}",
    #               f"{interval_ends1[7]:.0f}-{interval_ends1[8]:.0f}",
    #               f"{interval_ends1[8]:.0f}-{interval_ends1[9]:.0f}",
    #               f"{interval_ends1[9]:.0f}-{interval_ends1[10]:.0f}",
    #               f"{interval_ends1[10]:.0f}-{max_val1:.0f}"]

    # Same double-append-at-i==0 pattern as above; yields colorNum labels.
    intervals1 = []
    for i in range(0, colorNum - 1):
        if i == 0:
            intervals1.append(f"{min_val1:.0f}-{interval_ends1[0]:.0f}")
        elif i == colorNum - 2:
            intervals1.append(f"{interval_ends1[i]:.0f}-{max_val1:.0f}")
            break
        intervals1.append(f"{interval_ends1[i]:.0f}-{interval_ends1[i + 1]:.0f}")

    def assign_section1(ratio):
        """Return the production-volume interval label containing ratio, or None."""
        for i, interval in enumerate(intervals1):
            start, end = map(int, interval.split('-'))
            if start <= ratio < end:
                return interval
        return None  # should not happen unless the data is abnormal

    # Add a 'section1' column.
    input_output['section1'] = input_output['production_inc'].apply(assign_section1)

    # Build JSON records for platforms with data; NaN metrics are coerced to 0
    # and records that still fail JSON serialization are skipped.
    dataJsons = []
    for row in input_output.itertuples(index=False):
        platform_no_list = [x for x in platform_no_list if x != row.platform_no]
        center, wells = get_platform_info(row.platform_no)
        dataJson = {'platformNo': row.platform_no,
                    'production_inc': 0 if math.isnan(float(row.production_inc)) else float(row.production_inc),
                    'output': 0 if math.isnan(float(row.output)) else float(row.output),
                    'input': 0 if math.isnan(float(row.input)) else float(row.input),
                    'section': row.section,
                    'section1': row.section1,
                    'production_ratio': 0 if math.isnan(float(row.production_ratio)) else float(row.production_ratio),
                    'center': center, 'lnglat': wells}
        if is_json_format(dataJson):
            dataJsons.append(dataJson)
    # Platforms without computed metrics still appear, with null values.
    for platform_no in platform_no_list:
        center, wells = get_platform_info(platform_no)
        dataJson = {'platformNo': platform_no, 'production_inc': None,
                    'output': None, 'input': None, 'section': None, 'section1': None, 'center': center,
                    'lnglat': wells}
        dataJsons.append(dataJson)
    siteJsons = []
    for gas_site in gas_sites:
        center = [gas_site.lng, gas_site.lat]
        dataJson = {'siteName': gas_site.site_name, 'center': center}
        siteJsons.append(dataJson)
    # Clear the table, then persist the freshly computed rows.
    with app.app_context():
        db.session.execute(text("DELETE FROM gas_production_input_output;"))
        db.session.commit()  # commit the delete
    input_output.to_sql('gas_production_input_output', con=db.engine, index=False, if_exists='append')

    json_response = {
        'success': True,
        'message': '',
        'code': 200,
        'sections': intervals,
        'sections1': intervals1,
        'result': {
            'records': dataJsons,
            'gasSites': siteJsons
        }
    }

    return json_response
def is_json_format(dictionary):
    """Return True when *dictionary* can be serialized to JSON, else False."""
    try:
        json.dumps(dictionary)
    except (TypeError, OverflowError):
        return False
    return True
# Distance between a compressor working condition and a requested operating point
def calculate_distance(row, target_intake, target_exhaust, target_gas):
    """Return (distance, exhaust_pressure) for a workcondition row.

    Picks the highest populated exhaust-pressure grade (grade3 > grade2 >
    grade1) and computes the Euclidean distance from the row's
    (grade1 intake pressure, chosen exhaust pressure, exhaust gas) to the
    requested target point. Returns (inf, None) when the row has missing or
    non-numeric values, so callers naturally skip it when minimizing.
    """
    try:
        # Prefer the highest grade of exhaust pressure that is populated.
        if row.grade3_exhaust_pressure is not None:
            exhaust = row.grade3_exhaust_pressure
        elif row.grade2_exhaust_pressure is not None:
            exhaust = row.grade2_exhaust_pressure
        else:
            exhaust = row.grade1_exhaust_pressure
        # Single distance expression replaces the three duplicated copies.
        distance = sqrt(
            (row.grade1_intake_pressure - target_intake) ** 2 +
            (exhaust - target_exhaust) ** 2 +
            (row.exhaust_gas - target_gas) ** 2
        )
        return distance, exhaust
    except (TypeError, AttributeError):
        # None in arithmetic raises TypeError; a missing column raises
        # AttributeError. Narrowed from a bare `except:` so real bugs surface.
        return float('inf'), None


# def calculate_distance(row, target_intake, target_exhaust, target_gas):
#     try:
#         if row.grade3_exhaust_pressure is None:
#             if row.grade2_exhaust_pressure is None:
#                 if row.grade1_exhaust_pressure <= target_exhaust:
#                     return sqrt(
#                         (row.grade1_intake_pressure - target_intake) ** 2 +
#                         (row.grade1_exhaust_pressure - target_exhaust) ** 2 +
#                         (row.exhaust_gas - target_gas) ** 2
#                     ), row.grade1_exhaust_pressure
#                 else:
#                     return float('inf'), None
#             else:
#                 if row.grade2_exhaust_pressure <= target_exhaust:
#                     return sqrt(
#                         (row.grade1_intake_pressure - target_intake) ** 2 +
#                         (row.grade2_exhaust_pressure - target_exhaust) ** 2 +
#                         (row.exhaust_gas - target_gas) ** 2
#                     ), row.grade2_exhaust_pressure
#                 else:
#                     return float('inf'), None
#         if row.grade3_exhaust_pressure <= target_exhaust:
#             return sqrt(
#                 (row.grade1_intake_pressure - target_intake) ** 2 +
#                 (row.grade3_exhaust_pressure - target_exhaust) ** 2 +
#                 (row.exhaust_gas - target_gas) ** 2
#             ), row.grade3_exhaust_pressure
#         else:
#             return float('inf'), None
#     except:
#         return float('inf'), None

@app.route('/getCompressor', methods=['GET', 'POST'])  # match a compressor
def getCompressor():
    """Given intake pressure, exhaust pressure and exhaust gas volume,
    return the best-matching compressor records as JSON."""
    intake = float(request.args.get('intakePressure', 2))    # intake pressure
    exhaust = float(request.args.get('exhaustPressure', 3))  # exhaust pressure
    gas = float(request.args.get('exhaustGas', 6))           # exhaust gas volume

    records, _current_pressure = matchCompressor(intake, exhaust, gas)

    return {
        "success": True,
        "message": "",
        "code": 200,
        "result": {
            "records": records,
        }
    }


def matchCompressor(intake_pressure, exhaust_pressure, exhaust_gas):
    """Find the compressor best matching the requested operating point.

    Filters compressors whose rated ranges cover the requested intake
    pressure / exhaust pressure / gas volume, then scans their recorded
    working conditions for the one closest to the request (Euclidean
    distance via calculate_distance), preferring lower running power on
    ties. Returns (records, current_pressure); (None, None) when no
    working condition matches.
    """
    # Build the range-filter query.
    query = GasBaseCompressor.query.filter(
        GasBaseCompressor.intake_pressure_min <= intake_pressure,
        GasBaseCompressor.intake_pressure_max >= intake_pressure,
        GasBaseCompressor.exhaust_pressure >= exhaust_pressure,
        GasBaseCompressor.exhaust_gas_min <= exhaust_gas,
        GasBaseCompressor.exhaust_gas_max >= exhaust_gas
    )
    with app.app_context():
        # Execute the query and collect candidate compressor ids.
        compressors = query.all()
        compressor_ids = [result.id for result in compressors]
        # Fetch all working conditions for those compressors with in_().
        results = GasCompressorWorkcondition.query.filter(
            GasCompressorWorkcondition.compressor_id.in_(compressor_ids)).all()

    # Track the minimum distance and best-matching workcondition row.
    min_distance = float('inf')
    best_match = None
    # Scan all workconditions for the closest match; ties go to lower power.
    current_pressure = None
    for row in results:
        distance, pressure = calculate_distance(row, intake_pressure, exhaust_pressure, exhaust_gas)
        if distance < min_distance or (
                distance == min_distance and row.power < (best_match.power if best_match else float('inf'))):
            min_distance = distance
            best_match = row
            if pressure is not None:
                current_pressure = pressure
    if best_match is None:
        return None, None
    data = None
    # Join the winning workcondition back to its compressor record and build
    # the camelCase response payload.
    for result in compressors:
        if result.id == best_match.compressor_id:
            data = [{
                'id': result.id,
                'updateBy': result.update_by,
                'updateTime': result.update_time.isoformat() if result.update_time else None,
                'sysOrgCode': result.sys_org_code,
                'createBy': result.create_by,  # likewise kept as createBy for the frontend
                'createTime': result.create_time.isoformat() if result.create_time else None,
                'groupModel': result.group_model,
                'compressorModel': result.compressor_model,
                'intakePressureMin': result.intake_pressure_min,
                'intakePressureMax': result.intake_pressure_max,
                'intakePressureOptimal': result.intake_pressure_optimal,
                'intakeTemperature': result.intake_temperature,
                'exhaustGasMin': result.exhaust_gas_min,
                'exhaustGasMax': result.exhaust_gas_max,
                'exhaustPressure': result.exhaust_pressure,
                'exhaustTemperature': result.exhaust_temperature,
                'compressorColumns': result.compressor_columns,
                'compressorCylinders': result.compressor_cylinders,
                'ratedPower': result.rated_power,
                'rotateSpeed': best_match.rotate_speed,  # running speed, rpm
                'powerLoadate': best_match.power_load_rate,  # shaft power load rate, %
                'power': best_match.power,  # running power
                'ratedRotateSpeed': result.rated_rotate_speed,
                'weight': result.weight,
                'size': result.size,
                'lease_costs': result.lease_costs
            }]
    return data, current_pressure


@app.route('/updateCompressor', methods=['GET', 'POST'])  # recompute recommended compressors
def updateCompressor():  # given intake pressure / exhaust pressure / exhaust gas --> matching compressors
    """Recompute the recommended compressor for every platform at a forecast date.

    Reads platform/well forecasts and the currently installed compressors from
    the database, picks the forecast row at ``min_date + preDays - 1``, matches
    a compressor for each platform whose minimum well pressure is below the
    transport pressure, computes the daily electricity savings of swapping, and
    rewrites the ``gas_platform_compressor_pre`` table.

    JSON body:
        preDays: forecast horizon in days (default 60).
        electricity: electricity price used in the savings calculation
            (default 0.725) — assumed per-kWh; TODO confirm unit.

    Returns:
        dict: ``{"success": True, "message": "", "code": 200}``.
    """
    preDays = float(request.json.get('preDays', 60))  # forecast horizon in days (original comment said "intake pressure" — mislabeled)
    electricity = float(request.json.get('electricity', 0.725))  # electricity price (original comment was mislabeled too)

    # Run all database queries inside the Flask application context
    with app.app_context():
        # Fetch all platform-level production forecasts
        results = GasProductionPlatformPre.query.all()
        # Convert ORM results into a list of dicts (keys are Chinese column labels used downstream)
        plat_pre = [
            {'平台id': result.platform_id, '平台号': result.platform_no, '日期': result.collect_date,
             '气量预测值': result.pre_value,
             '输气压力': result.pressure_transport}
            for result in results]
        plat_df = pd.DataFrame(plat_pre)
        # if plat_df['输气压力'].values.__contains__(0):
        #     return {
        #         "success": False,
        #         "message": "请等待更新输气压力！",
        #         "code": 200
        #     }

        results = GasProductionWellPre.query.all()
        # Well-level forecasts: casing pressure, oil pressure and produced days
        well_pre = [
            {'井号': result.well_no, '平台号': result.platform_no, '日期': result.collect_date,
             '套压预测值': result.cover_pre_value, '油压预测值': result.oil_pre_value,
             '已生产天数': result.pro_days}
            for result in results]

        results = GasPlatformCompressor.query.all()
        plat_com = [
            {'platform_id': result.platform_id, 'platform_no': result.platform_no,
             'originally_compressor_id': result.originally_compressor_id,
             'originally_compressor_name': result.originally_compressor_name,
             'originally_compressor_power': result.originally_compressor_power,
             'originally_procurement_costs': result.originally_procurement_costs}
            for result in results]

        results = GasBaseWell.query.all()
        base_well = [
            {'井号': result.well_no, '油管是否投产': result.is_production}
            for result in results]
    plat_com_ori = pd.DataFrame(plat_com)  # currently installed compressor per platform
    columns = ['platform_id', 'platform_no',
               'now_compressor_id', 'now_compressor_name', 'now_compressor_power',
               'now_procurement_costs', 'day_save_money', 'date']
    plat_com_pre = pd.DataFrame(columns=columns)  # accumulates the predicted/recommended compressors

    well_df = pd.DataFrame(well_pre)
    plat_df['日期'] = pd.to_datetime(plat_df['日期'])
    well_df['日期'] = pd.to_datetime(well_df['日期'])
    # Earliest forecast date in the platform table
    min_date = plat_df['日期'].min()
    # Target date = earliest date + (preDays - 1)
    to_date = min_date + timedelta(days=preDays - 1)
    # Keep only the rows exactly at the target date (comment in the original said "before", but == is used)
    plat_df = plat_df[plat_df['日期'] == to_date]
    well_df = well_df[well_df['日期'] == to_date]
    base_well = pd.DataFrame(base_well)
    well_df = pd.merge(well_df, base_well, on='井号', how='outer')
    # Use the oil-pressure forecast when the tubing is in production ('1'), else the casing-pressure forecast
    well_df['预测值'] = np.where(well_df['油管是否投产'] == '1', well_df['油压预测值'], well_df['套压预测值'])
    well_df.drop(columns=['套压预测值', '油压预测值'], inplace=True)  # drop the raw casing/oil pressure columns

    def find_min_casing_pressure(platform_no, well_df, key):
        """Return the minimum `key` value among wells on `platform_no`; NaN if none match."""
        matched_rows = well_df[well_df['平台号'] == platform_no]
        if not matched_rows.empty:
            return matched_rows[key].min()
        else:
            return float('nan')

    plat_df['最小预测值'] = plat_df['平台号'].apply(lambda x: find_min_casing_pressure(x, well_df, '预测值'))
    plat_df['最小已生产天数'] = plat_df['平台号'].apply(lambda x: find_min_casing_pressure(x, well_df, '已生产天数'))
    # For each platform forecast row, try to match a compressor
    for index, row in plat_df.iterrows():
        platform_no = row['平台号']
        platform_id = row['平台id']
        intake_pressure = float(row['最小预测值'])
        exhaust_pressure = float(row['输气压力'])
        exhaust_gas = float(row['气量预测值'])
        if intake_pressure < exhaust_pressure:
            data, current_pressure = matchCompressor(intake_pressure, exhaust_pressure, exhaust_gas)  # inputs: intake pressure, exhaust pressure, gas volume
            if data is not None:
                # print(current_pressure, exhaust_pressure, platform_no)
                # Row describing the recommended compressor for this platform
                new_data = {
                    'platform_id': [platform_id],
                    'platform_no': [platform_no],
                    'now_compressor_id': [data[0]['id']],
                    'now_compressor_name': [data[0]['groupModel']],
                    'now_compressor_power': [data[0]['ratedPower']],
                    'now_procurement_costs': [data[0]['lease_costs']],
                    'day_save_money': [0],  # placeholder; real savings computed after the merge below
                    'min_pro_days': row['最小已生产天数'],
                    'exhaust_pressure': [exhaust_pressure],
                    'current_pressure': [current_pressure],
                }
                plat_com_pre = pd.concat([plat_com_pre, pd.DataFrame(new_data)], ignore_index=True)

    # Outer-join current and recommended compressors on platform_no
    plat_com_all = pd.merge(plat_com_ori, plat_com_pre, on='platform_no', how='outer')
    plat_com_all['date'] = to_date

    plat_com_all = plat_com_all.merge(
        plat_df[['平台号', '气量预测值']],  # only the columns needed for the merge
        left_on='platform_no',  # join key in plat_com_all
        right_on='平台号',  # join key in plat_df
        how='left'
    )
    plat_com_all = plat_com_all.rename(columns={'气量预测值': 'exhaust_gas'})

    plat_df = plat_df[['平台号', '输气压力']]
    plat_df.columns = ['platform_no', 'exhaust_pressure']
    plat_com_all = pd.merge(plat_com_all, plat_df, on='platform_no', how='left')

    # Prefer the freshly merged transport pressure (suffix _y) over the per-row one (_x)
    plat_com_all['exhaust_pressure'] = plat_com_all['exhaust_pressure_y']
    # Drop the pre-merge duplicate columns
    plat_com_all.drop(columns=['exhaust_pressure_x', 'exhaust_pressure_y', '平台号'], inplace=True)

    # plat_com_all = plat_com_all[
    #     (plat_com_all['originally_compressor_power'].ge(plat_com_all['now_compressor_power'])) | (
    #         plat_com_all['originally_compressor_power'].isna()) | (plat_com_all['now_compressor_power'].isna())]

    # Coalesce platform_id from the two merge sides
    plat_com_all['platform_id'] = plat_com_all.apply(
        lambda row: row['platform_id_x'] if pd.notna(row['platform_id_x']) else (
            row['platform_id_y'] if pd.notna(row['platform_id_y']) else np.nan), axis=1)
    # Drop the original platform_id_x / platform_id_y columns
    plat_com_all.drop(columns=['platform_id_x', 'platform_id_y'], inplace=True)

    # Drop platforms with no installed compressor that have produced for more than 600 days
    plat_com_all = plat_com_all[
        ~((plat_com_all['originally_compressor_id'].isna()) & (plat_com_all['min_pro_days'] > 600))]

    # plat_com_all = plat_com_all.dropna(subset=['now_compressor_id'])

    # plat_com_all.to_csv('E:\myProject\gas-python\output\plat_com_all.csv', index=False)
    plat_com_all = plat_com_all.drop('min_pro_days', axis=1)
    # Daily electricity savings, assuming 24 h/day operation
    # (power difference in kW * 24 h * price)
    plat_com_all['day_save_money'] = (plat_com_all['originally_compressor_power'] - plat_com_all[
        'now_compressor_power']) * 24 * electricity  # TODO: electricity price needs revisiting

    # NaN handling: if originally_compressor_power is NaN, day_save_money becomes NaN too
    # plat_com_all['day_save_money'] = plat_com_all.apply(
    #     lambda row: row['originally_compressor_power'] * 24 * electricity if pd.isna(row['now_compressor_power']) else
    #     row['day_save_money'], axis=1)

    # Where the swap would lose money, clear the recommendation columns
    plat_com_all.loc[plat_com_all['day_save_money'] < 0, [
        'now_compressor_id', 'now_compressor_name', 'now_compressor_power', 'now_procurement_costs', 'day_save_money'
    ]] = np.nan

    plat_com_all['id'] = [str(uuid.uuid4()) for _ in range(len(plat_com_all))]  # unique row IDs
    # Derive the recommended action code (see calculate_rec_act)
    plat_com_all['rec_act'] = plat_com_all.apply(calculate_rec_act, axis=1)

    # rec_act == 2 means "remove compressor": report current pressure as the transport pressure
    plat_com_all.loc[plat_com_all['rec_act'] == 2, 'current_pressure'] = plat_com_all['exhaust_pressure']
    # print(plat_com_all.columns)
    # Clear the target table before inserting the new snapshot
    with app.app_context():
        db.session.execute(text("DELETE FROM gas_platform_compressor_pre;"))
        db.session.commit()  # commit the delete
    plat_com_all.to_sql('gas_platform_compressor_pre', con=db.engine, index=False, if_exists='append')

    json_response = {
        "success": True,
        "message": "",
        "code": 200
    }
    return json_response


def calculate_rec_act(row):
    """Classify the recommended compressor action for one platform row.

    Args:
        row: mapping with keys 'originally_compressor_id' (installed
            compressor, NaN if none) and 'now_compressor_id' (recommended
            compressor, NaN if none).

    Returns:
        int: 0 = nothing installed, 2 = installed but no recommendation
        (remove), 1 = replacement recommended.
    """
    has_installed = pd.notna(row['originally_compressor_id'])
    has_recommended = pd.notna(row['now_compressor_id'])
    if not has_installed:
        return 0
    return 1 if has_recommended else 2


@app.route('/predict', methods=['GET', 'POST'])
def predict():
    """Train a stacking ensemble, predict per-well decline parameters and EUR,
    and rewrite the ``gas_eur_predict`` table with the results.

    Query params (all optional):
        models: base learner keys (default ['lgb', 'svr', 'rf']).
        finalModel: final estimator key (default 'mlp'; falls back to 'gb'
            when the key is unknown).
        learningRate / nSplits / nTrees: hyper-parameters for the learners.
        predictVariables: feature columns used for prediction.
        pageNo / pageSize: parsed but currently unused (kept for the
            frontend contract).

    Returns:
        JSON ``{"state": "success", "code": 200}`` on success, or an error
        payload with HTTP 400/500.
    """
    try:
        # Retrieve form data
        selected_models = request.args.getlist('models') or ['lgb', 'svr', 'rf']  # Default to all models
        final_model_choice = request.args.get('finalModel', 'mlp')  # Default to "mlp" if not provided
        learning_rate = float(request.args.get('learningRate', 0.01))  # Default learning rate is 0.01
        n_splits = int(request.args.get('nSplits', 5))  # Default to 5 splits
        n_trees = int(request.args.get('nTrees', 100))
        predict_variables = request.args.getlist('predictVariables') or ['thickness1_layer', 'brmc1_layer',
                                                                         'toc1_layer', 'por1_layer', 'qall1_layer',
                                                                         'sg1_layer',
                                                                         'coefficient_pressure', 'subject_hop_crowd',
                                                                         'subject_hop_hole', 'crowd_distance',
                                                                         'af_stage_length', 'sand_strength',
                                                                         'liquid_strength', 'dro4mbabo_rate',
                                                                         'segment_length', 'drilling_depth',
                                                                         'days90_first_year']
        page = int(request.args.get('pageNo', 1))  # NOTE(review): parsed but unused — pagination not implemented here
        size = int(request.args.get('pageSize', 10))  # NOTE(review): parsed but unused

        # Build base learners
        base_learners_config = {}
        for model_key in selected_models:  # Models are like ["rf", "xgb"]
            if model_key in model_mapping:  # Check if model exists in the mapping
                # Copy the entry (and its params dict) so per-request
                # hyper-parameter overrides do not mutate the shared
                # module-level model_mapping across requests.
                model_info = dict(model_mapping[model_key])
                model_info['params'] = dict(model_info['params'])
                # If the model supports a learning rate, adjust it
                if 'learning_rate' in model_info['params']:
                    model_info['params']['learning_rate'] = learning_rate
                if 'n_estimators' in model_info['params']:
                    model_info['params']['n_estimators'] = n_trees
                base_learners_config[model_key] = model_info
            else:
                return jsonify({"state": "error", "message": f"Model '{model_key}' not found in model mapping."}), 400

        # Select the final model config (falls back to gradient boosting)
        final_model_info = final_model_mapping.get(final_model_choice, final_model_mapping['gb'])

        # Build the user_selection dictionary
        user_selection = {
            "base_learners": base_learners_config,
            "final_model": final_model_info
        }

        # Build the ensemble model
        stacking_model = build_ensemble_model(user_selection=user_selection)

        # Connect to the database and retrieve data
        engine = get_db_connection()
        table_name = 'gas_well_para'
        noprocess_var = ['id', 'update_by', 'update_time', 'sys_org_code', 'create_by', 'create_time',
                         'actual_production', 'duong_production', 'lng', 'lat', 'well_state']
        df = pd.read_sql_table(table_name, con=engine)
        df = df.drop(noprocess_var, axis=1)

        base_variables = ['well_no', 'm_values', 'a_values', 'days330_first_year', 'core_area', 'well_type']
        df = df[predict_variables + base_variables]

        # Data processing
        Dataset_X, Dataset_y_a, Dataset_y_m, Dataset_y_p = process_data(df)
        df_combined = pd.concat([Dataset_X, Dataset_y_a, Dataset_y_m, Dataset_y_p], axis=1)
        original_columns = list(Dataset_X.columns)
        new_columns = ['a_values', 'm_values', 'days330_first_year']
        df_combined.columns = original_columns + new_columns

        # Split wells with / without a known first-year production value
        df_with_values = df_combined[df_combined['days330_first_year'].notna()]
        df_missing_values = df_combined[df_combined['days330_first_year'].isna()]

        # Perform cross-validation and prediction
        df_with_values_predictions = cross_validate_and_predict(
            df_with_values, p_model=stacking_model, a_model=stacking_model, m_model=stacking_model, n_splits=n_splits)

        if df_missing_values.empty:
            df_with_predictions = df_with_values_predictions
        else:
            df_with_predictions = well_none_predict(df_with_values_predictions, df_missing_values,
                                                    a_model=stacking_model, m_model=stacking_model,
                                                    p_model=stacking_model)

        # Compute the EUR for every well from the predicted decline parameters
        well_ids = df_with_predictions.index.unique()
        eur_values = []
        year_production_values = {f'year{i + 1}_production': [] for i in range(20)}  # one column per forecast year
        for well_id in well_ids:
            well_data = df_with_predictions[df_with_predictions.index == well_id]
            a_fit = well_data['Predicted_a'].values[0]
            m_fit = well_data['Predicted_m'].values[0]
            first_production = well_data['Predicted_330'].values[0]
            year_production, eur = eur_function(first_production, a_fit, m_fit)
            eur_values.append(eur)
            for i in range(20):
                year_production_values[f'year{i + 1}_production'].append(year_production[i])

        for year_col, year_values in year_production_values.items():
            df_with_predictions[year_col] = year_values
        df_with_predictions['predicted_eur'] = eur_values
        df_with_predictions = df_with_predictions.reset_index()
        columns_order = ['well_no'] + [col for col in df_with_predictions.columns if col != 'well_no']
        df_with_predictions = df_with_predictions[columns_order]

        # Response columns: identifiers, predicted/actual first-year values,
        # EUR, decline parameters, and the yearly productions
        responses_variable = ['well_no', 'Predicted_330', 'days330_first_year', 'predicted_eur', 'a_values',
                              'm_values'] + [f'year{i + 2}_production' for i in range(19)]
        # .copy() so the column assignments below do not hit a view of
        # df_with_predictions (SettingWithCopy hazard)
        df_response = df_with_predictions[responses_variable].copy()

        # MAE and MRE; rows with no actual days330_first_year get NaN
        df_response['mae'] = df_response.apply(
            lambda row: mean_absolute_error([row['days330_first_year']], [row['Predicted_330']]) if pd.notna(
                row['days330_first_year']) else np.nan, axis=1
        )

        df_response['mre'] = df_response.apply(
            lambda row: (row['mae'] / row['days330_first_year']) if pd.notna(row['days330_first_year']) else np.nan,
            axis=1
        )

        # Rename to the database column name
        df_response = df_response.rename(columns={'Predicted_330': 'predicted330'})

        # Audit columns for the gas_eur_predict table
        write_name = 'gas_eur_predict'  # target SQL table
        df_response['update_by'] = 'gas-admin'  # updater (static for now)
        df_response['update_time'] = datetime.now()  # update timestamp
        df_response['sys_org_code'] = 'A11'  # department code
        df_response['create_by'] = 'gas-admin'  # creator
        df_response['create_time'] = datetime.now()  # creation timestamp
        df_response['id'] = [str(uuid.uuid4()) for _ in range(len(df_response))]

        # Reorder/select columns to exactly match the table schema
        metadata = MetaData()
        table = Table(write_name, metadata, autoload_with=engine)
        sql_columns = [column.name for column in table.columns]
        df_response = df_response[sql_columns]

        # Normalize dtypes before the insert
        df_response['update_time'] = pd.to_datetime(df_response['update_time'])
        df_response['create_time'] = pd.to_datetime(df_response['create_time'])
        df_response['well_no'] = df_response['well_no'].astype(str)
        df_response['id'] = df_response['id'].astype(str)
        df_response['update_by'] = df_response['update_by'].astype(str)
        df_response['sys_org_code'] = df_response['sys_org_code'].astype(str)
        df_response['create_by'] = df_response['create_by'].astype(str)

        # Replace the table contents wholesale
        with engine.connect() as connection:
            connection.execute(text(f"TRUNCATE TABLE {write_name}"))
        # Then append the fresh predictions via pandas to_sql()
        df_response.to_sql(write_name, con=engine, index=False, if_exists='append')
        print(np.mean(df_response['mre']))

        # Return the result as JSON
        response = {
            "state": "success",
            "code": 200
        }
        return jsonify(response)

    except ValueError as ve:
        return jsonify({"state": "error", "message": f"ValueError: {str(ve)}"}), 400
    except Exception as e:
        return jsonify({"state": "error", "message": f"An error occurred: {str(e)}"}), 500


@app.route('/optimize', methods=['GET'])
def optimize():
    """Optimise adjustable fracturing parameters with PSO to maximise EUR
    subject to a profitability constraint.

    Trains stacking models for the Duong decline parameters (a, m) and the
    first-year production (p), runs a discretised particle-swarm search over
    the adjustable variables, then persists the optimisation history, the
    optimised well parameters, and the fitted decline curve to the database.

    Returns:
        JSON with best_solution / best_fitness on success, or {"error": ...}.
    """
    try:
        # Frontend camelCase name -> database snake_case column
        mapping = {
            'wellType': 'well_type',
            'coreArea': 'core_area',
            'thickness1Layer': 'thickness1_layer',
            'brmc1Layer': 'brmc1_layer',
            'toc1Layer': 'toc1_layer',
            'por1Layer': 'por1_layer',
            'qall1Layer': 'qall1_layer',
            'sg1Layer': 'sg1_layer',
            'coefficientPressure': 'coefficient_pressure',
            'subjectHopCrowd': 'subject_hop_crowd',
            'subjectHopHole': 'subject_hop_hole',
            'crowdDistance': 'crowd_distance',
            'afStageLength': 'af_stage_length',
            'sandStrength': 'sand_strength',
            'liquidStrength': 'liquid_strength',
            'dro4mbaboRate': 'dro4mbabo_rate',
            'segmentLength': 'segment_length',
            'drillingDepth': 'drilling_depth'}

        # Get data from the database and process it
        engine = get_db_connection()
        X_all, y_a, y_m, y_p, label_encoder_type, label_encoder_region = get_processed_data()

        raw_variables = list(mapping.values())
        default_var_values = X_all[raw_variables].median()

        # Adjust variables (received from the frontend, camelCase)
        adjust_var_case = ['liquidStrength', 'sandStrength', 'afStageLength', 'crowdDistance']
        adjust_var = [mapping[var] for var in adjust_var_case]

        df_predict = X_all.copy()
        raw_variables = list(mapping.values())
        # Default values based on the dataset median
        # default_var_values = X_all[raw_variables].median()

        # Default upper and lower bounds based on quantiles
        default_up_bound = X_all[adjust_var].quantile(0.95)
        default_down_bound = X_all[adjust_var].quantile(0.05)

        profit_rate = float(request.args.get('profitRate', 0.06))
        interest_rate = float(request.args.get('interestRate', 0.06))
        gas_price = float(request.args.get('gasPrice', 0.989))
        swarm_size = int(request.args.get('swarmSize', 20))
        max_iteration = int(request.args.get('maxIter', 10))
        well_type = request.args.get('wellType', 'Ⅲ类井')
        core_area = request.args.get('coreArea', '外围')

        # Get upper and lower bounds from the frontend or use defaults
        # NOTE(review): these read request.form, but the route only allows GET
        # (other params use request.args) — confirm the frontend contract.
        up_bounds = {
            mapping[var]: float(request.form.get(f'{var}UpperBound', default_up_bound[mapping[var]]))
            for var in adjust_var_case
        }
        down_bounds = {
            mapping[var]: float(request.form.get(f'{var}LowerBound', default_down_bound[mapping[var]]))
            for var in adjust_var_case
        }

        # Base learners for stacking model
        gb_base = GradientBoostingRegressor(random_state=40, learning_rate=0.02, n_estimators=500, max_features='sqrt',
                                            min_samples_leaf=1)
        rfr_base = RandomForestRegressor(n_estimators=300, random_state=20, min_samples_leaf=1)
        xgb_base = XGBRegressor(n_estimators=300, random_state=20, objective='reg:absoluteerror')
        lgb_base = LGBMRegressor(verbosity=-1, objective='mae', n_estimators=500, reg_alpha=0.2, learning_rate=0.01,
                                 min_samples_leaf=1)
        huber_base = HuberRegressor(epsilon=1.75, max_iter=10000)
        ridge_base = Ridge(alpha=0.015)
        mlp_base = MLPRegressor(hidden_layer_sizes=(500, 500, 300, 100),  ## Hidden layer neuron count
                                activation='relu',
                                solver='adam',
                                alpha=0.005,  ## L2 penalty parameter
                                max_iter=100,
                                random_state=123,
                                early_stopping=True,  ## Whether to stop training early
                                validation_fraction=0.2,  ## 20% as validation set
                                tol=1e-8)
        k_base = KernelRidge(alpha=1.0, kernel='rbf', gamma=0.3)

        # Create stacking models (one per target: a, m, first-year production)
        base_learners = [
            ('rfr', rfr_base),
            ('lgb', lgb_base),
            ('xgb', xgb_base),
            ('gb', gb_base),
            ('ridge', ridge_base),
            ('huber', huber_base),
            ('k_base', k_base)
        ]

        stacking_model_a = StackingRegressor(estimators=base_learners, final_estimator=mlp_base)
        stacking_model_m = StackingRegressor(estimators=base_learners, final_estimator=mlp_base)
        stacking_model_p = StackingRegressor(estimators=base_learners, final_estimator=mlp_base)

        scaler_a = StandardScaler()
        scaler_m = StandardScaler()
        scaler_p = StandardScaler()

        def importance_calculation(X, y, model):
            """Fit `model` and return its normalised feature importances."""
            model.fit(X, y)
            feature_importance = model.feature_importances_
            feature_importance = np.abs(feature_importance) / np.sum(np.abs(feature_importance))
            return feature_importance

        y_process_a = y_a
        y_process_m = y_m
        y_process_p = y_p

        # Keep only rows where each target is present
        training_X_a = X_all[pd.notna(y_process_a.values)]
        training_y_a = y_process_a[pd.notna(y_process_a.values)]

        training_X_m = X_all[pd.notna(y_process_m.values)]
        training_y_m = y_process_m[pd.notna(y_process_m.values)]

        training_X_p = X_all[pd.notna(y_process_p.values)]
        training_y_p = y_process_p[pd.notna(y_process_p.values)]

        # Fit models
        X_scaled_a = scaler_a.fit_transform(training_X_a)
        X_scaled_m = scaler_m.fit_transform(training_X_m)
        X_scaled_p = scaler_p.fit_transform(training_X_p)
        feature_p = importance_calculation(X_scaled_p, training_y_p, xgb_base)

        def mape(y_true, y_pred):
            """Mean absolute percentage error."""
            return np.mean(np.abs((y_pred - y_true) / y_true))

        def medianape(y_true, y_pred):
            """Median absolute percentage error."""
            return np.median(np.abs((y_pred - y_true) / y_true))

        # NOTE(review): target_i, model_i and pred_mlp below are computed but
        # never used afterwards — leftovers from experimentation?
        target_i = 'days330_first_year'
        model_i = GradientBoostingRegressor(loss='huber')
        X_scaled_im = X_scaled_p

        X_train, X_test, y_train, y_test = train_test_split(X_scaled_im, training_y_p, test_size=0.1, random_state=5)
        stacking_model_p.fit(X_train, y_train)
        pred_mlp = stacking_model_p.predict(X_test)

        stacking_model_a.fit(X_scaled_a, training_y_a)
        stacking_model_m.fit(X_scaled_m, training_y_m)
        stacking_model_p.fit(X_scaled_im, training_y_p)
        # well_type = "Ⅲ类井"
        # Get raw variable values from frontend or use default values
        raw_var_values = {}
        for camel_case_var, snake_case_var in mapping.items():
            if camel_case_var in ['wellType', 'coreArea']:
                raw_var_values[snake_case_var] = request.args.get(camel_case_var)
            else:
                raw_var_values[snake_case_var] = float(
                    request.args.get(camel_case_var, default_var_values[snake_case_var]))

        new_data = pd.DataFrame([raw_var_values], columns=X_all.columns)
        print(new_data['liquid_strength'])
        new_data['well_type'] = well_type
        new_data['core_area'] = core_area
        new_data, _, _ = encode_categorical_variables(new_data, label_encoder_type, label_encoder_region)

        def predict_para(newdata, scaler_a, scaler_m, model_a, model_m):
            """Scale `newdata` with each target's scaler and predict a and m."""
            new_scaled_a = scaler_a.transform(newdata)
            new_scaled_m = scaler_m.transform(newdata)
            a_predict = model_a.predict(new_scaled_a)
            m_predict = model_m.predict(new_scaled_m)
            return a_predict, m_predict

        # a_values, m_values = predict_para(newdata = new_data, scaler_a = scaler_a, scaler_m = scaler_m, scaler_p = scaler_p, model_a=stacking_model_a, model_m=stacking_model_m, model_p=stacking_model_p)
        # Define bounds for optimization
        cost_var = ['drilling_depth', 'liquid_strength', 'sand_strength', 'af_stage_length', 'crowd_distance']
        new_adjust_var = adjust_var.copy()
        # Per-variable unit cost coefficients — TODO confirm units/source
        cost_para = [0.2688, (0.01813113 + 0.00131927), (0.044 + 0.036), 0.06329, 1 / 42.028]

        # NOTE(review): this local eur_function shadows the one imported from
        # EUR_predict and appears unused below (eur_new_function is used instead).
        def eur_function(first_production, a_fit, m_fit, dur_eur=20):
            """Yearly productions and total EUR over `dur_eur` 330-day years."""
            start_value = 331
            end_value = 330 * dur_eur
            dur = np.arange(start_value, end_value + 1)
            inital_q = intial_production_calculation(first_production, a_fit, m_fit, days=330)
            daily_pro = duong_decay((dur, np.repeat(inital_q, len(dur))), a_fit, m_fit)
            year_production = sum_year_production(daily_pro, 330)
            year_production_all = np.insert(year_production, 0, first_production)
            return year_production_all, sum(year_production_all)

        # Location selects the yearly decline ratios and business costs
        location = request.args.get('Location', 'new')
        if location == 'new':
            dj_ratio = [0.22, 0.22, 0.35, 0.28, 0.23, 0.18, 0.15, 0.12, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10,
                        0.10, 0.10, 0.10]
            business_cost = [0, 231.24, 365.31, 307.36, 250.72, 271.33, 220.50, 190.42, 160.69, 147.23, 137.79, 130.93,
                             125.40, 120.33, 115.65, 111.35, 107.40, 103.76, 100.41, 97.33]
        else:
            dj_ratio = [0.57, 0.55, 0.30, 0.23, 0.20, 0.15, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10,
                        0.10, 0.10, 0.10]
            business_cost = [0] * 20

        ######
        def calculate_hash(parameters):
            """Derive a deterministic 32-bit seed from the parameter values."""
            param_str = str(parameters)
            hash_value = hashlib.md5(param_str.encode()).hexdigest()
            seed_value = int(hash_value, 16) % (2 ** 32)
            return int(seed_value)

        def run_pso(parameters, param_bounds, new_data, adjust_var, cost_var, swarmsize=swarm_size,
                    maxiter=max_iteration, use_fixed_seed=True,
                    num_points=500):
            """
            Particle-swarm optimisation over a discretised search space built
            around the initial values in `new_data`; each parameter's change
            only affects its own interval partition.

            Args:
            - parameters: initial parameter values (used to seed the RNG)
            - param_bounds: list of (lower, upper) bounds per parameter
            - new_data: DataFrame holding the initial values (mutated in place
              by the objective function on every evaluation)
            - adjust_var: names of the parameters being optimised
            - swarmsize: swarm size
            - maxiter: maximum iterations
            - use_fixed_seed: whether to seed the RNG deterministically
            - num_points: number of discretisation points

            Returns:
            - best_solution: best solution found
            - best_fitness: best objective value
            - optimization_history_df: per-evaluation loss history

            NOTE(review): the pso() call below uses the outer swarm_size /
            max_iteration instead of the swarmsize / maxiter parameters —
            currently equal values, but the parameters are effectively ignored.
            """
            optimization_history = []

            def build_discretized_search_space(new_data, adjust_var, param_bounds, num_points=500):
                """Build per-variable discrete grids, denser on the side with more range."""
                discrete_spaces = {}
                for i, var in enumerate(adjust_var):
                    lb, ub = param_bounds[i]
                    initial_value = new_data.iloc[0][var]

                    # Range below / above the initial value (asymmetric weighting)
                    range_below = max(0, initial_value - lb) * 0.9
                    range_above = max(0, ub - initial_value) * 1.1
                    total_range = range_below + range_above

                    # Guard against a degenerate zero-width range
                    if total_range <= 0:
                        discrete_spaces[var] = np.array([initial_value])
                        continue

                    # Allocate points proportionally to each side's range
                    points_below = max(1, int(num_points * (range_below / total_range)))
                    points_above = max(1, num_points - points_below)

                    # Build the discrete grid for this variable
                    below_space = np.linspace(lb, initial_value, points_below, endpoint=False)
                    above_space = np.linspace(initial_value, ub, points_above)
                    discrete_spaces[var] = np.unique(np.concatenate([below_space, above_space]))

                return discrete_spaces

            # Discretised search space
            discrete_spaces = build_discretized_search_space(new_data, adjust_var, param_bounds, num_points)

            # Seed the RNGs deterministically from the initial parameters
            seed_value = calculate_hash(parameters)
            if use_fixed_seed:
                random.seed(seed_value)
                np.random.seed(seed_value)

            def map_to_discrete_space(position, discrete_spaces, adjust_var):
                """
                Snap each continuous coordinate to its nearest discrete grid point.
                """
                return [
                    min(discrete_spaces[var], key=lambda x: abs(x - pos))
                    for pos, var in zip(position, adjust_var)
                ]

            def objective_function(parameters):
                """
                Wrapped objective: negative EUR plus a penalty when the
                profitability constraint (revenue_rate >= 0) is violated.
                """
                # Snap to the discrete search grid
                discrete_parameters = map_to_discrete_space(parameters, discrete_spaces, adjust_var)
                # Update new_data in place with the candidate values
                new_data[adjust_var] = discrete_parameters
                # Predict first-year production, then revenue / cost
                first_production = stacking_model_p.predict(scaler_p.transform(new_data))
                year_production, result_eur_production = eur_new_function(first_production, ratio=dj_ratio)
                revenue_new, _ = revenue_new_function(year_production, ratio=dj_ratio, qj=gas_price,
                                                      business=business_cost)
                cost_new = cost_new_function(new_data, cost_var, para=cost_para)
                revenue_rate = (revenue_new - cost_new) / cost_new
                constraint_penalty = max(0, -revenue_rate) * 1e6
                loss = -result_eur_production + constraint_penalty
                optimization_history.append(loss)
                return loss

            # Run the PSO optimisation
            best_solution, best_fitness = pso(
                objective_function,
                lb=[bound[0] for bound in param_bounds],
                ub=[bound[1] for bound in param_bounds],
                swarmsize=swarm_size,
                maxiter=max_iteration
            )

            # Record the loss history
            optimization_history_df = pd.DataFrame({
                'iteration': list(range(1, len(optimization_history) + 1)),
                'loss': optimization_history
            })

            return best_solution, best_fitness, optimization_history_df

        # Define bounds for optimization
        param_bounds = [(down_bounds[var], up_bounds[var]) for var in adjust_var]
        print(param_bounds)
        # Call PSO with discretized search space
        parameters = new_data.iloc[0][adjust_var].values.tolist()
        print(parameters)
        # Run the optimisation
        best_solution, best_fitness, optimization_history_df = run_pso(
            parameters=parameters,  # initial parameter values
            param_bounds=param_bounds,  # per-parameter bounds
            new_data=new_data,  # DataFrame holding the parameters
            adjust_var=adjust_var,  # names of the parameters to optimise
            cost_var=cost_var,
            swarmsize=swarm_size,  # swarm size
            maxiter=max_iteration,  # maximum iterations
            use_fixed_seed=True  # use a deterministic random seed
        )
        print(111)
        optimization_history_df['id'] = [str(uuid.uuid4()) for _ in range(optimization_history_df.shape[0])]
        # Re-encode the request values with the optimised adjustables applied
        after_data = pd.DataFrame([raw_var_values])
        after_data['well_type'] = well_type
        after_data['core_area'] = core_area
        after_data, _, _ = encode_categorical_variables(after_data, label_encoder_type, label_encoder_region)
        after_data[adjust_var] = best_solution
        # Derived feature: clusters per stage = stage length / cluster spacing
        after_data['subject_hop_crowd'] = after_data['segment_length'] / after_data['crowd_distance']
        pred_first_production = stacking_model_p.predict(scaler_p.transform(after_data))
        print(pred_first_production)
        year_production, result_eur_production = eur_new_function(pred_first_production, ratio=dj_ratio)
        revenue_result, revenue_year = revenue_new_function(year_production, ratio=dj_ratio, qj=gas_price)
        cost_result = cost_new_function(after_data, var=cost_var, para=cost_para)
        syl = float((revenue_result - cost_result) / cost_result)
        year_production, result_eur_production = eur_new_function(pred_first_production, ratio=dj_ratio)

        history_name = "gas_optimize_history"
        with engine.connect() as conn:
            conn.execute(text(f"TRUNCATE TABLE {history_name}"))

        optimization_history_df.to_sql('gas_optimize_history', con=engine, if_exists='append', index=False)

        # One column per forecast year for revenue and gas production
        for i in range(20):
            after_data[f'revenue_year{i + 1}'] = revenue_year[i]
            after_data[f'gas_production_year{i + 1}'] = year_production[i]

        after_data['total_cost'] = cost_result
        after_data['total_revenue'] = revenue_result
        after_data['yield'] = syl

        after_data['yield'] = after_data['yield'].clip(lower=-1, upper=1)  # clamp yield to a sane range
        after_data['yield'] = after_data['yield'].round(6)  # keep 6 decimal places

        # Replace NaN with None to avoid database insert errors
        after_data = after_data.replace({np.nan: None})

        # Ensure every numeric column is stored as float
        numeric_columns = after_data.select_dtypes(include=[np.number]).columns
        after_data[numeric_columns] = after_data[numeric_columns].astype(float)
        after_data['id'] = [str(uuid.uuid4()) for _ in range((after_data.shape[0]))]

        write_name = 'gas_well_para_optimization'
        with engine.connect() as connection:
            connection.execute(text(f"TRUNCATE TABLE {write_name}"))
            # Write the optimised parameters via pandas to_sql
            after_data.to_sql(write_name, con=engine, index=False, if_exists='append')

        a_values, m_values = predict_para(newdata=new_data, scaler_a=scaler_a, scaler_m=scaler_m,
                                          model_a=stacking_model_a, model_m=stacking_model_m)

        # Initial rate scaled by 0.9 — TODO confirm the safety-factor rationale
        q_i = intial_production_calculation(pred_first_production, a_values, m_values, 330) * 0.9

        duong_days = np.arange(1, 20 * 330)
        predicted_production = duong_decay((duong_days, np.repeat(q_i, len(duong_days))), a=a_values, m=m_values)

        duong_df = pd.DataFrame({
            'days': duong_days,
            'fit_production': predicted_production,
            'a_values': np.repeat(a_values, len(duong_days)),
            'm_values': np.repeat(m_values, len(duong_days))}
        )

        duong_df['id'] = [str(uuid.uuid4()) for _ in range((duong_df.shape[0]))]
        write_name_2 = 'gas_duong_optimization'
        with engine.connect() as connection:
            connection.execute(text(f"TRUNCATE TABLE {write_name_2}"))
        # Then append the fitted decline curve via to_sql()
        duong_df.to_sql(write_name_2, con=engine, index=False, if_exists='append')

        return jsonify({
            "state": "success",
            "code": 200,
            "best_solution": best_solution.tolist(),
            "best_fitness": best_fitness
        })

    except Exception as e:
        return jsonify({"error": str(e)})


@app.route('/profit', methods=['POST', 'GET'])
def profit():
    """Build the per-well profit / NPV analysis from four uploaded Excel workbooks.

    Form fields:
        dr: discount rate (default 0.06).
        cy: current year (default 2024; overridden below by the latest year
            column actually present in the production workbook).

    Required multipart files: ``gas_prices``, ``gas_year_production``,
    ``total_cost``, ``construct_cost_df``.

    Writes the wide result to table ``gas_profit_analysis`` and a long-format
    (well, year) panel to ``gas_profit_analysis_panel``, then returns a JSON
    status payload.  Any processing failure returns ``{"error": ...}`` 500.
    """
    try:
        dr = float(request.form.get('dr', 0.06))  # discount rate, default 0.06
        cy = int(request.form.get('cy', 2024))  # current year, default 2024
    except ValueError:
        return jsonify({"error": "贴现率或年份数据错误"}), 300

    # All four workbooks must be present in the request.
    required_files = ['gas_prices', 'gas_year_production', 'total_cost', 'construct_cost_df']
    for file_key in required_files:
        if file_key not in request.files:
            return jsonify({"error": f"Missing file: {file_key}"}), 400

    # Grab the uploaded file handles.
    file_gas_prices = request.files['gas_prices']
    file_gas_year_production = request.files['gas_year_production']
    file_total_cost = request.files['total_cost']
    file_construct_cost_df = request.files['construct_cost_df']

    # Reject the request if any upload slot was submitted empty.
    if file_gas_prices.filename == '' or file_gas_year_production.filename == '' or file_total_cost.filename == '' or file_construct_cost_df.filename == '':
        return jsonify({"error": "One or more files are not selected"}), 420

    try:
        # Parse the uploaded Excel workbooks.
        gas_prices_df = pd.read_excel(file_gas_prices)
        year_production_df = pd.read_excel(file_gas_year_production)
        total_cost_df = pd.read_excel(file_total_cost)
        construct_cost_df = pd.read_excel(file_construct_cost_df)
        year_columns = [str(col) for col in year_production_df.columns if str(col).isdigit()]
        # The effective "current year" is data-driven: the latest year column
        # found in the production workbook overrides the form value.
        cy = max(map(int, year_columns))

        first_year_df = year_production_df[['well_no', 'first_production']]
        year_production_df = year_production_df.drop(columns=['first_production'])
        # Annual cash outflow per well.
        total_cost_year_df = calculate_annual_total_cost(total_cost_df, construct_cost_df, year_production_df)
        generated_cashflow_out = generate_cashflow_out(total_cost_year_df)
        # str(x) guards against integer year labels, which have no .isdigit();
        # the resulting name is the same either way.
        generated_cashflow_out.rename(columns=lambda x: f'cash_outflow{x}' if str(x).isdigit() else x, inplace=True)

        # Present value of the annual outflows.
        cash_outflow_years = [int(col.replace('cash_outflow', '')) for col in generated_cashflow_out.columns if
                              col.startswith('cash_outflow')]
        present_cashoutflow = calculate_present_cashoutflow(generated_cashflow_out, cash_outflow_years,
                                                            discount_rate=dr, current_year=cy)
        # Annual cash income and its present value.
        present_cashincome = calculate_cash_income(year_production_df, gas_prices_df, discount_rate=dr,
                                                   current_year=cy)

        # Merge outflow and income sides.
        combined_profit_df = combine_data(present_cashoutflow, present_cashincome)

        # Annual and cumulative net present value.
        df_net_present_value = calculate_net_present_value(combined_profit_df)
        df_all_profit = pd.merge(combined_profit_df, df_net_present_value, on='well_no', how='left')

        # Production duration and payback horizon.
        df_all_profit['production_dur'] = cy - df_all_profit['construct_year']
        reclaim_year = add_first_positive_year(df_all_profit)
        df_all_profit['reclaim_year'] = reclaim_year
        df_all_profit['reclaim_dur'] = df_all_profit['reclaim_year'] - df_all_profit['construct_year']

        # ----- Future capacity forecast and payback re-evaluation -----
        # Cumulative decline ratios by area class (year 2..20), given as
        # percentage strings and converted to fractions below.
        df_dr = {
            "时间": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
            "核心": ["36.00%", "52.00%", "66.00%", "75.00%", "79.00%", "83.00%", "88.00%", "91.00%", "92.00%", "93.00%",
                     "93.00%", "93.00%", "93.00%", "93.00%", "93.00%", "93.00%", "93.00%", "93.00%", "93.00%"],
            "次核心": ["38.00%", "54.00%", "68.00%", "76.00%", "81.00%", "85.00%", "89.00%", "92.00%", "93.00%",
                       "93.00%", "93.00%", "93.00%", "93.00%", "93.00%", "93.00%", "93.00%", "93.00%", "93.00%",
                       "93.00%"],
            "外围": ["46.00%", "60.00%", "72.00%", "78.00%", "82.00%", "87.00%", "91.00%", "92.00%", "93.00%", "93.00%",
                     "93.00%", "93.00%", "93.00%", "93.00%", "93.00%", "93.00%", "93.00%", "93.00%", "93.00%"]
        }
        df_dr = pd.DataFrame(df_dr)
        df_dr["核心"] = df_dr["核心"].str.rstrip('%').astype('float') / 100
        df_dr["次核心"] = df_dr["次核心"].str.rstrip('%').astype('float') / 100
        df_dr["外围"] = df_dr["外围"].str.rstrip('%').astype('float') / 100

        # Fixed operating cost assumption per production year (1..20).
        df_fixed_cost = pd.DataFrame({
            'year': list(range(1, 21)),
            'fixed_cost': [68.3, 64.43396226, 60.78675685, 57.34599703, 54.0999972, 51.03773321, 48.14880491,
                           45.42340086, 42.85226496, 40.42666506,
                           38.13836326, 35.97958798, 33.94300753, 32.02170522, 30.20915587, 28.49920365, 26.88604118,
                           25.36418979, 23.92848093, 22.57403862]
        })
        first_year_production_df = create_first_year_production_df(first_year_df, df_all_profit)
        production_predictions_df = predict_production(first_year_production_df, df_dr)
        production_predictions_df = pd.merge(production_predictions_df, df_all_profit[['well_no', 'construct_year']],
                                             how='left', on='well_no')
        revenues_df = calculate_revenue(production_predictions_df, df_fixed_cost, df_all_profit, discount_rate=dr)
        net_present_values_df = calculate_cumulative_net_present_value(revenues_df, df_all_profit, cy,
                                                                       first_year_production_df['construct_year'].max())
        df_combined_profit = pd.merge(df_all_profit, net_present_values_df, on='well_no', how='left')
        df_combined_profit = calculate_first_positive_year(df_combined_profit, first_year_production_df)
        df_combined_profit = set_exceeded_production_to_none(df_combined_profit)

        # ----- Persist to SQL -----
        metadata = MetaData()
        df = df_combined_profit.copy()
        all_years = [int(col[-4:]) for col in df_combined_profit.columns if
                     col.startswith('cumulative_net_present_value')]
        df['id'] = [str(uuid.uuid4()) for _ in range(df.shape[0])]
        # BUGFIX: the previous apply(None/int) produced an object-dtype column
        # whose None entries made the subtraction below raise.  to_numeric
        # parses numeric strings and coerces the "20年以上" (never recovered)
        # sentinel — and any other non-numeric value — to NaN.
        df['reclaim_year'] = pd.to_numeric(df['first_positive_year'], errors='coerce')
        df['reclaim_dur'] = df['reclaim_year'] - df['construct_year']

        columns = [
            Column('id', VARCHAR(50), primary_key=True),
            Column('well_no', VARCHAR(50), comment="井号"),
            Column('construct_year', Integer, comment="建井年份"),
            Column('construct_cost_year', Integer, comment="建井成本流出年份"),
            Column('construct_cost', DECIMAL(20, 4), comment="建井成本"),
            Column('well_type', VARCHAR(50), comment="气井类型"),
            Column('core_area', VARCHAR(50), comment="核心区"),
            Column('production_dur', Integer, comment="已生产年份"),
            Column('first_positive_year', VARCHAR(100), comment="成本回收年份"),
            Column('year_difference', VARCHAR(100), comment="成本回收周期"),
            Column('reclaim_year', Integer, comment="回收年份"),
            Column('reclaim_dur', Integer, comment="回收周期")

        ]

        # One set of money columns per year in the analysis horizon.
        for year in all_years:
            columns.extend([
                Column(f'cash_outflow{year}', DECIMAL(20, 4), comment=f"现金支出{year}"),
                Column(f'present_value_out{year}', DECIMAL(20, 4), comment=f"现值支出{year}"),
                Column(f'cash_income{year}', DECIMAL(20, 4), comment=f"现金收入{year}"),
                Column(f'present_value_income{year}', DECIMAL(20, 4), comment=f"现值收入{year}"),
                Column(f'net_present_value{year}', DECIMAL(20, 4), comment=f"分年净现值{year}"),
                Column(f'cumulative_net_present_value{year}', DECIMAL(20, 4), comment=f"累计净现值{year}")
            ])

        # Create the table if it does not exist yet.
        financial_data = Table('gas_profit_analysis', metadata, *columns, extend_existing=True)
        metadata.create_all(engine)

        def check_and_add_columns(engine, df, table_name):
            """Add any DataFrame column missing from `table_name` as a DOUBLE column.

            BUGFIX: the previous version followed each ALTER with a
            PostgreSQL-style ``COMMENT ON COLUMN`` statement (invalid on
            MySQL) and read the comment via ``df.columns[col]`` — indexing a
            pandas Index with a string label, which raises as soon as a new
            column exists.  Comments for the declared schema are applied
            separately by add_column_comments_mysql(), so the broken comment
            step is dropped here.
            """
            inspector = inspect(engine)
            existing_columns = [col['name'] for col in inspector.get_columns(table_name)]
            new_columns = [col for col in df.columns if col not in existing_columns]

            # One ALTER per missing column; dynamically generated year columns
            # are numeric, so DOUBLE is a safe default type.
            with engine.connect() as conn:
                for col in new_columns:
                    conn.execute(text(f"ALTER TABLE {table_name} ADD COLUMN {col} DOUBLE"))

        write_name = 'gas_profit_analysis'
        # Bring the table schema up to date before inserting.
        check_and_add_columns(engine, df, write_name)
        # Clear out the previous run's rows.
        with engine.connect() as conn:
            conn.execute(text(f"TRUNCATE TABLE {write_name}"))
        df.to_sql(write_name, con=engine, if_exists='append', index=False)

        def add_column_comments_mysql(engine, table_name, columns):
            """Apply the declared column comments via MySQL's MODIFY COLUMN syntax."""
            inspector = inspect(engine)
            existing_columns = [col['name'] for col in inspector.get_columns(table_name)]
            with engine.connect() as conn:
                for column in columns:
                    if column.name in existing_columns and column.comment:
                        alter_comment_query = f"ALTER TABLE {table_name} MODIFY COLUMN {column.name} {str(column.type.compile(engine.dialect))} COMMENT '{column.comment}'"
                        conn.execute(text(alter_comment_query))

        # Attach the comments to the freshly written table.
        add_column_comments_mysql(engine, write_name, columns)

        # ----- Build and persist the long-format (well, year) panel -----
        new_df_columns = ['well_no', 'year', 'construct_year', 'cumulative_net_present_value', 'present_value_out',
                          'present_value_income', 'first_positive_year', 'year_difference']
        new_rows = []
        for year in all_years:
            for _, row in df.iterrows():
                new_rows.append({
                    'well_no': row['well_no'],
                    'year': year,
                    'construct_year': row['construct_year'],
                    'cumulative_net_present_value': row.get(f'cumulative_net_present_value{year}', None),
                    'present_value_out': row.get(f'present_value_out{year}', None),
                    'present_value_income': row.get(f'present_value_income{year}', None),
                    'first_positive_year': row.get('first_positive_year', None),
                    'year_difference': row.get('year_difference', None)
                })

        new_df = pd.DataFrame(new_rows, columns=new_df_columns)
        new_df['id'] = [str(uuid.uuid4()) for _ in range(new_df.shape[0])]
        # Replace NaN with None so the DB insert does not choke.
        new_df = new_df.replace({np.nan: None})
        new_table_name = 'gas_profit_analysis_panel'

        # Panel table schema.
        new_columns = [
            Column('id', VARCHAR(50), primary_key=True),
            Column('well_no', VARCHAR(50), comment="井号"),
            Column('year', Integer, comment="年份"),
            Column('construct_year', Integer, comment="建井年份"),
            Column('cumulative_net_present_value', DECIMAL(20, 4), comment="累计净现值"),
            Column('present_value_out', DECIMAL(20, 4), comment="支出现值"),
            Column('present_value_income', DECIMAL(20, 4), comment="流入现值"),
            Column('first_positive_year', VARCHAR(255), comment="成本回收年份"),
            Column('year_difference', VARCHAR(255), comment="成本回收周期")
        ]

        new_financial_data = Table(new_table_name, metadata, *new_columns, extend_existing=True)
        metadata.drop_all(engine, [new_financial_data])  # drop any stale table first
        metadata.create_all(engine)  # recreate with the current schema

        with engine.connect() as conn:
            conn.execute(text(f"DELETE FROM {new_table_name}"))

        new_df.to_sql(new_table_name, con=engine, if_exists='append', index=False)

        # Physically re-order the panel rows by (well_no, year) via a temp table.
        with engine.connect() as conn:
            sort_query = f"CREATE TABLE temp_table AS SELECT * FROM {new_table_name} ORDER BY well_no, year;"
            conn.execute(text(sort_query))
            conn.execute(text(f"DROP TABLE {new_table_name}"))
            conn.execute(text(f"ALTER TABLE temp_table RENAME TO {new_table_name}"))

        # Success.
        return jsonify({"message": "Success", "code": 200})

    except Exception as e:
        return jsonify({"error": str(e)}), 500


@app.route('/new_irr', methods=['GET', 'POST'])
def upload_file():
    """Compute NPV / IRR / payback metrics for gas production.

    POST: loads production and investment data from the database, computes
    the cash-flow table, exports it to ``new_output.xlsx`` and saves both
    the table and a five-row metrics summary back to the database.

    GET: same computation, but driven by a built-in 20-year default
    scenario whose yield / construction-investment columns are overridden
    from the database where data is available.

    The two branches previously carried byte-identical copies of the
    metrics-saving helper and the ``gas_metrics`` Table declaration; both
    are now defined once below and shared.
    """
    print(f"收到 {request.method} 请求到 /new_irr 端点")
    print("开始处理请求...")

    def save_metrics_data(engine, metrics_table, total_npv, internal_rate_of_return, payback_period,
                          total_investment, production_eur):
        """Replace the contents of ``gas_metrics`` with the five summary metrics.

        ``metrics_table`` is accepted for signature parity with the original
        code but is not used directly: rows are written via pandas.to_sql.
        """
        # Payback period may arrive as a string like "8.3年"; strip the unit
        # and round to a whole number of years.
        if isinstance(payback_period, str) and '年' in payback_period:
            payback_period_value = int(round(float(payback_period.replace('年', ''))))
        else:
            payback_period_value = int(round(float(payback_period)))

        metrics_data = pd.DataFrame([
            {'id': str(uuid.uuid4()), 'metric_name': 'NPV', 'metric_values': total_npv},
            {'id': str(uuid.uuid4()), 'metric_name': '内部收益率', 'metric_values': internal_rate_of_return},
            {'id': str(uuid.uuid4()), 'metric_name': '投资回收年限', 'metric_values': payback_period_value},
            {'id': str(uuid.uuid4()), 'metric_name': '总投资', 'metric_values': total_investment},
            {'id': str(uuid.uuid4()), 'metric_name': '总产气量', 'metric_values': production_eur},
        ])

        try:
            with engine.connect() as conn:
                conn.execute(text("TRUNCATE TABLE gas_metrics"))
                metrics_data.to_sql('gas_metrics', con=engine, if_exists='append', index=False)
                print("Metrics data inserted successfully.")
        # NOTE(review): SQLAlchemyError is not imported in the visible header;
        # it presumably arrives via one of the wildcard imports — confirm.
        except SQLAlchemyError as e:
            print(f"Error saving metrics data: {e}")

    def build_metrics_table():
        """Declare the ``gas_metrics`` schema (kept for parity with the original code)."""
        metadata = MetaData()
        return Table(
            'gas_metrics', metadata,
            Column('id', VARCHAR(36), primary_key=True, comment='主键ID'),
            Column('metric_name', VARCHAR(36), comment='指标名称'),
            Column('metric_values', DECIMAL(20, 4), comment='指标值')
        )

    if request.method == 'POST':
        try:
            print("开始从数据库加载数据...")
            df = new_load_gas_production_and_investment_data()
            print(f"数据加载完成，数据行数: {len(df) if not df.empty else 0}")

            if df.empty:
                return jsonify({"code": 500, "state": "error", "message": "No data found in the database."}), 500

            print("开始计算输出...")
            print(df)

            output_df, total_npv, internal_rate_of_return, payback_period = new_calculate_output(df)
            print(f"计算完成: NPV={total_npv}, IRR={internal_rate_of_return}, 回收期={payback_period}")
            # Export the detailed table to Excel and persist it to the NPV table.
            output_filename = "new_output.xlsx"
            output_df.to_excel(output_filename, index=False)
            insert_data_into_gas_npv_table(output_df, engine)

            metrics_table = build_metrics_table()
            total_investment = sum(output_df["建设投资"])
            production_eur = sum(df["yield"])
            save_metrics_data(engine, metrics_table, total_npv, internal_rate_of_return, payback_period,
                              total_investment, production_eur)
            return jsonify({"code": 200, "state": "success", "message": "数据已成功计算并保存。"})

        except Exception as e:
            print(f"处理请求时出错: {str(e)}")
            import traceback
            traceback.print_exc()
            return jsonify({"code": 500, "state": "error", "message": f"处理请求时出错: {str(e)}"}), 500

    elif request.method == 'GET':
        try:
            # Built-in default 20-year scenario (one row per production year).
            default_data = {
                "instalments": [
                    f"第{i}年" for i in range(1, 21)
                ],
                "yield": [
                    5.5149, 9.6081, 3.6296, 2.2372, 1.7147, 1.4523, 1.2888, 1.1564, 1.0492,
                    0.9625, 0.8927, 0.8371, 0.7935, 0.7521, 0.7128, 0.6754, 0.6399, 0.6062,
                    0.5741, 0.5437
                ],
                "commodity_rate": [0.96] * 20,
                "quantity_goods": [
                    5.29, 9.22, 3.48, 2.15, 1.65, 1.39, 1.24, 1.11, 1.01, 0.92, 0.86, 0.80,
                    0.76, 0.72, 0.68, 0.65, 0.61, 0.58, 0.55, 0.52],
                "price": [1170] + [1275] * 19,
                "subsidy": [200] + [0] * 19,
                "urban_construction_tax": [0.12] * 20,
                "mineral_resources_tax_rate": [0] * 5 + [0.0532] * 15,
                "mineral_resource_compensation_rate": [0] * 20,
                "operating_fee": [230] * 20,
                "management_fee": [20] * 20,
                "selling_rate": [0.01] * 20,
                "income_tax_rate": [0.15] * 17 + [0.25] * 3,
                "operational_investment": [0] * 20,
                "liquidity": [0] * 20,
                "selling_fee": [0] * 20,
                "vat_rate": [0.09] * 20,
                "value_factor": [0.8] * 20,
                "security_fee_factor": [7.5] * 20,
                "construction_investment": [0] * 20,
                "finance_expenses": [0] * 20,
                "disposal_fee": [0] * 20,
            }
            default_data = pd.DataFrame(default_data)

            # Load production and construction-investment series from the DB.
            production_data, investment_data = load_gas_production_and_investment_data()

            def fit_to_20(values):
                """Truncate or zero-pad a 1-D series to exactly 20 entries."""
                if len(values) > 20:
                    return values[:20]
                if len(values) < 20:
                    return np.append(values, [0] * (20 - len(values)))
                return values

            production_data = fit_to_20(production_data)
            investment_data = fit_to_20(investment_data)

            # Sanity check: both series must now be exactly 20 entries.
            assert len(production_data) == 20, f"产量数据长度应为20，实际为{len(production_data)}"
            assert len(investment_data) == 20, f"投资数据长度应为20，实际为{len(investment_data)}"

            # Override the default scenario with the real series (years 1..20).
            default_data.loc[:19, "yield"] = production_data
            default_data.loc[:19, "construction_investment"] = investment_data

            # Ensure a commodity_rate column exists.
            if 'gas_price' in default_data.columns and 'commodity_rate' not in default_data.columns:
                default_data['commodity_rate'] = default_data['gas_price']
            elif 'commodity_rate' not in default_data.columns:
                default_data['commodity_rate'] = 0.96  # fallback default
            total_investment = sum(default_data["construction_investment"])
            # Disposal fee charged only in the final (20th) year: 5% of total
            # construction investment.
            default_data["disposal_fee"] = [0] * 19 + [total_investment * 0.05]
            print("开始计算输出...")
            print(f"数据行数: {len(default_data)}")
            print(f"数据列: {default_data.columns.tolist()}")
            output_df, total_npv, internal_rate_of_return, payback_period = new_calculate_output(default_data)

            # Persist the detailed output and the metrics summary.
            insert_data_into_gas_npv_table(output_df, engine)
            metrics_table = build_metrics_table()
            total_investment = sum(output_df["建设投资"])
            production_eur = sum(production_data)
            save_metrics_data(engine, metrics_table, total_npv, internal_rate_of_return, payback_period,
                              total_investment, production_eur)
            return jsonify({"code": 200, "state": "success", "message": "数据已成功计算并保存。"})

        except Exception as e:
            print(f"处理GET请求时出错: {str(e)}")
            import traceback
            traceback.print_exc()
            return jsonify({"code": 500, "state": "error", "message": f"处理请求时出错: {str(e)}"}), 500

@app.route('/run_update', methods=['GET'])
def run_update():
    """Refresh every Duong-fit table from the latest per-well production data.

    Pipeline: raw daily records -> Duong fit (all wells, then wells with
    more than six months on production) -> parameter table merged with base
    well attributes -> EUR prediction.  Each result fully replaces its
    target table.  Returns a JSON status payload.
    """
    def _overwrite_table(frame, table):
        # Clear the target table, then bulk-append the new rows.
        with engine.connect() as connection:
            connection.execute(text(f"TRUNCATE TABLE {table}"))
        frame.to_sql(table, con=engine, index=False, if_exists='append')

    try:
        # Load the raw per-well production records and keep only the fields we use.
        raw = pd.read_sql('gas_production_well', con=engine)
        raw = raw[['well_no', 'production_gas_day', 'collect_date', 'production_gas_year', 'production_time']]
        raw['production_time'] = raw['production_time'].fillna(0)

        # Fit over all wells: add a 1-based per-well day counter in date order.
        base = raw.drop('production_time', axis=1).copy()
        base['collect_date'] = pd.to_datetime(base['collect_date'])
        base['days'] = base.sort_values(by=['well_no', 'collect_date']).groupby('well_no').cumcount() + 1

        fit_all = fit_well_production(process_gas_data(base, engine), duong_decay, cumulative_production)
        _overwrite_table(fit_all, 'gas_well_fit')

        # Repeat the fit restricted to wells with more than six months on production.
        mature = raw[raw['production_time'] > 6].copy()
        mature['collect_date'] = pd.to_datetime(mature['collect_date'])
        mature['days'] = mature.sort_values(by=['well_no', 'collect_date']).groupby('well_no').cumcount() + 1

        fit_mature = fit_well_production(process_gas_data(mature, engine), duong_decay, cumulative_production)
        _overwrite_table(fit_mature, 'gas_well_fit_exclude')

        # Join base well attributes with the fresh fit results.
        base_wells = pd.read_sql_table('gas_base_well', con=engine)
        fits = pd.read_sql_table('gas_well_fit', con=engine)
        base_wells['core_area'] = base_wells['core_area'].replace({'外围1': '外围', '外围2': '外围'})
        merged = pd.merge(base_wells, fits, how='left', on='well_no').drop(['is_coordinate', 'is_production'],
                                                                           axis=1)
        _overwrite_table(merged, 'gas_well_para')

        # Combine fitted parameters with daily production and predict EUR.
        fit_params = pd.concat(pd.read_sql_table('gas_well_fit', con=engine, chunksize=10000), ignore_index=True)
        daily = pd.concat(pd.read_sql_table('gas_daily_production', con=engine, chunksize=10000),
                          ignore_index=True)

        eur = predict_and_merge_production(fit_params, daily, duong_decay)
        eur['id'] = [str(uuid.uuid4()) for _ in range(eur.shape[0])]
        _overwrite_table(eur, 'gas_duong_eur')

        return jsonify({"message": "Data successfully saved.", "state": "success", "code": 200})
    except Exception as e:
        return jsonify({"error": str(e)}), 500


@app.route('/multi_irr', methods=['GET'])
def multi_irr():
    """Run the NPV / IRR computation once per scenario batch.

    Per-instalment overrides (yield, construction investment) come from
    ``gas_npv_multiple_scenarios_exports``, keyed by ``batch_id``.  Detailed
    output rows go to ``gas_npv_output_multiple_scenarios`` and the summary
    metrics to ``gas_metrics_multiple_scenarios``; both are truncated first.
    """
    import re  # hoisted here: previously re-imported inside the per-batch loop

    try:
        # Clear the two output tables before the run.
        with engine.connect() as conn:
            conn.execute(text("TRUNCATE TABLE gas_npv_output_multiple_scenarios"))
            conn.execute(text("TRUNCATE TABLE gas_metrics_multiple_scenarios"))

        # Base scenario data from the database.
        work_df = new_load_gas_production_and_investment_data()
        if work_df.empty:
            return jsonify({"code": 500, "state": "error", "message": "数据库中没有找到数据"}), 500

        # Per-batch override rows.
        query = """
            SELECT batch_id, instalments, yield, construction_investment
            FROM gas_npv_multiple_scenarios_exports
        """
        batch_df = pd.read_sql(query, engine)

        if batch_df.empty:
            return jsonify({"code": 500, "state": "error",
                            "message": "gas_npv_multiple_scenarios_exports 表中没有找到批次数据"}), 500

        # Process each unique batch in turn.
        batch_ids = batch_df['batch_id'].unique()

        for batch_id in batch_ids:
            batch_data = batch_df[batch_df['batch_id'] == batch_id]

            # Apply this batch's overrides to the matching instalment rows.
            # NOTE(review): work_df is mutated across iterations, so an
            # instalment not overridden by the current batch keeps the value
            # written by a previous batch — confirm this carry-over is intended.
            for _, row in batch_data.iterrows():
                instalment_value = row['instalments']
                work_df.loc[work_df['instalments'] == instalment_value, 'yield'] = float(row['yield'])
                work_df.loc[work_df['instalments'] == instalment_value, 'construction_investment'] = float(
                    row['construction_investment'])

            # Convert any Decimal cells (from the DB driver) to float.
            # NOTE: DataFrame.applymap is deprecated since pandas 2.1 in
            # favour of DataFrame.map; kept for older-pandas compatibility.
            # NOTE(review): `decimal` is not imported in the visible header —
            # presumably via a wildcard import; confirm.
            work_df = work_df.applymap(lambda x: float(x) if isinstance(x, decimal.Decimal) else x)

            # Compute the batch's metrics.
            try:
                output_df, total_npv, internal_rate_of_return, payback_period = new_calculate_output(work_df)
            except Exception as e:
                print(f"在 calculate_output 中出错，batch_id {batch_id}: {e}")
                return jsonify({"code": 500, "state": "error",
                                "message": f"在 calculate_output 中出错，batch_id {batch_id}: {e}"}), 500

            # Tag the detailed output with the batch and fresh row ids.
            output_df['batch_id'] = batch_id
            output_df['id'] = [str(uuid.uuid4()) for _ in range(len(output_df))]

            # Detailed copy without the yield column (if present).
            detailed_output_df = output_df.copy()
            if 'yield' in detailed_output_df.columns:
                detailed_output_df = detailed_output_df.drop(columns=['yield'])

            save_to_sql_multi(output_df, engine)

            # Batch totals.
            total_investment = work_df['construction_investment'].sum()  # total construction investment
            production_eur = work_df['yield'].sum()  # total production

            irr_value = None
            payback_period_value = None

            # Sanitize the IRR: None / NaN / inf all map to NULL.
            if internal_rate_of_return is not None:
                try:
                    irr_value = float(internal_rate_of_return)
                    if np.isnan(irr_value) or np.isinf(irr_value):
                        irr_value = None
                except (ValueError, TypeError):
                    irr_value = None

            # Payback may be a string like "8.3年"; keep only digits and dot.
            if isinstance(payback_period, str):
                number_str = re.sub(r'[^\d.]', '', payback_period)
                try:
                    payback_period_value = float(number_str)
                except ValueError:
                    payback_period_value = None
            else:
                payback_period_value = payback_period

            # Summary rows for gas_metrics_multiple_scenarios.
            metrics = [
                {"id": str(uuid.uuid4()), "batch_id": batch_id, "metric_name": "NPV",
                 "metric_values": float(total_npv)},
                {"id": str(uuid.uuid4()), "batch_id": batch_id, "metric_name": "内部收益率",
                 "metric_values": float(irr_value) if irr_value is not None else None},
                {"id": str(uuid.uuid4()), "batch_id": batch_id, "metric_name": "投资回收年限",
                 "metric_values": float(payback_period_value) if payback_period_value is not None else None},
                {"id": str(uuid.uuid4()), "batch_id": batch_id, "metric_name": "总投资",
                 "metric_values": float(total_investment)},
                {"id": str(uuid.uuid4()), "batch_id": batch_id, "metric_name": "总产气量",
                 "metric_values": float(production_eur)}
            ]

            metrics_df = pd.DataFrame(metrics)
            # CLEANUP: the original opened an engine.connect() context here but
            # then passed `engine` (not the connection) to to_sql — the unused
            # connection has been removed.
            metrics_df.to_sql('gas_metrics_multiple_scenarios', con=engine, if_exists='append', index=False)

        return jsonify({"code": 200, "state": "success", "message": "多场景IRR计算和数据保存成功。"})

    except Exception as e:
        print(f"Error: {str(e)}")
        return jsonify({"code": 500, "state": "error", "message": f"发生错误: {str(e)}"}), 500


@app.route('/zyModels', methods=['POST', 'GET'])  # retrain forecast models (route comment in original: "compute stimulated gas volume")
def zyModels():
    """Retrain per-well or per-platform SVR forecast models and persist results.

    Dispatches on the ``modelType`` query parameter:

    * ``'套压&油压预测模型'`` (casing & tubing pressure model): for every well,
      build lagged features of casing pressure (套压) and oil/tubing pressure
      (油压), train one SVR per well per target, then write test-set
      predictions, per-well MSE/MAE, and a 330-day recursive forecast into the
      ``gas_production_well_*`` tables.
    * ``'平台气量预测模型'`` (platform gas-rate model): the same pipeline at
      platform granularity on daily gas production, writing into the
      ``gas_production_platform_*`` tables, then issuing a blocking GET to the
      Java service at module-level ``url`` so it can pick up the new forecast.

    Query parameters:
        modelType (str): which model family to retrain; defaults to
            '套压&油压预测模型'.
        train_size (float): fraction of each series used as the nominal
            train/test split point; defaults to 0.8.

    Returns:
        dict: ``{"success": True, "message": "", "code": 200}`` on success.

    NOTE(review): if ``modelType`` matches neither branch the function falls
    through and returns ``None``, which Flask will reject at runtime — confirm
    whether an explicit error response is wanted.

    Side effects: each branch DELETEs and repopulates several MySQL tables via
    ``db.engine``; the platform branch additionally performs a synchronous
    HTTP GET to the Java back end.
    """
    modelType = request.args.get('modelType', '套压&油压预测模型')  # other accepted value: '平台气量预测模型' (platform gas model)
    print(modelType + ' 开始更新。。。')
    train_size = float(request.args.get('train_size', 0.8))
    if modelType == '套压&油压预测模型':
        # Run ORM queries inside the Flask application context.
        with app.app_context():
            # Load every record of the per-well production table.
            results = GasProductionWell.query.all()
            # Convert ORM rows to a list of dicts; the Chinese keys
            # (井号=well no., 日期=date, 套压=casing pressure, 油压=oil pressure)
            # are used as DataFrame column names below.
            data = [
                {'id': result.id, '井号': result.well_no, '日期': result.collect_date, '套压': result.cover_pressure,
                 '油压': result.oil_pressure}
                for result in results]

        # Build a DataFrame from the list of dicts.
        df = pd.DataFrame(data)
        # Ensure the date column is of datetime type.
        df['日期'] = pd.to_datetime(df['日期'])

        # Earliest and latest production date per well.
        wd_grouped = df.groupby('井号')['日期'].agg(['min', 'max']).reset_index()
        wd_grouped.columns = ['well_no', '最早生产日期', '最晚生产日期']
        # Production span in days (latest minus earliest date).
        wd_grouped['pro_days'] = (wd_grouped['最晚生产日期'] - wd_grouped['最早生产日期']).dt.days
        wd_grouped = wd_grouped[['well_no', 'pro_days']]

        # Group records by well number.
        grouped = df.groupby('井号')
        # Per-well lagged feature frames for casing pressure (套压).
        lagged_data = {}

        # Build lag features for each well.
        for well, group in grouped:
            group = group.dropna(subset=['日期'])

            # Sort chronologically so shift() produces true lags.
            group = group.sort_values(by='日期')

            # Start from the target column and append lag columns.
            lag_features = group[['套压']].copy()

            # Lag grid: daily lags 1..60, every 6 days for 61..180,
            # every 30 days for 181..330 — denser for the recent past.
            for lag in range(1, 60 + 1, 1):
                lag_features[f'套压_lag_{lag}'] = group['套压'].shift(lag)
            for lag in range(61, 180 + 1, 6):
                lag_features[f'套压_lag_{lag}'] = group['套压'].shift(lag)
            for lag in range(181, 330 + 1, 30):
                lag_features[f'套压_lag_{lag}'] = group['套压'].shift(lag)

            # Fill NaNs introduced by shifting with 0 (the original comment
            # said "drop rows with missing values", but the code fills instead).
            lag_features.fillna(0, inplace=True)

            lag_features['日期'] = group['日期']

            # Record the well number on each row.
            lag_features['井号'] = well

            # Store the processed frame for this well.
            lagged_data[well] = lag_features

        lagged_data1 = {}
        # Same lag-feature construction, but for oil pressure (油压).
        for well, group in grouped:
            group = group.dropna(subset=['日期'])
            # Sort chronologically so shift() produces true lags.
            group = group.sort_values(by='日期')

            # Start from the target column and append lag columns.
            lag_features = group[['油压']].copy()
            for lag in range(1, 60 + 1, 1):
                lag_features[f'油压_lag_{lag}'] = group['油压'].shift(lag)
            for lag in range(61, 180 + 1, 6):
                lag_features[f'油压_lag_{lag}'] = group['油压'].shift(lag)
            for lag in range(181, 330 + 1, 30):
                lag_features[f'油压_lag_{lag}'] = group['油压'].shift(lag)

            # Fill NaNs introduced by shifting with 0 (see note above on the
            # casing-pressure loop — fills rather than drops).
            lag_features.fillna(0, inplace=True)

            lag_features['日期'] = group['日期']

            # Record the well number on each row.
            lag_features['井号'] = well

            # Store the processed frame for this well.
            lagged_data1[well] = lag_features

        # One trained casing-pressure model per well.
        models = {}
        # Split each well's series and fit a model.
        for well, data in lagged_data.items():
            # Features are all lag columns; label is current casing pressure.
            X = data.drop(columns=['套压', '井号', '日期'])
            y = data['套压']

            # Nominal split point at train_size (e.g. first 80% train / last 20% test).
            split_index = int(len(data) * train_size)
            # NOTE(review): the training slice is iloc[:-1] (all rows but the
            # last), not iloc[:split_index], so it overlaps the test slice —
            # test metrics below are largely in-sample. The same pattern
            # recurs for the oil-pressure and platform models; confirm whether
            # this is intentional (train-on-all for forecasting) or a bug.
            X_train, X_test = X.iloc[:-1], X.iloc[split_index:]
            y_train, y_test = y.iloc[:-1], y.iloc[split_index:]

            # Model choice (LinearRegression left commented out in original).
            # model = LinearRegression()
            model = SVR(C=1.0, epsilon=0.01, kernel='rbf')  # C = regularization strength, epsilon = ε-insensitive tube width, kernel = kernel type

            # Fit on the training slice.
            model.fit(X_train, y_train)

            # Keep the fitted model for this well.
            models[well] = model

        # One trained oil-pressure model per well.
        models1 = {}
        # Split each well's series and fit a model (mirrors the casing loop).
        for well, data in lagged_data1.items():
            # Features are all lag columns; label is current oil pressure.
            X = data.drop(columns=['油压', '井号', '日期'])
            y = data['油压']

            # Nominal split point at train_size; same iloc[:-1] overlap as above.
            split_index = int(len(data) * train_size)
            X_train, X_test = X.iloc[:-1], X.iloc[split_index:]
            y_train, y_test = y.iloc[:-1], y.iloc[split_index:]

            # Model choice (original note: previously LinearRegression()).
            model1 = SVR(C=1.0, epsilon=0.01, kernel='rbf')  # LinearRegression()

            # Fit on the training slice.
            model1.fit(X_train, y_train)

            # Keep the fitted model for this well.
            models1[well] = model1

        # Evaluate the casing-pressure model of each well on its test slice.
        results = {}
        result_dfs = []
        for well, model in models.items():
            # Rebuild the test slice (rows from the split point onward).
            X_test = lagged_data[well].drop(columns=['套压', '井号', '日期']).iloc[
                     int(len(lagged_data[well]) * train_size):]
            y_test = lagged_data[well]['套压'].iloc[int(len(lagged_data[well]) * train_size):]
            dates = lagged_data[well]['日期'].iloc[int(len(lagged_data[well]) * train_size):]
            # Predict on the test slice.
            y_pred = model.predict(X_test)

            # Collect per-row true/predicted values for persistence.
            result_df = pd.DataFrame({
                'well_no': [well] * len(y_test),  # repeat the well name per data point
                'collect_date': dates,
                'cover_true_value': y_test,
                'cover_pre_value': y_pred
            })
            result_dfs.append(result_df)

            # Error metrics for this well.
            mse = mean_squared_error(y_test, y_pred)
            mae = mean_absolute_error(y_test, y_pred)

            # Store [MSE, MAE] keyed by well.
            results[well] = [mse, mae]

        # Concatenate all wells' casing-pressure test results.
        df0 = pd.concat(result_dfs, ignore_index=True)

        # Evaluate the oil-pressure model of each well on its test slice.
        results1 = {}
        result_dfs1 = []
        for well, model in models1.items():
            # Rebuild the test slice (rows from the split point onward).
            X_test = lagged_data1[well].drop(columns=['油压', '井号', '日期']).iloc[
                     int(len(lagged_data1[well]) * train_size):]
            y_test = lagged_data1[well]['油压'].iloc[int(len(lagged_data1[well]) * train_size):]
            # Predict on the test slice.
            y_pred = model.predict(X_test)

            # Only values here; well/date columns come from df0 when the two
            # frames are concatenated column-wise below (positional alignment).
            result_df = pd.DataFrame({
                'oil_true_value': y_test,
                'oil_pre_value': y_pred
            })
            result_dfs1.append(result_df)

            # Error metrics for this well.
            mse = mean_squared_error(y_test, y_pred)
            mae = mean_absolute_error(y_test, y_pred)

            # Store [MSE, MAE] keyed by well.
            results1[well] = [mse, mae]

        # Concatenate all wells' oil-pressure test results.
        df1 = pd.concat(result_dfs1, ignore_index=True)

        # Column-wise concat: relies on df0 and df1 having identical row order.
        merged_df = pd.concat([df0, df1], axis=1)
        # merged_df['id'] = merged_df.index.map(lambda _: str(uuid.uuid4()))
        merged_df.dropna(inplace=True)

        # Truncate the test-results table before re-inserting.
        with app.app_context():
            db.session.execute(text("DELETE FROM gas_production_well_test;"))
            db.session.commit()  # commit the delete
            # Reset the auto-increment counter.
            db.session.execute(text("ALTER TABLE gas_production_well_test AUTO_INCREMENT = 1;"))
            db.session.commit()  # commit the reset
        merged_df.to_sql('gas_production_well_test', con=db.engine, index=False, if_exists='append')

        # Collect per-well error metrics into rows.
        error_list = []
        for r in results:
            # One row per well, combining casing (cover_*) and oil metrics.
            error_list.append(
                {'well_no': r, 'cover_mse': results[r][0], 'cover_mae': results[r][1], 'oil_mse': results1[r][0],
                 'oil_mae': results1[r][1]})

        # Build the error DataFrame.
        error_df = pd.DataFrame(error_list)
        # error_df['id'] = error_df.index.map(lambda _: str(uuid.uuid4()))
        error_df.dropna(inplace=True)
        # Truncate the error table before re-inserting.
        with app.app_context():
            db.session.execute(text("DELETE FROM gas_production_well_test_error;"))
            db.session.commit()  # commit the delete
            # Reset the auto-increment counter.
            db.session.execute(text("ALTER TABLE gas_production_well_test_error AUTO_INCREMENT = 1;"))
            db.session.commit()  # commit the reset
        error_df.to_sql('gas_production_well_test_error', con=db.engine, index=False, if_exists='append')

        # Recursively forecast casing pressure for the next 330 days.
        future_predictions = {}
        result_dfs = []
        for well, model in models.items():

            # Use the last known row as the forecast starting point.
            last_known_data = lagged_data[well].iloc[-1]
            last_date = last_known_data['日期']

            features = []

            # Same lag grid as training, in the same column order.
            for lag in range(1, 60 + 1, 1):
                features.append(f'套压_lag_{lag}')
            for lag in range(61, 180 + 1, 6):
                features.append(f'套压_lag_{lag}')
            for lag in range(181, 330 + 1, 30):
                features.append(f'套压_lag_{lag}')

            base_features = last_known_data[features].values.reshape(1, -1)

            # Generate the next 330 calendar days.
            future_dates = pd.date_range(start=last_date + pd.Timedelta(days=1), periods=330)

            # Roll the forecast forward one day at a time.
            predictions = []
            current_features = base_features.copy()

            for date in future_dates:
                # Predict one step ahead.
                prediction = model.predict(current_features)[0]
                predictions.append(prediction)

                # Shift the feature vector and append the new prediction.
                # NOTE(review): columns are ordered lag_1..lag_330 (most recent
                # first), but this shift moves values toward column 0 and puts
                # the newest prediction in the LAST column (oldest lag), and it
                # ignores the non-uniform lag spacing — confirm this is the
                # intended update rule.
                current_features[:, :-1] = current_features[:, 1:]  # shift one column
                current_features[:, -1] = prediction  # place the new prediction in the last column

            # Keep the dated forecast per well.
            future_predictions[well] = pd.DataFrame({'日期': future_dates, '预测套压': predictions})

            # Collect rows for persistence.
            result_df = pd.DataFrame({
                'well_no': [well] * len(future_dates),  # repeat the well name per data point
                'collect_date': future_dates,
                'cover_pre_value': predictions
            })
            result_dfs.append(result_df)

        # Concatenate all wells' casing-pressure forecasts.
        df0 = pd.concat(result_dfs, ignore_index=True)

        # Recursively forecast oil pressure for the next 330 days
        # (the original comment said "casing pressure"; this loop is oil).
        future_predictions = {}
        result_dfs = []
        for well, model in models1.items():

            # Use the last known row as the forecast starting point.
            last_known_data = lagged_data1[well].iloc[-1]
            last_date = last_known_data['日期']

            features = []

            # Same lag grid as training, in the same column order.
            for lag in range(1, 60 + 1, 1):
                features.append(f'油压_lag_{lag}')
            for lag in range(61, 180 + 1, 6):
                features.append(f'油压_lag_{lag}')
            for lag in range(181, 330 + 1, 30):
                features.append(f'油压_lag_{lag}')

            base_features = last_known_data[features].values.reshape(1, -1)

            # Generate the next 330 calendar days.
            future_dates = pd.date_range(start=last_date + pd.Timedelta(days=1), periods=330)

            # Roll the forecast forward one day at a time.
            predictions = []
            current_features = base_features.copy()

            for date in future_dates:
                # Predict one step ahead.
                prediction = model.predict(current_features)[0]
                predictions.append(prediction)

                # Shift and append (same update rule — and the same caveat —
                # as the casing-pressure forecast loop above).
                current_features[:, :-1] = current_features[:, 1:]  # shift one column
                current_features[:, -1] = prediction  # place the new prediction in the last column

            # Keep the dated forecast per well.
            future_predictions[well] = pd.DataFrame({'日期': future_dates, '预测油压': predictions})

            # Values only; aligned positionally with df0 below.
            result_df = pd.DataFrame({
                'oil_pre_value': predictions
            })
            result_dfs.append(result_df)

        # Concatenate all wells' oil-pressure forecasts.
        df1 = pd.concat(result_dfs, ignore_index=True)

        # Column-wise concat: relies on identical row order in df0 and df1.
        pre_df = pd.concat([df0, df1], axis=1)
        pre_df['id'] = pre_df.index.map(lambda _: str(uuid.uuid4()))
        pre_df.dropna(inplace=True)
        # Map each well_no to its platform_no via the connect table.
        query = (
            db.session.query(
                GasBaseWell.well_no,
                GasBasePlatform.platform_no
            )
            .join(GasPlarformWellConnect, GasBaseWell.id == GasPlarformWellConnect.well_id)
            .join(GasBasePlatform, GasPlarformWellConnect.platform_id == GasBasePlatform.id)
        )

        # Execute the query and fetch all rows.
        results = query.all()

        # Convert to a DataFrame of (well_no, platform_no).
        data = [{'well_no': well_no, 'platform_no': platform_no} for well_no, platform_no in results]
        well_platform_df = pd.DataFrame(data)

        # Attach platform numbers and production-day counts to the forecast.
        pre_df = pd.merge(well_platform_df, pre_df, on='well_no', how='left')
        pre_df = pd.merge(pre_df, wd_grouped, on='well_no', how='left')
        # Drop the uuid column generated above (the table auto-increments).
        pre_df = pre_df.drop(columns=['id'])
        # Truncate the forecast table before re-inserting.
        with app.app_context():
            db.session.execute(text("DELETE FROM gas_production_well_pre;"))
            db.session.commit()  # commit the delete
            # Reset the auto-increment counter.
            db.session.execute(text("ALTER TABLE gas_production_well_pre AUTO_INCREMENT = 1;"))
            db.session.commit()  # commit the reset
        pre_df[['cover_pre_value', 'oil_pre_value']] = pre_df[['cover_pre_value', 'oil_pre_value']].round(2)
        pre_df.to_sql('gas_production_well_pre', con=db.engine, index=False, if_exists='append')

        json_response = {
            "success": True,
            "message": "",
            "code": 200
        }
        return json_response
    elif modelType == '平台气量预测模型':
        # Run ORM queries inside the Flask application context.
        with app.app_context():
            # Load every record of the platform production table.
            results = GasPlatformProduction.query.all()
            # Convert ORM rows to dicts; Chinese keys: 平台基础表id=platform base
            # table id, 平台号=platform no., 日期=date, 产气量=gas production,
            # 输气压力=transport pressure.
            data = [
                {'id': result.id,
                 '平台基础表id': result.platform_id,
                 '平台号': result.platform_no,
                 '日期': result.collect_date,
                 '产气量': result.production_gas_year,
                 '输气压力': result.pressure_transport}
                for result in results]
        # Build a DataFrame from the list of dicts.
        df = pd.DataFrame(data)
        # Ensure the date column is of datetime type.
        df['日期'] = pd.to_datetime(df['日期'])

        # Group by platform number (original comment said "well number";
        # the loop variable `well` below actually holds a platform no.).
        grouped = df.groupby('平台号')
        # Per-platform lagged feature frames.
        lagged_data = {}

        # Build lag features for each platform.
        for well, group in grouped:
            group = group.dropna(subset=['日期'])

            # Sort chronologically so shift() produces true lags.
            group = group.sort_values(by='日期')

            # Start from the target column and append lag columns.
            lag_features = group[['产气量']].copy()

            # Same lag grid as the well models: daily 1..60, 6-day steps
            # 61..180, 30-day steps 181..330.
            for lag in range(1, 60 + 1, 1):
                lag_features[f'产气量_lag_{lag}'] = group['产气量'].shift(lag)
            for lag in range(61, 180 + 1, 6):
                lag_features[f'产气量_lag_{lag}'] = group['产气量'].shift(lag)
            for lag in range(181, 330 + 1, 30):
                lag_features[f'产气量_lag_{lag}'] = group['产气量'].shift(lag)

            # Fill NaNs introduced by shifting with 0 (fills, not drops).
            lag_features.fillna(0, inplace=True)

            lag_features['日期'] = group['日期']
            lag_features['平台基础表id'] = group['平台基础表id']

            # Record the platform number on each row.
            lag_features['平台号'] = well

            # Store the processed frame for this platform.
            lagged_data[well] = lag_features

        # One trained gas-rate model per platform.
        models = {}

        # Split each platform's series and fit a model.
        for well, data in lagged_data.items():
            # Features are all lag columns; label is current gas production.
            X = data.drop(columns=['产气量', '平台号', '日期', '平台基础表id'])
            y = data['产气量']

            # Nominal split point at train_size; NOTE(review): same iloc[:-1]
            # train/test overlap as in the well-model branch above.
            split_index = int(len(data) * train_size)
            X_train, X_test = X.iloc[:-1], X.iloc[split_index:]
            y_train, y_test = y.iloc[:-1], y.iloc[split_index:]

            # Model choice (earlier candidates left commented out in original).
            # model = LinearRegression()
            # model = RandomForestRegressor(n_estimators=10, n_jobs=4)
            # SVR with a wider epsilon tube than the pressure models (0.1 vs 0.01).
            model = SVR(C=1.0, epsilon=0.1, kernel='rbf')  # C = regularization strength, epsilon = ε-insensitive tube width, kernel = kernel type

            # Fit on the training slice.
            model.fit(X_train, y_train)

            # Keep the fitted model for this platform.
            models[well] = model

        # Evaluate each platform's model on its test slice.
        results = {}
        result_dfs = []
        for well, model in models.items():
            # Rebuild the test slice (rows from the split point onward).
            X_test = lagged_data[well].drop(columns=['产气量', '平台号', '日期', '平台基础表id']).iloc[
                     int(len(lagged_data[well]) * train_size):]

            y_test = lagged_data[well]['产气量'].iloc[int(len(lagged_data[well]) * train_size):]
            dates = lagged_data[well]['日期'].iloc[int(len(lagged_data[well]) * train_size):]
            # Predict on the test slice.
            y_pred = model.predict(X_test)

            # Collect per-row true/predicted values for persistence.
            result_df = pd.DataFrame({
                'platform_no': [well] * len(y_test),  # repeat the platform name per data point
                'collect_date': dates,
                'gas_true_value': y_test,
                'gas_pre_value': y_pred
            })
            result_dfs.append(result_df)

            # Error metrics for this platform.
            mse = mean_squared_error(y_test, y_pred)
            mae = mean_absolute_error(y_test, y_pred)

            # Store [MSE, MAE] keyed by platform.
            results[well] = [mse, mae]

        # Concatenate all platforms' test results.
        df0 = pd.concat(result_dfs, ignore_index=True)
        # df0['id'] = df0.index.map(lambda _: str(uuid.uuid4()))
        df0.dropna(inplace=True)
        # Truncate the test-results table before re-inserting.
        with app.app_context():
            db.session.execute(text("DELETE FROM gas_production_platform_test;"))
            db.session.commit()  # commit the delete
            # Reset the auto-increment counter.
            db.session.execute(text("ALTER TABLE gas_production_platform_test AUTO_INCREMENT = 1;"))
            db.session.commit()  # commit the reset
        df0.to_sql('gas_production_platform_test', con=db.engine, index=False, if_exists='append')

        # Collect per-platform error metrics into rows.
        error_list = []
        for r in results:
            # One row per platform.
            error_list.append(
                {'platform_no': r, 'mse': results[r][0], 'mae': results[r][1]})

        # Build the error DataFrame.
        error_df = pd.DataFrame(error_list)
        # error_df['id'] = error_df.index.map(lambda _: str(uuid.uuid4()))
        error_df.dropna(inplace=True)
        # Truncate the error table before re-inserting.
        with app.app_context():
            db.session.execute(text("DELETE FROM gas_production_platform_test_error;"))
            db.session.commit()  # commit the delete
            # Reset the auto-increment counter.
            db.session.execute(text("ALTER TABLE gas_production_platform_test_error AUTO_INCREMENT = 1;"))
            db.session.commit()  # commit the reset
        error_df.to_sql('gas_production_platform_test_error', con=db.engine, index=False, if_exists='append')

        # Recursively forecast gas production for the next 330 days
        # (original comment said "casing pressure"; this branch is gas rate).
        future_predictions = {}
        result_dfs = []
        for well, model in models.items():

            # Use the last known row as the forecast starting point; also
            # compute the platform's observed production span in days.
            last_known_data = lagged_data[well].iloc[-1]
            first_known_data = lagged_data[well].iloc[0]
            pdays = (last_known_data['日期'] - first_known_data['日期']).days

            last_date = last_known_data['日期']
            id = last_known_data['平台基础表id']

            features = []

            # Same lag grid as training, in the same column order.
            for lag in range(1, 60 + 1, 1):
                features.append(f'产气量_lag_{lag}')
            for lag in range(61, 180 + 1, 6):
                features.append(f'产气量_lag_{lag}')
            for lag in range(181, 330 + 1, 30):
                features.append(f'产气量_lag_{lag}')

            base_features = last_known_data[features].values.reshape(1, -1)

            # Generate the next 330 calendar days.
            future_dates = pd.date_range(start=last_date + pd.Timedelta(days=1), periods=330)

            # Roll the forecast forward one day at a time, applying two
            # ad-hoc adjustments below.
            predictions = []
            current_features = base_features.copy()
            baseV = base_features[0][0]
            rate = 0.1
            for date in future_dates:
                # Predict one step ahead.
                prediction = model.predict(current_features)[0]
                # Heuristic 1: progressively damp predictions above 11,
                # with the damping rate growing 0.001 per day.
                # NOTE(review): the thresholds 11 / 2000 / 0.5 / 1.2 are magic
                # numbers with no stated units or rationale — presumably tuned
                # manually for this field; confirm with the model owner.
                if prediction > 11:
                    prediction = prediction - prediction * rate
                    rate += 0.001

                # Heuristic 2: for long-lived, high-producing platforms,
                # blend the prediction toward the last observed value.
                if (baseV > 11) and (pdays > 2000):
                    if prediction < baseV * 0.5:
                        prediction = (baseV + prediction * 1.2) / 2
                    else:
                        prediction = (baseV + prediction) / 2

                predictions.append(prediction)

                # Shift and append (same update rule — and the same caveat —
                # as the well-model forecast loops).
                current_features[:, :-1] = current_features[:, 1:]  # shift one column
                current_features[:, -1] = prediction  # place the new prediction in the last column

            # Keep the dated forecast per platform.
            future_predictions[well] = pd.DataFrame({'日期': future_dates, '气量预测值': predictions})

            # Collect rows for persistence.
            result_df = pd.DataFrame({
                'platform_id': [id] * len(future_dates),  # repeat the platform id per data point
                'platform_no': [well] * len(future_dates),  # repeat the platform name per data point
                'collect_date': future_dates,
                'pre_value': predictions,
                'pressure_transport': [0] * len(future_dates)  # placeholder used during early testing
            })
            result_dfs.append(result_df)

        # Concatenate all platforms' forecasts.
        pre_df = pd.concat(result_dfs, ignore_index=True)
        # pre_df['id'] = pre_df.index.map(lambda _: str(uuid.uuid4()))
        pre_df.dropna(inplace=True)
        # Truncate the forecast table before re-inserting.
        with app.app_context():
            db.session.execute(text("DELETE FROM gas_production_platform_pre;"))
            db.session.commit()  # commit the delete
            # Reset the auto-increment counter.
            db.session.execute(text("ALTER TABLE gas_production_platform_pre AUTO_INCREMENT = 1;"))
            db.session.commit()  # commit the reset

        pre_df.to_sql('gas_production_platform_pre', con=db.engine, index=False, if_exists='append')

        json_response = {
            "success": True,
            "message": "",
            "code": 200
        }
        print("Calling Java API")

        # Synchronous notification of the Java back end; blocks this request
        # until the Java side responds (a threaded variant was tried below).
        requests.get(url)

        # Create and start a thread to call the Java API (disabled).
        # threading.Thread(target=call_java_api_in_thread, args=(url,)).start()

        return json_response


if __name__ == '__main__':
    # Development-server entry point.
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader, which
    # is unsafe when bound to 0.0.0.0 — confirm production deployments run
    # behind a real WSGI server with debug disabled.
    app.run(host='0.0.0.0', port=5000, debug=True)
