from idlelib.iomenu import encoding
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy import text
import numpy as np
from scipy.optimize import curve_fit
from sqlalchemy import create_engine, MetaData, Table
from sqlalchemy import text
import uuid

# Database connection parameters (MySQL)
# NOTE(review): credentials are hard-coded in source — consider moving them to
# environment variables or a config file.
username = 'root'
password = '123456'
host = '192.168.0.189'
port = '3306'
database = 'shale-gas'
# Module-level SQLAlchemy engine shared by top-level code.
engine = create_engine(f'mysql+pymysql://{username}:{password}@{host}:{port}/{database}')

def get_db_connection():
    """Create and return a fresh SQLAlchemy engine for the configured MySQL DB.

    Uses the module-level connection settings (username, password, host,
    port, database). Each call builds a new engine with its own pool.
    """
    url = f'mysql+pymysql://{username}:{password}@{host}:{port}/{database}'
    return create_engine(url)

def predict_and_merge_production(para_df, daily_df, duong_decay):
    """Predict daily gas rates per well with the Duong model and merge with actuals.

    Parameters
    ----------
    para_df : DataFrame with columns well_no, a_values, m_values, q, duration.
    daily_df : DataFrame with columns well_no, duration, daily_production.
    duong_decay : callable ((t, qi), a, m) -> predicted production array.

    Returns
    -------
    DataFrame with columns well_no, days, daily_production, fit_production,
    duplicates removed.
    """
    # One predicted frame per well, concatenated once at the end.
    per_well_frames = []
    for _, well in para_df.iterrows():
        horizon = well['duration']  # length of the production period in days
        day_axis = np.arange(1, horizon + 1)
        qi_vector = np.repeat(well['q'], len(day_axis))
        fitted = duong_decay((day_axis, qi_vector), a=well['a_values'], m=well['m_values'])
        per_well_frames.append(pd.DataFrame({
            'well_no': well['well_no'],
            'days': day_axis,
            'duong_predict_production': fitted
        }))

    predictions = pd.concat(per_well_frames, ignore_index=True)

    # Align the merge key name, then keep every daily record (right join).
    actuals = daily_df.rename(columns={'duration': 'days'})
    merged = pd.merge(predictions, actuals, on=['well_no', 'days'], how='right')
    merged = merged.rename(columns={'duong_predict_production': 'fit_production'})

    # Final column selection; drop exact duplicate rows.
    return merged[['well_no', 'days', 'daily_production', 'fit_production']].drop_duplicates()

def duong_decay(X, a, m):
    """Duong decline model: q(t) = qi * t^(-m) * exp(a/(1-m) * (t^(1-m) - 1)).

    X is a (time, initial-rate) pair of arrays; a and m are the Duong
    fit parameters.
    """
    t, qi = X[0], X[1]
    decline = t ** (-m)
    growth = np.exp(a / (1 - m) * (t ** (1 - m) - 1))
    return qi * decline * growth

def intial_production(production, a, m, days):
    """Back-calculate the initial rate qi from cumulative production.

    Inverts the Duong cumulative relation:
    qi = Q * a / exp((a / (1 - m)) * (t^(1-m) - 1)).
    (Function name kept as-is, including the historical misspelling, so
    existing callers keep working.)
    """
    exponent = (a / (1 - m)) * (days ** (1 - m) - 1)
    return production * a / np.exp(exponent)

def cumulative_production(qi, a, m, t=330 * 20, t1=330):
    """Cumulative Duong production accumulated between day t1 and day t.

    Defaults cover the span from the end of the first year (330 producing
    days) through a 20-year horizon.
    """
    def _cum(day):
        # Duong cumulative: Q(t) = (qi / a) * exp((a / (1 - m)) * (t^(1-m) - 1))
        return (qi / a) * np.exp((a / (1 - m)) * (day ** (1 - m) - 1))

    return _cum(t) - _cum(t1)

def process_gas_data(df, engine):
    """Clean per-well daily gas data, renumber producing days, and persist.

    Drops zero/NaN daily-production rows for each well, renumbers the
    remaining rows as consecutive producing days, then truncates and rewrites
    the `gas_daily_production` table.

    Parameters
    ----------
    df : DataFrame with columns well_no, production_gas_day, collect_date,
         production_gas_year, days.
    engine : SQLAlchemy engine used for the TRUNCATE + append write.

    Returns
    -------
    DataFrame with columns well_no, daily_production, date, days,
    cumulative_production — exactly the rows that were written.
    """
    cleaned_groups = []
    for _, group in df.groupby('well_no'):
        group = group.copy()
        # Keep only rows with a real (non-zero, non-NaN) daily production.
        group = group[group['production_gas_day'] != 0]
        group = group[group['production_gas_day'].notna()]

        # Renumber producing days consecutively; .loc avoids SettingWithCopyWarning.
        group.loc[:, 'new_days'] = (group['days'] != 0).cumsum()
        group.loc[:, 'new_cumulative_production'] = group['production_gas_year']

        # Drop any rows that still carry a zero day index.
        group = group[group['new_days'] != 0]
        group.reset_index(drop=True, inplace=True)
        cleaned_groups.append(group)

    # Concatenate once instead of inside the loop: the original rebuilt the
    # accumulator DataFrame per group, which is O(n^2) in total rows.
    new_df = pd.concat(cleaned_groups) if cleaned_groups else pd.DataFrame()

    df = new_df[['well_no', 'production_gas_day', 'collect_date', 'production_gas_year', 'new_days', 'new_cumulative_production']]

    df = df.rename(columns={
        'collect_date': 'date',
        'new_days': 'days',
        'production_gas_day': 'daily_production',
        'new_cumulative_production': 'cumulative_production'
    })

    df_save = df[['well_no', 'daily_production', 'date', 'days', 'cumulative_production']]

    # Replace the table contents: TRUNCATE first, then append the fresh rows.
    write_name = 'gas_daily_production'
    with engine.connect() as connection:
        connection.execute(text(f"TRUNCATE TABLE {write_name}"))
    df_save.to_sql(write_name, con=engine, index=False, if_exists='append')

    return df_save

def fit_well_production(df, duong_decay, cumulative_production):
    """Fit Duong parameters (a, m) for every well and summarize the results.

    For each well in df, qi is pinned to the first observed daily rate, a and
    m are fitted by non-linear least squares, and both the actual cumulative
    production and the Duong-model cumulative over the observed span are
    computed.

    Returns a DataFrame with one row per well: well_no, a_values, m_values,
    q, actual_production, duration, duong_production.
    """
    rows = []
    for well in df['well_no'].unique():
        well_df = df[df['well_no'] == well]
        well_days = well_df['days']
        t_data = np.array(well_days)                       # time axis (days)
        q_data = np.array(well_df['daily_production'].values)  # observed rates

        qi_fixed = q_data[0]  # qi pinned to the first observation
        q_i = np.repeat(qi_fixed, len(t_data))

        # Fit only a and m; qi enters through the second input channel.
        params, covariance = curve_fit(
            duong_decay, (t_data, q_i), q_data,
            p0=(0.5, 0.5), bounds=([0, 0], [5, 2]), maxfev=10000,
        )
        a_fit, m_fit = params

        # Model cumulative over the observed window (day 1 .. last day).
        duong_pro = cumulative_production(qi_fixed, a_fit, m_fit, t=len(q_i), t1=1)

        rows.append((well, a_fit, m_fit, qi_fixed,
                     sum(q_data), len(well_days), duong_pro))

    return pd.DataFrame(rows, columns=[
        'well_no', 'a_values', 'm_values', 'q',
        'actual_production', 'duration', 'duong_production',
    ])

###########################################
def main_1():
    """Full pipeline: read raw well production, fit Duong decline parameters
    for all wells and for a filtered subset, then write fit results, merged
    well parameters, and EUR predictions back to MySQL.

    NOTE(review): this function truncates and rewrites several tables on
    every run (gas_well_fit, gas_well_fit_exclude, gas_well_para,
    gas_duong_eur, gas_duong_eur_exclude), and creates multiple engines
    (here and via get_db_connection) — one shared engine would suffice.
    """
    engine = create_engine(f'mysql+pymysql://{username}:{password}@{host}:{port}/{database}')
    df_xz = pd.read_sql('gas_production_well', con=engine)
    df_xz = df_xz[['well_no','production_gas_day','collect_date','production_gas_year','production_time']]
    df_xz['production_time'] = df_xz['production_time'].fillna(0)

    df = df_xz.drop('production_time',axis=1).copy()
    df['collect_date'] = pd.to_datetime(df['collect_date'])
    # Number each well's records chronologically: days = 1..N per well.
    df['days'] = df.sort_values(by=['well_no', 'collect_date']) \
        .groupby('well_no').cumcount() + 1

    df_process1 = process_gas_data(df, engine)
    duong_fit1 = fit_well_production(df_process1, duong_decay, cumulative_production)
    table_name = 'gas_well_fit'  # target table for the all-wells fit results
    with engine.connect() as connection:
        connection.execute(text(f"TRUNCATE TABLE {table_name}"))
    # Then insert the fresh rows with pandas to_sql().
    duong_fit1.to_sql(table_name, con=engine, index=False, if_exists='append')

    ###########################################
    # Second pass: only wells whose production_time exceeds 6.
    df_p = df_xz[df_xz['production_time'] > 6].copy()
    df_p['collect_date'] = pd.to_datetime(df_p['collect_date'])
    df_p['days'] = df_p.sort_values(by=['well_no', 'collect_date']) \
                     .groupby('well_no').cumcount() + 1

    # NOTE(review): process_gas_data always writes 'gas_daily_production', so
    # this call overwrites the first pass's table; yet the exclude branch
    # below reads 'gas_daily_production_exclude', which nothing visible here
    # writes — confirm the intended target table.
    df_process2 = process_gas_data(df_p, engine)
    duong_fit2 = fit_well_production(df_process2, duong_decay, cumulative_production)
    table_name = 'gas_well_fit_exclude'  # target table for the filtered fit results
    with engine.connect() as connection:
        connection.execute(text(f"TRUNCATE TABLE {table_name}"))
    # Then insert the fresh rows with pandas to_sql().
    duong_fit2.to_sql(table_name, con=engine, index=False, if_exists='append')

    ###########################################
    # Merge static well parameters with the fitted Duong parameters.
    para_name = 'gas_base_well'  # source: static well parameters
    fit_name = 'gas_well_fit'  # source: fitted Duong parameters
    df_para = pd.read_sql_table(para_name, con=engine)
    df_fit = pd.read_sql_table(fit_name, con=engine)
    # Collapse the two peripheral sub-areas into a single category.
    df_para['core_area'] = df_para['core_area'].replace({'外围1': '外围', '外围2': '外围'})
    df_combine = pd.merge(df_para,df_fit,how='left',on='well_no')
    df_combine = df_combine.drop(['is_coordinate','is_production'],axis=1)

    table_name = 'gas_well_para'  # target table for the merged parameters
    with engine.connect() as connection:
        connection.execute(text(f"TRUNCATE TABLE {table_name}"))
    df_combine.to_sql(table_name, con=engine, index=False, if_exists='append')
    print(f'Data has been saved to table {table_name} in the database.')

    # Load fit parameters and daily production in chunks to bound memory use.
    engine = get_db_connection()
    chunk_size = 10000

    para_chunks = pd.read_sql_table('gas_well_fit', con=engine,chunksize=chunk_size)
    para_df =  pd.concat(para_chunks, ignore_index=True)

    daily_chunks = pd.read_sql_table('gas_daily_production', con=engine, chunksize=chunk_size)
    daily_df =  pd.concat(daily_chunks, ignore_index=True)

    df_unique_1  = predict_and_merge_production(para_df, daily_df, duong_decay)
    # Assign a UUID primary key to every prediction row.
    df_unique_1['id'] = [str(uuid.uuid4()) for _ in range((df_unique_1.shape[0]))]
    output_df_1  = df_unique_1[['id','well_no', 'days', 'daily_production', 'fit_production']]

    table_name = 'gas_duong_eur'  # target table for the all-wells EUR predictions
    with engine.connect() as connection:
        connection.execute(text(f"TRUNCATE TABLE {table_name}"))
    # Truncate-then-append avoids duplicated rows across runs.
    output_df_1.to_sql(table_name, con=engine, index=False, if_exists='append')

    # Repeat the prediction/merge for the filtered ('exclude') data set.
    engine = get_db_connection()
    para_df = pd.read_sql_table('gas_well_fit_exclude', con=engine)
    daily_df = pd.read_sql_table('gas_daily_production_exclude', con=engine)
    df_unique_2  = predict_and_merge_production(para_df, daily_df, duong_decay)
    df_unique_2['id'] = [str(uuid.uuid4()) for _ in range((df_unique_2.shape[0]))]
    output_df_2  = df_unique_2[['id','well_no', 'days', 'daily_production', 'fit_production']]

    # Write to the database.
    table_name = 'gas_duong_eur_exclude'  # target table for the filtered EUR predictions
    with engine.connect() as connection:
        connection.execute(text(f"TRUNCATE TABLE {table_name}"))
    # Truncate-then-append avoids duplicated rows across runs.
    output_df_2.to_sql(table_name, con=engine, index=False, if_exists='append')
def main():
    """Entry point placeholder; the actual pipeline lives in main_1()."""
    pass
if __name__ == '__main__':
    main()


