# coding: utf-8
import re
import copy
import datetime
import pandas as pd
from sqlalchemy import create_engine
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics
import os
from sklearn.cluster import KMeans
from matplotlib import font_manager
import seaborn as sns
from scipy import stats
from pandas.tseries.offsets import *
import datetime 
from datetime import datetime, date
import matplotlib.ticker as plticker
import math
import itertools
from scipy.signal import savgol_filter
from datetime import timedelta
import statsmodels.api as sm
import statsmodels.formula.api as smf



# Input directory (raw string keeps Windows backslashes intact).
filedir = r"E:\工作\工作\预测\2023"

# Load inverter data. Forward slash avoids the backslash-escape pitfall of
# embedding '\m...' inside an f-string (pandas/Windows accept '/' in paths).
inverter_df = pd.read_csv(f'{filedir}/merge_456_2023_test.csv', encoding='utf_8')
# Parse timestamps (bad values -> NaT), then normalize to the 19-character
# 'YYYY-MM-DD HH:MM:00' string form so they match the irradiance file's keys.
# (The old `.str[:19]` truncation was a no-op — strftime already emits exactly
# 19 characters — and its comment wrongly said 18; both removed.)
inverter_df['timestamp'] = pd.to_datetime(inverter_df['timestamp'], errors='coerce', utc=True)
inverter_df['timestamp'] = inverter_df['timestamp'].dt.strftime('%Y-%m-%d %H:%M:00')

# 15-minute irradiance data, keyed by the same normalized timestamp strings.
df_yaoce = pd.read_csv(f'{filedir}/15min辐照度数据.csv', encoding='utf_8')

# Attach irradiance readings to each inverter record (left join keeps all
# inverter rows even when no irradiance reading exists for that timestamp).
inverter_df = inverter_df.merge(df_yaoce, on='timestamp', how='left')


import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import statsmodels.api as sm

def filter_by_three_sigma(df, column_name, n_sigma=2):
    """
    Filter outliers in ``column_name`` using a mean ± n_sigma·std band.

    Rows with a non-positive value in ``column_name`` are dropped first;
    the mean/std are then computed on the remaining rows, and only rows
    strictly inside the open interval (mean - n_sigma*std, mean + n_sigma*std)
    are kept.

    Parameters
    ----------
    df : pd.DataFrame
        Input data.
    column_name : str
        Name of the column to apply the rule to.
    n_sigma : float, default 2
        Half-width of the acceptance band, in standard deviations.
        NOTE(review): despite the function's name ("three sigma"), the
        historical behavior used 2 sigma; the default preserves that.

    Returns
    -------
    pd.DataFrame
        The filtered rows (a view/copy of ``df``; the input is not mutated).
    """
    # Drop non-positive readings before estimating the distribution,
    # so zeros (e.g. night-time power) don't drag the mean down.
    df = df[df[column_name] > 0]
    mean = df[column_name].mean()
    std = df[column_name].std()
    lower_limit = mean - n_sigma * std
    upper_limit = mean + n_sigma * std
    # Keep only rows strictly inside the band.
    return df[(df[column_name] > lower_limit) & (df[column_name] < upper_limit)]

def regression_model(inverter_df):
    """
    Two-stage OLS regression per inverter, writing coefficients to Excel.

    Stage 1 regresses each power column ('功率_1'..'功率_12') on irradiance
    ('功率预测系统_总辐射1瞬时值_filled') with no intercept (regression through
    the origin).  Stage 2 regresses the stage-1 residuals on back-panel
    temperature ('功率预测系统_背板温度_filled') with an intercept.  The merged
    coefficient table is saved to '分布式回归系数.xlsx'.

    Parameters:
    inverter_df (pd.DataFrame): input data; must contain 'timestamp', the
        station/device id columns, the 12 power columns and the two
        '_filled' regressor columns.  NOTE(review): the frame is mutated
        in place (columns are added/overwritten).

    Returns:
    None

    NOTE(review): also reads the module-level global `df_yaoce` (assigned to
    `t_df` but never used afterwards), and the training cutoff is hard-coded
    to 2024-01-01 — confirm before reuse on other periods.
    """
    varkeyword  =  '功率预测系统_'
    timestampname = 'timestamp'  # a string, not a list
    t_df = df_yaoce  # NOTE(review): unused below — kept as-is
    decomposed_timefreq = '5min'
    agg_timefreq = '15min'
    major_var  = '功率预测系统_总辐射1瞬时值_filled'
    yearmon_colname = 'Yearmon'
    datename = 'date'
    id_col = ['station_id','region','matrix_name','device_level1', 'device_level2']
    yearmon_col = 'Yearmon'
    keyword = '功率'
    # Ensure the timestamp column is in datetime format
    dfin = inverter_df
    # Truncate to minute precision (seconds forced to 00), then re-parse.
    dfin[timestampname] = pd.to_datetime(dfin[timestampname]).dt.strftime('%Y-%m-%d %H:%M:00')
    dfin[timestampname] = pd.to_datetime(dfin[timestampname])
    # Derive year-month and date keys as strings for grouping/filtering.
    dfin[yearmon_col] = pd.PeriodIndex(dfin[timestampname], freq='M')
    dfin[yearmon_col] = dfin[yearmon_col].astype(str)
    dfin[datename] = pd.PeriodIndex(dfin[timestampname], freq='D')
    dfin[datename] = dfin[datename].astype(str)
    dfin['hour'] = pd.DatetimeIndex(dfin[timestampname]).hour
    # Composite inverter serial: station__region__matrix__level1__level2.
    dfin['inverter_sn'] = dfin['station_id'] + '__' + dfin['region'] + '__' + \
                      dfin['matrix_name'] + '__' + dfin['device_level1'] + \
                      '__' + dfin['device_level2']
    # Drop irradiance outliers (mean ± 2σ band, non-positive values removed).
    dfin = filter_by_three_sigma(dfin, '功率预测系统_总辐射1瞬时值_filled')
    yvar =  ['功率_1', '功率_2', '功率_3', '功率_4', '功率_5', '功率_6',
       '功率_7', '功率_8', '功率_9', '功率_10', '功率_11', '功率_12']
    xvar =  ['功率预测系统_总辐射1瞬时值_filled', '功率预测系统_背板温度_filled']
    xvar_secondorder =  ['功率预测系统_背板温度_filled']

    yvar_scaled = [y + '_scaled' for y in yvar ]
    xvar_scaled = [x + '_scaled' for x in xvar ]
    dfin[timestampname] = pd.to_datetime(dfin[timestampname])
    dfin[datename] = pd.to_datetime(dfin[datename])
    dfin [timestampname] = pd.to_datetime(dfin[timestampname])
    dfin[datename] = pd.to_datetime(dfin[datename])  # convert the date column to datetime format


    # Model on one year of rolling data before the cutoff date.
    train_date = datetime(2024, 1, 1)
    # Keep daytime hours (08:00–16:00 inclusive) within the 365-day window.
    df_work = dfin[(dfin[timestampname].dt.hour >= 8) & (dfin[timestampname].dt.hour <= 16) & 
               (dfin[datename] < train_date) & (dfin[datename] >= train_date - timedelta(days=365))][['inverter_sn'] + [timestampname, yearmon_col] + yvar + xvar]

    # Aggregate into 15-minute buckets (original comment said "hourly",
    # but the Grouper freq is '15min' — the code is authoritative).
    df_hourly = df_work.groupby(by=['inverter_sn', yearmon_col, pd.Grouper(key=timestampname, axis=0, freq='15min')])[yvar + xvar].sum().reset_index()

    # Containers for per-inverter results.
    equips = df_hourly['inverter_sn'].unique()
    para_coff = pd.DataFrame()
    rsquares = []
    df_u = pd.DataFrame()

    # Stage 1: preliminary OLS per inverter and per power line.
    for equip in equips:
        df_part = df_hourly[df_hourly['inverter_sn'] == equip]
        para_coff1 = pd.DataFrame()
        df_part_y_f = pd.DataFrame()

        for yloc, y_list in enumerate(yvar):
            keep_y_var = [timestampname, 'inverter_sn', yearmon_col] + xvar + [y_list]
            df_part_y = df_part[keep_y_var]
            # Treat exact zeros as missing, drop them, then within each day
            # drop consecutive duplicate readings (stuck-sensor repeats).
            df_part_y.loc[df_part_y[y_list] == 0.0, y_list] = np.nan
            df_part_y = df_part_y.dropna()
            df_part_y = df_part_y.groupby(df_part_y[timestampname].dt.date).apply(lambda x: x.loc[x[y_list].shift() != x[y_list]]).reset_index(drop=True)
            df_part_y = filter_by_three_sigma(df_part_y, y_list)

            if df_part_y.shape[0] == 0:
                continue

            # OLS of power on irradiance WITHOUT a constant — this is a
            # regression through the origin (no sm.add_constant here).
            y = df_part_y[y_list]
            X = df_part_y[xvar[0]]
            model = sm.OLS(y, X).fit()
            df_part_y[f'{y_list}_hat'] = model.predict(X)
            df_part_y1 = df_part_y[[timestampname, 'inverter_sn', yearmon_col, f'{y_list}_hat']]

            # First power line keeps the key columns; later lines only
            # contribute their fitted-value column (concat aligns on index).
            if yloc == 0:
                df_part_y_f = pd.concat([df_part_y_f, df_part_y1], axis=1, ignore_index=False)
            else:
                df_part_y_f = pd.concat([df_part_y_f, df_part_y1[f'{y_list}_hat']], axis=1, ignore_index=False)

            rsquare = [equip, y_list, model.rsquared]
            #rsquares.append(rsquare)

            # Transpose params into one wide row: regressor names as columns.
            model_parameters = model.params.reset_index().transpose()
            new_header = model_parameters.iloc[0]
            model_parameters = model_parameters[1:]
            model_parameters.columns = new_header
            model_parameters['inverter_sn'] = equip
            model_parameters['line_number'] = y_list
            para_coff1 = pd.concat([para_coff1, model_parameters])
        # Re-attach fitted values to the full (unfiltered) equip slice.
        df_part_x = df_part[[timestampname, 'inverter_sn', yearmon_col] + xvar + yvar].merge(df_part_y_f, on=[timestampname, 'inverter_sn', yearmon_col], how='left')
        df_u = pd.concat([df_u, df_part_x])

        para_coff = pd.concat([para_coff, para_coff1])
        para_coff = para_coff[['inverter_sn', 'line_number'] + [xvar[0]]]

    # Stage 2: model the residuals against back-panel temperature.

    for y_res in yvar:
        df_u[f'{y_res}_residual'] = df_u[y_res] - df_u[f'{y_res}_hat']

    equips = df_u['inverter_sn'].unique()
    para_coff2 = pd.DataFrame()

    for equip in equips:
        df_part = df_u[df_u['inverter_sn'] == equip]
        para_coff1 = pd.DataFrame()

        for y_list in yvar:
            keep_y_var = [timestampname, 'inverter_sn', yearmon_col] + xvar + [f'{y_list}_residual']
            df_part_y = df_part[keep_y_var]
            # NOTE(review): this blanks zeros across ALL kept columns (not
            # just the residual), then drops any row containing a NaN.
            df_part_y[df_part_y == 0.0] = np.nan
            df_part_y = df_part_y.dropna()

            if df_part_y.shape[0] == 0:
                continue

            # OLS of residual on temperature WITH an intercept this time.
            y = df_part_y[f'{y_list}_residual']
            X = df_part_y[xvar[1]]
            X = sm.add_constant(X)

            model2 = sm.OLS(y, X).fit()
            df_part_y[f'{y_list}_residual_hat'] = model2.predict(X)

            rsquare = [equip, y_list, model2.rsquared]

            # Same wide-row reshaping of the fitted parameters as stage 1.
            model_parameters = model2.params.reset_index().transpose()
            new_header = model_parameters.iloc[0]
            model_parameters = model_parameters[1:]
            model_parameters.columns = new_header
            model_parameters['inverter_sn'] = equip
            model_parameters['line_number'] = y_list
            para_coff1 = pd.concat([para_coff1, model_parameters])

        para_coff2 = pd.concat([para_coff2, para_coff1])
        # NOTE(review): this keeps only the slope column, discarding the
        # 'const' intercept fitted above — confirm that is intentional.
        para_coff2 = para_coff2[['inverter_sn', 'line_number'] + [xvar[1]]]

    # Merge the resulting coefficients from both stages into a single results table
    combined_coefficients = pd.merge(para_coff, para_coff2, on=['inverter_sn', 'line_number'], how='outer')
    combined_coefficients.to_excel('分布式回归系数.xlsx', index=False)

# Example call (the defined function is `regression_model`, not `run_regression_analysis`):
# regression_model(inverter_df)
