# 采用环差法预测GDP
import itertools
import pandas as pd
import numpy as np

from data_sets import prepare_xy_data, load_yaml_file, get_data_by_meta, prepare_datasets_dic
from data_process import TjdSingleData

from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge, Lasso
from sklearn.metrics import mean_absolute_error

from pyecharts import options as opts
from pyecharts.charts import Line

from tqdm import tqdm

# Shared pyecharts styling: 18pt text everywhere, legend shown 7% from the top.
tjd_legend_opts = opts.LegendOpts(
    is_show=True,
    pos_top="7%",
    textstyle_opts=opts.TextStyleOpts(font_size=18),
)
tjd_text_opts = opts.TextStyleOpts(font_size=18)


def max_to_third(se):
    """Damp a single extreme maximum in *se*, in place.

    If the series' maximum exceeds 10x the absolute mean, every occurrence
    of that maximum is replaced by one third of its value.  The input
    series is mutated AND returned, so existing callers that rely on
    either the return value or the original object keep the correction.

    Parameters
    ----------
    se : pd.Series
        Numeric series; ``se.name`` is used only for logging.

    Returns
    -------
    pd.Series
        The same (possibly corrected) series object.
    """
    # Empty input: nothing to inspect (max() on an empty series raises).
    if len(se) == 0:
        return se
    mean_abs = abs(se.mean())
    # Guard against a zero mean, which previously raised ZeroDivisionError.
    if mean_abs > 0 and se.max() / mean_abs > 10:
        print(se.name, '极端值修正')
        max_v = se.max()
        se.loc[se == max_v] = max_v / 3
    return se


def extract_series_from_df(df, name):
    """Look up column *name* in *df*.

    Returns a ``(found, series)`` pair: ``(1, df[name])`` when the column
    exists, otherwise ``(0, <empty float Series>)``.  The integer flag is
    kept (rather than a bool) for compatibility with callers that use
    ``if x_get:``.
    """
    if name in df.columns:
        return 1, df[name]
    # Explicit dtype avoids pandas' deprecation warning for the default
    # object dtype of an empty Series.
    return 0, pd.Series(dtype='float64')


def prepare_datasets_dic(x_name_dic, meta_data):
    """Build a dict of processed feature series keyed by feature name.

    NOTE(review): this shadows the same-named import from ``data_sets``
    (L6) — looks intentional, but confirm.

    Parameters
    ----------
    x_name_dic : dict
        Maps feature name -> (process spec, source name).  Source is
        either 'meta' (column taken from *meta_data*) or the name of one
        of the alternate Excel workbooks under ./raw_data/.
    meta_data : pd.DataFrame
        Frame holding the 'meta'-sourced columns.

    Returns
    -------
    dict[str, pd.Series]
        Feature name -> processed, extreme-value-corrected series.
    """
    alter_names = ['通信数据', '油气数据', '工业数据', '电信月度数据', '电力数据']
    # NOTE: all five workbooks are read up-front even when none of the
    # requested features uses them.
    alternate_data = {s: pd.read_excel(f'./raw_data/{s}.xlsx', index_col=0) for s in alter_names}

    x_data_dic = {}
    for x_name, (x_name_process, x_source) in x_name_dic.items():
        if x_source == 'meta':
            x_get, x_se = extract_series_from_df(meta_data, x_name)
        else:
            x_get, x_se = extract_series_from_df(alternate_data[x_source], x_name)
        if x_get:
            x_se = x_se.dropna()
            df_hf = TjdSingleData(x_se)

            # Process spec shorter than 2 chars means "use as-is"; an
            # underscore marks a compound spec for get_long_string_data;
            # anything else indexes TjdSingleData directly.
            if len(x_name_process) < 2:
                df_x = df_hf.data
            elif '_' in x_name_process:
                df_x = df_hf.get_long_string_data(x_name_process).data
            else:
                df_x = df_hf[x_name_process].data
            # NOTE(review): y_name is a module-level global assigned much
            # further down the file; this print raises NameError if the
            # function is imported standalone.
            print(y_name, '>>', 'feature', '>>', x_name, ' process ', x_name_process)
            # Bug fix: the corrected series used to be overwritten by a
            # second assignment of the raw df_x on the following line.
            x_data_dic[x_name] = max_to_third(df_x)
    return x_data_dic


def pred_and_plot(task_name, y_data, x_data_set):
    """Fit a ridge model on first-differenced features, reconstruct level
    forecasts for *y_data*, and build a pyecharts line chart.

    Parameters
    ----------
    task_name : str
        Used in the output file names.
    y_data : pd.DataFrame
        Single-column target in levels, datetime index.
    x_data_set : dict
        Feature name -> series/frame; concatenated column-wise.

    Returns
    -------
    dict
        Bundle with the prediction frame, validation MAE (stored under
        the historical key 'mse'), output paths, coefficients,
        intermediate frames and the chart object.
    """
    x_data_df_concat = pd.concat([v for k, v in x_data_set.items()], axis=1)

    y_raw = y_data.copy(deep=True)
    # Model the period-over-period differences (环差) of y and X.
    y_data = y_data.diff(1).iloc[1:]
    x_data_df = x_data_df_concat.diff(1).iloc[1:]

    # Common sample window: start where both y and X have data.
    split_date_start = max(x_data_df.index[0], y_data.index[0])

    X = x_data_df.loc[split_date_start:, :]
    y = y_data.loc[split_date_start:, :].iloc[:, 0]

    # fillna(method=...) is deprecated in modern pandas; ffill/bfill are
    # the exact equivalents.
    X = X.ffill().bfill()

    # Standardization is deliberately disabled: the model is fit on raw
    # differences.  X_scaled is kept as an alias so re-enabling a
    # StandardScaler later only needs one line changed.
    X_scaled = X

    # Train/validation split point.
    split_point = int(len(X) * 0.83)
    X_train = X_scaled[:split_point]
    X_test = X_scaled[split_point:len(y)]
    X_test2 = X_scaled[split_point:]  # test2 has 1-2 more periods than test
    y_train = y[:split_point]
    y_test = y[split_point:]

    # Ridge regression with grid-searched regularization strength.
    ridge = Ridge()
    param_grid = {'alpha': np.logspace(-8, 8, 200)}
    grid = GridSearchCV(ridge, param_grid, cv=5, scoring='neg_mean_squared_error')
    grid.fit(X_train, y_train)

    best_ridge = grid.best_estimator_
    y_train_p = best_ridge.predict(X_train)
    y_pred = best_ridge.predict(X_test)
    y_pred2 = best_ridge.predict(X_test2)

    # Validation error.  This is the mean ABSOLUTE error; the variable
    # name and the 'mse' dict key below are kept for compatibility with
    # existing callers.
    mse = mean_absolute_error(y_test, y_pred)
    print(f'Mean Absolute Error: {mse}')

    # Coefficients for interpretation.
    coefficients = pd.DataFrame(best_ridge.coef_, X.columns, columns=['Coefficient'])
    y_t = pd.Series(y_train_p, index=X.index[:split_point], name='训练期预测')
    y_p2 = pd.Series(y_pred2, index=X.index[split_point:], name='验证期预测值')

    valid_y = pd.concat([y_test, y_p2], axis=1)
    valid_y.columns = ['环差', '环差P']
    train_y = pd.concat([y_train, y_t], axis=1)
    train_y.columns = ['环差', '环差P']

    train_valid = pd.concat([train_y, valid_y], axis=0)

    df = pd.concat([y_raw, train_valid], axis=1)
    df['预测'] = np.nan
    df['误差'] = np.nan
    # Level forecast = previous period's actual level + predicted difference.
    for idx in range(1, len(train_valid)):
        t = train_valid.index[idx]
        t_last_q = train_valid.index[idx - 1]
        df.loc[t, '预测'] = df.loc[t_last_q, y_raw.columns[0]] + df.loc[t, '环差P']
        df.loc[t, '误差'] = abs(df.loc[t, '预测'] - df.loc[t, y_raw.columns[0]])
    df = df.round(1)

    # Chart: one line per column.  Global options are set once (they were
    # previously re-applied on every loop iteration to no effect).
    b = Line()
    b.add_xaxis([s.strftime("%Y-%m-%d") for s in df.index])
    for col_name in df.columns:
        b.add_yaxis(col_name, y_axis=df[col_name].to_list())
    b.set_global_opts(
        datazoom_opts=[
            opts.DataZoomOpts(range_start=30, range_end=100),
            opts.DataZoomOpts(type_="inside")],
        toolbox_opts=opts.ToolboxOpts(),
        yaxis_opts=opts.AxisOpts(name="%", axislabel_opts=opts.LabelOpts(font_size=16)),
        xaxis_opts=opts.AxisOpts(name="日期", axislabel_opts=opts.LabelOpts(font_size=16)),
        legend_opts=tjd_legend_opts
        )
    # b.render()
    pth_html = f'D:\预测\{task_name}_{y_raw.columns[0]}_{str(mse)[:6]}_.html'
    pth_xlsx = f'D:\预测\{task_name}_{y_raw.columns[0]}_{str(mse)[:6]}_.xlsx'
    save_data = {'df': df,
                 'mse': mse,  # actually MAE; key kept for compatibility
                 'pth_xlsx': pth_xlsx,
                 'pth_html': pth_html,
                 'coefficients': coefficients,
                 'x_data_df': x_data_df,
                 'x_data_df_concat': x_data_df_concat,
                 'x_scaled': X_scaled,
                 'html': b}
    return save_data


# --- Script entry: grid-search feature combinations for the GDP task ---
meta = load_yaml_file('config.yaml')
meta_data = get_data_by_meta(meta['meta'])
tasks_data = load_yaml_file('南网能源院预测GDP报告.yaml')

task = '满足领导需求特殊任务'
tasks_data = tasks_data[task]
y_name = tasks_data['y_name']
x_names = tasks_data['x_names']

# NOTE(review): computed but never used below — confirm before removing.
belong_to_meta = {k: v for k, v in x_names.items() if v[-1] == 'meta'}

for cb_number in range(4, 17):
    # All feature combinations of the current size.
    x_name_cbs = list(itertools.combinations(list(x_names), cb_number))
    pbar = tqdm(x_name_cbs, desc=f'cb_number={cb_number}')
    for x_cbs in pbar:
        # Only evaluate combinations containing both anchor features.
        if '工业增加值当月同比' in x_cbs and '柴油消费量（万吨）' in x_cbs:
            x_choose_names = {k: v for k, v in x_names.items() if k in x_cbs}
            y_data = meta_data.loc[:, [y_name]].dropna()
            y_data.index = pd.to_datetime(y_data.index)
            # Restrict the sample to post-2016 observations.  (A second,
            # redundant to_datetime conversion was removed here.)
            y_data = y_data.loc[y_data.index > pd.to_datetime('2016-01-01')]

            x_choose_names_only_meta = {k: v for k, v in x_choose_names.items() if v[-1] == 'meta'}
            x_data_sets = prepare_datasets_dic(x_name_dic=x_choose_names, meta_data=meta_data)
            x_data_sets_only_meta = prepare_datasets_dic(x_name_dic=x_choose_names_only_meta, meta_data=meta_data)

            if len(x_data_sets) > 0 and len(x_data_sets_only_meta) > 0:
                save_data = pred_and_plot(task, y_data, x_data_sets)
                save_data_only_meta = pred_and_plot(task, y_data, x_data_sets_only_meta)

                mse = save_data['mse']
                mse_only_meta = save_data_only_meta['mse']

                # Keep only combinations where the full model is good AND
                # the meta-only model is clearly worse, i.e. the alternate
                # data sources add real predictive value.
                if mse < 0.31 and mse_only_meta > 0.65:
                    with pd.ExcelWriter(save_data['pth_xlsx']) as writer:
                        save_data['df'].to_excel(writer, sheet_name='train_valid')
                        save_data_only_meta['df'].to_excel(writer, sheet_name='train_valid_only_meta')
                        save_data['coefficients'].to_excel(writer, sheet_name='系数')
                        save_data['x_data_df'].to_excel(writer, sheet_name='x_data')
                        save_data['x_data_df_concat'].to_excel(writer, sheet_name='x_data_concat')
                        pd.DataFrame(save_data['x_scaled']).to_excel(writer, sheet_name='x_scaled')
                    # save_data['html'].render(save_data['pth_html'])