# # -*- coding:utf-8 -*-
# '''
# 周期性时间序列预测 (periodic time-series forecasting)
# '''
# NOTE(review): every line of this file is commented out — it is dead code
# with no effect at import time. Either delete it or restore it deliberately;
# if restored, note that the statsmodels ARIMA import below comes from a
# module that has been removed in current statsmodels releases.
# import os
# import numpy as np
# from statsmodels.tsa.seasonal import seasonal_decompose
# from statsmodels.tsa.arima_model import ARIMA
# # NOTE(review): statsmodels.tsa.arima_model was removed in statsmodels 0.13;
# # if this file is revived, use statsmodels.tsa.arima.model.ARIMA instead
# # (its fit() also no longer accepts disp=/method='css' as used below).
# from datetime import timedelta
# import pandas as pd
# from statsmodels.tsa.holtwinters import ExponentialSmoothing
#
#
# class ModelDecomp(object):
#     def __init__(self, file, vali_size=48, test_size=96):
#         self.ts = self.read_data(file)
#         self.vali_size = vali_size
#         self.test_size = test_size
#         self.train_size = len(self.ts) - self.vali_size
#         self.train1 = self.ts[:len(self.ts) - vali_size]
#         #        self.train1 = self.standard(self.train1)
#         self.train = self._diff_smooth(self.train1)
#         # draw_ts(self.train)
#         self.vali = self.ts[-vali_size:]
#
#     def read_data(self, f):
#         data = pd.read_csv(f)
#         data = data.set_index('date')
#         data.index = pd.to_datetime(data.index)
#         # NOTE(review): the original comment here was garbled by a bad
#         # encoding ("counttotal500"); it presumably described the 'count'
#         # column (perhaps a cap or total of 500) — confirm against the data.
#         ts = data['count']
#         # draw_ts(ts)
#         return ts
#
#     def _diff_smooth(self, ts):
#         dif = ts.diff().dropna()
#         td = dif.describe()
#         high = td['75%'] + 1.8 * (td['75%'] - td['25%'])
#         low = td['25%'] - 1.5 * (td['75%'] - td['25%'])
#
#         forbid_index = dif[(dif > high) | (dif < low)].index
#         i = 0
#         while i < len(forbid_index) - 1:
#             n = 1
#             start = forbid_index[i]
#             #            print("start=",start)
#             #            print("n+i=", n+i)
#             while forbid_index[i + n] == start + timedelta(hours=n) and (i + n) < len(forbid_index) - 1:
#                 n += 1
#             # NOTE(review): the bounds guard `(i + n) < len(forbid_index) - 1`
#             # is evaluated AFTER the access `forbid_index[i + n]`, so a run of
#             # outliers reaching the end of the index raises IndexError; the
#             # two conditions should be swapped (check the bound first).
#             i += n - 1
#             end = forbid_index[i]
#             #            print("end=", end)
#             value = np.linspace(ts[start - timedelta(hours=1)], ts[end + timedelta(hours=1)], n)
#             #            print(ts[start: end])
#             #            print('第%d次',(i))
#             ts[start: end] = value
#             i += 1
#
#         return ts
#
#     def decomp(self, freq):
#         '''
#         对时间序列进行分解
#         :param freq: 周期
#         '''
#         decomposition = seasonal_decompose(self.train, period=freq, two_sided=False)
#         self.trend = decomposition.trend
#         self.seasonal = decomposition.seasonal
#         self.residual = decomposition.resid
#         # decomposition.plot()
#         #        plt.show()
#
#         d = self.residual.describe()
#         delta = d['75%'] - d['25%']
#
#         self.low_error, self.high_error = (d['25%'] - 1 * delta, d['75%'] + 1 * delta)
#
#     def trend_model(self, order):
#         '''
#         为分解出来的趋势数据单独建模
#         '''
#         self.trend.dropna(inplace=True)
#         self.trend_model = ARIMA(self.trend, order).fit(disp=-1, method='css')
#         # NOTE(review): this assignment overwrites the *method* `trend_model`
#         # with the fitted result on the instance, so the method cannot be
#         # called a second time on the same object; rename the attribute
#         # (e.g. self.trend_fit) if this code is revived.
#
#         return self.trend_model
#
#     def add_season(self):
#         '''
#         为预测出的趋势数据添加周期数据和残差数据
#         '''
#         self.train_season = self.seasonal
#         values = []
#         low_conf_values = []
#         high_conf_values = []
#
#         for i, t in enumerate(self.pred_time_index):
#             trend_part = self.trend_pred[i]
#
#             # 相同时间的数据均值
#             season_part = self.train_season[
#                 self.train_season.index.time == t.time()
#                 ].mean()
#
#             # 趋势+周期+误差界限
#             predict = trend_part + season_part
#             low_bound = trend_part + season_part + self.low_error
#             high_bound = trend_part + season_part + self.high_error
#
#             values.append(predict)
#             low_conf_values.append(low_bound)
#             high_conf_values.append(high_bound)
#
#         self.final_pred = pd.Series(values, index=self.pred_time_index, name='predict')
#         self.low_conf = pd.Series(low_conf_values, index=self.pred_time_index, name='low_conf')
#         self.high_conf = pd.Series(high_conf_values, index=self.pred_time_index, name='high_conf')
#
#     def predict_validation(self):
#         '''
#         预测验证集
#         '''
#         # 续接train，生成长度为n的时间索引，赋给预测序列
#         n = self.vali_size
#         self.pred_time_index = pd.date_range(start=self.train.index[-1], periods=n + 1, freq='1H')[1:]
#         self.trend_pred = self.trend_model.forecast(n)[0]
#
#         self.add_season()
#
#     def predict_new(self):
#         '''
#         预测新数据
#         '''
#         # 续接train，生成长度为n的时间索引，赋给预测序列
#         n = self.test_size
#         self.pred_time_index = pd.date_range(start=self.train.index[-1], periods=n + 1, freq='1H')[1:]
#         self.trend_pred = self.trend_model.forecast(n)[0]
#
#         self.add_season()
#
#
# def evaluate(filename, vali_size, test_size):
#     md = ModelDecomp(file=filename, vali_size=vali_size, test_size=test_size)
#     md.decomp(freq=24)
#     md.trend_model(order=(5, 2, 9))
#
#     md.predict_validation()
#     pred = md.final_pred
#     vali = md.vali
#
#
#     md.predict_new()
#     pred2 = md.final_pred
#
#     return pred2
#
#
# def Smooth(filename, vali_size, test_size):
#     data = pd.read_csv(filename)
#     data = data.set_index('date')
#     data.index = pd.to_datetime(data.index)
#     # NOTE(review): the original comment here was garbled by a bad encoding
#     # ("counttotal500"), same as in ModelDecomp.read_data — confirm intent.
#     ts = data['count']
#     vali_size = vali_size
#     test_size = test_size
#     train = ts[:len(ts) - vali_size]
#     vali = ts[-vali_size:]
#
#     y = train
#
#     fit2 = ExponentialSmoothing(y, seasonal_periods=24, trend='add', seasonal='mul')
#     fit2 = fit2.fit()
#
#     pred = fit2.forecast(test_size)
#     return pred
#
#
# def result_fusion(pred1, pred2, vali):
#     vali1 = pred1[:len(vali)]
#     vali2 = pred2[:len(vali)]
#     vali = vali
#     MAE1 = np.abs(vali1.values - vali.values)
#     MAE2 = np.abs(vali2.values - vali.values)
#     MAE1 = MAE1.reshape(9, 24)
#     MAE2 = MAE2.reshape(9, 24)
#     # NOTE(review): reshape(9, 24) hard-codes a 216-hour validation window
#     # (9 days x 24 h, matching the 216 passed from predictmodel); it raises
#     # ValueError for any other validation length — derive 9 from len(vali)//24
#     # if this code is revived.
#     MAE1sum = MAE1.sum(axis=0)
#     MAE2sum = MAE2.sum(axis=0)
#     test1 = pred1[-(len(pred1) - len(vali)):]
#     test2 = pred2[-(len(pred2) - len(vali)):]
#     result = test2.copy()
#     for i in range(len(result)):
#         a = i % 24
#         if (MAE1sum[a] < MAE2sum[a]):
#             result[i] = test1[i]
#     return result
#
#
# def predictmodel(filename, test_size):
#     filename = filename
#     pred1 = evaluate(filename, 216, 216 + test_size)
#     pred2 = Smooth(filename, 216, 216 + test_size)
#     data = pd.read_csv(filename).set_index('date')
#     data.index = pd.to_datetime(data.index)
#     ts = data['count']
#     vali = ts[-216:]
#     return result_fusion(pred1, pred2, vali)
