# -*- coding: utf-8 -*-
import ast
import io
import pickle
from time import time

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymysql
import xgboost as xgb

# Daily energy prediction based on XGBoost models
class XGBDay:
    """Daily electricity-consumption forecaster backed by three XGBoost models.

    For every (province code, consumer number) pair three regressors are
    trained on 7 lagged daily values — a mean model plus 5%/95% quantile
    models forming a 90% prediction interval.  The pickled models are stored
    in MySQL (`model_saved`) and later reloaded by :meth:`predict` to produce
    a 3-day-ahead recursive forecast written to `result_pred_tmp1`.
    """

    def __init__(self):
        self.conn = self.connect_to_db()
        self.cursor = self.conn.cursor()
        # Feature engineering config: sort rows by date, lag the value
        # column `shift_num` (7) days back.
        self.sort_column, self.shift_column, self.shift_num = 'data_date', 'data_value', 7
        self.type, self.model_type = 'day', 'xgboost'
        self.regressor_mean = xgb.XGBRegressor(
            objective='reg:squarederror'
        )
        # 5% / 95% quantile objectives give a 90% prediction interval.
        self.regressor_upper = xgb.XGBRegressor(
            objective='reg:quantileerror',
            quantile_alpha=0.95
        )
        self.regressor_lower = xgb.XGBRegressor(
            objective='reg:quantileerror',
            quantile_alpha=0.05
        )

    # MySQL database connector
    @staticmethod
    def connect_to_db():
        """Open an autocommitting connection to the forecast database.

        NOTE(review): credentials are hard-coded; consider moving them to
        configuration/environment.
        """
        connection = pymysql.connect(
            host='localhost',
            port=3306,
            user='root',
            password='shuke1208',
            database='electricity_use_forecast',
            autocommit=True)
        return connection

    # get energy data
    def fetch_data(self, train_data_table_name):
        """Load every row of this granularity ('day') from the given table.

        Table names cannot be bound as SQL parameters, so the (internal,
        constant) table name is interpolated; the value is parameterized to
        avoid string-built SQL.
        """
        sql = "SELECT * FROM {} WHERE type = %s".format(train_data_table_name)
        self.cursor.execute(sql, (self.type,))
        dataset = self.cursor.fetchall()
        dataset_columns = ['provincecode', 'cons_no', 'type', 'data_date', 'pointsdate', 'data_value']
        dataset_df = pd.DataFrame(dataset, columns=dataset_columns)
        dataset_df.drop(columns=['pointsdate', 'type'], inplace=True)
        return dataset_df

    # get energy data for one consumer
    def fetch_data_one(self, table_name, p_code, c_no):
        """Load the daily history of one (province, consumer) pair."""
        sql = ("SELECT * FROM {} WHERE type = %s AND provincecode = %s "
               "AND cons_no = %s").format(table_name)
        self.cursor.execute(sql, (self.type, p_code, c_no))
        dataset = self.cursor.fetchall()
        dataset_columns = ['provincecode', 'cons_no', 'type', 'data_date', 'pointsdate', 'data_value']
        dataset_df = pd.DataFrame(dataset, columns=dataset_columns)
        dataset_df.drop(columns=['pointsdate', 'type'], inplace=True)
        return dataset_df

    # first-time training: bulk-insert all serialized models
    def insert_data_many(self, data):
        """Insert rows of (provincecode, cons_no, type, model_type, blob)."""
        sql = ("INSERT INTO model_saved (provincecode, cons_no, type, model_type, model_saved) "
               "VALUES (%s, %s, %s, %s, %s)")
        self.cursor.executemany(sql, data)

    # replace the stored models for one consumer
    def update_data(self, data, p_code, c_no):
        """Delete any existing models for this consumer, then insert *data*.

        Rolls back and logs on insert failure.
        """
        # SUBSTR(..., 1, 7) matches the 'xgboost' prefix of
        # 'xgboost_lower' / 'xgboost_mean' / 'xgboost_upper'.
        where = ("provincecode = %s AND cons_no = %s AND type = %s "
                 "AND SUBSTR(model_type, 1, 7) = %s")
        params = (p_code, c_no, self.type, self.model_type)
        # Step 1: check whether models for this consumer already exist.
        self.cursor.execute("SELECT 1 FROM model_saved WHERE " + where, params)
        # Step 2: delete the old rows if so.
        if self.cursor.fetchone():
            self.cursor.execute("DELETE FROM model_saved WHERE " + where, params)
        # Step 3: insert the fresh models.
        try:
            sql_insert = ("INSERT INTO model_saved (provincecode, cons_no, type, "
                          "model_type, model_saved) VALUES (%s, %s, %s, %s, %s)")
            self.cursor.executemany(sql_insert, data)
        except pymysql.MySQLError as err:
            print(err)
            self.conn.rollback()  # roll back on error

    # fetch the stored models for one consumer
    def fetch_model_data(self, p_code, c_no):
        """Return a DataFrame of the pickled model rows for one consumer."""
        sql_fetch = ("SELECT * FROM model_saved WHERE provincecode = %s AND cons_no = %s "
                     "AND type = %s AND SUBSTR(model_type, 1, 7) = %s")
        self.cursor.execute(sql_fetch, (p_code, c_no, self.type, self.model_type))
        fetch_dataset = self.cursor.fetchall()
        save_model_columns = ['provincecode', 'cons_no', 'type', 'model_type', 'model_saved']
        fetch_dataset = pd.DataFrame(fetch_dataset, columns=save_model_columns)
        return fetch_dataset

    # persist the prediction results
    def insert_prediction_data(self, data, p_code, c_no):
        """Replace this consumer's rows in result_pred_tmp1 with *data*."""
        where = ("provincecode = %s AND cons_no = %s AND type = %s "
                 "AND model_type = %s")
        params = (p_code, c_no, self.type, self.model_type)
        # Step 1: check whether predictions already exist.
        self.cursor.execute("SELECT 1 FROM result_pred_tmp1 WHERE " + where, params)
        # Step 2: delete them if so.
        if self.cursor.fetchone():
            self.cursor.execute("DELETE FROM result_pred_tmp1 WHERE " + where, params)
        # Step 3: insert the new predictions.
        sql_insert = ("INSERT INTO result_pred_tmp1(provincecode, cons_no, type, data_date, "
                      "model_type, pq_predict, pq_predict_lb, pq_predict_ub) "
                      "VALUES (%s, %s, %s, %s, %s, %s, %s, %s)")
        try:
            self.cursor.executemany(sql_insert, data)
        except pymysql.MySQLError as err:
            print(err)
            # roll back on error
            self.conn.rollback()

    def get_lagged_data(self, data):
        """Return *data* indexed by date with `shift_num` lagged value columns.

        The first `shift_num` rows (incomplete lag window) are dropped.

        BUG FIX: the original called ``data.sort_values(...)`` and discarded
        the result, so lags were computed on unsorted rows; the sorted frame
        is now assigned back.
        """
        data = data.copy()
        data = data.sort_values(by=self.sort_column)
        for lag in range(1, self.shift_num + 1):
            data[self.shift_column + '_' + str(lag)] = data[self.shift_column].shift(lag)
        data.dropna(how='any', axis=0, inplace=True)
        return data.set_index(self.sort_column)

    def _fit_and_serialize(self, data, p_code, c_no):
        """Fit the three regressors on one consumer's history and return the
        `model_saved` rows (lower, mean, upper), each carrying the str() of
        the pickled model bytes (parsed back in :meth:`predict`)."""
        values = self.get_lagged_data(data).values
        # After set_index: col 2 is data_value (target), cols 3+ are the lags.
        x, y = values[:, 3:], values[:, 2]
        rows = []
        for suffix, regressor in (('_lower', self.regressor_lower),
                                  ('_mean', self.regressor_mean),
                                  ('_upper', self.regressor_upper)):
            regressor.fit(x, y)
            buffer = io.BytesIO()
            pickle.dump(regressor, buffer)
            rows.append([p_code, c_no, self.type,
                         self.model_type + suffix, str(buffer.getvalue())])
        return rows

    # train
    def train(self):
        """First-time training: fit and bulk-insert models for every consumer."""
        train_data = self.fetch_data('result_tmp1')
        pairs = set(zip(train_data.provincecode, train_data.cons_no))
        model_list = []
        for p_code, c_no in pairs:
            mask = (train_data.provincecode == p_code) & (train_data.cons_no == c_no)
            model_list.extend(self._fit_and_serialize(train_data.loc[mask, :], p_code, c_no))
        self.insert_data_many(model_list)

    # update
    def update(self):
        """Retrain and overwrite the stored models, one consumer at a time."""
        update_data = self.fetch_data('result_tmp1')
        pairs = set(zip(update_data.provincecode, update_data.cons_no))
        for p_code, c_no in pairs:
            mask = (update_data.provincecode == p_code) & (update_data.cons_no == c_no)
            rows = self._fit_and_serialize(update_data.loc[mask, :], p_code, c_no)
            self.update_data(rows, p_code, c_no)

    # prediction
    def predict(self, p_code_and_c_no: list):
        """Produce a 3-step recursive forecast for each (p_code, c_no) pair.

        Each step's mean forecast is appended to the feature window to feed
        the next step; (mean, lower, upper) per step are persisted.
        """
        for p_code, c_no in p_code_and_c_no:
            # Latest shift_num observed values form the feature window.
            history = self.fetch_data_one('result_tmp1_c2', p_code, c_no)
            history = history.sort_values(by=[self.sort_column], ascending=True)
            test_x = np.atleast_2d(history.data_value.values[-self.shift_num:])

            stored = self.fetch_model_data(p_code, c_no)

            def _load(tag):
                # Blobs were stored as str(pickle bytes); ast.literal_eval is
                # a safe replacement for the original eval().  Unpickling is
                # acceptable only because the blobs come from our own DB —
                # never unpickle untrusted data.
                blob = ast.literal_eval(
                    stored.query("model_type.str.contains('{}')".format(tag)).model_saved.values[0])
                return pickle.load(io.BytesIO(blob))

            model_lower = _load('lower')
            model_mean = _load('mean')
            model_upper = _load('upper')

            _, col_num = test_x.shape
            window = test_x
            prediction_result = []
            for step in range(3):
                features = window[:, -col_num:]
                prediction_lower = model_lower.predict(features)
                prediction_mean = model_mean.predict(features)
                prediction_upper = model_upper.predict(features)
                prediction_result.append(
                    [p_code, c_no, self.type, str(step + 1), self.model_type,
                     prediction_mean[0], prediction_lower[0], prediction_upper[0]])
                # Feed the mean forecast back in for the next horizon step.
                window = np.concatenate((window, np.atleast_2d(prediction_mean)), axis=1)
            self.insert_prediction_data(prediction_result, p_code, c_no)


if __name__ == '__main__':
    forecaster = XGBDay()
    # forecaster.train()   # call train() for the very first training run
    # forecaster.update()  # call update() to refresh stored models
    # predict() takes (province code, consumer number) pairs
    forecaster.predict([
        ['110', '3701004461505'],
        ['110', '3701004461504'],
        ['110', '3701004461503'],
    ])

