# -*- coding: utf-8 -*-
import ast
import io
import os
import pickle
import warnings
from time import time

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymysql
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader

warnings.filterwarnings("ignore")
from app.core.config import settings



# quantile_loss_function
# quantile_loss_function
def multi_quantile_loss(preds, target, quantiles):
    """Mean pinball (quantile) loss over a batch.

    `preds` is (batch, num_quantiles); `target` has the same shape (the true
    value repeated per quantile column).  The per-quantile losses are summed
    per sample, then averaged over the batch.
    """
    assert isinstance(preds, torch.Tensor), "Predictions must be a torch.Tensor"
    assert isinstance(target, torch.Tensor), "Target must be a torch.Tensor"
    assert isinstance(quantiles, (list, torch.Tensor)), "Quantiles must be a list or torch.Tensor"
    assert len(preds.shape) == 2, "Predictions must have 2 dimensions (batch_size, num_quantiles)"
    assert preds.shape[1] == len(quantiles), (f"Number of predictions ({preds.shape[1]}) "
                                              f"must match the number of quantiles ({len(quantiles)})")
    assert preds.shape == target.shape, "Shape of predictions must match shape of target"
    # Validate the quantile range, then normalize to a (1, num_quantiles) tensor.
    if isinstance(quantiles, list):
        assert all(0 < q < 1 for q in quantiles), "Quantiles should be in (0, 1) range"
        q_row = torch.tensor(quantiles, device=preds.device).view(1, -1)
    else:
        assert torch.all((0 < quantiles) & (quantiles < 1)), "Quantiles should be in (0, 1) range"
        q_row = quantiles.view(1, -1)
    # Pinball loss: q*e when under-predicting (e > 0), (1-q)*|e| otherwise.
    residual = target - preds
    pinball = torch.max((q_row - 1) * residual, q_row * residual)
    return torch.sum(pinball, dim=1).mean()


# Define a simple neural network architecture
# Define a simple neural network architecture
class QuantileNet(nn.Module):
    """Two-hidden-layer MLP emitting one value per requested quantile.

    Submodule names (fc1/relu1/fc2/relu2/fc3) are kept stable so previously
    pickled instances keep loading.
    """

    def __init__(self, input_size, output_size):
        super(QuantileNet, self).__init__()
        self.fc1 = nn.Linear(input_size, 64)  # input features are 1-dimensional per lag
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(64, 64)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(64, output_size)  # one output node per quantile

    def forward(self, x):
        # Apply the layers as a simple feed-forward pipeline.
        for layer in (self.fc1, self.relu1, self.fc2, self.relu2, self.fc3):
            x = layer(x)
        return x


class QuantileNetEnergy:
    """Per-consumer multi-quantile energy forecaster backed by MySQL.

    Trains one QuantileNet per (provincecode, cons_no), stores the pickled
    model in `model_saved`, and writes 7-day-ahead quantile forecasts to
    `result_pred_tmp1`.
    """

    def __init__(self):
        # Quantiles predicted per step: median, 5% lower bound, 95% upper bound.
        self.quantiles = [0.5, 0.05, 0.95]
        self.conn = self.connect_to_db()
        self.cursor = self.conn.cursor()
        self.type, self.model_type = 'day', 'nn'
        self.sort_column, self.shift_column, self.shift_num = 'data_date', 'data_value', 7
        self.input_size, self.output_size = self.shift_num, len(self.quantiles)  # past 7 days to predict the future
        # NOTE: kept for backward compatibility; training now builds a fresh
        # network per consumer (see _fit_and_serialize).
        self.model = QuantileNet(self.input_size, self.output_size)
        self.epochs = 500
        self.optimizer = optim.Adam(self.model.parameters(), lr=0.01)

    # my database connector
    @staticmethod
    def connect_to_db():
        """Open an autocommit PyMySQL connection from app settings."""
        print(f"模式: {settings.ENVIRONMENT}")
        print(f"数据库主机: {settings.DB_HOST}")
        print(f"数据库端口: {settings.DB_PORT}")
        print(f"数据库名称: {settings.DB_NAME}")
        print(f"数据库用户: {settings.DB_USER}")
        # Security fix: never log the clear-text password.
        print("数据库密码: ******")
        connection = pymysql.connect(
            host=settings.DB_HOST,
            port=settings.DB_PORT,
            user=settings.DB_USER,
            password=settings.DB_PASSWORD,
            database=settings.DB_NAME,
            autocommit=True)
        return connection

    # get energy data
    def fetch_data(self, table_name):
        """Fetch all rows of self.type from table_name as a DataFrame
        (columns: provincecode, cons_no, data_date, data_value)."""
        # Table names cannot be bound as parameters; the value is parameterized.
        sql = "SELECT * FROM {table_name} WHERE type = %s".format(table_name=table_name)
        self.cursor.execute(sql, (self.type,))
        dataset = self.cursor.fetchall()
        dataset_columns = ['provincecode', 'cons_no', 'type', 'data_date', 'pointsdate', 'data_value']
        dataset_df = pd.DataFrame(dataset, columns=dataset_columns)
        dataset_df.drop(columns=['pointsdate', 'type'], inplace=True)
        return dataset_df

    # get energy data one user
    def fetch_data_one(self, table_name, p_code, c_no):
        """Fetch one consumer's rows of self.type from table_name."""
        sql = ("SELECT * FROM {table_name} WHERE type = %s AND provincecode = %s "
               "AND cons_no = %s;").format(table_name=table_name)
        self.cursor.execute(sql, (self.type, p_code, c_no))
        dataset = self.cursor.fetchall()
        dataset_columns = ['provincecode', 'cons_no', 'type', 'data_date', 'pointsdate', 'data_value']
        dataset_df = pd.DataFrame(dataset, columns=dataset_columns)
        dataset_df.drop(columns=['pointsdate', 'type'], inplace=True)
        return dataset_df

    # insert many data to 'model_saved'
    def insert_data_many(self, data):
        """Bulk-insert serialized model rows into model_saved."""
        sql = ("INSERT INTO model_saved (provincecode, cons_no, type, model_type, model_saved) "
               "VALUES (%s, %s, %s, %s, %s)")
        self.cursor.executemany(sql, data)

    # update the model
    def update_data(self, data, p_code, c_no):
        """Replace the stored model row(s) for one consumer."""
        where = ("provincecode = %s AND cons_no = %s AND type = %s "
                 "AND SUBSTR(model_type, 1, 2) = %s")
        params = (p_code, c_no, self.type, self.model_type)
        # Step 1: check whether a model row already exists.
        self.cursor.execute("SELECT 1 FROM model_saved WHERE " + where + ";", params)
        result_search = self.cursor.fetchone()
        # Step 2: drop the stale row before inserting the replacement.
        if result_search:
            self.cursor.execute("DELETE FROM model_saved WHERE " + where + ";", params)
        # Step 3: insert the fresh model blob(s).
        try:
            sql_insert = ("INSERT INTO model_saved (provincecode, cons_no, type, model_type, model_saved) "
                          "VALUES (%s, %s, %s, %s, %s);")
            self.cursor.executemany(sql_insert, data)
        except pymysql.MySQLError as err:
            print(err)
            self.conn.rollback()  # roll back on failure

    # fetch the stored model
    def fetch_model_data(self, p_code, c_no):
        """Return the model_saved row(s) for one consumer as a DataFrame."""
        sql_fetch = ("SELECT * FROM model_saved WHERE provincecode = %s AND cons_no = %s "
                     "AND type = %s AND SUBSTR(model_type, 1, 2) = %s;")
        self.cursor.execute(sql_fetch, (p_code, c_no, self.type, self.model_type))
        fetch_dataset = self.cursor.fetchall()
        save_model_columns = ['provincecode', 'cons_no', 'type', 'model_type', 'model_saved']
        fetch_dataset = pd.DataFrame(fetch_dataset, columns=save_model_columns)
        return fetch_dataset

    # insert prediction results
    def insert_prediction_data(self, data, p_code, c_no):
        """Replace one consumer's rows in result_pred_tmp1 with new forecasts."""
        where = "provincecode = %s AND cons_no = %s AND type = %s AND model_type = %s"
        params = (p_code, c_no, self.type, self.model_type)
        # Step 1: check whether forecasts already exist for this consumer.
        self.cursor.execute("SELECT 1 FROM result_pred_tmp1 WHERE " + where + ";", params)
        result_search = self.cursor.fetchone()
        # Step 2: delete the stale forecasts if present.
        if result_search:
            self.cursor.execute("DELETE FROM result_pred_tmp1 WHERE " + where + ";", params)
        # Step 3: insert the new forecasts.
        sql_insert = ("INSERT INTO result_pred_tmp1(provincecode, cons_no, type, data_date, model_type, "
                      "pq_predict, pq_predict_lb, pq_predict_ub) VALUES (%s, %s, %s, %s, %s, %s, %s, %s);")
        try:
            self.cursor.executemany(sql_insert, data)
        except pymysql.MySQLError as err:
            print(err)
            # roll back on failure
            self.conn.rollback()

    # build lagged features
    def get_lagged_data(self, data):
        """Sort by self.sort_column, add self.shift_num lagged copies of
        self.shift_column, drop incomplete rows, and index by the sort column."""
        data = data.copy()
        # BUG FIX: the original discarded the sort result, so lags were built
        # on rows in arbitrary order; lags require chronological order.
        data = data.sort_values(by=self.sort_column)
        for i in range(self.shift_num):
            data[self.shift_column + '_' + str(i + 1)] = data[self.shift_column].shift(i + 1)
        data.dropna(how='any', axis=0, inplace=True)
        data = data.set_index(self.sort_column)
        return data

    def _fit_and_serialize(self, frame):
        """Train a fresh QuantileNet on one consumer's lagged frame and return
        the pickled model as a str(bytes) blob (legacy storage format)."""
        arr = frame.values.astype(float)
        # Column layout after get_lagged_data: [provincecode, cons_no,
        # data_value, lag_1..lag_shift_num].
        x = torch.tensor(arr[:, 3:], dtype=torch.float32)
        y = torch.tensor(arr[:, 2].reshape(-1, 1), dtype=torch.float32)
        y_expanded = y.expand(-1, len(self.quantiles))
        dataloader = DataLoader(TensorDataset(x, y_expanded), batch_size=len(x))
        # BUG FIX: a fresh network/optimizer per consumer — the original reused
        # self.model across consumers, leaking one consumer's trained weights
        # (and Adam state) into every subsequent consumer's "model".
        net = QuantileNet(self.input_size, self.output_size)
        optimizer = optim.Adam(net.parameters(), lr=0.01)
        net.train()
        for epoch in range(self.epochs):
            for x_batch, y_batch in dataloader:
                optimizer.zero_grad()
                loss = multi_quantile_loss(net(x_batch), y_batch, self.quantiles)
                loss.backward()
                optimizer.step()
            if (epoch + 1) % 10 == 0:
                print('Epoch {}, Loss: {}'.format(epoch + 1, loss.item()))
        buffer_nn = io.BytesIO()
        pickle.dump(net, buffer_nn)
        return str(buffer_nn.getvalue())

    def train(self):
        """Train and bulk-insert one model per distinct consumer."""
        train_data = self.fetch_data('result_tmp1')
        pairs = set(zip(train_data.provincecode, train_data.cons_no))
        model_list = []
        for p_code, c_no in pairs:
            subset = train_data.loc[(train_data.provincecode == p_code) & (train_data.cons_no == c_no), :]
            blob = self._fit_and_serialize(self.get_lagged_data(subset))
            model_list.append([p_code, c_no, self.type, self.model_type, blob])
        self.insert_data_many(model_list)

    def update(self):
        """Retrain each consumer's model and replace its stored row."""
        update_data = self.fetch_data('result_tmp1')
        pairs = set(zip(update_data.provincecode, update_data.cons_no))
        for p_code, c_no in pairs:
            subset = update_data.loc[(update_data.provincecode == p_code) & (update_data.cons_no == c_no), :]
            blob = self._fit_and_serialize(self.get_lagged_data(subset))
            self.update_data([[p_code, c_no, self.type, self.model_type, blob]], p_code, c_no)

    def predict(self, p_code_and_c_no: list):
        """Roll a 7-day-ahead forecast for each (provincecode, cons_no) pair
        and persist the quantile predictions."""
        for p_code, c_no in p_code_and_c_no:
            # fetch recent data x (last shift_num observed values, in date order)
            prediction_data = self.fetch_data_one('result_tmp1_c2', p_code, c_no)
            prediction_data = prediction_data.sort_values(by=[self.sort_column], ascending=True)
            test_x = np.atleast_2d(prediction_data.data_value.values[-self.shift_num:])
            # fetch the stored model blob
            fetch_buffer_content = self.fetch_model_data(p_code, c_no)
            # The blob is stored as str(bytes); literal_eval recovers the bytes
            # without eval()'s arbitrary-code risk.  SECURITY NOTE: pickle.load
            # still executes code from the blob — only load models written by
            # this application into a trusted database.
            blob = ast.literal_eval(fetch_buffer_content.model_saved.values[0])
            model_nn = pickle.load(io.BytesIO(blob))

            test_x_copy = torch.tensor(test_x.astype(float), dtype=torch.float32)
            prediction_result = []
            for i in range(7):
                pred_res = [p_code, c_no, self.type, str(i + 1), self.model_type]
                predictions = model_nn(test_x_copy[:, -self.shift_num:])
                predictions = predictions.data.numpy().flatten()
                predictions[predictions < 0] = 0  # consumption cannot be negative
                predictions = list(predictions)
                pred_res.extend(predictions)
                prediction_result.append(pred_res)
                # Feed the first quantile (median) back as the next day's input.
                test_x_copy = torch.cat((test_x_copy, torch.atleast_2d(torch.tensor(predictions[0]))), dim=1)
            self.insert_prediction_data(prediction_result, p_code, c_no)
            print(prediction_result)


if __name__ == '__main__':
    # Refresh every consumer's stored model, then forecast for the listed consumers.
    runner = QuantileNetEnergy()
    target_consumers = [
        ['110', '3701004461505'],
        ['110', '3701004461504'],
        ['110', '3701004461503'],
    ]
    runner.update()
    runner.predict(target_consumers)