# -*- coding: utf-8 -*-
import io
import os
import pickle
import numpy as np
import pandas as pd
import pymysql
import matplotlib.pyplot as plt
from time import time
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
import warnings
warnings.filterwarnings("ignore")
from app.core.config import settings
from app.core.database import db_pool  # 直接引用全局连接池


# quantile_loss_function
def multi_quantile_loss(preds, target, quantiles):
    assert isinstance(preds, torch.Tensor), "Predictions must be a torch.Tensor"
    assert isinstance(target, torch.Tensor), "Target must be a torch.Tensor"
    assert isinstance(quantiles, (list, torch.Tensor)), "Quantiles must be a list or torch.Tensor"
    assert len(preds.shape) == 2, "Predictions must have 2 dimensions (batch_size, num_quantiles)"
    assert preds.shape[1] == len(quantiles), (f"Number of predictions ({preds.shape[1]}) "
                                              f"must match the number of quantiles ({len(quantiles)})")
    assert preds.shape == target.shape, "Shape of predictions must match shape of target"
    if isinstance(quantiles, list):
        assert all(0 < q < 1 for q in quantiles), "Quantiles should be in (0, 1) range"
    else:
        assert torch.all((0 < quantiles) & (quantiles < 1)), "Quantiles should be in (0, 1) range"
    # Convert quantiles to a tensor if it's a list
    if isinstance(quantiles, list):
        quantiles_tensor = torch.tensor(quantiles, device=preds.device).view(1, -1)
    else:
        quantiles_tensor = quantiles.view(1, -1)
    # Calculate errors
    errors = target - preds
    # Calculate losses for each quantile
    losses = torch.max((quantiles_tensor - 1) * errors, quantiles_tensor * errors)
    # Sum the losses and take the mean
    loss = torch.mean(torch.sum(losses, dim=1))
    return loss


# Define a simple neural network architecture
# Small fully-connected network: one output node per requested quantile.
class QuantileNet(nn.Module):
    """Two-hidden-layer MLP (64 units each, ReLU) for quantile regression."""

    def __init__(self, input_size, output_size):
        super(QuantileNet, self).__init__()
        # Attribute names are kept stable because whole model objects are
        # pickled elsewhere; renaming them would break stored models.
        self.fc1 = nn.Linear(input_size, 64)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(64, 64)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(64, output_size)

    def forward(self, x):
        hidden = self.relu2(self.fc2(self.relu1(self.fc1(x))))
        return self.fc3(hidden)


class QuantileNetEnergy:
    """Per-consumer daily energy forecasting with a quantile neural network.

    One ``QuantileNet`` maps the previous ``shift_num`` daily ``data_value``
    readings to three quantiles (median, 5%, 95%) of the next day's value.
    Trained models are pickled into the ``model_saved`` table; rolling 7-day
    predictions are written to ``result_pred_tmp1``.

    NOTE(review): ``self.model`` and ``self.optimizer`` are shared across all
    (provincecode, cons_no) pairs during ``train``/``update``, so each user's
    fit continues from the previous user's weights — confirm this is intended
    (a fresh model per user may have been the goal).
    """

    def __init__(self):
        self.quantiles = [0.5, 0.05, 0.95]  # median first, then lower/upper bounds
        self.type, self.model_type = 'day', 'nn'
        self.sort_column, self.shift_column, self.shift_num = 'data_date', 'data_value', 7
        # Past shift_num days are the features; one output per quantile.
        self.input_size, self.output_size = self.shift_num, len(self.quantiles)
        self.model = QuantileNet(self.input_size, self.output_size)
        self.epochs = 500
        self.optimizer = optim.Adam(self.model.parameters(), lr=0.01)

    # Acquire a connection from the global pool; callers release via db_pool.release.
    async def _get_connection(self):
        return await db_pool.acquire()  # global connection pool

    # Fetch all energy rows of `table_name` for this granularity (self.type).
    # `table_name` is interpolated directly and must only come from trusted
    # code paths; the value filter is passed as a bound parameter.
    async def fetch_data(self, table_name):
        sql = f"SELECT * FROM {table_name} WHERE type = %s"
        conn = await self._get_connection()
        try:
            async with conn.cursor() as cursor:
                await cursor.execute(sql, (self.type,))
                dataset = await cursor.fetchall()
                dataset_columns = ['provincecode', 'cons_no', 'type', 'data_date', 'pointsdate', 'data_value']
                dataset_df = pd.DataFrame(dataset, columns=dataset_columns)
                dataset_df.drop(columns=['pointsdate', 'type'], inplace=True)
                return dataset_df
        finally:
            db_pool.release(conn)

    # Fetch the energy rows of a single (provincecode, cons_no) consumer.
    async def fetch_data_one(self, table_name, p_code, c_no):
        sql = f"SELECT * FROM {table_name} WHERE type = %s AND provincecode = %s AND cons_no = %s"
        conn = await self._get_connection()
        try:
            async with conn.cursor() as cursor:
                await cursor.execute(sql, (self.type, p_code, c_no))
                dataset = await cursor.fetchall()
                dataset_columns = ['provincecode', 'cons_no', 'type', 'data_date', 'pointsdate', 'data_value']
                dataset_df = pd.DataFrame(dataset, columns=dataset_columns)
                dataset_df.drop(columns=['pointsdate', 'type'], inplace=True)
                return dataset_df
        finally:
            db_pool.release(conn)

    # Bulk-insert serialized models into 'model_saved'.
    # `data` is an iterable of (provincecode, cons_no, type, model_type, blob) rows.
    async def insert_data_many(self, data):
        sql = "INSERT INTO model_saved (provincecode, cons_no, type, model_type, model_saved) VALUES (%s, %s, %s, %s, %s)"
        conn = await self._get_connection()
        try:
            async with conn.cursor() as cursor:
                await cursor.executemany(sql, data)
                await conn.commit()
        except pymysql.MySQLError as err:
            print(err)
            await conn.rollback()
        finally:
            db_pool.release(conn)

    # Replace the stored model for one consumer: delete any existing row
    # (matched on the model_type prefix), then insert the new one.
    async def update_data(self, data, p_code, c_no):
        where = "provincecode = %s AND cons_no = %s AND type = %s AND SUBSTR(model_type, 1, 2) = %s"
        params = (p_code, c_no, self.type, self.model_type)
        sql_s = f"SELECT 1 FROM model_saved WHERE {where}"
        sql_d = f"DELETE FROM model_saved WHERE {where}"
        sql_insert = "INSERT INTO model_saved (provincecode, cons_no, type, model_type, model_saved) VALUES (%s, %s, %s, %s, %s)"
        conn = await self._get_connection()
        try:
            async with conn.cursor() as cursor:
                await cursor.execute(sql_s, params)
                result_search = await cursor.fetchone()
                if result_search:
                    await cursor.execute(sql_d, params)
                await cursor.executemany(sql_insert, data)
                await conn.commit()
        except pymysql.MySQLError as err:
            print(err)
            await conn.rollback()
        finally:
            db_pool.release(conn)

    # Fetch the stored model row(s) for one consumer as a DataFrame.
    async def fetch_model_data(self, p_code, c_no):
        sql_fetch = ("SELECT * FROM model_saved WHERE provincecode = %s AND cons_no = %s "
                     "AND type = %s AND SUBSTR(model_type, 1, 2) = %s")
        conn = await self._get_connection()
        try:
            async with conn.cursor() as cursor:
                await cursor.execute(sql_fetch, (p_code, c_no, self.type, self.model_type))
                fetch_dataset = await cursor.fetchall()
                save_model_columns = ['provincecode', 'cons_no', 'type', 'model_type', 'model_saved']
                fetch_dataset = pd.DataFrame(fetch_dataset, columns=save_model_columns)
                return fetch_dataset
        finally:
            db_pool.release(conn)

    # Replace the stored predictions for one consumer in 'result_pred_tmp1'.
    async def insert_prediction_data(self, data, p_code, c_no):
        where = "provincecode = %s and cons_no = %s and type = %s and model_type = %s"
        params = (p_code, c_no, self.type, self.model_type)
        sql_search = f"SELECT 1 FROM result_pred_tmp1 WHERE {where}"
        sql_delete = f"DELETE FROM result_pred_tmp1 WHERE {where}"
        sql_insert = "INSERT INTO result_pred_tmp1(provincecode, cons_no, type, data_date, model_type, pq_predict, pq_predict_lb, pq_predict_ub) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
        conn = await self._get_connection()
        try:
            async with conn.cursor() as cursor:
                await cursor.execute(sql_search, params)
                result_search = await cursor.fetchone()
                if result_search:
                    await cursor.execute(sql_delete, params)
                await cursor.executemany(sql_insert, data)
                await conn.commit()
        except pymysql.MySQLError as err:
            print(err)
            await conn.rollback()
        finally:
            db_pool.release(conn)

    # Build lag features data_value_1 .. data_value_{shift_num} per row.
    def get_lagged_data(self, data):
        data = data.copy()
        # BUG FIX: the original called sort_values without keeping the result,
        # so lags could be computed on unsorted data.
        data = data.sort_values(by=self.sort_column)
        for i in range(self.shift_num):
            data[f"{self.shift_column}_{i + 1}"] = data[self.shift_column].shift(i + 1)
        data.dropna(how='any', axis=0, inplace=True)
        data = data.set_index(self.sort_column)
        return data

    # Fit self.model on one consumer's raw rows and return the pickled model bytes.
    def _fit_and_serialize(self, data):
        values = self.get_lagged_data(data).values.astype(float)
        # After set_index: column 2 is data_value (target), columns 3+ are lags.
        x = torch.tensor(values[:, 3:], dtype=torch.float32)
        y = torch.tensor(values[:, 2].reshape(-1, 1), dtype=torch.float32)
        y_expanded = y.expand(-1, len(self.quantiles))  # same target for every quantile head
        dataloader = DataLoader(TensorDataset(x, y_expanded), batch_size=len(x))
        self.model.train()
        for epoch in range(self.epochs):
            for x_batch, y_batch in dataloader:
                self.optimizer.zero_grad()
                loss = multi_quantile_loss(self.model(x_batch), y_batch, self.quantiles)
                loss.backward()
                self.optimizer.step()
            if (epoch + 1) % 10 == 0:
                print(f'Epoch {epoch + 1}, Loss: {loss.item()}')
        buffer_nn = io.BytesIO()
        pickle.dump(self.model, buffer_nn)
        return buffer_nn.getvalue()

    # Train a model for every (provincecode, cons_no) pair and bulk-insert them.
    async def train(self):
        train_data = await self.fetch_data('result_tmp1')
        pairs = set(zip(train_data.provincecode, train_data.cons_no))
        model_list = []
        for p_code, c_no in pairs:
            mask = (train_data.provincecode == p_code) & (train_data.cons_no == c_no)
            blob = self._fit_and_serialize(train_data.loc[mask, :])
            model_list.append([p_code, c_no, self.type, self.model_type, blob])
        await self.insert_data_many(model_list)

    # Re-train and replace the stored model for every pair present in result_tmp1.
    async def update(self):
        update_df = await self.fetch_data('result_tmp1')
        for p_code, c_no in set(zip(update_df.provincecode, update_df.cons_no)):
            mask = (update_df.provincecode == p_code) & (update_df.cons_no == c_no)
            blob = self._fit_and_serialize(update_df.loc[mask, :])
            model_list = [[p_code, c_no, self.type, self.model_type, blob]]
            await self.update_data(model_list, p_code, c_no)

    # Roll 7 one-day-ahead predictions per consumer, feeding each median
    # prediction back in as the next observation, then store the results.
    async def predict(self, p_code_and_c_no: list):
        for p_code, c_no in p_code_and_c_no:
            # Last shift_num observed values form the initial input window.
            prediction_data = await self.fetch_data_one('result_tmp1_c2', p_code, c_no)
            prediction_data = prediction_data.sort_values(by=[self.sort_column], ascending=True)
            test_x = np.atleast_2d(prediction_data.data_value.values[-self.shift_num:])
            fetch_buffer_content = await self.fetch_model_data(p_code, c_no)
            # SECURITY NOTE: pickle.load executes arbitrary code from the blob;
            # the model_saved table must never be writable by untrusted parties.
            buffer_nn = io.BytesIO(fetch_buffer_content.model_saved.values[0])
            model_nn = pickle.load(buffer_nn)

            window = torch.tensor(test_x.astype(float), dtype=torch.float32)
            prediction_result = []
            for i in range(7):
                pred_res = [p_code, c_no, self.type, str(i + 1), self.model_type]
                predictions = model_nn(window[:, -self.shift_num:])
                predictions = predictions.data.numpy().flatten()
                predictions[predictions < 0] = 0  # energy cannot be negative
                predictions = list(predictions)
                pred_res.extend(predictions)
                prediction_result.append(pred_res)
                # Append the median (first quantile) as the next day's value.
                window = torch.cat((window, torch.atleast_2d(torch.tensor(predictions[0]))), dim=1)
            await self.insert_prediction_data(prediction_result, p_code, c_no)
            print(prediction_result)


if __name__ == '__main__':
    import asyncio

    async def _main():
        """Run update then predict for a demo set of consumers on one event loop."""
        model = QuantileNetEnergy()
        # await model.train()
        await model.update()
        await model.predict([['110', '3701004461505'], ['110', '3701004461504'], ['110', '3701004461503']])

    # asyncio.run replaces the deprecated get_event_loop()/run_until_complete
    # pattern (removed behavior as of Python 3.12 when no loop is running) and
    # keeps every coroutine on the same loop.
    asyncio.run(_main())