# -*- coding: utf-8 -*-
import asyncio
import io
import pickle
import warnings

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader

from app.core.database import db_pool  # 直接引用全局连接池

warnings.filterwarnings("ignore")

# Loss function and model definitions kept unchanged (same as the previous version).
# quantile_loss_function
def multi_quantile_loss(preds, target, quantiles):
    """Pinball (quantile) loss, summed over quantiles and averaged over the batch.

    preds:     tensor of shape (batch_size, num_quantiles).
    target:    tensor of the same shape as ``preds``.
    quantiles: list of floats or a tensor, every value strictly inside (0, 1).
    Returns a scalar loss tensor.
    """
    assert isinstance(preds, torch.Tensor), "Predictions must be a torch.Tensor"
    assert isinstance(target, torch.Tensor), "Target must be a torch.Tensor"
    assert isinstance(quantiles, (list, torch.Tensor)), "Quantiles must be a list or torch.Tensor"
    assert len(preds.shape) == 2, "Predictions must have 2 dimensions (batch_size, num_quantiles)"
    assert preds.shape[1] == len(quantiles), (f"Number of predictions ({preds.shape[1]}) "
                                              f"must match the number of quantiles ({len(quantiles)})")
    assert preds.shape == target.shape, "Shape of predictions must match shape of target"

    # Validate the (0, 1) range and normalize quantiles to a (1, Q) row tensor
    # on the same device as the predictions.
    if isinstance(quantiles, list):
        assert all(0 < q < 1 for q in quantiles), "Quantiles should be in (0, 1) range"
        q_row = torch.tensor(quantiles, device=preds.device).view(1, -1)
    else:
        assert torch.all((0 < quantiles) & (quantiles < 1)), "Quantiles should be in (0, 1) range"
        q_row = quantiles.view(1, -1)

    # Pinball loss: q * e when under-predicting (e >= 0), (q - 1) * e otherwise.
    # Equivalent to max((q - 1) * e, q * e) element-wise.
    residual = target - preds
    per_quantile = torch.where(residual >= 0, q_row * residual, (q_row - 1) * residual)
    return torch.sum(per_quantile, dim=1).mean()


# Define a simple neural network architecture
class QuantileNet(nn.Module):
    """Two-hidden-layer MLP that emits one output per target quantile.

    input_size:  number of input features (the lag-window length).
    output_size: number of quantiles predicted simultaneously.
    """

    def __init__(self, input_size, output_size):
        super().__init__()
        # Attribute names are kept stable so existing state_dicts still load.
        self.fc1 = nn.Linear(input_size, 64)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(64, 64)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(64, output_size)

    def forward(self, x):
        """Pass ``x`` through fc1 → ReLU → fc2 → ReLU → fc3."""
        for layer in (self.fc1, self.relu1, self.fc2, self.relu2, self.fc3):
            x = layer(x)
        return x


class QuantileNetEnergy:
    """Daily ('day') energy-consumption quantile forecaster.

    Fits one model per (province code, consumer number) pair, pickles the
    fitted models into the database via the module-global connection pool,
    and rolls three recursive one-step forecasts at prediction time.

    NOTE(review): several helpers referenced below (``fetch_data_one``,
    ``fetch_model_data``, ``update_data``, ``insert_prediction_data``,
    ``get_lagged_data``) and the ``regressor_lower/mean/upper`` attributes
    are not defined in this file — the original source says the remaining
    database methods were elided. They are assumed to be provided elsewhere;
    confirm before deploying.
    """

    def __init__(self):
        # Median first, then the lower/upper bounds of a 90% interval.
        self.quantiles = [0.5, 0.05, 0.95]
        self.type, self.model_type = 'day', 'nn'
        # Target column is lagged shift_num (7) days back to form features.
        self.sort_column, self.shift_column, self.shift_num = 'data_date', 'data_value', 7
        self.input_size, self.output_size = self.shift_num, len(self.quantiles)
        self.model = QuantileNet(self.input_size, self.output_size)
        self.epochs = 500
        self.optimizer = optim.Adam(self.model.parameters(), lr=0.01)

    async def _get_connection(self):
        """Acquire a connection from the module-global pool."""
        return await db_pool.acquire()  # global connection pool

    async def fetch_data(self, table_name: str):
        """Load every row of ``table_name`` matching this model's ``type``.

        Returns a DataFrame with the 'pointsdate' and 'type' columns dropped.

        The table name cannot be a bind parameter (the driver would quote it
        as a string literal, producing ``FROM 'result_tmp1'``), so it is
        checked against a whitelist and interpolated directly.
        """
        if table_name not in {'result_tmp1', 'result_tmp1_c2'}:
            raise ValueError(f"unexpected table name: {table_name!r}")
        sql = f"SELECT * FROM {table_name} WHERE type = %s"  # name whitelisted above
        async with await self._get_connection() as conn:
            async with conn.cursor() as cursor:
                await cursor.execute(sql, (self.type,))
                return pd.DataFrame(await cursor.fetchall(), columns=[
                    'provincecode', 'cons_no', 'type', 'data_date', 'pointsdate', 'data_value'
                ]).drop(columns=['pointsdate', 'type'])

    # Other database methods (fetch_data_one / insert_data_many, etc.) follow
    # the same pattern: no pool parameter, use the global connection pool.

    @staticmethod
    def _pickle_to_str(fitted_model) -> str:
        """Serialize a fitted model as the textual repr of its pickle bytes.

        NOTE(review): storing ``str(bytes)`` and recovering it with ``eval``
        (see ``predict``) is fragile and unsafe; a hex/base64 encoding or a
        BLOB column would be preferable. Kept for compatibility with rows
        already stored in this format.
        """
        return str(pickle.dumps(fitted_model))

    async def update(self):
        """Re-fit the lower/mean/upper regressors for every (province,
        consumer) pair found in result_tmp1 and persist the pickled models.

        Coroutine: the original called the async ``fetch_data`` without
        awaiting it, which returned a coroutine instead of a DataFrame.
        """
        update_data = await self.fetch_data('result_tmp1')
        pairs = set(zip(update_data.provincecode, update_data.cons_no))
        for p_code, c_no in pairs:
            data = update_data.loc[
                (update_data.provincecode == p_code) & (update_data.cons_no == c_no), :]
            data = self.get_lagged_data(data).values
            # Column 2 is the current value (target); columns 3+ are the lags.
            x, y = data[:, 3:], data[:, 2]

            # NOTE(review): these regressors look like leftovers from a
            # gradient-boosting variant of this class — the QuantileNet built
            # in __init__ is never trained here. Confirm intended behavior.
            model_list = []
            for suffix, regressor in (('_lower', self.regressor_lower),
                                      ('_mean', self.regressor_mean),
                                      ('_upper', self.regressor_upper)):
                regressor.fit(x, y)
                model_list.append([p_code, c_no, self.type,
                                   self.model_type + suffix,
                                   self._pickle_to_str(regressor)])
            self.update_data(model_list, p_code, c_no)

    # prediction
    def predict(self, p_code_and_c_no: list):
        """Produce three recursive one-step forecasts for each (province
        code, consumer number) pair and insert [mean, lower, upper] rows.

        NOTE(review): the elided DB helpers called here are invoked
        synchronously, as in the original; if they are coroutines like
        ``fetch_data``, they need awaiting and this method must become async.
        """
        for p_code, c_no in p_code_and_c_no:
            # Most recent shift_num observations form the feature window.
            history = self.fetch_data_one('result_tmp1_c2', p_code, c_no)
            history = history.sort_values(by=[self.sort_column], ascending=True)
            test_x = np.atleast_2d(history.data_value.values[-self.shift_num:])

            stored = self.fetch_model_data(p_code, c_no)

            def _load(kind):
                # SECURITY: eval() reconstructs the bytes literal written by
                # _pickle_to_str, then pickle.load deserializes it. Both are
                # unsafe on untrusted rows — acceptable only because this
                # table is written exclusively by update() above.
                raw = eval(stored.query(
                    f"model_type.str.contains('{kind}')").model_saved.values[0])
                return pickle.load(io.BytesIO(raw))

            model_lower = _load('lower')
            model_mean = _load('mean')
            model_upper = _load('upper')

            _, col_num = test_x.shape
            window = test_x
            prediction_result = []
            for step in range(3):
                row = [p_code, c_no, self.type, str(step + 1), self.model_type]
                features = window[:, -col_num:]
                prediction_lower = model_lower.predict(features)
                prediction_mean = model_mean.predict(features)
                prediction_upper = model_upper.predict(features)
                row.extend([prediction_mean[0], prediction_lower[0], prediction_upper[0]])
                prediction_result.append(row)
                # Feed the mean forecast back in as the newest observation.
                window = np.concatenate((window, np.atleast_2d(prediction_mean)), axis=1)
            self.insert_prediction_data(prediction_result, p_code, c_no)
            print(prediction_result)


if __name__ == '__main__':
    # Original instantiated an undefined `LGBDay`; this module defines
    # QuantileNetEnergy. This guard also sat inside the class body, where it
    # executed at class-creation time — it belongs at module level.
    model = QuantileNetEnergy()
    # model.train()  # first-time training entry point
    asyncio.run(model.update())  # refresh stored models
    # Prediction requires province codes and consumer numbers.
    model.predict([['110', '3701004461505'], ['110', '3701004461504'], ['110', '3701004461503']])
