import math
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
import optuna
from scipy.io import loadmat
from scipy.sparse import random as sparse_random

import json
from datetime import datetime

class ESN():
    """Leaky-integrator Echo State Network for 1-D time-series prediction.

    The reservoir state evolves as
        r(t+1) = a * r(t) + (1 - a) * tanh(W_res r(t) + W_IR u(t))
    where ``a`` is the leakage rate.  The linear readout W_RO is fitted by
    ridge (Tikhonov-regularized) least squares, then the network is run
    autonomously with its own output fed back as input.
    """

    def __init__(self, data, N, leakage_rate=0.1, rho=1, sparsity=2, T_train=400, T_predict=400, T_discard=20, eta=1e-4, seed=2050):
        self.data = data                  # input series, expected shape (1, T)
        self.N = N                        # reservoir size
        self.leakage_rate = leakage_rate  # leaking rate a in [0, 1]
        self.rho = rho                    # target spectral radius of W_res
        self.sparsity = sparsity          # average in-degree of reservoir nodes
        self.T_train = T_train            # number of training steps
        self.T_predict = T_predict        # number of autonomous prediction steps
        self.T_discard = T_discard        # initial transient steps discarded before fitting
        self.eta = eta                    # ridge regularization constant
        self.seed = seed                  # random seed (<= 0 means "do not seed")

    def initialize(self):
        """Create input weights W_IR (N x 1) and sparse reservoir weights W_res (N x N).

        Raises:
            ValueError: if the sampled reservoir matrix is all zeros
                (density too low for the chosen N).
        """
        if self.seed > 0:
            # NOTE(review): seed == 0 is a valid NumPy seed but is skipped here;
            # confirm that is intentional.
            np.random.seed(self.seed)
        # Input weights uniform in [-1, 1].
        self.W_IR = np.random.rand(self.N, 1) * 2 - 1
        # Sparse reservoir: average degree `sparsity` -> density sparsity / N,
        # nonzero entries uniform in [-1, 1].  (A dead dense random draw that
        # was immediately overwritten has been removed.)
        density = self.sparsity / self.N
        W_res = sparse_random(self.N, self.N, density=density,
                              data_rvs=lambda s: np.random.uniform(-1, 1, size=s)).toarray()
        if np.all(W_res == 0):
            raise ValueError("W_res is a zero matrix. Adjust sparsity or N.")
        # Rescale so that the spectral radius equals rho.
        max_eigval = np.max(np.abs(np.linalg.eigvals(W_res)))
        if max_eigval == 0:
            max_eigval = 1  # guard against division by zero
        self.W_res = W_res * (self.rho / max_eigval)

    def train(self):
        """Drive the reservoir with the training data and fit the readout W_RO.

        Returns:
            float: the (unregularized) sum-of-squares training error.

        Raises:
            ValueError: if data does not provide T_train samples in shape (1, T).
        """
        u = self.data[:, :self.T_train]  # teacher input
        if u.shape != (1, self.T_train):
            # was a bare `assert`, which is stripped under python -O
            raise ValueError("data must provide at least T_train samples in shape (1, T)")

        # Collect reservoir states; r[:, t+1] encodes the input up to time t.
        r = np.zeros((self.N, self.T_train + 1))
        a = self.leakage_rate
        for t in range(self.T_train):
            r[:, t + 1] = a * r[:, t] + (1 - a) * np.tanh(
                self.W_res @ r[:, t] + self.W_IR @ u[:, t])

        # Discard the initial transient.  r_p[:, j] was computed from the
        # input up to index T_discard + j, and its target v[:, j] is the next
        # sample, data[:, T_discard + 1 + j] (one-step-ahead prediction).
        self.r_p = r[:, self.T_discard + 1:]                   # length T_train - T_discard
        v = self.data[:, self.T_discard + 1:self.T_train + 1]  # targets

        # Ridge regression via the augmented least-squares formulation:
        # minimize ||r_p.T w - v.T||^2 + eta ||w||^2.
        A = np.vstack([self.r_p.T, np.sqrt(self.eta) * np.eye(self.N)])
        b = np.vstack([v.T, np.zeros((self.N, 1))])
        W_RO_T, _residuals, _rank, _s = np.linalg.lstsq(A, b, rcond=None)
        self.W_RO = W_RO_T.T

        return float(np.sum((self.W_RO @ self.r_p - v) ** 2))

    def predict(self):
        """Run the network autonomously for T_predict steps.

        The readout output at each step is fed back as the next input.
        Bug fix: the state update now uses the same leaky-integrator rule as
        train(); the original applied a plain tanh update during prediction,
        so the dynamics differed between training and inference.
        """
        u_pred = np.zeros((1, self.T_predict))
        r_pred = np.zeros((self.N, self.T_predict))
        r_pred[:, 0] = self.r_p[:, -1]  # warm start from the last training state
        a = self.leakage_rate
        for step in range(self.T_predict - 1):
            u_pred[:, step] = self.W_RO @ r_pred[:, step]
            r_pred[:, step + 1] = a * r_pred[:, step] + (1 - a) * np.tanh(
                self.W_res @ r_pred[:, step] + self.W_IR @ u_pred[:, step])
        u_pred[:, -1] = self.W_RO @ r_pred[:, -1]
        self.pred = u_pred

    def plot_predict(self):
        """Plot the predicted series against the ground truth over the prediction window."""
        ground_truth = self.data[:, self.T_train:self.T_train + self.T_predict]
        plt.figure(figsize=(12, 6))
        plt.plot(self.pred.T, 'r', label='predict', alpha=0.6)
        plt.plot(ground_truth.T, 'b', label='truth', alpha=0.6)
        plt.legend(loc='upper right', fontsize='large')
        plt.show()

    def calc_error(self):
        """Return cumulative RMSEs: element k-1 is the RMSE over the first k steps.

        Computed in O(T) with a cumulative sum instead of the original
        O(T^2) re-evaluation per prefix.
        """
        ground_truth = self.data[:, self.T_train:self.T_train + self.T_predict]
        sq = np.sum((self.pred - ground_truth) ** 2, axis=0)  # per-step squared error
        steps = np.arange(1, self.T_predict + 1)
        return np.sqrt(np.cumsum(sq) / steps).tolist()

    def rmse_error(self):
        """Return the overall RMSE over the full prediction window as a float.

        Vectorized; also avoids passing a (1, 1) ndarray to math.sqrt, a
        conversion deprecated in modern NumPy.
        """
        ground_truth = self.data[:, self.T_train:self.T_train + self.T_predict]
        return float(np.sqrt(np.mean((self.pred - ground_truth) ** 2)))
def loadData(filename):
    """Load a 1-D .npy series and return it as a (1, T) row vector."""
    series = np.load(filename)  # expected shape: (T,)
    return series.reshape(1, series.shape[0])

# Module-level dataset shared by objective() and the __main__ block.
# NOTE(review): loaded at import time; 'MG_Data.npy' must exist in the
# working directory or importing this module fails.
data_file_name = 'MG_Data.npy'
data = loadData(data_file_name)


def objective(trial):
    """Optuna objective: train an ESN with trial-suggested hyperparameters.

    Args:
        trial: an ``optuna.Trial`` used to sample the search space.

    Returns:
        float: RMSE over the prediction window (minimized by the study).
    """
    # Search space for the reservoir hyperparameters.
    reservoir_size = trial.suggest_int('reservoir_size', 70, 500)
    leakage_rate = trial.suggest_float('leakage_rate', 0.001, 0.1)
    rho = trial.suggest_float('rho', 0.9, 1)
    sparsity = trial.suggest_float('sparsity', 1, 10)

    # Keyword arguments replace the original unlabeled magic numbers;
    # values are unchanged. Uses the module-level `data` loaded above.
    esn = ESN(data, reservoir_size, leakage_rate, rho, sparsity,
              T_train=800, T_predict=300, T_discard=50, eta=1e-6, seed=2050)
    esn.initialize()
    esn.train()
    esn.predict()
    return esn.rmse_error()

def save_model(file_path, model_info):
    """Append ``model_info`` to the JSON history list stored at ``file_path``.

    Creates the file if missing. If the existing content is a single object
    it is wrapped in a list; unparseable content is discarded and a fresh
    history is started.

    Args:
        file_path: path of the JSON history file.
        model_info: JSON-serializable dict to append.
    """
    # Bug fix: open with encoding='utf-8'. dump() uses ensure_ascii=False,
    # which can emit non-ASCII characters that a platform-default codec
    # (e.g. cp1252 on Windows) may fail to encode or decode.
    if os.path.exists(file_path):
        with open(file_path, 'r', encoding='utf-8') as f:
            try:
                records = json.load(f)
                if not isinstance(records, list):
                    records = [records]  # legacy single-object file
            except json.JSONDecodeError:
                records = []  # corrupt file: start over
    else:
        records = []

    records.append(model_info)

    with open(file_path, 'w', encoding='utf-8') as f:
        json.dump(records, f, indent=4, ensure_ascii=False)


if __name__ == "__main__":
    # Create an Optuna study that minimizes the prediction RMSE.
    study = optuna.create_study(direction='minimize')

    # Run the hyperparameter search.
    study.optimize(objective, n_trials=500)

    # Report the best hyperparameters and score.
    print('Best hyperparameters: ', study.best_params)

    print('Best score: ', study.best_value)

    # data = loadmat('Demo_Data.mat')
    # data = data['X'].T
    #data = np.load('MG_Data.npy')
    # data = np.load('mackey_glass_t17.npy')
    # data = data.reshape((1, -1))
    hy = study.best_params
    # Retrain once with the best hyperparameters (same fixed T_train/T_predict/
    # T_discard/eta/seed as objective()) and visualize the prediction.
    esn = ESN(data, hy['reservoir_size'], hy['leakage_rate'], hy['rho'], hy['sparsity'], 800, 300, 50, 1e-6, 2050)

    esn.initialize()
    esn.train()
    esn.predict()
    esn.plot_predict()


    model_info = {
        'data_file_name':data_file_name,
        'best_hyperparameters': study.best_params,
        'best_score': study.best_value,
        'timestamp': datetime.now().isoformat(),  # current time in ISO format
        'description': 'Optimized parameters for ESN model using Optuna.'  # free-form note
    }
    # Persist the result to the JSON history file.
    file_path = 'best_hyperparameters.json'
    save_model(file_path, model_info)

