import heapq
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import argparse
import os
import time
import scipy.sparse as sp
import scipy.sparse.linalg as spla
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix
import scipy.linalg as linalg
import time
from sklearn.preprocessing import StandardScaler
import joblib  # 用于保存和加载 scaler

# Human-readable labels for the solvers timed by time_solvers(), in call order.
# BUG FIX: index 4 previously said "cholesky", but time_solvers() runs the
# Jacobi iteration there (solve_cholesky is never invoked), so predictions and
# reports were mislabeled. The label now matches the solver actually timed.
method_name = ["高斯回代", "LU", "SVD", "QR", "雅可比迭代法", "高斯-赛德尔迭代法", "共轭梯度法"]
# Dimension of the square test matrices (features are order*order flattened).
order = 10


# 定义模型
class SolverChoser(nn.Module):
    """MLP that maps a flattened order*order matrix to one logit per solver.

    Output size equals len(method_name); raw logits are returned (no softmax),
    as expected by nn.CrossEntropyLoss.
    """

    def __init__(self):
        super().__init__()
        # Funnel: order*order features -> 256 -> 128 -> 64 -> one score per method.
        self.fc1 = nn.Linear(order * order, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 64)
        self.fc4 = nn.Linear(64, len(method_name))

    def forward(self, x):
        hidden = torch.relu(self.fc1(x))
        hidden = torch.relu(self.fc2(hidden))
        hidden = torch.relu(self.fc3(hidden))
        return self.fc4(hidden)


def solve_gaussian_elimination(matrix, b):
    """Time a dense direct solve (Gaussian elimination) of matrix @ x = b.

    The sparse matrix is densified first; the solution is discarded and only
    the elapsed wall-clock time (seconds) is returned.
    """
    t0 = time.perf_counter()
    dense = matrix.todense()  # np.linalg.solve needs a dense operand
    np.linalg.solve(dense, b)
    return time.perf_counter() - t0


def solve_lu_decomposition(matrix, b):
    """Time a sparse LU factorization plus solve of matrix @ x = b.

    BUG FIX: the original computed splu(matrix), discarded the factor, and
    then called spsolve(matrix, b) — which factorizes all over again. The
    factorization is now reused via SuperLU.solve().

    Returns the elapsed time in seconds; the solution is discarded.
    """
    start_time = time.perf_counter()
    lu = spla.splu(matrix)  # sparse LU factorization (prefers CSC input)
    _ = lu.solve(b)         # reuse the factor instead of re-factorizing
    end_time = time.perf_counter()
    return end_time - start_time


def solve_svd(matrix, b):
    """Time an approximate solve of matrix @ x = b via truncated sparse SVD.

    NOTE(review): svds computes only k = min(shape) - 1 singular triplets, so
    this is a pseudo-inverse on a truncated basis — not an exact solve.
    Returns the elapsed time in seconds; the result is discarded.
    """
    t0 = time.perf_counter()
    U, sigma, Vt = spla.svds(matrix, k=min(matrix.shape) - 1)
    # x ≈ V · diag(1/σ) · Uᵀ · b on the truncated singular basis
    _ = Vt.T @ np.diag(1.0 / sigma) @ U.T @ b
    return time.perf_counter() - t0


def solve_qr(matrix, b):
    """Time a dense QR solve of matrix @ x = b.

    Densifies the sparse input, factors A = QR, then solves R x = Qᵀ b.
    Returns the elapsed time in seconds; the solution is discarded.
    """
    t0 = time.perf_counter()
    dense = matrix.todense()
    # Economic QR: Q is (m, k), R is (k, n) with k = min(m, n)
    q_factor, r_factor = linalg.qr(dense, mode="economic")
    rhs = q_factor.T @ b
    linalg.solve(r_factor, rhs)  # back-substitute against upper-triangular R
    return time.perf_counter() - t0


def solve_cholesky(matrix, b):
    """Time a Cholesky solve of matrix @ x = b (matrix must be SPD).

    BUG FIX: the original called spla.cholesky, which does not exist in
    scipy.sparse.linalg, so the bare `except:` always fired (masking the real
    AttributeError) and the function could never succeed. It now densifies
    the matrix and uses scipy.linalg's Cholesky + triangular solves.

    Raises:
        ValueError: if the matrix is not positive definite (same exception
        type the old version raised, so callers are unaffected).

    Returns the elapsed time in seconds; the solution is discarded.
    """
    start_time = time.perf_counter()
    dense = matrix.toarray()
    try:
        chol = linalg.cholesky(dense, lower=True)  # dense = chol @ chol.T
    except linalg.LinAlgError as err:
        raise ValueError("Matrix must be symmetric") from err
    # Forward then backward substitution: chol y = b, chol.T x = y
    y = linalg.solve_triangular(chol, b, lower=True)
    _ = linalg.solve_triangular(chol.T, y, lower=False)
    end_time = time.perf_counter()
    return end_time - start_time


def solve_jacobi(matrix, b, tol=1e-8, max_iter=1000):
    """Time a Jacobi-iteration solve of matrix @ x = b.

    Iterates x_{k+1} = D^{-1}(b - R x_k) until the update norm falls below
    tol or max_iter sweeps complete. Returns elapsed seconds only.
    """
    t0 = time.perf_counter()
    diag_part = sp.diags(matrix.diagonal())
    off_diag = matrix - diag_part
    x = np.zeros_like(b)
    for _ in range(max_iter):
        x_next = spla.spsolve(diag_part, b - off_diag @ x)
        if np.linalg.norm(x_next - x) < tol:
            break
        x = x_next
    return time.perf_counter() - t0


def solve_gauss_seidel(matrix, b, tol=1e-8, max_iter=1000):
    """Time a Gauss-Seidel-iteration solve of matrix @ x = b.

    Splits A = (D + L) + U and iterates (D + L) x_{k+1} = b - U x_k until
    the update norm falls below tol or max_iter sweeps complete.
    Returns elapsed seconds only.
    """
    t0 = time.perf_counter()
    lower = sp.tril(matrix)   # D + L (lower triangle including diagonal)
    upper = matrix - lower    # strictly upper triangle
    x = np.zeros_like(b)
    for _ in range(max_iter):
        x_next = spla.spsolve(lower, b - upper @ x)
        if np.linalg.norm(x_next - x) < tol:
            break
        x = x_next
    return time.perf_counter() - t0


def solve_conjugate_gradient(matrix, b, tol=1e-8, max_iter=1000):
    """Time a conjugate-gradient solve of matrix @ x = b.

    BUG FIX: scipy.sparse.linalg.cg renamed its `tol` keyword to `rtol`
    (deprecated in SciPy 1.12, removed in 1.14), so the old call raises
    TypeError on current SciPy. Try the new name first, fall back to the
    legacy keyword for older versions.

    Returns the elapsed time in seconds; the (x, info) result is discarded.
    """
    start_time = time.perf_counter()
    try:
        _ = spla.cg(matrix, b, rtol=tol, maxiter=max_iter)
    except TypeError:
        # SciPy < 1.12 only knows the legacy keyword
        _ = spla.cg(matrix, b, tol=tol, maxiter=max_iter)
    end_time = time.perf_counter()
    return end_time - start_time


def time_solvers(matrix, b):
    """Run every candidate solver on (matrix, b) and collect their timings.

    The list order must stay in sync with the module-level method_name list.
    NOTE(review): the dense Gaussian-elimination time is multiplied by 4 —
    presumably a deliberate handicap against that solver; confirm the intent
    before changing it.
    """
    return [
        solve_gaussian_elimination(matrix, b) * 4,  # Gaussian elimination (x4 penalty)
        solve_lu_decomposition(matrix, b),          # LU decomposition
        solve_svd(matrix, b),                       # SVD
        solve_qr(matrix, b),                        # QR decomposition
        solve_jacobi(matrix, b),                    # Jacobi iteration
        solve_gauss_seidel(matrix, b),              # Gauss-Seidel iteration
        solve_conjugate_gradient(matrix, b),        # conjugate gradient
    ]


def generate_sparse_positive_definite_matrix(n, density=0.05):
    """Generate an n x n sparse symmetric, strictly diagonally dominant
    (hence positive definite) matrix.

    Fixes over the original: a dead `A = np.random.randn(n, n)` that was
    never used has been removed, and the docstring no longer claims a
    coo_matrix return (the function returns CSC).

    Parameters:
    n -- matrix dimension
    density -- fraction of entries initially given random values (default 0.05)

    Returns:
    scipy.sparse.csc_matrix of shape (n, n).
    """
    total_elements = n * n
    num_nonzeros = int(total_elements * density)
    dense = np.zeros((n, n))

    # Scatter random values at distinct flat positions (no duplicates).
    flat_idx = np.random.choice(total_elements, num_nonzeros, replace=False)
    dense[flat_idx // n, flat_idx % n] = np.random.randn(num_nonzeros)

    # Symmetrize so the matrix can be positive definite.
    dense = (dense + dense.T) / 2

    # Make every diagonal entry strictly dominate its row's off-diagonal
    # absolute sum -> symmetric diagonally dominant -> positive definite.
    for i in range(n):
        row = dense[i, :].copy()
        row[i] = 0  # exclude the diagonal from the dominance sum
        off_diag_sum = np.sum(np.abs(row))
        dense[i, i] = off_diag_sum * 2 + 1e-6

    return csc_matrix(dense)


def generate_data(n_samples):
    """Build a labelled training set for the solver chooser.

    For each sample: generate a random SPD matrix, time every solver, and
    label the sample with the index of the fastest one.

    Returns:
    (X, y) where X has shape (n_samples, order*order) — the flattened
    matrices — and y holds the argmin solver index per sample.
    """
    features = np.zeros((n_samples, order * order))
    labels = np.zeros((n_samples,), dtype=int)

    print(f"正在生成数据... {'|'.join(method_name)}")
    for sample_idx in range(n_samples):
        matrix = generate_sparse_positive_definite_matrix(order)
        rhs = np.random.randn(order)

        # Wall-clock time for each candidate solver
        timings = time_solvers(matrix, rhs)

        # Flatten the order x order matrix into the feature row
        features[sample_idx] = matrix.toarray().flatten()
        # The label is the fastest solver's index
        labels[sample_idx] = np.argmin(timings)
        formatted = [f"{t:.2e}" for t in timings]
        print(f"第 {sample_idx+1} 个样本，耗时 {'|'.join(formatted)} ")
    print(f"共生成 {n_samples} 个样本")
    for method_idx, method in enumerate(method_name):
        print(f"{method} 方法最快的样本数量为: {np.sum(labels == method_idx)}")

    return features, labels


def train_model(model, train_loader, criterion, optimizer):
    """Run a single training epoch over train_loader.

    Returns the mean per-batch loss (a Python float).
    """
    model.train()
    running_loss = 0.0
    batch_count = 0

    for features, targets in train_loader:
        optimizer.zero_grad()
        loss = criterion(model(features), targets)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        batch_count += 1

    # Average loss across the batches of this epoch
    return running_loss / batch_count


def test_model(model, X_test):
    model.eval()
    with torch.no_grad():
        X_test_tensor = torch.tensor(X_test, dtype=torch.float32)
        predictions = model(X_test_tensor)
    return predictions.numpy()


def main():
    """Train (or load) the solver-choice model, predict the fastest solver
    for a fresh random SPD matrix, then time all solvers and print the
    actual top three for comparison.

    BUG FIX: the final loop printed the literal text "i:" instead of the
    rank; it now prints the 1-based rank.
    """
    parser = argparse.ArgumentParser(description="训练模型预测最快的矩阵操作")
    parser.add_argument("-r", "--retrain", action="store_true", help="强制重新训练模型")
    args = parser.parse_args()

    model_path = "model.pth"
    scaler_path = "scaler.pkl"

    model = SolverChoser()
    if not args.retrain and os.path.exists(model_path):
        # Reuse the saved weights together with the matching feature scaler.
        model.load_state_dict(torch.load(model_path, weights_only=True))
        scaler = joblib.load(scaler_path)
    else:
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.001)

        # Generate training data and standardize the features.
        X_train, y_train = generate_data(n_samples=1000)
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train)

        train_data = torch.utils.data.TensorDataset(
            torch.tensor(X_train, dtype=torch.float32),
            torch.tensor(y_train, dtype=torch.long),  # CrossEntropyLoss requires long labels
        )
        train_loader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)

        # Train for a fixed 10 epochs.
        for epoch in range(10):
            average_loss = train_model(model, train_loader, criterion, optimizer)
            print(f"第 {epoch+1}/10 轮次, 平均损失: {average_loss}")

        torch.save(model.state_dict(), model_path)
        joblib.dump(scaler, scaler_path)  # persist the scaler alongside the model

    # Build one test matrix and score it with the trained model.
    matrix = generate_sparse_positive_definite_matrix(order)
    b = np.random.randn(order)

    X_test = matrix.toarray().flatten()
    # Reshape to (1, n_features) and apply the SAME scaler used in training.
    X_test = X_test.reshape(1, -1)
    X_test = scaler.transform(X_test)

    predictions = test_model(model, X_test)
    fastest_pred = np.argmax(predictions[0])

    print(f"预测最快的方法: {method_name[fastest_pred]}")

    # Time every solver for real and report the actual three fastest.
    consumed_time_list = time_solvers(matrix, b)
    sorted_numbers = sorted(enumerate(consumed_time_list), key=lambda x: x[1])
    min_three = sorted_numbers[:3]
    print("实际最快的方法前三名:")
    for i, (index, value) in enumerate(min_three):
        # was: f"    i: ..." — printed the letter "i" instead of the rank
        print(f"    {i+1}: {method_name[index]}( {value} sec)")


# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
