{
 "cells": [
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-31T04:22:44.958943Z",
     "start_time": "2025-07-31T04:22:44.944590Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import numpy as np\n",
    "import scipy as sp\n",
    "from scipy.optimize import least_squares  # NOTE(review): not used anywhere in this notebook -- confirm before removing\n",
    "# NOTE(review): hardcoded absolute Windows path -- consider a configurable data directory\n",
    "path = \"D:/7_30data/\""
   ],
   "id": "d24d81887c0c5754",
   "outputs": [],
   "execution_count": 44
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 函数定义 (Function definitions)",
   "id": "68cd29872a3a7be3"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-31T04:22:44.881675800Z",
     "start_time": "2025-07-31T02:41:28.859171Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# linear_solve: solve via the normal equations and return the real part.\n",
    "def linear_solve(V, L_rho):\n",
    "    \"\"\"Least-squares solve of V a = L_rho through the normal equations.\n",
    "\n",
    "    Builds S = V^H V and b = V^H L_rho, solves S a = b with scipy's\n",
    "    lstsq, prints rank/residual diagnostics, and returns Re(a).\n",
    "    \"\"\"\n",
    "    gram = V.conj().T @ V\n",
    "    rhs = V.conj().T @ L_rho\n",
    "    # Least-squares solve of the normal equations (alternative: pinv(S) @ b).\n",
    "    coeffs = sp.linalg.lstsq(gram, rhs)[0]\n",
    "    # Rank diagnostics: S versus the augmented system [S | b].\n",
    "    aug = np.hstack((gram, rhs.reshape(-1, 1)))\n",
    "    print(\"rank(S) =\", np.linalg.matrix_rank(gram))\n",
    "    print(\"rank([S | b]) =\", np.linalg.matrix_rank(aug))\n",
    "    print(\"Residual norm for S @ a - b:\", np.linalg.norm(gram @ coeffs - rhs))\n",
    "    return np.real(coeffs)\n",
    "\n",
    "# complex_linear_solve: normal-equations solve using LAPACK's gelsy driver.\n",
    "def complex_linear_solve(V, L_rho):\n",
    "    \"\"\"Like linear_solve, but lstsq uses lapack_driver='gelsy'.\n",
    "\n",
    "    Prints rank and residual diagnostics and returns Re(a).\n",
    "    \"\"\"\n",
    "    normal_mat = V.conj().T @ V\n",
    "    normal_rhs = V.conj().T @ L_rho\n",
    "    solution = sp.linalg.lstsq(normal_mat, normal_rhs, lapack_driver='gelsy')[0]\n",
    "    stacked = np.hstack((normal_mat, normal_rhs.reshape(-1, 1)))\n",
    "    print(\"rank(S) =\", np.linalg.matrix_rank(normal_mat))\n",
    "    print(\"rank([S | b]) =\", np.linalg.matrix_rank(stacked))\n",
    "    print(\"Residual norm for S @ a - b:\", np.linalg.norm(normal_mat @ solution - normal_rhs))\n",
    "    return np.real(solution)\n",
    "\n",
    "\n",
    "# improved_linear_solve: least squares on V directly (no normal equations).\n",
    "def improved_linear_solve(V, L_rho):\n",
    "    \"\"\"Solve V a = L_rho directly with lstsq, avoiding the squared\n",
    "    conditioning that comes from forming S = V^H V.\n",
    "\n",
    "    Prints rank/residual diagnostics and returns Re(a).\n",
    "    \"\"\"\n",
    "    coeffs = sp.linalg.lstsq(V, L_rho)[0]\n",
    "    widened = np.hstack((V, L_rho.reshape(-1, 1)))\n",
    "    print(\"rank(V) =\", np.linalg.matrix_rank(V))\n",
    "    print(\"rank([V | L_rho]) =\", np.linalg.matrix_rank(widened))\n",
    "    # Residual of the original (un-squared) problem.\n",
    "    print(\"Residual norm for V @ a - L_rho:\", np.linalg.norm(V @ coeffs - L_rho))\n",
    "    return np.real(coeffs)\n",
    "\n",
    "\n",
    "# complex_improved_linear_solve: direct lstsq on V, keeping the complex solution.\n",
    "# (Header comment previously copy-pasted from improved_linear_solve.)\n",
    "def complex_improved_linear_solve(V, L_rho):\n",
    "    \"\"\"Solve V a = L_rho directly with LAPACK's gelsy driver.\n",
    "\n",
    "    Unlike improved_linear_solve, the solution is returned as-is\n",
    "    (no np.real), so imaginary components are preserved.\n",
    "    \"\"\"\n",
    "    a = sp.linalg.lstsq(V, L_rho, lapack_driver='gelsy')[0]  # only the solution is needed\n",
    "    rank_V = np.linalg.matrix_rank(V)\n",
    "    augmented = np.hstack((V, L_rho.reshape(-1, 1)))\n",
    "    rank_augmented = np.linalg.matrix_rank(augmented)\n",
    "    print(\"rank(V) =\", rank_V)\n",
    "    print(\"rank([V | L_rho]) =\", rank_augmented)\n",
    "    print(\"Residual norm for V @ a - L_rho:\", np.linalg.norm(V @ a - L_rho))\n",
    "    return a\n",
    "\n",
    "# SVD_linear_solve: normal-equations solve with conditioning diagnostics and a\n",
    "# regularized fallback. (Despite the name, no explicit SVD is performed here;\n",
    "# the pseudo-inverse fallback uses one internally.)\n",
    "def SVD_linear_solve(V, L_rho, regularization_lambda=1e-3, threshold=1e-10):\n",
    "    \"\"\"Solve S a = b with S = V^H V, b = V^H L_rho, printing diagnostics.\n",
    "\n",
    "    Well-conditioned, full-rank S: direct np.linalg.solve (falls back to\n",
    "    the pseudo-inverse on LinAlgError). Otherwise: Tikhonov-regularized\n",
    "    solve of (S + lambda*I) a = b.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    V, L_rho : array_like\n",
    "        System matrix and right-hand side of the original problem.\n",
    "    regularization_lambda : float\n",
    "        Tikhonov parameter for the ill-conditioned branch.\n",
    "    threshold : float\n",
    "        Unused; kept only for backward compatibility with callers.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    ndarray\n",
    "        The solution a from whichever branch was taken.\n",
    "    \"\"\"\n",
    "    S = V.conj().T @ V\n",
    "    b = V.conj().T @ L_rho\n",
    "\n",
    "    # Rank diagnostics for S and the augmented system [S | b].\n",
    "    rank_S = np.linalg.matrix_rank(S)\n",
    "    augmented = np.hstack((S, b.reshape(-1, 1)))\n",
    "    rank_augmented = np.linalg.matrix_rank(augmented)\n",
    "    print(\"rank(S) =\", rank_S)\n",
    "    print(\"rank([S | b]) =\", rank_augmented)\n",
    "\n",
    "    # Condition number check to detect a (near-)singular S.\n",
    "    cond_S = np.linalg.cond(S)\n",
    "    print(f\"Condition number of S: {cond_S}\")\n",
    "\n",
    "    # Baseline residual from an unregularized least-squares solve.\n",
    "    a = sp.linalg.lstsq(S, b, lapack_driver='gelsy')[0]\n",
    "    residual_norm = np.linalg.norm(S @ a - b)\n",
    "    print(f\"Residual norm for S @ a - b (without regularization): {residual_norm:.4e}\")\n",
    "\n",
    "    if rank_S == S.shape[0] and cond_S < 1e10:\n",
    "        # Full-rank and reasonably conditioned: solve directly.\n",
    "        try:\n",
    "            a = np.linalg.solve(S, b)\n",
    "            residual = np.linalg.norm(S @ a - b)\n",
    "            print(\"Residual norm for S @ a - b (direct solve):\", residual)\n",
    "        except np.linalg.LinAlgError:\n",
    "            print(\"Singular matrix, using pseudo-inverse.\")\n",
    "            a = np.linalg.pinv(S) @ b\n",
    "            residual = np.linalg.norm(S @ a - b)\n",
    "            print(\"Residual norm for S @ a - b (pseudo-inverse):\", residual)\n",
    "    else:\n",
    "        # Ill-conditioned or rank-deficient: Tikhonov-regularize before solving.\n",
    "        print(\"Using pseudo-inverse or regularization due to ill-conditioned matrix.\")\n",
    "        A = S + regularization_lambda * np.eye(S.shape[0])\n",
    "        a = np.linalg.solve(A, b)\n",
    "        residual = np.linalg.norm(S @ a - b)\n",
    "        print(\"Residual norm for S @ a - b (regularized solve):\", residual)\n",
    "    # BUG FIX: the return was previously inside the else-branch only, so the\n",
    "    # direct-solve path fell through and implicitly returned None.\n",
    "    return a\n"
   ],
   "id": "b059bb4a2f2319ea",
   "outputs": [],
   "execution_count": 42
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 加载数据 (Load data)",
   "id": "6720f4fd2d05e8ea"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-31T04:22:55.960097Z",
     "start_time": "2025-07-31T04:22:55.934614Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Load precomputed arrays from disk; cast to complex128 for a consistent dtype.\n",
    "S = np.load(path+'S_matrix.npy').astype(np.complex128)\n",
    "b = np.load(path+'b.npy').astype(np.complex128)\n",
    "V = np.load(path+'V.npy').astype(np.complex128)\n",
    "L_rho = np.load(path+'L_rho.npy').astype(np.complex128)\n",
    "\n",
    "\n",
    "# Sanity-check the shapes before solving.\n",
    "print(S.shape)\n",
    "print(b.shape)\n",
    "print(V.shape)\n",
    "print(L_rho.shape)"
   ],
   "id": "f87ac166d89641de",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(273, 273)\n",
      "(273,)\n",
      "(4, 273)\n",
      "(4,)\n"
     ]
    }
   ],
   "execution_count": 45
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 调用函数 (Run the solvers)",
   "id": "6b3d3560ff421697"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-31T04:22:44.899892200Z",
     "start_time": "2025-07-31T02:41:39.491259Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Run every solver variant on the same (V, L_rho) system to compare\n",
    "# their rank/residual diagnostics side by side.\n",
    "x = linear_solve(V,L_rho)\n",
    "\n",
    "y = complex_linear_solve(V, L_rho)\n",
    "\n",
    "z = improved_linear_solve(V, L_rho)\n",
    "\n",
    "f = complex_improved_linear_solve(V, L_rho)\n",
    "\n",
    "m = SVD_linear_solve(V, L_rho, regularization_lambda=1e-3)"
   ],
   "id": "9c47e2b2e4601e70",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "rank(S) = 2\n",
      "rank([S | b]) = 2\n",
      "Residual norm for S @ a - b: 1.0600097036628602e-06\n",
      "rank(S) = 2\n",
      "rank([S | b]) = 2\n",
      "Residual norm for S @ a - b: 1.446190670206653e-10\n",
      "rank(V) = 2\n",
      "rank([V | L_rho]) = 3\n",
      "Residual norm for V @ a - L_rho: 3.820397473785654\n",
      "rank(V) = 2\n",
      "rank([V | L_rho]) = 3\n",
      "Residual norm for V @ a - L_rho: 3.560365299236483\n",
      "rank(S) = 2\n",
      "rank([S | b]) = 2\n",
      "Condition number of S: 9.71224779993506e+80\n",
      "Residual norm for S @ a - b (without regularization): 1.4462e-10\n",
      "Using pseudo-inverse or regularization due to ill-conditioned matrix.\n",
      "Residual norm for S @ a - b (regularized solve): 0.017604664717021448\n"
     ]
    }
   ],
   "execution_count": 43
  }
 ],
 "metadata": {},
 "nbformat": 4,
 "nbformat_minor": 5
}
