{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "12058891",
   "metadata": {},
   "source": [
    "\n",
    "# 基于修改后的 MindQuantum 0.5.0 采用变分算法重构半导体双量子点单-三重态量子 CX 门"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "11259757",
   "metadata": {},
   "source": [
    "# 采用自定义的 MQLayer abs 来实现"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "6201e93d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fid_max: 0.5161779 i: 0\n",
      "fid_eval_max: 0.04705876975848316 fid_eval: 0.04705876975848316\n",
      "\n",
      "fid_max: 0.6141951 i: 3\n",
      "fid_eval_max: 0.08969107733524914 fid_eval: 0.08969107733524914\n",
      "\n",
      "fid_max: 0.6474364 i: 5\n",
      "fid_eval_max: 0.08969107733524914 fid_eval: 0.08553935298495903\n",
      "\n",
      "fid_max: 0.8532114 i: 10\n",
      "fid_eval_max: 0.1686321335094972 fid_eval: 0.1686321335094972\n",
      "\n",
      "fid_max: 0.88086706 i: 23\n",
      "fid_eval_max: 0.6809218204786021 fid_eval: 0.6809218204786021\n",
      "\n",
      "fid_max: 0.90866256 i: 46\n",
      "fid_eval_max: 0.6997058894599907 fid_eval: 0.6997058894599907\n",
      "\n",
      "fid_max: 0.90866256 i: 100\n",
      "fid_eval_max: 0.7143077819435213 fid_eval: 0.7143077819435213\n",
      "\n",
      "fid_max: 0.92959166 i: 117\n",
      "fid_eval_max: 0.8778321296272341 fid_eval: 0.8778321296272341\n",
      "\n",
      "fid_max: 0.93221647 i: 126\n",
      "fid_eval_max: 0.8778321296272341 fid_eval: 0.8622968079848766\n",
      "\n",
      "fid_max: 0.9883202 i: 145\n",
      "fid_eval_max: 0.8778321296272341 fid_eval: 0.8372576453429879\n",
      "\n",
      "fid_max: 0.9883202 i: 200\n",
      "fid_eval_max: 0.8778321296272341 fid_eval: 0.6739914481047691\n",
      "\n",
      "fid_max: 0.9883202 i: 300\n",
      "fid_eval_max: 0.8778321296272341 fid_eval: 0.7796888158781782\n",
      "\n",
      "fid_max: 0.9883202 i: 400\n",
      "fid_eval_max: 0.8778321296272341 fid_eval: 0.6398048323150193\n",
      "\n",
      "fid_max: 0.9883202 i: 500\n",
      "fid_eval_max: 0.9027406986844781 fid_eval: 0.9027406986844781\n",
      "\n",
      "fid_max: 0.9883202 i: 600\n",
      "fid_eval_max: 0.9112192473103575 fid_eval: 0.9112192473103575\n",
      "\n",
      "fid_max: 0.9929112 i: 687\n",
      "fid_eval_max: 0.9222937603622249 fid_eval: 0.9222937603622249\n",
      "\n",
      "fid_max: 0.9929112 i: 700\n",
      "fid_eval_max: 0.943542611057136 fid_eval: 0.943542611057136\n",
      "\n",
      "fid_max: 0.9929112 i: 800\n",
      "fid_eval_max: 0.943542611057136 fid_eval: 0.5649081037194286\n",
      "\n",
      "fid_max: 0.9929112 i: 900\n",
      "fid_eval_max: 0.943542611057136 fid_eval: 0.8888657433677348\n",
      "\n",
      "fid_max: 0.99304456 i: 952\n",
      "fid_eval_max: 0.9610898180316196 fid_eval: 0.9610898180316196\n",
      "\n",
      "fid_max: 0.99304456 i: 1000\n",
      "fid_eval_max: 0.9610898180316196 fid_eval: 0.81834189046744\n",
      "\n",
      "fid_max: 0.99304456 i: 1100\n",
      "fid_eval_max: 0.9610898180316196 fid_eval: 0.9383658691722134\n",
      "\n",
      "fid_max: 0.99304456 i: 1200\n",
      "fid_eval_max: 0.9610898180316196 fid_eval: 0.5127703411754384\n",
      "\n",
      "fid_max: 0.99304456 i: 1300\n",
      "fid_eval_max: 0.9610898180316196 fid_eval: 0.37675543631979863\n",
      "\n",
      "fid_max: 0.99304456 i: 1400\n",
      "fid_eval_max: 0.9610898180316196 fid_eval: 0.8384974303752865\n",
      "\n",
      "fid_max: 0.99304456 i: 1500\n",
      "fid_eval_max: 0.9610898180316196 fid_eval: 0.10592668215241585\n",
      "\n",
      "fid_max: 0.99304456 i: 1600\n",
      "fid_eval_max: 0.9610898180316196 fid_eval: 0.8704599882705588\n",
      "\n",
      "fid_max: 0.99304456 i: 1700\n",
      "fid_eval_max: 0.9610898180316196 fid_eval: 0.7871208437588815\n",
      "\n",
      "fid_max: 0.99304456 i: 1800\n",
      "fid_eval_max: 0.9610898180316196 fid_eval: 0.7726242794631332\n",
      "\n",
      "fid_max: 0.99304456 i: 1900\n",
      "fid_eval_max: 0.9610898180316196 fid_eval: 0.8385954384512891\n",
      "\n",
      "fid_max: 0.99304456 i: 2000\n",
      "fid_eval_max: 0.9610898180316196 fid_eval: 0.8976963407263675\n",
      "\n",
      "fid_max: 0.9935062 i: 2022\n",
      "fid_eval_max: 0.9707668206093534 fid_eval: 0.9707668206093534\n",
      "\n",
      "fid_max: 0.99580216 i: 2055\n",
      "fid_eval_max: 0.9852949990392668 fid_eval: 0.9852949990392668\n",
      "\n",
      "fid_max: 0.99830145 i: 2062\n",
      "fid_eval_max: 0.9928129208798002 fid_eval: 0.9928129208798002\n",
      "\n",
      "fid_max: 0.9985543 i: 2077\n",
      "fid_eval_max: 0.9951917038557343 fid_eval: 0.9951917038557343\n",
      "\n",
      "fid_max: 0.9996511 i: 2087\n",
      "fid_eval_max: 0.9980901594521994 fid_eval: 0.9980901594521994\n",
      "\n",
      "fid_max: 0.9997304 i: 2095\n",
      "fid_eval_max: 0.9983301612905414 fid_eval: 0.9983301612905414\n",
      "\n",
      "fid_max: 0.9997304 i: 2100\n",
      "fid_eval_max: 0.9983301612905414 fid_eval: 0.997235836355582\n",
      "\n",
      "fid_max: 0.99973094 i: 2148\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9994103731512852\n",
      "\n",
      "fid_max: 0.999902 i: 2157\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9991309956328116\n",
      "\n",
      "fid_max: 0.999902 i: 2200\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9992512901522669\n",
      "\n",
      "fid_max: 0.999902 i: 2300\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9994025251741567\n",
      "\n",
      "fid_max: 0.999902 i: 2400\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.998135899067948\n",
      "\n",
      "fid_max: 0.999902 i: 2500\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9645841353396216\n",
      "\n",
      "fid_max: 0.999902 i: 2600\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.1228082529237197\n",
      "\n",
      "fid_max: 0.999902 i: 2700\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.3301950142710377\n",
      "\n",
      "fid_max: 0.999902 i: 2800\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.8044199672210441\n",
      "\n",
      "fid_max: 0.999902 i: 2900\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.03934407001418262\n",
      "\n",
      "fid_max: 0.999902 i: 3000\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.3608109247544365\n",
      "\n",
      "fid_max: 0.999902 i: 3100\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9328996085674572\n",
      "\n",
      "fid_max: 0.999902 i: 3200\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9361782192209447\n",
      "\n",
      "fid_max: 0.999902 i: 3300\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9803747881662102\n",
      "\n",
      "fid_max: 0.999902 i: 3400\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.8019664496023204\n",
      "\n",
      "fid_max: 0.999902 i: 3500\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.4949631404475071\n",
      "\n",
      "fid_max: 0.999902 i: 3600\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.5043680768311425\n",
      "\n",
      "fid_max: 0.999902 i: 3700\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.8383033143087733\n",
      "\n",
      "fid_max: 0.999902 i: 3800\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.8795796560595432\n",
      "\n",
      "fid_max: 0.999902 i: 3900\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.88933582747908\n",
      "\n",
      "fid_max: 0.999902 i: 4000\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.8703265915540728\n",
      "\n",
      "fid_max: 0.999902 i: 4100\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.7615270628187315\n",
      "\n",
      "fid_max: 0.999902 i: 4200\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.8268399833114108\n",
      "\n",
      "fid_max: 0.999902 i: 4300\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.8767138710870129\n",
      "\n",
      "fid_max: 0.999902 i: 4400\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.786987498380345\n",
      "\n",
      "fid_max: 0.999902 i: 4500\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.8585704841581173\n",
      "\n",
      "fid_max: 0.999902 i: 4600\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9922696251726183\n",
      "\n",
      "fid_max: 0.999902 i: 4700\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9726582891412598\n",
      "\n",
      "fid_max: 0.999902 i: 4800\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9180114630150484\n",
      "\n",
      "fid_max: 0.999902 i: 4900\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9243021315199548\n",
      "\n",
      "fid_max: 0.999902 i: 5000\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.8762975121461671\n",
      "\n",
      "fid_max: 0.999902 i: 5100\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9096828705689085\n",
      "\n",
      "fid_max: 0.999902 i: 5200\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.8342623297569467\n",
      "\n",
      "fid_max: 0.999902 i: 5300\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.6891539705718968\n",
      "\n",
      "fid_max: 0.999902 i: 5400\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9248344359746336\n",
      "\n",
      "fid_max: 0.999902 i: 5500\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.8253580770275226\n",
      "\n",
      "fid_max: 0.999902 i: 5600\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9005787085133841\n",
      "\n",
      "fid_max: 0.999902 i: 5700\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.94521884724438\n",
      "\n",
      "fid_max: 0.999902 i: 5800\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9810516434257693\n",
      "\n",
      "fid_max: 0.999902 i: 5900\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9810204227095995\n",
      "\n",
      "fid_max: 0.999902 i: 6000\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9464085797172543\n",
      "\n",
      "fid_max: 0.999902 i: 6100\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9699807746384534\n",
      "\n",
      "fid_max: 0.999902 i: 6200\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.8806199916873105\n",
      "\n",
      "fid_max: 0.999902 i: 6300\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.9224116194601797\n",
      "\n",
      "fid_max: 0.999902 i: 6400\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.8748911083747202\n",
      "\n",
      "fid_max: 0.999902 i: 6500\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.93955886645662\n",
      "\n",
      "fid_max: 0.999902 i: 6600\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.6660813889425264\n",
      "\n",
      "fid_max: 0.999902 i: 6700\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.8840656479978153\n",
      "\n",
      "fid_max: 0.999902 i: 6800\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.903756586732145\n",
      "\n",
      "fid_max: 0.999902 i: 6900\n",
      "fid_eval_max: 0.9994103731512852 fid_eval: 0.889900647924277\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import sys\n",
    "from numpy import kron\n",
    "from mindquantum import *\n",
    "from scipy.linalg import expm\n",
    "import mindspore as ms\n",
    "from mindspore import ops, Tensor\n",
    "ms.context.set_context(mode=ms.context.PYNATIVE_MODE, device_target=\"CPU\")\n",
    "from mindspore.nn import Adam, TrainOneStepCell\n",
    "from mindspore.common.parameter import Parameter\n",
    "from mindspore.common.initializer import initializer  \n",
    "ms.set_seed(1)\n",
    "np.random.seed(1)\n",
    "\n",
    "train_x = np.load('./src/CNOT_train_x.npy', allow_pickle=True)\n",
    "eval_x = np.load('./src/CNOT_eval_x.npy', allow_pickle=True)\n",
    "train_y = np.load('./src/CNOT_train_y.npy', allow_pickle=True)\n",
    "eval_y = np.load('./src/CNOT_eval_y.npy', allow_pickle=True)\n",
    "\n",
    "s_x = X.matrix()\n",
    "s_z = Z.matrix()\n",
    "one = I.matrix()\n",
    "dt = np.pi/10\n",
    "\n",
    "def _matrix_0(coeff):\n",
    "    return expm(-1j*(coeff*s_z+s_x)*dt)\n",
    "\n",
    "def _diff_matrix_0(coeff):\n",
    "    return -1j*_matrix_0(coeff)@(s_z*dt)\n",
    "\n",
    "def _matrix_c_0(coeff):\n",
    "    return expm(-1j*(coeff*kron(s_z, one) + kron(one, s_z) + kron(s_x, one) + kron(one, s_x) + coeff*kron(s_z-one, s_z-one))*5*dt)\n",
    "\n",
    "def _diff_matrix_c_0(coeff):\n",
    "    return -1j*_matrix_c_0(coeff)@((kron(s_z, one) + kron(s_z-one, s_z-one)) * 5*dt)\n",
    "\n",
    "def _matrix_c_1(coeff):\n",
    "    return expm(-1j*(kron(s_z, one) + coeff*kron(one, s_z) + kron(s_x, one) + kron(one, s_x) + coeff*kron(s_z-one, s_z-one))*5*dt)\n",
    "\n",
    "def _diff_matrix_c_1(coeff):\n",
    "    return -1j*_matrix_c_1(coeff)@((kron(one, s_z) + kron(s_z-one, s_z-one)) *  5*dt)\n",
    "\n",
    "gate_0 = gene_univ_parameterized_gate('gete_0', _matrix_0, _diff_matrix_0)\n",
    "gate_c_0 = gene_univ_parameterized_gate('gete_c_0', _matrix_c_0, _diff_matrix_c_0)\n",
    "gate_c_1 = gene_univ_parameterized_gate('gete_c_1', _matrix_c_1, _diff_matrix_c_1)\n",
    "\n",
    "circ = Circuit()\n",
    "\n",
    "circ += gate_0('00').on(0)\n",
    "circ += gate_0('01').on(0)\n",
    "circ += gate_0('02').on(0)\n",
    "circ += gate_0('03').on(0)\n",
    "circ += gate_0('04').on(0)\n",
    "circ += gate_0('05').on(0)\n",
    "circ += gate_0('06').on(0)\n",
    "circ += gate_0('07').on(0)\n",
    "circ += gate_0('08').on(0)\n",
    "circ += gate_0('09').on(0)\n",
    "\n",
    "circ += gate_0('10').on(1)\n",
    "circ += gate_0('11').on(1)\n",
    "circ += gate_0('12').on(1)\n",
    "circ += gate_0('13').on(1)\n",
    "circ += gate_0('14').on(1)\n",
    "circ += gate_0('15').on(1)\n",
    "circ += gate_0('16').on(1)\n",
    "circ += gate_0('17').on(1)\n",
    "circ += gate_0('18').on(1)\n",
    "circ += gate_0('19').on(1)\n",
    "\n",
    "circ += gate_c_0('0').on([1,0])\n",
    "circ += gate_c_0('1').on([1,0])\n",
    "circ += gate_c_1('2').on([1,0])\n",
    "circ += gate_c_1('3').on([1,0])\n",
    "\n",
    "circ += gate_0('010').on(0)\n",
    "circ += gate_0('011').on(0)\n",
    "circ += gate_0('012').on(0)\n",
    "circ += gate_0('013').on(0)\n",
    "circ += gate_0('014').on(0)\n",
    "circ += gate_0('015').on(0)\n",
    "circ += gate_0('016').on(0)\n",
    "circ += gate_0('017').on(0)\n",
    "circ += gate_0('018').on(0)\n",
    "circ += gate_0('019').on(0)\n",
    "\n",
    "circ += gate_0('110').on(1)\n",
    "circ += gate_0('111').on(1)\n",
    "circ += gate_0('112').on(1)\n",
    "circ += gate_0('113').on(1)\n",
    "circ += gate_0('114').on(1)\n",
    "circ += gate_0('115').on(1)\n",
    "circ += gate_0('116').on(1)\n",
    "circ += gate_0('117').on(1)\n",
    "circ += gate_0('118').on(1)\n",
    "circ += gate_0('119').on(1)\n",
    "\n",
     "infid_list = [] # intended: mean infidelity per step (declared but never appended in this cell)\n",
     "error_list = [] # intended: max infidelity per step (declared but never appended in this cell)\n",
    "lr = 0.1\n",
    "\n",
    "ham = Hamiltonian(QubitOperator('')) \n",
    "sim = Simulator('projectq', circ.n_qubits)\n",
    "sim_left = Simulator('projectq',circ.n_qubits)\n",
    "\n",
    "grad_ops = sim.get_expectation_with_grad(ham,\n",
    "                                         circ,\n",
    "                                         circ_left=Circuit(),\n",
    "                                         simulator_left=sim_left,\n",
    "                                         ansatz_params_name=circ.params_name)\n",
    "Quantum_net = MQLayer(grad_ops)\n",
    "opti = Adam(Quantum_net.trainable_params(), learning_rate=lr)  \n",
    "net = TrainOneStepCell(Quantum_net, opti)\n",
     "m = 0 # counter (appears unused in this cell)\n",
    "fid_max = 0\n",
    "fid_eval_max = 0\n",
    "for j in range(len(train_x)):\n",
    "    res = net(Tensor(train_x[j]), Tensor(train_y[j]))\n",
    "    if res[0,0] > fid_max or j%100==0:\n",
    "        fid_max = max(fid_max, res[0,0])\n",
    "        print('\\nfid_max:', fid_max, 'i:', j)\n",
    "        params = abs(Quantum_net.weight.asnumpy())\n",
    "        final_state = []\n",
    "        for k in range(100): # 100 个测试点\n",
    "            sim.reset()\n",
    "            sim.set_qs(eval_x[k])\n",
    "            sim.apply_circuit(circ, params)\n",
    "            final_state.append(sim.get_qs())\n",
    "        fid_eval = np.real(np.min([np.abs(np.vdot(bra, ket)) for bra, ket in zip(np.array(final_state), eval_y)]))\n",
    "        fid_eval_max = max(fid_eval_max, fid_eval)\n",
    "        print('fid_eval_max:',fid_eval_max, 'fid_eval:', fid_eval)\n",
    "        \n",
     "# Best result observed in the training run above (recorded for reference):\n",
     "# fid_max: 0.999902 i: 2157\n",
     "# fid_eval_max: 0.9994103731512852 fid_eval: 0.9991309956328116"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "5605ac66",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "error_min: 0.9344227390500963 error_now: 0.9344227390500963 train_num: 0\n",
      "error_min: 0.18707726294315064 error_now: 0.2224786599881965 train_num: 50\n",
      "error_min: 0.09122507669191227 error_now: 0.09991302978380401 train_num: 100\n",
      "error_min: 0.09122507669191227 error_now: 0.21148050805691232 train_num: 150\n",
      "error_min: 0.09122507669191227 error_now: 0.2661200669849586 train_num: 200\n",
      "error_min: 0.060363093454498085 error_now: 0.06975054471592068 train_num: 250\n",
      "error_min: 0.060363093454498085 error_now: 0.07814969942078154 train_num: 300\n",
      "error_min: 0.04797640778561907 error_now: 0.11732427286745106 train_num: 350\n",
      "error_min: 0.04797640778561907 error_now: 0.08709595845186013 train_num: 400\n",
      "error_min: 0.04797640778561907 error_now: 0.06702499341321333 train_num: 450\n",
      "error_min: 0.04797640778561907 error_now: 0.08796698402188474 train_num: 500\n",
      "error_min: 0.04797640778561907 error_now: 0.25978454278764296 train_num: 550\n",
      "error_min: 0.04797640778561907 error_now: 0.1279056719458066 train_num: 600\n",
      "error_min: 0.04797640778561907 error_now: 0.07722679032964308 train_num: 650\n",
      "error_min: 0.04797640778561907 error_now: 0.08268471387319465 train_num: 700\n",
      "error_min: 0.04797640778561907 error_now: 0.13584013857251898 train_num: 750\n",
      "error_min: 0.04797640778561907 error_now: 0.0695572238828005 train_num: 800\n",
      "error_min: 0.04797640778561907 error_now: 0.07159687361969924 train_num: 850\n",
      "error_min: 0.04797640778561907 error_now: 0.11911962293647194 train_num: 900\n",
      "error_min: 0.04797640778561907 error_now: 0.23433061806349853 train_num: 950\n",
      "error_min: 0.04797640778561907 error_now: 0.12079173838042767 train_num: 1000\n",
      "error_min: 0.04797640778561907 error_now: 0.20368619584726855 train_num: 1050\n",
      "error_min: 0.04797640778561907 error_now: 0.08778450991049092 train_num: 1100\n",
      "error_min: 0.04797640778561907 error_now: 0.07183892975487027 train_num: 1150\n",
      "error_min: 0.04797640778561907 error_now: 0.11391353944266869 train_num: 1200\n",
      "error_min: 0.04797640778561907 error_now: 0.1413694449531897 train_num: 1250\n",
      "error_min: 0.04797640778561907 error_now: 0.18156470676927572 train_num: 1300\n",
      "error_min: 0.04797640778561907 error_now: 0.09381780623812452 train_num: 1350\n",
      "error_min: 0.04797640778561907 error_now: 0.15535739454813235 train_num: 1400\n",
      "error_min: 0.04797640778561907 error_now: 0.07992927212 train_num: 1450\n",
      "error_min: 0.04797640778561907 error_now: 0.10778121590928169 train_num: 1500\n",
      "error_min: 0.04797640778561907 error_now: 0.09599449558472184 train_num: 1550\n",
      "error_min: 0.04797640778561907 error_now: 0.07616890949324318 train_num: 1600\n",
      "error_min: 0.04797640778561907 error_now: 0.12678011100633246 train_num: 1650\n",
      "error_min: 0.04797640778561907 error_now: 0.1798198485032465 train_num: 1700\n",
      "error_min: 0.04797640778561907 error_now: 0.10301867351667349 train_num: 1750\n",
      "error_min: 0.04797640778561907 error_now: 0.17023826771662298 train_num: 1800\n",
      "error_min: 0.04797640778561907 error_now: 0.06830149384024387 train_num: 1850\n",
      "error_min: 0.04797640778561907 error_now: 0.11215259953319912 train_num: 1900\n",
      "error_min: 0.04797640778561907 error_now: 0.08272699236330217 train_num: 1950\n",
      "error_min: 0.04797640778561907 error_now: 0.11885414417175277 train_num: 2000\n",
      "error_min: 0.04797640778561907 error_now: 0.12469204276635659 train_num: 2050\n",
      "error_min: 0.04797640778561907 error_now: 0.11361672229387565 train_num: 2100\n",
      "error_min: 0.04797640778561907 error_now: 0.09439913432086056 train_num: 2150\n",
      "error_min: 0.04797640778561907 error_now: 0.09084590015404703 train_num: 2200\n",
      "error_min: 0.04797640778561907 error_now: 0.08616032772514925 train_num: 2250\n",
      "error_min: 0.04797640778561907 error_now: 0.10788606419335434 train_num: 2300\n",
      "error_min: 0.04797640778561907 error_now: 0.0770577784671097 train_num: 2350\n",
      "error_min: 0.04797640778561907 error_now: 0.08775282573491627 train_num: 2400\n",
      "error_min: 0.04797640778561907 error_now: 0.14445684390117086 train_num: 2450\n",
      "error_min: 0.04797640778561907 error_now: 0.11698285973891343 train_num: 2500\n",
      "error_min: 0.04797640778561907 error_now: 0.10074024149328953 train_num: 2550\n",
      "error_min: 0.04797640778561907 error_now: 0.2250462238384966 train_num: 2600\n",
      "error_min: 0.04797640778561907 error_now: 0.111371046046563 train_num: 2650\n",
      "error_min: 0.04797640778561907 error_now: 0.0797805862231925 train_num: 2700\n",
      "error_min: 0.04797640778561907 error_now: 0.07415732647183348 train_num: 2750\n",
      "error_min: 0.04797640778561907 error_now: 0.09575496277708584 train_num: 2800\n",
      "error_min: 0.04797640778561907 error_now: 0.15741351372996482 train_num: 2850\n",
      "error_min: 0.04797640778561907 error_now: 0.09179733704582049 train_num: 2900\n",
      "error_min: 0.04797640778561907 error_now: 0.28944348712420664 train_num: 2950\n",
      "error_min: 0.04797640778561907 error_now: 0.21522090917719883 train_num: 3000\n",
      "error_min: 0.04797640778561907 error_now: 0.12365549000144449 train_num: 3050\n",
      "error_min: 0.04797640778561907 error_now: 0.11389180767037366 train_num: 3100\n",
      "error_min: 0.04797640778561907 error_now: 0.17011717547209804 train_num: 3150\n",
      "error_min: 0.04797640778561907 error_now: 0.11053985103492292 train_num: 3200\n",
      "error_min: 0.04797640778561907 error_now: 0.11867958365046705 train_num: 3250\n",
      "error_min: 0.04797640778561907 error_now: 0.17855052667452764 train_num: 3300\n",
      "error_min: 0.04797640778561907 error_now: 0.17611930003553844 train_num: 3350\n",
      "error_min: 0.04797640778561907 error_now: 0.13832610780753174 train_num: 3400\n",
      "error_min: 0.04797640778561907 error_now: 0.10935154470831598 train_num: 3450\n",
      "error_min: 0.04797640778561907 error_now: 0.14737648823411664 train_num: 3500\n",
      "error_min: 0.04797640778561907 error_now: 0.1328978794733373 train_num: 3550\n",
      "error_min: 0.04797640778561907 error_now: 0.12220160099499222 train_num: 3600\n",
      "error_min: 0.04797640778561907 error_now: 0.12358232745060116 train_num: 3650\n",
      "error_min: 0.04797640778561907 error_now: 0.10188483040356722 train_num: 3700\n",
      "error_min: 0.04797640778561907 error_now: 0.18209194194849332 train_num: 3750\n",
      "error_min: 0.04797640778561907 error_now: 0.1216352980495804 train_num: 3800\n",
      "error_min: 0.04797640778561907 error_now: 0.11762926517484096 train_num: 3850\n",
      "error_min: 0.04797640778561907 error_now: 0.1890701040417816 train_num: 3900\n",
      "error_min: 0.04797640778561907 error_now: 0.1917832187631473 train_num: 3950\n",
      "error_min: 0.04797640778561907 error_now: 0.18319611243218226 train_num: 4000\n",
      "error_min: 0.04797640778561907 error_now: 0.17703590162216976 train_num: 4050\n",
      "error_min: 0.04797640778561907 error_now: 0.13460597474971359 train_num: 4100\n",
      "error_min: 0.04797640778561907 error_now: 0.1076693109438096 train_num: 4150\n",
      "error_min: 0.04797640778561907 error_now: 0.1771417063320294 train_num: 4200\n",
      "error_min: 0.04797640778561907 error_now: 0.19967674041050687 train_num: 4250\n",
      "error_min: 0.04797640778561907 error_now: 0.17939343018588783 train_num: 4300\n",
      "error_min: 0.04797640778561907 error_now: 0.13226450246703003 train_num: 4350\n",
      "error_min: 0.04797640778561907 error_now: 0.20418428046110804 train_num: 4400\n",
      "error_min: 0.04797640778561907 error_now: 0.24853926627825051 train_num: 4450\n",
      "error_min: 0.04797640778561907 error_now: 0.14905715527808439 train_num: 4500\n",
      "error_min: 0.04797640778561907 error_now: 0.13069355797961757 train_num: 4550\n",
      "error_min: 0.04797640778561907 error_now: 0.10683313892852353 train_num: 4600\n",
      "error_min: 0.04797640778561907 error_now: 0.08701216370020459 train_num: 4650\n",
      "error_min: 0.04797640778561907 error_now: 0.10569463043449068 train_num: 4700\n",
      "error_min: 0.04797640778561907 error_now: 0.12373361832141139 train_num: 4750\n",
      "error_min: 0.04797640778561907 error_now: 0.12567474176275417 train_num: 4800\n",
      "error_min: 0.04797640778561907 error_now: 0.14676758161391446 train_num: 4850\n",
      "error_min: 0.04797640778561907 error_now: 0.16288105668683006 train_num: 4900\n",
      "error_min: 0.04797640778561907 error_now: 0.24997049617848177 train_num: 4950\n",
      "error_min: 0.04797640778561907 error_now: 0.3560383439630789 train_num: 5000\n",
      "error_min: 0.04797640778561907 error_now: 0.2202037719736356 train_num: 5050\n",
      "error_min: 0.04797640778561907 error_now: 0.23724837163022894 train_num: 5100\n",
      "error_min: 0.04797640778561907 error_now: 0.6740702342772058 train_num: 5150\n",
      "error_min: 0.04797640778561907 error_now: 0.2656207926013522 train_num: 5200\n",
      "error_min: 0.04797640778561907 error_now: 0.251907339754905 train_num: 5250\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "error_min: 0.04797640778561907 error_now: 0.2610912950188613 train_num: 5300\n",
      "error_min: 0.04797640778561907 error_now: 0.2574529416795339 train_num: 5350\n",
      "error_min: 0.04797640778561907 error_now: 0.2712633633372684 train_num: 5400\n",
      "error_min: 0.04797640778561907 error_now: 0.15755535597223969 train_num: 5450\n",
      "error_min: 0.04797640778561907 error_now: 0.16682371195460544 train_num: 5500\n",
      "error_min: 0.04797640778561907 error_now: 0.166188621450158 train_num: 5550\n",
      "error_min: 0.04797640778561907 error_now: 0.17302744399830206 train_num: 5600\n",
      "error_min: 0.04797640778561907 error_now: 0.18163728369561583 train_num: 5650\n",
      "error_min: 0.04797640778561907 error_now: 0.16544330905202764 train_num: 5700\n",
      "error_min: 0.04797640778561907 error_now: 0.18292586732290594 train_num: 5750\n",
      "error_min: 0.04797640778561907 error_now: 0.17336322211955257 train_num: 5800\n",
      "error_min: 0.04797640778561907 error_now: 0.1294092035890113 train_num: 5850\n",
      "error_min: 0.04797640778561907 error_now: 0.49980756152186434 train_num: 5900\n",
      "error_min: 0.04797640778561907 error_now: 0.1527797900511082 train_num: 5950\n",
      "error_min: 0.04797640778561907 error_now: 0.1601627982470073 train_num: 6000\n",
      "error_min: 0.04797640778561907 error_now: 0.11203032778340105 train_num: 6050\n",
      "error_min: 0.04797640778561907 error_now: 0.18717035817814653 train_num: 6100\n",
      "error_min: 0.04797640778561907 error_now: 0.21478919153987897 train_num: 6150\n",
      "error_min: 0.04797640778561907 error_now: 0.21060884047828454 train_num: 6200\n",
      "error_min: 0.04797640778561907 error_now: 0.22523596221361641 train_num: 6250\n",
      "error_min: 0.04797640778561907 error_now: 0.1874807009163202 train_num: 6300\n",
      "error_min: 0.04797640778561907 error_now: 0.23005662598420717 train_num: 6350\n",
      "error_min: 0.04797640778561907 error_now: 0.3669333393747215 train_num: 6400\n",
      "error_min: 0.04797640778561907 error_now: 0.22975773697606294 train_num: 6450\n",
      "error_min: 0.04797640778561907 error_now: 0.12536376712010378 train_num: 6500\n",
      "error_min: 0.04797640778561907 error_now: 0.10348537935493751 train_num: 6550\n",
      "error_min: 0.04797640778561907 error_now: 0.24600590653804277 train_num: 6600\n",
      "error_min: 0.04797640778561907 error_now: 0.14996275600749287 train_num: 6650\n",
      "error_min: 0.04797640778561907 error_now: 0.2885527440668796 train_num: 6700\n",
      "error_min: 0.04797640778561907 error_now: 0.12963158677733844 train_num: 6750\n",
      "error_min: 0.04797640778561907 error_now: 0.21578500171929416 train_num: 6800\n",
      "error_min: 0.04797640778561907 error_now: 0.1630202703627427 train_num: 6850\n",
      "error_min: 0.04797640778561907 error_now: 0.3093557206991364 train_num: 6900\n",
      "error_min: 0.04797640778561907 error_now: 0.1307787880127882 train_num: 6950\n",
      "\n",
      "error_min: 0.04797640778561907 train_num: 312\n",
      "params_tem:\n",
      " [0.02635398 0.02121594 0.05574841 0.12570833 0.10653849 0.41323474\n",
      " 0.24771751 0.27008548 0.33187172 0.07250276 1.4933792  1.3861762\n",
      " 0.6105549  0.39302704 0.02079554 0.1038827  0.6122872  1.2834444\n",
      " 1.6945735  1.2476656  0.02858558 2.833644   1.2430115  0.77166635\n",
      " 0.03464523 0.28156394 0.83511335 0.07544014 0.064255   0.01940136\n",
      " 0.02137233 0.01320201 0.5091019  1.4221824  0.15027332 0.87718713\n",
      " 1.2636441  1.1839712  0.7476013  0.24003571 0.35493502 0.1782767\n",
      " 0.541026   1.2284135 ]\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import sys\n",
    "from numpy import kron\n",
    "from mindquantum import *\n",
    "from scipy.linalg import expm\n",
    "import mindspore as ms\n",
    "from mindspore import ops, Tensor\n",
    "ms.context.set_context(mode=ms.context.PYNATIVE_MODE, device_target=\"CPU\")\n",
    "from mindspore.nn import Adam, TrainOneStepCell\n",
    "from mindspore.common.parameter import Parameter\n",
    "from mindspore.common.initializer import initializer\n",
    "\n",
    "# Fix both seeds so the training trajectory is reproducible.\n",
    "ms.set_seed(1)\n",
    "np.random.seed(1)\n",
    "\n",
    "# Training/evaluation data: input states (x) and CX-target output states (y).\n",
    "train_x = np.load('./src/cx_train_x.npy', allow_pickle=True)\n",
    "eval_x = np.load('./src/cx_eval_x.npy', allow_pickle=True)\n",
    "train_y = np.load('./src/cx_train_y.npy', allow_pickle=True)\n",
    "eval_y = np.load('./src/cx_eval_y.npy', allow_pickle=True)\n",
    "\n",
    "# Pauli matrices and identity used to assemble the pulse Hamiltonians.\n",
    "s_x = X.matrix()\n",
    "s_z = Z.matrix()\n",
    "one = I.matrix()\n",
    "dt = np.pi/10  # duration of one single-qubit pulse segment\n",
    "\n",
    "def _matrix_0(coeff):\n",
    "    \"\"\"Single-qubit segment evolution exp(-1j*(coeff*Z + X)*dt).\"\"\"\n",
    "    return expm(-1j*(coeff*s_z+s_x)*dt)\n",
    "\n",
    "def _diff_matrix_0(coeff):\n",
    "    \"\"\"Derivative of _matrix_0 w.r.t. coeff, fed to the gradient engine.\"\"\"\n",
    "    return -1j*_matrix_0(coeff)@(s_z*dt)\n",
    "\n",
    "def _matrix_c_0(coeff):\n",
    "    \"\"\"Two-qubit segment: tunable Z on qubit 0 plus a (Z-1)x(Z-1) coupling.\"\"\"\n",
    "    return expm(-1j*(coeff*kron(s_z, one) + kron(one, s_z) + kron(s_x, one) + kron(one, s_x) + coeff*kron(s_z-one, s_z-one))*5*dt)\n",
    "\n",
    "def _diff_matrix_c_0(coeff):\n",
    "    \"\"\"Derivative of _matrix_c_0 w.r.t. coeff.\"\"\"\n",
    "    return -1j*_matrix_c_0(coeff)@((kron(s_z, one) + kron(s_z-one, s_z-one)) * 5*dt)\n",
    "\n",
    "def _matrix_c_1(coeff):\n",
    "    \"\"\"Two-qubit segment: tunable Z on qubit 1 plus a (Z-1)x(Z-1) coupling.\"\"\"\n",
    "    return expm(-1j*(kron(s_z, one) + coeff*kron(one, s_z) + kron(s_x, one) + kron(one, s_x) + coeff*kron(s_z-one, s_z-one))*5*dt)\n",
    "\n",
    "def _diff_matrix_c_1(coeff):\n",
    "    \"\"\"Derivative of _matrix_c_1 w.r.t. coeff.\"\"\"\n",
    "    return -1j*_matrix_c_1(coeff)@((kron(one, s_z) + kron(s_z-one, s_z-one)) * 5*dt)\n",
    "\n",
    "# NOTE(review): 'gete_*' looks like a typo for 'gate_*', but it is only a\n",
    "# display label, so it is kept unchanged to preserve behavior.\n",
    "gate_0 = gene_univ_parameterized_gate('gete_0', _matrix_0, _diff_matrix_0)\n",
    "gate_c_0 = gene_univ_parameterized_gate('gete_c_0', _matrix_c_0, _diff_matrix_c_0)\n",
    "gate_c_1 = gene_univ_parameterized_gate('gete_c_1', _matrix_c_1, _diff_matrix_c_1)\n",
    "\n",
    "circ = Circuit()\n",
    "\n",
    "# First layer: ten parameterized single-qubit pulses per qubit.\n",
    "# Parameter names are '00'..'09' on qubit 0 and '10'..'19' on qubit 1,\n",
    "# inserted in the same order as the original hand-written lines.\n",
    "for q in (0, 1):\n",
    "    for k in range(10):\n",
    "        circ += gate_0('{}{}'.format(q, k)).on(q)\n",
    "\n",
    "# Entangling layer: four parameterized two-qubit pulses.\n",
    "circ += gate_c_0('0').on([1,0])\n",
    "circ += gate_c_0('1').on([1,0])\n",
    "circ += gate_c_1('2').on([1,0])\n",
    "circ += gate_c_1('3').on([1,0])\n",
    "\n",
    "# Second layer: ten more single-qubit pulses per qubit\n",
    "# (parameter names '010'..'019' on qubit 0, '110'..'119' on qubit 1).\n",
    "for q in (0, 1):\n",
    "    for k in range(10):\n",
    "        circ += gate_0('{}1{}'.format(q, k)).on(q)\n",
    "\n",
    "# Identity observable: combined with circ_left/simulator_left it measures\n",
    "# the overlap between the target state and the circuit output.\n",
    "ham = Hamiltonian(QubitOperator(''))\n",
    "sim = Simulator('projectq', circ.n_qubits)\n",
    "sim_left = Simulator('projectq', circ.n_qubits)\n",
    "grad_ops = sim.get_expectation_with_grad(ham,\n",
    "                                         circ,\n",
    "                                         circ_left=Circuit(),\n",
    "                                         simulator_left=sim_left,\n",
    "                                         ansatz_params_name=circ.params_name)\n",
    "lr = 0.05\n",
    "Quantum_net = MQLayer(grad_ops)\n",
    "opti = Adam(Quantum_net.trainable_params(), learning_rate=lr)\n",
    "net = TrainOneStepCell(Quantum_net, opti)\n",
    "\n",
    "error_min = 1\n",
    "params_tem = None  # best parameters so far; avoids NameError if never improved\n",
    "j_tem = -1\n",
    "for j in range(len(train_x)):\n",
    "    net(Tensor(train_x[j]), Tensor(train_y[j]))\n",
    "    # The modified MQLayer uses |weight| (see the notebook title), so the\n",
    "    # evaluation must mirror that with abs() here.\n",
    "    params = abs(Quantum_net.weight.asnumpy())\n",
    "    final_state = []\n",
    "    for k in range(100):  # 100 evaluation points\n",
    "        sim.reset()\n",
    "        sim.set_qs(eval_x[k])\n",
    "        sim.apply_circuit(circ, params)\n",
    "        final_state.append(sim.get_qs())\n",
    "    # Worst-case infidelity over the evaluation set.\n",
    "    error = 1-np.real(np.min([np.abs(np.vdot(bra, ket)) for bra, ket in zip(np.array(final_state), eval_y)]))\n",
    "    if error < error_min:\n",
    "        error_min = error\n",
    "        params_tem = params\n",
    "        j_tem = j\n",
    "    if j % 50 == 0:\n",
    "        print('error_min:', error_min, 'error_now:', error, 'train_num:', j)\n",
    "    if error_min < 1e-5:\n",
    "        break\n",
    "\n",
    "print('\\nerror_min:', error_min, 'train_num:', j_tem)\n",
    "print('params_tem:\\n', params_tem)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "9e43975e",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "error_min: 0.9441378783013046 error_now: 0.9441378783013046 train_num: 0\n",
      "error_min: 0.783312709481143 error_now: 0.783312709481143 train_num: 50\n",
      "error_min: 0.08822126361334559 error_now: 0.08822126361334559 train_num: 100\n",
      "error_min: 0.03308202264383642 error_now: 0.03684451164030966 train_num: 150\n",
      "error_min: 0.03308202264383642 error_now: 0.04944551869846869 train_num: 200\n",
      "error_min: 0.03308202264383642 error_now: 0.06938714600419116 train_num: 250\n",
      "error_min: 0.03308202264383642 error_now: 0.048101990076230394 train_num: 300\n",
      "error_min: 0.03308202264383642 error_now: 0.042143790431671824 train_num: 350\n",
      "error_min: 0.02973382556355164 error_now: 0.035892231494172755 train_num: 400\n",
      "error_min: 0.02973382556355164 error_now: 0.030352244540728823 train_num: 450\n",
      "error_min: 0.029488509496666593 error_now: 0.04442460177027041 train_num: 500\n",
      "error_min: 0.029488509496666593 error_now: 0.046522714342020266 train_num: 550\n",
      "error_min: 0.029488509496666593 error_now: 0.046006425889988156 train_num: 600\n",
      "error_min: 0.029488509496666593 error_now: 0.04403951031129705 train_num: 650\n",
      "error_min: 0.029488509496666593 error_now: 0.05798771247094192 train_num: 700\n",
      "error_min: 0.029488509496666593 error_now: 0.08192736304097781 train_num: 750\n",
      "error_min: 0.029488509496666593 error_now: 0.10747602132408962 train_num: 800\n",
      "error_min: 0.029488509496666593 error_now: 0.12534249603445313 train_num: 850\n",
      "error_min: 0.029488509496666593 error_now: 0.15724888727723974 train_num: 900\n",
      "error_min: 0.029488509496666593 error_now: 0.10436971143523177 train_num: 950\n",
      "error_min: 0.029488509496666593 error_now: 0.07213183333880313 train_num: 1000\n",
      "error_min: 0.029488509496666593 error_now: 0.06586906882214394 train_num: 1050\n",
      "error_min: 0.029488509496666593 error_now: 0.04467560939258852 train_num: 1100\n",
      "error_min: 0.029488509496666593 error_now: 0.03183118502677018 train_num: 1150\n",
      "error_min: 0.026976519344692917 error_now: 0.026976519344692917 train_num: 1200\n",
      "error_min: 0.022708434743824668 error_now: 0.02291028619102553 train_num: 1250\n",
      "error_min: 0.02010187351140047 error_now: 0.02657777721896526 train_num: 1300\n",
      "error_min: 0.0186076000680242 error_now: 0.021156609010127614 train_num: 1350\n",
      "error_min: 0.0186076000680242 error_now: 0.02588400641363442 train_num: 1400\n",
      "error_min: 0.0186076000680242 error_now: 0.02123485577405515 train_num: 1450\n",
      "error_min: 0.0186076000680242 error_now: 0.01975821501425601 train_num: 1500\n",
      "error_min: 0.0186076000680242 error_now: 0.030795739881463002 train_num: 1550\n",
      "error_min: 0.0186076000680242 error_now: 0.0254917891270664 train_num: 1600\n",
      "error_min: 0.0186076000680242 error_now: 0.03136261805197327 train_num: 1650\n",
      "error_min: 0.0186076000680242 error_now: 0.028470265105315806 train_num: 1700\n",
      "error_min: 0.0186076000680242 error_now: 0.0302821198136054 train_num: 1750\n",
      "error_min: 0.0186076000680242 error_now: 0.047991148030334885 train_num: 1800\n",
      "error_min: 0.0186076000680242 error_now: 0.03575100080655513 train_num: 1850\n",
      "error_min: 0.0186076000680242 error_now: 0.028766914575207303 train_num: 1900\n",
      "error_min: 0.0186076000680242 error_now: 0.03970267637468361 train_num: 1950\n",
      "error_min: 0.0186076000680242 error_now: 0.03454307925842859 train_num: 2000\n",
      "error_min: 0.0186076000680242 error_now: 0.03335841694349262 train_num: 2050\n",
      "error_min: 0.0186076000680242 error_now: 0.04383156373711106 train_num: 2100\n",
      "error_min: 0.0186076000680242 error_now: 0.03022684018849997 train_num: 2150\n",
      "error_min: 0.0186076000680242 error_now: 0.027653664728393035 train_num: 2200\n",
      "error_min: 0.0186076000680242 error_now: 0.04430657782681835 train_num: 2250\n",
      "error_min: 0.0186076000680242 error_now: 0.05331458071282302 train_num: 2300\n",
      "error_min: 0.0186076000680242 error_now: 0.04615711079340912 train_num: 2350\n",
      "error_min: 0.0186076000680242 error_now: 0.05147689853625126 train_num: 2400\n",
      "error_min: 0.0186076000680242 error_now: 0.06419614464146972 train_num: 2450\n",
      "error_min: 0.0186076000680242 error_now: 0.06513561194152861 train_num: 2500\n",
      "error_min: 0.0186076000680242 error_now: 0.035669718069015866 train_num: 2550\n",
      "error_min: 0.0186076000680242 error_now: 0.030668554245012136 train_num: 2600\n",
      "error_min: 0.0186076000680242 error_now: 0.03763360683226957 train_num: 2650\n",
      "error_min: 0.0186076000680242 error_now: 0.025071390476419708 train_num: 2700\n",
      "error_min: 0.017717491287098874 error_now: 0.01774883299501917 train_num: 2750\n",
      "error_min: 0.011824318141879009 error_now: 0.011824318141879009 train_num: 2800\n",
      "error_min: 0.006109334171072178 error_now: 0.006109334171072178 train_num: 2850\n",
      "error_min: 0.0034893595727585325 error_now: 0.0037312554172177492 train_num: 2900\n",
      "error_min: 0.0010856531273657488 error_now: 0.0010856531273657488 train_num: 2950\n",
      "error_min: 0.0004326865596416818 error_now: 0.00044058281281089684 train_num: 3000\n",
      "error_min: 0.0004326865596416818 error_now: 0.0005994127852383402 train_num: 3050\n",
      "error_min: 0.0004326865596416818 error_now: 0.0008201860599900135 train_num: 3100\n",
      "error_min: 0.0004326865596416818 error_now: 0.000531139400926639 train_num: 3150\n",
      "error_min: 0.0002029358951577409 error_now: 0.00021674283392481009 train_num: 3200\n",
      "error_min: 5.734841993287887e-05 error_now: 5.734841993287887e-05 train_num: 3250\n",
      "error_min: 1.4424156855818993e-05 error_now: 1.4424156855818993e-05 train_num: 3300\n",
      "\n",
      "error_min: 9.277068755420004e-06 train_num: 3308\n",
      "params_tem:\n",
      " [2.3973486  1.360061   0.5219009  0.1817361  0.4168763  1.17235\n",
      " 1.808837   1.9328339  1.1321076  0.21697357 0.26678494 0.18694156\n",
      " 0.17362106 1.8773022  2.3033705  0.4273502  0.19230893 0.06170359\n",
      " 1.0831344  2.707224   0.9448203  0.9738663  1.9660488  0.83349556\n",
      " 0.06222223 0.03883766 0.6891924  2.4698865  3.029343   0.63929975\n",
      " 0.10049819 0.10288943 0.30465302 0.44671252 1.5978051  0.80121493\n",
      " 0.14243181 0.07353207 0.68680173 1.7578927  2.406618   1.9629031\n",
      " 1.1089846  0.7566514 ]\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import sys\n",
    "from numpy import kron\n",
    "from mindquantum import *\n",
    "from scipy.linalg import expm\n",
    "import mindspore as ms\n",
    "from mindspore import ops, Tensor\n",
    "ms.context.set_context(mode=ms.context.PYNATIVE_MODE, device_target=\"CPU\")\n",
    "from mindspore.nn import Adam, TrainOneStepCell\n",
    "from mindspore.common.parameter import Parameter\n",
    "from mindspore.common.initializer import initializer\n",
    "\n",
    "# Fix both seeds so the training trajectory is reproducible.\n",
    "ms.set_seed(1)\n",
    "np.random.seed(1)\n",
    "\n",
    "# Training/evaluation data: input states (x) and CX-target output states (y).\n",
    "train_x = np.load('./src/cx_train_x.npy', allow_pickle=True)\n",
    "eval_x = np.load('./src/cx_eval_x.npy', allow_pickle=True)\n",
    "train_y = np.load('./src/cx_train_y.npy', allow_pickle=True)\n",
    "eval_y = np.load('./src/cx_eval_y.npy', allow_pickle=True)\n",
    "\n",
    "# Pauli matrices and identity used to assemble the pulse Hamiltonians.\n",
    "s_x = X.matrix()\n",
    "s_z = Z.matrix()\n",
    "one = I.matrix()\n",
    "dt = np.pi/10  # duration of one single-qubit pulse segment\n",
    "\n",
    "def _matrix_0(coeff):\n",
    "    \"\"\"Single-qubit segment evolution exp(-1j*(coeff*Z + X)*dt).\"\"\"\n",
    "    return expm(-1j*(coeff*s_z+s_x)*dt)\n",
    "\n",
    "def _diff_matrix_0(coeff):\n",
    "    \"\"\"Derivative of _matrix_0 w.r.t. coeff, fed to the gradient engine.\"\"\"\n",
    "    return -1j*_matrix_0(coeff)@(s_z*dt)\n",
    "\n",
    "def _matrix_c_0(coeff):\n",
    "    \"\"\"Two-qubit segment: tunable Z on qubit 0 plus a (Z-1)x(Z-1) coupling.\"\"\"\n",
    "    return expm(-1j*(coeff*kron(s_z, one) + kron(one, s_z) + kron(s_x, one) + kron(one, s_x) + coeff*kron(s_z-one, s_z-one))*5*dt)\n",
    "\n",
    "def _diff_matrix_c_0(coeff):\n",
    "    \"\"\"Derivative of _matrix_c_0 w.r.t. coeff.\"\"\"\n",
    "    return -1j*_matrix_c_0(coeff)@((kron(s_z, one) + kron(s_z-one, s_z-one)) * 5*dt)\n",
    "\n",
    "def _matrix_c_1(coeff):\n",
    "    \"\"\"Two-qubit segment: tunable Z on qubit 1 plus a (Z-1)x(Z-1) coupling.\"\"\"\n",
    "    return expm(-1j*(kron(s_z, one) + coeff*kron(one, s_z) + kron(s_x, one) + kron(one, s_x) + coeff*kron(s_z-one, s_z-one))*5*dt)\n",
    "\n",
    "def _diff_matrix_c_1(coeff):\n",
    "    \"\"\"Derivative of _matrix_c_1 w.r.t. coeff.\"\"\"\n",
    "    return -1j*_matrix_c_1(coeff)@((kron(one, s_z) + kron(s_z-one, s_z-one)) * 5*dt)\n",
    "\n",
    "# NOTE(review): 'gete_*' looks like a typo for 'gate_*', but it is only a\n",
    "# display label, so it is kept unchanged to preserve behavior.\n",
    "gate_0 = gene_univ_parameterized_gate('gete_0', _matrix_0, _diff_matrix_0)\n",
    "gate_c_0 = gene_univ_parameterized_gate('gete_c_0', _matrix_c_0, _diff_matrix_c_0)\n",
    "gate_c_1 = gene_univ_parameterized_gate('gete_c_1', _matrix_c_1, _diff_matrix_c_1)\n",
    "\n",
    "circ = Circuit()\n",
    "\n",
    "# First layer: ten parameterized single-qubit pulses per qubit.\n",
    "# Parameter names are '00'..'09' on qubit 0 and '10'..'19' on qubit 1,\n",
    "# inserted in the same order as the original hand-written lines.\n",
    "for q in (0, 1):\n",
    "    for k in range(10):\n",
    "        circ += gate_0('{}{}'.format(q, k)).on(q)\n",
    "\n",
    "# Entangling layer: four parameterized two-qubit pulses.\n",
    "circ += gate_c_0('0').on([1,0])\n",
    "circ += gate_c_0('1').on([1,0])\n",
    "circ += gate_c_1('2').on([1,0])\n",
    "circ += gate_c_1('3').on([1,0])\n",
    "\n",
    "# Second layer: ten more single-qubit pulses per qubit\n",
    "# (parameter names '010'..'019' on qubit 0, '110'..'119' on qubit 1).\n",
    "for q in (0, 1):\n",
    "    for k in range(10):\n",
    "        circ += gate_0('{}1{}'.format(q, k)).on(q)\n",
    "\n",
    "# Identity observable: combined with circ_left/simulator_left it measures\n",
    "# the overlap between the target state and the circuit output.\n",
    "ham = Hamiltonian(QubitOperator(''))\n",
    "sim = Simulator('projectq', circ.n_qubits)\n",
    "sim_left = Simulator('projectq', circ.n_qubits)\n",
    "grad_ops = sim.get_expectation_with_grad(ham,\n",
    "                                         circ,\n",
    "                                         circ_left=Circuit(),\n",
    "                                         simulator_left=sim_left,\n",
    "                                         ansatz_params_name=circ.params_name)\n",
    "lr = 0.01\n",
    "Quantum_net = MQLayer(grad_ops)\n",
    "opti = Adam(Quantum_net.trainable_params(), learning_rate=lr)\n",
    "net = TrainOneStepCell(Quantum_net, opti)\n",
    "\n",
    "error_min = 1\n",
    "params_tem = None  # best parameters so far; avoids NameError if never improved\n",
    "j_tem = -1\n",
    "for j in range(len(train_x)):\n",
    "    net(Tensor(train_x[j]), Tensor(train_y[j]))\n",
    "    # The modified MQLayer uses |weight| (see the notebook title), so the\n",
    "    # evaluation must mirror that with abs() here.\n",
    "    params = abs(Quantum_net.weight.asnumpy())\n",
    "    final_state = []\n",
    "    for k in range(100):  # 100 evaluation points\n",
    "        sim.reset()\n",
    "        sim.set_qs(eval_x[k])\n",
    "        sim.apply_circuit(circ, params)\n",
    "        final_state.append(sim.get_qs())\n",
    "    # Worst-case infidelity over the evaluation set.\n",
    "    error = 1-np.real(np.min([np.abs(np.vdot(bra, ket)) for bra, ket in zip(np.array(final_state), eval_y)]))\n",
    "    if error < error_min:\n",
    "        error_min = error\n",
    "        params_tem = params\n",
    "        j_tem = j\n",
    "    if j % 50 == 0:\n",
    "        print('error_min:', error_min, 'error_now:', error, 'train_num:', j)\n",
    "    if error_min < 1e-5:\n",
    "        break\n",
    "\n",
    "print('\\nerror_min:', error_min, 'train_num:', j_tem)\n",
    "print('params_tem:\\n', params_tem)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
