{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pennylane as qml\n",
    "from pennylane import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from pennylane.templates import StronglyEntanglingLayers\n",
    "from functools import partial\n",
    "\n",
    "class VQGenerator:\n",
    "    \"\"\"Variational quantum circuit model with optional data re-uploading.\n",
    "\n",
    "    Alternates trainable blocks W(theta) with a data-encoding block S(x)\n",
    "    on a PennyLane device, measuring basis-state probabilities (single\n",
    "    inputs) or PauliZ(0) expectations (batched inputs).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, n_qubits, n_layers, scaling, device='lightning.gpu', data_reuploading=False):\n",
    "        # The device name must correspond to an installed PennyLane plugin;\n",
    "        # otherwise qml.device raises a DeviceError.\n",
    "        self.dev = qml.device(device, wires=n_qubits)\n",
    "        self.n_qubits = n_qubits\n",
    "        self.n_layers = n_layers\n",
    "        self.scaling = scaling  # multiplicative factor applied to x in S()\n",
    "        self.data_reuploading = data_reuploading  # if True, re-encode x between layers\n",
    "        self.initialize_weights()\n",
    "\n",
    "    def initialize_weights(self):\n",
    "        \"\"\"Initialize the weights for the variational circuit.\n",
    "\n",
    "        Shape (n_layers + 1, n_qubits, 3): one trainable block per layer\n",
    "        plus a final block; angles drawn uniformly from [0, 2*pi).\n",
    "        \"\"\"\n",
    "        self.weights = np.random.uniform(low=0, high=2 * np.pi, size=(self.n_layers + 1, self.n_qubits, 3), requires_grad=True)\n",
    "    \n",
    "    def S(self, x):\n",
    "        \"\"\"Data-encoding circuit block: RX(scaling * x) on every wire.\"\"\"\n",
    "        for w in range(self.n_qubits):\n",
    "            qml.RX(self.scaling * x, wires=w)\n",
    "\n",
    "    def W(self, theta):\n",
    "        \"\"\"Trainable circuit block.\n",
    "\n",
    "        For one qubit, a general Rot gate; otherwise theta is reshaped to\n",
    "        (layers, n_qubits, 3) and applied as StronglyEntanglingLayers.\n",
    "        \"\"\"\n",
    "        if self.n_qubits == 1:\n",
    "            qml.Rot(theta[0][0], theta[0][1], theta[0][2], wires=0)\n",
    "        else:\n",
    "            StronglyEntanglingLayers(theta.reshape((-1, self.n_qubits, 3)), wires=range(self.n_qubits))\n",
    "\n",
    "    def compute_single_output(self, weights, x):\n",
    "        \"\"\"Compute the quantum model output for a given input x.\n",
    "\n",
    "        Returns the probability vector over all 2**n_qubits basis states.\n",
    "        \"\"\"\n",
    "        if self.data_reuploading:\n",
    "            # W and S alternate for every layer, then a final W block.\n",
    "            @qml.qnode(self.dev)\n",
    "            def circuit():\n",
    "                for layer in range(self.n_layers):\n",
    "                    self.W(weights[layer])\n",
    "                    self.S(x)\n",
    "                self.W(weights[-1])\n",
    "                return qml.probs(wires=list(range(self.n_qubits)))\n",
    "        else:\n",
    "            # Single data encoding up front, then all trainable blocks.\n",
    "            @qml.qnode(self.dev)\n",
    "            def circuit():\n",
    "                self.S(x)\n",
    "                for layer in range(self.n_layers):\n",
    "                    self.W(weights[layer])\n",
    "                self.W(weights[-1])\n",
    "                return qml.probs(wires=list(range(self.n_qubits)))\n",
    "        return circuit()\n",
    "    \n",
    "    def compute_batch_outputs(self, weights, x_batch):\n",
    "        \"\"\"Compute the quantum model outputs for a batch of inputs.\n",
    "\n",
    "        NOTE(review): returns PauliZ(0) expectation values, unlike\n",
    "        compute_single_output which returns probabilities -- confirm intent.\n",
    "        NOTE(review): encoding every x of the batch sequentially on the same\n",
    "        wires composes the rotations inside one circuit, and qml.batch_params\n",
    "        expects each gate parameter to carry the batch dimension; a recorded\n",
    "        run of this method fails with a batch-dimension ValueError. TODO:\n",
    "        rework the batching (e.g. pass x as a batched QNode argument).\n",
    "        \"\"\"\n",
    "        if self.data_reuploading:\n",
    "            @qml.batch_params\n",
    "            @qml.qnode(self.dev)\n",
    "            def circuit(x_batch):\n",
    "                for layer in range(self.n_layers):\n",
    "                    self.W(weights[layer])\n",
    "                    for x in x_batch:\n",
    "                        self.S(x)\n",
    "                self.W(weights[-1])\n",
    "                return [qml.expval(qml.PauliZ(wires=0)) for _ in range(len(x_batch))]\n",
    "        else:\n",
    "            @qml.batch_params\n",
    "            @qml.qnode(self.dev)\n",
    "            def circuit(x_batch):\n",
    "                for x in x_batch:\n",
    "                    self.S(x)\n",
    "                for layer in range(self.n_layers):\n",
    "                    self.W(weights[layer])\n",
    "                self.W(weights[-1])\n",
    "                return [qml.expval(qml.PauliZ(wires=0)) for _ in range(len(x_batch))]\n",
    "        \n",
    "        return circuit(x_batch)\n",
    "\n",
    "    def visualize_quantum_circuit(self, x):\n",
    "        \"\"\"Print a text drawing of the circuit for a given input x, using\n",
    "        the model's current self.weights.\n",
    "        \"\"\"\n",
    "        if self.data_reuploading:\n",
    "            @qml.qnode(self.dev)\n",
    "            def circuit():\n",
    "                for layer in range(self.n_layers):\n",
    "                    self.W(self.weights[layer])\n",
    "                    self.S(x)\n",
    "                self.W(self.weights[-1])\n",
    "                return qml.probs(wires=list(range(self.n_qubits)))\n",
    "        else:\n",
    "            @qml.qnode(self.dev)\n",
    "            def circuit():\n",
    "                self.S(x)\n",
    "                for layer in range(self.n_layers):\n",
    "                    self.W(self.weights[layer])\n",
    "                self.W(self.weights[-1])\n",
    "                return qml.probs(wires=list(range(self.n_qubits)))\n",
    "        \n",
    "        drawer = qml.draw(circuit, show_all_wires=True)\n",
    "        print(drawer())\n",
    "\n",
    "class Trainer:\n",
    "    \"\"\"Gradient-based trainer for a VQGenerator model.\"\"\"\n",
    "\n",
    "    def __init__(self, model: VQGenerator, valid_state_mask: np.ndarray, max_steps=200, batch_size=32, opt=\"Adam\", learning_rate=0.2):\n",
    "        # model: the variational circuit to optimize (its .weights are updated in place).\n",
    "        # valid_state_mask: 0/1 mask over basis states, used by valid_state_loss.\n",
    "        self.model = model\n",
    "        self.valid_state_mask = valid_state_mask\n",
    "        self.max_steps = max_steps\n",
    "        self.batch_size = batch_size\n",
    "        self.opt = self.get_optimizer(opt, learning_rate)\n",
    "\n",
    "    def get_optimizer(self, opt, learning_rate):\n",
    "        \"\"\"Retrieve the specified optimizer.\n",
    "\n",
    "        Raises:\n",
    "            ValueError: if opt is not \"Adam\" or \"GradientDescent\".\n",
    "        \"\"\"\n",
    "        if opt == \"Adam\":\n",
    "            return qml.AdamOptimizer(learning_rate)\n",
    "        elif opt == \"GradientDescent\":\n",
    "            return qml.GradientDescentOptimizer(learning_rate)\n",
    "        else:\n",
    "            raise ValueError(f\"Unsupported optimizer: {opt}\")\n",
    "\n",
    "    def valid_state_loss(self, target_probability):\n",
    "        \"\"\"Negative log of the probability mass assigned to valid states.\n",
    "\n",
    "        NOTE(review): untested; assumes target_probability uses the same\n",
    "        basis-state ordering as valid_state_mask -- confirm.\n",
    "        \"\"\"\n",
    "        return - np.log(np.sum(target_probability * self.valid_state_mask))\n",
    "\n",
    "    @staticmethod\n",
    "    def square_loss(targets, predictions):\n",
    "        \"\"\"Mean squared error between targets and predictions.\n",
    "\n",
    "        Fix: this method was referenced by cost() but never defined.\n",
    "        Written as an explicit accumulation so autograd can differentiate\n",
    "        through the QNode outputs in predictions.\n",
    "        \"\"\"\n",
    "        if len(targets) == 0:\n",
    "            raise ValueError(\"square_loss requires at least one target\")\n",
    "        loss = 0.0\n",
    "        for t, p in zip(targets, predictions):\n",
    "            loss = loss + np.mean((t - p) ** 2)\n",
    "        return loss / len(targets)\n",
    "\n",
    "    def cost(self, weights, x, targets):\n",
    "        \"\"\"Calculate the cost for a given set of weights and data.\"\"\"\n",
    "        # Bug fix: VQGenerator defines `compute_single_output`, not\n",
    "        # `compute_quantum_model` (the old call raised AttributeError).\n",
    "        predictions = [self.model.compute_single_output(weights, x_) for x_ in x]\n",
    "        return self.square_loss(targets, predictions)\n",
    "\n",
    "    def train(self, x, target_y):\n",
    "        \"\"\"Train the model using the provided data.\n",
    "\n",
    "        Returns:\n",
    "            (model, cost_history): the trained model and the full-dataset\n",
    "            cost before training plus after each of the max_steps updates.\n",
    "        \"\"\"\n",
    "        cst_history = [self.cost(self.model.weights, x, target_y)]  # Initial cost\n",
    "        for step in range(self.max_steps):\n",
    "            # Random mini-batch, sampled with replacement.\n",
    "            batch_indices = np.random.randint(0, len(x), self.batch_size)\n",
    "            x_batch = x[batch_indices]\n",
    "            y_batch = target_y[batch_indices]\n",
    "\n",
    "            # opt.step returns updated values for all positional args; only\n",
    "            # the weights are trainable, so the batch data copies are discarded.\n",
    "            self.model.weights, _, _ = self.opt.step(self.cost, self.model.weights, x_batch, y_batch)\n",
    "\n",
    "            current_cost = self.cost(self.model.weights, x, target_y)\n",
    "            cst_history.append(current_cost)\n",
    "            if (step + 1) % 10 == 0:\n",
    "                print(f\"Cost at step {step + 1:3}: {current_cost:.4f}\")\n",
    "\n",
    "        return self.model, cst_history\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "ename": "DeviceError",
     "evalue": "Device lightning.tensor does not exist. Make sure the required plugin is installed.",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mDeviceError\u001b[0m                               Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[4], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m qc_model \u001b[38;5;241m=\u001b[39m \u001b[43mVQGenerator\u001b[49m\u001b[43m(\u001b[49m\u001b[43mn_qubits\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m12\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mn_layers\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m2\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mscaling\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m      2\u001b[0m qc_model\u001b[38;5;241m.\u001b[39mvisualize_quantum_circuit(\u001b[38;5;241m1\u001b[39m)\n",
      "Cell \u001b[0;32mIn[3], line 9\u001b[0m, in \u001b[0;36mVQGenerator.__init__\u001b[0;34m(self, n_qubits, n_layers, scaling, device, data_reuploading)\u001b[0m\n\u001b[1;32m      8\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m, n_qubits, n_layers, scaling, device\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mlightning.tensor\u001b[39m\u001b[38;5;124m'\u001b[39m, data_reuploading\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m):\n\u001b[0;32m----> 9\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdev \u001b[38;5;241m=\u001b[39m \u001b[43mqml\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdevice\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mwires\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mn_qubits\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     10\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mn_qubits \u001b[38;5;241m=\u001b[39m n_qubits\n\u001b[1;32m     11\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mn_layers \u001b[38;5;241m=\u001b[39m n_layers\n",
      "File \u001b[0;32m~/.conda/envs/qmg/lib/python3.10/site-packages/pennylane/__init__.py:433\u001b[0m, in \u001b[0;36mdevice\u001b[0;34m(name, *args, **kwargs)\u001b[0m\n\u001b[1;32m    429\u001b[0m             dev\u001b[38;5;241m.\u001b[39mpreprocess \u001b[38;5;241m=\u001b[39m custom_decomp_preprocess\n\u001b[1;32m    431\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m dev\n\u001b[0;32m--> 433\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m DeviceError(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDevice \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m does not exist. Make sure the required plugin is installed.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
      "\u001b[0;31mDeviceError\u001b[0m: Device lightning.tensor does not exist. Make sure the required plugin is installed."
     ]
    }
   ],
   "source": [
    "# Instantiate a 12-qubit, 2-layer model and print its circuit diagram.\n",
    "# NOTE(review): the saved error output below references a 'lightning.tensor'\n",
    "# device while this source requests 'lightning.gpu' -- the recorded outputs\n",
    "# are stale; re-run the notebook top-to-bottom for consistent results.\n",
    "qc_model = VQGenerator(n_qubits=12, n_layers=2, scaling=1)\n",
    "qc_model.visualize_quantum_circuit(1)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'C'"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import sys \n",
    "sys.path.append(\"../\")  # make the local qmg package importable\n",
    "from qmg.utils import MoleculeQuantumStateGenerator\n",
    "import pandas as pd\n",
    "\n",
    "# Presumably the enumerated 3-heavy-atom chemical space, keyed by a\n",
    "# decimal basis-state index -- TODO confirm against the dataset docs.\n",
    "data_path = \"../dataset/chemical_space/effective_3.csv\"\n",
    "data = pd.read_csv(data_path)\n",
    "data.head()\n",
    "\n",
    "# Spot check: decimal index 64 -> 12-bit state -> connectivity -> SMILES.\n",
    "qg = MoleculeQuantumStateGenerator(heavy_atom_size=3)\n",
    "quantum_state = qg.decimal_to_binary(64, 12)\n",
    "qg.ConnectivityToSmiles(*qg.QuantumStateToConnectivity(quantum_state))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(4096,)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor([0., 0., 0., ..., 0., 0., 0.], requires_grad=False)"
      ]
     },
     "execution_count": 45,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Build a 0/1 mask over all 2**12 basis states marking valid molecules.\n",
    "# NOTE(review): entries are flagged at position -1 - decimal_index, i.e.\n",
    "# the ordering is reversed relative to the decimal index -- confirm this\n",
    "# matches the basis-state ordering returned by qml.probs.\n",
    "valid_state_mask = np.zeros(2**12, requires_grad=False)\n",
    "for index, row in data.iterrows():\n",
    "    valid_state_mask[-1-int(row[\"decimal_index\"])] = 1.\n",
    "print(valid_state_mask.shape)\n",
    "valid_state_mask"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [
    {
     "ename": "ValueError",
     "evalue": "Parameter 3.9299758947039023 has incorrect batch dimension. Expecting first dimension of length 12.",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[57], line 4\u001b[0m\n\u001b[1;32m      2\u001b[0m weights \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mrandom\u001b[38;5;241m.\u001b[39muniform(low\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m, high\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m2\u001b[39m \u001b[38;5;241m*\u001b[39m np\u001b[38;5;241m.\u001b[39mpi, size\u001b[38;5;241m=\u001b[39m(batch_size, \u001b[38;5;241m2\u001b[39m \u001b[38;5;241m+\u001b[39m \u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m12\u001b[39m, \u001b[38;5;241m3\u001b[39m), requires_grad\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m      3\u001b[0m batch_inputs \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mrandom\u001b[38;5;241m.\u001b[39muniform(size\u001b[38;5;241m=\u001b[39m(batch_size, \u001b[38;5;241m12\u001b[39m))\n\u001b[0;32m----> 4\u001b[0m ouput_prob \u001b[38;5;241m=\u001b[39m \u001b[43mqc_model\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcompute_batch_outputs\u001b[49m\u001b[43m(\u001b[49m\u001b[43mweights\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbatch_inputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m      5\u001b[0m \u001b[38;5;28mprint\u001b[39m(ouput_prob\u001b[38;5;241m.\u001b[39mshape)\n",
      "Cell \u001b[0;32mIn[42], line 77\u001b[0m, in \u001b[0;36mVQGenerator.compute_batch_outputs\u001b[0;34m(self, weights, x_batch)\u001b[0m\n\u001b[1;32m     74\u001b[0m         \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mW(weights[\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m])\n\u001b[1;32m     75\u001b[0m         \u001b[38;5;28;01mreturn\u001b[39;00m [qml\u001b[38;5;241m.\u001b[39mexpval(qml\u001b[38;5;241m.\u001b[39mPauliZ(wires\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m)) \u001b[38;5;28;01mfor\u001b[39;00m _ \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mlen\u001b[39m(x_batch))]\n\u001b[0;32m---> 77\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mcircuit\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx_batch\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/.conda/envs/qmg/lib/python3.10/site-packages/pennylane/workflow/qnode.py:1164\u001b[0m, in \u001b[0;36mQNode.__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1162\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m qml\u001b[38;5;241m.\u001b[39mcapture\u001b[38;5;241m.\u001b[39menabled():\n\u001b[1;32m   1163\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m qml\u001b[38;5;241m.\u001b[39mcapture\u001b[38;5;241m.\u001b[39mqnode_call(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m-> 1164\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_impl_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/.conda/envs/qmg/lib/python3.10/site-packages/pennylane/workflow/qnode.py:1150\u001b[0m, in \u001b[0;36mQNode._impl_call\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1147\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_update_gradient_fn(shots\u001b[38;5;241m=\u001b[39moverride_shots, tape\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_tape)\n\u001b[1;32m   1149\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m-> 1150\u001b[0m     res \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_execution_component\u001b[49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moverride_shots\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moverride_shots\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1151\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[1;32m   1152\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m old_interface \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mauto\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n",
      "File \u001b[0;32m~/.conda/envs/qmg/lib/python3.10/site-packages/pennylane/workflow/qnode.py:1103\u001b[0m, in \u001b[0;36mQNode._execution_component\u001b[0;34m(self, args, kwargs, override_shots)\u001b[0m\n\u001b[1;32m   1100\u001b[0m _prune_dynamic_transform(full_transform_program, inner_transform_program)\n\u001b[1;32m   1102\u001b[0m \u001b[38;5;66;03m# pylint: disable=unexpected-keyword-arg\u001b[39;00m\n\u001b[0;32m-> 1103\u001b[0m res \u001b[38;5;241m=\u001b[39m \u001b[43mqml\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mexecute\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m   1104\u001b[0m \u001b[43m    \u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_tape\u001b[49m\u001b[43m,\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1105\u001b[0m \u001b[43m    \u001b[49m\u001b[43mdevice\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdevice\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1106\u001b[0m \u001b[43m    \u001b[49m\u001b[43mgradient_fn\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgradient_fn\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1107\u001b[0m \u001b[43m    \u001b[49m\u001b[43minterface\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minterface\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1108\u001b[0m \u001b[43m    \u001b[49m\u001b[43mtransform_program\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mfull_transform_program\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1109\u001b[0m \u001b[43m    \u001b[49m\u001b[43minner_transform\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minner_transform_program\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1110\u001b[0m \u001b[43m    
\u001b[49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1111\u001b[0m \u001b[43m    \u001b[49m\u001b[43mgradient_kwargs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgradient_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1112\u001b[0m \u001b[43m    \u001b[49m\u001b[43moverride_shots\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moverride_shots\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1113\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mexecute_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1114\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1115\u001b[0m res \u001b[38;5;241m=\u001b[39m res[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m   1117\u001b[0m \u001b[38;5;66;03m# convert result to the interface in case the qfunc has no parameters\u001b[39;00m\n",
      "File \u001b[0;32m~/.conda/envs/qmg/lib/python3.10/site-packages/pennylane/workflow/execution.py:653\u001b[0m, in \u001b[0;36mexecute\u001b[0;34m(tapes, device, gradient_fn, interface, transform_program, inner_transform, config, grad_on_execution, gradient_kwargs, cache, cachesize, max_diff, override_shots, expand_fn, max_expansion, device_batch_transform, device_vjp, mcm_config)\u001b[0m\n\u001b[1;32m    650\u001b[0m     tapes, post_processing \u001b[38;5;241m=\u001b[39m transform_program(tapes)\n\u001b[1;32m    651\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m    652\u001b[0m     \u001b[38;5;66;03m# TODO: Remove once old device are removed\u001b[39;00m\n\u001b[0;32m--> 653\u001b[0m     tapes, program_post_processing \u001b[38;5;241m=\u001b[39m \u001b[43mtransform_program\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtapes\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    654\u001b[0m     tapes, program_pre_processing, config \u001b[38;5;241m=\u001b[39m _batch_transform(\n\u001b[1;32m    655\u001b[0m         tapes, device, config, override_shots, device_batch_transform\n\u001b[1;32m    656\u001b[0m     )\n\u001b[1;32m    658\u001b[0m     \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mpost_processing\u001b[39m(results):\n",
      "File \u001b[0;32m~/.conda/envs/qmg/lib/python3.10/site-packages/pennylane/transforms/core/transform_program.py:515\u001b[0m, in \u001b[0;36mTransformProgram.__call__\u001b[0;34m(self, tapes)\u001b[0m\n\u001b[1;32m    513\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_argnums \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_argnums[i] \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m    514\u001b[0m     tape\u001b[38;5;241m.\u001b[39mtrainable_params \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_argnums[i][j]\n\u001b[0;32m--> 515\u001b[0m new_tapes, fn \u001b[38;5;241m=\u001b[39m \u001b[43mtransform\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtape\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mtargs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mtkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    516\u001b[0m execution_tapes\u001b[38;5;241m.\u001b[39mextend(new_tapes)\n\u001b[1;32m    518\u001b[0m fns\u001b[38;5;241m.\u001b[39mappend(fn)\n",
      "File \u001b[0;32m~/.conda/envs/qmg/lib/python3.10/site-packages/pennylane/transforms/batch_params.py:193\u001b[0m, in \u001b[0;36mbatch_params\u001b[0;34m(tape, all_operations)\u001b[0m\n\u001b[1;32m    191\u001b[0m     shape \u001b[38;5;241m=\u001b[39m qml\u001b[38;5;241m.\u001b[39mmath\u001b[38;5;241m.\u001b[39mshape(params[i])\n\u001b[1;32m    192\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(shape) \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m \u001b[38;5;129;01mor\u001b[39;00m shape[\u001b[38;5;241m0\u001b[39m] \u001b[38;5;241m!=\u001b[39m batch_dim:\n\u001b[0;32m--> 193\u001b[0m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m    194\u001b[0m             \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mParameter \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mparams[i]\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m has incorrect batch dimension. Expecting \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m    195\u001b[0m             \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfirst dimension of length \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mbatch_dim\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m    196\u001b[0m         )\n\u001b[1;32m    198\u001b[0m output_tapes \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m    199\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m ops \u001b[38;5;129;01min\u001b[39;00m _split_operations(tape\u001b[38;5;241m.\u001b[39moperations, params, indices, batch_dim):\n",
      "\u001b[0;31mValueError\u001b[0m: Parameter 3.9299758947039023 has incorrect batch dimension. Expecting first dimension of length 12."
     ]
    }
   ],
   "source": [
    "# Draw per-sample weights and inputs, then evaluate the batched circuit.\n",
    "batch_size = 10\n",
    "weights = np.random.uniform(low=0, high=2 * np.pi, size=(batch_size, 2 + 1, 12, 3), requires_grad=True)\n",
    "batch_inputs = np.random.uniform(size=(batch_size, 12))\n",
    "output_prob = qc_model.compute_batch_outputs(weights, batch_inputs)\n",
    "print(output_prob.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[[3.92997589, 1.35666068, 0.22954502],\n",
       "          [0.08401884, 1.81974524, 1.08668119],\n",
       "          [0.33192181, 1.3343856 , 2.90679703],\n",
       "          ...,\n",
       "          [1.62366172, 4.37264211, 3.028859  ],\n",
       "          [1.21531161, 4.88296065, 5.5592064 ],\n",
       "          [2.23690708, 4.07273631, 4.52616824]],\n",
       "\n",
       "         [[1.60851421, 2.20274593, 0.62835107],\n",
       "          [1.73809578, 0.13265767, 4.34247816],\n",
       "          [6.09129257, 2.39500429, 4.77780613],\n",
       "          ...,\n",
       "          [4.54979842, 1.62818812, 5.58295418],\n",
       "          [3.37028542, 5.58444903, 2.76768498],\n",
       "          [5.15848425, 0.25098391, 5.26054856]],\n",
       "\n",
       "         [[1.43240584, 2.54273178, 3.31172584],\n",
       "          [3.47498011, 5.27946618, 3.73341164],\n",
       "          [1.38563223, 1.43734209, 2.8437527 ],\n",
       "          ...,\n",
       "          [3.27760665, 2.61895511, 6.18387143],\n",
       "          [0.01849497, 3.54553244, 0.24969099],\n",
       "          [5.50216998, 2.1056025 , 2.50393841]]],\n",
       "\n",
       "\n",
       "        [[[1.90803741, 1.60979243, 4.76475759],\n",
       "          [3.61869927, 1.48492182, 2.58333092],\n",
       "          [4.71857775, 1.21094027, 5.84941722],\n",
       "          ...,\n",
       "          [0.78866645, 2.00499415, 1.29469944],\n",
       "          [5.86095156, 1.19680572, 6.24777296],\n",
       "          [3.59680014, 5.41090022, 2.41813213]],\n",
       "\n",
       "         [[0.50692442, 4.00960199, 1.98405935],\n",
       "          [2.46119248, 1.92543094, 3.53416447],\n",
       "          [0.54587754, 1.71160835, 5.25676454],\n",
       "          ...,\n",
       "          [5.81583903, 2.62321568, 4.39364072],\n",
       "          [1.14370425, 3.23620213, 2.40851606],\n",
       "          [0.86766119, 5.26396192, 0.47676468]],\n",
       "\n",
       "         [[0.56476778, 3.44819751, 1.77336157],\n",
       "          [3.38753019, 4.50708672, 2.21467221],\n",
       "          [5.75616116, 1.08169713, 3.75352407],\n",
       "          ...,\n",
       "          [3.05275461, 2.82320763, 4.88249487],\n",
       "          [0.92746765, 3.90123929, 0.76780421],\n",
       "          [1.09729323, 3.43559443, 4.80363945]]],\n",
       "\n",
       "\n",
       "        [[[0.7020064 , 3.66721819, 2.62695001],\n",
       "          [6.15716866, 2.85136576, 5.39587095],\n",
       "          [1.152895  , 5.8806234 , 5.09583094],\n",
       "          ...,\n",
       "          [1.16316951, 5.61263789, 2.03426758],\n",
       "          [3.97121084, 2.72984726, 1.02407483],\n",
       "          [4.24091304, 0.58215287, 2.81436896]],\n",
       "\n",
       "         [[1.15397248, 3.89950004, 5.28725475],\n",
       "          [4.2365084 , 3.77851017, 1.84061937],\n",
       "          [4.45579384, 5.2810679 , 4.63757016],\n",
       "          ...,\n",
       "          [4.54859394, 1.25491142, 3.4055805 ],\n",
       "          [0.01361436, 4.51320045, 3.48445723],\n",
       "          [2.54306709, 0.26785624, 6.12081774]],\n",
       "\n",
       "         [[4.4479124 , 0.93818824, 3.96165143],\n",
       "          [5.80517608, 4.92755413, 0.35888304],\n",
       "          [3.23561046, 1.89295417, 0.1353645 ],\n",
       "          ...,\n",
       "          [2.7382941 , 1.66163706, 2.93483462],\n",
       "          [3.42741774, 0.52356351, 1.52867035],\n",
       "          [0.39839738, 5.99999906, 3.31045423]]],\n",
       "\n",
       "\n",
       "        ...,\n",
       "\n",
       "\n",
       "        [[[2.11295994, 1.40555438, 3.94047393],\n",
       "          [4.64454962, 3.56649111, 4.36540291],\n",
       "          [2.45980653, 5.62414844, 0.46086264],\n",
       "          ...,\n",
       "          [2.54857082, 3.22634053, 4.09844987],\n",
       "          [0.01745023, 1.15607524, 0.58922071],\n",
       "          [2.68275555, 6.05327624, 1.17001663]],\n",
       "\n",
       "         [[0.61817374, 1.170613  , 2.57613074],\n",
       "          [3.75656519, 4.40183245, 2.50034811],\n",
       "          [0.95351456, 1.95875993, 5.82015562],\n",
       "          ...,\n",
       "          [0.43847684, 4.88009484, 0.31393223],\n",
       "          [4.00149778, 2.71873622, 4.88794826],\n",
       "          [2.33285489, 1.11544375, 0.04037506]],\n",
       "\n",
       "         [[3.84268491, 5.52218923, 3.30036596],\n",
       "          [3.44606   , 1.1167624 , 3.49898112],\n",
       "          [3.83120138, 0.73661   , 1.9803072 ],\n",
       "          ...,\n",
       "          [4.02858858, 4.41763677, 0.80823922],\n",
       "          [0.07482879, 3.70994842, 2.84275062],\n",
       "          [1.74253804, 4.15999079, 2.17225998]]],\n",
       "\n",
       "\n",
       "        [[[3.57177051, 5.45490064, 0.75785982],\n",
       "          [4.31039971, 1.6953438 , 1.09415999],\n",
       "          [1.02924033, 4.02084438, 3.42481303],\n",
       "          ...,\n",
       "          [3.50262682, 2.38042235, 4.35556156],\n",
       "          [5.36495854, 4.29481513, 3.05444107],\n",
       "          [3.84862136, 5.37126027, 5.49338911]],\n",
       "\n",
       "         [[4.52724834, 1.05504712, 4.93109936],\n",
       "          [0.13973248, 4.20182607, 5.7916837 ],\n",
       "          [0.198924  , 2.27309714, 5.49596813],\n",
       "          ...,\n",
       "          [2.21488495, 4.68894639, 2.1860929 ],\n",
       "          [5.373406  , 1.6161678 , 3.16275536],\n",
       "          [4.10458199, 1.9832413 , 0.06008426]],\n",
       "\n",
       "         [[3.92854032, 1.58962106, 3.29446051],\n",
       "          [4.74292875, 1.33198198, 2.12162073],\n",
       "          [6.04276969, 4.99656879, 3.29818657],\n",
       "          ...,\n",
       "          [5.20340068, 0.5356003 , 4.34767775],\n",
       "          [0.53308076, 2.54088775, 2.18356053],\n",
       "          [4.40009089, 4.13978229, 1.49478636]]],\n",
       "\n",
       "\n",
       "        [[[1.91780266, 5.60123277, 0.81483284],\n",
       "          [6.10079603, 2.08612706, 2.49555259],\n",
       "          [4.26995022, 0.27322308, 0.58783456],\n",
       "          ...,\n",
       "          [0.78639258, 0.57565098, 0.33894204],\n",
       "          [5.16119035, 5.70706639, 2.31141551],\n",
       "          [5.4231704 , 0.42615183, 4.27114458]],\n",
       "\n",
       "         [[4.83370527, 0.49222542, 5.77625119],\n",
       "          [1.56683879, 2.93809304, 2.23660185],\n",
       "          [0.7638495 , 5.50816826, 2.03504793],\n",
       "          ...,\n",
       "          [0.44487149, 2.78467025, 2.38959132],\n",
       "          [2.9851237 , 1.90505685, 1.11526337],\n",
       "          [3.27938478, 5.05459589, 4.97454742]],\n",
       "\n",
       "         [[2.68317017, 2.76728834, 3.85221821],\n",
       "          [0.55025957, 3.19981755, 5.97039042],\n",
       "          [2.67070135, 6.11966852, 6.19958209],\n",
       "          ...,\n",
       "          [2.01150521, 3.57190861, 0.60933345],\n",
       "          [0.51229295, 1.08595266, 5.76921485],\n",
       "          [3.4334602 , 0.35110573, 2.78771119]]]], requires_grad=True)"
      ]
     },
     "execution_count": 61,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "weights"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [
    {
     "ename": "ValueError",
     "evalue": "Parameter 1.4039230771439868 does not contain a batch dimension.",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[63], line 116\u001b[0m\n\u001b[1;32m    113\u001b[0m trainer \u001b[38;5;241m=\u001b[39m Trainer(model\u001b[38;5;241m=\u001b[39mvqc, max_steps\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m100\u001b[39m, batch_size\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m10\u001b[39m, opt\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAdam\u001b[39m\u001b[38;5;124m\"\u001b[39m, learning_rate\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0.1\u001b[39m)\n\u001b[1;32m    115\u001b[0m \u001b[38;5;66;03m# Train the model\u001b[39;00m\n\u001b[0;32m--> 116\u001b[0m trained_model, training_costs \u001b[38;5;241m=\u001b[39m \u001b[43mtrainer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx_data\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my_data\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    118\u001b[0m \u001b[38;5;66;03m# Plot the training costs\u001b[39;00m\n\u001b[1;32m    119\u001b[0m plt\u001b[38;5;241m.\u001b[39mplot(training_costs)\n",
      "Cell \u001b[0;32mIn[63], line 82\u001b[0m, in \u001b[0;36mTrainer.train\u001b[0;34m(self, x, y)\u001b[0m\n\u001b[1;32m     80\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mtrain\u001b[39m(\u001b[38;5;28mself\u001b[39m, x, y):\n\u001b[1;32m     81\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124;03m\"\"\"Train the model using the provided data.\"\"\"\u001b[39;00m\n\u001b[0;32m---> 82\u001b[0m     cst \u001b[38;5;241m=\u001b[39m [\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcost\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweights\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my\u001b[49m\u001b[43m)\u001b[49m]  \u001b[38;5;66;03m# Initial cost\u001b[39;00m\n\u001b[1;32m     83\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m step \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmax_steps):\n\u001b[1;32m     84\u001b[0m         batch_indices \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mrandom\u001b[38;5;241m.\u001b[39mrandint(\u001b[38;5;241m0\u001b[39m, \u001b[38;5;28mlen\u001b[39m(x), \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbatch_size)\n",
      "Cell \u001b[0;32mIn[63], line 77\u001b[0m, in \u001b[0;36mTrainer.cost\u001b[0;34m(self, weights, x_batch, y_batch)\u001b[0m\n\u001b[1;32m     75\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mcost\u001b[39m(\u001b[38;5;28mself\u001b[39m, weights, x_batch, y_batch):\n\u001b[1;32m     76\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124;03m\"\"\"Calculate the cost for a given set of weights and batch of data.\"\"\"\u001b[39;00m\n\u001b[0;32m---> 77\u001b[0m     predictions \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcompute_batch_outputs\u001b[49m\u001b[43m(\u001b[49m\u001b[43mweights\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mx_batch\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     78\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msquare_loss(y_batch, predictions)\n",
      "Cell \u001b[0;32mIn[63], line 53\u001b[0m, in \u001b[0;36mVQC.compute_batch_outputs\u001b[0;34m(self, weights, x_batch)\u001b[0m\n\u001b[1;32m     50\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mW(weights[\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m])\n\u001b[1;32m     51\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m [qml\u001b[38;5;241m.\u001b[39mexpval(qml\u001b[38;5;241m.\u001b[39mPauliZ(wires\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m)) \u001b[38;5;28;01mfor\u001b[39;00m _ \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mlen\u001b[39m(x_batch))]\n\u001b[0;32m---> 53\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mcircuit\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx_batch\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/.conda/envs/qmg/lib/python3.10/site-packages/pennylane/workflow/qnode.py:1164\u001b[0m, in \u001b[0;36mQNode.__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1162\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m qml\u001b[38;5;241m.\u001b[39mcapture\u001b[38;5;241m.\u001b[39menabled():\n\u001b[1;32m   1163\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m qml\u001b[38;5;241m.\u001b[39mcapture\u001b[38;5;241m.\u001b[39mqnode_call(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m-> 1164\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_impl_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/.conda/envs/qmg/lib/python3.10/site-packages/pennylane/workflow/qnode.py:1150\u001b[0m, in \u001b[0;36mQNode._impl_call\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1147\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_update_gradient_fn(shots\u001b[38;5;241m=\u001b[39moverride_shots, tape\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_tape)\n\u001b[1;32m   1149\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m-> 1150\u001b[0m     res \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_execution_component\u001b[49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moverride_shots\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moverride_shots\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1151\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[1;32m   1152\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m old_interface \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mauto\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n",
      "File \u001b[0;32m~/.conda/envs/qmg/lib/python3.10/site-packages/pennylane/workflow/qnode.py:1103\u001b[0m, in \u001b[0;36mQNode._execution_component\u001b[0;34m(self, args, kwargs, override_shots)\u001b[0m\n\u001b[1;32m   1100\u001b[0m _prune_dynamic_transform(full_transform_program, inner_transform_program)\n\u001b[1;32m   1102\u001b[0m \u001b[38;5;66;03m# pylint: disable=unexpected-keyword-arg\u001b[39;00m\n\u001b[0;32m-> 1103\u001b[0m res \u001b[38;5;241m=\u001b[39m \u001b[43mqml\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mexecute\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m   1104\u001b[0m \u001b[43m    \u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_tape\u001b[49m\u001b[43m,\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1105\u001b[0m \u001b[43m    \u001b[49m\u001b[43mdevice\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdevice\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1106\u001b[0m \u001b[43m    \u001b[49m\u001b[43mgradient_fn\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgradient_fn\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1107\u001b[0m \u001b[43m    \u001b[49m\u001b[43minterface\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minterface\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1108\u001b[0m \u001b[43m    \u001b[49m\u001b[43mtransform_program\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mfull_transform_program\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1109\u001b[0m \u001b[43m    \u001b[49m\u001b[43minner_transform\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minner_transform_program\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1110\u001b[0m \u001b[43m    
\u001b[49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1111\u001b[0m \u001b[43m    \u001b[49m\u001b[43mgradient_kwargs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgradient_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1112\u001b[0m \u001b[43m    \u001b[49m\u001b[43moverride_shots\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moverride_shots\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1113\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mexecute_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1114\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1115\u001b[0m res \u001b[38;5;241m=\u001b[39m res[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m   1117\u001b[0m \u001b[38;5;66;03m# convert result to the interface in case the qfunc has no parameters\u001b[39;00m\n",
      "File \u001b[0;32m~/.conda/envs/qmg/lib/python3.10/site-packages/pennylane/workflow/execution.py:653\u001b[0m, in \u001b[0;36mexecute\u001b[0;34m(tapes, device, gradient_fn, interface, transform_program, inner_transform, config, grad_on_execution, gradient_kwargs, cache, cachesize, max_diff, override_shots, expand_fn, max_expansion, device_batch_transform, device_vjp, mcm_config)\u001b[0m\n\u001b[1;32m    650\u001b[0m     tapes, post_processing \u001b[38;5;241m=\u001b[39m transform_program(tapes)\n\u001b[1;32m    651\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m    652\u001b[0m     \u001b[38;5;66;03m# TODO: Remove once old device are removed\u001b[39;00m\n\u001b[0;32m--> 653\u001b[0m     tapes, program_post_processing \u001b[38;5;241m=\u001b[39m \u001b[43mtransform_program\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtapes\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    654\u001b[0m     tapes, program_pre_processing, config \u001b[38;5;241m=\u001b[39m _batch_transform(\n\u001b[1;32m    655\u001b[0m         tapes, device, config, override_shots, device_batch_transform\n\u001b[1;32m    656\u001b[0m     )\n\u001b[1;32m    658\u001b[0m     \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mpost_processing\u001b[39m(results):\n",
      "File \u001b[0;32m~/.conda/envs/qmg/lib/python3.10/site-packages/pennylane/transforms/core/transform_program.py:515\u001b[0m, in \u001b[0;36mTransformProgram.__call__\u001b[0;34m(self, tapes)\u001b[0m\n\u001b[1;32m    513\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_argnums \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_argnums[i] \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m    514\u001b[0m     tape\u001b[38;5;241m.\u001b[39mtrainable_params \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_argnums[i][j]\n\u001b[0;32m--> 515\u001b[0m new_tapes, fn \u001b[38;5;241m=\u001b[39m \u001b[43mtransform\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtape\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mtargs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mtkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    516\u001b[0m execution_tapes\u001b[38;5;241m.\u001b[39mextend(new_tapes)\n\u001b[1;32m    518\u001b[0m fns\u001b[38;5;241m.\u001b[39mappend(fn)\n",
      "File \u001b[0;32m~/.conda/envs/qmg/lib/python3.10/site-packages/pennylane/transforms/batch_params.py:188\u001b[0m, in \u001b[0;36mbatch_params\u001b[0;34m(tape, all_operations)\u001b[0m\n\u001b[1;32m    186\u001b[0m     batch_dim \u001b[38;5;241m=\u001b[39m qml\u001b[38;5;241m.\u001b[39mmath\u001b[38;5;241m.\u001b[39mshape(params[indices[\u001b[38;5;241m0\u001b[39m]])[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m    187\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mIndexError\u001b[39;00m:\n\u001b[0;32m--> 188\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mParameter \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mparams[\u001b[38;5;241m0\u001b[39m]\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m does not contain a batch dimension.\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m    190\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m indices:\n\u001b[1;32m    191\u001b[0m     shape \u001b[38;5;241m=\u001b[39m qml\u001b[38;5;241m.\u001b[39mmath\u001b[38;5;241m.\u001b[39mshape(params[i])\n",
      "\u001b[0;31mValueError\u001b[0m: Parameter 1.4039230771439868 does not contain a batch dimension."
     ]
    }
   ],
   "source": [
    "import pennylane as qml\n",
    "from pennylane import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from pennylane.templates import StronglyEntanglingLayers\n",
    "\n",
    "class VQC:\n",
    "    def __init__(self, n_qubits, n_layers, scaling):\n",
    "        self.dev = qml.device('lightning.gpu', wires=n_qubits)\n",
    "        self.n_qubits = n_qubits\n",
    "        self.n_layers = n_layers\n",
    "        self.scaling = scaling\n",
    "        self.initialize_weights()\n",
    "\n",
    "    def initialize_weights(self):\n",
    "        \"\"\"Initialize the weights for the variational circuit.\"\"\"\n",
    "        self.weights = np.random.uniform(low=0, high=2 * np.pi, size=(self.n_layers + 1, self.n_qubits, 3), requires_grad=True)\n",
    "    \n",
    "    def S(self, x):\n",
    "        \"\"\"Data-encoding circuit block.\"\"\"\n",
    "        for w in range(self.n_qubits):\n",
    "            qml.RX(self.scaling * x, wires=w)\n",
    "\n",
    "    def W(self, theta):\n",
    "        \"\"\"Trainable circuit block.\"\"\"\n",
    "        if self.n_qubits == 1:\n",
    "            qml.Rot(theta[0][0], theta[0][1], theta[0][2], wires=0)\n",
    "        else:\n",
    "            StronglyEntanglingLayers(theta.reshape((-1, self.n_qubits, 3)), wires=range(self.n_qubits))\n",
    "\n",
    "    def quantum_model(self, weights, x):\n",
    "        \"\"\"Define the quantum model for a given input x.\"\"\"\n",
    "        @qml.qnode(self.dev)\n",
    "        def circuit(x):\n",
    "            for layer in range(self.n_layers):\n",
    "                self.W(weights[layer])\n",
    "                self.S(x)\n",
    "            self.W(weights[-1])\n",
    "            return qml.expval(qml.PauliZ(wires=0))\n",
    "        return circuit(x)\n",
    "    \n",
    "    def compute_batch_outputs(self, weights, x_batch):\n",
    "        \"\"\"Compute the quantum model outputs for a batch of inputs.\"\"\"\n",
    "        @qml.batch_params\n",
    "        @qml.qnode(self.dev)\n",
    "        def circuit(x_batch):\n",
    "            for layer in range(self.n_layers):\n",
    "                self.W(weights[layer])\n",
    "                for x in x_batch:\n",
    "                    self.S(x)\n",
    "            self.W(weights[-1])\n",
    "            return [qml.expval(qml.PauliZ(wires=0)) for _ in range(len(x_batch))]\n",
    "        \n",
    "        return circuit(x_batch)\n",
    "\n",
    "class Trainer:\n",
    "    \"\"\"Mini-batch gradient-descent trainer for a VQC model.\"\"\"\n",
    "\n",
    "    def __init__(self, model: VQC, max_steps=200, batch_size=32, opt=\"Adam\", learning_rate=0.2):\n",
    "        self.model = model\n",
    "        self.max_steps = max_steps\n",
    "        self.batch_size = batch_size\n",
    "        self.opt = self.get_optimizer(opt, learning_rate)\n",
    "\n",
    "    def get_optimizer(self, opt, learning_rate):\n",
    "        \"\"\"Retrieve the specified optimizer.\n",
    "\n",
    "        Raises ValueError for names other than 'Adam' / 'GradientDescent'.\n",
    "        \"\"\"\n",
    "        if opt == \"Adam\":\n",
    "            return qml.AdamOptimizer(learning_rate)\n",
    "        elif opt == \"GradientDescent\":\n",
    "            return qml.GradientDescentOptimizer(learning_rate)\n",
    "        else:\n",
    "            raise ValueError(f\"Unsupported optimizer: {opt}\")\n",
    "\n",
    "    def square_loss(self, targets, predictions):\n",
    "        \"\"\"Calculate the (halved) mean square loss between targets and predictions.\"\"\"\n",
    "        return 0.5 * np.mean((targets - predictions) ** 2)\n",
    "\n",
    "    def cost(self, weights, x_batch, y_batch):\n",
    "        \"\"\"Calculate the cost for a given set of weights and batch of data.\"\"\"\n",
    "        predictions = self.model.compute_batch_outputs(weights, x_batch)\n",
    "        return self.square_loss(y_batch, predictions)\n",
    "\n",
    "    def train(self, x, y):\n",
    "        \"\"\"Train the model on (x, y); returns (model, list of per-step full-dataset costs).\"\"\"\n",
    "        cst = [self.cost(self.model.weights, x, y)]  # Initial cost\n",
    "        for step in range(self.max_steps):\n",
    "            # Sample a random mini-batch (with replacement).\n",
    "            batch_indices = np.random.randint(0, len(x), self.batch_size)\n",
    "            x_batch = x[batch_indices]\n",
    "            y_batch = y[batch_indices]\n",
    "            \n",
    "            # opt.step returns updated versions of all positional args;\n",
    "            # only the weights update is kept, x/y are discarded.\n",
    "            self.model.weights, _, _ = self.opt.step(self.cost, self.model.weights, x_batch, y_batch)\n",
    "            \n",
    "            # NOTE(review): cost is re-evaluated over the *full* dataset\n",
    "            # every step for logging - O(len(x)) circuit evaluations.\n",
    "            current_cost = self.cost(self.model.weights, x, y)\n",
    "            cst.append(current_cost)\n",
    "            if (step + 1) % 10 == 0:\n",
    "                print(f\"Cost at step {step + 1:3}: {current_cost:.4f}\")\n",
    "        \n",
    "        return self.model, cst\n",
    "\n",
    "# Example usage\n",
    "\n",
    "# Define the parameters for the model\n",
    "n_qubits = 2\n",
    "n_layers = 3\n",
    "scaling = 1.0  # frequency scaling applied inside the RX data encoding\n",
    "\n",
    "# Create a VQC instance\n",
    "vqc = VQC(n_qubits, n_layers, scaling)\n",
    "\n",
    "# Generate some random input data\n",
    "np.random.seed(42)  # fixed seed for reproducibility\n",
    "x_data = np.random.uniform(low=0, high=2 * np.pi, size=(100,))  # 100 random input data points\n",
    "y_data = np.sin(x_data)  # Target data (for example purposes, here using a sine function)\n",
    "\n",
    "# Initialize the Trainer\n",
    "trainer = Trainer(model=vqc, max_steps=100, batch_size=10, opt=\"Adam\", learning_rate=0.1)\n",
    "\n",
    "# Train the model\n",
    "trained_model, training_costs = trainer.train(x_data, y_data)\n",
    "\n",
    "# Plot the training costs\n",
    "plt.plot(training_costs)\n",
    "plt.xlabel(\"Step\")\n",
    "plt.ylabel(\"Cost\")\n",
    "plt.title(\"Training Cost Over Time\")\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(10, 12)"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): free-standing function that takes `self` but is not\n",
    "# attached to any class; presumably a scratch draft of a method\n",
    "# (cf. VQGenerator.compute_single_output) - confirm before reuse.\n",
    "def compute_single_output(self, weights, x):\n",
    "    \"\"\"Return computational-basis probabilities for a single input x.\n",
    "\n",
    "    Unlike the re-uploading circuit, the data block S(x) is applied once,\n",
    "    followed by all trainable blocks W.\n",
    "    \"\"\"\n",
    "    @qml.qnode(self.dev)\n",
    "    def circuit():\n",
    "        self.S(x)  # single data-encoding block\n",
    "        for layer in range(self.n_layers):\n",
    "            self.W(weights[layer])\n",
    "        self.W(weights[-1])  # final block (weights has n_layers + 1 entries)\n",
    "        return qml.probs(wires=list(range(self.n_qubits)))\n",
    "    return circuit()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "qmg",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
