{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>decimal_index</th>\n",
       "      <th>smiles</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>4</td>\n",
       "      <td>C</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>8</td>\n",
       "      <td>O</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>12</td>\n",
       "      <td>N</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>16</td>\n",
       "      <td>C</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>21</td>\n",
       "      <td>CC</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   decimal_index smiles\n",
       "0              4      C\n",
       "1              8      O\n",
       "2             12      N\n",
       "3             16      C\n",
       "4             21     CC"
      ]
     },
     "execution_count": 39,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import pennylane as qml\n",
    "\n",
    "from pennylane import numpy as np\n",
    "import torch.optim as optim\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "from rdkit import RDLogger\n",
    "RDLogger.DisableLog('rdApp.*')\n",
    "\n",
    "import pandas as pd\n",
    "import sys\n",
    "sys.path.append(\"../\")\n",
    "from qmg.utils import MoleculeQuantumStateGenerator\n",
    "\n",
     "# Configuration: the pre-computed chemical space for molecules with up to\n",
     "# `num_heavy_atom` heavy atoms is loaded from CSV below.\n",
     "num_heavy_atom = 2\n",
     "data_path = f\"../dataset/chemical_space/effective_{num_heavy_atom}.csv\"\n",
     "# Project helper: provides n_qubits, generate_valid_mask() and QuantumStateToSmiles()\n",
     "# (used in later cells) for this heavy-atom budget.\n",
     "data_generator = MoleculeQuantumStateGenerator(heavy_atom_size=num_heavy_atom, ncpus=16)\n",
     "data = pd.read_csv(data_path)\n",
     "data.head()\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cpu\n"
     ]
    }
   ],
   "source": [
     "# Quantum variables\n",
     "# n_a_qubits = 1  # Number of ancillary qubits / N_A\n",
     "n_qubits = data_generator.n_qubits # + n_a_qubits  # Total number of qubits / N\n",
     "q_depth = 6  # Depth of the parameterised quantum circuit / D\n",
     "n_generators = 1  # Number of subgenerators for the patch method / N_G\n",
     "sample_num = 2000 # Number of sampled molecules for validating the training\n",
     "# data_reuploading = False\n",
     "\n",
     "dev = qml.device(\"lightning.qubit\", wires=n_qubits) # Quantum simulator\n",
     "# device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") # Enable CUDA device if available\n",
     "device = torch.device(\"cpu\")\n",
     "print(device)\n",
     "\n",
     "# Binary mask over all 2**n_qubits basis states; 1 marks states that decode to a valid molecule.\n",
     "valid_state_mask = data_generator.generate_valid_mask(data)\n",
     "valid_state_mask = torch.Tensor(valid_state_mask).to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([6, 18])\n",
      "0: ──RY(1.39)──RZ(0.06)──RX(0.99)──RY(0.42)─╭●─────────────╭X──RZ(0.39)──RX(0.93)──RY(0.54)─╭●──────\n",
      "1: ──RY(0.03)──RZ(0.57)──RX(0.81)──RY(0.51)─╰X─╭●──────────│───RZ(0.30)──RX(0.91)──RY(0.05)─╰X─╭●───\n",
      "2: ──RY(1.41)──RZ(0.21)──RX(0.23)──RY(0.35)────╰X─╭●───────│───RZ(0.27)──RX(0.91)──RY(0.65)────╰X─╭●\n",
      "3: ──RY(0.02)──RZ(0.01)──RX(0.68)──RY(0.72)───────╰X─╭●────│───RZ(0.06)──RX(0.99)──RY(0.02)───────╰X\n",
      "4: ──RY(0.78)──RZ(0.57)──RX(0.71)──RY(0.19)──────────╰X─╭●─│───RZ(0.42)──RX(0.95)──RY(0.37)─────────\n",
      "5: ──RY(0.40)──RZ(0.82)──RX(0.21)──RY(0.94)─────────────╰X─╰●──RZ(0.38)──RX(0.78)──RY(0.30)─────────\n",
      "\n",
      "────────╭X──RZ(0.65)──RX(0.91)──RY(0.71)─╭●─────────────╭X──RZ(0.86)──RX(0.47)──RY(0.03)─╭●─────────\n",
      "────────│───RZ(0.75)──RX(0.65)──RY(0.02)─╰X─╭●──────────│───RZ(0.83)──RX(0.32)──RY(0.35)─╰X─╭●──────\n",
      "────────│───RZ(0.80)──RX(0.82)──RY(0.25)────╰X─╭●───────│───RZ(0.30)──RX(0.56)──RY(0.54)────╰X─╭●───\n",
      "──╭●────│───RZ(0.76)──RX(0.57)──RY(0.98)───────╰X─╭●────│───RZ(0.87)──RX(0.35)──RY(0.71)───────╰X─╭●\n",
      "──╰X─╭●─│───RZ(0.83)──RX(0.92)──RY(0.70)──────────╰X─╭●─│───RZ(0.73)──RX(0.66)──RY(0.66)──────────╰X\n",
      "─────╰X─╰●──RZ(0.24)──RX(0.31)──RY(0.86)─────────────╰X─╰●──RZ(0.06)──RX(0.80)──RY(0.70)────────────\n",
      "\n",
      "─────╭X──RZ(0.03)──RX(0.78)──RY(0.42)─╭●─────────────╭X──RZ(0.44)──RX(0.74)──RY(0.52)─╭●────────────\n",
      "─────│───RZ(0.07)──RX(0.78)──RY(0.84)─╰X─╭●──────────│───RZ(0.10)──RX(0.70)──RY(0.73)─╰X─╭●─────────\n",
      "─────│───RZ(0.39)──RX(0.19)──RY(0.47)────╰X─╭●───────│───RZ(0.01)──RX(0.34)──RY(0.99)────╰X─╭●──────\n",
      "─────│───RZ(0.54)──RX(0.64)──RY(0.91)───────╰X─╭●────│───RZ(0.31)──RX(0.61)──RY(0.65)───────╰X─╭●───\n",
      "──╭●─│───RZ(0.94)──RX(0.09)──RY(0.10)──────────╰X─╭●─│───RZ(0.88)──RX(0.67)──RY(0.40)──────────╰X─╭●\n",
      "──╰X─╰●──RZ(0.14)──RX(0.78)──RY(0.74)─────────────╰X─╰●──RZ(0.08)──RX(0.14)──RY(0.23)─────────────╰X\n",
      "\n",
      "──╭X─┤ ╭Probs\n",
      "──│──┤ ├Probs\n",
      "──│──┤ ├Probs\n",
      "──│──┤ ├Probs\n",
      "──│──┤ ├Probs\n",
      "──╰●─┤ ╰Probs\n"
     ]
    }
   ],
   "source": [
    "@qml.qnode(dev, diff_method=\"parameter-shift\")\n",
    "def quantum_circuit(noise, weights, data_reuploading):\n",
    "    weights = weights.reshape(q_depth, n_qubits * 3)\n",
    "    for i in range(q_depth):\n",
    "        if data_reuploading:\n",
    "            for j in range(n_qubits):\n",
    "                qml.RY(noise[j], wires=j)\n",
    "        else:\n",
    "            if i == 0:\n",
    "                for j in range(n_qubits):\n",
    "                    qml.RY(noise[j], wires=j)\n",
    "        # Parameterised layer\n",
    "        for y in range(n_qubits):\n",
    "            qml.RZ(weights[i][3*y], wires=y)\n",
    "            qml.RX(weights[i][3*y+1], wires=y)\n",
    "            qml.RY(weights[i][3*y+2], wires=y)\n",
    "        # Control Z gates\n",
    "        for y in range(n_qubits - 1):\n",
    "            qml.CNOT(wires=[y, y + 1])\n",
    "        else:\n",
    "            qml.CNOT(wires=[y+1, 0])\n",
    "    return qml.probs(wires=list(range(n_qubits)))\n",
    "\n",
    "def binary_tensor_to_string(tensor):\n",
    "    flat_tensor = tensor.view(-1).tolist()\n",
    "    binary_string = ''.join(map(str, flat_tensor))\n",
    "    return binary_string\n",
    "\n",
    "def calc_validity_and_uniqueness(smiles_list):\n",
    "    valid_smiles_list = [i for i in smiles_list if i is not None]\n",
    "    return len(valid_smiles_list) / len(smiles_list), len(set(valid_smiles_list)) / len(smiles_list)\n",
    "\n",
    "# def partial_measure(noise, weights):\n",
    "#     # Non-linear Transform\n",
    "#     probs = quantum_circuit(noise, weights)\n",
    "#     probsgiven0 = probs[: (2 ** (n_qubits - n_a_qubits))]\n",
    "#     probsgiven0 /= torch.sum(probsgiven0)\n",
    "\n",
    "#     # # Post-Processing\n",
    "#     # probsgiven = probsgiven0 / torch.max(probsgiven0)\n",
    "#     return probsgiven0\n",
    "\n",
     "class PatchQuantumGenerator(nn.Module):\n",
     "    \"\"\"Quantum generator class for the patch method\"\"\"\n",
     "    def __init__(self, n_generators, valid_state_mask, q_delta=1, temperature=5, data_reuploading=False):\n",
     "        \"\"\"\n",
     "        Args:\n",
     "            n_generators (int): Number of sub-generators to be used in the patch method.\n",
     "            valid_state_mask (torch.Tensor): binary mask over the 2**n_qubits states; 1 marks a valid state.\n",
     "            q_delta (float, optional): Spread of the random distribution for parameter initialisation.\n",
     "            temperature (float, optional): parameter adjusting the output probability distribution.\n",
     "            data_reuploading (bool): whether to use the data reuploading technique.\n",
     "        \"\"\"\n",
     "        super().__init__()\n",
     "        # One flat vector of q_depth * n_qubits * 3 rotation angles per sub-generator.\n",
     "        self.q_params = nn.ParameterList(\n",
     "            [\n",
     "                nn.Parameter(q_delta * torch.rand(q_depth * n_qubits * 3), requires_grad=True)\n",
     "                for _ in range(n_generators)\n",
     "            ]\n",
     "        )\n",
     "        self.n_generators = n_generators\n",
     "        self.valid_state_mask = valid_state_mask\n",
     "        # Random projection of the input noise onto the 2**n_qubits state space;\n",
     "        # frozen below so it is excluded from training.\n",
     "        self.noise_to_probability_linear_layer = nn.Linear(n_qubits, 2**n_qubits)\n",
     "        self.softmax_layer = nn.Softmax(dim=1)\n",
     "        self.temperature = temperature\n",
     "        self.data_reuploading = data_reuploading\n",
     "        for param in self.noise_to_probability_linear_layer.parameters():\n",
     "            param.requires_grad = False\n",
     "\n",
     "    def forward(self, x):\n",
     "        \"\"\"Run every sub-generator on a batch of noise vectors.\n",
     "\n",
     "        Args:\n",
     "            x: (batch, n_qubits) tensor of noise angles.\n",
     "\n",
     "        Returns:\n",
     "            outputs: (batch, n_generators * 2**n_qubits) measured probabilities.\n",
     "            converted_x: (batch, 2**n_qubits) temperature-scaled linear projection of\n",
     "                the noise (raw logits; the softmax is left to the caller).\n",
     "        \"\"\"\n",
     "        # Size of each sub-generator output\n",
     "        patch_size = 2 ** n_qubits # 2 ** (n_qubits - n_a_qubits)\n",
     "        # Create a Tensor to 'catch' a batch of images from the for loop. x.size(0) is the batch size.\n",
     "        outputs = torch.Tensor(x.size(0), 0).to(device)\n",
     "        # Iterate over all sub-generators\n",
     "        for params in self.q_params:\n",
     "            # Create a Tensor to 'catch' a batch of the patches from a single sub-generator\n",
     "            patches = torch.Tensor(0, patch_size).to(device)\n",
     "            for elem in x:\n",
     "                q_out = quantum_circuit(elem, params, self.data_reuploading).float().unsqueeze(0) # partial_measure\n",
     "                patches = torch.cat((patches, q_out))\n",
     "            # Each batch of patches is concatenated with each other to create a batch of images\n",
     "            outputs = torch.cat((outputs, patches), 1)\n",
     "\n",
     "        # converted input noise\n",
     "        converted_x = self.noise_to_probability_linear_layer(x) * self.temperature# - 10 * (1 - self.valid_state_mask) \n",
     "        # converted_x = self.softmax_layer(converted_x)\n",
     "        return outputs, converted_x\n",
     "    \n",
     "    def random_sample(self, sample_num, fixed_noise=False):\n",
     "        \"\"\"Sample molecules from the current circuit parameters.\n",
     "\n",
     "        Args:\n",
     "            sample_num (int): number of quantum-state samples to draw.\n",
     "            fixed_noise (bool): if True, draw all shots with a single noise vector;\n",
     "                otherwise draw one shot per freshly sampled noise vector.\n",
     "\n",
     "        Returns:\n",
     "            list: SMILES strings; None entries mark states that decode to no molecule.\n",
     "        \"\"\"\n",
     "        # Collect all trainable circuit weights, skipping the frozen linear layer.\n",
     "        weights = torch.tensor([])\n",
     "        state_dict = self.state_dict()\n",
     "        for name, param in state_dict.items():\n",
     "            if name in [\"noise_to_probability_linear_layer.weight\", \"noise_to_probability_linear_layer.bias\"]:\n",
     "                continue\n",
     "            weights = torch.cat([weights, param.data])\n",
     "        if fixed_noise:\n",
     "            dev_sample = qml.device(\"default.qubit\", wires=n_qubits, shots=sample_num)\n",
     "        else:\n",
     "            dev_sample = qml.device(\"default.qubit\", wires=n_qubits, shots=1)\n",
     "        # NOTE(review): this circuit duplicates the module-level `quantum_circuit`\n",
     "        # (only the measurement differs: qml.sample() vs qml.probs()); keep in sync.\n",
     "        @qml.qnode(dev_sample)\n",
     "        def quantum_circuit_sample(noise, weights, data_reuploading):\n",
     "            weights = weights.reshape(q_depth, n_qubits * 3)\n",
     "            for i in range(q_depth):\n",
     "                if data_reuploading:\n",
     "                    for j in range(n_qubits):\n",
     "                        qml.RY(noise[j], wires=j)\n",
     "                else:\n",
     "                    if i == 0:\n",
     "                        for j in range(n_qubits):\n",
     "                            qml.RY(noise[j], wires=j)\n",
     "                # Parameterised layer\n",
     "                for y in range(n_qubits):\n",
     "                    qml.RZ(weights[i][3*y], wires=y)\n",
     "                    qml.RX(weights[i][3*y+1], wires=y)\n",
     "                    qml.RY(weights[i][3*y+2], wires=y)\n",
     "                # Ring of CNOT entangling gates (the for-else's else-branch always\n",
     "                # runs, adding the wrap-around CNOT from the last qubit to qubit 0).\n",
     "                for y in range(n_qubits - 1):\n",
     "                    qml.CNOT(wires=[y, y + 1])\n",
     "                else:\n",
     "                    qml.CNOT(wires=[y+1, 0])\n",
     "            return qml.sample()\n",
     "        \n",
     "        \n",
     "        if fixed_noise:\n",
     "            noise = torch.rand(n_qubits, device=device) * np.pi / 2\n",
     "            sampled_quantum_states = quantum_circuit_sample(noise, weights, self.data_reuploading) # 2-dimensional torch.tensor\n",
     "        else:\n",
     "            sampled_quantum_states = []\n",
     "            for i in range(sample_num):\n",
     "                noise = torch.rand(n_qubits, device=device) * np.pi / 2\n",
     "                sampled_quantum_states.append(quantum_circuit_sample(noise, weights, self.data_reuploading))\n",
     "\n",
     "        # Convert each sampled bitstring tensor to a '0'/'1' string, then to SMILES.\n",
     "        sampled_quantum_states = [binary_tensor_to_string(qs) for qs in sampled_quantum_states]\n",
     "        smiles_list = []\n",
     "        for q in sampled_quantum_states:\n",
     "            smiles_list.append(data_generator.QuantumStateToSmiles(q))\n",
     "        return smiles_list\n",
    "    \n",
     "# Sanity check: draw the (non-reuploading) circuit for one random noise/weight pair.\n",
     "drawer = qml.draw(quantum_circuit, show_all_wires=True, wire_order=list(range(n_qubits)), expansion_strategy=\"device\")\n",
     "noise = torch.rand(n_qubits, device=device) * np.pi / 2\n",
     "weights = torch.rand(q_depth, n_qubits * 3)\n",
     "print(weights.shape)\n",
     "print(drawer(noise, weights, False))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([8, 6])\n",
      "PatchQuantumGenerator(\n",
      "  (q_params): ParameterList(  (0): Parameter containing: [torch.float32 of size 108])\n",
      "  (noise_to_probability_linear_layer): Linear(in_features=6, out_features=64, bias=True)\n",
      "  (softmax_layer): Softmax(dim=1)\n",
      ")\n",
      "torch.Size([8, 64])\n",
      "torch.Size([8, 64])\n",
      "tensor([ 5.5196, -3.8536, -2.4605, -0.7718, -3.0444, -3.7778,  1.5257,  1.6894,\n",
      "         2.4601,  4.6384,  2.1685, -1.6691, -3.2748, -0.9333,  2.7755,  2.3099,\n",
      "        -2.0354, -2.7791, -0.7890, -1.4811, -0.8917, -6.9078,  2.1025, -0.5557,\n",
      "        -2.7805, -1.3127, -0.8759,  0.9312, -2.6462, -2.2877, -0.6191,  4.7163,\n",
      "        -4.1985, -0.1422,  2.4960,  0.0259, -2.6579, -2.0169,  2.7834,  2.6082,\n",
      "        -1.0776, -3.6454, -5.5505, -1.3573,  2.6991, -0.5373,  1.6365, -3.8647,\n",
      "        -1.5355, -0.4470, -2.5301,  0.7358, -3.5461,  0.2384,  0.2967,  3.3985,\n",
      "        -2.1383, -1.6908, -1.8006, -6.4132, -0.2336,  2.7593,  9.0583, -0.4380])\n",
      "tensor(1., grad_fn=<SumBackward0>)\n"
     ]
    }
   ],
   "source": [
     "# Smoke test: one forward pass of the generator on a batch of 8 random noise vectors.\n",
     "noise = torch.rand(8, n_qubits, device=device) * np.pi / 2\n",
     "print(noise.shape)\n",
     "generator = PatchQuantumGenerator(n_generators, valid_state_mask, data_reuploading=False).to(device)\n",
     "print(generator)\n",
     "\n",
     "output, converted_x = generator(noise)\n",
     "print(output.shape)\n",
     "print(converted_x.shape)\n",
     "print(converted_x[1])\n",
     "# Each row of `output` is a measured probability distribution, so it sums to 1.\n",
     "print(torch.sum(output[0]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
     "class valid_state_loss(nn.Module):\n",
     "    \"\"\"Negative log of the probability mass each sample places on valid states.\"\"\"\n",
     "    def __init__(self, valid_state_mask: torch.tensor, reduction=\"mean\"):\n",
     "        \"\"\"\n",
     "        Parameters\n",
     "        ----------\n",
     "        valid_state_mask :  torch.tensor\n",
     "            binary tensor, 1 indicates valid quantum state, and 0 indicates invalid.\n",
     "        reduction : str\n",
     "            'mean', 'sum', or any other value for per-sample (unreduced) losses.\n",
     "        \"\"\"\n",
     "        super().__init__()\n",
     "        self.valid_state_mask = valid_state_mask\n",
     "        self.reduction = reduction\n",
     "\n",
     "    def forward(self, predictions):\n",
     "        # Total probability mass on valid states per sample. NOTE(review): if a\n",
     "        # sample puts zero mass on every valid state, log(0) yields -inf here.\n",
     "        loss = (predictions * self.valid_state_mask).sum(dim=1)\n",
     "        if self.reduction == \"mean\":\n",
     "            return torch.mean(-torch.log(loss))\n",
     "        elif self.reduction == \"sum\":\n",
     "            return torch.sum(-torch.log(loss))\n",
     "        else:\n",
     "            return -torch.log(loss)\n",
    "\n",
    "class jenson_shannon_divergence(nn.Module):\n",
    "    def __init__(self, valid_state_mask, reduction=\"batchmean\"):\n",
    "        \"\"\"\n",
    "        Parameters\n",
    "        ----------\n",
    "        valid_state_mask :  torch.tensor\n",
    "            binart tensor, 1 indicates valid quantum state, and 0 indicates invalid.\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        self.valid_state_mask = valid_state_mask\n",
    "        self.kl_div = nn.KLDivLoss(reduction=reduction, log_target=False)\n",
    "        self.softmax_layer = nn.Softmax(dim=1)\n",
    "\n",
    "    def forward(self, outputs, coverted_noise):\n",
    "        outputs = outputs[:, self.valid_state_mask == 1.]\n",
    "        coverted_noise = coverted_noise[:, self.valid_state_mask == 1.]\n",
    "        converted_noise_probability = self.softmax_layer(coverted_noise)\n",
    "        total_m = 0.5 * (outputs + converted_noise_probability)\n",
    "        loss = 0.0\n",
    "        loss += self.kl_div(outputs.log(), total_m) \n",
    "        loss += self.kl_div(converted_noise_probability.log(), total_m) \n",
    "        return (0.5 * loss)\n",
    "    \n",
    "class diversity_loss(nn.Module):\n",
    "    def __init__(self, valid_state_mask, reduction=\"batchmean\"):\n",
    "        super().__init__()\n",
    "        self.valid_state_mask = valid_state_mask\n",
    "        self.kl_div = nn.KLDivLoss(reduction=reduction, log_target=False)\n",
    "\n",
    "    def jensen_shannon_divergence(self, ps, qs):\n",
    "        m = 0.5 * (ps + qs)\n",
    "        return 0.5 * (self.kl_div(ps.log(), m) + self.kl_div(qs.log(), m))\n",
    "    \n",
    "    def forward(self, distributions):\n",
    "        distributions = distributions[:, self.valid_state_mask == 1.]\n",
    "        reversed_distributions = torch.flip(distributions, dims=[0])\n",
    "        return - self.jensen_shannon_divergence(distributions, reversed_distributions)\n",
    "    \n",
     "class average_difference_probability_loss(nn.Module):\n",
     "    \"\"\"Negative mean L1 distance between the two halves of a batch of distributions.\n",
     "\n",
     "    Minimising this encourages different noise inputs to produce different output\n",
     "    distributions (a diversity objective).\n",
     "    \"\"\"\n",
     "    def __init__(self, valid_state_mask):\n",
     "        super().__init__()\n",
     "        self.valid_state_mask = valid_state_mask\n",
     "    \n",
     "    def forward(self, distributions):\n",
     "        # Split the batch in two and compare the halves row-by-row on valid states.\n",
     "        # NOTE(review): an odd batch size makes the halves differ by one row and the\n",
     "        # subtraction below would fail — confirm callers always pass an even batch.\n",
     "        half_length = len(distributions)//2\n",
     "        distributions_first = distributions[:half_length][:, self.valid_state_mask == 1.]\n",
     "        distributions_second = distributions[half_length:][:, self.valid_state_mask == 1.]\n",
     "        return - torch.mean(torch.sum(torch.abs(distributions_first - distributions_second), dim=1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([0.4120, 0.4396, 0.3538, 0.3244, 0.3910, 0.3822, 0.4450, 0.3632],\n",
      "       grad_fn=<SumBackward1>)\n",
      "tensor(0.9496, grad_fn=<MeanBackward0>)\n",
      "tensor(1.2729, grad_fn=<MulBackward0>)\n",
      "tensor(-0.0511, grad_fn=<NegBackward0>)\n",
      "tensor(-0.2724, grad_fn=<NegBackward0>)\n"
     ]
    }
   ],
   "source": [
     "# Manual computation of the valid-state mass, then the same value via the loss module.\n",
     "a = output * valid_state_mask\n",
     "loss = a.sum(dim=1)\n",
     "print(loss)\n",
     "torch.mean(-torch.log(loss))\n",
     "\n",
     "criterion = valid_state_loss(valid_state_mask=valid_state_mask)\n",
     "loss = criterion(output)\n",
     "print(loss)\n",
     "# Divergence between circuit output and projected noise (logits) on valid states.\n",
     "criterion_js = jenson_shannon_divergence(valid_state_mask=valid_state_mask, reduction=\"batchmean\")\n",
     "loss = criterion_js(output, converted_x)\n",
     "print(loss)\n",
     "\n",
     "# Two alternative diversity objectives (both negative: more diverse = lower loss).\n",
     "criterion_diversity = diversity_loss(valid_state_mask=valid_state_mask, reduction=\"batchmean\")\n",
     "loss = criterion_diversity(output)\n",
     "print(loss)\n",
     "# print(loss.shape)\n",
     "criterion_diversity_2 = average_difference_probability_loss(valid_state_mask)\n",
     "loss = criterion_diversity_2(output)\n",
     "print(loss)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 1 tensor(-0.2767, grad_fn=<NegBackward0>) tensor(0.8117, grad_fn=<MeanBackward0>) tensor(0.5350, grad_fn=<AddBackward0>)\n",
      "Step 2 tensor(-0.3956, grad_fn=<NegBackward0>) tensor(0.5069, grad_fn=<MeanBackward0>) tensor(0.1113, grad_fn=<AddBackward0>)\n",
      "Step 3 tensor(-0.3553, grad_fn=<NegBackward0>) tensor(0.3873, grad_fn=<MeanBackward0>) tensor(0.0320, grad_fn=<AddBackward0>)\n",
      "Step 4 tensor(-0.4324, grad_fn=<NegBackward0>) tensor(0.3895, grad_fn=<MeanBackward0>) tensor(-0.0429, grad_fn=<AddBackward0>)\n",
      "Step 5 tensor(-0.4404, grad_fn=<NegBackward0>) tensor(0.3246, grad_fn=<MeanBackward0>) tensor(-0.1158, grad_fn=<AddBackward0>)\n",
      "Step 6 tensor(-0.4308, grad_fn=<NegBackward0>) tensor(0.3123, grad_fn=<MeanBackward0>) tensor(-0.1185, grad_fn=<AddBackward0>)\n",
      "Step 7 tensor(-0.4374, grad_fn=<NegBackward0>) tensor(0.3239, grad_fn=<MeanBackward0>) tensor(-0.1135, grad_fn=<AddBackward0>)\n",
      "Step 8 tensor(-0.4865, grad_fn=<NegBackward0>) tensor(0.2959, grad_fn=<MeanBackward0>) tensor(-0.1906, grad_fn=<AddBackward0>)\n",
      "Step 9 tensor(-0.5012, grad_fn=<NegBackward0>) tensor(0.2750, grad_fn=<MeanBackward0>) tensor(-0.2263, grad_fn=<AddBackward0>)\n",
      "Step 10 tensor(-0.4383, grad_fn=<NegBackward0>) tensor(0.2542, grad_fn=<MeanBackward0>) tensor(-0.1840, grad_fn=<AddBackward0>)\n",
      "Step 10, sampling 2000 molecules, validity: 81.95%, uniqueness: 1.25%.\n",
      "Step 11 tensor(-0.4538, grad_fn=<NegBackward0>) tensor(0.2679, grad_fn=<MeanBackward0>) tensor(-0.1859, grad_fn=<AddBackward0>)\n",
      "Step 12 tensor(-0.4594, grad_fn=<NegBackward0>) tensor(0.2596, grad_fn=<MeanBackward0>) tensor(-0.1998, grad_fn=<AddBackward0>)\n",
      "Step 13 tensor(-0.4592, grad_fn=<NegBackward0>) tensor(0.2387, grad_fn=<MeanBackward0>) tensor(-0.2205, grad_fn=<AddBackward0>)\n",
      "Step 14 tensor(-0.4495, grad_fn=<NegBackward0>) tensor(0.2052, grad_fn=<MeanBackward0>) tensor(-0.2443, grad_fn=<AddBackward0>)\n",
      "Step 15 tensor(-0.5095, grad_fn=<NegBackward0>) tensor(0.2308, grad_fn=<MeanBackward0>) tensor(-0.2787, grad_fn=<AddBackward0>)\n",
      "Step 16 tensor(-0.5581, grad_fn=<NegBackward0>) tensor(0.2238, grad_fn=<MeanBackward0>) tensor(-0.3343, grad_fn=<AddBackward0>)\n",
      "Step 17 tensor(-0.5282, grad_fn=<NegBackward0>) tensor(0.2247, grad_fn=<MeanBackward0>) tensor(-0.3034, grad_fn=<AddBackward0>)\n",
      "Step 18 tensor(-0.4997, grad_fn=<NegBackward0>) tensor(0.2192, grad_fn=<MeanBackward0>) tensor(-0.2804, grad_fn=<AddBackward0>)\n",
      "Step 19 tensor(-0.5347, grad_fn=<NegBackward0>) tensor(0.2376, grad_fn=<MeanBackward0>) tensor(-0.2972, grad_fn=<AddBackward0>)\n",
      "Step 20 tensor(-0.5461, grad_fn=<NegBackward0>) tensor(0.2143, grad_fn=<MeanBackward0>) tensor(-0.3318, grad_fn=<AddBackward0>)\n",
      "Step 20, sampling 2000 molecules, validity: 88.8%, uniqueness: 1.25%.\n",
      "Step 21 tensor(-0.5851, grad_fn=<NegBackward0>) tensor(0.2106, grad_fn=<MeanBackward0>) tensor(-0.3745, grad_fn=<AddBackward0>)\n",
      "Step 22 tensor(-0.4806, grad_fn=<NegBackward0>) tensor(0.1771, grad_fn=<MeanBackward0>) tensor(-0.3035, grad_fn=<AddBackward0>)\n",
      "Step 23 tensor(-0.4835, grad_fn=<NegBackward0>) tensor(0.2173, grad_fn=<MeanBackward0>) tensor(-0.2661, grad_fn=<AddBackward0>)\n",
      "Step 24 tensor(-0.5867, grad_fn=<NegBackward0>) tensor(0.1948, grad_fn=<MeanBackward0>) tensor(-0.3920, grad_fn=<AddBackward0>)\n",
      "Step 25 tensor(-0.5290, grad_fn=<NegBackward0>) tensor(0.1949, grad_fn=<MeanBackward0>) tensor(-0.3342, grad_fn=<AddBackward0>)\n",
      "Step 26 tensor(-0.5821, grad_fn=<NegBackward0>) tensor(0.1849, grad_fn=<MeanBackward0>) tensor(-0.3971, grad_fn=<AddBackward0>)\n",
      "Step 27 tensor(-0.5815, grad_fn=<NegBackward0>) tensor(0.1914, grad_fn=<MeanBackward0>) tensor(-0.3901, grad_fn=<AddBackward0>)\n",
      "Step 28 tensor(-0.4899, grad_fn=<NegBackward0>) tensor(0.1959, grad_fn=<MeanBackward0>) tensor(-0.2940, grad_fn=<AddBackward0>)\n",
      "Step 29 tensor(-0.5435, grad_fn=<NegBackward0>) tensor(0.1714, grad_fn=<MeanBackward0>) tensor(-0.3722, grad_fn=<AddBackward0>)\n",
      "Step 30 tensor(-0.5514, grad_fn=<NegBackward0>) tensor(0.1832, grad_fn=<MeanBackward0>) tensor(-0.3681, grad_fn=<AddBackward0>)\n",
      "Step 30, sampling 2000 molecules, validity: 87.1%, uniqueness: 1.25%.\n",
      "Step 31 tensor(-0.5354, grad_fn=<NegBackward0>) tensor(0.1796, grad_fn=<MeanBackward0>) tensor(-0.3558, grad_fn=<AddBackward0>)\n",
      "Step 32 tensor(-0.6049, grad_fn=<NegBackward0>) tensor(0.1714, grad_fn=<MeanBackward0>) tensor(-0.4335, grad_fn=<AddBackward0>)\n",
      "Step 33 tensor(-0.6189, grad_fn=<NegBackward0>) tensor(0.1590, grad_fn=<MeanBackward0>) tensor(-0.4599, grad_fn=<AddBackward0>)\n",
      "Step 34 tensor(-0.6067, grad_fn=<NegBackward0>) tensor(0.1747, grad_fn=<MeanBackward0>) tensor(-0.4321, grad_fn=<AddBackward0>)\n",
      "Step 35 tensor(-0.6216, grad_fn=<NegBackward0>) tensor(0.1756, grad_fn=<MeanBackward0>) tensor(-0.4460, grad_fn=<AddBackward0>)\n",
      "Step 36 tensor(-0.6094, grad_fn=<NegBackward0>) tensor(0.1834, grad_fn=<MeanBackward0>) tensor(-0.4260, grad_fn=<AddBackward0>)\n",
      "Step 37 tensor(-0.5572, grad_fn=<NegBackward0>) tensor(0.1592, grad_fn=<MeanBackward0>) tensor(-0.3980, grad_fn=<AddBackward0>)\n",
      "Step 38 tensor(-0.6123, grad_fn=<NegBackward0>) tensor(0.1938, grad_fn=<MeanBackward0>) tensor(-0.4185, grad_fn=<AddBackward0>)\n",
      "Step 39 tensor(-0.5970, grad_fn=<NegBackward0>) tensor(0.1847, grad_fn=<MeanBackward0>) tensor(-0.4124, grad_fn=<AddBackward0>)\n",
      "Step 40 tensor(-0.5369, grad_fn=<NegBackward0>) tensor(0.1578, grad_fn=<MeanBackward0>) tensor(-0.3791, grad_fn=<AddBackward0>)\n",
      "Step 40, sampling 2000 molecules, validity: 88.5%, uniqueness: 1.25%.\n",
      "Step 41 tensor(-0.5850, grad_fn=<NegBackward0>) tensor(0.1576, grad_fn=<MeanBackward0>) tensor(-0.4273, grad_fn=<AddBackward0>)\n",
      "Step 42 tensor(-0.6267, grad_fn=<NegBackward0>) tensor(0.1630, grad_fn=<MeanBackward0>) tensor(-0.4636, grad_fn=<AddBackward0>)\n",
      "Step 43 tensor(-0.6127, grad_fn=<NegBackward0>) tensor(0.1487, grad_fn=<MeanBackward0>) tensor(-0.4639, grad_fn=<AddBackward0>)\n",
      "Step 44 tensor(-0.5345, grad_fn=<NegBackward0>) tensor(0.1528, grad_fn=<MeanBackward0>) tensor(-0.3817, grad_fn=<AddBackward0>)\n",
      "Step 45 tensor(-0.6036, grad_fn=<NegBackward0>) tensor(0.1592, grad_fn=<MeanBackward0>) tensor(-0.4444, grad_fn=<AddBackward0>)\n",
      "Step 46 tensor(-0.5917, grad_fn=<NegBackward0>) tensor(0.1742, grad_fn=<MeanBackward0>) tensor(-0.4175, grad_fn=<AddBackward0>)\n",
      "Step 47 tensor(-0.5723, grad_fn=<NegBackward0>) tensor(0.1743, grad_fn=<MeanBackward0>) tensor(-0.3980, grad_fn=<AddBackward0>)\n",
      "Step 48 tensor(-0.5284, grad_fn=<NegBackward0>) tensor(0.1620, grad_fn=<MeanBackward0>) tensor(-0.3665, grad_fn=<AddBackward0>)\n",
      "Step 49 tensor(-0.5822, grad_fn=<NegBackward0>) tensor(0.1475, grad_fn=<MeanBackward0>) tensor(-0.4347, grad_fn=<AddBackward0>)\n",
      "Step 50 tensor(-0.6390, grad_fn=<NegBackward0>) tensor(0.1682, grad_fn=<MeanBackward0>) tensor(-0.4708, grad_fn=<AddBackward0>)\n",
      "Step 50, sampling 2000 molecules, validity: 89.45%, uniqueness: 1.25%.\n",
      "Step 51 tensor(-0.6288, grad_fn=<NegBackward0>) tensor(0.1678, grad_fn=<MeanBackward0>) tensor(-0.4610, grad_fn=<AddBackward0>)\n",
      "Step 52 tensor(-0.6077, grad_fn=<NegBackward0>) tensor(0.1555, grad_fn=<MeanBackward0>) tensor(-0.4522, grad_fn=<AddBackward0>)\n",
      "Step 53 tensor(-0.6241, grad_fn=<NegBackward0>) tensor(0.1570, grad_fn=<MeanBackward0>) tensor(-0.4671, grad_fn=<AddBackward0>)\n",
      "Step 54 tensor(-0.6611, grad_fn=<NegBackward0>) tensor(0.1661, grad_fn=<MeanBackward0>) tensor(-0.4950, grad_fn=<AddBackward0>)\n",
      "Step 55 tensor(-0.6098, grad_fn=<NegBackward0>) tensor(0.1626, grad_fn=<MeanBackward0>) tensor(-0.4472, grad_fn=<AddBackward0>)\n",
      "Step 56 tensor(-0.6108, grad_fn=<NegBackward0>) tensor(0.1584, grad_fn=<MeanBackward0>) tensor(-0.4523, grad_fn=<AddBackward0>)\n",
      "Step 57 tensor(-0.6385, grad_fn=<NegBackward0>) tensor(0.1610, grad_fn=<MeanBackward0>) tensor(-0.4775, grad_fn=<AddBackward0>)\n",
      "Step 58 tensor(-0.6183, grad_fn=<NegBackward0>) tensor(0.1490, grad_fn=<MeanBackward0>) tensor(-0.4693, grad_fn=<AddBackward0>)\n",
      "Step 59 tensor(-0.5846, grad_fn=<NegBackward0>) tensor(0.1583, grad_fn=<MeanBackward0>) tensor(-0.4263, grad_fn=<AddBackward0>)\n",
      "Step 60 tensor(-0.5579, grad_fn=<NegBackward0>) tensor(0.1664, grad_fn=<MeanBackward0>) tensor(-0.3915, grad_fn=<AddBackward0>)\n",
      "Step 60, sampling 2000 molecules, validity: 89.3%, uniqueness: 1.25%.\n",
      "Step 61 tensor(-0.6456, grad_fn=<NegBackward0>) tensor(0.1514, grad_fn=<MeanBackward0>) tensor(-0.4943, grad_fn=<AddBackward0>)\n",
      "Step 62 tensor(-0.6150, grad_fn=<NegBackward0>) tensor(0.1453, grad_fn=<MeanBackward0>) tensor(-0.4697, grad_fn=<AddBackward0>)\n",
      "Step 63 tensor(-0.5651, grad_fn=<NegBackward0>) tensor(0.1467, grad_fn=<MeanBackward0>) tensor(-0.4184, grad_fn=<AddBackward0>)\n",
      "Step 64 tensor(-0.5896, grad_fn=<NegBackward0>) tensor(0.1582, grad_fn=<MeanBackward0>) tensor(-0.4314, grad_fn=<AddBackward0>)\n",
      "Step 65 tensor(-0.5877, grad_fn=<NegBackward0>) tensor(0.1704, grad_fn=<MeanBackward0>) tensor(-0.4172, grad_fn=<AddBackward0>)\n",
      "Step 66 tensor(-0.5710, grad_fn=<NegBackward0>) tensor(0.1418, grad_fn=<MeanBackward0>) tensor(-0.4292, grad_fn=<AddBackward0>)\n",
      "Step 67 tensor(-0.5585, grad_fn=<NegBackward0>) tensor(0.1570, grad_fn=<MeanBackward0>) tensor(-0.4015, grad_fn=<AddBackward0>)\n",
      "Step 68 tensor(-0.6030, grad_fn=<NegBackward0>) tensor(0.1511, grad_fn=<MeanBackward0>) tensor(-0.4519, grad_fn=<AddBackward0>)\n",
      "Step 69 tensor(-0.6673, grad_fn=<NegBackward0>) tensor(0.1490, grad_fn=<MeanBackward0>) tensor(-0.5183, grad_fn=<AddBackward0>)\n",
      "Step 70 tensor(-0.6397, grad_fn=<NegBackward0>) tensor(0.1589, grad_fn=<MeanBackward0>) tensor(-0.4808, grad_fn=<AddBackward0>)\n",
      "Step 70, sampling 2000 molecules, validity: 87.5%, uniqueness: 1.25%.\n",
      "Step 71 tensor(-0.5297, grad_fn=<NegBackward0>) tensor(0.1682, grad_fn=<MeanBackward0>) tensor(-0.3615, grad_fn=<AddBackward0>)\n",
      "Step 72 tensor(-0.5660, grad_fn=<NegBackward0>) tensor(0.1565, grad_fn=<MeanBackward0>) tensor(-0.4095, grad_fn=<AddBackward0>)\n",
      "Step 73 tensor(-0.6195, grad_fn=<NegBackward0>) tensor(0.1574, grad_fn=<MeanBackward0>) tensor(-0.4622, grad_fn=<AddBackward0>)\n",
      "Step 74 tensor(-0.6405, grad_fn=<NegBackward0>) tensor(0.1631, grad_fn=<MeanBackward0>) tensor(-0.4773, grad_fn=<AddBackward0>)\n",
      "Step 75 tensor(-0.6634, grad_fn=<NegBackward0>) tensor(0.1614, grad_fn=<MeanBackward0>) tensor(-0.5020, grad_fn=<AddBackward0>)\n",
      "Step 76 tensor(-0.5401, grad_fn=<NegBackward0>) tensor(0.1520, grad_fn=<MeanBackward0>) tensor(-0.3881, grad_fn=<AddBackward0>)\n",
      "Step 77 tensor(-0.5781, grad_fn=<NegBackward0>) tensor(0.1547, grad_fn=<MeanBackward0>) tensor(-0.4233, grad_fn=<AddBackward0>)\n",
      "Step 78 tensor(-0.6664, grad_fn=<NegBackward0>) tensor(0.1498, grad_fn=<MeanBackward0>) tensor(-0.5166, grad_fn=<AddBackward0>)\n",
      "Step 79 tensor(-0.5844, grad_fn=<NegBackward0>) tensor(0.1490, grad_fn=<MeanBackward0>) tensor(-0.4354, grad_fn=<AddBackward0>)\n",
      "Step 80 tensor(-0.6225, grad_fn=<NegBackward0>) tensor(0.1398, grad_fn=<MeanBackward0>) tensor(-0.4828, grad_fn=<AddBackward0>)\n",
      "Step 80, sampling 2000 molecules, validity: 89.64999999999999%, uniqueness: 1.25%.\n",
      "Step 81 tensor(-0.5971, grad_fn=<NegBackward0>) tensor(0.1551, grad_fn=<MeanBackward0>) tensor(-0.4421, grad_fn=<AddBackward0>)\n",
      "Step 82 tensor(-0.6023, grad_fn=<NegBackward0>) tensor(0.1411, grad_fn=<MeanBackward0>) tensor(-0.4612, grad_fn=<AddBackward0>)\n",
      "Step 83 tensor(-0.6474, grad_fn=<NegBackward0>) tensor(0.1437, grad_fn=<MeanBackward0>) tensor(-0.5036, grad_fn=<AddBackward0>)\n",
      "Step 84 tensor(-0.6222, grad_fn=<NegBackward0>) tensor(0.1487, grad_fn=<MeanBackward0>) tensor(-0.4735, grad_fn=<AddBackward0>)\n",
      "Step 85 tensor(-0.6287, grad_fn=<NegBackward0>) tensor(0.1577, grad_fn=<MeanBackward0>) tensor(-0.4710, grad_fn=<AddBackward0>)\n",
      "Step 86 tensor(-0.5857, grad_fn=<NegBackward0>) tensor(0.1640, grad_fn=<MeanBackward0>) tensor(-0.4217, grad_fn=<AddBackward0>)\n",
      "Step 87 tensor(-0.6022, grad_fn=<NegBackward0>) tensor(0.1501, grad_fn=<MeanBackward0>) tensor(-0.4521, grad_fn=<AddBackward0>)\n",
      "Step 88 tensor(-0.5746, grad_fn=<NegBackward0>) tensor(0.1610, grad_fn=<MeanBackward0>) tensor(-0.4136, grad_fn=<AddBackward0>)\n",
      "Step 89 tensor(-0.6575, grad_fn=<NegBackward0>) tensor(0.1501, grad_fn=<MeanBackward0>) tensor(-0.5074, grad_fn=<AddBackward0>)\n",
      "Step 90 tensor(-0.6220, grad_fn=<NegBackward0>) tensor(0.1426, grad_fn=<MeanBackward0>) tensor(-0.4794, grad_fn=<AddBackward0>)\n",
      "Step 90, sampling 2000 molecules, validity: 88.7%, uniqueness: 1.25%.\n",
      "Step 91 tensor(-0.6324, grad_fn=<NegBackward0>) tensor(0.1576, grad_fn=<MeanBackward0>) tensor(-0.4749, grad_fn=<AddBackward0>)\n",
      "Step 92 tensor(-0.6209, grad_fn=<NegBackward0>) tensor(0.1506, grad_fn=<MeanBackward0>) tensor(-0.4703, grad_fn=<AddBackward0>)\n",
      "Step 93 tensor(-0.5628, grad_fn=<NegBackward0>) tensor(0.1425, grad_fn=<MeanBackward0>) tensor(-0.4203, grad_fn=<AddBackward0>)\n",
      "Step 94 tensor(-0.6556, grad_fn=<NegBackward0>) tensor(0.1448, grad_fn=<MeanBackward0>) tensor(-0.5108, grad_fn=<AddBackward0>)\n",
      "Step 95 tensor(-0.6102, grad_fn=<NegBackward0>) tensor(0.1371, grad_fn=<MeanBackward0>) tensor(-0.4731, grad_fn=<AddBackward0>)\n",
      "Step 96 tensor(-0.6150, grad_fn=<NegBackward0>) tensor(0.1412, grad_fn=<MeanBackward0>) tensor(-0.4738, grad_fn=<AddBackward0>)\n",
      "Step 97 tensor(-0.5890, grad_fn=<NegBackward0>) tensor(0.1453, grad_fn=<MeanBackward0>) tensor(-0.4438, grad_fn=<AddBackward0>)\n",
      "Step 98 tensor(-0.6718, grad_fn=<NegBackward0>) tensor(0.1444, grad_fn=<MeanBackward0>) tensor(-0.5274, grad_fn=<AddBackward0>)\n",
      "Step 99 tensor(-0.6242, grad_fn=<NegBackward0>) tensor(0.1472, grad_fn=<MeanBackward0>) tensor(-0.4770, grad_fn=<AddBackward0>)\n",
      "Step 100 tensor(-0.6016, grad_fn=<NegBackward0>) tensor(0.1387, grad_fn=<MeanBackward0>) tensor(-0.4629, grad_fn=<AddBackward0>)\n",
      "Step 100, sampling 2000 molecules, validity: 90.75%, uniqueness: 1.25%.\n"
     ]
    }
   ],
   "source": [
    "from tqdm import tqdm\n",
    "import os\n",
    "\n",
    "# Directory where generator checkpoints are written.\n",
    "# NOTE(review): \"sverage\" is likely a typo for \"average\"; kept as-is so that\n",
    "# checkpoints already saved under this path stay reachable.\n",
    "save_dir = \"models_5_sverage_difference\"\n",
    "\n",
    "os.makedirs(f\"{save_dir}/heavy_atoms_{num_heavy_atom}\", exist_ok=True)\n",
    "# Loss terms (classes defined elsewhere in the notebook):\n",
    "# - criterion_js: Jensen-Shannon divergence term (constructed but unused below;\n",
    "#   its use is commented out in the loop).\n",
    "criterion_js = jenson_shannon_divergence(valid_state_mask=valid_state_mask, reduction=\"batchmean\")\n",
    "# - criterion_valid: loss computed from the valid-state mask.\n",
    "criterion_valid = valid_state_loss(valid_state_mask)\n",
    "# criterion_diversity = diversity_loss(valid_state_mask=valid_state_mask, reduction=\"batchmean\")\n",
    "# - criterion_diversity_2: average-difference probability loss over valid states.\n",
    "criterion_diversity_2 = average_difference_probability_loss(valid_state_mask)\n",
    "best_loss = 1e10  # lowest total loss seen so far; gates saving of best_generator.pt\n",
    "\n",
    "batch_size = 32\n",
    "opt = optim.Adam(generator.parameters(), lr=0.1)\n",
    "# History buffers; only loss_valid_state_history is appended to below.\n",
    "# NOTE(review): a later plotting cell reads `loss_history`, which is NOT defined\n",
    "# in this cell -- confirm it is created elsewhere before Restart & Run All.\n",
    "loss_js_history = []\n",
    "loss_valid_state_history = []\n",
    "loss_valid_history = []\n",
    "valid_per_steps = 10  # run the sampling-based validity check every N steps\n",
    "\n",
    "steps = 100\n",
    "for i in range(steps):\n",
    "    # Noise following a uniform distribution in range [0,pi/2)\n",
    "    noise = torch.rand(batch_size, n_qubits, device=device) * np.pi / 2\n",
    "    outputs, converted_noise = generator(noise)\n",
    "    # loss_js = criterion_js(outputs, converted_noise)\n",
    "    # loss_js_history.append(loss_js.detach().cpu())\n",
    "    loss_diversity = criterion_diversity_2(outputs)\n",
    "\n",
    "    loss_valid_state = criterion_valid(outputs)\n",
    "    loss_valid_state_history.append(loss_valid_state.detach().cpu())\n",
    "    # Total objective: valid-state term plus diversity term.\n",
    "    loss = loss_valid_state + loss_diversity\n",
    "        \n",
    "    opt.zero_grad()\n",
    "    loss.backward()\n",
    "    opt.step()\n",
    "    # Checkpoint whenever the total loss improves on the best seen so far.\n",
    "    if float(loss.detach().cpu()) < best_loss:\n",
    "        best_loss = float(loss.detach().cpu())\n",
    "        torch.save(generator.state_dict(), f'{save_dir}/heavy_atoms_{num_heavy_atom}/best_generator.pt')\n",
    "\n",
    "    print(f\"Step {i+1}\", loss_diversity, loss_valid_state, loss)\n",
    "    # Periodic evaluation: sample molecules and report validity/uniqueness,\n",
    "    # then save a step-tagged checkpoint.\n",
    "    if (i+1) % valid_per_steps == 0:\n",
    "        sample_smiles_list = generator.random_sample(sample_num=sample_num)\n",
    "        validity, uniqueness = calc_validity_and_uniqueness(sample_smiles_list)\n",
    "        print(f\"Step {i+1}, sampling {sample_num} molecules, validity: {validity*100}%, uniqueness: {uniqueness*100}%.\")\n",
    "        torch.save(generator.state_dict(), f'{save_dir}/heavy_atoms_{num_heavy_atom}/generator_{i+1}_steps.pt')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Counter({None: 409, 'CN=N': 243, 'C1NN1': 211, 'C=NN': 163, 'CNC': 113, 'C=NC': 108, 'C1=CN1': 83, 'C1CC1': 80, 'C=CN': 70, 'CNN': 55, 'C1CN1': 52, 'N=NN': 33, 'NNN': 31, 'C.CN': 30, 'C1=NN1': 27, 'C1=NN=1': 19, 'C1#CC1': 18, '[nH]1[nH]o1': 17, 'C1CO1': 16, 'C1=NC1': 14, 'CN=O': 14, 'C.N=N': 13, 'CCC': 11, 'C#CC': 10, 'C1=CN=1': 10, 'CN.N': 10, 'C=CC': 10, 'C1NO1': 9, 'CCN': 9, 'N1=NN1': 9, 'NNO': 8, 'CC=N': 8, '[nH]1[nH][nH]1': 6, 'NC=O': 6, 'C=NO': 5, 'C1=CC1': 5, 'NCN': 5, 'N=NO': 5, 'C.NN': 4, 'N=CN': 4, 'C.N.N': 4, 'NCO': 4, 'C1N=N1': 3, 'COC': 3, 'C#CN': 3, 'C=[N+]=[N-]': 3, 'C.C=N': 3, 'C=N.N': 2, 'CCO': 2, 'C1#CN1': 2, 'NN=O': 2, 'NON': 2, 'C1=CO1': 1, 'N=CO': 1, 'C1=NO1': 1, '[N-]=[N+]=O': 1, 'N=C=N': 1, 'CON': 1, 'CC#N': 1, 'C=CO': 1, 'C.C=C': 1, 'C1=CC=1': 1, 'NN': 1, 'C=C=N': 1, 'C=N': 1, 'CNO': 1})\n",
      "sampling 2000 molecules, validity: 79.55%, uniqueness: 3.25%.\n"
     ]
    }
   ],
   "source": [
    "from collections import Counter\n",
    "sample_num = 2000\n",
    "\n",
    "generator = PatchQuantumGenerator(n_generators, valid_state_mask)\n",
    "# Load model parameters from a previously trained checkpoint.\n",
    "# NOTE(review): path is hardcoded to the models_3_data_reuploading run -- confirm\n",
    "# this is the intended checkpoint when comparing against the cell above.\n",
    "generator.load_state_dict(torch.load('models_3_data_reuploading/heavy_atoms_3/generator_100_steps.pt'))\n",
    "generator.eval()  # set the model to evaluation mode (if needed)\n",
    "\n",
    "# Sample SMILES with fixed noise and summarise how often each string appears.\n",
    "sample_smiles_list = generator.random_sample(sample_num=sample_num, fixed_noise=True)\n",
    "print(Counter(sample_smiles_list))\n",
    "validity, uniqueness = calc_validity_and_uniqueness(sample_smiles_list)\n",
    "# print(loss)\n",
    "print(f\"sampling {sample_num} molecules, validity: {validity*100}%, uniqueness: {uniqueness*100}%.\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CCC\n",
      "C1CO1\n",
      "[nH]1[nH]o1\n",
      "C=CN\n",
      "C1=NN=1\n",
      "NC=O\n",
      "C=N.N\n",
      "C=C=N\n",
      "C=NN\n",
      "C1CC1\n",
      "N.NN\n",
      "NON\n",
      "C1NO1\n",
      "C.N.N\n",
      "OCO\n",
      "CC.N\n",
      "C1=CC=1\n",
      "N=NO\n",
      "CN=O\n",
      "NNO\n",
      "C#CO\n",
      "[nH]1oo1\n",
      "C.C=N\n",
      "C1=NC1\n",
      "C1=CN1\n",
      "[nH]1[nH][nH]1\n",
      "NN=O\n",
      "C1=CC1\n",
      "C.CC\n",
      "C1CN1\n",
      "CNN\n",
      "CC#N\n",
      "N=CN\n",
      "C.C=C\n",
      "C#CN\n",
      "CN.N\n",
      "None\n",
      "N1=NN1\n",
      "C1=CN=1\n",
      "N=NN\n",
      "C=C=C\n",
      "NNN\n",
      "C=NO\n",
      "C1N=N1\n",
      "N=CO\n",
      "C=CO\n",
      "CC=O\n",
      "NCN\n",
      "NCO\n",
      "CCO\n",
      "CNC\n",
      "CCN\n",
      "C.CN\n",
      "C=CC\n",
      "CC=N\n",
      "C.N=N\n",
      "CN=N\n",
      "C1=NN1\n",
      "C1NN1\n",
      "C#CC\n",
      "CNO\n",
      "N1=NO1\n",
      "N#CN\n",
      "C1#CN1\n",
      "C=C.N\n",
      "C=NC\n",
      "C1#CC1\n",
      "55\n",
      "2000\n",
      "67\n"
     ]
    }
   ],
   "source": [
    "# Print every distinct sampled SMILES, then report: how many distinct samples\n",
    "# are parseable (non-None) single-fragment molecules (no '.' separator), the\n",
    "# total sample count, and the number of distinct samples.\n",
    "unique_smiles = set(sample_smiles_list)\n",
    "single_fragment_count = sum(1 for smi in unique_smiles if smi and \".\" not in smi)\n",
    "for smi in unique_smiles:\n",
    "    print(smi)\n",
    "print(single_fragment_count)\n",
    "print(len(sample_smiles_list))\n",
    "print(len(unique_smiles))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAeoAAAHqCAYAAADLbQ06AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/TGe4hAAAACXBIWXMAAA9hAAAPYQGoP6dpAABKUklEQVR4nO3deXhTVf4G8Dd70i1pKV0pbdn3UpZCiyM4VgsiAuKGaIFxGRVU9Oc4VMcNRys6KuPAgLiAjjqAIwLDKAKVnbJTFtmhtAW6UEqb7mmS+/uj5GKkLW1z05vS9/M8eTTJTfLNAfr2nHvuOQpBEAQQERGRR1LKXQARERHVj0FNRETkwRjUREREHoxBTURE5MEY1ERERB6MQU1EROTBGNREREQejEFNRETkwdRyF9DS7HY7Lly4AF9fXygUCrnLISKiNkgQBJSWliIsLAxKZcN95jYX1BcuXEBERITcZRARESEnJwcdOnRo8Jg2F9S+vr4AahvHz89P5mqIiKgtMpvNiIiIEDOpIW0uqB3D3X5+fgxqIiKSVWNOwXIyGRERkQdjUBMREXkwBjUREZEHa3PnqIludHa7HRaLRe4yiNo0jUYDlUolyXsxqIluIBaLBZmZmbDb7XKXQtTmmUwmhISEuLxmB4Oa6AYhCAJyc3OhUqkQERFx3UUUiMg9BEFARUUFCgoKAAChoaEuvR+DmugGYbVaUVFRgbCwMHh5ecldDlGbZjAYAAAFBQUICgpyaRicv3IT3SBsNhsAQKvVylwJEQEQf2Guqalx6X0Y1EQ3GK5hT+QZpPq3yKAmIiLyYAxqIrrhREVFYc6cObK/hyuOHz+OkJAQlJaWAgAWL14Mk8kkSy0jRozAjBkzJHu/119/Hf3795fs/eQwc+ZMPP300y3yWQxqIpKNQqFo8Pb666836313796Nxx9/XNpiW1hKSgqefvrpRm3a0BRy/wICAC+88ALS0tJk+/y77roLHTt2hF6vR2hoKB5++GFcuHBBfH7jxo0YO3YsQkND4e3tjf79++Prr792eo8XXngBX3zxBc6cOeP2ehnURCSb3Nxc8TZnzhz4+fk5PfbCCy+IxwqCAKvV2qj3bd++faue+Z6dnY3Vq1djypQpcpfiFj4+PmjXrp1sn3/LLbdg2bJlOH78OL777jucPn0a99xzj/j89u3b0a9fP3z33Xc4ePAgpk6diuTkZKxevVo8JjAwEElJSZg/f77b62VQE5FsQkJCxJvRaIRCoRDvHzt2DL6+vvjxxx8xcOBA6HQ6bN26FadPn8bYsWMRHBwMHx8fDB48GOvXr3d639/2GhUKBT799FOMHz8eXl5e6Nq1K1atWtWkWrOzszF27Fj4+PjAz88P9913H/Lz88XnDxw4gFtuuQW+vr7w8/PDwIEDsWfPHgBAVlYWxowZA39/f3h7e6N379744Ycf6v2sZcuWISYmBuHh4dc8t2LFCnTt2hV6vR5JSUnIyckRn7te24wYMQJZWVl47rnnxFELh23btmHEiBHw8vKCv78/kpKScPnyZfF5u92OF198EQEBAQgJCbnuaMfGjRsRFxcHb29vmEwmDBs2DFlZWQCuHfquazQlKipKfP7w4cMYNWoUfHx8EBwcjIcffhiFhYUNfn5DnnvuOQwdOhSRkZFISEjAzJkzsWPHDnF29ksvvYQ333wTCQkJ6Ny5M5599lmMHDkSy5cvd3qfMWPGYMmSJc2uo7EY1EQ3KEEQUGGxynITBEGy7zFz5ky88847OHr0KPr164eysjLccccdSEtLw/79+zFy5EiMGTMG2dnZDb7PG2+8gfvuuw8HDx7EHXfcgUmTJqGoqKhRNdjtdowdOxZFRUXYtGkT1q1bhzNnzuD+++8Xj5k0aRI6dOiA3bt3Y+/evZg5cyY0Gg0AYNq0aaiursbmzZtx6NAhzJ49Gz4+PvV+3pYtWzBo0KBrHq+oqMBbb72FL7/8Etu2bUNx
cTEeeOAB8fnrtc3y5cvRoUMHzJo1Sxy1AICMjAzceuut6NWrF9LT07F161aMGTNGvOQPAL744gt4e3tj586dePfddzFr1iysW7euzvqtVivGjRuH4cOH4+DBg0hPT8fjjz9e7yzoX4+inDp1Cl26dMHNN98MACguLsbvf/97xMbGYs+ePVizZg3y8/Nx3333ia9/++234ePj0+Ctvr8fRUVF+Prrr5GQkCD+edWlpKQEAQEBTo/FxcXh3LlzOHv2bL2vkwIXPHHBvA2ncDTXjKnDojEw0l/ucoicVNbY0OvVn2T57COzkuCllebHy6xZs3DbbbeJ9wMCAhATEyPef/PNN/H9999j1apVmD59er3vM2XKFEycOBFA7Q/2jz76CLt27cLIkSOvW0NaWhoOHTqEzMxMREREAAC+/PJL9O7dG7t378bgwYORnZ2NP/3pT+jRowcAoGvXruLrs7OzMWHCBPTt2xcA0KlTpwY/Lysrq86grqmpwdy5czFkyBAAteHZs2dP7Nq1C3FxcYiJiWmwbQICAqBSqeDr64uQkBDxuHfffReDBg3CP//5T/Gx3r17O312v3798Nprr4nfbe7cuUhLS3P6s3Ewm80oKSnBnXfeic6dOwMAevbsWe/3ddQiCAImTJgAo9GIjz/+GAAwd+5cxMbG4u233xaP//zzzxEREYETJ06gW7dueOKJJ5yCuy5hYWFO9//85z9j7ty5qKiowNChQ52GtX9r2bJl2L17t1jTb98zKyvLaQRAauxRu2D76UKsPpiLnKIKuUshumH9NrDKysrwwgsvoGfPnjCZTPDx8cHRo0ev26Pu16+f+P/e3t7w8/MTl3i8nqNHjyIiIkIMaQDo1asXTCYTjh49CgB4/vnn8eijjyIxMRHvvPMOTp8+LR77zDPP4K9//SuGDRuG1157DQcPHmzw8yorK6HX6695XK1WY/DgweL9Hj16ONXQ3LZx9Kgb8uv2A2qXxayv/QICAjBlyhQkJSVhzJgx+Pvf/y723hvy0ksvIT09HStXrhRX9jpw4AA2bNjg1Dt2/DLkaOOAgAB06dKlwZta7fyL45/+9Cfs378fa9euhUqlQnJycp0jQRs2bMDUqVPxySefXPPLi6PGigr3ZgB71C4waGqXhKuqsV3nSKKWZ9CocGRWkmyfLRVvb2+n+y+88ALWrVuHv/3tb+jSpQsMBgPuueee6+4Y9tthTYVCIenmJa+//joefPBB/O9//8OPP/6I1157DUuWLMH48ePx6KOPIikpCf/73/+wdu1apKam4v3336/38p7AwECn88ON1dy2cQROQ5rafosWLcIzzzyDNWvWYOnSpfjLX/6CdevWYejQoXUe/9VXX+HDDz/Exo0bnc7Nl5WVYcyYMZg9e/Y1r3Gsof3222879bjrcuTIEXTs2FG8HxgYiMDAQHTr1g09e/ZEREQEduzYgfj4ePGYTZs2YcyYMfjwww+RnJx8zXs6Tp20b9++wc92FYPaBborP4wqGdTkgRQKhWTDz55k27ZtmDJlCsaPHw+g9ge5u88R9uzZEzk5OcjJyRF71UeOHEFxcTF69eolHtetWzd069YNzz33HCZOnIhFixaJdUZEROCJJ57AE088gZSUFHzyySf1BnVsbCyOHDlyzeNWqxV79uxBXFwcgNprrYuLi8Vh5ca0jVardTr3DNT2ltPS0vDGG280o3XqFxsbi9jYWKSkpCA+Ph7ffPNNnUGdnp6ORx99FB9//PE1zw8YMADfffcdoqKirukVOzRn6PvXHL9wVFdXi49t3LgRd955J2bPnl3vpX6HDx+GRqO5pqctNQ59u0CvdvSouaUgUUvp2rUrli9fjoyMDBw4cAAPPvig27f1TExMRN++fTFp0iTs27cPu3btQnJyMoYPH45BgwahsrIS06dPx8aNG5GVlYVt27Zh9+7dYoDOmDEDP/30EzIzM7Fv3z5s2LChwXO2SUlJSE9PvyZQNRoNnn76aezc
uRN79+7FlClTMHToUDG4G9M2UVFR2Lx5M86fPy/OnE5JScHu3bvx1FNP4eDBgzh27Bjmz5/f7JnVmZmZSElJQXp6OrKysrB27VqcPHmyzu+cl5eH8ePH44EHHkBSUhLy8vKQl5eHixcvAqidiFdUVISJEydi9+7dOH36NH766SdMnTpVbJ+mDH3v3LkTc+fORUZGBrKysvDzzz9j4sSJ6Ny5s9ib3rBhA0aPHo1nnnkGEyZMEGv67eTDLVu24He/+12jRiRcwaB2gUFb23wc+iZqOR988AH8/f2RkJCAMWPGICkpCQMGDHDrZyoUCqxcuRL+/v64+eabkZiYiE6dOmHp0qUAAJVKhUuXLiE5ORndunXDfffdh1GjRok9VJvNhmnTpqFnz54YOXIkunXr5jRx67dGjRoFtVp9zWVnXl5e+POf/4wHH3wQw4YNg4+Pj1gD0Li2mTVrFs6ePYvOnTuLQ7bdunXD2rVrceDAAcTFxSE+Ph4rV66stwd7PV5eXjh27BgmTJiAbt264fHHH8e0adPwxz/+8Zpjjx07hvz8fHzxxRcIDQ0Vb45z8WFhYdi2bRtsNhtuv/129O3bFzNmzIDJZGrWVq5eXl5Yvnw5br31VnTv3h2PPPII+vXrh02bNkGn0wGonaRXUVGB1NRUp5ruvvtup/dasmQJHnvssWa0UNMoBCmvo2gFzGYzjEYjSkpK4Ofn59J7/XX1EXy6NRN/vLkTUu6o/7djopZQVVWFzMxMREdH1zkRiVqXefPmYdWqVfjpJ3lm7lPDfvzxR/zf//0fDh48WO8vNA39m2xKFt14J7BakJ6TyYjITf74xz+iuLgYpaWlki8jSq4rLy/HokWLmj3q0BQMahcYtDxHTUTuoVar8fLLL8tdBtXj10uOuhvPUbtAp75yjtrKHjUREbkHg9oFjqHvSguDmoiI3INB7QJxwRMrh77Jc7Sx+aFEHkuqf4sMahdwMhl5EpWq9u/j9VahIqKW4VhatKHNPhqDk8lcoNfwOmryHGq1Gl5eXrh48SI0Gk2zrjElItcJgoCKigoUFBTAZDKJv0Q3F4PaBVzrmzyJQqFAaGgoMjMzxX1/iUg+JpPJaZey5mJQu0Cn4eVZ5Fm0Wi26du3K4W8imWk0Gpd70g4Mahc4hr65KQd5EqVSyZXJiG4gPInlAg59ExGRu8ka1KmpqRg8eDB8fX0RFBSEcePG4fjx4w2+ZvHixVAoFE43uXoPjlnf1Rz6JiIiN5E1qDdt2oRp06Zhx44dWLduHWpqanD77bejvLy8wdf5+fkhNzdXvMk1ccYR1BabHTY7r10lIiLpyXqOes2aNU73Fy9ejKCgIOzduxc333xzva9TKBSSzKRzlWPoG6gd/vbW8ZQ/ERFJy6POUZeUlACo3QS8IWVlZYiMjERERATGjh2LX375pSXKu4ZjrW+A56mJiMg9PCao7XY7ZsyYgWHDhqFPnz71Hte9e3d8/vnnWLlyJb766ivY7XYkJCTg3LlzdR5fXV0Ns9nsdJOKUqmAVs2Z30RE5D4eE9TTpk3D4cOHsWTJkgaPi4+PR3JyMvr374/hw4dj+fLlaN++PT7++OM6j09NTYXRaBRvERERktZt4LXURETkRh4R1NOnT8fq1auxYcMGdOjQoUmv1Wg0iI2NxalTp+p8PiUlBSUlJeItJydHipJFXEaUiIjcSdbZT4Ig4Omnn8b333+PjRs3Ijo6usnvYbPZcOjQIdxxxx11Pq/T6aDT6VwttV7cmIOIiNxJ1qCeNm0avvnmG6xcuRK+vr7Iy8sDABiNRhgMBgBAcnIywsPDkZqaCgCYNWsWhg4dii5duqC4uBjvvfcesrKy8Oijj8ryHTj0TURE7iRrUM+fPx8AMGLECKfHFy1ahClTpgAAsrOznXYBunz5Mh577DHk5eXB398fAwcOxPbt29GrV6+WKtuJjj1qIiJyI9mHvq9n48aNTvc//PBDfPjhh26qqOn0nPVNRERu
5BGTyVozg5Y9aiIich8GtYv06itBbeU5aiIikh6D2kXi5VkW9qiJiEh6DGoXceibiIjciUHtIp049M2gJiIi6TGoXeRY8KTSwnPUREQkPQa1i8Rz1OxRExGRGzCoXWTggidERORGDGoXca1vIiJyJwa1i67unsVz1EREJD0GtYvYoyYiIndiULtInPXNoCYiIjdgULtIz20uiYjIjRjULnLM+q5mj5qIiNyAQe0ix2QyDn0TEZE7MKhdxMlkRETkTgxqFxl4jpqIiNyIQe0i3a+GvgVBkLkaIiK60TCoXeQY+gaAait71UREJC0GtYsMvw5qDn8TEZHEGNQu0qiUUCkVALiDFhERSY9BLQG9+sp5aguDmoiIpMWgloBBe2XmN3vUREQkMQa1BHRqXqJFRETuwaCWgLg6GYe+iYhIYgxqCXDom4iI3IVBLQG9mhtzEBGRezCoJcA9qYmIyF0Y1BLgntREROQuDGoJOCaTcQctIiKSGoNaAhz6JiIid2FQS4BbXRIRkbswqCXgGPrmrG8iIpIag1oCHPomIiJ3YVBL4OqsbwY1ERFJi0EtAV6eRURE7sKgloC41jd71EREJDEGtQQMHPomIiI3YVBLwDH0Xc2hbyIikhiDWgIc+iYiIndhUEuAs76JiMhdGNQSEIOa+1ETEZHEGNQScOxHXWnhOWoiIpIWg1oCXEKUiIjchUEtAYOWQ99EROQeDGoJOIa+a2wCrDYOfxMRkXQY1BJwTCYDgCorg5qIiKTDoJaATn21GXmJFhERSYlBLQGlUiGGdaWFQU1ERNJhUEtEXEaUE8qIiEhCDGqJGLjVJRERuQGDWiJc75uIiNyBQS0RrvdNRETuwKCWiJ5D30RE5AYMaolw6JuIiNyBQS0RDn0TEZE7MKgl4pj1zY05iIhISgxqiTh61Bz6JiIiKTGoJeI4R83JZEREJCUGtUR4jpqIiNyBQS0RDn0TEZE7MKgl4tiTmkPfREQkJQa1RAza2qbkrG8iIpISg1oi4jlq7p5FREQSkjWoU1NTMXjwYPj6+iIoKAjjxo3D8ePHr/u6b7/9Fj169IBer0ffvn3xww8/tEC1DXMMfXM/aiIikpKsQb1p0yZMmzYNO3bswLp161BTU4Pbb78d5eXl9b5m+/btmDhxIh555BHs378f48aNw7hx43D48OEWrPxaei3PURMRkfQUgiAIchfhcPHiRQQFBWHTpk24+eab6zzm/vvvR3l5OVavXi0+NnToUPTv3x8LFiy47meYzWYYjUaUlJTAz89PstrX/pKHx/+1F7EdTfj+qWGSvS8REd14mpJFHnWOuqSkBAAQEBBQ7zHp6elITEx0eiwpKQnp6el1Hl9dXQ2z2ex0cwfx8iwOfRMRkYQ8JqjtdjtmzJiBYcOGoU+fPvUel5eXh+DgYKfHgoODkZeXV+fxqampMBqN4i0iIkLSuh0MV4a+q60c+iYiIul4TFBPmzYNhw8fxpIlSyR935SUFJSUlIi3nJwcSd/f4ep11OxRExGRdNRyFwAA06dPx+rVq7F582Z06NChwWNDQkKQn5/v9Fh+fj5CQkLqPF6n00Gn00lWa324HzUREbmDrD1qQRAwffp0fP/99/j5558RHR193dfEx8cjLS3N6bF169YhPj7eXWU2Ctf6JiIid5C1Rz1t2jR88803WLlyJXx9fcXzzEajEQaDAQCQnJyM8PBwpKamAgCeffZZDB8+HO+//z5Gjx6NJUuWYM+ePVi4cKFs3wP4dVDbIQgCFAqFrPUQEdGNQdYe9fz581FSUoIRI0YgNDRUvC1dulQ8Jjs7G7m5ueL9hIQEfPPNN1i4cCFiYmLwn//8BytWrGhwAlpLcAx9A5xQRkRE0pG1R92YS7g3btx4zWP33nsv7r33XjdU1HyOHjVQO/z96/tERETN5TGzvls7jUoJtbJ2uJurkxERkVQY1BLintRERCQ1BrWEOPObiIikxqCWkNeV1ckqLFaZKyEiohsF
g1pCPrrauXll1exRExGRNBjUEhKDuoo9aiIikgaDWkI++tqgLq9mUBMRkTQY1BLyvtKjLmVQExGRRBjUEnIMfbNHTUREUmFQS8hHVzvru4xBTUREEmFQS8hHpwHAoCYiIukwqCXk7ehRc9Y3ERFJhEEtIV/O+iYiIokxqCXEWd9ERCQ1BrWEOOubiIikxqCW0NUlRBnUREQkDQa1hLgyGRERSY1BLSFv7ZVz1Jz1TUREEmFQS8gx67vaakeNzS5zNUREdCNgUEvIMesb4PA3ERFJg0EtIY1KCZ26tkk5oYyIiKTAoJYYZ34TEZGUGNQS48xvIiKSEoNaYpz5TUREUmJQS+xqj9omcyVERHQjYFBL7Oo56hqZKyEiohsBg1piV4OaPWoiInIdg1pijmupuSc1ERFJgUEtMXFPaguDmoiIXMeglhhnfRMRkZQY1BLjddRERCQlBrXEfHQqAFyZjIiIpMGglpiPTgOAQU1ERNJgUEvM29Gj5jlqIiKSAINaYpz1TUREUmJQS4zXURMRkZQY1BLjNpdERCQlBrXEHEFdbbWjxmaXuRoiImrtGNQScwx9A7yWmoiIXMeglphGpYROXdusXJ2MiIhcxaB2A878JiIiqTCo3YAzv4mISCoMajfgzG8iIpIKg9oNvBnUREQkEQa1G/jquIMWERFJg0HtBo4eNWd9ExGRqxjUbnB1T2qbzJUQEVFrx6B2g6uTyWpkroSIiFo7BrUbXA1q9qiJiMg1DGo34KxvIiKSCoPaDTjrm4iIpMKgdgOuTEZERFJhULuBY9Y3h76JiMhVDGo38NGpADCoiYjIdQxqN/DRaQDwHDUREbmOQe0G3ld61KUMaiIichGD2g18r/SoLVY7LFa7zNUQEVFrxqB2A0ePGuDwNxERuYZB7QZqlRJ6TW3TckIZERG5gkHtJj5cnYyIiCTAoHYTH65ORkREEmBQuwnX+yYiIikwqN2EQ99ERCQFWYN68+bNGDNmDMLCwqBQKLBixYoGj9+4cSMUCsU1t7y8vJYpuAk49E1ERFKQNajLy8sRExODefPmNel1x48fR25urngLCgpyU4XN51jvu5QbcxARkQvUcn74qFGjMGrUqCa/LigoCCaTSfqCJOQt9qhtMldCREStWas8R92/f3+Ehobitttuw7Zt2xo8trq6Gmaz2enWEnzFc9Q1LfJ5RER0Y2pVQR0aGooFCxbgu+++w3fffYeIiAiMGDEC+/btq/c1qampMBqN4i0iIqJFar0665s9aiIiaj5Zh76bqnv37ujevbt4PyEhAadPn8aHH36If/3rX3W+JiUlBc8//7x432w2t0hYc9Y3ERFJoVUFdV3i4uKwdevWep/X6XTQ6XQtWFEtzvomIiIptKqh77pkZGQgNDRU7jKu4Zj1XcZZ30RE5AJZe9RlZWU4deqUeD8zMxMZGRkICAhAx44dkZKSgvPnz+PLL78EAMyZMwfR0dHo3bs3qqqq8Omnn+Lnn3/G2rVr5foK9eLKZEREJAVZg3rPnj245ZZbxPuOc8mTJ0/G4sWLkZubi+zsbPF5i8WC//u//8P58+fh5eWFfv36Yf369U7v4Sl4jpqIiKSgEARBkLuIlmQ2m2E0GlFSUgI/Pz+3fc7xvFIkzdmMdt5a7H3lNrd9DhERtT5NyaJWf47aU4krk7FHTURELmBQu4mPtjaoLVY7LFa7zNUQEVFrxaB2Ex+9GgpF7f+bq7g6GRERNQ+D2k1USgX89BoAQHEFg5qIiJqHQe1GJi9HUFtkroSIiFqrZgV1Tk4Ozp07J97ftWsXZsyYgYULF0pW2I3AZGCPmoiIXNOsoH7wwQexYcMGAEBeXh5uu+027Nq1Cy+//DJmzZolaYGtmclLCwAormRQExFR8zQrqA8fPoy4uDgAwLJly9CnTx9s374dX3/9NRYvXixlfa0ah76JiMhVzQrqmpoacaOL9evX46677gIA9OjRA7m5udJV18px6JuIiFzVrKDu
3bs3FixYgC1btmDdunUYOXIkAODChQto166dpAW2ZkZx6Js9aiIiap5mBfXs2bPx8ccfY8SIEZg4cSJiYmIAAKtWrRKHxAnw92KPmoiIXNOsTTlGjBiBwsJCmM1m+Pv7i48//vjj8PLykqy41s7EoCYiIhc1q0ddWVmJ6upqMaSzsrIwZ84cHD9+HEFBQZIW2JqZDBz6JiIi1zQrqMeOHSvuEV1cXIwhQ4bg/fffx7hx4zB//nxJC2zN2KMmIiJXNSuo9+3bh9/97ncAgP/85z8IDg5GVlYWvvzyS3z00UeSFtiaOa6jLmFQExFRMzUrqCsqKuDr6wsAWLt2Le6++24olUoMHToUWVlZkhbYmjkuzyqttqLGxh20iIio6ZoV1F26dMGKFSuQk5ODn376CbfffjsAoKCg4LobYLclfgaNuINWCVcnIyKiZmhWUL/66qt44YUXEBUVhbi4OMTHxwOo7V3HxsZKWmBrxh20iIjIVc26POuee+7BTTfdhNzcXPEaagC49dZbMX78eMmKuxGYvDQoqazhMqJERNQszQpqAAgJCUFISIi4i1aHDh242EkdTAYNssAeNRERNU+zhr7tdjtmzZoFo9GIyMhIREZGwmQy4c0334TdzklTv8YdtIiIyBXN6lG//PLL+Oyzz/DOO+9g2LBhAICtW7fi9ddfR1VVFd566y1Ji2zNuIMWERG5ollB/cUXX+DTTz8Vd80CgH79+iE8PBxPPfUUg/pXuIMWERG5ollD30VFRejRo8c1j/fo0QNFRUUuF3Uj4Q5aRETkimYFdUxMDObOnXvN43PnzkW/fv1cLupGwh20iIjIFc0a+n733XcxevRorF+/XryGOj09HTk5Ofjhhx8kLbC1c5yj5oInRETUHM3qUQ8fPhwnTpzA+PHjUVxcjOLiYtx999345Zdf8K9//UvqGls1xw5alzmZjIiImqHZ11GHhYVdM2nswIED+Oyzz7Bw4UKXC7tRcActIiJyRbN61NR43EGLiIhcwaB2M+6gRURErmBQuxl30CIiIlc06Rz13Xff3eDzxcXFrtRyQ3LsoFW7MUcNAn10cpdEREStSJOC2mg0Xvf55ORklwq6ETl20CrhoidERNRETQrqRYsWuauOG5pjB63L5Rz6JiKipuE56hbAHbSIiKi5GNQtgDtoERFRczGoWwB30CIiouZiULcA7qBFRETNxaBuAdxBi4iImotB3QK4gxYRETUXg7oFcActIiJqLgZ1C+AOWkRE1FwM6hbAHbSIiKi5GNQtgDtoERFRczGoWwB30CIiouZiULcAxw5aAM9TExFR0zCoW8jVS7Q485uIiBqPQd1CHOepuYMWERE1BYO6hXAHLSIiag4GdQvhDlpERNQcDOoW4hj65qxvIiJqCgZ1C3HsoMVlRImIqCkY1C2EO2gREVFzMKhbCHfQIiKi5mBQtxDuoEVERM3BoG4hjh41r6MmIqKmYFC3kHbeOgDApfJqCIIgczVERNRaMKhbSKBv7dB3VY0d5RabzNUQEVFrwaBuIV5aNby0KgBAYWm1zNUQEVFrwaBuQYE+tcPfhWUMaiIiahwGdQsK9Kkd/mZQExFRYzGoW1B739oe9cUyXqJFRESNw6BuQY6h74s8R01ERI0ka1Bv3rwZY8aMQVhYGBQKBVasWHHd12zcuBEDBgyATqdDly5dsHjxYrfXKRWeoyYioqaSNajLy8sRExODefPmNer4zMxMjB49GrfccgsyMjIwY8YMPProo/jpp5/cXKk0Aq8MfXPWNxERNZZazg8fNWoURo0a1ejjFyxYgOjoaLz//vsAgJ49e2Lr1q348MMPkZSU5K4yJdOek8mIiKiJWtU56vT0dCQmJjo9lpSUhPT0dJkqapqrQ9+cTEZERI0ja4+6qfLy8hAcHOz0WHBwMMxmMyorK2EwGK55TXV1Naqrr/ZgzWaz2+usD89RExFRU7WqHnVzpKamwmg0ireIiAjZanFcnlVhsaG8
2ipbHURE1Hq0qqAOCQlBfn6+02P5+fnw8/OrszcNACkpKSgpKRFvOTk5LVFqnbx1ahg0V5YRZa+aiIgaoVUNfcfHx+OHH35wemzdunWIj4+v9zU6nQ46nc7dpTVaoK8WOUWVKCyrRmQ7b7nLISIiDydrj7qsrAwZGRnIyMgAUHv5VUZGBrKzswHU9oaTk5PF45944gmcOXMGL774Io4dO4Z//vOfWLZsGZ577jk5ym+Wq4uecEIZERFdn6xBvWfPHsTGxiI2NhYA8PzzzyM2NhavvvoqACA3N1cMbQCIjo7G//73P6xbtw4xMTF4//338emnn7aKS7McOKGMiIiaQtah7xEjRkAQhHqfr2vVsREjRmD//v1urMq9GNRERNQUrWoy2Y3AsegJ1/smIqLGYFC3MMclWuxRExFRYzCoWxhXJyMioqZgULewQPaoiYioCRjULUzsUfMcNRERNQKDuoUFXplMVm6xocLCZUSJiKhhDOoW5qNTQ6eubfZCLnpCRETXwaBuYQqF4urqZDxPTURE18GglgEv0SIiosZiUMuAq5MREVFjMahl0N63dkIZz1ETEdH1MKhlwB41ERE1FoNaBle3umRQExFRwxjUMmCPmoiIGotBLQPHoicMaiIiuh4GtQyuXp7FyWRERNQwBrUMHBtzlFVbUVVjk7kaIiLyZAxqGfjq1NBeWUaUE8qIiKghDGoZKBQKtOcyokRE1AgMapmIE8rYoyYiogYwqGVy9RItTigjIqL6Mahlwo05iIioMRjUMuGiJ0RE1BgMaplw0RMiImoMBrVMgv30AIALxVUyV0JERJ6MQS2TyHbeAICzl8plroSIiDwZg1omUYFeAIDiihpcLufMbyIiqhuDWiZeWjVCrgx/Z7JXTURE9WBQy8jRqz5byKAmIqK6MahlFB3oAwDIZFATEVE9GNQyir7So2ZQExFRfRjUMnL0qDnzm4iI6sOglpHYo75YDkEQZK6GiIg8EYNaRhEBXlAqgHKLjdtdEhFRnRjUMtKpVQj3NwCo7VUTERH9FoNaZlFcoYyIiBrAoJZZp8DaoM4srJC5EiIi8kQMaplFiUFdJnMlRETkiRjUMnME9Vn2qImIqA4Mapk5hr7PXiqH3c5LtIiIyBmDWmbhJgPUSgWqrXbkmrk3NREROWNQy0ytUqJjO27OQUREdWNQe4DoK5donWFQExHRbzCoPcDVCWUMaiIicsag9gDR4iVaDGoiInLGoPYA0exRExFRPRjUHsAx9J1dVAGrzS5zNURE5EkY1B4g1E8PnVoJq13AucuVcpdDREQehEHtAZRKhbg5RyY35yAiol9hUHsIcUIZt7skIqJfYVB7iOj2tUF9sqBU5kqIiMiTMKg9xICO/gCAnWeKZK6EiIg8CYPaQ8RFB0CpqF2dLK+Ea34TEVEtBrWHMBo06BNuBACknymUuRoiIvIUDGoPEt+pHQAg/fQlmSshIiJPwaD2IPGda4N6O4OaiIiuYFB7kMFRAVArFTh3uRI5RRVyl0NERB6AQe1BvHVq9OvgOE/NXjURETGoPY5j+JvnqYmICGBQe5yEzoEAaoNaEASZqyEiIrkxqD3MwEh/aFVK5JmrcPYSz1MTEbV1DGoPo9eo0L+jCQCw/TSvpyYiausY1B4ogeepiYjoCga1B3IsfLLjDM9TExG1dR4R1PPmzUNUVBT0ej2GDBmCXbt21Xvs4sWLoVAonG56vb4Fq3W//h1N0KmVKCyz4GRBmdzlEBGRjGQP6qVLl+L555/Ha6+9hn379iEmJgZJSUkoKCio9zV+fn7Izc0Vb1lZWS1Ysfvp1CoMjgoAAGw+cVHmaoiISE6yB/UHH3yAxx57DFOnTkWvXr2wYMECeHl54fPPP6/3NQqFAiEhIeItODi4BStuGbf1qv1O/z1wQeZKiIhITrIGtcViwd69e5GYmCg+plQqkZiYiPT09HpfV1ZWhsjISERERGDs2LH45Zdf6j22uroaZrPZ6dYa
jO4XCpVSgQPnSpBZWC53OUREJBNZg7qwsBA2m+2aHnFwcDDy8vLqfE337t3x+eefY+XKlfjqq69gt9uRkJCAc+fO1Xl8amoqjEajeIuIiJD8e7hDoI8ON3WpXfxkZcZ5mashIiK5yD703VTx8fFITk5G//79MXz4cCxfvhzt27fHxx9/XOfxKSkpKCkpEW85OTktXHHzje0fBgBYlXGBs7+JiNootZwfHhgYCJVKhfz8fKfH8/PzERIS0qj30Gg0iI2NxalTp+p8XqfTQafTuVyrHG7vHQK95hDOFJbj8Hkz+l7ZsIOIiNoOWXvUWq0WAwcORFpamviY3W5HWloa4uPjG/UeNpsNhw4dQmhoqLvKlI2PTo3EnrWnBVZw+JuIqE2Sfej7+eefxyeffIIvvvgCR48exZNPPony8nJMnToVAJCcnIyUlBTx+FmzZmHt2rU4c+YM9u3bh4ceeghZWVl49NFH5foKbjW2fziA2tnfNjuHv4mI2hpZh74B4P7778fFixfx6quvIi8vD/3798eaNWvECWbZ2dlQKq/+PnH58mU89thjyMvLg7+/PwYOHIjt27ejV69ecn0FtxrerT2MBg0KSqux88wlJFyZYEZERG2DQmhjs5TMZjOMRiNKSkrg5+cndzmNkrL8EP69Kxv3DeqAd++JkbscIiJyUVOySPahb7o+x+zvHw/noarGJnM1RETUkhjUrUBcVADCjHqUVlkx/Zt9DGsiojaEQd0KKJUKvH13X+jUSqw/WoDJn+9CaVWN3GUREVELYFC3EiO6B+GLP8TBR6fGzswiTPxkBy6VVctdFhERuRmDuhUZ2qkdljw+FO28tTh83oz7Pk5HcYVF7rKIiMiNGNStTJ9wI5Y9EY9Qox6nL5bj6X/vh9Vml7ssIiJyEwZ1K9S5vQ8+mzwYBo0KW04WYvaaY3KXREREbsKgbqV6hfnhb/fWXlP9yZZMLN9X9+5hRETUujGoW7HR/ULx9O+7AABmLj+EAznF8hZERESSY1C3cs8ldkNizyBYrHb8YfFuZDCsiYhuKAzqVk6pVODD+/ujd5gfLpVb8MDCdKz9JU/usoiISCIM6huAr16DpX+Mx/Bu7VFVY8cfv9qLRdsy5S6LiIgkwKC+Qfjo1Phs8iBMjOsIQQDe+O8RvPcTZ4MTEbV2DOobiFqlxNvj++DPI3sAAOZtOI0lu7JlroqIiFzBoL7BKBQKPDmiM2YkdgUA/GXFYWw/XShzVURE1FwM6hvUs7d2xZiYMFjtAp78ah8yC8vlLomIiJqBQX2DUigUeO+efugfYUJJZQ0eWbwbJRXccYuIqLVhUN/A9BoVFiYPRJhRjzOF5XhnzVG5SyIioiZiUN/ggnz14lKj/z2Qi6oam8wVERFRUzCo24ChndohzKhHWbUVG44VyF0OERE1AYO6DVAqFRjTPwwAsDLjgszVEBFRUzCo24hx/cMBAD8fL0BJJSeVERG1FgzqNqJHiC+6BfvAYrXjJ64FTkTUajCo2wiFQoGxV3rVqzj8TUTUajCo25C7YmrPU28/XYgCc5XM1RARUWMwqNuQiAAvDOhogl0AVh/MBQAIgoA1h3Px/trjKK3iuWsiIk+jlrsAalnjYsOxL7sYKzPOI6FLO7yx6gjSz1wCAJzIL8WChwZCoVDIXCURETmwR93G3NE3FCqlAgfOlWD0R1uRfuYSdGolNCoFfvolHws3n5G7RCIi+hUGdRsT6KPDTV0CAQA2u4BRfUKw/vnheG1MbwDA7DXHkH76kpwlEhHRrzCo26BXx/TC/YMi8NUjQzD/oYGICPDCpCEdcfeAcNgF4Ol/70NeCSebERF5AoUgCILcRbQks9kMo9GIkpIS+Pn5yV2OR6m02DD+n9twLK8Uke28EGY0oMJiRbnFhn4djHjvnhiolDx/TUTkqqZkEXvUJDJoVVjw0ED46tXIulSB9DOXcOBcCU4VlGH5vvP4ZAvPXxMRtTTO+iYnUYHeWP5kAvZl
X4ZBq4a3VoWjuWb8be0JfLD2BEZ0b48eIRyJICJqKQxqukbXYF90DfYV7/++RxAycoqx/mgBnlt6ACunDYNWzcEYIqKWwJ+2dF0KhQJv390X/l4aHM014+9pJ+QuiYiozWBQU6ME+erx9vi+AID5G09jX/ZlmSsiImobGNTUaKP6hmJc/zDYBWDyZ7vw8abTqLba5C6LiOiGxqCmJnnjrj6IiTChtNqK1B+P4bYPNuPHQ7loY1f5ERG1GF5HTU1mtwv4bt85vPfTcRSUVgMAOgV6486YMNwVE4YuQT4yV0hE5NmakkUMamq28morPt58Bp9sPoPKmqtD4D1CfNEr1A8dArzQwd+AniF+6NvBKGOlRESehUHdAAa19EqrarD+aD7+eyAXm09chNV+7V+pP4/sgSdHdL7mccdfP+7YRURtCYO6AQxq97pcbsG204XIulSBc5crkVlYhh1niqBQAIumDMaI7kHisfnmKvzxX3thrqzBnAf6o18Hk3yFExG1IAZ1AxjULS9l+UH8e1cO/PRq/PfpmxDZzhvZlyrw0Gc7kV1UAQDQqpV4a1wf3DsoQuZqiYjcj2t9k0d5/a7eiO1ogrnKise/3IuMnGLcs2A7sosq0DHAC7d0bw+L1Y4//ecgXllxGBarXe6SiYg8BnvU1CLyzVW48x9bcfHKLHEA6B7si389EodAHx0++vkk5qw/CQDo4G/AiO7tcVOXQMR3CoTRSyNX2UREbsGh7wYwqOWz52wRJn6yAzU2AbEdTVg0ZTBMXlrx+bSj+XhuaQbMVVbxMaUCuG9QBGaN7eO0vrggCJi34RQWbz+L2I7+uK1nMG7pEYT2vroW/U5ERM3BoG4Ag1pem05cxN6sy/jjzZ3grbt2T5iyait2nL6EracKse1UIU4WlAEAEjq3w4KHB8JPr0GNzY6/fH8YS/fkOL1WoQDiO7XDnAf6I8hX3yLfh4ioORjUDWBQty6bTlzEU1/tRbnFhu7Bvpg3KRZvrj6KTScuQqkA/pTUAzU2O9YfzcfBcyUAanf7+mzyIF7yRUQei0HdAAZ163P4fAn+sHi3uAoaABg0KvxjYiwSewU7HXf3P7fDYrPjb/fG4J6BHeQol4joujjrm24ofcKNWP5UArpeWZq0nbcWSx4f6hTSjuNm3NYVAPDGf39BXklVi9dKRCQ19qip1SiprMF/D1zALT2CEG4y1HmM1WbHhAXpOJBTjBHd22PRlMEQBODHw3n4fFsm7IKAu2PDcVf/cBgNnE1ORPLg0HcDGNQ3vpP5pRj9j62wWO14aGhH7DhThFNXJqU56NRKjO4bimFdAhFi1CPYT48Qox4+dUxwIyKSGoO6AQzqtmHBptN458dj4n0/vRpTh0XDz6DB0t3ZOJFfVufrQo169A4zok+4H3qF+qFjOy+EGg3w06s5OY2IJMOgbgCDum2w2QU88sVuHD5vxpSESCQnRMFPXzvULQgCMnKK8f3+88gsLEduSRXyS6pQWm2t9/18dGpEBHghoXM7jOjeHnHRAdCpVeLz3FyEiJqCQd0ABjXVx1xVg2O5pfjlQgkOnzfjWJ4ZF4orcbmi5ppjDRoVuoX4orzaiuIKC4orauCtU2NwVACGdgrAkOh28NapcKG4CheKK3GxrBrDugSif4Sp5b8YEXkcBnUDGNTUVJUWGy6UVOJYbik2nSjAxuMXnS4VayyFAnh4aCReHNmD58KJ2jgGdQMY1OQqQRBwNLcU2UUVMBo0MHlpYDRoUFBajR1nLmHnmUvYc/YybIKAcJMBYSYDVEoFfj5WAKD2PPhb4/vg9z2Cr/NJVz8P4LA60Y2EQd0ABjW1hLrCddupQqQsPyRu7RngrUXn9t7oFOiDiAAD7AJQVWNDtdUOc2UNckuqcKGkEheKK6FWKjEw0h9DOgVgSHQATF5anL9c+1xuSRU6+BswvHv7a5ZOraqxIbekChH+BqhVzmulp5++hE+2nEFhmQV/uzcG3UN8W6BliAhg
UDeIQU1yqrTY8OH6E1i0LRM1Nun/6fXrYMSwLoEoKrPg0PkSnMgvhdUuwFenxtDO7fC7roHw0anx+bZMHD5vFl/nq1dj4cODEN+5neQ1EdG1GNQNYFCTJ6iwWHHmYjnOFJbjdEEZzhdXQqNSQq9RQqdWwUenQojRgDCTHmFGA8qqrdiVWYSdmZew++xlVNfYEO5fO6we7KvH0TyzuNb5b2lUijp/KdBrlLhvUASO5pqx++xlaFVKvHdvP4ztHw5BEHCyoAy7zxYBADr4e6GDvwHhJgPsggBzpRWlVTUot9jgq1cjwEsLo0EDpfL6w/OVFhsyC8tx9lI5jAYNBkX5O82gl5vVZkdZtRVGg+aa0w05RRX4+VgBLldY0Km9jzgiYtBKX3+11YbD50ugVanQJ9zPqRa7XcCmExex9kg+ugf74P7BHd1SgxROFZTh402nYbHZMS42HDd3bQ9VI/6eeJIamx3nLlcis7AMhaUW3Dc4wuX3ZFA3gEFNN6qC0ipsOn4Ru88Wob2vDn3DTejbwYgQPz1+uVCCLSdrdyTLM1fhrpgwJMdHIcBbi6oaG55bmoEfD+cBAG7qEogjuWYUlVua9PlKBWDy0sJPr4avXgNfvRoGjQoWmx3VNXZUW224WFqNC79Z2tVLq8KwLoEY0b09DBoVzl+uxPniShSUViOqnTcGR/ljYJQ/gnz1qLTYcLKgFMfzSnGhuAoKBaBSKqBSKqBXKxHgo0M7by0CvGu3T71YWo3CstqbWqlEwJXnTF4aFFfU4Hxx7emD88WVOHe5EucvVyLPXAWbXUCAtxY9QnzRM9QPapUCG44V1Hn9vUIB+GjVUKkUUCsVUCpq/6tWKaFWKaBRKmHQquCtU8FLq4bvlUv9OrX3RnSgN4J89SiutKCozILCcgtO5ZdiZ2YR9ucUw2K1AwDCjHqM6huK23sF45cLZnyZfhZnL1WINbTz1uIPN0XjoaGR8NWpcbnCgnxzNcxVNQj20yPUqIdeUxvkdruAgtJqnC+ucNpSFkLtLwelVVaUVllRXm2FQauCyUsLfy8NfPUaFJVX43xxFXKv/PlYrHbU2OyosQvQqZUYHOWPYV0C0TPEDxfLqjFn/Uks25MDm/1qzIQa9bhnYAd0CfKBubIG5iorzFU1qLEKsAsCbHYBAgQYDRoEeOsQ6KOFj06N0qraKyxKKq2w2Gzw1qnhc+Vm8tIgxM+AEKMe/l7X/oJVabHh9MUynMgvRU5RJcL9DegT7ofO7X2gUSlRbbXhZH4ZjuSacbawHJcrasSrOfLMVcguqhC/g1qpwLE3RzqdSmoOBnUDGNRE17LbBfz1f0fx+bZM8TGDRoUBkSYYNCqcu1yJnKIKlFtsAGrD0VevhpdGhdLq2h/sTWHy0iA60BvnL1c2egZ9oI8Ol8qrIedPLJVSgYGR/ogM8EJmYTlOXSxDcR2X70mlnbcWlTU2VFxp91/z1asxum8otp0uRE5RJYDaFffsglDnCEqgjxYGrQp5JVVuOe3yawHeWlRabKisqa37tl7BCDcZsCLjvFvbCwC0aiV8dbULFCkVgACgsKzuvzdatRLhJgNyiipgtTfcJgaNClGB3ogO9ELq3f1cXoKYQd0ABjVR/VZmnMeF4irERfujb7gJWrXzBDRzlRUalQIGjcqp12Kx2lFcYUFRheVKj6wGpVVWVFps0F0ZzteplTB5adAp0Af+V3q8druAI7lmbDhWgK2nCqFSKhBuMiDc34B2PjqcyCvF7rNFOJ5fKv6gbeetRfcQX0S28wKggN0uwGoXUFljxaUyC4rKLbhUboECQHtfHQJ9antlVruAovLa5y9XWGAyaBFm0ounEDr4eyHcZECEvwG+eg1OFZThaK4ZR3LNKK+24qaugRjerT1MXlqnNisqt6CksgY2e21v0Gq3w2qr/W+NTYDVJlwJWyvKq20orrQgq7ACmYW1pz4ulVfD30srjgSE+xswOCoAcdEB6BTojWqrHZtOXMSa
w3nYcLwAwb56PBwfifGx4fDWqWG12bH6YC7mbzyN4/mlAGp7+e28dfDVq5FXUiUGpoNKqUCInx7+3hoocPXPUadWwkdf20v11qpRZbWhqLy2Z2muqoG/lxbhJgNCryy7q9eqoFUpoFEpUVRuwfbTl7DjzCXxF4uBkf6YOaoHBkcFAKid3LjuSD5WZpxHhcUGP70GfobaERitWgmVQiGePimpqB1hKCqzoNxiha9eDaOh9goLrUqJcosN5dVWlFVbUVRuQV5JFS41MArk76VB12BfdAzwQnZRBY5eMDstcmQ0aNAr1A9dg33QzlsHk1ftFR3tfXXoFOiDYD+dpFdetLqgnjdvHt577z3k5eUhJiYG//jHPxAXF1fv8d9++y1eeeUVnD17Fl27dsXs2bNxxx13NOqzGNRErU9JZQ1OXyxDhL8X2vvq5C5HUoIgSBIAdruAM4Vl8NKq0d5XB82VoVlBEFBSWTvMX2GxXZnXoHN56LY+FqsdB84VQ4HaoG7JywqrrTYUmKtRWWODXRBgtwN2QUCIUY9AH+e/N3a7gOyiCpy7XIno9t4IM+pbtNZWFdRLly5FcnIyFixYgCFDhmDOnDn49ttvcfz4cQQFBV1z/Pbt23HzzTcjNTUVd955J7755hvMnj0b+/btQ58+fa77eQxqIiKSW6sK6iFDhmDw4MGYO3cuAMButyMiIgJPP/00Zs6cec3x999/P8rLy7F69WrxsaFDh6J///5YsGDBdT+PQU1ERHJrSha5Z+yjkSwWC/bu3YvExETxMaVSicTERKSnp9f5mvT0dKfjASApKane44mIiFozWRccLiwshM1mQ3Cw81KKwcHBOHbsWJ2vycvLq/P4vLy8Oo+vrq5GdfXVWaVms7nO44iIiDyRrD3qlpCamgqj0SjeIiJcv1CdiIiopcga1IGBgVCpVMjPz3d6PD8/HyEhIXW+JiQkpEnHp6SkoKSkRLzl5ORIUzwREVELkDWotVotBg4ciLS0NPExu92OtLQ0xMfH1/ma+Ph4p+MBYN26dfUer9Pp4Ofn53QjIiJqLWTfFPf555/H5MmTMWjQIMTFxWHOnDkoLy/H1KlTAQDJyckIDw9HamoqAODZZ5/F8OHD8f7772P06NFYsmQJ9uzZg4ULF8r5NYiIiNxC9qC+//77cfHiRbz66qvIy8tD//79sWbNGnHCWHZ2NpTKqx3/hIQEfPPNN/jLX/6Cl156CV27dsWKFSsadQ01ERFRayP7ddQtjddRExGR3FrNddRERETUMAY1ERGRB2NQExEReTAGNRERkQdjUBMREXkwBjUREZEHY1ATERF5MNkXPGlpjsvGuYsWERHJxZFBjVnKpM0FdWlpKQBwFy0iIpJdaWkpjEZjg8e0uZXJ7HY7Lly4AF9fXygUCpfey2w2IyIiAjk5OVzlrJHYZk3HNms6tlnTsc2azpU2EwQBpaWlCAsLc1omuy5trketVCrRoUMHSd+Tu3I1Hdus6dhmTcc2azq2WdM1t82u15N24GQyIiIiD8agJiIi8mAMahfodDq89tpr0Ol0cpfSarDNmo5t1nRss6ZjmzVdS7VZm5tMRkRE1JqwR01EROTBGNREREQejEFNRETkwRjULpg3bx6ioqKg1+sxZMgQ7Nq1S+6SPEJqaioGDx4MX19fBAUFYdy4cTh+/LjTMVVVVZg2bRratWsHHx8fTJgwAfn5+TJV7HneeecdKBQKzJgxQ3yMbXat8+fP46GHHkK7du1gMBjQt29f7NmzR3xeEAS8+uqrCA0NhcFgQGJiIk6ePCljxfKy2Wx45ZVXEB0dDYPBgM6dO+PNN990WsayrbfZ5s2bMWbMGISFhUGhUGDFihVOzzemfYqKijBp0iT4+fnBZDLhkUceQVlZWfOLEqhZlixZImi1WuHzzz8XfvnlF+Gxxx4TTCaTkJ+fL3dpsktKShIWLVokHD58WMjIyBDuuOMOoWPHjkJZWZl4zBNPPCFEREQIaWlp
wp49e4ShQ4cKCQkJMlbtOXbt2iVERUUJ/fr1E5599lnxcbaZs6KiIiEyMlKYMmWKsHPnTuHMmTPCTz/9JJw6dUo85p133hGMRqOwYsUK4cCBA8Jdd90lREdHC5WVlTJWLp+33npLaNeunbB69WohMzNT+PbbbwUfHx/h73//u3hMW2+zH374QXj55ZeF5cuXCwCE77//3un5xrTPyJEjhZiYGGHHjh3Cli1bhC5duggTJ05sdk0M6maKi4sTpk2bJt632WxCWFiYkJqaKmNVnqmgoEAAIGzatEkQBEEoLi4WNBqN8O2334rHHD16VAAgpKeny1WmRygtLRW6du0qrFu3Thg+fLgY1Gyza/35z38Wbrrppnqft9vtQkhIiPDee++JjxUXFws6nU7497//3RIlepzRo0cLf/jDH5weu/vuu4VJkyYJgsA2+63fBnVj2ufIkSMCAGH37t3iMT/++KOgUCiE8+fPN6sODn03g8Viwd69e5GYmCg+plQqkZiYiPT0dBkr80wlJSUAgICAAADA3r17UVNT49R+PXr0QMeOHdt8+02bNg2jR492ahuAbVaXVatWYdCgQbj33nsRFBSE2NhYfPLJJ+LzmZmZyMvLc2ozo9GIIUOGtNk2S0hIQFpaGk6cOAEAOHDgALZu3YpRo0YBYJtdT2PaJz09HSaTCYMGDRKPSUxMhFKpxM6dO5v1uW1urW8pFBYWwmazITg42Onx4OBgHDt2TKaqPJPdbseMGTMwbNgw9OnTBwCQl5cHrVYLk8nkdGxwcDDy8vJkqNIzLFmyBPv27cPu3buveY5tdq0zZ85g/vz5eP755/HSSy9h9+7deOaZZ6DVajF58mSxXer6d9pW22zmzJkwm83o0aMHVCoVbDYb3nrrLUyaNAkA2GbX0Zj2ycvLQ1BQkNPzarUaAQEBzW5DBjW51bRp03D48GFs3bpV7lI8Wk5ODp599lmsW7cOer1e7nJaBbvdjkGDBuHtt98GAMTGxuLw4cNYsGABJk+eLHN1nmnZsmX4+uuv8c0336B3797IyMjAjBkzEBYWxjbzYBz6bobAwECoVKprZtzm5+cjJCREpqo8z/Tp07F69Wps2LDBaceykJAQWCwWFBcXOx3flttv7969KCgowIABA6BWq6FWq7Fp0yZ89NFHUKvVCA4OZpv9RmhoKHr16uX0WM+ePZGdnQ0AYrvw3+lVf/rTnzBz5kw88MAD6Nu3Lx5++GE899xzSE1NBcA2u57GtE9ISAgKCgqcnrdarSgqKmp2GzKom0Gr1WLgwIFIS0sTH7Pb7UhLS0N8fLyMlXkGQRAwffp0fP/99/j5558RHR3t9PzAgQOh0Wic2u/48ePIzs5us+1366234tChQ8jIyBBvgwYNwqRJk8T/Z5s5GzZs2DWX/Z04cQKRkZEAgOjoaISEhDi1mdlsxs6dO9tsm1VUVFyz97FKpYLdbgfANruexrRPfHw8iouLsXfvXvGYn3/+GXa7HUOGDGneBzdrChoJS5YsEXQ6nbB48WLhyJEjwuOPPy6YTCYhLy9P7tJk9+STTwpGo1HYuHGjkJubK94qKirEY5544gmhY8eOws8//yzs2bNHiI+PF+Lj42Ws2vP8eta3ILDNfmvXrl2CWq0W3nrrLeHkyZPC119/LXh5eQlfffWVeMw777wjmEwmYeXKlcLBgweFsWPHtqlLjX5r8uTJQnh4uHh51vLly4XAwEDhxRdfFI9p621WWloq7N+/X9i/f78AQPjggw+E/fv3C1lZWYIgNK59Ro4cKcTGxgo7d+4Utm7dKnTt2pWXZ8nlH//4h9CxY0dBq9UKcXFxwo4dO+QuySMAqPO2aNEi8ZjKykrhqaeeEvz9/QUvLy9h/PjxQm5urnxFe6DfBjXb7Fr//e9/hT59+gg6nU7o0aOHsHDhQqfn7Xa78MorrwjBwcGCTqcTbr31VuH48eMyVSs/s9ksPPvss0LHjh0FvV4vdOrUSXj55ZeF
6upq8Zi23mYbNmyo8+fX5MmTBUFoXPtcunRJmDhxouDj4yP4+fkJU6dOFUpLS5tdE3fPIiIi8mA8R01EROTBGNREREQejEFNRETkwRjUREREHoxBTURE5MEY1ERERB6MQU1EROTBGNREREQejEFNRETkwRjURG3MxYsX8eSTT6Jjx47Q6XQICQlBUlIStm3bBgBQKBRYsWKFvEUSkYj7URO1MRMmTIDFYsEXX3yBTp06IT8/H2lpabh06ZLcpRFRHdijJmpDiouLsWXLFsyePRu33HILIiMjERcXh5SUFNx1112IiooCAIwfPx4KhUK8DwArV67EgAEDoNfr0alTJ7zxxhuwWq3i8wqFAvPnz8eoUaNgMBjQqVMn/Oc//xGft1gsmD59OkJDQ6HX6xEZGSnug0xE9WNQE7UhPj4+8PHxwYoVK1BdXX3N87t37wYALFq0CLm5ueL9LVu2IDk5Gc8++yyOHDmCjz/+GIsXL8Zbb73l9PpXXnkFEyZMwIEDBzBp0iQ88MADOHr0KADgo48+wqpVq7Bs2TIcP34cX3/9tdMvAkRUN+6eRdTGfPfdd3jsscdQWVmJAQMGYPjw4XjggQfQr18/ALU94++//x7jxo0TX5OYmIhbb70VKSkp4mNfffUVXnzxRVy4cEF83RNPPIH58+eLxwwdOhQDBgzAP//5TzzzzDP45ZdfsH79eigUipb5skQ3APaoidqYCRMm4MKFC1i1ahVGjhyJjRs3YsCAAVi8eHG9rzlw4ABmzZol9sh9fHzw2GOPITc3FxUVFeJx8fHxTq+Lj48Xe9RTpkxBRkYGunfvjmeeeQZr1651y/cjutEwqInaIL1ej9tuuw2vvPIKtm/fjilTpuC1116r9/iysjK88cYbyMjIEG+HDh3CyZMnodfrG/WZAwYMQGZmJt58801UVlbivvvuwz333CPVVyK6YTGoiQi9evVCeXk5AECj0cBmszk9P2DAABw/fhxdunS55qZUXv0xsmPHDqfX7dixAz179hTv+/n54f7778cnn3yCpUuX4rvvvkNRUZEbvxlR68fLs4jakEuXLuHee+/FH/7wB/Tr1w++vr7Ys2cP3n33XYwdOxYAEBUVhbS0NAwbNgw6nQ7+/v549dVXceedd6Jjx4645557oFQqceDAARw+fBh//etfxff/9ttvMWjQINx00034+uuvsWvXLnz22WcAgA8++AChoaGIjY2FUqnEt99+i5CQEJhMJjmagqj1EIiozaiqqhJmzpwpDBgwQDAajYKXl5fQvXt34S9/+YtQUVEhCIIgrFq1SujSpYugVquFyMhI8bVr1qwREhISBIPBIPj5+QlxcXHCwoULxecBCPPmzRNuu+02QafTCVFRUcLSpUvF5xcuXCj0799f8Pb2Fvz8/IRbb71V2LdvX4t9d6LWirO+iUgSdc0WJyLX8Rw1ERGRB2NQExEReTBOJiMiSfAsGpF7sEdNRETkwRjUREREHoxBTURE5MEY1ERERB6MQU1EROTBGNREREQejEFNRETkwRjUREREHoxBTURE5MH+H/8OXbAW2CwiAAAAAElFTkSuQmCC",
      "text/plain": [
       "<Figure size 500x500 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "plt.figure(figsize=(5, 5))\n",
    "\n",
    "plt.plot(loss_history, label=f'Train loss (batch size={batch_size})')\n",
    "plt.xlabel('Steps')\n",
    "plt.ylabel('Loss')\n",
    "plt.legend()\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## TODO: write function for random smaple molecule SMILES."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "ParameterList(  (0): Parameter containing: [torch.float32 of size 108])"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "\n",
    "# def random_sample(generator, num_molecules, data_generator):\n",
    "generator.q_params\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "q_params.0 tensor([-6.1403e-01,  6.8589e-01, -8.2242e-01,  2.8891e-03,  7.2007e-01,\n",
      "         9.6816e-01, -7.8608e-01, -3.3917e-04,  1.2507e+00, -6.3094e-01,\n",
      "        -8.1984e-01,  4.7613e-04,  2.3608e+00,  5.9280e-02, -7.3479e-01,\n",
      "        -1.2768e-01,  8.9564e-01,  1.4311e+00, -7.8910e-01, -4.3922e-02,\n",
      "         2.1609e+00,  5.1940e-01, -9.8968e-01,  7.7408e-01,  1.5925e+00,\n",
      "         1.1653e+00, -3.6929e-03,  2.4755e-03,  1.5733e+00,  6.7595e-02,\n",
      "         7.2887e-04,  7.8670e-03,  1.5778e+00,  2.0184e-01, -1.4096e-02,\n",
      "        -1.0092e-02,  2.4204e-02,  6.0875e-03, -5.9460e-03, -2.0760e-04,\n",
      "         3.3274e-02,  1.9868e-01,  1.5212e-03,  6.1897e-03,  5.4395e-02,\n",
      "        -6.9438e-02,  7.6293e-02, -2.0426e-01,  8.5052e-01,  1.3071e-02,\n",
      "        -1.9599e-03, -4.9723e-03,  5.2085e-02,  1.2249e-02, -4.0249e-04,\n",
      "        -7.0410e-03, -1.9441e-01,  6.0722e-03,  7.5010e-03,  2.0489e-03,\n",
      "        -7.2583e-03, -6.4861e-02, -1.0081e-01,  1.2406e-01,  1.5557e+00,\n",
      "        -1.3705e-01,  2.1734e-02,  4.6810e-02,  2.0301e-01, -4.7089e-01,\n",
      "         4.4221e-01, -9.4178e-01])\n"
     ]
    }
   ],
   "source": [
    "# def random_sample(generator, num_molecules, data_generator):\n",
    "weights = torch.tensor([])\n",
    "state_dict = generator.state_dict()\n",
    "for name, param in state_dict.items():\n",
    "    print(name)\n",
    "    weights = torch.cat([weights, param.data])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "q_params.0\n",
      "tensor([1.4980, 0.2461, 0.7691, 0.9030, 0.8347, 0.3899, 1.2255, 1.1860, 0.7066,\n",
      "        0.4137, 0.4713, 1.0023])\n",
      "['111111100100', '110111010101', '001111010101', '111111010101', '111111010101', '111111010101', '101111010101', '111111010101', '111111010101', '111111010101', '111111010101', '111111010101', '111111010101', '111111010101', '111111100101', '111111010101', '111111010001', '111110010101', '011111010101', '111111010101']\n",
      "N=NN\n",
      "C1NN1\n",
      "None\n",
      "[nH]1[nH][nH]1\n",
      "[nH]1[nH][nH]1\n",
      "[nH]1[nH][nH]1\n",
      "[nH]1[nH]o1\n",
      "[nH]1[nH][nH]1\n",
      "[nH]1[nH][nH]1\n",
      "[nH]1[nH][nH]1\n",
      "[nH]1[nH][nH]1\n",
      "[nH]1[nH][nH]1\n",
      "[nH]1[nH][nH]1\n",
      "[nH]1[nH][nH]1\n",
      "N1=NN1\n",
      "[nH]1[nH][nH]1\n",
      "NNN\n",
      "[nH]1[nH]o1\n",
      "C1NN1\n",
      "[nH]1[nH][nH]1\n"
     ]
    }
   ],
   "source": [
    "weights = torch.tensor([])\n",
    "state_dict = generator.state_dict()\n",
    "for name, param in state_dict.items():\n",
    "    print(name)\n",
    "    weights = torch.cat([weights, param.data])\n",
    "\n",
    "dev_sample = qml.device(\"default.qubit\", wires=n_qubits, shots=20)\n",
    "@qml.qnode(dev_sample)\n",
    "def quantum_circuit_sample(noise, weights):\n",
    "    weights = weights.reshape(q_depth, n_qubits * 2)\n",
    "    # Initialise latent vectors\n",
    "    for i in range(n_qubits):\n",
    "        qml.RY(noise[i], wires=i)\n",
    "    # Repeated layer\n",
    "    for i in range(q_depth):\n",
    "        # Parameterised layer\n",
    "        for y in range(n_qubits):\n",
    "            qml.RY(weights[i][2*y], wires=y)\n",
    "            qml.RX(weights[i][2*y+1], wires=y)\n",
    "        # Control Z gates\n",
    "        for y in range(n_qubits - 1):\n",
    "            qml.CZ(wires=[y, y + 1])\n",
    "    return qml.sample()\n",
    "\n",
    "def binary_tensor_to_string(tensor):\n",
    "    if not torch.all((tensor == 0) | (tensor == 1)):\n",
    "        raise ValueError(\"The tensor must be binary (contain only 0 and 1).\")\n",
    "    flat_tensor = tensor.view(-1).tolist()\n",
    "    binary_string = ''.join(map(str, flat_tensor))\n",
    "    return binary_string\n",
    "\n",
    "noise = torch.rand(n_qubits, device=device) * np.pi / 2\n",
    "print(noise)\n",
    "sampled_quantum_states = quantum_circuit_sample(noise, weights)\n",
    "\n",
    "sampled_quantum_states = [binary_tensor_to_string(qs) for qs in sampled_quantum_states]\n",
    "print(sampled_quantum_states)\n",
    "\n",
    "for q in sampled_quantum_states:\n",
    "    print(data_generator.QuantumStateToSmiles(q))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0],\n",
       "       [1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]])"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import numpy\n",
    "# numpy.array(sampled_quantum_states)\n",
    "numpy.where(numpy.array(sampled_quantum_states) == 0, 1, 0)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "qmg",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
