repo | file | content
---|---|---
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import pulse
d0 = pulse.DriveChannel(0)
d1 = pulse.DriveChannel(1)
with pulse.build() as pulse_prog:
    with pulse.align_right():
        # this pulse will start at t=0
        pulse.play(pulse.Constant(100, 1.0), d0)
        # this pulse will start at t=80
        pulse.play(pulse.Constant(20, 1.0), d1)
pulse_prog.draw()
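# Aside (not in the original snippet): a minimal sketch, assuming the same
# qiskit.pulse builder API, showing how align_left changes the timing — with
# left alignment both pulses start at t=0 instead of ending together at t=100.
with pulse.build() as left_aligned_prog:
    with pulse.align_left():
        pulse.play(pulse.Constant(100, 1.0), d0)  # starts at t=0
        pulse.play(pulse.Constant(20, 1.0), d1)   # also starts at t=0
left_aligned_prog.draw()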
|
https://github.com/urwin419/QiskitChecker
|
urwin419
|
from qiskit import QuantumCircuit
def create_bell_pair():
    qc = QuantumCircuit(2)
    qc.h(1)
    ### added h gate ###
    qc.h(1)
    qc.cx(1, 0)
    return qc
def encode_message(qc, qubit, msg):
    if len(msg) != 2 or not set(msg).issubset({"0", "1"}):
        raise ValueError(f"message '{msg}' is invalid")
    if msg[1] == "1":
        qc.x(qubit)
    if msg[0] == "1":
        qc.z(qubit)
    return qc
def decode_message(qc):
    qc.cx(1, 0)
    qc.h(1)
    return qc
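# Aside (not part of the original file): a minimal usage sketch of the helpers
# above, assuming qiskit-aer is installed; it runs the full superdense-coding
# round trip for the message "10" on the AerSimulator.
from qiskit_aer import AerSimulator

qc = create_bell_pair()
qc = encode_message(qc, 1, "10")
qc = decode_message(qc)
qc.measure_all()
counts = AerSimulator().run(qc, shots=1024).result().get_counts()
print(counts)  # with the textbook bell pair every shot would be "10"; the extra h gate above changes that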
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit.quantum_info import SparsePauliOp
H2_op = SparsePauliOp.from_list(
    [
        ("II", -1.052373245772859),
        ("IZ", 0.39793742484318045),
        ("ZI", -0.39793742484318045),
        ("ZZ", -0.01128010425623538),
        ("XX", 0.18093119978423156),
    ]
)
print(f"Number of qubits: {H2_op.num_qubits}")
from qiskit.algorithms import NumPyMinimumEigensolver
from qiskit.opflow import PauliSumOp
numpy_solver = NumPyMinimumEigensolver()
result = numpy_solver.compute_minimum_eigenvalue(operator=PauliSumOp(H2_op))
ref_value = result.eigenvalue.real
print(f"Reference value: {ref_value:.5f}")
# define ansatz and optimizer
from qiskit.circuit.library import TwoLocal
from qiskit.algorithms.optimizers import SPSA
iterations = 125
ansatz = TwoLocal(rotation_blocks="ry", entanglement_blocks="cz")
spsa = SPSA(maxiter=iterations)
# define callback
# note: Re-run this cell to restart lists before training
counts = []
values = []
def store_intermediate_result(eval_count, parameters, mean, std):
    counts.append(eval_count)
    values.append(mean)
# define Aer Estimator for noiseless statevector simulation
from qiskit.utils import algorithm_globals
from qiskit_aer.primitives import Estimator as AerEstimator
seed = 170
algorithm_globals.random_seed = seed
noiseless_estimator = AerEstimator(
    run_options={"seed": seed, "shots": 1024},
    transpile_options={"seed_transpiler": seed},
)
# instantiate and run VQE
from qiskit.algorithms.minimum_eigensolvers import VQE
vqe = VQE(
    noiseless_estimator, ansatz, optimizer=spsa, callback=store_intermediate_result
)
result = vqe.compute_minimum_eigenvalue(operator=H2_op)
print(f"VQE on Aer qasm simulator (no noise): {result.eigenvalue.real:.5f}")
print(
    f"Delta from reference energy value is {(result.eigenvalue.real - ref_value):.5f}"
)
import pylab
pylab.rcParams["figure.figsize"] = (12, 4)
pylab.plot(counts, values)
pylab.xlabel("Eval count")
pylab.ylabel("Energy")
pylab.title("Convergence with no noise")
from qiskit_aer.noise import NoiseModel
from qiskit.providers.fake_provider import FakeVigo
# fake providers contain data from real IBM Quantum devices stored in Qiskit Terra,
# and are useful for extracting realistic noise models.
device = FakeVigo()
coupling_map = device.configuration().coupling_map
noise_model = NoiseModel.from_backend(device)
print(noise_model)
noisy_estimator = AerEstimator(
    backend_options={
        "method": "density_matrix",
        "coupling_map": coupling_map,
        "noise_model": noise_model,
    },
    run_options={"seed": seed, "shots": 1024},
    transpile_options={"seed_transpiler": seed},
)
# re-start callback variables
counts = []
values = []
vqe.estimator = noisy_estimator
result1 = vqe.compute_minimum_eigenvalue(operator=H2_op)
print(f"VQE on Aer qasm simulator (with noise): {result1.eigenvalue.real:.5f}")
print(
    f"Delta from reference energy value is {(result1.eigenvalue.real - ref_value):.5f}"
)
if counts or values:
    pylab.rcParams["figure.figsize"] = (12, 4)
    pylab.plot(counts, values)
    pylab.xlabel("Eval count")
    pylab.ylabel("Energy")
    pylab.title("Convergence with noise")
print(f"Reference value: {ref_value:.5f}")
print(f"VQE on Aer qasm simulator (no noise): {result.eigenvalue.real:.5f}")
print(f"VQE on Aer qasm simulator (with noise): {result1.eigenvalue.real:.5f}")
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/Pitt-JonesLab/slam_decomposition
|
Pitt-JonesLab
|
# need to make figure which shows curved path to build CX and SWAP
# ideally on the same weyl chamber plot
from slam.utils.pd_playground import ParallelDrivenGateWidget, ImprovedCX, ImprovedSWAP
import numpy as np
from slam.utils.visualize import coordinate_2dlist_weyl
from weylchamber import c1c2c3
cx = ImprovedCX()
fig = coordinate_2dlist_weyl(*cx.coordinate_list, c="cyan", no_bar=1);
%matplotlib widget
cx = ImprovedCX()
swap = ImprovedSWAP()
# unoptimized together
fig = coordinate_2dlist_weyl(*cx.baseline_coords, c="cyan", no_bar=1);
# fig = coordinate_2dlist_weyl(*swap.baseline_coords, c='magenta', no_bar=1, fig=fig);
from slam.utils.visualize import fpath_images
# save fig as pdf
cx.fig.savefig(f"{fpath_images}/optimimzed_cx.pdf")
|
https://github.com/SaashaJoshi/IBM-Qiskit-Summer-School-2020
|
SaashaJoshi
|
!pip install -U -r grading_tools/requirements.txt
from IPython.display import clear_output
clear_output()
import numpy as np; pi = np.pi
from qiskit import QuantumCircuit, Aer, execute
from qiskit.visualization import plot_histogram
from copy import deepcopy as make_copy
def prepare_hets_circuit(depth, angle1, angle2):
    hets_circ = QuantumCircuit(depth)
    hets_circ.ry(angle1, 0)
    hets_circ.rz(angle1, 0)
    hets_circ.ry(angle1, 1)
    hets_circ.rz(angle1, 1)
    for ii in range(depth):
        hets_circ.cx(0, 1)
        hets_circ.ry(angle2, 0)
        hets_circ.rz(angle2, 0)
        hets_circ.ry(angle2, 1)
        hets_circ.rz(angle2, 1)
    return hets_circ
hets_circuit = prepare_hets_circuit(2, pi/2, pi/2)
hets_circuit.draw()
def measure_zz_circuit(given_circuit):
    zz_meas = make_copy(given_circuit)
    zz_meas.measure_all()
    return zz_meas
zz_meas = measure_zz_circuit(hets_circuit)
zz_meas.draw()
simulator = Aer.get_backend('qasm_simulator')
result = execute(zz_meas, backend = simulator, shots=10000).result()
counts = result.get_counts(zz_meas)
plot_histogram(counts)
def measure_zz(given_circuit, num_shots=10000):
    zz_meas = measure_zz_circuit(given_circuit)
    result = execute(zz_meas, backend=simulator, shots=num_shots).result()
    counts = result.get_counts(zz_meas)
    if '00' not in counts:
        counts['00'] = 0
    if '01' not in counts:
        counts['01'] = 0
    if '10' not in counts:
        counts['10'] = 0
    if '11' not in counts:
        counts['11'] = 0
    total_counts = counts['00'] + counts['11'] + counts['01'] + counts['10']
    zz = counts['00'] + counts['11'] - counts['01'] - counts['10']
    zz = zz / total_counts
    return zz
zz = measure_zz(hets_circuit)
print("<ZZ> =", str(zz))
def measure_zi(given_circuit, num_shots=10000):
    zz_meas = measure_zz_circuit(given_circuit)
    result = execute(zz_meas, backend=simulator, shots=num_shots).result()
    counts = result.get_counts(zz_meas)
    if '00' not in counts:
        counts['00'] = 0
    if '01' not in counts:
        counts['01'] = 0
    if '10' not in counts:
        counts['10'] = 0
    if '11' not in counts:
        counts['11'] = 0
    total_counts = counts['00'] + counts['11'] + counts['01'] + counts['10']
    zi = counts['00'] - counts['11'] + counts['01'] - counts['10']
    zi = zi / total_counts
    return zi
def measure_iz(given_circuit, num_shots=10000):
    zz_meas = measure_zz_circuit(given_circuit)
    result = execute(zz_meas, backend=simulator, shots=num_shots).result()
    counts = result.get_counts(zz_meas)
    if '00' not in counts:
        counts['00'] = 0
    if '01' not in counts:
        counts['01'] = 0
    if '10' not in counts:
        counts['10'] = 0
    if '11' not in counts:
        counts['11'] = 0
    total_counts = counts['00'] + counts['11'] + counts['01'] + counts['10']
    iz = counts['00'] - counts['11'] - counts['01'] + counts['10']
    iz = iz / total_counts
    return iz
zi = measure_zi(hets_circuit)
print("<ZI> =", str(zi))
iz = measure_iz(hets_circuit)
print("<IZ> =", str(iz))
def measure_xx_circuit(given_circuit):
    xx_meas = make_copy(given_circuit)
    ### WRITE YOUR CODE BETWEEN THESE LINES - START
    xx_meas.h(0)
    xx_meas.h(1)
    xx_meas.measure_all()
    ### WRITE YOUR CODE BETWEEN THESE LINES - END
    return xx_meas
xx_meas = measure_xx_circuit(hets_circuit)
xx_meas.draw()
def measure_xx(given_circuit, num_shots=10000):
    xx_meas = measure_xx_circuit(given_circuit)
    result = execute(xx_meas, backend=simulator, shots=num_shots).result()
    counts = result.get_counts(xx_meas)
    if '00' not in counts:
        counts['00'] = 0
    if '01' not in counts:
        counts['01'] = 0
    if '10' not in counts:
        counts['10'] = 0
    if '11' not in counts:
        counts['11'] = 0
    total_counts = counts['00'] + counts['11'] + counts['01'] + counts['10']
    xx = counts['00'] + counts['11'] - counts['01'] - counts['10']
    xx = xx / total_counts
    return xx
xx = measure_xx(hets_circuit)
print("<XX> =", str(xx))
def get_energy(given_circuit, num_shots=10000):
    zz = measure_zz(given_circuit, num_shots=num_shots)
    iz = measure_iz(given_circuit, num_shots=num_shots)
    zi = measure_zi(given_circuit, num_shots=num_shots)
    xx = measure_xx(given_circuit, num_shots=num_shots)
    energy = (-1.0523732) * 1 + (0.39793742) * iz + (-0.3979374) * zi + (-0.0112801) * zz + (0.18093119) * xx
    return energy
energy = get_energy(hets_circuit)
print("The energy of the trial state is", str(energy))
hets_circuit_plus = None
hets_circuit_minus = None
### WRITE YOUR CODE BETWEEN THESE LINES - START
hets_circuit_plus = prepare_hets_circuit(2, pi/2 + 0.1*pi/2, pi/2)
hets_circuit_minus = prepare_hets_circuit(2, pi/2 - 0.1*pi/2, pi/2)
### WRITE YOUR CODE BETWEEN THESE LINES - END
energy_plus = get_energy(hets_circuit_plus, num_shots=100000)
energy_minus = get_energy(hets_circuit_minus, num_shots=100000)
print(energy_plus, energy_minus)
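# Aside (not in the original lab): these symmetric evaluations around pi/2 can
# feed a central-difference estimate of dE/d(angle1); the step size used above
# is 0.1*pi/2.
step = 0.1 * pi / 2
gradient_estimate = (energy_plus - energy_minus) / (2 * step)
print("finite-difference dE/d(angle1) estimate:", gradient_estimate)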
name = 'Pon Rahul M'
email = 'ponrahul.21it@licet.ac.in'
### Do not change the lines below
from grading_tools import grade
grade(answer=measure_xx_circuit(hets_circuit), name=name, email=email, labid='lab9', exerciseid='ex1')
grade(answer=hets_circuit_plus, name=name, email=email, labid='lab9', exerciseid='ex2')
grade(answer=hets_circuit_minus, name=name, email=email, labid='lab9', exerciseid='ex3')
energy_plus_100, energy_plus_1000, energy_plus_10000 = 0, 0, 0
energy_minus_100, energy_minus_1000, energy_minus_10000 = 0, 0, 0
### WRITE YOUR CODE BETWEEN THESE LINES - START
energy_plus_100 = get_energy(hets_circuit_plus, num_shots = 100)
energy_minus_100 = get_energy(hets_circuit_minus, num_shots = 100)
energy_plus_1000 = get_energy(hets_circuit_plus, num_shots = 1000)
energy_minus_1000 = get_energy(hets_circuit_minus, num_shots = 1000)
energy_plus_10000 = get_energy(hets_circuit_plus, num_shots = 10000)
energy_minus_10000 = get_energy(hets_circuit_minus, num_shots = 10000)
### WRITE YOUR CODE BETWEEN THESE LINES - END
print(energy_plus_100, energy_minus_100, "difference = ", energy_minus_100 - energy_plus_100)
print(energy_plus_1000, energy_minus_1000, "difference = ", energy_minus_1000 - energy_plus_1000)
print(energy_plus_10000, energy_minus_10000, "difference = ", energy_minus_10000 - energy_plus_10000)
### WRITE YOUR CODE BETWEEN THESE LINES - START
I = np.array([
    [1, 0],
    [0, 1]
])
X = np.array([
    [0, 1],
    [1, 0]
])
Z = np.array([
    [1, 0],
    [0, -1]
])
h2_hamiltonian = (-1.0523732) * np.kron(I, I) + \
                 (0.39793742) * np.kron(I, Z) + \
                 (-0.3979374) * np.kron(Z, I) + \
                 (-0.0112801) * np.kron(Z, Z) + \
                 (0.18093119) * np.kron(X, X)
from numpy import linalg as LA
eigenvalues, eigenvectors = LA.eig(h2_hamiltonian)
for ii, eigenvalue in enumerate(eigenvalues):
    print(f"Eigenvector {eigenvectors[:,ii]} has energy {eigenvalue}")
exact_eigenvector = eigenvectors[:,np.argmin(eigenvalues)]
exact_eigenvalue = np.min(eigenvalues)
print()
print("Minimum energy is", exact_eigenvalue)
### WRITE YOUR CODE BETWEEN THESE LINES - END
|
https://github.com/qiskit-community/qiskit-aqt-provider
|
qiskit-community
|
# This code is part of Qiskit.
#
# (C) Copyright Alpine Quantum Technologies GmbH 2023
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Trivial minimization example using a quantum approximate optimization algorithm (QAOA).
This is the same example as in vqe.py, but uses QAOA instead of VQE as solver.
"""
from typing import Final
import qiskit_algorithms
from qiskit.quantum_info import SparsePauliOp
from qiskit_algorithms.minimum_eigensolvers import QAOA
from qiskit_algorithms.optimizers import COBYLA
from qiskit_aqt_provider import AQTProvider
from qiskit_aqt_provider.primitives import AQTSampler
RANDOM_SEED: Final = 0
if __name__ == "__main__":
backend = AQTProvider("token").get_backend("offline_simulator_no_noise")
sampler = AQTSampler(backend)
# fix the random seeds such that the example is reproducible
qiskit_algorithms.utils.algorithm_globals.random_seed = RANDOM_SEED
backend.simulator.options.seed_simulator = RANDOM_SEED
# Hamiltonian: Ising model on two spin 1/2 without external field
J = 1.23456789
hamiltonian = SparsePauliOp.from_list([("ZZ", 3 * J)])
# Find the ground-state energy with QAOA
optimizer = COBYLA(maxiter=100, tol=0.01)
qaoa = QAOA(sampler, optimizer)
result = qaoa.compute_minimum_eigenvalue(operator=hamiltonian)
assert result.eigenvalue is not None # noqa: S101
print(f"Optimizer run time: {result.optimizer_time:.2f} s")
print("Cost function evaluations:", result.cost_function_evals)
print("Deviation from expected ground-state energy:", abs(result.eigenvalue - (-3 * J)))
|
https://github.com/2lambda123/Qiskit-qiskit
|
2lambda123
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test dynamical decoupling insertion pass."""
import unittest
import numpy as np
from numpy import pi
from ddt import ddt, data
from qiskit.circuit import QuantumCircuit, Delay, Measure, Reset, Parameter
from qiskit.circuit.library import XGate, YGate, RXGate, UGate, CXGate, HGate
from qiskit.quantum_info import Operator
from qiskit.transpiler.instruction_durations import InstructionDurations
from qiskit.transpiler.passes import (
ASAPScheduleAnalysis,
ALAPScheduleAnalysis,
PadDynamicalDecoupling,
)
from qiskit.transpiler.passmanager import PassManager
from qiskit.transpiler.exceptions import TranspilerError
from qiskit.transpiler.target import Target, InstructionProperties
from qiskit import pulse
from qiskit.test import QiskitTestCase
@ddt
class TestPadDynamicalDecoupling(QiskitTestCase):
"""Tests PadDynamicalDecoupling pass."""
def setUp(self):
"""Circuits to test DD on.
┌───┐
q_0: ┤ H ├──■────────────
└───┘┌─┴─┐
q_1: ─────┤ X ├──■───────
└───┘┌─┴─┐
q_2: ──────────┤ X ├──■──
└───┘┌─┴─┐
q_3: ───────────────┤ X ├
└───┘
┌──────────┐
q_0: ──■──┤ U(π,0,π) ├──────────■──
┌─┴─┐└──────────┘ ┌─┴─┐
q_1: ┤ X ├─────■───────────■──┤ X ├
└───┘ ┌─┴─┐ ┌─┐┌─┴─┐└───┘
q_2: ────────┤ X ├────┤M├┤ X ├─────
└───┘ └╥┘└───┘
c: 1/══════════════════╩═══════════
0
"""
super().setUp()
self.ghz4 = QuantumCircuit(4)
self.ghz4.h(0)
self.ghz4.cx(0, 1)
self.ghz4.cx(1, 2)
self.ghz4.cx(2, 3)
self.midmeas = QuantumCircuit(3, 1)
self.midmeas.cx(0, 1)
self.midmeas.cx(1, 2)
self.midmeas.u(pi, 0, pi, 0)
self.midmeas.measure(2, 0)
self.midmeas.cx(1, 2)
self.midmeas.cx(0, 1)
self.durations = InstructionDurations(
[
("h", 0, 50),
("cx", [0, 1], 700),
("cx", [1, 2], 200),
("cx", [2, 3], 300),
("x", None, 50),
("y", None, 50),
("u", None, 100),
("rx", None, 100),
("measure", None, 1000),
("reset", None, 1500),
]
)
def test_insert_dd_ghz(self):
"""Test DD gates are inserted in correct spots.
┌───┐ ┌────────────────┐ ┌───┐ »
q_0: ──────┤ H ├─────────■──┤ Delay(100[dt]) ├──────┤ X ├──────»
┌─────┴───┴─────┐ ┌─┴─┐└────────────────┘┌─────┴───┴─────┐»
q_1: ┤ Delay(50[dt]) ├─┤ X ├────────■─────────┤ Delay(50[dt]) ├»
├───────────────┴┐└───┘ ┌─┴─┐ └───────────────┘»
q_2: ┤ Delay(750[dt]) ├───────────┤ X ├───────────────■────────»
├────────────────┤ └───┘ ┌─┴─┐ »
q_3: ┤ Delay(950[dt]) ├─────────────────────────────┤ X ├──────»
└────────────────┘ └───┘ »
« ┌────────────────┐ ┌───┐ ┌────────────────┐
«q_0: ┤ Delay(200[dt]) ├──────┤ X ├───────┤ Delay(100[dt]) ├─────────────────
« └─────┬───┬──────┘┌─────┴───┴──────┐└─────┬───┬──────┘┌───────────────┐
«q_1: ──────┤ X ├───────┤ Delay(100[dt]) ├──────┤ X ├───────┤ Delay(50[dt]) ├
« └───┘ └────────────────┘ └───┘ └───────────────┘
«q_2: ───────────────────────────────────────────────────────────────────────
«
«q_3: ───────────────────────────────────────────────────────────────────────
«
"""
dd_sequence = [XGate(), XGate()]
pm = PassManager(
[
ALAPScheduleAnalysis(self.durations),
PadDynamicalDecoupling(self.durations, dd_sequence),
]
)
ghz4_dd = pm.run(self.ghz4)
expected = self.ghz4.copy()
expected = expected.compose(Delay(50), [1], front=True)
expected = expected.compose(Delay(750), [2], front=True)
expected = expected.compose(Delay(950), [3], front=True)
expected = expected.compose(Delay(100), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(200), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(100), [0])
expected = expected.compose(Delay(50), [1])
expected = expected.compose(XGate(), [1])
expected = expected.compose(Delay(100), [1])
expected = expected.compose(XGate(), [1])
expected = expected.compose(Delay(50), [1])
self.assertEqual(ghz4_dd, expected)
def test_insert_dd_ghz_with_target(self):
"""Test DD gates are inserted in correct spots.
┌───┐ ┌────────────────┐ ┌───┐ »
q_0: ──────┤ H ├─────────■──┤ Delay(100[dt]) ├──────┤ X ├──────»
┌─────┴───┴─────┐ ┌─┴─┐└────────────────┘┌─────┴───┴─────┐»
q_1: ┤ Delay(50[dt]) ├─┤ X ├────────■─────────┤ Delay(50[dt]) ├»
├───────────────┴┐└───┘ ┌─┴─┐ └───────────────┘»
q_2: ┤ Delay(750[dt]) ├───────────┤ X ├───────────────■────────»
├────────────────┤ └───┘ ┌─┴─┐ »
q_3: ┤ Delay(950[dt]) ├─────────────────────────────┤ X ├──────»
└────────────────┘ └───┘ »
« ┌────────────────┐ ┌───┐ ┌────────────────┐
«q_0: ┤ Delay(200[dt]) ├──────┤ X ├───────┤ Delay(100[dt]) ├─────────────────
« └─────┬───┬──────┘┌─────┴───┴──────┐└─────┬───┬──────┘┌───────────────┐
«q_1: ──────┤ X ├───────┤ Delay(100[dt]) ├──────┤ X ├───────┤ Delay(50[dt]) ├
« └───┘ └────────────────┘ └───┘ └───────────────┘
«q_2: ───────────────────────────────────────────────────────────────────────
«
«q_3: ───────────────────────────────────────────────────────────────────────
«
"""
target = Target(num_qubits=4, dt=1)
target.add_instruction(HGate(), {(0,): InstructionProperties(duration=50)})
target.add_instruction(
CXGate(),
{
(0, 1): InstructionProperties(duration=700),
(1, 2): InstructionProperties(duration=200),
(2, 3): InstructionProperties(duration=300),
},
)
target.add_instruction(
XGate(), {(x,): InstructionProperties(duration=50) for x in range(4)}
)
target.add_instruction(
YGate(), {(x,): InstructionProperties(duration=50) for x in range(4)}
)
target.add_instruction(
UGate(Parameter("theta"), Parameter("phi"), Parameter("lambda")),
{(x,): InstructionProperties(duration=100) for x in range(4)},
)
target.add_instruction(
RXGate(Parameter("theta")),
{(x,): InstructionProperties(duration=100) for x in range(4)},
)
target.add_instruction(
Measure(), {(x,): InstructionProperties(duration=1000) for x in range(4)}
)
target.add_instruction(
Reset(), {(x,): InstructionProperties(duration=1500) for x in range(4)}
)
target.add_instruction(Delay(Parameter("t")), {(x,): None for x in range(4)})
dd_sequence = [XGate(), XGate()]
pm = PassManager(
[
ALAPScheduleAnalysis(target=target),
PadDynamicalDecoupling(target=target, dd_sequence=dd_sequence),
]
)
ghz4_dd = pm.run(self.ghz4)
expected = self.ghz4.copy()
expected = expected.compose(Delay(50), [1], front=True)
expected = expected.compose(Delay(750), [2], front=True)
expected = expected.compose(Delay(950), [3], front=True)
expected = expected.compose(Delay(100), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(200), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(100), [0])
expected = expected.compose(Delay(50), [1])
expected = expected.compose(XGate(), [1])
expected = expected.compose(Delay(100), [1])
expected = expected.compose(XGate(), [1])
expected = expected.compose(Delay(50), [1])
self.assertEqual(ghz4_dd, expected)
def test_insert_dd_ghz_one_qubit(self):
"""Test DD gates are inserted on only one qubit.
┌───┐ ┌────────────────┐ ┌───┐ »
q_0: ──────┤ H ├─────────■──┤ Delay(100[dt]) ├──────┤ X ├───────»
┌─────┴───┴─────┐ ┌─┴─┐└────────────────┘┌─────┴───┴──────┐»
q_1: ┤ Delay(50[dt]) ├─┤ X ├────────■─────────┤ Delay(300[dt]) ├»
├───────────────┴┐└───┘ ┌─┴─┐ └────────────────┘»
q_2: ┤ Delay(750[dt]) ├───────────┤ X ├───────────────■─────────»
├────────────────┤ └───┘ ┌─┴─┐ »
q_3: ┤ Delay(950[dt]) ├─────────────────────────────┤ X ├───────»
└────────────────┘ └───┘ »
meas: 4/═══════════════════════════════════════════════════════════»
»
« ┌────────────────┐┌───┐┌────────────────┐ ░ ┌─┐
« q_0: ┤ Delay(200[dt]) ├┤ X ├┤ Delay(100[dt]) ├─░─┤M├─────────
« └────────────────┘└───┘└────────────────┘ ░ └╥┘┌─┐
« q_1: ──────────────────────────────────────────░──╫─┤M├──────
« ░ ║ └╥┘┌─┐
« q_2: ──────────────────────────────────────────░──╫──╫─┤M├───
« ░ ║ ║ └╥┘┌─┐
« q_3: ──────────────────────────────────────────░──╫──╫──╫─┤M├
« ░ ║ ║ ║ └╥┘
«meas: 4/═════════════════════════════════════════════╩══╩══╩══╩═
« 0 1 2 3
"""
dd_sequence = [XGate(), XGate()]
pm = PassManager(
[
ALAPScheduleAnalysis(self.durations),
PadDynamicalDecoupling(self.durations, dd_sequence, qubits=[0]),
]
)
ghz4_dd = pm.run(self.ghz4.measure_all(inplace=False))
expected = self.ghz4.copy()
expected = expected.compose(Delay(50), [1], front=True)
expected = expected.compose(Delay(750), [2], front=True)
expected = expected.compose(Delay(950), [3], front=True)
expected = expected.compose(Delay(100), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(200), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(100), [0])
expected = expected.compose(Delay(300), [1])
expected.measure_all()
self.assertEqual(ghz4_dd, expected)
def test_insert_dd_ghz_everywhere(self):
"""Test DD gates even on initial idle spots.
┌───┐ ┌────────────────┐┌───┐┌────────────────┐┌───┐»
q_0: ──────┤ H ├─────────■──┤ Delay(100[dt]) ├┤ Y ├┤ Delay(200[dt]) ├┤ Y ├»
┌─────┴───┴─────┐ ┌─┴─┐└────────────────┘└───┘└────────────────┘└───┘»
q_1: ┤ Delay(50[dt]) ├─┤ X ├───────────────────────────────────────────■──»
├───────────────┴┐├───┤┌────────────────┐┌───┐┌────────────────┐┌─┴─┐»
q_2: ┤ Delay(162[dt]) ├┤ Y ├┤ Delay(326[dt]) ├┤ Y ├┤ Delay(162[dt]) ├┤ X ├»
├────────────────┤├───┤├────────────────┤├───┤├────────────────┤└───┘»
q_3: ┤ Delay(212[dt]) ├┤ Y ├┤ Delay(426[dt]) ├┤ Y ├┤ Delay(212[dt]) ├─────»
└────────────────┘└───┘└────────────────┘└───┘└────────────────┘ »
« ┌────────────────┐
«q_0: ┤ Delay(100[dt]) ├─────────────────────────────────────────────
« ├───────────────┬┘┌───┐┌────────────────┐┌───┐┌───────────────┐
«q_1: ┤ Delay(50[dt]) ├─┤ Y ├┤ Delay(100[dt]) ├┤ Y ├┤ Delay(50[dt]) ├
« └───────────────┘ └───┘└────────────────┘└───┘└───────────────┘
«q_2: ────────■──────────────────────────────────────────────────────
« ┌─┴─┐
«q_3: ──────┤ X ├────────────────────────────────────────────────────
« └───┘
"""
dd_sequence = [YGate(), YGate()]
pm = PassManager(
[
ALAPScheduleAnalysis(self.durations),
PadDynamicalDecoupling(self.durations, dd_sequence, skip_reset_qubits=False),
]
)
ghz4_dd = pm.run(self.ghz4)
expected = self.ghz4.copy()
expected = expected.compose(Delay(50), [1], front=True)
expected = expected.compose(Delay(162), [2], front=True)
expected = expected.compose(YGate(), [2], front=True)
expected = expected.compose(Delay(326), [2], front=True)
expected = expected.compose(YGate(), [2], front=True)
expected = expected.compose(Delay(162), [2], front=True)
expected = expected.compose(Delay(212), [3], front=True)
expected = expected.compose(YGate(), [3], front=True)
expected = expected.compose(Delay(426), [3], front=True)
expected = expected.compose(YGate(), [3], front=True)
expected = expected.compose(Delay(212), [3], front=True)
expected = expected.compose(Delay(100), [0])
expected = expected.compose(YGate(), [0])
expected = expected.compose(Delay(200), [0])
expected = expected.compose(YGate(), [0])
expected = expected.compose(Delay(100), [0])
expected = expected.compose(Delay(50), [1])
expected = expected.compose(YGate(), [1])
expected = expected.compose(Delay(100), [1])
expected = expected.compose(YGate(), [1])
expected = expected.compose(Delay(50), [1])
self.assertEqual(ghz4_dd, expected)
def test_insert_dd_ghz_xy4(self):
"""Test XY4 sequence of DD gates.
┌───┐ ┌───────────────┐ ┌───┐ ┌───────────────┐»
q_0: ──────┤ H ├─────────■──┤ Delay(37[dt]) ├──────┤ X ├──────┤ Delay(75[dt]) ├»
┌─────┴───┴─────┐ ┌─┴─┐└───────────────┘┌─────┴───┴─────┐└─────┬───┬─────┘»
q_1: ┤ Delay(50[dt]) ├─┤ X ├────────■────────┤ Delay(12[dt]) ├──────┤ X ├──────»
├───────────────┴┐└───┘ ┌─┴─┐ └───────────────┘ └───┘ »
q_2: ┤ Delay(750[dt]) ├───────────┤ X ├──────────────■─────────────────────────»
├────────────────┤ └───┘ ┌─┴─┐ »
q_3: ┤ Delay(950[dt]) ├────────────────────────────┤ X ├───────────────────────»
└────────────────┘ └───┘ »
« ┌───┐ ┌───────────────┐ ┌───┐ ┌───────────────┐»
«q_0: ──────┤ Y ├──────┤ Delay(76[dt]) ├──────┤ X ├──────┤ Delay(75[dt]) ├»
« ┌─────┴───┴─────┐└─────┬───┬─────┘┌─────┴───┴─────┐└─────┬───┬─────┘»
«q_1: ┤ Delay(25[dt]) ├──────┤ Y ├──────┤ Delay(26[dt]) ├──────┤ X ├──────»
« └───────────────┘ └───┘ └───────────────┘ └───┘ »
«q_2: ────────────────────────────────────────────────────────────────────»
« »
«q_3: ────────────────────────────────────────────────────────────────────»
« »
« ┌───┐ ┌───────────────┐
«q_0: ──────┤ Y ├──────┤ Delay(37[dt]) ├─────────────────
« ┌─────┴───┴─────┐└─────┬───┬─────┘┌───────────────┐
«q_1: ┤ Delay(25[dt]) ├──────┤ Y ├──────┤ Delay(12[dt]) ├
« └───────────────┘ └───┘ └───────────────┘
«q_2: ───────────────────────────────────────────────────
«
«q_3: ───────────────────────────────────────────────────
"""
dd_sequence = [XGate(), YGate(), XGate(), YGate()]
pm = PassManager(
[
ALAPScheduleAnalysis(self.durations),
PadDynamicalDecoupling(self.durations, dd_sequence),
]
)
ghz4_dd = pm.run(self.ghz4)
expected = self.ghz4.copy()
expected = expected.compose(Delay(50), [1], front=True)
expected = expected.compose(Delay(750), [2], front=True)
expected = expected.compose(Delay(950), [3], front=True)
expected = expected.compose(Delay(37), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(75), [0])
expected = expected.compose(YGate(), [0])
expected = expected.compose(Delay(76), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(75), [0])
expected = expected.compose(YGate(), [0])
expected = expected.compose(Delay(37), [0])
expected = expected.compose(Delay(12), [1])
expected = expected.compose(XGate(), [1])
expected = expected.compose(Delay(25), [1])
expected = expected.compose(YGate(), [1])
expected = expected.compose(Delay(26), [1])
expected = expected.compose(XGate(), [1])
expected = expected.compose(Delay(25), [1])
expected = expected.compose(YGate(), [1])
expected = expected.compose(Delay(12), [1])
self.assertEqual(ghz4_dd, expected)
def test_insert_midmeas_hahn_alap(self):
"""Test a single X gate as Hahn echo can absorb in the downstream circuit.
global phase: 3π/2
┌────────────────┐ ┌───┐ ┌────────────────┐»
q_0: ────────■─────────┤ Delay(625[dt]) ├───────┤ X ├───────┤ Delay(625[dt]) ├»
┌─┴─┐ └────────────────┘┌──────┴───┴──────┐└────────────────┘»
q_1: ──────┤ X ├───────────────■─────────┤ Delay(1000[dt]) ├────────■─────────»
┌─────┴───┴──────┐ ┌─┴─┐ └───────┬─┬───────┘ ┌─┴─┐ »
q_2: ┤ Delay(700[dt]) ├──────┤ X ├───────────────┤M├──────────────┤ X ├───────»
└────────────────┘ └───┘ └╥┘ └───┘ »
c: 1/═════════════════════════════════════════════╩═══════════════════════════»
0 »
« ┌───────────────┐
«q_0: ┤ U(0,π/2,-π/2) ├───■──
« └───────────────┘ ┌─┴─┐
«q_1: ──────────────────┤ X ├
« ┌────────────────┐└───┘
«q_2: ┤ Delay(700[dt]) ├─────
« └────────────────┘
«c: 1/═══════════════════════
"""
dd_sequence = [XGate()]
pm = PassManager(
[
ALAPScheduleAnalysis(self.durations),
PadDynamicalDecoupling(self.durations, dd_sequence),
]
)
midmeas_dd = pm.run(self.midmeas)
combined_u = UGate(0, -pi / 2, pi / 2)
expected = QuantumCircuit(3, 1)
expected.cx(0, 1)
expected.delay(625, 0)
expected.x(0)
expected.delay(625, 0)
expected.compose(combined_u, [0], inplace=True)
expected.delay(700, 2)
expected.cx(1, 2)
expected.delay(1000, 1)
expected.measure(2, 0)
expected.cx(1, 2)
expected.cx(0, 1)
expected.delay(700, 2)
expected.global_phase = pi / 2
self.assertEqual(midmeas_dd, expected)
# check the absorption into U was done correctly
self.assertEqual(Operator(combined_u), Operator(XGate()) & Operator(XGate()))
def test_insert_midmeas_hahn_asap(self):
"""Test a single X gate as Hahn echo can absorb in the upstream circuit.
┌──────────────────┐ ┌────────────────┐┌─────────┐»
q_0: ────────■─────────┤ U(3π/4,-π/2,π/2) ├─┤ Delay(600[dt]) ├┤ Rx(π/4) ├»
┌─┴─┐ └──────────────────┘┌┴────────────────┤└─────────┘»
q_1: ──────┤ X ├────────────────■──────────┤ Delay(1000[dt]) ├─────■─────»
┌─────┴───┴──────┐ ┌─┴─┐ └───────┬─┬───────┘ ┌─┴─┐ »
q_2: ┤ Delay(700[dt]) ├───────┤ X ├────────────────┤M├───────────┤ X ├───»
└────────────────┘ └───┘ └╥┘ └───┘ »
c: 1/═══════════════════════════════════════════════╩════════════════════»
0 »
« ┌────────────────┐
«q_0: ┤ Delay(600[dt]) ├──■──
« └────────────────┘┌─┴─┐
«q_1: ──────────────────┤ X ├
« ┌────────────────┐└───┘
«q_2: ┤ Delay(700[dt]) ├─────
« └────────────────┘
«c: 1/═══════════════════════
«
"""
dd_sequence = [RXGate(pi / 4)]
pm = PassManager(
[
ASAPScheduleAnalysis(self.durations),
PadDynamicalDecoupling(self.durations, dd_sequence),
]
)
midmeas_dd = pm.run(self.midmeas)
combined_u = UGate(3 * pi / 4, -pi / 2, pi / 2)
expected = QuantumCircuit(3, 1)
expected.cx(0, 1)
expected.compose(combined_u, [0], inplace=True)
expected.delay(600, 0)
expected.rx(pi / 4, 0)
expected.delay(600, 0)
expected.delay(700, 2)
expected.cx(1, 2)
expected.delay(1000, 1)
expected.measure(2, 0)
expected.cx(1, 2)
expected.cx(0, 1)
expected.delay(700, 2)
self.assertEqual(midmeas_dd, expected)
# check the absorption into U was done correctly
self.assertTrue(
Operator(XGate()).equiv(
Operator(UGate(3 * pi / 4, -pi / 2, pi / 2)) & Operator(RXGate(pi / 4))
)
)
def test_insert_ghz_uhrig(self):
"""Test custom spacing (following Uhrig DD [1]).
[1] Uhrig, G. "Keeping a quantum bit alive by optimized π-pulse sequences."
Physical Review Letters 98.10 (2007): 100504.
┌───┐ ┌──────────────┐ ┌───┐ ┌──────────────┐┌───┐»
q_0: ──────┤ H ├─────────■──┤ Delay(3[dt]) ├──────┤ X ├───────┤ Delay(8[dt]) ├┤ X ├»
┌─────┴───┴─────┐ ┌─┴─┐└──────────────┘┌─────┴───┴──────┐└──────────────┘└───┘»
q_1: ┤ Delay(50[dt]) ├─┤ X ├───────■────────┤ Delay(300[dt]) ├─────────────────────»
├───────────────┴┐└───┘ ┌─┴─┐ └────────────────┘ »
q_2: ┤ Delay(750[dt]) ├──────────┤ X ├──────────────■──────────────────────────────»
├────────────────┤ └───┘ ┌─┴─┐ »
q_3: ┤ Delay(950[dt]) ├───────────────────────────┤ X ├────────────────────────────»
└────────────────┘ └───┘ »
« ┌───────────────┐┌───┐┌───────────────┐┌───┐┌───────────────┐┌───┐┌───────────────┐»
«q_0: ┤ Delay(13[dt]) ├┤ X ├┤ Delay(16[dt]) ├┤ X ├┤ Delay(20[dt]) ├┤ X ├┤ Delay(16[dt]) ├»
« └───────────────┘└───┘└───────────────┘└───┘└───────────────┘└───┘└───────────────┘»
«q_1: ───────────────────────────────────────────────────────────────────────────────────»
« »
«q_2: ───────────────────────────────────────────────────────────────────────────────────»
« »
«q_3: ───────────────────────────────────────────────────────────────────────────────────»
« »
« ┌───┐┌───────────────┐┌───┐┌──────────────┐┌───┐┌──────────────┐
«q_0: ┤ X ├┤ Delay(13[dt]) ├┤ X ├┤ Delay(8[dt]) ├┤ X ├┤ Delay(3[dt]) ├
« └───┘└───────────────┘└───┘└──────────────┘└───┘└──────────────┘
«q_1: ────────────────────────────────────────────────────────────────
«
«q_2: ────────────────────────────────────────────────────────────────
«
«q_3: ────────────────────────────────────────────────────────────────
«
"""
n = 8
dd_sequence = [XGate()] * n
# uhrig specifies the location of the k'th pulse
def uhrig(k):
return np.sin(np.pi * (k + 1) / (2 * n + 2)) ** 2
# convert that to spacing between pulses (whatever finite duration pulses have)
spacing = []
for k in range(n):
spacing.append(uhrig(k) - sum(spacing))
spacing.append(1 - sum(spacing))
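# (Added note, not in the original test: uhrig(k) gives the fractional position of the
# k-th pulse in [0, 1], so each spacing entry is the gap from the previous pulse and the
# final append tops the list up so that sum(spacing) == 1.)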
pm = PassManager(
[
ALAPScheduleAnalysis(self.durations),
PadDynamicalDecoupling(self.durations, dd_sequence, qubits=[0], spacing=spacing),
]
)
ghz4_dd = pm.run(self.ghz4)
expected = self.ghz4.copy()
expected = expected.compose(Delay(50), [1], front=True)
expected = expected.compose(Delay(750), [2], front=True)
expected = expected.compose(Delay(950), [3], front=True)
expected = expected.compose(Delay(3), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(8), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(13), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(16), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(20), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(16), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(13), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(8), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(3), [0])
expected = expected.compose(Delay(300), [1])
self.assertEqual(ghz4_dd, expected)
def test_asymmetric_xy4_in_t2(self):
"""Test insertion of XY4 sequence with unbalanced spacing.
global phase: π
┌───┐┌───┐┌────────────────┐┌───┐┌────────────────┐┌───┐┌────────────────┐»
q_0: ┤ H ├┤ X ├┤ Delay(450[dt]) ├┤ Y ├┤ Delay(450[dt]) ├┤ X ├┤ Delay(450[dt]) ├»
└───┘└───┘└────────────────┘└───┘└────────────────┘└───┘└────────────────┘»
« ┌───┐┌────────────────┐┌───┐
«q_0: ┤ Y ├┤ Delay(450[dt]) ├┤ H ├
« └───┘└────────────────┘└───┘
"""
dd_sequence = [XGate(), YGate()] * 2
spacing = [0] + [1 / 4] * 4
pm = PassManager(
[
ALAPScheduleAnalysis(self.durations),
PadDynamicalDecoupling(self.durations, dd_sequence, spacing=spacing),
]
)
t2 = QuantumCircuit(1)
t2.h(0)
t2.delay(2000, 0)
t2.h(0)
expected = QuantumCircuit(1)
expected.h(0)
expected.x(0)
expected.delay(450, 0)
expected.y(0)
expected.delay(450, 0)
expected.x(0)
expected.delay(450, 0)
expected.y(0)
expected.delay(450, 0)
expected.h(0)
expected.global_phase = pi
t2_dd = pm.run(t2)
self.assertEqual(t2_dd, expected)
# check global phase is correct
self.assertEqual(Operator(t2), Operator(expected))
def test_dd_after_reset(self):
"""Test skip_reset_qubits option works.
┌─────────────────┐┌───┐┌────────────────┐┌───┐┌─────────────────┐»
q_0: ─|0>─┤ Delay(1000[dt]) ├┤ H ├┤ Delay(190[dt]) ├┤ X ├┤ Delay(1710[dt]) ├»
└─────────────────┘└───┘└────────────────┘└───┘└─────────────────┘»
« ┌───┐┌───┐
«q_0: ┤ X ├┤ H ├
« └───┘└───┘
"""
dd_sequence = [XGate(), XGate()]
spacing = [0.1, 0.9]
pm = PassManager(
[
ALAPScheduleAnalysis(self.durations),
PadDynamicalDecoupling(
self.durations, dd_sequence, spacing=spacing, skip_reset_qubits=True
),
]
)
t2 = QuantumCircuit(1)
t2.reset(0)
t2.delay(1000)
t2.h(0)
t2.delay(2000, 0)
t2.h(0)
expected = QuantumCircuit(1)
expected.reset(0)
expected.delay(1000)
expected.h(0)
expected.delay(190, 0)
expected.x(0)
expected.delay(1710, 0)
expected.x(0)
expected.h(0)
t2_dd = pm.run(t2)
self.assertEqual(t2_dd, expected)
def test_insert_dd_bad_sequence(self):
"""Test DD raises when non-identity sequence is inserted."""
dd_sequence = [XGate(), YGate()]
pm = PassManager(
[
ALAPScheduleAnalysis(self.durations),
PadDynamicalDecoupling(self.durations, dd_sequence),
]
)
with self.assertRaises(TranspilerError):
pm.run(self.ghz4)
@data(0.5, 1.5)
def test_dd_with_calibrations_with_parameters(self, param_value):
"""Check that calibrations in a circuit with parameters work fine."""
circ = QuantumCircuit(2)
circ.x(0)
circ.cx(0, 1)
circ.rx(param_value, 1)
rx_duration = int(param_value * 1000)
with pulse.build() as rx:
pulse.play(pulse.Gaussian(rx_duration, 0.1, rx_duration // 4), pulse.DriveChannel(1))
circ.add_calibration("rx", (1,), rx, params=[param_value])
durations = InstructionDurations([("x", None, 100), ("cx", None, 300)])
dd_sequence = [XGate(), XGate()]
pm = PassManager(
[ALAPScheduleAnalysis(durations), PadDynamicalDecoupling(durations, dd_sequence)]
)
self.assertEqual(pm.run(circ).duration, rx_duration + 100 + 300)
def test_insert_dd_ghz_xy4_with_alignment(self):
"""Test DD with pulse alignment constraints.
┌───┐ ┌───────────────┐ ┌───┐ ┌───────────────┐»
q_0: ──────┤ H ├─────────■──┤ Delay(40[dt]) ├──────┤ X ├──────┤ Delay(70[dt]) ├»
┌─────┴───┴─────┐ ┌─┴─┐└───────────────┘┌─────┴───┴─────┐└─────┬───┬─────┘»
q_1: ┤ Delay(50[dt]) ├─┤ X ├────────■────────┤ Delay(20[dt]) ├──────┤ X ├──────»
├───────────────┴┐└───┘ ┌─┴─┐ └───────────────┘ └───┘ »
q_2: ┤ Delay(750[dt]) ├───────────┤ X ├──────────────■─────────────────────────»
├────────────────┤ └───┘ ┌─┴─┐ »
q_3: ┤ Delay(950[dt]) ├────────────────────────────┤ X ├───────────────────────»
└────────────────┘ └───┘ »
« ┌───┐ ┌───────────────┐ ┌───┐ ┌───────────────┐»
«q_0: ──────┤ Y ├──────┤ Delay(70[dt]) ├──────┤ X ├──────┤ Delay(70[dt]) ├»
« ┌─────┴───┴─────┐└─────┬───┬─────┘┌─────┴───┴─────┐└─────┬───┬─────┘»
«q_1: ┤ Delay(20[dt]) ├──────┤ Y ├──────┤ Delay(20[dt]) ├──────┤ X ├──────»
« └───────────────┘ └───┘ └───────────────┘ └───┘ »
«q_2: ────────────────────────────────────────────────────────────────────»
« »
«q_3: ────────────────────────────────────────────────────────────────────»
« »
« ┌───┐ ┌───────────────┐
«q_0: ──────┤ Y ├──────┤ Delay(50[dt]) ├─────────────────
« ┌─────┴───┴─────┐└─────┬───┬─────┘┌───────────────┐
«q_1: ┤ Delay(20[dt]) ├──────┤ Y ├──────┤ Delay(20[dt]) ├
« └───────────────┘ └───┘ └───────────────┘
«q_2: ───────────────────────────────────────────────────
«
«q_3: ───────────────────────────────────────────────────
«
"""
dd_sequence = [XGate(), YGate(), XGate(), YGate()]
pm = PassManager(
[
ALAPScheduleAnalysis(self.durations),
PadDynamicalDecoupling(
self.durations,
dd_sequence,
pulse_alignment=10,
extra_slack_distribution="edges",
),
]
)
ghz4_dd = pm.run(self.ghz4)
expected = self.ghz4.copy()
expected = expected.compose(Delay(50), [1], front=True)
expected = expected.compose(Delay(750), [2], front=True)
expected = expected.compose(Delay(950), [3], front=True)
expected = expected.compose(Delay(40), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(70), [0])
expected = expected.compose(YGate(), [0])
expected = expected.compose(Delay(70), [0])
expected = expected.compose(XGate(), [0])
expected = expected.compose(Delay(70), [0])
expected = expected.compose(YGate(), [0])
expected = expected.compose(Delay(50), [0])
expected = expected.compose(Delay(20), [1])
expected = expected.compose(XGate(), [1])
expected = expected.compose(Delay(20), [1])
expected = expected.compose(YGate(), [1])
expected = expected.compose(Delay(20), [1])
expected = expected.compose(XGate(), [1])
expected = expected.compose(Delay(20), [1])
expected = expected.compose(YGate(), [1])
expected = expected.compose(Delay(20), [1])
self.assertEqual(ghz4_dd, expected)
def test_dd_can_sequentially_called(self):
"""Test if sequentially called DD pass can output the same circuit.
This test verifies:
- if global phase is properly propagated from the previous padding node.
- if node_start_time property is properly updated for new dag circuit.
"""
dd_sequence = [XGate(), YGate(), XGate(), YGate()]
pm1 = PassManager(
[
ALAPScheduleAnalysis(self.durations),
PadDynamicalDecoupling(self.durations, dd_sequence, qubits=[0]),
PadDynamicalDecoupling(self.durations, dd_sequence, qubits=[1]),
]
)
circ1 = pm1.run(self.ghz4)
pm2 = PassManager(
[
ALAPScheduleAnalysis(self.durations),
PadDynamicalDecoupling(self.durations, dd_sequence, qubits=[0, 1]),
]
)
circ2 = pm2.run(self.ghz4)
self.assertEqual(circ1, circ2)
def test_respect_target_instruction_constraints(self):
"""Test if DD pass does not pad delays for qubits that do not support delay instructions
and does not insert DD gates for qubits that do not support necessary gates.
See: https://github.com/Qiskit/qiskit-terra/issues/9993
"""
qc = QuantumCircuit(3)
qc.cx(0, 1)
qc.cx(1, 2)
target = Target(dt=1)
# Y is partially supported (not supported on qubit 2)
target.add_instruction(
XGate(), {(q,): InstructionProperties(duration=100) for q in range(2)}
)
target.add_instruction(
CXGate(),
{
(0, 1): InstructionProperties(duration=1000),
(1, 2): InstructionProperties(duration=1000),
},
)
# delays are not supported
# No DD instructions nor delays are padded due to no delay support in the target
pm_xx = PassManager(
[
ALAPScheduleAnalysis(target=target),
PadDynamicalDecoupling(dd_sequence=[XGate(), XGate()], target=target),
]
)
scheduled = pm_xx.run(qc)
self.assertEqual(qc, scheduled)
# Fails since Y is not supported in the target
with self.assertRaises(TranspilerError):
PassManager(
[
ALAPScheduleAnalysis(target=target),
PadDynamicalDecoupling(
dd_sequence=[XGate(), YGate(), XGate(), YGate()], target=target
),
]
)
# Add delay support to the target
target.add_instruction(Delay(Parameter("t")), {(q,): None for q in range(3)})
# No error but no DD on qubit 2 (just delay is padded) since X is not supported on it
scheduled = pm_xx.run(qc)
expected = QuantumCircuit(3)
expected.delay(1000, [2])
expected.cx(0, 1)
expected.cx(1, 2)
expected.delay(200, [0])
expected.x([0])
expected.delay(400, [0])
expected.x([0])
expected.delay(200, [0])
self.assertEqual(expected, scheduled)
if __name__ == "__main__":
unittest.main()
|
https://github.com/mathelatics/QGSS-2023-From-Theory-to-Implementations
|
mathelatics
|
from qiskit import *
%matplotlib inline
from qiskit.tools.visualization import plot_histogram
secret_number = '101001'
qc = QuantumCircuit(6+1, 6)
qc.h([0, 1, 2, 3, 4, 5])
qc.x(6)
qc.h(6)
qc.barrier()
qc.draw(output='mpl')
# building box of secret number
qc.cx(5, 6)
qc.cx(3, 6)
qc.cx(0, 6)
qc.draw(output='mpl')
qc.barrier()
qc.h([0, 1, 2, 3, 4, 5])
qc.barrier()
qc.measure([0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5])
qc.draw(output='mpl')
simulator = Aer.get_backend('qasm_simulator')
result = execute(qc, backend=simulator, shots=1).result()
counts = result.get_counts()
print(counts)
secret_number = '1000100110010101001'
qc = QuantumCircuit(len(secret_number)+1, len(secret_number))
qc.h(range(len(secret_number)))
qc.x(len(secret_number))
qc.h(len(secret_number))
qc.barrier()
for ii, yesno in enumerate(reversed(secret_number)):
    if yesno == '1':
        qc.cx(ii, len(secret_number))
qc.barrier()
qc.h(range(len(secret_number)))
qc.barrier()
qc.measure(range(len(secret_number)), range(len(secret_number)))
qc.draw(output='mpl')
simulator = Aer.get_backend('qasm_simulator')
result = execute(qc, backend=simulator, shots=1).result()
counts = result.get_counts()
print(counts)
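# Aside (not in the original notebook): for Bernstein-Vazirani the single
# measured bitstring should reproduce the secret, so a quick check is:
print(list(counts) == [secret_number])  # expected: True on the ideal simulator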
|
https://github.com/acfilok96/Quantum-Computation
|
acfilok96
|
from qiskit import *
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
circuit = QuantumCircuit(qr, cr)
%matplotlib inline
circuit.draw()
circuit.h(qr[0])
circuit.draw(output = 'mpl')
circuit.cx(qr[0], qr[1])
circuit.draw(output='mpl')
circuit.measure(qr, cr)
circuit.draw(output='mpl')
simulator = Aer.get_backend('qasm_simulator')
execute(circuit, backend=simulator)
result = execute(circuit, backend=simulator).result()
from qiskit.tools.visualization import plot_histogram
plot_histogram(result.get_counts(circuit))
# plot_histogram
IBMQ.load_account()
provider = IBMQ.get_provider('ibm-q')
backend = provider.get_backend('ibmq_16_melbourne')
job =execute(circuit, backend= backend)
from qiskit.tools.monitor import job_monitor
job_monitor(job)
result = job.result()
plot_histogram(result.get_counts(circuit))
|
https://github.com/abhik-99/Qiskit-Summer-School
|
abhik-99
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=wrong-import-order
"""Main Qiskit public functionality."""
import pkgutil
# First, check for required Python and API version
from . import util
# qiskit errors operator
from .exceptions import QiskitError
# The main qiskit operators
from qiskit.circuit import ClassicalRegister
from qiskit.circuit import QuantumRegister
from qiskit.circuit import QuantumCircuit
# pylint: disable=redefined-builtin
from qiskit.tools.compiler import compile # TODO remove after 0.8
from qiskit.execute import execute
# The qiskit.extensions.x imports needs to be placed here due to the
# mechanism for adding gates dynamically.
import qiskit.extensions
import qiskit.circuit.measure
import qiskit.circuit.reset
# Allow extending this namespace. Please note that currently this line needs
# to be placed *before* the wrapper imports or any non-import code AND *before*
# importing the package you want to allow extensions for (in this case `backends`).
__path__ = pkgutil.extend_path(__path__, __name__)
# Please note these are global instances, not modules.
from qiskit.providers.basicaer import BasicAer
# Try to import the Aer provider if installed.
try:
    from qiskit.providers.aer import Aer
except ImportError:
    pass
# Try to import the IBMQ provider if installed.
try:
    from qiskit.providers.ibmq import IBMQ
except ImportError:
    pass
from .version import __version__
from .version import __qiskit_version__
|
https://github.com/swe-train/qiskit__qiskit
|
swe-train
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A collection of discrete probability metrics."""
from __future__ import annotations
import numpy as np
def hellinger_distance(dist_p: dict, dist_q: dict) -> float:
    """Computes the Hellinger distance between
    two counts distributions.

    Parameters:
        dist_p (dict): First dict of counts.
        dist_q (dict): Second dict of counts.

    Returns:
        float: Distance

    References:
        `Hellinger Distance @ wikipedia <https://en.wikipedia.org/wiki/Hellinger_distance>`_
    """
    p_sum = sum(dist_p.values())
    q_sum = sum(dist_q.values())
    p_normed = {}
    for key, val in dist_p.items():
        p_normed[key] = val / p_sum
    q_normed = {}
    for key, val in dist_q.items():
        q_normed[key] = val / q_sum
    total = 0
    for key, val in p_normed.items():
        if key in q_normed:
            total += (np.sqrt(val) - np.sqrt(q_normed[key])) ** 2
            del q_normed[key]
        else:
            total += val
    total += sum(q_normed.values())
    dist = np.sqrt(total) / np.sqrt(2)
    return dist
def hellinger_fidelity(dist_p: dict, dist_q: dict) -> float:
    """Computes the Hellinger fidelity between
    two counts distributions.

    The fidelity is defined as :math:`\\left(1-H^{2}\\right)^{2}` where H is the
    Hellinger distance. This value is bounded in the range [0, 1].

    This is equivalent to the standard classical fidelity
    :math:`F(Q,P)=\\left(\\sum_{i}\\sqrt{p_{i}q_{i}}\\right)^{2}` that in turn
    is equal to the quantum state fidelity for diagonal density matrices.

    Parameters:
        dist_p (dict): First dict of counts.
        dist_q (dict): Second dict of counts.

    Returns:
        float: Fidelity

    Example:
        .. code-block::

            from qiskit import QuantumCircuit, execute, BasicAer
            from qiskit.quantum_info.analysis import hellinger_fidelity

            qc = QuantumCircuit(5, 5)
            qc.h(2)
            qc.cx(2, 1)
            qc.cx(2, 3)
            qc.cx(3, 4)
            qc.cx(1, 0)
            qc.measure(range(5), range(5))

            sim = BasicAer.get_backend('qasm_simulator')
            res1 = execute(qc, sim).result()
            res2 = execute(qc, sim).result()

            hellinger_fidelity(res1.get_counts(), res2.get_counts())

    References:
        `Quantum Fidelity @ wikipedia <https://en.wikipedia.org/wiki/Fidelity_of_quantum_states>`_

        `Hellinger Distance @ wikipedia <https://en.wikipedia.org/wiki/Hellinger_distance>`_
    """
    dist = hellinger_distance(dist_p, dist_q)
    return (1 - dist**2) ** 2
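# Aside (not part of the original module): a minimal usage sketch of the two
# functions above with hand-written counts dictionaries, so no backend is needed.
if __name__ == "__main__":
    p = {'00': 512, '11': 512}                        # ideal Bell-state counts
    q = {'00': 256, '01': 256, '10': 256, '11': 256}  # uniform counts
    d = hellinger_distance(p, q)  # equals sqrt(1 - sum_i sqrt(p_i * q_i)), here about 0.54
    f = hellinger_fidelity(p, q)  # equals (1 - d**2)**2, here about 0.50
    print(d, f)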
|
https://github.com/jcylim/QiskitProject
|
jcylim
|
# useful additional packages
import matplotlib.pyplot as plt
#%matplotlib inline
import numpy as np
from pprint import pprint
# importing QISKit
from qiskit import QuantumProgram, QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import available_backends, execute, register, least_busy
# import basic plot tools
from qiskit.tools.visualization import plot_histogram, circuit_drawer
qx_config = {
"APItoken": 'dcdb2d9414a625c1f57373c544add3711c78c3d7faf39397fe2c41887110e8b59caf81bcb2bc32714d936da41a261fea510f96df379afcbdfa9df6cc6bfe3829',
"url": 'https://quantumexperience.ng.bluemix.net/api'
}
backend = 'local_qasm_simulator' # run on local simulator by default
# register(qx_config['APItoken'], qx_config['url'])
# backend = least_busy(available_backends({'simulator': False, 'local': False}))
# print("the best backend is " + backend)
# Creating registers
sdq = QuantumRegister(2)
sdc = ClassicalRegister(2)
# Quantum circuit to make the shared entangled state
superdense = QuantumCircuit(sdq, sdc)
superdense.h(sdq[0])
superdense.cx(sdq[0], sdq[1])
# For 00, do nothing
# For 01, apply $X$
#shared.x(q[0])
# For 10, apply $Z$
#shared.z(q[0])
# For 11, apply $XZ$
superdense.z(sdq[0])
superdense.x(sdq[0])
superdense.barrier()
superdense.cx(sdq[0], sdq[1])
superdense.h(sdq[0])
superdense.measure(sdq[0], sdc[0])
superdense.measure(sdq[1], sdc[1])
superdense_job = execute(superdense, backend)
superdense_result = superdense_job.result()
plot_histogram(superdense_result.get_counts(superdense))
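# (Added note, not in the original script: with both Z and X applied above, the
# encoded message is '11', so on an ideal simulator every shot should return '11'.)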
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import IBMQ, transpile
from qiskit import QuantumCircuit
from qiskit_aer import AerSimulator
from qiskit.tools.visualization import plot_histogram
from qiskit.providers.fake_provider import FakeVigo
device_backend = FakeVigo()
# Construct quantum circuit
circ = QuantumCircuit(3, 3)
circ.h(0)
circ.cx(0, 1)
circ.cx(1, 2)
circ.measure([0, 1, 2], [0, 1, 2])
sim_ideal = AerSimulator()
# Execute and get counts
result = sim_ideal.run(transpile(circ, sim_ideal)).result()
counts = result.get_counts(0)
plot_histogram(counts, title='Ideal counts for 3-qubit GHZ state')
sim_vigo = AerSimulator.from_backend(device_backend)
# Transpile the circuit for the noisy basis gates
tcirc = transpile(circ, sim_vigo)
# Execute noisy simulation and get counts
result_noise = sim_vigo.run(tcirc).result()
counts_noise = result_noise.get_counts(0)
plot_histogram(counts_noise,
               title="Counts for 3-qubit GHZ state with device noise model")
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
# You can choose different colors for the real and imaginary parts of the density matrix.
from qiskit import QuantumCircuit
from qiskit.quantum_info import DensityMatrix
from qiskit.visualization import plot_state_city
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
state = DensityMatrix(qc)
plot_state_city(state, color=['midnightblue', 'crimson'], title="New State City")
|
https://github.com/swe-bench/Qiskit__qiskit
|
swe-bench
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Testing naming functionality of transpiled circuits"""
import unittest
from qiskit.circuit import QuantumCircuit
from qiskit.compiler import transpile
from qiskit import BasicAer
from qiskit.transpiler.exceptions import TranspilerError
from qiskit.test import QiskitTestCase
class TestNamingTranspiledCircuits(QiskitTestCase):
    """Testing the naming functionality for transpiled circuits."""

    def setUp(self):
        super().setUp()
        self.basis_gates = ["u1", "u2", "u3", "cx"]
        self.backend = BasicAer.get_backend("qasm_simulator")
        self.circuit0 = QuantumCircuit(name="circuit0")
        self.circuit1 = QuantumCircuit(name="circuit1")
        self.circuit2 = QuantumCircuit(name="circuit2")
        self.circuit3 = QuantumCircuit(name="circuit3")

    def test_single_circuit_name_singleton(self):
        """Test output_name with a single circuit

        Given a single circuit and an output name in form of a string, this test
        checks whether that string name is assigned to the transpiled circuit.
        """
        trans_cirkie = transpile(
            self.circuit0, basis_gates=self.basis_gates, output_name="transpiled-cirkie"
        )
        self.assertEqual(trans_cirkie.name, "transpiled-cirkie")

    def test_single_circuit_name_list(self):
        """Test singleton output_name and a single circuit

        Given a single circuit and an output name in form of a single-element
        list, this test checks whether the transpiled circuit is mapped with
        that assigned name in the list.
        If the list has more than one element, the test checks whether the
        transpile function raises an error.
        """
        trans_cirkie = transpile(
            self.circuit0, basis_gates=self.basis_gates, output_name=["transpiled-cirkie"]
        )
        self.assertEqual(trans_cirkie.name, "transpiled-cirkie")

    def test_single_circuit_and_multiple_name_list(self):
        """Test multiple output_name and a single circuit"""
        # If the list has multiple elements, the transpile function must raise an error
        with self.assertRaises(TranspilerError):
            transpile(
                self.circuit0,
                basis_gates=self.basis_gates,
                output_name=["cool-cirkie", "new-cirkie", "dope-cirkie", "awesome-cirkie"],
            )

    def test_multiple_circuits_name_singleton(self):
        """Test output_name raises an error if a single name is provided to a list of circuits

        Given multiple circuits and a single string as a name, this test checks
        whether the transpile function raises an error.
        """
        # Raise an error if a single name is given to multiple circuits
        with self.assertRaises(TranspilerError):
            transpile([self.circuit1, self.circuit2], self.backend, output_name="circ")

    def test_multiple_circuits_name_list(self):
        """Test output_name with a list of circuits

        Given multiple circuits and a list of output names, if
        len(names) == len(circuits), the test checks whether the transpile
        function assigns each element in the list to the respective circuit.
        If the lengths are not equal, the test checks whether the transpile
        function raises an error.
        """
        # combining multiple circuits
        circuits = [self.circuit1, self.circuit2, self.circuit3]
        # equal lengths
        names = ["awesome-circ1", "awesome-circ2", "awesome-circ3"]
        trans_circuits = transpile(circuits, self.backend, output_name=names)
        self.assertEqual(trans_circuits[0].name, "awesome-circ1")
        self.assertEqual(trans_circuits[1].name, "awesome-circ2")
        self.assertEqual(trans_circuits[2].name, "awesome-circ3")

    def test_greater_circuits_name_list(self):
        """Test output_names list greater than circuits list"""
        # combining multiple circuits
        circuits = [self.circuit1, self.circuit2, self.circuit3]
        # names list greater than circuits list
        names = ["awesome-circ1", "awesome-circ2", "awesome-circ3", "awesome-circ4"]
        with self.assertRaises(TranspilerError):
            transpile(circuits, self.backend, output_name=names)

    def test_smaller_circuits_name_list(self):
        """Test output_names list smaller than circuits list"""
        # combining multiple circuits
        circuits = [self.circuit1, self.circuit2, self.circuit3]
        # names list smaller than circuits list
        names = ["awesome-circ1", "awesome-circ2"]
        with self.assertRaises(TranspilerError):
            transpile(circuits, self.backend, output_name=names)


if __name__ == "__main__":
    unittest.main()
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit
qc = QuantumCircuit(12)
for idx in range(5):
qc.h(idx)
qc.cx(idx, idx+5)
qc.cx(1, 7)
qc.x(8)
qc.cx(1, 9)
qc.x(7)
qc.cx(1, 11)
qc.swap(6, 11)
qc.swap(6, 9)
qc.swap(6, 10)
qc.x(6)
qc.draw('mpl')
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import matplotlib.pyplot as plt
from qiskit import QuantumCircuit, transpile
from qiskit.providers.fake_provider import FakeAuckland
backend = FakeAuckland()
ghz = QuantumCircuit(15)
ghz.h(0)
ghz.cx(0, range(1, 15))
depths = []
gate_counts = []
non_local_gate_counts = []
levels = [str(x) for x in range(4)]
for level in range(4):
circ = transpile(ghz, backend, optimization_level=level)
depths.append(circ.depth())
gate_counts.append(sum(circ.count_ops().values()))
non_local_gate_counts.append(circ.num_nonlocal_gates())
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.bar(levels, depths, label='Depth')
ax1.set_xlabel("Optimization Level")
ax1.set_ylabel("Depth")
ax1.set_title("Output Circuit Depth")
ax2.bar(levels, gate_counts, label='Number of Circuit Operations')
ax2.bar(levels, non_local_gate_counts, label='Number of non-local gates')
ax2.set_xlabel("Optimization Level")
ax2.set_ylabel("Number of gates")
ax2.legend()
ax2.set_title("Number of output circuit gates")
fig.tight_layout()
plt.show()
|
https://github.com/bayesian-randomized-benchmarking/qiskit-advocates-bayes-RB
|
bayesian-randomized-benchmarking
|
#Import general libraries (needed for functions)
import numpy as np
import matplotlib.pyplot as plt
#from IPython import display
#Import Qiskit classes
import qiskit
from qiskit.tools.monitor import job_monitor
from qiskit import Aer
from qiskit.providers.aer.noise import NoiseModel
from qiskit import QuantumRegister, QuantumCircuit
#Import the RB Functions
import qiskit.ignis.verification.randomized_benchmarking as rb
# import the bayesian packages
import pymc3 as pm
import arviz as az
from scipy.optimize import curve_fit
import bayesian_fitter as bf
# initialize the Bayesian extension
%config InlineBackend.figure_format = 'retina'
# Initialize random number generator
RANDOM_SEED = 8927
np.random.seed(RANDOM_SEED)
az.style.use("arviz-darkgrid")
RB_process = "1_Q RB"
if RB_process in ["3_Q RB","2-3_Q RB"] :
#Number of qubits
nQ = 3
#The pattern covers 3 qubits: Q1,Q2,Q3.
#2Q RB on Q1,Q2 and 1Q RB on Q3
rb_pattern = [[1,2],[3]] # 3 qubits in total
#Do three times as many 1Q Cliffords
length_multiplier = [1,3]
#Interleaved Clifford gates (2-qubits and 1-qubit)
interleaved_gates = [['cx 0 1'],['x 2']]
elif RB_process == "2_Q RB":
#Number of qubits
nQ = 2
#There are 2 qubits: Q0,Q1.
#2Q RB Q0,Q1
rb_pattern = [[0,1]]
length_multiplier = 1
interleaved_gates = [['cx 0,1']]
elif RB_process == "1_Q RB":
#Number of qubits
nQ = 1
#There is 1 qubit: Q0
rb_pattern = [[0]]
length_multiplier = 1
interleaved_gates = [['x 0']]
#Clifford lengths of the RB sequences
nCliffs = [1, 50, 100, 200, 400, 600, 800, 1000, 1300, 1600]
#Number of seeds (random sequences)
nseeds=8
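# depolarizing scale factor (d-1)/d with d = 2**(number of qubits in the first pattern);
# it converts the fitted RB decay parameter alpha into an error per Clifford (EPC)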
scale = (2 ** len(rb_pattern[0]) - 1) / (2 ** len(rb_pattern[0]))
from qiskit import IBMQ
from qiskit import Aer
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
device = provider.get_backend('ibmq_lima') # type here hardware backend
properties = device.properties()
coupling_map = device.configuration().coupling_map
# use a noise model corresponding to the chosen real device backend
basis_gates = ['id', 'rz', 'sx', 'x', 'cx', 'reset']
hardware = device.name()
backend = Aer.get_backend('qasm_simulator')
shots = 2**9
noise_model = NoiseModel.from_backend(properties)
qregs_02 = QuantumRegister(2)
circ_02 = QuantumCircuit(qregs_02, name='circ_02')
#circ_02.h(qregs_02[0]) # booptrap! WIP!
circ_02.cx(qregs_02[0], qregs_02[1])
circ_02.draw()
qregs_1 = QuantumRegister(1)
circ_1 = QuantumCircuit(qregs_1, name='circ_1')
circ_1.x(qregs_1[0]) # booptrap! WIP!
circ_1.draw()
rb_opts = {}
rb_opts['rand_seed'] = 61946
rb_opts['length_vector'] = nCliffs
rb_opts['nseeds'] = nseeds
rb_opts['rb_pattern'] = rb_pattern
rb_opts['length_multiplier'] = length_multiplier
#rb_opts['align_cliffs'] = True
if RB_process in ["3_Q RB","2-3_Q RB"]:
rb_opts['interleaved_elem'] = [circ_02, circ_1]
elif RB_process == "2_Q RB":
rb_opts['interleaved_elem'] = [circ_02]
elif RB_process == "1_Q RB":
rb_opts['interleaved_elem'] = [circ_1]
rb_original_circs, xdata, rb_interleaved_circs = rb.randomized_benchmarking_seq(**rb_opts)
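# rb_original_circs[seed][length_index] and rb_interleaved_circs[seed][length_index] hold the
# generated RB circuits; xdata contains the Clifford lengths for each pattern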
#Original RB circuits
print (rb_original_circs[0][0])
#Interleaved RB circuits
print (rb_interleaved_circs[0][0])
retrieve_list = []
original_result_list, original_transpile_list = bf.get_and_run_seeds(rb_circs=rb_original_circs,
shots=shots,
backend = backend,
coupling_map = coupling_map,
basis_gates = basis_gates,
noise_model = noise_model,
retrieve_list=retrieve_list)
retrieve_list = []
interleaved_result_list, interleaved_transpile_list = bf.get_and_run_seeds(rb_circs=rb_interleaved_circs,
shots=shots,
backend = backend,
coupling_map = coupling_map,
basis_gates = basis_gates,
noise_model = noise_model,
retrieve_list=retrieve_list)
# retrieve counts; skip if model rerun
# if the model is rerun, recover the output data by assigning Y1 and Y2 from the printed arrays below instead of running this cell
Y1 = bf.get_count_data(original_result_list, nCliffs=nCliffs )
Y2 = bf.get_count_data(interleaved_result_list, nCliffs=nCliffs)
# output np.array Y1; skip if model rerun
Y1
# output np.array Y2; skip if model rerun
Y2
# skip if no model rerun
Y1 =np.array([[509, 504, 493, 474, 448, 417, 406, 391, 392, 367],
[507, 498, 495, 485, 461, 440, 421, 370, 382, 353],
[507, 499, 491, 469, 437, 433, 400, 395, 373, 359],
[507, 500, 484, 477, 451, 435, 399, 365, 374, 355],
[508, 500, 494, 477, 446, 429, 401, 394, 365, 369],
[506, 493, 496, 484, 457, 430, 401, 413, 374, 372],
[506, 500, 490, 472, 444, 435, 389, 389, 372, 337],
[506, 498, 492, 485, 462, 427, 392, 385, 368, 359]])
# skip if no model rerun
Y2 = np.array([[505, 488, 482, 444, 406, 377, 330, 310, 327, 270],
[507, 487, 485, 460, 414, 363, 354, 346, 297, 296],
[509, 498, 476, 444, 414, 386, 355, 347, 312, 291],
[509, 486, 478, 453, 413, 368, 354, 334, 306, 304],
[509, 494, 483, 445, 405, 366, 338, 356, 278, 268],
[508, 492, 474, 466, 417, 357, 340, 327, 321, 284],
[507, 490, 483, 452, 419, 390, 359, 341, 306, 293],
[509, 494, 477, 441, 392, 368, 357, 331, 324, 278]])
# function to optimize: the standard RB decay model P(m) = a * alpha**m + b
def lsf(x, a, alpha, b):
return a * alpha ** x + b
# curve fit
popt_s,pcov_s = curve_fit(lsf, np.array(nseeds*list(nCliffs)),
np.ravel(Y1)/shots,
bounds = ([scale-0.15,.9,1-scale-.15],
[scale+0.15,1.0,1-scale+.15]))
perr_s= np.sqrt(np.diag(pcov_s))
# get alpha and its sigma from the standard-RB least-squares fit
alpha_f = popt_s[1]
alpha_f_err = perr_s[1]
popt_s,perr_s
# curve fit
popt_i,pcov_i = curve_fit(lsf, np.array(nseeds*list(nCliffs)),
np.ravel(Y2)/shots,
bounds = ([scale-0.15,.9,1-scale-.15],
[scale+0.15,1.0,1-scale+.15]))
perr_i= np.sqrt(np.diag(pcov_i))
# get alpha and its sigma from the interleaved-RB least-squares fit
alphC_f = popt_i[1]
alphC_f_err = perr_i[1]
popt_i,perr_i
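# standard interleaved-RB estimate of the error per Clifford (EPC) of the interleaved gate:
# EPC = (d-1)/d * (1 - alpha_interleaved/alpha_standard)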
epc_est_f = scale*(1 - alphC_f/alpha_f)
epc_est_f_err = scale*(alphC_f/alpha_f)*(np.sqrt(alpha_f_err**2 + alphC_f_err**2))
# function to optimize: joint fit of the standard and interleaved data with shared a, b and alpha; the interleaved decay is alpha*p_tilde
def lsmf(x, a, alpha, p_tilde_m, b):
return x[1]*(a * alpha ** x[0] + b) + x[2]*(a * (alpha*p_tilde_m) ** x[0] + b)
# obtain the data
m_len = len(nCliffs)*nseeds
x0_lsmf = np.array(nseeds*2*list(nCliffs))
x1_lsmf = np.hstack((np.ones(m_len),np.zeros(m_len)))
x2_lsmf = np.hstack((np.zeros(m_len),np.ones(m_len)))
x_lsmf = np.vstack((x0_lsmf,x1_lsmf,x2_lsmf))
y_lsmf=np.hstack((np.ravel(Y1),np.ravel(Y2)))/shots
# curve fit
popt_m,pcov_m = curve_fit(lsmf, x_lsmf, y_lsmf,
bounds = ([scale-0.15,.9,.9,1-scale-.15],
[scale+0.15,1.0,1.0,1-scale+.15]))
perr_m = np.sqrt(np.diag(pcov_m))
# get alpha, p_tilde and their sigmas from the accelerated (joint) least-squares fit
alpha_fm = popt_m[1]
p_tilde_m = popt_m[2]
alpha_fm_err = perr_m[1]
p_tilde_m_err = perr_m[2]
popt_m,perr_m
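# in the joint ("accelerated") fit, p_tilde directly estimates alpha_interleaved/alpha_standard,
# so the EPC of the interleaved gate is (d-1)/d * (1 - p_tilde)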
epc_est_fm = scale*(1 - p_tilde_m)
epc_est_fm_err = scale*p_tilde_m_err
original_model = bf.get_bayesian_model(model_type="pooled",Y=Y1,shots=shots,m_gates=nCliffs,
mu_AB=[popt_s[0],popt_s[2]],cov_AB=[perr_s[0],perr_s[2]],
alpha_ref=alpha_f, alpha_upper=.999999, p_upper=.999999)
pm.model_to_graphviz(original_model)
trace_o = bf.get_trace(original_model, target_accept = .95)
azo_summary = bf.get_summary(original_model, trace_o)
azo_summary
alpha_original_p = azo_summary['mean']['alpha']
alpha_original_p_err = azo_summary['sd']['alpha']
interleaved_model = bf.get_bayesian_model(model_type="pooled",Y=Y2,shots=shots,m_gates=nCliffs,
mu_AB=[popt_i[0],popt_i[2]],cov_AB=[perr_i[0],perr_i[2]],
alpha_ref=alpha_f, alpha_upper=.999999, p_upper=.999999)
pm.model_to_graphviz(interleaved_model)
trace_i = bf.get_trace(interleaved_model, target_accept = .95)
azi_summary = bf.get_summary(interleaved_model, trace_i)
azi_summary
alpha_c_p = azi_summary['mean']['alpha']
alpha_c_p_err = azi_summary['sd']['alpha']
epc_est_p = scale*(1 - alpha_c_p/alpha_original_p)
epc_est_p_err = scale*(alpha_c_p/alpha_original_p)*(np.sqrt(alpha_original_p_err**2 + alpha_c_p_err**2))
Y = np.vstack((Y1,Y2))
RvsI = np.vstack((np.ones_like(Y1),np.zeros_like(Y2)))
IvsR = np.vstack((np.zeros_like(Y1),np.ones_like(Y2)))
tilde =bf.get_bayesian_model("tilde",Y=Y,shots=shots, m_gates=nCliffs,
alpha_ref=alpha_fm, p_testval= p_tilde_m,
mu_AB=[popt_m[0],popt_m[3]],cov_AB=[perr_m[0],perr_m[3]],
RvsI=RvsI,IvsR=IvsR, alpha_upper=.999999, p_upper=.999999)
pm.model_to_graphviz(tilde)
trace_t = bf.get_trace(tilde, target_accept = .95)
azt_summary = bf.get_summary(tilde, trace_t)
azt_summary
epc_est_a = scale*(1 - azt_summary['mean']['p_tilde'])
epc_est_a_err = scale* (azt_summary['sd']['p_tilde'])
epc_calib = 3.364E-04 # not for simulation
# compare LSF and SMC
print("Model: Frequentist Bayesian ")
print(" two-run accelerated two-run accelerated ")
print("EPC {0:.5f} {1:.5f} {2:.5f} {3:.5f} "
.format(epc_est_f, epc_est_fm, epc_est_p, epc_est_a))
print("± sigma ± {0:.5f} ± {1:.5f} ± {2:.5f} ± {3:.5f} "
.format(epc_est_f_err, epc_est_fm_err, epc_est_p_err, epc_est_a_err))
# obtain EPC from alpha (used by plot_posterior)
def alpha_to_EPC(alpha):
return scale*(1-alpha)
# for refering the interleaved gate in the title of the graphs
intl_g=str(interleaved_gates[0][0][0:2])+str(rb_pattern[0][0:2])
if RB_process in ["3_Q RB","2-3_Q RB"] :
intl_g=intl_g+"<"+str(interleaved_gates[1][0][0:1]+str(rb_pattern[1][0:2]))
import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
with tilde:
ax = az.plot_posterior(trace_t, var_names=['p_tilde'], round_to=4, point_estimate=None,
transform = alpha_to_EPC)
ax.set_xlim(0.0, 0.0005)
plt.axvline(x=epc_est_fm,color='red',ls="-")
plt.axvline(x=epc_est_p,color='orange',ls="-")
plt.axvline(x=epc_est_f,color='cyan',ls="-")
if epc_calib > 0.0:
plt.axvline(x=epc_calib,color='green',ls=":")
plt.axvline(x=epc_est_a,color='blue',ls=":")
plt.title(RB_process +' $accelerated$, gate: ' + intl_g\
+", "+hardware+', backend: '+backend.name(),
fontsize=12)
Bayes_legend = "EPC Accelerated SMC: {0:1.3e} ({1:1.3e})".format(epc_est_a, epc_est_a_err)
Bayes2_legend = "EPC SMC 2-runs: {0:1.3e} ({1:1.3e})".format(epc_est_p, epc_est_p_err)
Fitter_legend = "EPC LSF 2-runs: {0:1.3e} ({1:1.3e})".format(epc_est_f, epc_est_f_err)
LSM_legend = "EPC Accelerated LSF: {0:1.3e} ({1:1.3e})".format(epc_est_fm, epc_est_fm_err)
Cal_legend = "EPC Calibration: {0:1.3e}".format(epc_calib)
if epc_calib > 0.0:
plt.legend((Bayes_legend, "$Higher\; density\; interval$ HDI",
LSM_legend,Bayes2_legend,
Fitter_legend,Cal_legend), fontsize=10 )
else:
plt.legend((Bayes_legend, "$Higher\; density\; interval$ HDI",
LSM_legend, Bayes2_legend,
Fitter_legend), fontsize=10 )
import matplotlib.pyplot as plt # seems we need to reimport for replot WIP
fig, ax = plt.subplots(1, 1)  # use a separate axes variable so the pyplot module is not shadowed
ax.set_ylabel("Ground State Population")
ax.set_xlabel("Number of Cliffords")
for i_seed in range(nseeds):
    ax.scatter(nCliffs, Y1[i_seed,:]/shots, label = "data", marker="x",color="b")
    ax.scatter(nCliffs, Y2[i_seed,:]/shots, label = "data", marker="+",color="r")
ax.plot(nCliffs,azt_summary['mean']['AB[0]']*azt_summary['mean']['alpha']**nCliffs+\
        azt_summary['mean']['AB[1]'],'--',color="b")
ax.plot(nCliffs,azt_summary['mean']['AB[0]']*(azt_summary['mean']['alpha']*azt_summary['mean']['p_tilde'])**\
        nCliffs+azt_summary['mean']['AB[1]'],'--',color="r")
ax.legend(("Standard",
           "Interleaved"))
ax.set_title(RB_process +' SMC $accelerated$, gate: ' + intl_g\
             +", "+hardware+', backend: '+backend.name(),
             fontsize=14);
import qiskit.tools.jupyter
%qiskit_version_table
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit
qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])
qc.draw('mpl')
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit_nature.second_q.drivers import PySCFDriver
driver = PySCFDriver(
atom="H 0 0 0; H 0 0 0.72" # Two Hydrogen atoms, 0.72 Angstrom apart
)
molecule = driver.run()
from qiskit_nature.second_q.mappers import QubitConverter, ParityMapper
qubit_converter = QubitConverter(ParityMapper())
hamiltonian = qubit_converter.convert(molecule.second_q_ops()[0])
from qiskit.algorithms.minimum_eigensolvers import NumPyMinimumEigensolver
sol = NumPyMinimumEigensolver().compute_minimum_eigenvalue(hamiltonian)
real_solution = molecule.interpret(sol)
real_solution.groundenergy
from qiskit_ibm_runtime import QiskitRuntimeService, Estimator, Session, Options
service = QiskitRuntimeService()
backend = "ibmq_qasm_simulator"
from qiskit.algorithms.minimum_eigensolvers import VQE
# Use RealAmplitudes circuit to create trial states
from qiskit.circuit.library import RealAmplitudes
ansatz = RealAmplitudes(num_qubits=2, reps=2)
# Search for better states using SPSA algorithm
from qiskit.algorithms.optimizers import SPSA
optimizer = SPSA(150)
# Set a starting point for reproducibility
import numpy as np
np.random.seed(6)
initial_point = np.random.uniform(-np.pi, np.pi, 12)
# Create an object to store intermediate results
from dataclasses import dataclass
@dataclass
class VQELog:
values: list
parameters: list
def update(self, count, parameters, mean, _metadata):
self.values.append(mean)
self.parameters.append(parameters)
print(f"Running circuit {count} of ~350", end="\r", flush=True)
log = VQELog([],[])
# Main calculation
with Session(service=service, backend=backend) as session:
options = Options()
options.optimization_level = 3
vqe = VQE(Estimator(session=session, options=options),
ansatz, optimizer, callback=log.update, initial_point=initial_point)
result = vqe.compute_minimum_eigenvalue(hamiltonian)
print("Experiment complete.".ljust(30))
print(f"Raw result: {result.optimal_value}")
if 'simulator' not in backend:
# Run once with ZNE error mitigation
options.resilience_level = 2
vqe = VQE(Estimator(session=session, options=options),
ansatz, SPSA(1), initial_point=result.optimal_point)
result = vqe.compute_minimum_eigenvalue(hamiltonian)
print(f"Mitigated result: {result.optimal_value}")
import matplotlib.pyplot as plt
plt.rcParams["font.size"] = 14
# Plot energy and reference value
plt.figure(figsize=(12, 6))
plt.plot(log.values, label="Estimator VQE")
plt.axhline(y=real_solution.groundenergy, color="tab:red", ls="--", label="Target")
plt.legend(loc="best")
plt.xlabel("Iteration")
plt.ylabel("Energy [H]")
plt.title("VQE energy")
plt.show()
import qiskit_ibm_runtime
qiskit_ibm_runtime.version.get_version_info()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/swe-train/qiskit__qiskit
|
swe-train
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Tests for Layer1Q implementation.
"""
import unittest
from random import randint
import test.python.transpiler.aqc.fast_gradient.utils_for_testing as tut
import numpy as np
import qiskit.transpiler.synthesis.aqc.fast_gradient.layer as lr
from qiskit.transpiler.synthesis.aqc.fast_gradient.pmatrix import PMatrix
from qiskit.test import QiskitTestCase
class TestLayer1q(QiskitTestCase):
"""
Tests for Layer1Q class.
"""
max_num_qubits = 5 # maximum number of qubits in tests
num_repeats = 50 # number of repetitions in tests
def setUp(self):
super().setUp()
np.random.seed(0x0696969)
def test_layer1q_matrix(self):
"""
Tests: (1) the correctness of Layer1Q matrix construction;
(2) matrix multiplication interleaved with permutations.
"""
mat_kind = "complex"
eps = 100.0 * np.finfo(float).eps
max_rel_err = 0.0
for n in range(2, self.max_num_qubits + 1):
dim = 2**n
iden = tut.eye_int(n)
for k in range(n):
m_mat = tut.rand_matrix(dim=dim, kind=mat_kind)
t_mat, g_mat = tut.make_test_matrices2x2(n=n, k=k, kind=mat_kind)
lmat = lr.Layer1Q(num_qubits=n, k=k, g2x2=g_mat)
g2, perm, inv_perm = lmat.get_attr()
self.assertTrue(m_mat.dtype == t_mat.dtype == g_mat.dtype == g2.dtype)
self.assertTrue(np.all(g_mat == g2))
self.assertTrue(np.all(iden[perm].T == iden[inv_perm]))
g_mat = np.kron(tut.eye_int(n - 1), g_mat)
# T == P^t @ G @ P.
err = tut.relative_error(t_mat, iden[perm].T @ g_mat @ iden[perm])
self.assertLess(err, eps, "err = {:0.16f}".format(err))
max_rel_err = max(max_rel_err, err)
# Multiplication by permutation matrix of the left can be
# replaced by row permutations.
tm = t_mat @ m_mat
err1 = tut.relative_error(iden[perm].T @ g_mat @ m_mat[perm], tm)
err2 = tut.relative_error((g_mat @ m_mat[perm])[inv_perm], tm)
# Multiplication by permutation matrix of the right can be
# replaced by column permutations.
mt = m_mat @ t_mat
err3 = tut.relative_error(m_mat @ iden[perm].T @ g_mat @ iden[perm], mt)
err4 = tut.relative_error((m_mat[:, perm] @ g_mat)[:, inv_perm], mt)
self.assertTrue(
err1 < eps and err2 < eps and err3 < eps and err4 < eps,
"err1 = {:f}, err2 = {:f}, "
"err3 = {:f}, err4 = {:f}".format(err1, err2, err3, err4),
)
max_rel_err = max(max_rel_err, err1, err2, err3, err4)
def test_pmatrix_class(self):
"""
Test the class PMatrix.
"""
_eps = 100.0 * np.finfo(float).eps
mat_kind = "complex"
max_rel_err = 0.0
for n in range(2, self.max_num_qubits + 1):
dim = 2**n
tmp1 = np.ndarray((dim, dim), dtype=np.cfloat)
tmp2 = tmp1.copy()
for _ in range(self.num_repeats):
k0 = randint(0, n - 1)
k1 = randint(0, n - 1)
k2 = randint(0, n - 1)
k3 = randint(0, n - 1)
k4 = randint(0, n - 1)
t0, g0 = tut.make_test_matrices2x2(n=n, k=k0, kind=mat_kind)
t1, g1 = tut.make_test_matrices2x2(n=n, k=k1, kind=mat_kind)
t2, g2 = tut.make_test_matrices2x2(n=n, k=k2, kind=mat_kind)
t3, g3 = tut.make_test_matrices2x2(n=n, k=k3, kind=mat_kind)
t4, g4 = tut.make_test_matrices2x2(n=n, k=k4, kind=mat_kind)
c0 = lr.Layer1Q(num_qubits=n, k=k0, g2x2=g0)
c1 = lr.Layer1Q(num_qubits=n, k=k1, g2x2=g1)
c2 = lr.Layer1Q(num_qubits=n, k=k2, g2x2=g2)
c3 = lr.Layer1Q(num_qubits=n, k=k3, g2x2=g3)
c4 = lr.Layer1Q(num_qubits=n, k=k4, g2x2=g4)
m_mat = tut.rand_matrix(dim=dim, kind=mat_kind)
ttmtt = t0 @ t1 @ m_mat @ np.conj(t2).T @ np.conj(t3).T
pmat = PMatrix(n)
pmat.set_matrix(m_mat)
pmat.mul_left_q1(layer=c1, temp_mat=tmp1)
pmat.mul_left_q1(layer=c0, temp_mat=tmp1)
pmat.mul_right_q1(layer=c2, temp_mat=tmp1, dagger=True)
pmat.mul_right_q1(layer=c3, temp_mat=tmp1, dagger=True)
alt_ttmtt = pmat.finalize(temp_mat=tmp1)
err1 = tut.relative_error(alt_ttmtt, ttmtt)
self.assertLess(err1, _eps, "relative error: {:f}".format(err1))
prod = np.cfloat(np.trace(ttmtt @ t4))
alt_prod = pmat.product_q1(layer=c4, tmp1=tmp1, tmp2=tmp2)
err2 = abs(alt_prod - prod) / abs(prod)
self.assertLess(err2, _eps, "relative error: {:f}".format(err2))
max_rel_err = max(max_rel_err, err1, err2)
if __name__ == "__main__":
unittest.main()
|
https://github.com/jeevesh2002/QuantumKatasQiskit
|
jeevesh2002
|
# Run this cell using Ctrl+Enter (⌘+Enter on Mac).
from testing import exercise
from typing import Tuple
import math
Complex = Tuple[float, float]
Polar = Tuple[float, float]
@exercise
def imaginary_power(n : int) -> int:
# If n is divisible by 4
if n % 4 == 0:
return 1
else:
return -1
@exercise
def complex_add(x : Complex, y : Complex) -> Complex:
# You can extract elements from a tuple like this
a = x[0]
b = x[1]
c = y[0]
d = y[1]
# This creates a new variable and stores the real component into it
real = a + c
# Replace the ... with code to calculate the imaginary component
imaginary = b + d
# You can create a tuple like this, with the real and imaginary part of the number
ans = (real, imaginary)
return ans
@exercise
def complex_mult(x : Complex, y : Complex) -> Complex:
# Here is another, more compact way of extracting the parts of the complex number:
(a, b) = x
(c, d) = y
real = (a * c) - (b * d)
imaginary = (a * d) + (c * b)
# Instead of creating a separate tuple with the result, we return the tuple directly
return (real, imaginary)
@exercise
def conjugate(x : Complex) -> Complex:
real = x[0]
# The sign can be easily flipped with the unary minus operator
imaginary = - x[1]
return (real, imaginary)
@exercise
def complex_div(x : Complex, y : Complex) -> Complex:
(a, b) = x
(c, d) = y
# We will use the denominator multiple times, so we'll store the value in a separate variable,
# to make the code easier to use and less error prone.
# Here we use ** to raise c and d to the second power.
denominator = (c ** 2) + (d ** 2)
real = ((a * c) + (b * d)) / denominator
imaginary = ((a * (-d) ) + (c * b)) / denominator
return (real, imaginary)
@exercise
def modulus(x : Complex) -> float:
return math.sqrt(x[0] ** 2 + x[1] ** 2)
@exercise
def complex_exp(x : Complex) -> Complex:
(a, b) = x
expa = math.e ** a
real = expa * math.cos(b)
imaginary = expa * math.sin(b)
return (real, imaginary)
@exercise
def complex_exp_real(r : float, x : Complex) -> Complex:
# Since ln(r) is only defined for positive numbers, we check for this special case.
# If r = 0, raising it to any power will give 0.
# Calling return before the end of the function will not execute the rest of the function.
if (r == 0):
return (0,0)
(a, b) = x
# Raise r to the power of a
ra = r ** a
# Natural logarithm of r
lnr = math.log(r)
real = ra * math.cos(b * lnr)
imaginary = ra * math.sin(b * lnr)
return (real, imaginary)
@exercise
def polar_convert(x : Complex) -> Polar:
(a, b) = x
r = math.sqrt(a**2 + b **2)
theta = math.atan2(b, a)
return (r, theta)
@exercise
def cartesian_convert(x : Polar) -> Complex:
(r, theta) = x
real = r * math.cos(theta)
imaginary = r * math.sin(theta)
return (real, imaginary)
@exercise
def polar_mult(x : Polar, y : Polar) -> Polar:
(r1, theta1) = x
(r2, theta2) = y
radius = r1 * r2
angle = theta1 + theta2
# If the calculated angle is larger than pi, we will subtract 2pi
if (angle > math.pi):
# Reassign the value for angle
angle = angle - 2.0 * math.pi
# If the calculated angle is smaller than -pi, we will add 2 pi
elif (angle <= -math.pi):
angle = angle + 2.0 * math.pi
return (radius, angle)
@exercise
def complex_exp_arbitrary(x : Complex, y : Complex) -> Complex:
(a, b) = x
(c, d) = y
# Convert x to polar form
r = math.sqrt(a ** 2 + b ** 2)
theta = math.atan2(b, a)
# Special case for r = 0
if (r == 0):
return (0, 0)
lnr = math.log(r)
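# Using x^w = e^(w ln x) with ln x = ln r + i*theta: expanding (c + d*i)*(ln r + i*theta)
# gives a real exponent c*ln(r) - d*theta (the modulus) and a phase d*ln(r) + c*theta,
# which are exactly the expressions computed below.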
exponent = math.exp(lnr * c - d * theta)
real = exponent * (math.cos(lnr * d + theta * c ))
imaginary = exponent * (math.sin(lnr * d + theta * c))
return (real, imaginary)
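# Quick illustrative sanity checks for the helpers above (worked out by hand; not part of
# the kata framework):
#   complex_mult((1, 2), (3, 4))  -> (-5, 10)
#   complex_div((1, 2), (3, 4))   -> (0.44, 0.08)
#   polar_convert((0, 1))         -> (1.0, pi/2)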
|
https://github.com/UST-QuAntiL/qiskit-service
|
UST-QuAntiL
|
# ******************************************************************************
# Copyright (c) 2021 University of Stuttgart
#
# See the NOTICE file(s) distributed with this work for additional
# information regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import unittest
import os
from app.config import basedir
from app import app, db
import qiskit
class TranspileTestCase(unittest.TestCase):
def setUp(self):
# setup environment variables for testing
app.config['TESTING'] = True
app.config['WTF_CSRF_ENABLED'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///" + os.path.join(basedir, 'test.db')
self.client = app.test_client()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_version(self):
response = self.client.get('/qiskit-service/api/v1.0/version')
self.assertEqual(response.status_code, 200)
json_data = response.get_json()
self.assertTrue("version" in json_data)
self.assertEqual(json_data['version'], "1.0")
@unittest.skip("Has to be adapted as the benchmarking via Qiskit ignis is deprecated")
def test_randomize_full_request(self):
# prepare the request
request = {
'qpu-name': 'ibmq_qasm_simulator',
'number-of-qubits': 1,
'min-depth-of-circuit': 1,
'max-depth-of-circuit': 1,
'number-of-circuits': 1,
'clifford': False,
'shots': 1024,
'token': os.environ["QISKIT_TOKEN"]
}
# send the request
response = self.client.post('/qiskit-service/api/v1.0/randomize', json=request)
self.assertEqual(response.status_code, 200)
json_data = response.get_json()
self.assertEqual(1, len(json_data))
self.assertEqual(3, len(json_data[0]))
print(json_data[0])
self.assertIn("result-benchmark", json_data[0])
self.assertIn("result-simulator", json_data[0])
self.assertIn("result-real-backend", json_data[0])
@unittest.skip("Has to be adapted as the benchmarking via Qiskit ignis is deprecated")
def test_randomize_no_shots_request(self):
# prepare the request
request = {
'qpu-name': 'ibmq_qasm_simulator',
'number-of-qubits': 1,
'min-depth-of-circuit': 1,
'max-depth-of-circuit': 1,
'number-of-circuits': 1,
'clifford': False,
'token': os.environ["QISKIT_TOKEN"]
}
# send the request
response = self.client.post('/qiskit-service/api/v1.0/randomize', json=request)
self.assertEqual(response.status_code, 200)
json_data = response.get_json()
self.assertEqual(1, len(json_data))
self.assertEqual(3, len(json_data[0]))
self.assertIn("result-benchmark", json_data[0])
self.assertIn("result-simulator", json_data[0])
self.assertIn("result-real-backend", json_data[0])
@unittest.skip("Has to be adapted as the benchmarking via Qiskit ignis is deprecated")
def test_randomize_no_clifford_request(self):
# prepare the request
request = {
'qpu-name': 'ibmq_qasm_simulator',
'number-of-qubits': 1,
'min-depth-of-circuit': 1,
'max-depth-of-circuit': 1,
'number-of-circuits': 1,
'token': os.environ["QISKIT_TOKEN"]
}
# send the request
response = self.client.post('/qiskit-service/api/v1.0/randomize', json=request)
self.assertEqual(response.status_code, 200)
json_data = response.get_json()
self.assertEqual(1, len(json_data))
self.assertEqual(3, len(json_data[0]))
self.assertIn("result-benchmark", json_data[0])
self.assertIn("result-simulator", json_data[0])
self.assertIn("result-real-backend", json_data[0])
@unittest.skip("Has to be adapted as the benchmarking via Qiskit ignis is deprecated")
def test_randomize_clifford_full_request(self):
# prepare the request
request = {
'qpu-name': 'ibmq_qasm_simulator',
'number-of-qubits': 1,
'min-depth-of-circuit': 1,
'max-depth-of-circuit': 1,
'number-of-circuits': 1,
'clifford': True,
'token': os.environ["QISKIT_TOKEN"]
}
# send the request
response = self.client.post('/qiskit-service/api/v1.0/randomize', json=request)
self.assertEqual(response.status_code, 200)
json_data = response.get_json()
self.assertEqual(1, len(json_data))
self.assertEqual(3, len(json_data[0]))
self.assertIn("result-benchmark", json_data[0])
self.assertIn("result-simulator", json_data[0])
self.assertIn("result-real-backend", json_data[0])
if __name__ == "__main__":
unittest.main()
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
import numpy as np
from qiskit_nature.second_q.hamiltonians import QuadraticHamiltonian
# create Hamiltonian
hermitian_part = np.array(
[
[1.0, 2.0, 0.0, 0.0],
[2.0, 1.0, 2.0, 0.0],
[0.0, 2.0, 1.0, 2.0],
[0.0, 0.0, 2.0, 1.0],
]
)
antisymmetric_part = np.array(
[
[0.0, 3.0, 0.0, 0.0],
[-3.0, 0.0, 3.0, 0.0],
[0.0, -3.0, 0.0, 3.0],
[0.0, 0.0, -3.0, 0.0],
]
)
constant = 4.0
hamiltonian = QuadraticHamiltonian(
hermitian_part=hermitian_part,
antisymmetric_part=antisymmetric_part,
constant=constant,
)
# convert it to a FermionicOp and print it
hamiltonian_ferm = hamiltonian.second_q_op()
print(hamiltonian_ferm)
# get the transformation matrix W and orbital energies {epsilon_j}
(
transformation_matrix,
orbital_energies,
transformed_constant,
) = hamiltonian.diagonalizing_bogoliubov_transform()
print(f"Shape of matrix W: {transformation_matrix.shape}")
print(f"Orbital energies: {orbital_energies}")
print(f"Transformed constant: {transformed_constant}")
from qiskit_nature.second_q.circuit.library import FermionicGaussianState
occupied_orbitals = (0, 2)
eig = np.sum(orbital_energies[list(occupied_orbitals)]) + transformed_constant
print(f"Eigenvalue: {eig}")
circuit = FermionicGaussianState(transformation_matrix, occupied_orbitals=occupied_orbitals)
circuit.draw("mpl")
from qiskit.quantum_info import Statevector
from qiskit_nature.second_q.mappers import JordanWignerMapper
# simulate the circuit to get the final state
state = np.array(Statevector(circuit))
# convert the Hamiltonian to a matrix
hamiltonian_jw = JordanWignerMapper().map(hamiltonian_ferm).to_matrix()
# check that the state is an eigenvector with the expected eigenvalue
np.testing.assert_allclose(hamiltonian_jw @ state, eig * state, atol=1e-8)
# create Hamiltonian
hermitian_part = np.array(
[
[1.0, 2.0, 0.0, 0.0],
[2.0, 1.0, 2.0, 0.0],
[0.0, 2.0, 1.0, 2.0],
[0.0, 0.0, 2.0, 1.0],
]
)
constant = 4.0
hamiltonian = QuadraticHamiltonian(
hermitian_part=hermitian_part,
constant=constant,
)
print(f"Hamiltonian conserves particle number: {hamiltonian.conserves_particle_number()}")
# get the transformation matrix W and orbital energies {epsilon_j}
(
transformation_matrix,
orbital_energies,
transformed_constant,
) = hamiltonian.diagonalizing_bogoliubov_transform()
print(f"Shape of matrix W: {transformation_matrix.shape}")
print(f"Orbital energies: {orbital_energies}")
print(f"Transformed constant: {transformed_constant}")
from qiskit_nature.second_q.circuit.library import SlaterDeterminant
occupied_orbitals = (0, 2)
eig = np.sum(orbital_energies[list(occupied_orbitals)]) + transformed_constant
print(f"Eigenvalue: {eig}")
circuit = SlaterDeterminant(transformation_matrix[list(occupied_orbitals)])
circuit.draw("mpl")
from qiskit_nature.second_q.circuit.library import BogoliubovTransform
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.quantum_info import random_hermitian, random_statevector, state_fidelity
from scipy.linalg import expm
# create Hamiltonian
n_modes = 5
hermitian_part = np.array(random_hermitian(n_modes))
hamiltonian = QuadraticHamiltonian(hermitian_part=hermitian_part)
# diagonalize Hamiltonian
(
transformation_matrix,
orbital_energies,
_,
) = hamiltonian.diagonalizing_bogoliubov_transform()
# set simulation time and construct time evolution circuit
time = 1.0
register = QuantumRegister(n_modes)
circuit = QuantumCircuit(register)
bog_circuit = BogoliubovTransform(transformation_matrix)
# change to the diagonal basis of the Hamiltonian
circuit.append(bog_circuit.inverse(), register)
# perform time evolution by applying z rotations
for q, energy in zip(register, orbital_energies):
circuit.rz(-energy * time, q)
# change back to the original basis
circuit.append(bog_circuit, register)
# simulate the circuit
initial_state = random_statevector(2**n_modes)
final_state = initial_state.evolve(circuit)
# compute the correct state by direct exponentiation
hamiltonian_jw = JordanWignerMapper().map(hamiltonian.second_q_op()).to_matrix()
exact_evolution_op = expm(-1j * time * hamiltonian_jw)
expected_state = exact_evolution_op @ np.array(initial_state)
# check that the simulated state is correct
fidelity = state_fidelity(final_state, expected_state)
np.testing.assert_allclose(fidelity, 1.0, atol=1e-8)
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/tomtuamnuq/compare-qiskit-ocean
|
tomtuamnuq
|
import time
from dwave.system import DWaveCliqueSampler, LeapHybridSampler
from qiskit.optimization.algorithms import CplexOptimizer
from random_lp.lp_random_gen import create_models
from utilities.helpers import create_dwave_meo
DIR = 'TEST_DATA' + "/" + "14_03_2021" + "/DENSE/"
# select linear program to solve
qps = create_models(DIR)
qp = qps['test_10']
# init Optimizers
cplex = CplexOptimizer()
leap = create_dwave_meo(LeapHybridSampler())
clique = create_dwave_meo(DWaveCliqueSampler())
# solve classically
cplex.solve(qp)
# solve hybrid
res_leap = leap.solve(qp)
res_leap
# solve quantum
res_clique = clique.solve(qp)
res_clique
qp.to_docplex().prettyprint()
|
https://github.com/swe-train/qiskit__qiskit
|
swe-train
|
# Copyright 2022-2023 Ohad Lev.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0,
# or in the root directory of this package("LICENSE.txt").
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
`SATInterface` class.
"""
import os
import json
from typing import List, Tuple, Union, Optional, Dict, Any
from sys import stdout
from datetime import datetime
from hashlib import sha256
from qiskit import transpile, QuantumCircuit, qpy
from qiskit.result.counts import Counts
from qiskit.visualization.circuit.text import TextDrawing
from qiskit.providers.backend import Backend
from qiskit.transpiler.passes import RemoveBarriers
from IPython import display
from matplotlib.figure import Figure
from sat_circuits_engine.util import timer_dec, timestamp, flatten_circuit
from sat_circuits_engine.util.settings import DATA_PATH, TRANSPILE_KWARGS
from sat_circuits_engine.circuit import GroverConstraintsOperator, SATCircuit
from sat_circuits_engine.constraints_parse import ParsedConstraints
from sat_circuits_engine.interface.circuit_decomposition import decompose_operator
from sat_circuits_engine.interface.counts_visualization import plot_histogram
from sat_circuits_engine.interface.translator import ConstraintsTranslator
from sat_circuits_engine.classical_processing import (
find_iterations_unknown,
calc_iterations,
ClassicalVerifier,
)
from sat_circuits_engine.interface.interactive_inputs import (
interactive_operator_inputs,
interactive_solutions_num_input,
interactive_run_input,
interactive_backend_input,
interactive_shots_input,
)
# Local globals for visualization of charts and diagrams
IFRAME_WIDTH = "100%"
IFRAME_HEIGHT = "700"
class SATInterface:
"""
An interface for building, running, and mining data from quantum circuits for n-SAT problems.
There are 2 options to use this class:
(1) Using an interactive interface (intuitive but somewhat limited) - for this
just initiate a bare instance of this class: `SATInterface()`.
(2) Using the API defined by this class, that includes the following methods:
* The following descriptions are partial, for full annotations see the methods' docstrings.
- `__init__`: an instance of `SATInterface` must be initiated with exactly 1 combination:
(a) (high_level_constraints_string + high_level_vars) - for constraints
in a high-level format.
(b) (num_input_qubits + constraints_string) - for constraints
in a low-level format.
* For format annotations see `constraints_format.ipynb` in the main directory.
- `obtain_grover_operator`: obtains the suitable grover operator for the constraints.
- `save_display_grover_operator`: saves and displays data generated
by the `obtain_grover_operator` method.
- `obtain_overall_circuit`: obtains the suitable overall SAT circuit.
- `save_display_overall_circuit`: saves and displays data generated
by the `obtain_overall_circuit` method.
- `run_overall_circuit`: executes the overall SAT circuit.
- `save_display_results`: saves and displays data generated
by the `run_overall_circuit` method.
It is highly recommended to go through `demos.ipynb`, which demonstrates the various optional uses
of this class, in addition to reading `constraints_format.ipynb`, which is a must for using
this package properly. Both notebooks are in the main directory.
"""
def __init__(
self,
num_input_qubits: Optional[int] = None,
constraints_string: Optional[str] = None,
high_level_constraints_string: Optional[str] = None,
high_level_vars: Optional[Dict[str, int]] = None,
name: Optional[str] = None,
save_data: Optional[bool] = True,
) -> None:
"""
Accepts the combination of parameters:
(high_level_constraints_string + high_level_vars) or (num_input_qubits + constraints_string).
Exactly one combination is accepted.
In other cases, either an interactive user interface will be launched to take the user's inputs,
or an exception will be raised due to misuse of the API.
Args:
num_input_qubits (Optional[int] = None): number of input qubits.
constraints_string (Optional[str] = None): a string of constraints in a low-level format.
high_level_constraints_string (Optional[str] = None): a string of constraints in a
high-level format.
high_level_vars (Optional[Dict[str, int]] = None): a dictionary that configures
the high-level variables - keys are names and values are bits-lengths.
name (Optional[str] = None): a name for this object; if None, then the
generic name "SAT" is given automatically.
save_data (Optional[bool] = True): if True, saves all data and metadata generated by this
class to a unique data folder (by using the `save_XXX` methods of this class).
Raises:
SyntaxError - if a forbidden combination of arguments has been provided.
"""
if name is None:
name = "SAT"
self.name = name
# Creating a directory for data to be saved
if save_data:
self.time_created = timestamp(datetime.now())
self.dir_path = f"{DATA_PATH}{self.time_created}_{self.name}/"
os.mkdir(self.dir_path)
print(f"Data will be saved into '{self.dir_path}'.")
# Initial metadata, more to be added by this class' `save_XXX` methods
self.metadata = {
"name": self.name,
"datetime": self.time_created,
"num_input_qubits": num_input_qubits,
"constraints_string": constraints_string,
"high_level_constraints_string": high_level_constraints_string,
"high_level_vars": high_level_vars,
}
self.update_metadata()
# Identifying user's platform, for visualization purposes
self.identify_platform()
# In the case of low-level constraints format, that is the default value
self.high_to_low_map = None
# Case A - interactive interface
if (num_input_qubits is None or constraints_string is None) and (
high_level_constraints_string is None or high_level_vars is None
):
self.interactive_interface()
# Case B - API
else:
self.high_level_constraints_string = high_level_constraints_string
self.high_level_vars = high_level_vars
# Case B.1 - high-level format constraints inputs
if num_input_qubits is None or constraints_string is None:
self.num_input_qubits = sum(self.high_level_vars.values())
self.high_to_low_map, self.constraints_string = ConstraintsTranslator(
self.high_level_constraints_string, self.high_level_vars
).translate()
# Case B.2 - low-level format constraints inputs
elif num_input_qubits is not None and constraints_string is not None:
self.num_input_qubits = num_input_qubits
self.constraints_string = constraints_string
# Misuse
else:
raise SyntaxError(
"SATInterface accepts the combination of paramters:"
"(high_level_constraints_string + high_level_vars) or "
"(num_input_qubits + constraints_string). "
"Exactly one combination is accepted, not both."
)
self.parsed_constraints = ParsedConstraints(
self.constraints_string, self.high_level_constraints_string
)
def update_metadata(self, update_metadata: Optional[Dict[str, Any]] = None) -> None:
"""
Updates the metadata file (in the unique data folder of a given `SATInterface` instance).
Args:
update_metadata (Optional[Dict[str, Any]] = None):
- If None - just dumps `self.metadata` into the metadata JSON file.
- If defined - updates the `self.metadata` attribute and then dumps it.
"""
if update_metadata is not None:
self.metadata.update(update_metadata)
with open(f"{self.dir_path}metadata.json", "w") as metadata_file:
json.dump(self.metadata, metadata_file, indent=4)
def identify_platform(self) -> None:
"""
Identifies user's platform.
Writes True to `self.jupyter` for Jupyter notebook, False for terminal.
"""
# If True then the platform is a terminal/command line/shell
if stdout.isatty():
self.jupyter = False
# If False, we assume the platform is a Jupyter notebook
else:
self.jupyter = True
def output_to_platform(
self,
*,
title: str,
output_terminal: Union[TextDrawing, str],
output_jupyter: Union[Figure, str],
display_both_on_jupyter: Optional[bool] = False,
) -> None:
"""
Displays output to user's platform.
Args:
title (str): a title for the output.
output_terminal (Union[TextDrawing, str]): text to print for a terminal platform.
output_jupyter: (Union[Figure, str]): objects to display for a Jupyter notebook platform.
can handle `Figure` matplotlib objects or strings of paths to IFrame displayable file,
e.g PDF files.
display_both_on_jupyter (Optional[bool] = False): if True, displays both
`output_terminal` and `output_jupyter` in a Jupyter notebook platform.
Raises:
TypeError - in the case of misusing the `output_jupyter` argument.
"""
print()
print(title)
if self.jupyter:
if isinstance(output_jupyter, str):
display.display(display.IFrame(output_jupyter, width=IFRAME_WIDTH, height=IFRAME_HEIGHT))
elif isinstance(output_jupyter, Figure):
display.display(output_jupyter)
else:
raise TypeError("output_jupyter must be an str (path to image file) or a Figure object.")
if display_both_on_jupyter:
print(output_terminal)
else:
print(output_terminal)
def interactive_interface(self) -> None:
"""
An interactive CLI that allows exploiting most (but not all) of the package's features.
Uses functions of the form `interactive_XXX_inputs` from the `interactive_inputs.py` module.
Divided into 3 main stages:
1. Obtaining Grover's operator for the SAT problem.
2. Obtaining the overall SAT circuit.
3. Executing the circuit and parsing the results.
The interface is built in a modular manner such that a user can halt at any stage.
The default settings for the interactive user interface are:
1. `name = "SAT"`.
2. `save_data = True`.
3. `display = True`.
4. `transpile_kwargs = {'basis_gates': ['u', 'cx'], 'optimization_level': 3}`.
5. Backends are limited to those defined in the global-constant-like function `BACKENDS`:
- Those are the local `aer_simulator` and the remote `ibmq_qasm_simulator` for now.
Due to these default settings the interactive CLI is somewhat restrictive,
for full flexibility a user should use the API and not the CLI.
"""
# Handling operator part
operator_inputs = interactive_operator_inputs()
self.num_input_qubits = operator_inputs["num_input_qubits"]
self.high_to_low_map = operator_inputs["high_to_low_map"]
self.constraints_string = operator_inputs["constraints_string"]
self.high_level_constraints_string = operator_inputs["high_level_constraints_string"]
self.high_level_vars = operator_inputs["high_level_vars"]
self.parsed_constraints = ParsedConstraints(
self.constraints_string, self.high_level_constraints_string
)
self.update_metadata(
{
"num_input_qubits": self.num_input_qubits,
"constraints_string": self.constraints_string,
"high_level_constraints_string": self.high_level_constraints_string,
"high_level_vars": self.high_level_vars,
}
)
obtain_grover_operator_output = self.obtain_grover_operator()
self.save_display_grover_operator(obtain_grover_operator_output)
# Handling overall circuit part
solutions_num = interactive_solutions_num_input()
if solutions_num is not None:
backend = None
if solutions_num == -1:
backend = interactive_backend_input()
overall_circuit_data = self.obtain_overall_sat_circuit(
obtain_grover_operator_output["operator"], solutions_num, backend
)
self.save_display_overall_circuit(overall_circuit_data)
# Handling circuit execution part
if interactive_run_input():
if backend is None:
backend = interactive_backend_input()
shots = interactive_shots_input()
counts_parsed = self.run_overall_sat_circuit(
overall_circuit_data["circuit"], backend, shots
)
self.save_display_results(counts_parsed)
print()
print(f"Done saving data into '{self.dir_path}'.")
def obtain_grover_operator(
self, transpile_kwargs: Optional[Dict[str, Any]] = None
) -> Dict[str, Union[GroverConstraintsOperator, QuantumCircuit]]:
"""
Obtains the suitable `GroverConstraintsOperator` object for the constraints,
decomposes it using the `circuit_decomposition.py` module and transpiles it
according to `transpile_kwargs`.
Args:
transpile_kwargs (Optional[Dict[str, Any]]): kwargs for Qiskit's transpile function.
The default is set to the global constant `TRANSPILE_KWARGS`.
Returns:
(Dict[str, Union[GroverConstraintsOperator, QuantumCircuit, Dict[str, List[int]]]]):
- 'operator' (GroverConstraintsOperator): the high-level blocks operator.
- 'decomposed_operator' (QuantumCircuit): decomposed to building-blocks operator.
* For annotations regarding the decomposition method see the
`circuit_decomposition` module.
- 'transpiled_operator' (QuantumCircuit): the transpiled operator.
*** The high-level operator and the decomposed operator are generated with barriers
between constraints by default for visualization purposes. The barriers are stripped
off before transpiling, so the transpiled operator object contains no barriers. ***
- 'high_level_to_bit_indexes_map' (Optional[Dict[str, List[int]]] = None):
A map of high-level variables with their allocated bit-indexes in the input register.
"""
print()
print(
"The system synthesizes and transpiles a Grover's "
"operator for the given constraints. Please wait.."
)
if transpile_kwargs is None:
transpile_kwargs = TRANSPILE_KWARGS
self.transpile_kwargs = transpile_kwargs
operator = GroverConstraintsOperator(
self.parsed_constraints, self.num_input_qubits, insert_barriers=True
)
decomposed_operator = decompose_operator(operator)
no_barriers_operator = RemoveBarriers()(operator)
transpiled_operator = transpile(no_barriers_operator, **transpile_kwargs)
print("Done.")
return {
"operator": operator,
"decomposed_operator": decomposed_operator,
"transpiled_operator": transpiled_operator,
"high_level_to_bit_indexes_map": self.high_to_low_map,
}
def save_display_grover_operator(
self,
obtain_grover_operator_output: Dict[str, Union[GroverConstraintsOperator, QuantumCircuit]],
display: Optional[bool] = True,
) -> None:
"""
Handles saving and displaying data generated by the `self.obtain_grover_operator` method.
Args:
obtain_grover_operator_output(Dict[str, Union[GroverConstraintsOperator, QuantumCircuit]]):
the dictionary returned upon calling the `self.obtain_grover_operator` method.
display (Optional[bool] = True) - If true, displays objects to user's platform.
"""
# Creating a directory to save operator's data
operator_dir_path = f"{self.dir_path}grover_operator/"
os.mkdir(operator_dir_path)
# Titles for displaying objects, by order of `obtain_grover_operator_output`
titles = [
"The operator diagram - high level blocks:",
"The operator diagram - decomposed:",
f"The transpiled operator diagram saved into '{operator_dir_path}'.\n"
f"It's not presented here due to its complexity.\n"
f"Please note that barriers appear in the high-level diagrams above only for convenient\n"
f"visual separation between constraints.\n"
f"Before transpilation all barriers are removed to avoid redundant inefficiencies.",
]
for index, (op_name, op_obj) in enumerate(obtain_grover_operator_output.items()):
# Generic path and name for files to be saved
files_path = f"{operator_dir_path}{op_name}"
# Generating a circuit diagrams figure
figure_path = f"{files_path}.pdf"
op_obj.draw("mpl", filename=figure_path, fold=-1)
# Generating a QPY serialization file for the circuit object
qpy_file_path = f"{files_path}.qpy"
with open(qpy_file_path, "wb") as qpy_file:
qpy.dump(op_obj, qpy_file)
# Original high-level operator and decomposed operator
if index < 2 and display:
# Displaying to user
self.output_to_platform(
title=titles[index], output_terminal=op_obj.draw("text"), output_jupyter=figure_path
)
# Transpiled operator
elif index == 2:
# Output to user, not including the circuit diagram
print()
print(titles[index])
print()
print(f"The transpilation kwargs are: {self.transpile_kwargs}.")
transpiled_operator_depth = op_obj.depth()
transpiled_operator_gates_count = op_obj.count_ops()
print(f"Transpiled operator depth: {transpiled_operator_depth}.")
print(f"Transpiled operator gates count: {transpiled_operator_gates_count}.")
print(f"Total number of qubits: {op_obj.num_qubits}.")
# Generating a QASM 2.0 file only for the (flattened = no registers) transpiled operator
qasm_file_path = f"{files_path}.qasm"
flatten_circuit(op_obj).qasm(filename=qasm_file_path)
# Index 3 is 'high_level_to_bit_indexes_map' so it's time to break from the loop
break
# Mapping from high-level variables to bit-indexes will be displayed as well
mapping = obtain_grover_operator_output["high_level_to_bit_indexes_map"]
if mapping:
print()
print(f"The high-level variables mapping to bit-indexes:\n{mapping}")
print()
print(
f"Saved into '{operator_dir_path}':\n",
" Circuit diagrams for all levels.\n",
" QPY serialization exports for all levels.\n",
" QASM 2.0 export only for the transpiled level.",
)
with open(f"{operator_dir_path}operator.qpy", "rb") as qpy_file:
operator_qpy_sha256 = sha256(qpy_file.read()).hexdigest()
self.update_metadata(
{
"high_level_to_bit_indexes_map": self.high_to_low_map,
"transpile_kwargs": self.transpile_kwargs,
"transpiled_operator_depth": transpiled_operator_depth,
"transpiled_operator_gates_count": transpiled_operator_gates_count,
"operator_qpy_sha256": operator_qpy_sha256,
}
)
def obtain_overall_sat_circuit(
self,
grover_operator: GroverConstraintsOperator,
solutions_num: int,
backend: Optional[Backend] = None,
) -> Dict[str, SATCircuit]:
"""
Obtains the suitable `SATCircuit` object (= the overall SAT circuit) for the SAT problem.
Args:
grover_operator (GroverConstraintsOperator): Grover's operator for the SAT problem.
solutions_num (int): number of solutions for the SAT problem. In the case the number
of solutions is unknown, specific negative values are accepted:
* '-1' - for launching a classical iterative stochastic process that finds an adequate
number of iterations - by calling the `find_iterations_unknown` function (see its
docstrings for more information).
* '-2' - for generating a dynamic circuit that iterates over Grover's iterator until
a solution is obtained, using weak measurements. TODO - this feature isn't ready yet.
backend (Optional[Backend] = None): in the case of a '-1' value given to `solutions_num`,
a backend object to execute the depicted iterative process upon should be provided.
Returns:
(Dict[str, SATCircuit]):
- 'circuit' key for the overall SAT circuit.
- 'concise_circuit' key for the overall SAT circuit, with only 1 iteration over Grover's
iterator (operator + diffuser). Useful for visualization purposes.
*** The concise circuit is generated with barriers between segments by default
for visualization purposes. In the actual circuit there are no barriers. ***
"""
# -1 = Unknown number of solutions - iterative stochastic process
print()
if solutions_num == -1:
assert backend is not None, "Need to specify a backend if `solutions_num == -1`."
print("Please wait while the system checks various solutions..")
circuit, iterations = find_iterations_unknown(
self.num_input_qubits,
grover_operator,
self.parsed_constraints,
precision=10,
backend=backend,
)
print()
print(f"An adequate number of iterations found = {iterations}.")
# -2 = Unknown number of solutions - implement a dynamic circuit
# TODO this feature isn't fully implemented yet
elif solutions_num == -2:
print("The system builds a dynamic circuit..")
circuit = SATCircuit(self.num_input_qubits, grover_operator, iterations=None)
circuit.add_input_reg_measurement()
iterations = None
# Known number of solutions
else:
print("The system builds the overall circuit..")
iterations = calc_iterations(self.num_input_qubits, solutions_num)
print(f"\nFor {solutions_num} solutions, {iterations} iterations needed.")
circuit = SATCircuit(
self.num_input_qubits, grover_operator, iterations, insert_barriers=False
)
circuit.add_input_reg_measurement()
self.iterations = iterations
# Obtaining a SATCircuit object with one iteration for concise representation
concise_circuit = SATCircuit(
self.num_input_qubits, grover_operator, iterations=1, insert_barriers=True
)
concise_circuit.add_input_reg_measurement()
return {"circuit": circuit, "concise_circuit": concise_circuit}
def save_display_overall_circuit(
self, obtain_overall_sat_circuit_output: Dict[str, SATCircuit], display: Optional[bool] = True
) -> None:
"""
Handles saving and displaying data generated by the `self.obtain_overall_sat_circuit` method.
Args:
obtain_overall_sat_circuit_output(Dict[str, SATCircuit]):
the dictionary returned upon calling the `self.obtain_overall_sat_circuit` method.
display (Optional[bool] = True) - If true, displays objects to user's platform.
"""
circuit = obtain_overall_sat_circuit_output["circuit"]
concise_circuit = obtain_overall_sat_circuit_output["concise_circuit"]
# Creating a directory to save overall circuit's data
overall_circuit_dir_path = f"{self.dir_path}overall_circuit/"
os.mkdir(overall_circuit_dir_path)
# Generating a figure of the overall SAT circuit with just 1 iteration (i.e "concise")
concise_circuit_fig_path = f"{overall_circuit_dir_path}overall_circuit_1_iteration.pdf"
concise_circuit.draw("mpl", filename=concise_circuit_fig_path, fold=-1)
# Displaying the concise circuit to user
if display:
if self.iterations:
self.output_to_platform(
title=(
f"The high level circuit contains {self.iterations}"
f" iterations of the following form:"
),
output_terminal=concise_circuit.draw("text"),
output_jupyter=concise_circuit_fig_path,
)
# Dynamic circuit case - TODO NOT FULLY IMPLEMENTED YET
else:
dynamic_circuit_fig_path = f"{overall_circuit_dir_path}overall_circuit_dynamic.pdf"
circuit.draw("mpl", filename=dynamic_circuit_fig_path, fold=-1)
self.output_to_platform(
title="The dynamic circuit diagram:",
output_terminal=circuit.draw("text"),
output_jupyter=dynamic_circuit_fig_path,
)
if self.iterations:
transpiled_overall_circuit = transpile(flatten_circuit(circuit), **self.transpile_kwargs)
print()
print(f"The transpilation kwargs are: {self.transpile_kwargs}.")
transpiled_overall_circuit_depth = transpiled_overall_circuit.depth()
transpiled_overall_circuit_gates_count = transpiled_overall_circuit.count_ops()
print(f"Transpiled overall-circuit depth: {transpiled_overall_circuit_depth}.")
print(f"Transpiled overall-circuit gates count: {transpiled_overall_circuit_gates_count}.")
print(f"Total number of qubits: {transpiled_overall_circuit.num_qubits}.")
print()
print("Exporting the full overall SAT circuit object..")
export_files_path = f"{overall_circuit_dir_path}overall_circuit"
with open(f"{export_files_path}.qpy", "wb") as qpy_file:
qpy.dump(circuit, qpy_file)
if self.iterations:
transpiled_overall_circuit.qasm(filename=f"{export_files_path}.qasm")
print()
print(
f"Saved into '{overall_circuit_dir_path}':\n",
" A concised (1 iteration) circuit diagram of the high-level overall SAT circuit.\n",
" QPY serialization export for the full overall SAT circuit object.",
)
if self.iterations:
print(" QASM 2.0 export for the transpiled full overall SAT circuit object.")
metadata_update = {
"num_total_qubits": circuit.num_qubits,
"num_iterations": circuit.iterations,
}
if self.iterations:
metadata_update["transpiled_overall_circuit_depth"] = (transpiled_overall_circuit_depth,)
metadata_update[
"transpiled_overall_circuit_gates_count"
] = transpiled_overall_circuit_gates_count
self.update_metadata(metadata_update)
@timer_dec("Circuit simulation execution time = ")
def run_overall_sat_circuit(
self, circuit: QuantumCircuit, backend: Backend, shots: int
) -> Dict[str, Union[Counts, List[Tuple[Union[str, int]]], List[str], List[Dict[str, int]]]]:
"""
        Executes `circuit` on `backend`, transpiled with respect to that backend, for `shots` shots.
Args:
circuit (QuantumCircuit): `QuantumCircuit` object or child-object (a.k.a `SATCircuit`)
to execute.
backend (Backend): backend to execute `circuit` upon.
shots (int): number of execution shots.
Returns:
(Dict[str, Union[Counts, List[Tuple[Union[str, int]]], List[str], List[Dict[str, int]]]]):
dict object returned by `self.parse_counts` - see this method's docstrings for annotations.
"""
# Defines also instance attributes to use in other methods
self.backend = backend
self.shots = shots
print()
print(f"The system is running the circuit {shots} times on {backend}, please wait..")
print("This process might take a while.")
job = backend.run(transpile(circuit, backend), shots=shots)
counts = job.result().get_counts()
print("Done.")
parsed_counts = self.parse_counts(counts)
return parsed_counts
def parse_counts(
self, counts: Counts
) -> Dict[str, Union[Counts, List[Tuple[Union[str, int]]], List[str], List[Dict[str, int]]]]:
"""
        Parses a `Counts` object into several useful data structures (see the 'Returns' section).
Args:
counts (Counts): the `Counts` object to parse.
Returns:
(Dict[str, Union[Counts, List[Tuple[Union[str, int]]], List[str], List[Dict[str, int]]]]):
'counts' (Counts) - the original `Counts` object.
'counts_sorted' (List[Tuple[Union[str, int]]]) - results sorted in a descending order.
'distilled_solutions' (List[str]): list of solutions (bitstrings).
'high_level_vars_values' (List[Dict[str, int]]): list of solutions (each solution is a
dictionary with variable-names as keys and their integer values as values).
"""
        # Sorting results in descending order
counts_sorted = sorted(counts.items(), key=lambda x: x[1], reverse=True)
# Generating a set of distilled verified-only solutions
verifier = ClassicalVerifier(self.parsed_constraints)
distilled_solutions = set()
for count_item in counts_sorted:
if not verifier.verify(count_item[0]):
break
distilled_solutions.add(count_item[0])
# In the case of high-level format in use, translating `distilled_solutions` into integer values
high_level_vars_values = None
if self.high_level_constraints_string and self.high_level_vars:
# Container for dictionaries with variables integer values
high_level_vars_values = []
for solution in distilled_solutions:
# Keys are variable-names and values are their integer values
solution_vars = {}
for var, bits_bundle in self.high_to_low_map.items():
reversed_solution = solution[::-1]
var_bitstring = ""
for bit_index in bits_bundle:
var_bitstring += reversed_solution[bit_index]
# Translating to integer value
solution_vars[var] = int(var_bitstring, 2)
high_level_vars_values.append(solution_vars)
return {
"counts": counts,
"counts_sorted": counts_sorted,
"distilled_solutions": distilled_solutions,
"high_level_vars_values": high_level_vars_values,
}
def save_display_results(
self,
run_overall_sat_circuit_output: Dict[
str, Union[Counts, List[Tuple[Union[str, int]]], List[str], List[Dict[str, int]]]
],
display: Optional[bool] = True,
) -> None:
"""
Handles saving and displaying data generated by the `self.run_overall_sat_circuit` method.
Args:
run_overall_sat_circuit_output (Dict[str, Union[Counts, List[Tuple[Union[str, int]]],
List[str], List[Dict[str, int]]]]): the dictionary returned upon calling
the `self.run_overall_sat_circuit` method.
display (Optional[bool] = True) - If true, displays objects to user's platform.
"""
counts = run_overall_sat_circuit_output["counts"]
counts_sorted = run_overall_sat_circuit_output["counts_sorted"]
distilled_solutions = run_overall_sat_circuit_output["distilled_solutions"]
high_level_vars_values = run_overall_sat_circuit_output["high_level_vars_values"]
# Creating a directory to save results data
results_dir_path = f"{self.dir_path}results/"
os.mkdir(results_dir_path)
# Defining custom dimensions for the custom `plot_histogram` of this package
histogram_fig_width = max((len(counts) * self.num_input_qubits * (10 / 72)), 7)
histogram_fig_height = 5
histogram_figsize = (histogram_fig_width, histogram_fig_height)
histogram_path = f"{results_dir_path}histogram.pdf"
plot_histogram(counts, figsize=histogram_figsize, sort="value_desc", filename=histogram_path)
if display:
# Basic output text
output_text = (
f"All counts:\n{counts_sorted}\n"
f"\nDistilled solutions ({len(distilled_solutions)} total):\n"
f"{distilled_solutions}"
)
# Additional outputs for a high-level constraints format
if high_level_vars_values:
# Mapping from high-level variables to bit-indexes will be displayed as well
output_text += (
f"\n\nThe high-level variables mapping to bit-indexes:" f"\n{self.high_to_low_map}"
)
# Actual integer solutions will be displayed as well
additional_text = ""
for solution_index, solution in enumerate(high_level_vars_values):
additional_text += f"Solution {solution_index + 1}: "
for var_index, (var, value) in enumerate(solution.items()):
additional_text += f"{var} = {value}"
if var_index != len(solution) - 1:
additional_text += ", "
else:
additional_text += "\n"
output_text += f"\n\nHigh-level format solutions: \n{additional_text}"
self.output_to_platform(
title=f"The results for {self.shots} shots are:",
output_terminal=output_text,
output_jupyter=histogram_path,
display_both_on_jupyter=True,
)
results_dict = {
"high_level_to_bit_indexes_map": self.high_to_low_map,
"solutions": list(distilled_solutions),
"high_level_solutions": high_level_vars_values,
"counts": counts_sorted,
}
with open(f"{results_dir_path}results.json", "w") as results_file:
json.dump(results_dict, results_file, indent=4)
self.update_metadata(
{
"num_solutions": len(distilled_solutions),
"backend": str(self.backend),
"shots": self.shots,
}
)
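# --- Illustrative sketch (not part of the original file) ---
# `parse_counts` above reverses each measured bitstring (Qiskit is little-endian) and slices out
# the bits belonging to each high-level variable before converting them to integers. A minimal
# standalone version of that translation, with a hypothetical `high_to_low_map` and solution:
if __name__ == "__main__":
    high_to_low_map = {"x": [0, 1], "y": [2, 3]}  # hypothetical variable -> bit indexes
    solution = "1100"  # hypothetical measured bitstring
    reversed_solution = solution[::-1]
    solution_vars = {
        var: int("".join(reversed_solution[i] for i in bits), 2)
        for var, bits in high_to_low_map.items()
    }
    print(solution_vars)  # {'x': 0, 'y': 3}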
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit_nature.second_q.drivers import PySCFDriver
driver = PySCFDriver()
problem = driver.run()
fermionic_op = problem.hamiltonian.second_q_op()
from qiskit_nature.second_q.mappers import JordanWignerMapper
mapper = JordanWignerMapper()
qubit_jw_op = mapper.map(fermionic_op)
print(qubit_jw_op)
from qiskit_nature.second_q.mappers import ParityMapper
mapper = ParityMapper()
qubit_p_op = mapper.map(fermionic_op)
print(qubit_p_op)
mapper = ParityMapper(num_particles=problem.num_particles)
qubit_op = mapper.map(fermionic_op)
print(qubit_op)
tapered_mapper = problem.get_tapered_mapper(mapper)
print(type(tapered_mapper))
qubit_op = tapered_mapper.map(fermionic_op)
print(qubit_op)
from qiskit_nature.second_q.circuit.library import HartreeFock
hf_state = HartreeFock(2, (1, 1), JordanWignerMapper())
hf_state.draw()
from qiskit_nature.second_q.mappers import InterleavedQubitMapper
interleaved_mapper = InterleavedQubitMapper(JordanWignerMapper())
hf_state = HartreeFock(2, (1, 1), interleaved_mapper)
hf_state.draw()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
q = QuantumRegister(1)
c = ClassicalRegister(1)
qc = QuantumCircuit(q, c)
qc.h(q)
qc.measure(q, c)
qc.draw(output='mpl', style={'backgroundcolor': '#EEEEEE'})
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit, execute
from qiskit.visualization import plot_error_map
from qiskit.providers.fake_provider import FakeVigoV2
backend = FakeVigoV2()
plot_error_map(backend)
|
https://github.com/Qubit-MU/Qiskit_Fall_Fest_2023
|
Qubit-MU
|
########################################
# ENTER YOUR NAME AND WISC EMAIL HERE: #
########################################
# Name: Rochelle Li
# Email: rli484@wisc.edu
## Run this cell to make sure your grader is setup correctly
%set_env QC_GRADE_ONLY=true
%set_env QC_GRADING_ENDPOINT=https://qac-grading.quantum-computing.ibm.com
from qiskit import QuantumCircuit
qc = QuantumCircuit(3, 3)
## Write your code below this line ##
qc.h(0)
qc.h(2)
qc.cx(0,1)
## Do not change the code below here ##
answer1 = qc
qc.draw()
# Grader Cell: Run this to submit your answer
from qc_grader.challenges.fall_fest23 import grade_ex2a
grade_ex2a(answer1)
from qiskit import QuantumCircuit
qc = QuantumCircuit(3, 3)
qc.barrier(0, 1, 2)
## Write your code below this line ##
qc.cx(1,2)
qc.x(2)
qc.cx(2,0)
qc.x(2)
## Do not change the code below this line ##
answer2 = qc
qc.draw()
# Grader Cell: Run this to submit your answer
from qc_grader.challenges.fall_fest23 import grade_ex2b
grade_ex2b(answer2)
answer3: bool
## Quiz: evaluate the results and decide if the following statement is True or False
q0 = 1
q1 = 0
q2 = 1
## Based on this, is it TRUE or FALSE that the Guard on the left is a liar?
## Assign your answer, either True or False, to answer3 below
answer3 = True
from qc_grader.challenges.fall_fest23 import grade_ex2c
grade_ex2c(answer3)
## Quiz: Fill in the correct numbers to make the following statement true:
## The treasure is on the right, and the Guard on the left is the liar
q0 = 0
q1 = 1
q2 = 1
## HINT - Remember that Qiskit uses little-endian ordering
answer4 = [q0, q1, q2]
# Grader Cell: Run this to submit your answer
from qc_grader.challenges.fall_fest23 import grade_ex2d
grade_ex2d(answer4)
from qiskit import QuantumCircuit
qc = QuantumCircuit(3)
## in the code below, fill in the missing gates. Run the cell to see a drawing of the current circuit ##
qc.h(0)
qc.h(2)
qc.cx(0,1)
qc.barrier(0, 1, 2)
qc.cx(2, 1)
qc.x(2)
qc.cx(2, 0)
qc.x(2)
qc.barrier(0, 1, 2)
qc.swap(0,1)
qc.x(0)
qc.x(1)
qc.cx(2,1)
qc.x(2)
qc.cx(2,0)
qc.x(2)
## Do not change any of the code below this line ##
answer5 = qc
qc.draw(output="mpl")
# Grader Cell: Run this to submit your answer
from qc_grader.challenges.fall_fest23 import grade_ex2e
grade_ex2e(answer5)
from qiskit import QuantumCircuit, Aer, transpile
from qiskit.visualization import plot_histogram
## This is the full version of the circuit. Run it to see the results ##
quantCirc = QuantumCircuit(3)
quantCirc.h(0), quantCirc.h(2), quantCirc.cx(0, 1), quantCirc.barrier(0, 1, 2), quantCirc.cx(2, 1), quantCirc.x(2), quantCirc.cx(2, 0), quantCirc.x(2)
quantCirc.barrier(0, 1, 2), quantCirc.swap(0, 1), quantCirc.x(1), quantCirc.cx(2, 1), quantCirc.x(0), quantCirc.x(2), quantCirc.cx(2, 0), quantCirc.x(2)
# Execute the circuit and draw the histogram
measured_qc = quantCirc.measure_all(inplace=False)
backend = Aer.get_backend('qasm_simulator') # the device to run on
result = backend.run(transpile(measured_qc, backend), shots=1000).result()
counts = result.get_counts(measured_qc)
plot_histogram(counts)
from qiskit.primitives import Sampler
from qiskit.visualization import plot_distribution
sampler = Sampler()
result = sampler.run(measured_qc, shots=1000).result()
probs = result.quasi_dists[0].binary_probabilities()
plot_distribution(probs)
|
https://github.com/AllenGabrielMarchen/HHL_implementation
|
AllenGabrielMarchen
|
#setup.py
import numpy as np
from scipy.linalg import expm
from qiskit.extensions import UnitaryGate
from qiskit.circuit.add_control import add_control
from qiskit import Aer
import circuit
import hhl
import tools
# Reference paper: Low Complexity Quantum Matrix Inversion Algorithm for non-Hermitian Matrices
def main(A,b,backend,shots,t,n_l,delta):
#Check if Hermitian
    if not np.allclose(A, A.T):
        print("The given matrix A is not Hermitian.")
        print("It will be transformed into Hermitian form.")
        # Embed A into the Hermitian dilation [[0, A], [A^T, 0]]: zero blocks with the same shape
        # as A go on the diagonal, with A and A.T placed off-diagonal.
        A = np.vstack((np.hstack((np.zeros_like(A), A)), np.hstack((A.T, np.zeros_like(A)))))
        # Pad b with zeros so it matches the dilated dimension
        b = np.hstack((b, np.zeros(np.shape(A)[0] - np.shape(b)[0])))
i = complex(0,1) #complex(real part, imaginary part)
    U = expm(i*A*t) # A is given as a matrix, so scipy's expm is needed to exponentiate it
    U_gate = UnitaryGate(U) # the matrix U constructed above defines a (4x4) unitary gate
    CU = add_control(U_gate, 1, ctrl_state=None, label="CU")
    # label stores the gate name "CU"
    # ctrl_state selects the state the gate is controlled on -> None (default) here
    # the second argument specifies the number of control qubits
    n_b = int(np.log2(U.shape[0]))
    # The system has the form Ax = b with b of shape 4x1, so its dimension equals the number of rows of A, and hence of U
    # Taking log2 of the number of rows gives the required value of n_b
My_HHL_result = hhl.My_HHL(CU,b,n_l,n_b,backend,delta,shots,A,details = True,chevyshev = False)
print("\n")
qiskit_result = hhl.qiskit_HHL(A,b,backend)
print("\n")
classical_result = hhl.classical_HHL(A,b)
print("\n")
#For normalized answer
    print("<Un-normalized Case Comparison>")
print('Qiskit Error : {0}'.format(np.linalg.norm(qiskit_result[1]-classical_result[1])))
print('My HHL Error : {0}'.format(np.linalg.norm(My_HHL_result[1]-classical_result[1])))
print("\n")
    print("<Normalized Case Comparison>")
print('Qiskit Error : {0}'.format(np.linalg.norm(qiskit_result[0]-classical_result[0])))
print('My HHL Error : {0}'.format(np.linalg.norm(My_HHL_result[0]-classical_result[0])))
if __name__ == "__main__":
#setups
    A = np.array([[2,-1],[1,4]]) # matrix for the non-Hermitian case
b = np.array([1,1])
backend = Aer.get_backend('aer_simulator')
shots = 8192
t = np.pi*2/16
    n_l = 3 # n_l is the number of clock qubits initialized with Hadamards in the QPE
delta = 1/16*(2**(n_l-1))
main(A,b,backend,shots,t,n_l,delta)
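# --- Illustrative sketch (not part of the original file) ---
# Quick numpy check of the Hermitian embedding used in main(): with H = [[0, A], [A^T, 0]] and
# right-hand side [b; 0], H is Hermitian (real symmetric here) and the lower half of the solution
# of H y = [b; 0] recovers A^{-1} b.
if __name__ == "__main__":
    A_ = np.array([[2, -1], [1, 4]])
    b_ = np.array([1, 1])
    H = np.vstack((np.hstack((np.zeros_like(A_), A_)),
                   np.hstack((A_.T, np.zeros_like(A_)))))
    y = np.linalg.solve(H, np.hstack((b_, np.zeros(2))))
    print(np.allclose(H, H.T), np.allclose(y[2:], np.linalg.solve(A_, b_)))  # True True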
|
https://github.com/ttlion/ShorAlgQiskit
|
ttlion
|
""" This file allows to test the Multiplication blocks Ua. This blocks, when put together as explain in
the report, do the exponentiation.
The user can change N, n, a and the input state, to create the circuit:
up_reg |+> ---------------------|----------------------- |+>
|
|
|
-------|---------
------------ | | ------------
down_reg |x> ------------ | Mult | ------------ |(x*a) mod N>
------------ | | ------------
-----------------
Where |x> has n qubits and is the input state, the user can change it to whatever he wants
This file uses the local simulator 'statevector_simulator' because it saves the quantum state at the
end of the circuit, which is exactly the goal of this test file. It supports enough qubits for the
QFT sizes used in Shor's Algorithm, whereas the IBM simulator only supports up to 32 qubits.
"""
""" Imports from qiskit"""
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import execute, IBMQ, BasicAer
""" Imports to Python functions """
import math
import time
""" Local Imports """
from qfunctions import create_QFT, create_inverse_QFT
from qfunctions import cMULTmodN
""" Function to properly get the final state, it prints it to user """
""" This is only possible in this way because the program uses the statevector_simulator """
def get_final(results, number_aux, number_up, number_down):
i=0
""" Get total number of qubits to go through all possibilities """
total_number = number_aux + number_up + number_down
max = pow(2,total_number)
print('|aux>|top_register>|bottom_register>\n')
while i<max:
binary = bin(i)[2:].zfill(total_number)
number = results.item(i)
number = round(number.real, 3) + round(number.imag, 3) * 1j
""" If the respective state is not zero, then print it and store the state of the register where the result we are looking for is.
This works because that state is the same for every case where number !=0 """
if number!=0:
print('|{0}>|{1}>|{2}>'.format(binary[0:number_aux],binary[number_aux:(number_aux+number_up)],binary[(number_aux+number_up):(total_number)]),number)
if binary[number_aux:(number_aux+number_up)]=='1':
store = binary[(number_aux+number_up):(total_number)]
i=i+1
print(' ')
return int(store, 2)
""" Main program """
if __name__ == '__main__':
""" Select number N to do modN"""
N = int(input('Please insert integer number N: '))
print(' ')
""" Get n value used in QFT, to know how many qubits are used """
n = math.ceil(math.log(N,2))
""" Select the value for 'a' """
a = int(input('Please insert integer number a: '))
print(' ')
""" Please make sure the a and N are coprime"""
if math.gcd(a,N)!=1:
print('Please make sure the a and N are coprime. Exiting program.')
exit()
print('Total number of qubits used: {0}\n'.format(2*n+3))
print('Please check source file to change input quantum state. By default is |2>.\n')
ts = time.time()
""" Create quantum and classical registers """
aux = QuantumRegister(n+2)
up_reg = QuantumRegister(1)
down_reg = QuantumRegister(n)
aux_classic = ClassicalRegister(n+2)
up_classic = ClassicalRegister(1)
down_classic = ClassicalRegister(n)
""" Create Quantum Circuit """
circuit = QuantumCircuit(down_reg , up_reg , aux, down_classic, up_classic, aux_classic)
""" Initialize with |+> to also check if the control is working"""
circuit.h(up_reg[0])
""" Put the desired input state in the down quantum register. By default we put |2> """
circuit.x(down_reg[1])
""" Apply multiplication"""
cMULTmodN(circuit, up_reg[0], down_reg, aux, int(a), N, n)
""" show results of circuit creation """
create_time = round(time.time()-ts, 3)
if n < 8: print(circuit)
print(f"... circuit creation time = {create_time}")
ts = time.time()
""" Simulate the created Quantum Circuit """
simulation = execute(circuit, backend=BasicAer.get_backend('statevector_simulator'),shots=1)
""" Get the results of the simulation in proper structure """
sim_result=simulation.result()
""" Get the statevector of the final quantum state """
outputstate = sim_result.get_statevector(circuit, decimals=3)
""" Show the final state after the multiplication """
after_exp = get_final(outputstate, n+2, 1, n)
""" show execution time """
exec_time = round(time.time()-ts, 3)
print(f"... circuit execute time = {exec_time}")
""" Print final quantum state to user """
print('When control=1, value after exponentiation is in bottom quantum register: |{0}>'.format(after_exp))
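# --- Illustrative sketch (not part of the original file) ---
# Classical cross-check: with the default input state |2>, the bottom register printed above
# should equal (2 * a) mod N whenever the control qubit is 1.
if __name__ == '__main__':
    print('Classical check: (2 * {0}) mod {1} = {2}'.format(a, N, (2 * a) % N))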
|
https://github.com/MadhavJivrajani/pyQSR
|
MadhavJivrajani
|
# -*- coding: utf-8 -*-
# pylint: disable=no-member
"""An IBM Qiskit implementation of quantum shift registers."""
__author__ = "Madhav Jivrajani"
from qiskit import QuantumCircuit, execute, Aer
from qiskit.circuit.exceptions import CircuitError
class QuantumLeftShift:
"""Circuit to implement a quantum shift register.
Two types of shift registers can be implemented:
1. Left shift register.
2. Circular left shift register.
Reference:
[1] Jae-weon Lee and Eok Kyun Lee and Jaewan Kim and Soonchil Lee, Quantum Shift Register, 2001.
https://arxiv.org/abs/quant-ph/0112107
"""
def __init__(self, data_qubits: int, seed: str, circular: bool = False) -> None:
"""Initializes a circuit for quantum shift register.
- Encodes qubits according to the initial seed.
        To perform a shift operation on n data qubits, at least n ancillary qubits are required
to store information. Therefore, a circuit will have a total of 2*n qubits. An extra
qubit apart from the 2*n qubits is required to store information about whether the circuit
should behave as a left shift register or a circular left shift register.
Args:
            data_qubits : The number of data qubits to be allotted. Needs to be equal to
length of seed.
seed : Initial value the quantum shift register should start with.
circular : Flag to indicate if the shifts are circular in nature or not.
Raises:
CircuitError : If the number of data qubits is not equal to the length of the seed.
"""
self.seed_string = seed
#to follow qiskit's little endian encoding.
self.seed = list(map(int, list(seed)))[::-1]
self.num_data_qubits = data_qubits
if len(seed) != self.num_data_qubits:
raise CircuitError("Length of seed should be equal to number of data qubits.")
self.shift_circuit = QuantumCircuit(self.num_data_qubits + self.num_data_qubits + 1,
self.num_data_qubits)
self.shifts_done = 0
self.circular = circular
self.__prepare_circuit()
self.shift_gate = self.construct_shift_gate()
def __repr__(self):
return ("QuantumShiftLeft(data_qubits = %d, "
"seed = \"%s\", "
"circular = %s)") % (self.num_data_qubits, self.seed_string, str(self.circular))
def __str__(self):
print(self.shift_circuit)
return ("Data qubits: %d\n"
"Initial seed: %s\n"
"Shifts performed: %d\n"
"Circular: %s") % (self.num_data_qubits, self.seed_string,
self.shifts_done, str(self.circular))
def __prepare_circuit(self):
"""Encodes qubits according to initial seed and the flag: circular"""
for i in range(len(self.seed)):
if self.seed[i] == 1:
self.shift_circuit.x(i)
if self.circular:
self.shift_circuit.x(self.num_data_qubits + self.num_data_qubits)
self.shift_circuit.barrier(range(self.num_data_qubits + self.num_data_qubits + 1))
def construct_shift_gate(self):
"""Constructs a left/circular left shift circuit and
returns the circuit as a Gate object.
"""
if self.circular:
name = "Circular Left Shift"
else:
name = "Left Shift"
shift_circ = QuantumCircuit(self.num_data_qubits + self.num_data_qubits + 1, name=name)
for qubit in range(1, self.num_data_qubits + self.num_data_qubits)[::-1]:
shift_circ.swap(qubit, qubit - 1)
shift_circ.cx(0, self.num_data_qubits)
shift_circ.toffoli(self.num_data_qubits + self.num_data_qubits, self.num_data_qubits, 0)
shift_circ.cx(0, self.num_data_qubits)
shift_gate = shift_circ.to_gate()
return shift_gate
def shift(self):
"""Performs a single shift operation on the values of the data qubits."""
self.shifts_done += 1
self.shift_circuit.append(self.shift_gate,
range(self.num_data_qubits + self.num_data_qubits + 1))
def get_register_state(self):
"""Performs a measurement in the |0> basis on each of the data qubits
and returns the counts of the measurement.
"""
self.shift_circuit.barrier(range(self.num_data_qubits + self.num_data_qubits + 1))
self.shift_circuit.measure(range(self.num_data_qubits), range(self.num_data_qubits))
self.shift_circuit.barrier(range(self.num_data_qubits + self.num_data_qubits + 1))
backend = Aer.get_backend('statevector_simulator')
register_state = execute(self.shift_circuit, backend, shots=1024).result().get_counts()
return register_state
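# --- Illustrative usage sketch (not part of the original file) ---
# Build a 4-qubit circular left shift register seeded with '1011', perform one shift,
# and read the measurement counts of the data qubits.
if __name__ == "__main__":
    qsr = QuantumLeftShift(4, "1011", circular=True)
    qsr.shift()
    print(qsr.get_register_state())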
|
https://github.com/BOBO1997/osp_solutions
|
BOBO1997
|
import sys, time
sys.path.append("../")
import Qconfig
qx_config = {
"APItoken": Qconfig.APItoken,
"url": Qconfig.config['url']}
print('Qconfig loaded from %s.' % Qconfig.__file__)
# Imports
import qiskit as qk
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import available_backends, execute, register, least_busy
from qiskit.tools.visualization import plot_histogram
from qiskit import IBMQ
from qiskit import Aer
from math import pi
print(qk.__version__)
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
qc = QuantumCircuit(qr,cr)
# circuit construction
# preparing the wavefunction
qc.h(qr[0])
qc.barrier()
qc.cx(qr[0], qr[1])
qc.barrier()
qc.u3(1.2309,0,0,qr[0])
qc.barrier()
# if we apply phase shift gate
qc.u1((2.0/3)*pi,qr[0])
qc.u1((2.0/3)*pi,qr[1])
qc.barrier()
# rotating the axis for measurement
#qc.cx(qr[0],qr[1]) # THE IMPLEMENTATION PROVIDED BY THE AUTHORS SKIPS THIS CNOT APPLICATION!!! It shouldn't be there; skipping it improves accuracy.
#qc.barrier()
qc.u3(2.1862,6.5449,pi,qr[0])
qc.u3(0.9553,2*pi,pi,qr[1])
qc.barrier()
qc.measure(qr,cr)
# Running locally IBM backends respond too slowly
print(Aer.backends())
backend = Aer.get_backend('qasm_simulator')
job = execute(qc,backend)
job.status
result = job.result()
counts = result.get_counts(qc)
print(counts)
plot_histogram(counts)
performance = {
'success':(counts['01']+counts['10'])/1024,
'failure':1 - (counts['01']+counts['10'])/1024
}
plot_histogram(performance)
IBMQ.load_accounts()
# checking the backends
large_enough_devices = IBMQ.backends(filters=lambda x: x.configuration()['n_qubits'] > 1 and not x.configuration()['simulator'])
print(large_enough_devices)
backend = IBMQ.get_backend('ibmqx4')
job = execute(qc,backend,max_credits=3)
job.status
result = job.result()
counts = result.get_counts(qc)
print(counts)
plot_histogram(counts)
performance = {
'success':(counts['01']+counts['10'])/1024,
'failure':(counts['00']+counts['11'])/1024
}
plot_histogram(performance)
sqr = QuantumRegister(1)
scr = ClassicalRegister(1)
sqc = QuantumCircuit(sqr,scr)
# constructing the circuit
sqc.u3(1.1503,6.4850,2.2555,sqr[0])
sqc.barrier()
# unidentified gate -- now assumed to be R2pi3. if identity then use qc.u1(0,q[0])
sqc.u1((2.0/3)*pi,sqr[0])
sqc.barrier()
sqc.u3(1.231,0,0,sqr[0]) # X
sqc.barrier()
sqc.u1((2.0/3)*pi,sqr[0]) # U (unidentified gate)
sqc.barrier()
# measurement
sqc.u3(0.7854,6.0214,6.1913,sqr[0])
sqc.barrier()
sqc.measure(sqr,scr)
# Running on local simulator
backend = Aer.get_backend('qasm_simulator')
job = execute(sqc,backend)
result = job.result()
counts = result.get_counts(sqc)
print(counts)
plot_histogram(counts)
# Running on IBM
backend = IBMQ.get_backend('ibmqx4')
job = execute(sqc,backend,max_credits=3)
result = job.result()
counts = result.get_counts(sqc)
print(counts)
plot_histogram(counts)
|
https://github.com/quantumjim/qreative
|
quantumjim
|
from qiskit import IBMQ
IBMQ.save_account('MY_API_TOKEN')
from qiskit import IBMQ
IBMQ.load_accounts()
from qiskit import IBMQ
IBMQ.enable_account('MY_API_TOKEN')
IBMQ.backends()
import CreativeQiskit
result = CreativeQiskit.bell_correlation('ZZ',device='ibmq_16_melbourne')
print(' Probability of agreement =',result['P'])
|
https://github.com/urwin419/QiskitChecker
|
urwin419
|
from qiskit import QuantumCircuit
def create_bell_pair():
qc = QuantumCircuit(2)
qc.h(1)
    ### added cx gate ###
qc.cx(0, 1)
qc.cx(1, 0)
return qc
def encode_message(qc, qubit, msg):
    if len(msg) != 2 or not set(msg).issubset({"0", "1"}):
raise ValueError(f"message '{msg}' is invalid")
if msg[1] == "1":
qc.x(qubit)
if msg[0] == "1":
qc.z(qubit)
return qc
def decode_message(qc):
qc.cx(1, 0)
qc.h(1)
### added h gate ###
qc.h(0)
return qc
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit
from qiskit.quantum_info import DensityMatrix
from qiskit.visualization import plot_state_city
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0,1)
# plot using a DensityMatrix
state = DensityMatrix(qc)
plot_state_city(state)
|
https://github.com/agneya-1402/Quantum-HalfAdder
|
agneya-1402
|
import numpy as np
# Importing standard Qiskit libraries
from qiskit import QuantumCircuit, transpile, Aer, IBMQ
from qiskit.tools.jupyter import *
from qiskit.visualization import *
from ibm_quantum_widgets import *
from qiskit import *
# Loading your IBM Quantum account(s)
#provider = IBMQ.load_account()
qc_ha = QuantumCircuit(4,2)
qc_ha.x(0)
qc_ha.x(1)
qc_ha.barrier()
qc_ha.cx(0,2)
qc_ha.cx(1,2)
qc_ha.ccx(0,1,3)
qc_ha.barrier()
qc_ha.measure(2,0)
qc_ha.measure(3,1)
qc_ha.draw()
sim = Aer.get_backend('qasm_simulator')
qobj = assemble(qc_ha)
counts = sim.run(qobj).result().get_counts()
plot_histogram(counts)
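# --- Illustrative sketch (not part of the original notebook) ---
# Classical cross-check of the half adder above: with both inputs set to 1, sum = 1 XOR 1 = 0 and
# carry = 1 AND 1 = 1, so the dominant histogram outcome should be the bitstring '10'
# (classical bit 1 = carry, classical bit 0 = sum).
a_bit, b_bit = 1, 1
print(f"expected outcome: {a_bit * b_bit}{a_bit ^ b_bit}")  # expected outcome: 10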
|
https://github.com/Herbrant/shor-algorithm-qiskit
|
Herbrant
|
"""The following is python code utilizing the qiskit library that can be run on extant quantum
hardware using 5 qubits for factoring the integer 15 into 3 and 5. Using period finding,
for a^r mod N = 1, where a = 11 and N = 15 (the integer to be factored) the problem is to find
r values for this identity such that one can find the prime factors of N. For 11^r mod(15) =1,
results (as shown in fig 1.) correspond with period r = 4 (|00100>) and r = 0 (|00000>).
To find the factor, use the equivalence a^r mod 15. From this:
(a^r -1) mod 15 = (a^(r/2) + 1)(a^(r/2) - 1) mod 15. In this case, a = 11. Plugging in the two r
values for this a value yields (11^(0/2) +1)(11^(4/2) - 1) mod 15 = 2*(11 +1)(11-1) mod 15
Thus, we find (24)(20) mod 15. Taking the greatest common divisor of each coefficient with 15,
gcd(24,15) and gcd(20,15), yields 3 and 5 respectively. These are the prime factors of 15,
so the results of running Shor's algorithm to find the prime factors of an integer using quantum
hardware are demonstrated. Note, this is not the same as the technical implementation of shor's
algorithm described in this section for breaking the discrete log hardness assumption,
though the proof of concept remains."""
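# --- Illustrative sketch (not part of the original file) ---
# Classical post-processing described in the docstring above: taking the greatest common divisor
# of each coefficient, 2*(11 + 1) = 24 and 2*(11 - 1) = 20, with N = 15 yields the prime factors.
from math import gcd
N_demo = 15
print(gcd(24, N_demo), gcd(20, N_demo))  # 3 5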
# Import libraries
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
from numpy import pi
from qiskit import IBMQ, Aer, QuantumCircuit, ClassicalRegister, QuantumRegister, execute
from qiskit.providers.ibmq import least_busy
from qiskit.visualization import plot_histogram
# Initialize qubit registers
qreg_q = QuantumRegister(5, 'q')
creg_c = ClassicalRegister(5, 'c')
circuit = QuantumCircuit(qreg_q, creg_c)
circuit.reset(qreg_q[0])
circuit.reset(qreg_q[1])
circuit.reset(qreg_q[2])
circuit.reset(qreg_q[3])
circuit.reset(qreg_q[4])
# Apply Hadamard transformations to qubit registers
circuit.h(qreg_q[0])
circuit.h(qreg_q[1])
circuit.h(qreg_q[2])
# Apply first QFT, modular exponentiation, and another QFT
circuit.h(qreg_q[1])
circuit.cx(qreg_q[2], qreg_q[3])
circuit.crx(pi/2, qreg_q[0], qreg_q[1])
circuit.ccx(qreg_q[2], qreg_q[3], qreg_q[4])
circuit.h(qreg_q[0])
circuit.rx(pi/2, qreg_q[2])
circuit.crx(pi/2, qreg_q[1], qreg_q[2])
circuit.crx(pi/2, qreg_q[1], qreg_q[2])
circuit.cx(qreg_q[0], qreg_q[1])
# Measure the qubit registers 0-2
circuit.measure(qreg_q[2], creg_c[2])
circuit.measure(qreg_q[1], creg_c[1])
circuit.measure(qreg_q[0], creg_c[0])
# Get least busy quantum hardware backend to run on
provider = IBMQ.load_account()
device = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 3 and
not x.configuration().simulator and x.status().operational==True))
print("Running on current least busy device: ", device)
# Run the circuit on available quantum hardware and plot histogram
from qiskit.tools.monitor import job_monitor
job = execute(circuit, backend=device, shots=1024, optimization_level=3)
job_monitor(job, interval = 2)
results = job.result()
answer = results.get_counts(circuit)
plot_histogram(answer)
#largest amplitude results correspond with r values used to find the prime factor of N.
|
https://github.com/qiskit-community/qiskit-translations-staging
|
qiskit-community
|
from qiskit import QuantumCircuit, transpile
from qiskit.visualization import plot_circuit_layout
from qiskit.providers.fake_provider import FakeVigo
backend = FakeVigo()
ghz = QuantumCircuit(3, 3)
ghz.h(0)
ghz.cx(0,range(1,3))
ghz.barrier()
ghz.measure(range(3), range(3))
new_circ_lv0 = transpile(ghz, backend=backend, optimization_level=0)
plot_circuit_layout(new_circ_lv0, backend)
|
https://github.com/lancecarter03/QiskitLearning
|
lancecarter03
|
from qiskit import *
from qiskit.tools.visualization import plot_histogram
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
circuit = QuantumCircuit(qr,cr)
circuit.decompose().draw(output="mpl", initial_state=True)
circuit.h(qr[0])
circuit.draw(output="mpl")
circuit.cx(qr[0],qr[1])
circuit.draw(output="mpl")
circuit.measure(qr,cr)
circuit.draw(output="mpl")
simulator = Aer.get_backend('qasm_simulator')
result = execute(circuit, backend = simulator).result()
plot_histogram(result.get_counts(circuit))
|
https://github.com/1chooo/Quantum-Oracle
|
1chooo
|
from qiskit import QuantumCircuit
import math
qc = QuantumCircuit(3)
qc.rx(math.pi/2, 0)
qc.ry(math.pi/2, 1)
qc.rz(math.pi/2, 2)
qc.draw("mpl")
|
https://github.com/sebasmos/QuantumVE
|
sebasmos
|
from __future__ import print_function  # __future__ imports must precede other statements
import sys
sys.path.insert(0, '../')
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import random_split
from torch.utils.data import Subset, DataLoader, random_split
from torchvision import datasets, transforms, models  # `models` provides mobilenet_v3_large used below
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
import matplotlib.pyplot as plt
import os
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report
import pandas as pd
# from MAE code
from util.datasets import build_dataset
import argparse
import util.misc as misc
import argparse
import datetime
import json
import numpy as np
import os
import time
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
import timm
assert timm.__version__ == "0.3.2" # version check
from timm.models.layers import trunc_normal_
from timm.data.mixup import Mixup
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
import util.lr_decay as lrd
import util.misc as misc
from util.datasets import build_dataset
from util.pos_embed import interpolate_pos_embed
from util.misc import NativeScalerWithGradNormCount as NativeScaler
import models_vit
import sys
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import models_mae
import torch; print(f'numpy version: {np.__version__}\nCUDA version: {torch.version.cuda} - Torch version: {torch.__version__} - device count: {torch.cuda.device_count()}')
from engine_finetune import train_one_epoch, evaluate
from timm.data import Mixup
from timm.utils import accuracy
from sklearn.metrics import confusion_matrix, classification_report
import seaborn as sns
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
from itertools import cycle
import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score
import torch.optim as optim
imagenet_mean = np.array([0.485, 0.456, 0.406])
imagenet_std = np.array([0.229, 0.224, 0.225])
def show_image(image, title=''):
# image is [H, W, 3]
assert image.shape[2] == 3
plt.imshow(torch.clip((image * imagenet_std + imagenet_mean) * 255, 0, 255).int())
plt.title(title, fontsize=16)
plt.axis('off')
return
def prepare_model(chkpt_dir, arch='mae_vit_large_patch16'):
# build model
model = getattr(models_mae, arch)()
# load model
checkpoint = torch.load(chkpt_dir, map_location='cpu')
msg = model.load_state_dict(checkpoint['model'], strict=False)
print(msg)
return model
def run_one_image(img, model):
x = torch.tensor(img)
# make it a batch-like
x = x.unsqueeze(dim=0)
x = torch.einsum('nhwc->nchw', x)
# run MAE
loss, y, mask = model(x.float(), mask_ratio=0.75)
y = model.unpatchify(y)
y = torch.einsum('nchw->nhwc', y).detach().cpu()
# visualize the mask
mask = mask.detach()
mask = mask.unsqueeze(-1).repeat(1, 1, model.patch_embed.patch_size[0]**2 *3) # (N, H*W, p*p*3)
mask = model.unpatchify(mask) # 1 is removing, 0 is keeping
mask = torch.einsum('nchw->nhwc', mask).detach().cpu()
x = torch.einsum('nchw->nhwc', x)
# masked image
im_masked = x * (1 - mask)
# MAE reconstruction pasted with visible patches
im_paste = x * (1 - mask) + y * mask
# make the plt figure larger
plt.rcParams['figure.figsize'] = [24, 24]
plt.subplot(1, 4, 1)
show_image(x[0], "original")
plt.subplot(1, 4, 2)
show_image(im_masked[0], "masked")
plt.subplot(1, 4, 3)
show_image(y[0], "reconstruction")
plt.subplot(1, 4, 4)
show_image(im_paste[0], "reconstruction + visible")
plt.show()
# Set the seed for PyTorch
torch.manual_seed(42)
# !pip install qiskit_machine_learning
# !pip install qiskit torch torchvision matplotlib
# !pip install qiskit-machine-learning
# !pip install torchviz
# !pip install qiskit[all]
# !pip install qiskit == 0.45.2
# !pip install qiskit_algorithms == 0.7.1
# !pip install qiskit-ibm-runtime == 0.17.0
# !pip install qiskit-aer == 0.13.2
# #Quentum net draw
# !pip install pylatexenc
from qiskit_machine_learning.connectors import TorchConnector
from qiskit_machine_learning.neural_networks.estimator_qnn import EstimatorQNN
from qiskit_machine_learning.circuit.library import QNNCircuit
parser = argparse.ArgumentParser('MAE fine-tuning for image classification', add_help=False)
parser.add_argument('--batch_size', default=256, type=int,
                    help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)')
parser.add_argument('--epochs', default=10, type=int)
parser.add_argument('--accum_iter', default=4, type=int,
help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')
# Model parameters
parser.add_argument('--model', default='mobilenet_v3', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input_size', default=224, type=int,
help='images input size')
parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
# Optimizer parameters
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (absolute lr)')
parser.add_argument('--blr', type=float, default=5e-4, metavar='LR',
help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
parser.add_argument('--layer_decay', type=float, default=0.65,
help='layer-wise lr decay from ELECTRA/BEiT')
parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0')
parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR')
# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=None, metavar='PCT',
help='Color jitter factor (enabled only when not using Auto/RandAug)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                    help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup_prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup_mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# * Finetuning params
parser.add_argument('--finetune', default='mae_pretrain_vit_base.pth',
help='finetune from checkpoint')
parser.add_argument('--global_pool', action='store_true')
parser.set_defaults(global_pool=True)
parser.add_argument('--cls_token', action='store_false', dest='global_pool',
help='Use class token instead of global pool for classification')
# Dataset parameters
parser.add_argument('--data_path', default='/media/enc/vera1/sebastian/data/ABGQI_mel_spectrograms', type=str,
help='dataset path')
parser.add_argument('--nb_classes', default=5, type=int,
help='number of the classification types')
parser.add_argument('--output_dir', default='quinn_5_classes',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default='./output_dir',
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default="/media/enc/vera1/sebastian_hdd/codes/classifiers/mae/MobileNet/quinn_5_classes/checkpoint-49.pth",
help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval',default=True, action='store_true',
help='Perform evaluation only')
parser.add_argument('--dist_eval', action='store_true', default=False,
                    help='Enabling distributed evaluation (recommended during training for faster monitoring)')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
args, unknown = parser.parse_known_args()
misc.init_distributed_mode(args)
print("{}".format(args).replace(', ', ',\n'))
os.makedirs(args.output_dir, exist_ok=True)
device = torch.device(args.device)
# from torchvision.models import mobilenet_v3_large
# model_ft = mobilenet_v3_large(pretrained=True, progress=True)
# model_ft.classifier[-1] = nn.Linear(1280, args.nb_classes)
# base_model = torch.hub.load('pytorch/vision:v0.10.0', 'mobilenet_v2', pretrained=True)
# Remove FC and Global pooling layers to allow for ABGQI fine-tuning
# base_model_output = nn.Sequential(*list(base_model.children())[:-3]) # Remove the last 3 layers
# print(base_model_output)
from torch.nn import Module, Linear
from qiskit_machine_learning.connectors.torch_connector import TorchConnector
from torch import Tensor
from torch.nn import Linear, CrossEntropyLoss, MSELoss
from torch.optim import LBFGS
# from qiskit_machine_learning.neural_networks import CircuitQNN
from qiskit_machine_learning.circuit.library import QNNCircuit
from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap
from qiskit import QuantumCircuit
from qiskit.circuit import Parameter
from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap
from qiskit_algorithms.utils import algorithm_globals
from qiskit_machine_learning.neural_networks import SamplerQNN, EstimatorQNN
from qiskit_machine_learning.connectors import TorchConnector
from qiskit_machine_learning.circuit.library import QNNCircuit
from qiskit.quantum_info import SparsePauliOp  # needed for the per-qubit observables below
def create_qnn():
    feature_map = ZZFeatureMap(5)
    ansatz = RealAmplitudes(5, reps=1)
    qc = QuantumCircuit(5)
    qc.compose(feature_map, inplace=True)
    qc.compose(ansatz, inplace=True)
    # One single-qubit Z observable per qubit so the QNN returns 5 outputs, matching the
    # 5-class head of qNet below. (The original code passed unsupported keyword arguments
    # to QNNCircuit; an EstimatorQNN built from the same feature map and ansatz is assumed here.)
    observables = [
        SparsePauliOp.from_sparse_list([("Z", [i], 1.0)], num_qubits=5)
        for i in range(5)
    ]
    qnn = EstimatorQNN(
        circuit=qc,
        observables=observables,
        input_params=feature_map.parameters,
        weight_params=ansatz.parameters,
    )
    return qnn
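# --- Illustrative sanity check (not part of the original notebook) ---
# The QNN above maps a 5-dimensional input to 5 expectation values, matching the
# 5-class classifier head used in qNet below.
_qnn_check = TorchConnector(create_qnn())
print(_qnn_check(torch.randn(1, 5)).shape)  # torch.Size([1, 5])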
qnn = create_qnn()
class qNet(Module):
def __init__(self, qnn):
super(qNet, self).__init__()
# Load the pre-trained MobileNetV3 model
self.mobilenet = models.mobilenet_v3_large(pretrained=True, progress=True)
# Freeze all layers except the classifier
for param in self.mobilenet.parameters():
param.requires_grad = False
num_classes = args.nb_classes
in_features = self.mobilenet.classifier[-1].in_features
self.mobilenet.classifier[-1] = nn.Linear(in_features, num_classes)
num_layers_unfreeze = 50
# Unfreeze the last layer for fine-tuning
for param in self.mobilenet.classifier[-num_layers_unfreeze:].parameters():
param.requires_grad = True
# Ensure that qnn is a PyTorch neural network object
self.qnn = TorchConnector(qnn)
self.fc1 = nn.Linear(num_classes, num_classes)
def forward(self, x):
# Pass the input through the MobileNetV3 model
x = self.mobilenet(x)
# Apply the quantum network in the forward section
x = self.qnn(x)
x = x.view(x.size(0), -1) # Flatten the output
return self.fc1(x)
# # # Create random input data
batch_size = 1
channels = 3
height = 224
width = 224
random_input = torch.randn(batch_size, channels, height, width)
model = qNet(qnn)
output = model(random_input)
print("Output shape:", output.shape)
model
model
model(random_input)
misc.init_distributed_mode(args)
# print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
print("{}".format(args).replace(', ', ',\n'))
device = torch.device(args.device)
seed = args.seed + misc.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
dataset_train = build_dataset(is_train=True, args=args)
dataset_val = build_dataset(is_train=False, args=args)
if True: # args.distributed:
num_tasks = misc.get_world_size()
global_rank = misc.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True) # shuffle=True to reduce monitor bias
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None and not args.eval:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = SummaryWriter(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
# model = models_vit.__dict__[args.model](
# num_classes=args.nb_classes,
# drop_path_rate=args.drop_path,
# global_pool=args.global_pool,
# )
model = qNet(qnn)  # the original assigned an undefined name (stage_1_model); the hybrid qNet defined above is assumed here
if args.finetune and not args.eval:
print("args.finetune and not args.eval")
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load pre-trained checkpoint from: %s" % args.finetune)
checkpoint_model = checkpoint['model']
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
# interpolate position embedding
interpolate_pos_embed(model, checkpoint_model)
# load pre-trained model
msg = model.load_state_dict(checkpoint_model, strict=False)
print(msg)
if args.global_pool:
assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'}
else:
assert set(msg.missing_keys) == {'head.weight', 'head.bias'}
# manually initialize fc layer
trunc_normal_(model.head.weight, std=2e-5)
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params (M): %.2f' % (n_parameters / 1.e6))
eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
if args.lr is None: # only base_lr is specified
args.lr = args.blr * eff_batch_size / 256
print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.accum_iter)
print("effective batch size: %d" % eff_batch_size)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
optimizer = optim.Adam(model.parameters(), lr=args.lr)#SEB
# optimizer = torch.optim.AdamW(param_groups, lr=args.lr)
loss_scaler = NativeScaler()
if mixup_fn is not None:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
print("criterion = %s" % str(criterion))
misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
if args.eval:
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
# exit(0)
train = True
if train:
print(f"Start training for {args.epochs} epochs with batch size of {args.batch_size}")
start_time = time.time()
max_accuracy = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
train_stats = train_one_epoch(
model, criterion, data_loader_train,
optimizer, device, epoch, loss_scaler,
args.clip_grad, mixup_fn,
log_writer=log_writer,
args=args
)
if args.output_dir:
misc.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch)
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
max_accuracy = max(max_accuracy, test_stats["acc1"])
print(f'Max accuracy: {max_accuracy:.2f}%')
if log_writer is not None:
log_writer.add_scalar('perf/test_acc1', test_stats['acc1'], epoch)
log_writer.add_scalar('perf/test_acc5', test_stats['acc5'], epoch)
log_writer.add_scalar('perf/test_loss', test_stats['loss'], epoch)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and misc.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
EXPERIMENT_NAME = "mobileNet"
saving_model = f"{EXPERIMENT_NAME}/models"
os.makedirs(saving_model, exist_ok = True)
os.makedirs(EXPERIMENT_NAME, exist_ok=True)
if args.eval:
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
@torch.no_grad()
def evaluate_test(data_loader, model, device):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = misc.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
all_predictions = []
all_labels = []
for batch in metric_logger.log_every(data_loader, 10, header):
images = batch[0]
target = batch[-1]
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)#
pred = output.argmax(dim=1)
all_predictions.append(pred.cpu().numpy())# ADDED
all_labels.append(target.cpu().numpy())# ADDED
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
all_predictions = np.array(all_predictions)#.squeeze(0)
all_labels = np.array(all_labels)#.squeeze(0)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
# return
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}, np.concatenate(all_predictions, axis=0), np.concatenate(all_labels, axis=0)
metrics, all_predictions, all_labels = evaluate_test(data_loader_val, model, device)
# print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
metrics
all_predictions
unique_classes = np.unique(np.concatenate((all_labels, all_predictions)))
unique_classes
confusion_mat = confusion_matrix(all_labels, all_predictions, labels=unique_classes)
conf_matrix = pd.DataFrame(confusion_mat, index=unique_classes, columns=unique_classes)
conf_matrix
unique_classes = np.unique(np.concatenate((all_labels, all_predictions)))
confusion_mat = confusion_matrix(all_labels, all_predictions, labels=unique_classes)
conf_matrix = pd.DataFrame(confusion_mat, index=unique_classes, columns=unique_classes)
# Plot the confusion matrix using seaborn
plt.figure(figsize=(5, 4))
ax = sns.heatmap(conf_matrix, annot=True, fmt='.1f', cmap=sns.cubehelix_palette(as_cmap=True), linewidths=0.1, cbar=True)
# Set labels and ticks
ax.set_xlabel('Predicted Labels')
ax.set_ylabel('True Labels')
# Set x and y ticks using the unique classes
ax.set_xticks(range(len(unique_classes)))
ax.set_yticks(range(len(unique_classes)))
# Set x and y ticks at the center of the cells
ax.set_xticks([i + 0.5 for i in range(len(unique_classes))])
ax.set_yticks([i + 0.5 for i in range(len(unique_classes))])
plt.show()
def plot_multiclass_roc_curve(all_labels, all_predictions, EXPERIMENT_NAME="."):
# Step 1: Label Binarization
label_binarizer = LabelBinarizer()
y_onehot = label_binarizer.fit_transform(all_labels)
all_predictions_hot = label_binarizer.transform(all_predictions)
# Step 2: Calculate ROC curves
fpr = dict()
tpr = dict()
roc_auc = dict()
unique_classes = range(y_onehot.shape[1])
for i in unique_classes:
fpr[i], tpr[i], _ = roc_curve(y_onehot[:, i], all_predictions_hot[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Step 3: Plot ROC curves
fig, ax = plt.subplots(figsize=(8, 8))
# Micro-average ROC curve
fpr_micro, tpr_micro, _ = roc_curve(y_onehot.ravel(), all_predictions_hot.ravel())
roc_auc_micro = auc(fpr_micro, tpr_micro)
plt.plot(
fpr_micro,
tpr_micro,
label=f"micro-average ROC curve (AUC = {roc_auc_micro:.2f})",
color="deeppink",
linestyle=":",
linewidth=4,
)
# Macro-average ROC curve
all_fpr = np.unique(np.concatenate([fpr[i] for i in unique_classes]))
mean_tpr = np.zeros_like(all_fpr)
for i in unique_classes:
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
mean_tpr /= len(unique_classes)
fpr_macro = all_fpr
tpr_macro = mean_tpr
roc_auc_macro = auc(fpr_macro, tpr_macro)
plt.plot(
fpr_macro,
tpr_macro,
label=f"macro-average ROC curve (AUC = {roc_auc_macro:.2f})",
color="navy",
linestyle=":",
linewidth=4,
)
# Individual class ROC curves with unique colors
colors = plt.cm.rainbow(np.linspace(0, 1, len(unique_classes)))
for class_id, color in zip(unique_classes, colors):
plt.plot(
fpr[class_id],
tpr[class_id],
color=color,
label=f"ROC curve for Class {class_id} (AUC = {roc_auc[class_id]:.2f})",
linewidth=2,
)
plt.plot([0, 1], [0, 1], color='gray', linestyle='--', linewidth=2) # Add diagonal line for reference
plt.axis("equal")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Extension of Receiver Operating Characteristic\n to One-vs-Rest multiclass")
plt.legend()
plt.savefig(f'{EXPERIMENT_NAME}/roc_curve.png')
plt.show()
# Example usage:
plot_multiclass_roc_curve(all_labels, all_predictions, EXPERIMENT_NAME)
# def visualize_predictions(model, val_loader, device, type_label=None, dataset_type=1, unique_classes=np.array([0, 1, 2, 3, 4, 5, 6])):
# criterion = torch.nn.CrossEntropyLoss()
# metric_logger = misc.MetricLogger(delimiter=" ")
# header = 'Test:'
# # switch to evaluation mode
# model.eval()
# all_predictions = []
# all_labels = []
# for batch in metric_logger.log_every(val_loader, 10, header):
# images = batch[0]
# target = batch[-1]
# images = images.to(device, non_blocking=True)
# target = target.to(device, non_blocking=True)
# # compute output
# with torch.cuda.amp.autocast():
# output = model(images)
# loss = criterion(output, target)#
# pred = output.argmax(dim=1)
# all_predictions.append(pred.cpu().numpy())# ADDED
# all_labels.append(target.cpu().numpy())# ADDED
# acc1, acc5 = accuracy(output, target, topk=(1, 5))
# batch_size = images.shape[0]
# metric_logger.update(loss=loss.item())
# metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
# metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# all_predictions = np.array(all_predictions)#.squeeze(0)
# all_labels = np.array(all_labels)#.squeeze(0)
# if type_label is None:
# type_label = unique_classes
# # Create a 4x4 grid for visualization
# num_rows = 4
# num_cols = 4
# plt.figure(figsize=(12, 12))
# for i in range(num_rows * num_cols):
# plt.subplot(num_rows, num_cols, i + 1)
# idx = np.random.randint(len(all_labels))
# import pdb;pdb.set_trace()
# plt.imshow(images[idx].cpu().numpy().squeeze(), cmap='gray')
# # Use the class names instead of numeric labels for Fashion MNIST
# if dataset_type == 1:
# class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# predicted_class = class_names[all_predictions[idx]]
# actual_class = class_names[all_labels[idx]]
# else:
# predicted_class = all_predictions[idx]
# actual_class = all_labels[idx]
# plt.title(f'Pred: {predicted_class}\nActual: {actual_class}')
# plt.axis('off')
# plt.tight_layout()
# plt.show()
# visualize_predictions(model, data_loader_val, device, dataset_type=2, unique_classes=unique_classes)
unique_classes
# Show the classification report as a DataFrame and save it to disk
report = classification_report(all_labels, all_predictions, target_names=unique_classes, output_dict=True)
df = pd.DataFrame(report).transpose()
df.to_csv(os.path.join(EXPERIMENT_NAME, "confusion_matrix.csv"))  # note: this file holds the classification report
print(df)
df
# Calculate precision, recall, and specificity (micro-averaged)
precision = precision_score(all_labels, all_predictions, average='micro')
recall = recall_score(all_labels, all_predictions, average='micro')
# Calculate true negatives, false positives, and specificity (class 1 treated as the positive class, one-vs-rest)
tn = np.sum((all_labels != 1) & (all_predictions != 1))
fp = np.sum((all_labels != 1) & (all_predictions == 1))
specificity = tn / (tn + fp)
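# Alternative sketch (assumption: a macro-averaged, per-class specificity may be more informative for multiclass labels)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(all_labels, all_predictions)
per_class_specificity = []
for k in range(cm.shape[0]):
    tn_k = cm.sum() - cm[k, :].sum() - cm[:, k].sum() + cm[k, k]  # true negatives for class k
    fp_k = cm[:, k].sum() - cm[k, k]                              # false positives for class k
    per_class_specificity.append(tn_k / (tn_k + fp_k))
specificity_macro = float(np.mean(per_class_specificity))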
# Calculate F1 score (weighted average)
f1 = f1_score(all_labels, all_predictions, average='weighted')
evaluation_metrics = {
"Acc1": metrics['acc1'], # Add acc1 metric
"Acc5": metrics['acc5'], # Add acc5 metric
"loss": metrics['loss'], # Add acc5 metric
"F1 Score": [f1],
"Precision": [precision],
"Recall": [recall],
"Specificity": [specificity]
}
evaluation_metrics
# Create a DataFrame from the dictionary
df = pd.DataFrame(evaluation_metrics)
# Save the DataFrame to a CSV file
df.to_csv(f'{EXPERIMENT_NAME}/evaluation_metrics_for_table.csv', index=False)
df
|
https://github.com/jonasmaziero/computacao_quantica_qiskit
|
jonasmaziero
| |
https://github.com/2lambda123/Qiskit-qiskit
|
2lambda123
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test Qiskit's QuantumCircuit class for multiple registers."""
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.converters.circuit_to_dag import circuit_to_dag
from qiskit.test import QiskitTestCase
from qiskit.circuit.exceptions import CircuitError
class TestCircuitMultiRegs(QiskitTestCase):
"""QuantumCircuit Qasm tests."""
def test_circuit_multi(self):
"""Test circuit multi regs declared at start."""
qreg0 = QuantumRegister(2, "q0")
creg0 = ClassicalRegister(2, "c0")
qreg1 = QuantumRegister(2, "q1")
creg1 = ClassicalRegister(2, "c1")
circ = QuantumCircuit(qreg0, qreg1, creg0, creg1)
circ.x(qreg0[1])
circ.x(qreg1[0])
meas = QuantumCircuit(qreg0, qreg1, creg0, creg1)
meas.measure(qreg0, creg0)
meas.measure(qreg1, creg1)
qc = circ.compose(meas)
circ2 = QuantumCircuit()
circ2.add_register(qreg0)
circ2.add_register(qreg1)
circ2.add_register(creg0)
circ2.add_register(creg1)
circ2.x(qreg0[1])
circ2.x(qreg1[0])
meas2 = QuantumCircuit()
meas2.add_register(qreg0)
meas2.add_register(qreg1)
meas2.add_register(creg0)
meas2.add_register(creg1)
meas2.measure(qreg0, creg0)
meas2.measure(qreg1, creg1)
qc2 = circ2.compose(meas2)
dag_qc = circuit_to_dag(qc)
dag_qc2 = circuit_to_dag(qc2)
dag_circ2 = circuit_to_dag(circ2)
dag_circ = circuit_to_dag(circ)
self.assertEqual(dag_qc, dag_qc2)
self.assertEqual(dag_circ, dag_circ2)
def test_circuit_multi_name_collision(self):
"""Test circuit multi regs, with name collision."""
qreg0 = QuantumRegister(2, "q")
qreg1 = QuantumRegister(3, "q")
self.assertRaises(CircuitError, QuantumCircuit, qreg0, qreg1)
|
https://github.com/MonitSharma/Qiskit-Summer-School-and-Quantum-Challenges
|
MonitSharma
|
# General Imports
import numpy as np
# Visualisation Imports
import matplotlib.pyplot as plt
# Scikit Imports
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler, MinMaxScaler
# Qiskit Imports
from qiskit import Aer, execute
from qiskit.circuit import QuantumCircuit, Parameter, ParameterVector
from qiskit.circuit.library import PauliFeatureMap, ZFeatureMap, ZZFeatureMap
from qiskit.circuit.library import TwoLocal, NLocal, RealAmplitudes, EfficientSU2
from qiskit.circuit.library import HGate, RXGate, RYGate, RZGate, CXGate, CRXGate, CRZGate
from qiskit_machine_learning.kernels import QuantumKernel
# Load digits dataset
digits = datasets.load_digits(n_class=2)
# Plot example '0' and '1'
fig, axs = plt.subplots(1, 2, figsize=(6,3))
axs[0].set_axis_off()
axs[0].imshow(digits.images[0], cmap=plt.cm.gray_r, interpolation='nearest')
axs[1].set_axis_off()
axs[1].imshow(digits.images[1], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
# Split dataset
sample_train, sample_test, label_train, label_test = train_test_split(
digits.data, digits.target, test_size=0.2, random_state=22)
# Reduce dimensions
n_dim = 4
pca = PCA(n_components=n_dim).fit(sample_train)
sample_train = pca.transform(sample_train)
sample_test = pca.transform(sample_test)
# Normalise
std_scale = StandardScaler().fit(sample_train)
sample_train = std_scale.transform(sample_train)
sample_test = std_scale.transform(sample_test)
# Scale
samples = np.append(sample_train, sample_test, axis=0)
minmax_scale = MinMaxScaler((-1, 1)).fit(samples)
sample_train = minmax_scale.transform(sample_train)
sample_test = minmax_scale.transform(sample_test)
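# The scaled features feed directly into rotation angles in the feature maps below,
# so bounding them to [-1, 1] keeps the encoding angles in a well-behaved range.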
# Select
train_size = 100
sample_train = sample_train[:train_size]
label_train = label_train[:train_size]
test_size = 20
sample_test = sample_test[:test_size]
label_test = label_test[:test_size]
print(sample_train[0], label_train[0])
print(sample_test[0], label_test[0])
# 3 features, depth 2
map_z = ZFeatureMap(feature_dimension=3, reps=2)
map_z.draw('mpl')
# 3 features, depth 1
map_zz = ZZFeatureMap(feature_dimension=3, reps=1)
map_zz.draw('mpl')
# 3 features, depth 1, linear entanglement
map_zz = ZZFeatureMap(feature_dimension=3, reps=1, entanglement='linear')
map_zz.draw('mpl')
# 3 features, depth 1, circular entanglement
map_zz = ZZFeatureMap(feature_dimension=3, reps=1, entanglement='circular')
map_zz.draw('mpl')
# 3 features, depth 1
map_pauli = PauliFeatureMap(feature_dimension=3, reps=1, paulis = ['X', 'Y', 'ZZ'])
map_pauli.draw('mpl')
from functools import reduce  # needed for the product over features below
def custom_data_map_func(x):
    coeff = x[0] if len(x) == 1 else reduce(lambda m, n: m * n, np.sin(np.pi - x))
    return coeff
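# Quick sanity check of the custom data map on an illustrative point (values are arbitrary):
print(custom_data_map_func(np.array([0.1, 0.2, 0.3])))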
map_customdatamap = PauliFeatureMap(feature_dimension=3, reps=1, paulis=['Z','ZZ'],
data_map_func=custom_data_map_func)
#map_customdatamap.draw() # qiskit isn't able to draw the circuit with np.sin in the custom data map
twolocal = TwoLocal(num_qubits=3, reps=2, rotation_blocks=['ry','rz'],
entanglement_blocks='cx', entanglement='circular', insert_barriers=True)
twolocal.draw('mpl')
twolocaln = NLocal(num_qubits=3, reps=2,
rotation_blocks=[RYGate(Parameter('a')), RZGate(Parameter('a'))],
entanglement_blocks=CXGate(),
entanglement='circular', insert_barriers=True)
twolocaln.draw('mpl')
# rotation block:
rot = QuantumCircuit(2)
params = ParameterVector('r', 2)
rot.ry(params[0], 0)
rot.rz(params[1], 1)
# entanglement block:
ent = QuantumCircuit(4)
params = ParameterVector('e', 3)
ent.crx(params[0], 0, 1)
ent.crx(params[1], 1, 2)
ent.crx(params[2], 2, 3)
nlocal = NLocal(num_qubits=6, rotation_blocks=rot, entanglement_blocks=ent,
entanglement='linear', insert_barriers=True)
nlocal.draw('mpl')
qubits = 3
repeats = 2
x = ParameterVector('x', length=qubits)
var_custom = QuantumCircuit(qubits)
for _ in range(repeats):
for i in range(qubits):
var_custom.rx(x[i], i)
for i in range(qubits):
for j in range(i + 1, qubits):
var_custom.cx(i, j)
var_custom.p(x[i] * x[j], j)
var_custom.cx(i, j)
var_custom.barrier()
var_custom.draw('mpl')
print(sample_train[0])
encode_map = ZZFeatureMap(feature_dimension=4, reps=2, entanglement='linear', insert_barriers=True)
encode_circuit = encode_map.bind_parameters(sample_train[0])
encode_circuit.draw(output='mpl')
x = [-0.1,0.2]
# YOUR CODE HERE
encode_map_x = ZZFeatureMap(feature_dimension=2, reps=4)
ex1_circuit = encode_map_x.bind_parameters(x)
ex1_circuit.draw(output='mpl')
from qc_grader import grade_lab3_ex1
# Note that the grading function is expecting a quantum circuit
grade_lab3_ex1(ex1_circuit)
zz_map = ZZFeatureMap(feature_dimension=4, reps=2, entanglement='linear', insert_barriers=True)
zz_kernel = QuantumKernel(feature_map=zz_map, quantum_instance=Aer.get_backend('statevector_simulator'))
print(sample_train[0])
print(sample_train[1])
zz_circuit = zz_kernel.construct_circuit(sample_train[0], sample_train[1])
zz_circuit.decompose().decompose().draw(output='mpl')
backend = Aer.get_backend('qasm_simulator')
job = execute(zz_circuit, backend, shots=8192,
seed_simulator=1024, seed_transpiler=1024)
counts = job.result().get_counts(zz_circuit)
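# The fraction of all-zero outcomes estimates the kernel entry
# k(x0, x1) = |<phi(x1)|phi(x0)>|^2 between the two encoded training samples.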
counts['0000']/sum(counts.values())
matrix_train = zz_kernel.evaluate(x_vec=sample_train)
matrix_test = zz_kernel.evaluate(x_vec=sample_test, y_vec=sample_train)
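# matrix_train is the (train x train) Gram matrix and matrix_test is (test x train),
# which are the shapes SVC(kernel='precomputed') expects in fit/score below.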
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
axs[0].imshow(np.asmatrix(matrix_train),
interpolation='nearest', origin='upper', cmap='Blues')
axs[0].set_title("training kernel matrix")
axs[1].imshow(np.asmatrix(matrix_test),
interpolation='nearest', origin='upper', cmap='Reds')
axs[1].set_title("testing kernel matrix")
plt.show()
x = [-0.1,0.2]
y = [0.4,-0.6]
# YOUR CODE HERE
encode_map_x2 = ZZFeatureMap(feature_dimension=2, reps=4)
zz_kernel_x2 = QuantumKernel(feature_map=encode_map_x2, quantum_instance=Aer.get_backend('statevector_simulator'))
zz_circuit_x2 = zz_kernel_x2.construct_circuit(x, y)
backend = Aer.get_backend('qasm_simulator')
job = execute(zz_circuit_x2, backend, shots=8192,
              seed_simulator=1024, seed_transpiler=1024)
counts_x2 = job.result().get_counts(zz_circuit_x2)
amplitude = counts_x2['00'] / sum(counts_x2.values())
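# Optional cross-check (commented out so it does not affect grading): the statevector-based
# kernel should return the exact value that the sampled estimate above approximates.
# exact = zz_kernel_x2.evaluate(x_vec=np.array([x]), y_vec=np.array([y]))[0, 0]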
from qc_grader import grade_lab3_ex2
# Note that the grading function is expecting a floating point number
grade_lab3_ex2(amplitude)
zzpc_svc = SVC(kernel='precomputed')
zzpc_svc.fit(matrix_train, label_train)
zzpc_score = zzpc_svc.score(matrix_test, label_test)
print(f'Precomputed kernel classification test score: {zzpc_score}')
zzcb_svc = SVC(kernel=zz_kernel.evaluate)
zzcb_svc.fit(sample_train, label_train)
zzcb_score = zzcb_svc.score(sample_test, label_test)
print(f'Callable kernel classification test score: {zzcb_score}')
classical_kernels = ['linear', 'poly', 'rbf', 'sigmoid']
for kernel in classical_kernels:
classical_svc = SVC(kernel=kernel)
classical_svc.fit(sample_train, label_train)
classical_score = classical_svc.score(sample_test, label_test)
print('%s kernel classification test score: %0.2f' % (kernel, classical_score))
|