import functools
import numpy as np
from quimb.tensor import Tensor, TensorNetwork
import jax
import jax.numpy as jnp

jax.config.update("jax_enable_x64", True)

filename = "QPT_dataset1 N_qubit=10, N_shot=1000.json"

"""Load the QPT dataset"""
import json
import pathlib

# Fail early with the offending path in the message — a bare
# `raise FileNotFoundError` gave no hint which file was missing.
if not pathlib.Path(filename).exists():
	raise FileNotFoundError(f"QPT dataset not found: {filename}")
with open(filename, 'r') as f:
	data_dict = json.load(f)

# Unpack dataset metadata and raw samples into module-level names.
metadata = data_dict['metadata']
samples = data_dict['samples']

N_qubit = metadata['N_qubit']   # number of qubits in the process
N_set = metadata['N_set']       # number of distinct measurement settings
N_shot = metadata['N_shot']     # shots taken per setting
N_total = N_set * N_shot        # total number of single-shot samples

# The 1-qubit and 2-qubit coefficient of SPLM
# (ground-truth Lindblad rates used later as the comparison target;
# assumed shapes (N_qubit, 3) and (N_qubit-1, 9) — matches the reshapes below)
real_lam1_mat = jnp.asarray(metadata["lam1_mat"])
real_lam2_mat = jnp.asarray(metadata["lam2_mat"])

# Measurement settings and datas
# rhoin_settings: per-set input-state labels; datas: per-set, per-shot outcomes.
rhoin_settings = samples['rhoin']
datas = samples['datas']
"""Process and shuffle the dataset"""
from TNQPT.states import ptm_pauli_proj

# Flattened PTM vectors for the six single-qubit Pauli eigenstate projectors.
Xp_o1 = ptm_pauli_proj('x+').reshape(-1)
Xm_o1 = ptm_pauli_proj('x-').reshape(-1)
Yp_o1 = ptm_pauli_proj('y+').reshape(-1)
Ym_o1 = ptm_pauli_proj('y-').reshape(-1)
Zp_o1 = ptm_pauli_proj('z+').reshape(-1)
Zm_o1 = ptm_pauli_proj('z-').reshape(-1)

rhoin_map = {
	'x+': Xp_o1, 'x-': Xm_o1,
	'y+': Yp_o1, 'y-': Ym_o1,
	'z+': Zp_o1, 'z-': Zm_o1,
}
meas_map = {
	'x': {'x+': Xp_o1, 'x-': Xm_o1},
	'y': {'y+': Yp_o1, 'y-': Ym_o1},
	'z': {'z+': Zp_o1, 'z-': Zm_o1}
}
# Map each input-state label ('x+', 'x-', ...) to its integer code.
rhoin_trans = {k: n for n, k in enumerate(rhoin_map)}

# Encode each setting string as integer codes, then repeat each setting
# once per shot so rows align one-to-one with the flattened outcome data.
rhoin_settings = jnp.asarray([[rhoin_trans[_] for _ in rhoin_setting] for rhoin_setting in rhoin_settings])
rhoin_settings = jnp.repeat(rhoin_settings, N_shot, axis=0)
# Stack the PTM vectors so integer codes index rows directly.
rhoin_map = jnp.array(list(rhoin_map.values()))

data_trans, data_map = [], []
for v in meas_map.values():
	data_trans.extend(v.keys())
	data_map.extend(v.values())

# Map each outcome label to its integer code (order follows meas_map).
data_trans = {k: n for n, k in enumerate(data_trans)}

# Flatten outcomes to one row per shot.  BUG FIX: the row length was a
# hard-coded 10; use N_qubit so other system sizes work unchanged.
datas = jnp.asarray([[[data_trans[_] for _ in outcome] for outcome in outcomes] for outcomes in datas]).reshape(-1, N_qubit)
data_map = jnp.array(data_map)

# shuffle the dataset (one shared permutation keeps settings/outcomes aligned)
inds = np.random.permutation(N_total)
rhoin_settings = rhoin_settings[inds]
datas = datas[inds]
"""The MPO of noisy circuit, here is even CNOT layer
and Creating MPO of Sparse Pauli Lindblad model(SPLM)
"""
from QPT_utils import even_CNOT_mpo
from TNQPT.noise_model import PTM_SPLM_MPO

# Ideal (noiseless) even-CNOT layer as an MPO.  Index convention used
# throughout: "b{}" legs face the input states, "a{}" legs connect to
# the noise MPO, "k{}" legs face the measurement layer.
ideal_mpo = even_CNOT_mpo(N_qubit,
                          upper_ind_id="a{}",
                          lower_ind_id="b{}",
                          site_tag_id="I{}")

# Ground-truth SPLM channel built from the dataset's coefficient matrices.
onchip_splm = PTM_SPLM_MPO.from_adj_param_mat(real_lam1_mat, real_lam2_mat,
                                              upper_ind_id="k{}",
                                              lower_ind_id="a{}",
                                              site_tag_id="I{}", )

# ||Lambda||_F^2 of the on-chip channel, precomputed once: it is
# parameter-independent, so the loss reuses it every iteration.
onchip_splm_norm2 = onchip_splm.norm() ** 2

"""Calculate the KL divergence distance"""

def get_noisy_mpo(params):
	"""Build the PTM-form SPLM MPO from a flat parameter vector.

	The first ``3 * N_qubit`` entries are the one-body coefficients
	(reshaped to ``(-1, 3)``); the remaining entries are the two-body
	coefficients (reshaped to ``(-1, 9)``).
	"""
	split = N_qubit * 3
	one_body = params[:split].reshape(-1, 3)
	two_body = params[split:].reshape(-1, 9)
	return PTM_SPLM_MPO.from_adj_param_mat(
		one_body, two_body,
		L=N_qubit,
		upper_ind_id="k{}",
		lower_ind_id="a{}",
		site_tag_id="I{}",
		like="jax",
	)

def loss_fn(
		params,
		ideal_mpo,
		batch_rhoin_settings,
		batch_datas,
		batch_size=500,
		alpha=5.0
):
	"""Negative-log-likelihood loss of the SPLM parameters on one batch.

	Parameters
	----------
	params : flat (12 * N_qubit - 9,) vector of SPLM coefficients.
	ideal_mpo : MPO of the ideal (noiseless) circuit layer.
	batch_rhoin_settings : (batch, N_qubit) integer codes of input states.
	batch_datas : (batch, N_qubit) integer codes of measured outcomes.
	batch_size : divisor normalizing the summed log-likelihood.
	alpha : weight of the non-negativity penalty on the coefficients.

	Returns
	-------
	(loss, diff_loss) : the differentiable training loss (KL + penalty)
	and, as auxiliary output, the normalized Frobenius distance
	``||Lambda - Lambda_theta||_F^2 / 4^n`` to the on-chip channel.
	"""
	# Dead commented-out copy of this construction removed; it lives in
	# get_noisy_mpo, which is the single source of truth.
	splm = get_noisy_mpo(params)
	noisy_mpo = ideal_mpo | splm

	def prob_fn(
			rhoin_setting,
			data,
	):
		# Probability of one (input state, outcome) pair: contract the
		# product-state PTM vectors against the noisy-channel MPO.
		rho_mps = TensorNetwork([Tensor(rhoin_map[s], inds=(f"b{l}",), tags=[f"I{l}"])
		                         for l, s in enumerate(rhoin_setting)], virtual=True)
		meas_mps = TensorNetwork([Tensor(data_map[s], inds=(f"k{l}",), tags=[f"I{l}"])
		                          for l, s in enumerate(data)], virtual=True)
		return jnp.real((rho_mps | noisy_mpo | meas_mps).contract())

	KL_loss_fn = jax.vmap(prob_fn, (0, 0), 0, )
	probs = KL_loss_fn(batch_rhoin_settings, batch_datas)

	# Monte-Carlo estimate of the KL divergence (up to a data-only constant).
	KL_loss = jnp.sum(-jnp.log(probs)) / batch_size
	# Soft constraint pushing SPLM rates toward non-negative values.
	penalty = alpha * jnp.sum(jnp.where(params < 0., -params, 0.))
	# Distance to the on-chip channel; reported for monitoring only (aux).
	diff_loss = jnp.real(splm.norm() ** 2 + onchip_splm_norm2 - 2.0 * splm.overlap(onchip_splm)) / 4.0 ** N_qubit
	return KL_loss + penalty, diff_loss

# Bind the fixed ideal MPO, then JIT.  Order matters: value_and_grad must
# wrap the partial (so `params` stays argument 0), and `loss_fn` is only
# rebound to its jitted form afterwards.
loss_fn = functools.partial(loss_fn, ideal_mpo=ideal_mpo)
loss_grad_fn = jax.jit(jax.value_and_grad(loss_fn, has_aux=True))
loss_fn = jax.jit(loss_fn)

"""Training using Adam"""
import optax
from tqdm import trange

def QPT_train_and_test(
		epoches: int,
		iters_per_epoch: int,
		train_batch_size,
		test_batch_size,
		params0,
		lr,
):
	"""Fit the SPLM parameters with AdamW, tracking held-out loss per step.

	Parameters
	----------
	epoches : number of epochs; the optimizer state is reset each epoch.
	iters_per_epoch : gradient steps per epoch.
	train_batch_size : samples drawn (without replacement) per update.
	test_batch_size : samples drawn for the monitored test loss.
	params0 : initial flat parameter vector (copied, not mutated).
	lr : learning rate or optax schedule passed to adamw.

	Returns
	-------
	(params, losses, diff_losses) : final parameters plus per-iteration
	test losses and Frobenius-distance values (as Python floats).
	"""
	losses, diff_losses = [], []
	params = params0.copy()
	key = jax.random.PRNGKey(np.random.randint(1000))
	best_loss, best_diff_loss = np.inf, np.inf
	for epoch in range(epoches):
		# Fresh optimizer each epoch: Adam moments restart from zero.
		solver = optax.adamw(learning_rate=lr)
		opt_state = solver.init(params)

		pbar = trange(iters_per_epoch, desc=f'epoch: {epoch + 1}/{epoches}', leave=True)
		for _ in pbar:
			# BUG FIX: previously `key, subkey = jax.random.split(key)` was
			# followed by two jax.random.choice calls that both reused `key`
			# (subkey was never consumed), so the train and test batches were
			# drawn from the identical RNG stream each iteration.  Split into
			# independent keys for the two draws.
			key, train_key, test_key = jax.random.split(key, 3)
			train_inds = jax.random.choice(train_key, N_total, shape=(train_batch_size,), replace=False)
			test_inds = jax.random.choice(test_key, N_total, shape=(test_batch_size,), replace=False)

			train_rhoin_settings, test_rhoin_settings = rhoin_settings[train_inds], rhoin_settings[test_inds]
			train_datas, test_datas = datas[train_inds], datas[test_inds]

			# Gradient from the training batch (aux value discarded).
			_, grad = loss_grad_fn(params,
			                       batch_rhoin_settings=train_rhoin_settings,
			                       batch_datas=train_datas,
			                       batch_size=train_batch_size)
			# Loss evaluated on a held-out batch, for monitoring only.
			loss, diff_loss = loss_fn(params,
			                          batch_rhoin_settings=test_rhoin_settings,
			                          batch_datas=test_datas,
			                          batch_size=test_batch_size)

			updates, opt_state = solver.update(grad, opt_state, params)
			params = optax.apply_updates(params, updates)

			# Pull scalars off-device once; keeps the histories plain floats.
			loss, diff_loss = float(loss), float(diff_loss)
			best_diff_loss = min(best_diff_loss, diff_loss)
			best_loss = min(best_loss, loss)
			pbar.set_postfix(
				{'loss': loss, 'diff_loss': diff_loss, 'best_loss': best_loss, 'best_diff_loss': best_diff_loss})

			losses.append(loss)
			diff_losses.append(diff_loss)
	return params, losses, diff_losses

# Random non-negative initialization: 3*N_qubit one-body plus
# 9*(N_qubit-1) two-body coefficients = 12*N_qubit - 9 parameters.
params0 = jnp.asarray(np.random.uniform(low=0., high=1.2e-3, size=(12 * N_qubit - 9)))

params, losses, diff_losses = QPT_train_and_test(
	1,                            # epoches
	30,                           # iters_per_epoch
	1000,                         # train_batch_size
	min(int(N_total / 2), 15000),  # test_batch_size, capped at 15000
	params0,
	# Exponentially decaying learning rate; decay factor 0.999 per step
	# after a transition of int(1400/3*2) steps.
	lr=optax.schedules.exponential_decay(1.0e-4, 1, 0.999,
	                                     int(1400 / 3 * 2))
)
print(params)

"""Drawing"""
# Split the flat vectors back into (N_qubit, 3) one-body and (-1, 9)
# two-body coefficient matrices for plotting.
initial_lam1_mat, initial_lam2_mat = params0[:N_qubit * 3].reshape(-1, 3), params0[N_qubit * 3:].reshape(-1, 9)
guess_lam1_mat, guess_lam2_mat = params[:N_qubit * 3].reshape(-1, 3), params[N_qubit * 3:].reshape(-1, 9)

import matplotlib.pyplot as plt
from QPT_utils import SPLM_coeff_plot

# Left: KL loss history; right: Frobenius distance history (log scale).
_, axs = plt.subplots(1, 2, figsize=(7, 3), layout='constrained')
plt.sca(axs[0])
plt.plot(np.arange(len(losses)), np.asarray(losses), '--', lw=0.5, )  # marker='^')
plt.xlabel("iterations")
plt.ylabel(r"$D_{KL}(\theta,S)$")
plt.xlim([0, len(losses) - 1])

plt.sca(axs[1])
plt.semilogy(np.arange(len(diff_losses)), np.asarray(diff_losses), '--', lw=0.5, )  # marker='o')
plt.xlabel("iterations")
plt.ylabel(r"$||\Lambda-\Lambda_\theta||_F^2/4^n$")
plt.xlim([0, len(diff_losses) - 1])
plt.show()

# Real vs reconstructed SPLM coefficients, plotted on a shared y-range.
max_val = max(np.max(real_lam1_mat), np.max(real_lam2_mat), np.max(guess_lam1_mat), np.max(guess_lam2_mat)) + 1e-4
_, axs = plt.subplots(2, 1, figsize=(14, 6.5), layout='constrained')
SPLM_coeff_plot(N_qubit, real_lam1_mat, real_lam2_mat, ax=axs[0], title="real SPLM coefficient of even CNOT layer")
SPLM_coeff_plot(N_qubit, guess_lam1_mat, guess_lam2_mat, ax=axs[1],
                title='restructed SPLM coefficient of even CNOT layer')
axs[0].set_ylim([0, max_val])
axs[1].set_ylim([0, max_val])
plt.show()

from QPT_utils import pauli_coeff_plot

# Random sample of Pauli strings at three sparsity levels, plus the
# all-identity string, used to compare the on-chip channel against the
# reconstructed one.  BUG FIX: the identity row previously used a
# hard-coded length 10; use N_qubit like the other rows.
pauli_operators = np.r_[
	[['I'] * N_qubit],
	np.random.choice(['I', 'X', 'Y', 'Z'], size=(8, N_qubit), p=[0.85, 0.05, 0.05, 0.05]),
	np.random.choice(['I', 'X', 'Y', 'Z'], size=(8, N_qubit), p=[0.4, 0.2, 0.2, 0.2]),
	np.random.choice(['I', 'X', 'Y', 'Z'], size=(8, N_qubit), p=[0.1, 0.3, 0.3, 0.3])
]
pauli_coeff_plot(pauli_operators,
                 onchip_splm,
                 get_noisy_mpo(params),
                 figsize=(8, 5),
                 lower_ind_id="a{}")
plt.show()
