# -*- encoding: utf-8 -*-
"""
========================================
@Time   ：2021/9/7 10:24
@Author ：shutao
@File   ：QTM_classification.py
@IDE    ：PyCharm
@Github ：https://github.com/NameLacker
@Gitee  ：https://gitee.com/nameLacker
========================================
"""

import time
import numpy as np
import paddle
from numpy import pi as PI
import matplotlib.pyplot as plt

from paddle import matmul, transpose
from paddle_quantum.circuit import UAnsatz
from paddle_quantum.utils import pauli_str_to_matrix

from quantum_computing.utils import circle_data_point_generator, data_point_plot


def Ry(theta):
    """Single-qubit rotation about the Y axis of the Bloch sphere.

    Args:
        theta: rotation angle in radians.

    Returns:
        2x2 real numpy matrix R_y(theta).
    """
    half = theta / 2
    c, s = np.cos(half), np.sin(half)
    return np.array([[c, -s],
                     [s, c]])


def Rz(theta):
    r"""Single-qubit rotation about the Z axis of the Bloch sphere.

    Matrix form:

    .. math::
        R_z(\theta) = \mathrm{diag}(e^{-i\theta/2}, e^{i\theta/2})

    where :math:`e^{\pm i\theta/2} = \cos(\theta/2) \pm i\sin(\theta/2)`.

    Args:
        theta: rotation angle in radians.

    Returns:
        2x2 complex numpy matrix R_z(theta).
    """
    half = theta / 2
    phase = np.cos(half) - np.sin(half) * 1j
    # The second diagonal entry is the complex conjugate of the first.
    return np.array([[phase, 0],
                     [0, np.conj(phase)]])


def datapoints_transform_to_state(data, n_qubits):
    """Classical -> quantum data encoder.

    Each 2-feature sample is encoded qubit-by-qubit: even-indexed qubits
    encode feature 0, odd-indexed qubits encode feature 1, via an
    Ry(arcsin(x)) followed by an Rz(arccos(x^2)) applied to |0>.

    Args:
        data: array of shape [-1, 2], feature values in [-1, 1].
        n_qubits: number of qubits of the encoded state.

    Returns:
        complex128 array of shape [-1, 1, 2 ** n_qubits].
    """
    n_samples = data.shape[0]
    zero = np.array([[1, 0]])  # row vector <0|
    encoded = []
    for s in range(n_samples):
        state = 1.
        for qubit in range(n_qubits):
            # Alternate between the two features across qubits.
            feature = data[s][qubit % 2]
            single = np.dot(zero, Ry(np.arcsin(feature)).T)
            single = np.dot(single, Rz(np.arccos(feature ** 2)).T)
            # Grow the product state one qubit at a time.
            state = np.kron(state, single)
        encoded.append(state)

    return np.array(encoded).astype("complex128")


def U_theta(theta, n, depth):
    """Build the parameterized quantum circuit (ansatz).

    Args:
        theta: parameter tensor of shape [n, depth + 3].
        n: number of qubits.
        depth: number of entangling + rotation layers (default usage: 1).

    Returns:
        UAnsatz circuit implementing U(theta).
    """
    circuit = UAnsatz(n)

    # Generalized rotation layer: Rz-Ry-Rz on every qubit (columns 0..2).
    for qubit in range(n):
        circuit.rz(theta[qubit][0], qubit)
        circuit.ry(theta[qubit][1], qubit)
        circuit.rz(theta[qubit][2], qubit)

    # `depth` layers, each a ring of CNOTs followed by per-qubit Ry
    # rotations; parameter columns 3..depth+2 feed the Ry gates.
    for layer in range(3, depth + 3):
        for qubit in range(n - 1):
            circuit.cnot([qubit, qubit + 1])
        circuit.cnot([n - 1, 0])  # close the entangling ring
        for qubit in range(n):
            circuit.ry(theta[qubit][layer], qubit)

    return circuit


def Observable(n):
    r"""Pauli-Z observable on the first qubit, identity on all others.

    Args:
        n: number of qubits.

    Returns:
        Matrix of the local observable Z \otimes I \otimes ... \otimes I.
    """
    return pauli_str_to_matrix([[1.0, 'z0']], n)


class Net(paddle.nn.Layer):
    """Variational quantum classifier.

    Wraps a parameterized circuit U(theta) plus a trainable scalar bias;
    forward() maps an input quantum state to a label estimate through the
    expectation value of a local Pauli-Z observable.
    """

    def __init__(self,
                 n,  # number of qubits
                 depth,  # circuit depth
                 seed_paras=1,  # NOTE(review): unused here — the caller seeds paddle globally; confirm
                 dtype='float64'):
        super(Net, self).__init__()

        self.n = n
        self.depth = depth

        # Circuit parameters theta, shape [n, depth + 3], initialized
        # uniformly in [0, 2*pi]
        self.theta = self.create_parameter(
            shape=[n, depth + 3],
            default_initializer=paddle.nn.initializer.Uniform(low=0.0, high=2 * PI),
            dtype=dtype,
            is_bias=False)

        # Trainable scalar bias added to the mapped expectation value
        self.bias = self.create_parameter(
            shape=[1],
            default_initializer=paddle.nn.initializer.Normal(std=0.01),
            dtype=dtype,
            is_bias=False)

    def forward(self, state_in, label):
        """
        Args:
            state_in: The input quantum state, shape [-1, 1, 2^n]
            label: label for the input state, shape [-1, 1]

        Returns:
            (loss, acc, state_predict, cir) where loss is the batch mean of
                L = ((<Z> + 1)/2 + bias - label)^2,
            acc is the batch accuracy, state_predict the numpy label
            estimates, and cir the constructed circuit.
        """
        # Convert numpy inputs to paddle tensors
        Ob = paddle.to_tensor(Observable(self.n))
        label_pp = paddle.to_tensor(label)

        # Build the circuit from the current (trainable) parameters theta
        cir = U_theta(self.theta, n=self.n, depth=self.depth)
        Utheta = cir.U

        # U(theta) is what is being learned, so applying it as a row-vector
        # product is a speed-up that does not affect training
        state_out = matmul(state_in, Utheta)  # shape [-1, 1, 2 ** n]

        # Expectation value of the Pauli-Z observable: <psi| Ob |psi>
        E_Z = matmul(matmul(state_out, Ob), transpose(paddle.conj(state_out), perm=[0, 2, 1]))

        # Map <Z> in [-1, 1] to a label estimate near [0, 1] (plus bias)
        state_predict = paddle.real(E_Z)[:, 0] * 0.5 + 0.5 + self.bias
        loss = paddle.mean((state_predict - label_pp) ** 2)

        # Cross-validation accuracy: a prediction counts as correct when it
        # lies within 0.5 of its label
        is_correct = (paddle.abs(state_predict - label_pp) < 0.5).nonzero().shape[0]
        acc = is_correct / label.shape[0]

        return loss, acc, state_predict.numpy(), cir


def heatmap_plot(net, N):
    """Plot the classifier's decision function as a heatmap over [-0.9, 0.9]^2.

    Args:
        net: trained Net instance.
        N: number of qubits used for the state encoding.
    """
    # Build a 30x30 grid of (x, y) points, row-major from the top-left
    grid_size = 30
    xs = np.linspace(-0.9, 0.9, grid_size)
    ys = np.linspace(0.9, -0.9, grid_size)
    points = np.array([[x, y] for y in ys for x in xs]).astype("float64")

    # Run the network on every grid point; only the predictions matter here
    input_state_test = paddle.to_tensor(
        datapoints_transform_to_state(points, N))
    _, _, state_predict, _ = net(state_in=input_state_test, label=points[:, 0])
    heat_data = state_predict.reshape(grid_size, grid_size)

    # Render the heatmap with tick labels at the grid edges and center
    fig = plt.figure(1)
    ax = fig.add_subplot(111)
    ax.set_xticks([0, grid_size // 2, grid_size - 1])
    ax.set_xticklabels(np.linspace(-0.9, 0.9, 3))
    ax.set_yticks([0, grid_size // 2, grid_size - 1])
    ax.set_yticklabels(np.linspace(0.9, -0.9, 3))
    im = ax.imshow(heat_data, cmap=plt.cm.RdBu)
    plt.colorbar(im)
    plt.show()


def QClassifier(Ntrain, Ntest, gap, N, D, EPOCH, LR, BATCH, seed_paras, seed_data):
    """
    Quantum binary classifier training loop.

    Args:
        Ntrain: number of training samples
        Ntest: number of test samples
        gap: width of the decision boundary in the generated data
        N: number of qubits
        D: circuit depth
        EPOCH: number of training epochs
        LR: learning rate
        BATCH: batch size
        seed_paras: random seed for parameter initialization
        seed_data: random seed for dataset generation

    Returns:
        List of test accuracies recorded every 50 iterations.
    """
    # Generate the (circle) dataset
    train_x, train_y, test_x, test_y = circle_data_point_generator(Ntrain, Ntest, gap, seed_data)

    # Number of training samples actually produced by the generator
    N_train = train_x.shape[0]

    paddle.seed(seed_paras)
    # Build the model to optimize
    net = Net(n=N, depth=D)

    # Adam generally gives good convergence here;
    # SGD or RMSprop would also work
    opt = paddle.optimizer.Adam(learning_rate=LR, parameters=net.parameters())

    # Buffers recording iteration indices and test accuracies
    summary_iter, summary_test_acc = [], []

    # Optimization loop
    for ep in range(EPOCH):
        for itr in range(N_train // BATCH):

            # Encode the classical batch into quantum states |psi>, shape [-1, 2 ** N]
            input_state = paddle.to_tensor(datapoints_transform_to_state(train_x[itr * BATCH:(itr + 1) * BATCH], N))

            # Forward pass: compute the loss
            loss, train_acc, state_predict_useless, cir \
                = net(state_in=input_state, label=train_y[itr * BATCH:(itr + 1) * BATCH])
            if itr % 50 == 0:
                # Evaluate accuracy test_acc on the test set
                input_state_test = paddle.to_tensor(datapoints_transform_to_state(test_x, N))
                loss_useless, test_acc, state_predict_useless, t_cir \
                    = net(state_in=input_state_test, label=test_y)
                print("epoch:", ep, "iter:", itr,
                      "loss: %.4f" % loss.numpy(),
                      "train acc: %.4f" % train_acc,
                      "test acc: %.4f" % test_acc)
                # Record accuracy and a global iteration index
                # NOTE(review): index uses N_train (sample count) rather than
                # the per-epoch iteration count N_train // BATCH — confirm
                # this stride is intended for the accuracy plot
                summary_iter.append(itr + ep * N_train)
                summary_test_acc.append(test_acc)
            if (itr + 1) % 151 == 0 and ep == EPOCH - 1:
                print("训练后的电路：")
                print(cir)

            # Backpropagate and minimize the loss
            loss.backward()
            opt.minimize(loss)
            opt.clear_grad()

    # Plot the decision boundary as a heatmap
    heatmap_plot(net, N=N)

    return summary_test_acc


def train():
    """Training entry point.

    Runs the quantum binary classifier with the default hyper-parameters,
    then reports the elapsed wall-clock time and the recorded test
    accuracies.
    """
    # perf_counter is the appropriate monotonic clock for elapsed-time
    # measurement (time.time can jump with system clock changes)
    time_start = time.perf_counter()
    acc = QClassifier(
        Ntrain=200,  # training-set size
        Ntest=100,  # test-set size
        gap=0.5,  # width of the decision boundary
        N=4,  # number of qubits
        D=1,  # circuit depth
        EPOCH=4,  # number of training epochs
        LR=0.01,  # learning rate
        BATCH=1,  # batch size during training
        seed_paras=19,  # random seed for parameter initialization
        seed_data=2,  # random seed fixing the generated dataset
    )

    time_span = time.perf_counter() - time_start
    print('主程序段总共运行了', time_span, '秒，测试结果: ', acc)


# Script entry point
if __name__ == "__main__":
    train()
