# 修改3 建立本文件，训练得到相近图的精确参数

import glob
import json
import os

import numpy as np
from scipy.optimize import minimize
from mindquantum.core.operators import Hamiltonian
from mindquantum.simulator import Simulator

from utils.qcirc import qaoa_hubo, build_ham_high
from score import *

def cost_fun(p, grad_ops):
    """Objective for scipy.optimize.minimize: return (value, gradient).

    `grad_ops` is the mindquantum expectation-with-gradient operator; it
    returns complex arrays of shape (1, 1) and (1, 1, n_params).  The
    leading singleton dimensions are stripped and imaginary parts dropped
    so that scipy receives a real scalar and a real gradient vector.
    """
    value, gradient = grad_ops(p)
    value = np.real(value)[0, 0]        # measured expectation of the circuit
    gradient = np.real(gradient)[0, 0]  # gradient w.r.t. each circuit parameter
    return value, gradient

def train_parameter(Jc_dict, depth, Nq=12):
    """Optimize the QAOA circuit for `Jc_dict` and return (min energy, params).

    Builds a depth-`depth` QAOA circuit on `Nq` qubits, warm-starts from the
    heuristic parameters produced by `main`, then refines them with BFGS.
    """
    ham = Hamiltonian(build_ham_high(Jc_dict))
    # One symbolic (gamma, beta) parameter pair per QAOA layer.
    gamma_names = [f'g{i}' for i in range(depth)]
    beta_names = [f'b{i}' for i in range(depth)]
    circ = qaoa_hubo(Jc_dict, Nq, gamma_names, beta_names, p=depth)  # ansatz circuit
    sim = Simulator('mqvector', circ.n_qubits)
    # Operator computing the expectation value and its gradient for the ansatz.
    grad_ops = sim.get_expectation_with_grad(ham, circ)
    # Initial parameter values from the heuristic in `main` (score module),
    # interleaved as [g0, b0, g1, b1, ...] to match the circuit's parameter order.
    init_gammas, init_betas = main(Jc_dict, depth, Nq=Nq)
    p0 = [v for pair in zip(init_gammas, init_betas) for v in pair]
    res = minimize(cost_fun, p0, args=(grad_ops, ), method='bfgs', jac=True, tol=1)
    return res.fun, res.x

def get_source(dir, file):
    """Parse (distribution, portion) from a data file path.

    A path such as ``data/train/uni_p0.3_0.json`` encodes the distribution
    (``uni``) and the edge-generation probability (``0.3``) in its basename
    as ``<distribution>_p<portion>_<index>.json``.

    Fix: the original stripped ``len(dir)+1`` characters after splitting the
    whole path on '_', which broke whenever `dir` itself contained an
    underscore (e.g. ``data/train_v2``) or a Windows separator.  Using the
    basename makes the parse independent of the directory.  The `dir`
    parameter is kept for backward compatibility with existing callers.
    """
    name_parts = os.path.basename(file).split('_')
    distribution = name_parts[0]
    portion = name_parts[1][1:]  # drop the leading 'p' of e.g. 'p0.3'
    return distribution, portion

def train_parameter_dir(dir='data/train'):
    """Train exact parameters for every not-yet-trained .json file in `dir`.

    Results are persisted to ``parameter_dict.json`` after each file, keyed by
    the stringified ``(distribution, portion)`` tuple, so an interrupted run
    resumes automatically: already-trained keys are skipped.

    Fixes vs. original: the bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) is narrowed to the errors that can actually
    occur here, and the read handle is closed via a context manager instead of
    being leaked.
    """
    # Resume from a previous run if the result file exists and parses.
    try:
        with open('parameter_dict.json', 'r') as file_handle:
            parameter_dict = json.load(file_handle)
    except (OSError, json.JSONDecodeError):
        parameter_dict = {}
    pattern = f'{dir}/*.json'   # .json files directly under dir (not recursive)
    files = glob.glob(pattern)
    num_train_file = 1          # 1-based count of newly trained files
    num_file = 1                # 1-based count of files visited
    total_num_file = len(files)
    total_score = 0             # accumulated score over newly trained files
    for file in files:
        distribution, portion = get_source(dir, file)
        the_key = f'{(distribution, portion)}'
        if the_key not in parameter_dict:
            Jc_dict = load_data(f'{file}')
            print(f'(正在训练文件：{file[len(dir)+1:]}, 进度：{num_train_file}/{num_file}/{total_num_file})')
            # Train at both circuit depths; energies are negated into scores.
            fun4, x4 = train_parameter(Jc_dict, depth=4)
            fun8, x8 = train_parameter(Jc_dict, depth=8)
            file_score = -fun4-fun8
            total_score += file_score
            parameter_dict[the_key]={}
            parameter_dict[the_key]["parameter4"], parameter_dict[the_key]["parameter8"] = x4.tolist(), x8.tolist()
            parameter_dict[the_key]["score4"], parameter_dict[the_key]["score8"] = -fun4, -fun8
            print(f'  找到参数：')
            print(f'  参数(depth=4): {x4}')
            print(f'  参数(depth=8): {x8}')
            print(f'  得分: {file_score}')
            # Persist after every file so training can be stopped and resumed.
            with open('parameter_dict.json', 'w') as file_handle:
                json.dump(parameter_dict, file_handle)
            num_train_file += 1
        else:
            print(f'{file[len(dir)+1:]}文件先前已训练, 进度：{num_train_file-1}/{num_file}/{total_num_file})')
        num_file += 1
    if num_train_file > 1:
        print(f'训练完成！文件数: {num_train_file-1}, 总得分: {total_score}, 平均分: {total_score/(num_train_file-1)}')
    else:
        print(f'训练完成！没有发现新的可训练文件。')

if __name__ == '__main__':
    # Train exact parameters for every file under the default data/train directory.
    train_parameter_dir()
    