import math
import os
import pickle
import time

import numpy
import pandas as pd
import numpy as np
import torch
import gurobipy as gp
from gurobipy import GRB
import torch
import torch.nn as nn
from torch import optim

import loss_lisan
import prob
from NN import ann, cdinn2
from generate_data import generate_data
from prob import DIM




# F = open('MD.pkl', 'rb')
# content = pickle.load(F)
# with open("D:/代码/SYNC-main/data_SANCT/2D_bicycle_model/MD.pkl", "rb") as fp_data:
# with open("MD.pkl", "rb") as fp_data:
#   	data_pkl = pickle.load(fp_data)
# data_pkl = np.load("MD.pkl", allow_pickle=True)
# data = torch.load(F, map_location='cpu')
# data = np.load(F, allow_pickle=True)
# print(data)
#
#
# df=pd.DataFrame(data)
# print(df)

# s = []
# a = [[1], [2], [3]]
# s += a
# print("s: ", s)
# b = [[4], [5], [6]]
# s += b
# print("s: ", s)
# a.append(b)
# print("a: ", a)
# e = [7]
# a.append(e)
# print("a: ", a)
# g = [[2], [8]]
# h = a + g
# print("h: ", h)
# print("a: ", a)

# batch_list = torch.tensor(data=[[1, 2], [2, 3]])
# # print(batch_list)  # tensor([[1, 2], [2, 3]])
# # cex_list = [[-1.24194249, -0.5], [2222, 3333]]  # [array([1.27174437, 0.44485882]), array([-1.8,  0.1]), array([-1.24194249, -0.5])]
# cex_list = []  # [array([1.27174437, 0.44485882]), array([-1.8,  0.1]), array([-1.24194249, -0.5])]
# # print(cex_list)  # [[-1.24194249, -0.5]]
# tensor_cex = torch.tensor(data=cex_list)
# # print(tensor_cex)  # tensor([[-1.2419, -0.5000]])
# batch_list = torch.cat((tensor_cex, batch_list), 0)
# print(batch_list)

# pi = sympy.pi
# a = pi / 6
# s = - pi
# for i in range(12):
#
#     print((s + a * i) % pi)

# ub = -sympy.pi / 4
# lb = -2
# x_len = 0.01
# npts1 = (int)((ub - lb) // x_len) + 1
# npts2 = ((ub - lb) / x_len)
# npts3 = ((ub - lb) // x_len)
# npts4 = (int)((ub - lb) / x_len)
# print((ub - lb) % x_len < 0.000001)
#
# print(npts1)
# print(npts2)
# print(npts3)
# print(npts4)


# pi = math.pi
# lb = 1.8
# # ub = -1.5519911258433212
# ub = 2
# print(math.sin(lb) * math.sin(lb))
# print(math.sin(ub) * math.sin(ub))
# k = (math.sin(ub) * math.sin(ub) - math.sin(lb) * math.sin(lb)) / (ub - lb)
# print(k)
# breakpoint()
# # min(|sin(2x) - k|)  s.t. x0 <= x <= x1
#
# m = gp.Model()
# xt2 = m.addVar(lb=lb * 2, ub=ub * 2, name="xt2")
#
# v = m.addVar(name="v")
# df = m.addVar(name="df")
# diff = m.addVar(name="diff")
#
# m.addGenConstrSin(xt2, df, name="c1")  # f = (sinx)^2 ==> f' = sin(2x)
# m.addConstr(v == df - k, name="c2")
# m.addGenConstrAbs(diff, v, name="c3")
# m.setObjective(diff, GRB.MINIMIZE)
# m.Params.NonConvex = 2
# m.optimize()
# # m.computeIIS()
# # m.write("model2.ilp")
# print("xt.X: ", xt2.X / 2)


# def save_parameters(model, save_net_dir):
#     if not os.path.exists(save_net_dir):
#         os.mkdir(save_net_dir)
#     if not os.path.exists(save_net_dir + "barrier/"):
#         os.mkdir(save_net_dir + "barrier/")
#
#     i = 0
#     for name, p in model.named_parameters():
#         if i % 2 == 0:
#             # print(list(map(list, zip(*(p.data.tolist())))))
#             p = list(map(list, zip(*(p.data.tolist()))))
#             print(p)
#             np.savetxt(save_net_dir + "barrier/w" + str(int(i/2) + 1), p)
#         else:
#             # print(list(map(list, zip(*([p.data.tolist()])))))
#             p = list(map(list, zip(*([p.data.tolist()]))))
#             np.savetxt(save_net_dir + "barrier/b" + str(int(i/2) + 1), p)
#         i += 1

# wb_1_list = []
# wb_2_list = []
# # model = ann.gen_nn()
# model = cdinn2.gen_nn()
# model.load_state_dict(torch.load('model/cdinn_c1_2_10_0_005_train.pt'), strict=True)
# # model.load_state_dict(torch.load('ann-model/ann-trained_eg2_2_10_10_1_0-1_relu.pt'), strict=True)
# # print(model.state_dict())
# i = 0
# for name, p in model.named_parameters():
#     if i % 4 < 2:
#         # wb_1_list.append(name)
#         wb_1_list.append(p.data.cpu().detach().numpy())
#     else:
#         wb_2_list.append(p.data.cpu().detach().numpy())
#     i += 1
# # print(wb_1_list)
#
# length = int(len(wb_1_list) / 2)
#
# x0 = cp.Variable(DIM)
# x_layer = x0
# constraints = []
#
# for i in range(length):
#     W_1 = wb_1_list[i * 2]
#     b_1 = wb_1_list[i * 2 + 1]
#     # b_1 = b_1[:, np.newaxis]
#     y_layer = cp.Variable(W_1.shape[0])
#     E = np.identity(y_layer.shape[0])
#     print(W_1.shape)
#     print(x_layer.shape)
#     expr = W_1 @ x_layer + b_1 - E @ y_layer
#     constraints += [expr == 0]
#
#     if i != length - 1:
#         z_layer = cp.Variable(y_layer.shape[0])
#         for j in range(y_layer.shape[0]):
#             constraints += [z_layer[j] == cp.maximum(y_layer[j], 0.0)]
#         x_layer = z_layer
#     else:
#         x_layer = y_layer

# problem = cp.Problem(objective, constraints)
"""
input_layer1.weight
input_layer1.bias

input_layer2.weight
input_layer2.bias

hidden_layers1.0.weight
hidden_layers1.0.bias

hidden_layers2.0.weight
hidden_layers2.0.bias

output_layer_linear_prim1.weight
output_layer_linear_prim1.bias

output_layer_linear_prim2.weight
output_layer_linear_prim2.bias
"""


# save_parameters(model, "net/")


# a = torch.tensor(2., requires_grad=True)
# b = torch.tensor(3., requires_grad=True)
# c = torch.tensor(4., requires_grad=True)
# loss = a * 9 + b * 99
# print(loss)
# print(a.grad)
# print(b.grad)
# print(c.grad)
# loss.backward()
# print("===========")
# print(a.grad)
# print(b.grad)
# print(c.grad)


# model = ann.gen_nn()
# model.load_state_dict(torch.load('ann-model/ann-trained_eg2_2_10_10_1_0-1_relu.pt'), strict=True)
#
# # x = [1.5, -0.89882507]
# x = [0.4, 0.1]
# # array([ 1.03042101, -0.65322224]), array([ 1.53444191, -0.94558351])
# # array([1.04518126, -0.64934505]), array([1.51459322, -0.91090996])
# # [1., -0.80654314]
# y = model(torch.tensor(data=x))
# print("y: ", y)

# x = torch.tensor(data=[[1, 2, 3, 4], [5, 6, 7, 8]])
# y = torch.tensor(data=[[96, 1, 3, 4], [888, 85, 7, 8]])
# z = [x, y]
# print(z)
#
# print(z[1])
# res = []
# for i in range(len(z)):
#     data = z[i][:, 0:2]
#     res.append(data)
# print(res)

# W = [
#         [
#            [ 8.98960279e-04,  8.27845070e-04, -1.79890776e-04,
#              5.52908285e-03,  1.88611541e-03,  6.76485710e-04,
#              6.72942549e-02, -1.23891205e-01, -1.86859025e-03,
#              6.52140155e-02],
#            [ 1.35614327e-03,  3.27796908e-03, -9.05362074e-04,
#              7.85562862e-03, -3.02786822e-04,  3.71581467e-04,
#             -2.15857312e-01, -1.21225074e-01,  4.93563362e-04,
#              2.75119126e-01]
#          ],
#          [
#                [-0.00213839],
#                [-0.00058767],
#                [-0.00102254],
#                [-0.0028088 ],
#                [-0.00118264],
#                [ 0.00110472],
#                [ 0.14720802],
#                [ 0.10582635],
#                [ 0.00040226],
#                [ 0.18511593]
#           ]
#      ]
# # print(W)
# # print(len(W))
# # print(W[0])
# # print(len(W[0]))
# for layer_index in range(0, len(W)):
#     print(layer_index)
#     print(len(W[layer_index]))
#     print(len(W[layer_index][0]))


# [array([[ 1.2089743e-01, -1.7890909e-01, -1.9053604e-03,  7.8991649e-04,
#          5.8029406e-04,  8.9143316e-04,  4.0028438e-02,  4.9527781e-04,
#          7.6505507e-04, -2.1073245e-04],
#        [-3.4515667e-01, -1.8967837e-01,  7.6253619e-04, -6.4276881e-04,
#         -1.0531136e-03, -7.4557960e-04,  4.7482508e-01,  2.4975985e-03,
#          7.6509709e-04,  3.1084013e-03]], dtype=float32), array([[-0.00076509,  0.00076509, -0.00252196, -0.00076511, -0.00076511,
#         -0.00076511, -0.0007651 , -0.0007651 ,  0.00076511,  0.28390902],
#        [-0.00252196,  0.00252196,  0.00252196, -0.00076509,  0.00076509,
#          0.00076511, -0.00076511,  0.00076509, -0.00076508,  0.2826148 ],
#        [ 0.00076511,  0.00076509, -0.0007651 , -0.00252196, -0.00076511,
#         -0.0007651 ,  0.00076505, -0.0007651 , -0.00252196, -0.0016668 ],
#        [ 0.0007651 , -0.00252196, -0.0007651 , -0.00252196, -0.00076509,
#         -0.0007651 ,  0.00076511, -0.00076511, -0.00076509, -0.00049749],
#        [ 0.0007651 , -0.00076506, -0.00076511, -0.00252196, -0.00252197,
#          0.00252196, -0.00076506,  0.00076511,  0.0007651 ,  0.00292707],
#        [-0.00252196,  0.00076511, -0.00252196, -0.0007651 ,  0.00076511,
#         -0.00076511,  0.00076511, -0.00076511,  0.00076511,  0.00089345],
#        [-0.00076511, -0.00076511,  0.0007651 , -0.00252196, -0.00076511,
#          0.00076511,  0.00076511,  0.00076511,  0.00252196,  0.3292197 ],
#        [ 0.00076509,  0.0007651 , -0.00252196,  0.0007651 ,  0.0007651 ,
#          0.00076511,  0.00076511, -0.00252196, -0.00076511, -0.00107623],
#        [ 0.00252197,  0.00252196, -0.00076511,  0.00076511, -0.00076509,
#         -0.0007651 ,  0.00076509, -0.00076506,  0.00076511,  0.00076509],
#        [ 0.0007651 ,  0.0007651 , -0.0007651 , -0.0007651 , -0.00076509,
#          0.0007651 ,  0.00076511, -0.0007651 ,  0.00076505,  0.00082043]],
#       dtype=float32), array([[-0.00076506],
#        [ 0.0007651 ],
#        [-0.00076511],
#        [ 0.00076511],
#        [ 0.00252196],
#        [ 0.00076511],
#        [ 0.0007651 ],
#        [ 0.00076511],
#        [ 0.00083675],
#        [ 0.243557  ]], dtype=float32)]

# model = ann.gen_nn()
# model.load_state_dict(torch.load('ann-model/ann-trained_eg2_2_10_10_1_0-1_relu.pt'), strict=True)
# # print(model)
# W_b_list = []
# for name, p in model.named_parameters():
#     W_b_list.append(p.data.cpu().detach().numpy())
# print(W_b_list)

# batches_init, batches_unsafe, batches_domain = data.gen_batch_data()

# full_data = torch.tensor(data = [[-1.8000, -0.5000],
#         [-1.8000, -0.3571],
#         [ 2.0000,  0.0714],
#         [ 2.0000,  0.2143],
#         [ 2.0000,  0.3571],
#         [ 2.0000,  0.5000]])
#
# batch_list = [torch.reshape(full_data, [8, 8, 2])]
# print(batch_list)

# for i in range(2):
#     batch_list = [tensor_block for curr_tensor in batch_list for tensor_block in list(curr_tensor.chunk(4, i))]

# new_batch_list = []
# temp_list = []
# for curr_tensor in batch_list:
#     temp_list += list(curr_tensor.chunk(4, 0))
#     # print(temp_list)
#     for curr_block in temp_list:
#         new_batch_list += list(curr_block.chunk(8, 1))
#
# batch_list = new_batch_list
#
#
# print("batch_list1: ", batch_list)


# init_list = np.arange(superp.BATCHES) % superp.BATCHES_I
# print(init_list)
# print(len(init_list))


# generate_data()
# model = ann.gen_nn()
# model.load_state_dict(torch.load("ann-model/ann-trained_eg2_0-1_relu.pt"), strict=True)
#
# train_start_time = time.time()
# model, train_succ_flag = train(model)
# train_end_time = time.time()
#
# print("flag: ", train_succ_flag)
# print(f"训练时间是{train_end_time - train_start_time}")


# cex_idx_list = []
# new_idx_list = []
# for cex_idx in cex_idx_list:
#     x0_idx, x1_idx, x2_idx = cex_idx
#
#     for new_x0_idx in range(x0_idx * 2, x0_idx * 2 + 2):
#         for new_x1_idx in range(x1_idx * 2, x1_idx * 2 + 2):
#             for new_x2_idx in range(x2_idx * 2, x2_idx * 2 + 2):
#                 new_idx_list.append([new_x0_idx, new_x1_idx, new_x2_idx])
#
# print(new_idx_list)

# cex_idx_list = []
# new_idx_list = []
# for cex_idx in cex_idx_list:
#     x0_idx, x1_idx = cex_idx
#
#     for new_x0_idx in range(x0_idx * 2, x0_idx * 2 + 2):
#         for new_x1_idx in range(x1_idx * 2, x1_idx * 2 + 2):
#             new_idx_list.append([new_x0_idx, new_x1_idx])
#
# print(new_idx_list)


# cex_idx_list = [10, 11, 12, 13, 14, 15, 16]
# new_idx_list = []
# base = 2
# for x0_idx in cex_idx_list:
#     for new_x0_idx in range(x0_idx * base, x0_idx * base + base):
#         new_idx_list.append(new_x0_idx)
#
# print(new_idx_list)


# model = ann.gen_nn()
# model.load_state_dict(torch.load('./ann-model/ann-trained_eg2_relu.pt'), strict=True)
# print(model)
# x = [-3, -0.125, -0.635]
# y = model(torch.tensor(data=x))
# print("y: ", y)


# from prob import *
# file_name = ("dccp_" if USE_CDINN else "fc_") + case_name + f"_{N_H}_{D_H}_{alpha}_{time_step}_{piece_len}_time.txt"
# print(file_name)
# fw = open("./log/" + file_name, "w+", encoding="utf-8")
# fw.close()


# import cvxpy as cp
# import numpy as np
#
# p = cp.Parameter()
# x = cp.Variable()
# quadratic = cp.square(x - 2 * p)
# problem = cp.Problem(cp.Minimize(quadratic), [x >= 0])
# p.value = 3.0
# problem.solve(requires_grad=True, eps=1e-10)
# # derivative() populates the delta attribute of the variables
# p.delta = 1e-3
# problem.derivative()
# print(x.delta)
# # Because x* = 2 * p, dx*/dp = 2, so (dx*/dp)(p.delta) == 2e-3
# np.testing.assert_allclose(x.delta, 2e-3)


# import sympy as sp
# import cvxpy as cp
# from pyomo.environ import *
# from pyomo.core.expr.sympy_tools import sympy2pyomo_expression, PyomoSympyBimap
#
# x, y = sp.symbols('x y')
# expr = x**2 + 3*x + x*y + sp.Max(0, 2*x*y + x**2)
#
# deriv_expr_x = sp.diff(expr, x)
# # print(deriv_expr_x)  # 2*x + y + (2*x + 2*y)*Heaviside(x**2 + 2*x*y) + 3
#
# # deriv_expr_x_str = str(deriv_expr_x)
#
# m = ConcreteModel()
# m.a = Var()
# m.b = Var()
#
# bimap = PyomoSympyBimap()
# bimap.sympy2pyomo = {x: m.a, y: m.b}
# pyomo_expr = sympy2pyomo_expression(deriv_expr_x, bimap)
#
# m.obj = Objective(expr=pyomo_expr, sense=minimize)
# m.c1 = Constraint(expr=m.a >= 0)
# m.c2 = Constraint(expr=m.b == 1)
# opt = SolverFactory('gurobi')
# solution = opt.solve(m)
# print(f"optimum point: {(value(m.a)), value(m.b)} \n")
# obj_values = value(m.obj)
# print("optimal objective: {}".format(obj_values))

# 将 Heaviside 函数转换为 max(0, min(1, alpha * x))
# alpha = 1000  # 可以根据需求选择合适的 alpha 值
# deriv_expr_x_str_replaced = deriv_expr_x_str.replace("Heaviside(", f"cp.maximum(0, cp.minimum(1,{alpha}*")
# expr_replaced = expr.replace(sp.Heaviside, lambda arg: sp.Max(0, sp.Min(1, alpha * arg.args[0])))
# print(deriv_expr_x_str_replaced)


# deriv_func = sp.lambdify([x, y], deriv_expr_x)
# x_cvxpy = cp.Variable()
# objective = cp.Minimize(deriv_func(x_cvxpy, 1))
# problem = cp.Problem(objective, [x_cvxpy >= 0])
# problem.solve()
#
# # 获取最优解
# optimal_x = x_cvxpy.value
#
# print("optimal_x:", optimal_x)

# import numpy as np
# import gurobipy as gp
# from prob import *
#
# # https://support.gurobi.com/hc/en-us/articles/4414392016529-How-do-I-model-conditional-statements-in-Gurobi
# def relu_der(m, x, useVars):
#     eps = 0.0001
#     M = 10000 + eps
#
#     if useVars:
#         relu_der_x = m.addMVar(x.shape, vtype=GRB.CONTINUOUS)
#         for i in range(x.shape[0]):
#             b = m.addVar(vtype=GRB.BINARY)
#             m.addConstr(x[i] >= eps - M * (1 - b))
#             m.addConstr(x[i] <= M * b)
#             m.addConstr((b == 1) >> (relu_der_x[i] == 1))
#             m.addConstr((b == 0) >> (relu_der_x[i] == 0))
#         m.update()
#     else:
#         relu_der_x = []
#         for i in range(x.shape[0]):
#             if x[i] > 0:
#                 relu_der_x.append(1)
#             else:
#                 relu_der_x.append(0)
#         relu_der_x = np.array(relu_der_x)
#     return relu_der_x
#
#
# def barrier_f(m, x0, wb_list, wb_length, useVars):
#     x_layer = x0
#     if useVars:
#         jacobian = np.eye(x0.shape[0])
#     else:
#         jacobian = np.eye(len(x0))
#     for i in range(wb_length):
#         W = wb_list[i * 2]
#         b = wb_list[i * 2 + 1]
#         if useVars:
#             y_layer = m.addMVar((W.shape[0],), vtype=GRB.CONTINUOUS, lb=-np.inf, ub=np.inf)
#             E = np.identity(y_layer.shape[0])
#             expr = W @ x_layer + b - E @ y_layer
#             m.addConstr(expr == 0)
#         else:
#             # print("W", W.shape)
#             # x_layer = np.array(x_layer)
#             # print("x_layer", x_layer.shape)
#             # print("b:", b.shape)
#             y_layer = W @ x_layer + b
#             # y_layer = np.array(y_layer)
#             # print("y_layer:", y_layer.shape)
#         jacobian = W @ jacobian
#
#         if i != wb_length - 1:
#             if useVars:
#                 z_layer = m.addMVar((y_layer.shape[0],), vtype=GRB.CONTINUOUS, lb=-np.inf, ub=np.inf)
#             # z = activate(y)
#                 for j in range(y_layer.shape[0]):
#                     m.addConstr(z_layer[j] == gp.max_(y_layer[j], 0.0))
#             else:
#                 z_layer = y_layer
#                 for j in range(y_layer.shape[0]):
#                     z_layer[j] = max(y_layer[j], 0.0)
#             x_layer = z_layer
#
#             array_relu_der = []
#             relu_der_f = relu_der(m, y_layer, useVars)
#             for j in range(relu_der_f.shape[0]):
#                 array_relu_der.append(relu_der_f[j])
#             jacobian = np.diag(array_relu_der) @ jacobian
#         else:
#             x_layer = y_layer
#
#         if useVars:
#             m.update()
#
#     return x_layer, jacobian[0]
#
#
# def verify_initCond(wb_1_list, wb_2_list, wb_length):
#     cex_init = []
#     res_init = []
#     m = gp.Model()
#     m.setParam('Outputflag', 0)
#
#     x0 = m.addMVar((DIM,), vtype=GRB.CONTINUOUS)
#     m.addConstr(x0[0] >= init_min[0])
#     m.addConstr(x0[0] <= init_max[0])
#     m.addConstr(x0[1] >= init_min[1])
#     m.addConstr(x0[1] <= init_max[1])
#
#     if init_shape == 2:
#         p0 = (init_min[0] + init_max[0]) / 2.0
#         p1 = (init_min[1] + init_max[1]) / 2.0
#         r = p0 - init_min[0]
#         m.addConstr((x0[0] - p0) * (x0[0] - p0) + (x0[1] - p1) * (x0[1] - p1) <= r * r)
#
#     # f1_x0, df1dx_x0 = barrier_f(m, x0, wb_1_list, wb_length, True)
#     f2_x0, df2dx_x0 = barrier_f(m, x0, wb_2_list, wb_length, True)
#
#     # xt = np.random.randn(DIM)
#     xt = [0, 0]
#     result_pre = 0
#
#     while True:
#         # print("================")
#         f1_xt, df1dx_xt = barrier_f(m, xt, wb_1_list, wb_length, False)
#         # f2_xt, df2dx_xt = barrier_f(m, xt, wb_2_list, wb_length, False)
#
#         # print("f1_xt:", f1_xt)
#         # print("df1dx_xt[0]:", df1dx_xt)
#         # print("f2_x0:", f2_x0)
#
#         m.setObjective(f1_xt + df1dx_xt[0] * (x0[0] - xt[0]) + df1dx_xt[1] * (x0[1] - xt[1]) - f2_x0, GRB.MAXIMIZE)
#         m.optimize()
#         # print("x0.x:", x0.x)
#         # print("m.objVal:", m.objVal)
#         if abs(m.objVal - result_pre) < 1e-4 or np.linalg.norm(x0.x - xt) < 1e-4:
#             break
#         xt = x0.x
#         # print("xt[0]:", xt[0])
#         # print("xt[1]:", xt[1])
#         result_pre = m.objVal
#
#     print("maximize init value:", m.objVal)  # -0.07222615058680137
#     if m.objVal > 0:
#         cex_init.append(np.array(x0.x))
#         res_init.append(m.objVal)
#     return cex_init, res_init
#
#
# def verify_unsafeCond(wb_1_list, wb_2_list, wb_length):
#     cex_unsafe = []
#     res_unsafe = []
#     m = gp.Model()
#     m.setParam('Outputflag', 0)
#
#     x0 = m.addMVar((DIM,), vtype=GRB.CONTINUOUS)
#     m.addConstr(x0[0] >= unsafe_min[0])
#     m.addConstr(x0[0] <= unsafe_max[0])
#     m.addConstr(x0[1] >= unsafe_min[1])
#     m.addConstr(x0[1] <= unsafe_max[1])
#
#     if unsafe_shape == 2:
#         p0 = (unsafe_min[0] + unsafe_max[0]) / 2.0
#         p1 = (unsafe_min[1] + unsafe_max[1]) / 2.0
#         r = p0 - unsafe_min[0]
#         m.addConstr((x0[0] - p0) * (x0[0] - p0) + (x0[1] - p1) * (x0[1] - p1) <= r * r)
#
#     f1_x0, df1dx_x0 = barrier_f(m, x0, wb_1_list, wb_length, True)
#     # f2_x0, df2dx_x0 = barrier_f(m, x0, wb_2_list, wb_length, True)
#
#     # xt = np.random.randn(DIM)
#     xt = [0, 0]
#     result_pre = 0
#
#     while True:
#         # print("================")
#         # f1_xt, df1dx_xt = barrier_f(m, xt, wb_1_list, wb_length, False)
#         f2_xt, df2dx_xt = barrier_f(m, xt, wb_2_list, wb_length, False)
#
#         # print("f1_xt:", f1_xt)
#         # print("df1dx_xt[0]:", df1dx_xt)
#         # print("f2_x0:", f2_x0)
#
#         m.setObjective(f1_x0 - f2_xt - df2dx_xt[0] * (x0[0] - xt[0]) - df2dx_xt[1] * (x0[1] - xt[1]), GRB.MINIMIZE)
#         m.optimize()
#         # print("x0.x:", x0.x)
#         # print("m.objVal:", m.objVal)
#         if abs(m.objVal - result_pre) < 1e-4 or np.linalg.norm(x0.x - xt) < 1e-4:
#             break
#         xt = x0.x
#         # print("xt[0]:", xt[0])
#         # print("xt[1]:", xt[1])
#         result_pre = m.objVal
#
#     print("minimize unsafe value:", m.objVal)  # 0.36436068000596666
#     if m.objVal < 0:
#         cex_unsafe.append(np.array(x0.x))
#         res_unsafe.append(m.objVal)
#     return cex_unsafe, res_unsafe
#
# def verify_thirdCond(wb_1_list, wb_2_list, wb_length, x_len):
#     cex_domain = []
#     res_domain = []
#
#     data_dir = "../data/"
#     pieces = int(4 / x_len)
#     datas = np.loadtxt(data_dir + f"22_1_5_{piece_len}_20.txt", dtype=np.float64).reshape([-1, 6])
#
#     x0_min = domain_min[0]
#     x0_max = domain_max[0]
#     x1_min = domain_min[1]
#     x1_max = domain_max[1]
#
#     cex_idx_list = []
#
#     if len(cex_idx_list) == 0:
#         for x0_idx in range(pieces):
#             for x1_idx in range(pieces):
#                 cex_idx = [x0_idx, x1_idx]
#                 cex_idx_list.append(cex_idx)
#
#     for cex_idx in cex_idx_list:
#         x0_idx, x1_idx = cex_idx
#
#         k_0, b_l_0, b_0, k_1, b_l_1, b_1 = datas[x0_idx * pieces + x1_idx]
#
#         x0_l = x0_idx * piece_len + x0_min
#         x0_r = (x0_idx + 1) * piece_len + x0_min
#         x1_l = x1_idx * piece_len + x1_min
#         x1_r = (x1_idx + 1) * piece_len + x1_min
#
#         m = gp.Model()
#         m.setParam('Outputflag', 0)
#         # m.setParam('InfUnbdInfo', 1)
#
#         x0 = m.addMVar((DIM,), vtype=GRB.CONTINUOUS, name='x0', lb=-np.inf, ub=np.inf)
#         x0[0].lb = x0_l
#         x0[0].ub = x0_r
#         x0[1].lb = x1_l
#         x0[1].ub = x1_r
#
#         x1 = m.addMVar((DIM,), vtype=GRB.CONTINUOUS, name='x1', lb=-np.inf, ub=np.inf)  # x1 = F(kx+b; x0)
#         x1[0].lb = x0_min
#         x1[0].ub = x0_max
#         x1[1].lb = x1_min
#         x1[1].ub = x1_max
#
#         x2 = m.addMVar((DIM,), vtype=GRB.CONTINUOUS, name='x2', lb=-np.inf, ub=np.inf)  # x2 = F(x0)
#         x2[0].lb = x0_min
#         x2[0].ub = x0_max
#         x2[1].lb = x1_min
#         x2[1].ub = x1_max
#
#         b0 = m.addVar(lb=b_l_0, ub=b_l_0 + b_0)
#         b1 = m.addVar(lb=b_l_1, ub=b_l_1 + b_1)
#         m.addConstr(x1[0] == x0[0] + time_step * (-5.5 * x0[1] + (k_1 * x0[1] + b1)))
#         m.addConstr(x1[1] == x0[1] + time_step * (6 * x0[0] - (k_0 * x0[0] + b0)))
#         m.addConstr(x2[0] == x0[0] + time_step * (-5.5 * x0[1] + x0[1] * x0[1]))
#         m.addConstr(x2[1] == x0[1] + time_step * (6 * x0[0] - x0[0] * x0[0]))
#
#         f1_x0, df1dx_x0 = barrier_f(m, x0, wb_1_list, wb_length, True)
#         f2_x0, df2dx_x0 = barrier_f(m, x0, wb_2_list, wb_length, True)
#         f1_x1, df1dx_x1 = barrier_f(m, x1, wb_1_list, wb_length, True)
#         f2_x1, df2dx_x1 = barrier_f(m, x1, wb_2_list, wb_length, True)
#         f1_x2, df1dx_x2 = barrier_f(m, x2, wb_1_list, wb_length, True)
#         f2_x2, df2dx_x2 = barrier_f(m, x2, wb_2_list, wb_length, True)
#
#         # xt = np.random.randn(DIM)
#         xt = [0, 0]
#         result_pre = 0
#
#         while True:
#             x0_next_1 = [x0[0] + time_step * (-5.5 * x0[1] + (k_1 * x0[1] + b1)),
#                          x0[1] + time_step * (6 * x0[0] - (k_0 * x0[0] + b0))]
#
#             xt_next_2 = [xt[0] + time_step * (-5.5 * xt[1] + xt[1] * xt[1]),
#                        xt[1] + time_step * (6 * xt[0] - xt[0] * xt[0])]
#
#             # f1_xt, df1dx_xt = barrier_f(m, xt, wb_1_list, wb_length, False)
#             f2_xt, df2dx_xt = barrier_f(m, xt, wb_2_list, wb_length, False)
#             f1_xt_next, df1dx_xt_next = barrier_f(m, xt_next_2, wb_1_list, wb_length, False)
#             # f2_xt_next, df2dx_xt_next = barrier_f(m, xt_next_2, wb_2_list, wb_length, False)
#
#             m.setObjective(f1_xt_next + df1dx_xt_next[0] * (x0_next_1[0] - xt_next_2[0]) + df1dx_xt_next[1] * (x0_next_1[1] - xt_next_2[1])
#                            - f2_x1 - (1 - alpha) * f1_x0 + (1 - alpha) * (f2_xt + df2dx_xt[0] * (x0[0] - xt[0]) + df2dx_xt[1] * (x0[1] - xt[1])), GRB.MAXIMIZE)
#             m.optimize()
#             # print("x0.x:", x0.x)
#             # print("m.objVal:", m.objVal)
#             if abs(m.objVal - result_pre) < 1e-4 or np.linalg.norm(x0.x - xt) < 1e-4:
#                 break
#             xt = x0.x
#             # print("xt[0]:", xt[0])
#             # print("xt[1]:", xt[1])
#             result_pre = m.objVal
#
#         print("maximize domain value:", m.objVal)
#         if m.objVal > 0:
#             cex_domain.append(np.array(x0.x))
#             res_domain.append(m.objVal)
#     return cex_domain, res_domain
#
#
# def verif(model, x_len):
#     wb_1_list = []
#     wb_2_list = []
#     params = model.state_dict()
#     i = 0
#     for k in params:
#         if i % 4 < 2:
#             wb_1_list.append(params[k].detach().numpy())
#         else:
#             wb_2_list.append(params[k].detach().numpy())
#         i += 1
#
#     wb_length = int(len(wb_1_list) / 2)
#
#     cex_init, res_init = verify_initCond(wb_1_list, wb_2_list, wb_length)
#     cex_unsafe, res_unsafe = verify_unsafeCond(wb_1_list, wb_2_list, wb_length)
#     cex_domain, res_domain = verify_thirdCond(wb_1_list, wb_2_list, wb_length, x_len)
#     print("cex_init: ", cex_init)
#     print("res_init: ", res_init)
#     print("cex_unsafe: ", cex_unsafe)
#     print("res_unsafe: ", res_unsafe)
#     print("cex_domain: ", cex_domain)
#     print("res_domain: ", res_domain)
#     return cex_init, cex_unsafe, cex_domain


# import numpy as np
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
#
# # 创建一个新的图形
# fig = plt.figure()
#
# # 创建一个3D坐标系
# ax = fig.add_subplot(111, projection='3d')
#
# # 创建一些数据
# x = np.linspace(-5, 5, 100)
# y = np.linspace(-5, 5, 100)
# x, y = np.meshgrid(x, y)
# z = np.sin(np.sqrt(x**2 + y**2))
#
# # 绘制3D曲面
# ax.plot_surface(x, y, z, cmap='viridis')
#
# # 添加标签
# ax.set_xlabel('X Label')
# ax.set_ylabel('Y Label')
# ax.set_zlabel('Z Label')
#
# # 显示图形
# plt.show()


# 打点 散点图
# import numpy as np
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
#
# # 生成一些三维点的数据
# x = np.random.rand(100)
# y = np.random.rand(100)
# z = np.random.rand(100)
#
# # 创建一个新的图形
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
#
# # 绘制散点图
# ax.scatter(x, y, z, c='b', marker='o')
#
# # 添加标签
# ax.set_xlabel('X Label')
# ax.set_ylabel('Y Label')
# ax.set_zlabel('Z Label')
#
# # 显示图形
# plt.show()


# 画障碍函数
# import numpy as np
# import prob
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d.art3d import Poly3DCollection
# from scipy.spatial import Delaunay
#
# model = cdinn2.gen_nn()
# model.load_state_dict(torch.load('./model/cdinn_zhrR1_1_5_0.1_0.1_1_train.pt'), strict=True)
#
# # 生成一些采样的三维点和对应的函数值
# x = np.linspace(prob.domain_min[0], prob.domain_max[0], int(prob.PLOT_LEN_B[0]))
# y = np.linspace(prob.domain_min[1], prob.domain_max[1], int(prob.PLOT_LEN_B[1]))
# z = np.linspace(prob.domain_min[2], prob.domain_max[2], int(prob.PLOT_LEN_B[2]))
#
# # s = np.array(np.meshgrid(x, y, z)).reshape(-1, 3)
# xx, yy, zz = np.meshgrid(x, y, z)
# s = np.vstack([xx.ravel(), yy.ravel(), zz.ravel()]).T
#
# # print(s)
# nn_input = torch.tensor(s, dtype=torch.float64).to("cpu")
# nn_output = model(nn_input)
# plot_v = nn_output.detach().numpy()
# plot_v = np.squeeze(plot_v)
# # print(plot_v)
# # 找到函数值为零的点
# zero_indices = np.where(np.abs(plot_v) < 0.1)
# print(zero_indices)
#
# # 提取函数值为零的点的坐标
# # print(s[zero_indices])
#
# # 创建一个新的图形
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
#
# # 绘制曲面
# ax.add_collection3d(Poly3DCollection([s[zero_indices]], facecolors='grey', edgecolors='cyan', alpha=0.3))
#
# # 设置坐标轴范围
# ax.set_xlim([prob.domain_min[0], prob.domain_max[0]])
# ax.set_ylim([prob.domain_min[1], prob.domain_max[1]])
# ax.set_zlim([prob.domain_min[2], prob.domain_max[2]])
#
# # 添加标签
# ax.set_xlabel('X Label')
# ax.set_ylabel('Y Label')
# ax.set_zlabel('Z Label')
#
# # 显示图形
# plt.show()


# show equations gNN and hNN
# import verify_dccp
#
#
# if __name__ == "__main__":
#     model = cdinn2.gen_nn()
#     model.load_state_dict(torch.load('./model/cdinn_c11_1_20_0_0.05_1_train.pt'), strict=True)
#     verify_dccp.getMathExpress2(model, piece_len)


# x1 = [1., 2.]
# x2 = [2., 3.]
# x_numpy = [x1, x2]
# x_i_data = torch.tensor(data=x_numpy)
# x_data = prob.vector_field(x_i_data)
# print(x_data)
# cnt = 0
# for x in x_data:
#     print(f"\n\n第{cnt}个tensor:{x}")
#     print(f"第{cnt}个numpy:{x.numpy()}")
#     x_data = x_numpy[cnt] + x.numpy() * 0.01
#     print(f"第{cnt}个numpy:{x_data}")
#     cnt += 1


def block_space(dim_min, dim_max):
    """Bisect an n-dimensional axis-aligned box into 2**n equal sub-boxes.

    Each dimension i of the box [dim_min, dim_max] is split at its midpoint
    (dim_min[i] + dim_max[i]) / 2, and every combination of lower/upper
    halves is returned.

    Args:
        dim_min: list of per-dimension lower bounds.
        dim_max: list of per-dimension upper bounds (same length as dim_min).

    Returns:
        A list of 2**n sub-blocks, each of the form [sub_min, sub_max].
        Blocks are ordered as if counting in binary with dimension 0 as the
        most significant bit (lower half of dim 0 first).

    Note:
        Bug fix vs. the previous version: the appended blocks used to alias
        the caller's input lists (and each other), so mutating dim_min or
        dim_max after the call corrupted the returned blocks. Leaves are now
        copied before being stored.
    """
    n = len(dim_min)

    sub_blocks = []

    # Recursively split one dimension at a time.
    def split_block(current_min, current_max, index):
        if index == n:
            # Copy so the returned blocks never alias the caller's lists
            # or intermediate bound lists shared between branches.
            sub_blocks.append([list(current_min), list(current_max)])
            return
        mid = (current_min[index] + current_max[index]) / 2

        # Lower half along dimension `index`.
        left_max = current_max[:]
        left_max[index] = mid
        split_block(current_min, left_max, index + 1)

        # Upper half along dimension `index`.
        right_min = current_min[:]
        right_min[index] = mid
        split_block(right_min, current_max, index + 1)

    # Start splitting from dimension 0.
    split_block(dim_min, dim_max, 0)
    return sub_blocks

from prob import *

def barrier_f_without_der(m, x0, wb_list, wb_length, useVars):
    """Evaluate a fully-connected ReLU network B(x0) layer by layer.

    The network weights live in ``wb_list`` as alternating (W, b) pairs:
    layer i uses ``wb_list[2*i]`` (weight matrix) and ``wb_list[2*i + 1]``
    (bias vector). ReLU is applied after every layer except the last.

    Args:
        m: Gurobi model; only used when ``useVars`` is True.
        x0: input point — an MVar (symbolic mode) or a numeric list/array.
        wb_list: flat list [W0, b0, W1, b1, ...] of numpy parameters.
        wb_length: number of (W, b) layers to apply.
        useVars: if True, encode the forward pass as Gurobi variables and
            constraints on ``m``; if False, evaluate numerically.

    Returns:
        The output layer — an MVar in symbolic mode, a numpy array (or the
        untouched ``x0`` when ``wb_length`` is 0) in numeric mode.
    """
    layer = x0
    for idx in range(wb_length):
        weight = wb_list[2 * idx]
        bias = wb_list[2 * idx + 1]
        is_last = idx == wb_length - 1

        if useVars:
            # Fresh unbounded variables for this layer's pre-activation,
            # tied to the previous layer via W @ layer + b == pre.
            pre = m.addMVar((weight.shape[0],), vtype=GRB.CONTINUOUS,
                            lb=-np.inf, ub=np.inf)
            eye = np.identity(pre.shape[0])
            m.addConstr(weight @ layer + bias - eye @ pre == 0)
        else:
            pre = weight @ layer + bias

        if is_last:
            # Output layer: no activation.
            layer = pre
        elif useVars:
            # post = relu(pre), encoded via Gurobi's general max constraint.
            post = m.addMVar((pre.shape[0],), vtype=GRB.CONTINUOUS,
                             lb=-np.inf, ub=np.inf)
            for j in range(pre.shape[0]):
                m.addConstr(post[j] == gp.max_(pre[j], 0.0))
            layer = post
        else:
            # Numeric relu, applied in place component by component.
            post = pre
            for j in range(pre.shape[0]):
                post[j] = max(pre[j], 0.0)
            layer = post

        if useVars:
            m.update()
    return layer
def barrier_Bf_nonlinear(wb_list, wb_length, min_bound, max_bound):
    """Fit the tightest affine band around B(F(x0)) over a box of initial states.

    Solves a nonconvex Gurobi program that finds coefficients A, b_l, b such
    that  A[0]*x1[0] + A[1]*x1[1] + b_l  <=  B(x1)  <=  A[0]*x1[0] + A[1]*x1[1] + b_l + b
    for every successor state x1 = F(x0) reachable from x0 in the box
    [min_bound, max_bound], minimizing the band width b.

    Uses globals from `from prob import *`: DIM, domain_min, domain_max,
    time_step — TODO confirm their definitions in prob.py. Only indices 0
    and 1 are bounded below, so this assumes DIM == 2.

    Args:
        wb_list: flat [W0, b0, W1, b1, ...] numpy parameters of the barrier NN.
        wb_length: number of (W, b) layers in the network.
        min_bound, max_bound: per-dimension box bounds for the initial state x0.

    Returns:
        Tuple (A[0], A[1], b_l + b): the slope coefficients and the upper
        intercept of the fitted band.
    """
    m = gp.Model()
    m.setParam('Outputflag', 0)
    # Products of decision variables (A*x1, x0^3) make the model bilinear,
    # so Gurobi's nonconvex quadratic solver must be enabled.
    m.setParam('NonConvex', 2)
    A = m.addVars(DIM, name="A")
    b_l = m.addVar(name="b_l")
    # b >= 0 is the band width being minimized.
    b = m.addVar(lb=0,name="b")
    x0 = m.addMVar((DIM,), vtype=GRB.CONTINUOUS, name='x0', lb=-np.inf, ub=np.inf)
    x0[0].lb = min_bound[0]
    x0[0].ub = max_bound[0]
    x0[1].lb = min_bound[1]
    x0[1].ub = max_bound[1]
    # Successor state, constrained to stay inside the verification domain.
    x1 = m.addMVar((DIM,), vtype=GRB.CONTINUOUS, name='x1', lb=-np.inf, ub=np.inf)  # x1 = F(kx+b; x0)
    x1[0].lb = domain_min[0]
    x1[0].ub = domain_max[0]
    x1[1].lb = domain_min[1]
    x1[1].ub = domain_max[1]
    # Auxiliary xx0 == x0[0]^2 so the cubic term below stays quadratic.
    xx0 = m.addVar()
    m.addConstr(xx0 == x0[0] * x0[0])
    # One explicit-Euler step of the dynamics; the form -x^3/3 + x - y + 0.875
    # with slow recovery (2/25)*(x - 0.8*y + 0.7) appears to be a
    # FitzHugh–Nagumo-type system — TODO confirm against prob.py.
    m.addConstr(x1[0] == x0[0] + time_step * (- (xx0 * x0[0]) / 3.0 + x0[0] - x0[1] + 0.875))
    m.addConstr(x1[1] == x0[1] + time_step * 2.0 / 25.0 * (x0[0] - 0.8 * x0[1] + 0.7))
    # Symbolic network output B(x1) encoded as Gurobi constraints.
    Bf = barrier_f_without_der(m, x1, wb_list, wb_length, True)
    # Sandwich B(x1) between the two parallel affine functions.
    m.addConstr(A[0] * x1[0] + A[1] * x1[1] + b_l <= Bf)
    m.addConstr(A[0] * x1[0] + A[1] * x1[1] + b_l + b >= Bf)
    m.setObjective(b, GRB.MINIMIZE)
    m.optimize()
    # NOTE(review): status is printed but not checked; .x attribute access
    # below will raise if the model did not reach an optimal solution.
    print(m.status)
    return A[0].x, A[1].x, b_l.x + b.x

if __name__ == "__main__":
    # model = cdinn2.gen_nn()
    # model.load_state_dict(torch.load('./model/cex_train.pt'), strict=True)
    # # nn_input = torch.tensor(np.array([0.4,0.1]), dtype=torch.float64)
    # nn_input = torch.tensor(np.array([[6, -2]]), dtype=torch.float64)
    # nn_output = model(prob.vector_field(nn_input)) - (1 - prob.alpha) * model(nn_input)
    # print(nn_output)
    # if nn_output>0:
    #     print(">0")
    # else:
    #     print("<0")

    # import numpy as np
    # dim_min = [-0.2, -0.3, -0.3]
    # dim_max = [-0.15, -0.25, -0.25]
    # blocks = block_space(dim_min, dim_max)
    # print(blocks)

    # model = cdinn2.gen_nn()
    # model.load_state_dict(torch.load('./model/cdinn_eg4_1_10_0_0.1_1_train.pt'), strict=True)
    # print(model(torch.tensor(np.array([-0.5, 0.8]))))

    # Sample a 10x10x10 grid over the cube [-2, 2]^3 and evaluate the
    # system's vector field at every grid point.
    x0_l = -2
    x0_r = 2
    x1_l = -2
    x1_r = 2
    x2_l = -2
    x2_r = 2
    x000 = np.linspace(x0_l, x0_r, 10)
    x111 = np.linspace(x1_l, x1_r, 10)
    x222 = np.linspace(x2_l, x2_r, 10)
    # meshgrid stacks to shape (3, 10, 10, 10): component axis first.
    s = np.array(np.meshgrid(x000, x111, x222))
    # Fortran-order flatten makes axis 0 (the component axis) vary fastest,
    # so the three coordinates of each grid point become consecutive;
    # the following C-order reshape then yields an (N, 3) array of points.
    b = s.reshape(-1, order='F')
    s = b.reshape(-1, 3)
    print(s)
    # vector_field comes from `from prob import *` above — presumably maps
    # a (N, 3) tensor of states to their time derivatives; TODO confirm.
    fs = vector_field(torch.tensor(s))
    print(fs.numpy())
    # print(fs[0][0].item())
    # x = s[:, 0]
    # y = s[:, 1]
    # z = s[:, 2]
    # for idx in range(len(s)):
    #     print("===============")
    #     print(model(torch.tensor(s[idx])))
        # print(s[idx, 0])


    # wb_1_list = []
    # wb_2_list = []
    # params = model.state_dict()
    # i = 0
    # for k in params:
    #     if 'layers' in k:
    #         continue
    #     if i % 4 < 2:
    #         wb_1_list.append(params[k].detach().numpy())
    #     else:
    #         wb_2_list.append(params[k].detach().numpy())
    #     i += 1
    # wb_length = int(len(wb_1_list) / 2)
    # gNN_f_A0, gNN_f_A1, gNN_b = barrier_Bf_nonlinear(wb_1_list, wb_length, domain_min, domain_max)
    # print(gNN_f_A0)
    # print(gNN_f_A1)
    # print(gNN_b)


# block = [[-4, -4], [4, 4]]
# block_min, block_max = block
#
# new_cex_list = []
# x0_l = block_min[0]
# x1_l = block_min[1]
# x0_r = block_max[0]
# x1_r = block_max[1]
# x0_m = (x0_l + x0_r) / 2
# x1_m = (x1_l + x1_r) / 2
# new_cex_list.append([[x0_l, x1_l], [x0_m, x1_m]])
# new_cex_list.append([[x0_l, x1_m], [x0_m, x1_r]])
# new_cex_list.append([[x0_m, x1_l], [x0_r, x1_m]])
# new_cex_list.append([[x0_m, x1_m], [x0_r, x1_r]])
#
# block = [[-2, -2, -2], [2, 2, 2]]
# block_min, block_max = block
# new_cex_list = []
# x0_l = block_min[0]
# x1_l = block_min[1]
# x2_l = block_min[2]
# x0_r = block_max[0]
# x1_r = block_max[1]
# x2_r = block_max[2]
# x0_m = (x0_l + x0_r) / 2
# x1_m = (x1_l + x1_r) / 2
# x2_m = (x2_l + x2_r) / 2
# new_cex_list.append([[x0_l, x1_l, x2_l], [x0_m, x1_m, x2_m]])
# new_cex_list.append([[x0_l, x1_l, x2_m], [x0_m, x1_m, x2_r]])
# new_cex_list.append([[x0_l, x1_m, x2_l], [x0_m, x1_r, x2_m]])
# new_cex_list.append([[x0_l, x1_m, x2_m], [x0_m, x1_r, x2_r]])
# new_cex_list.append([[x0_m, x1_l, x2_l], [x0_r, x1_m, x2_m]])
# new_cex_list.append([[x0_m, x1_l, x2_m], [x0_r, x1_m, x2_r]])
# new_cex_list.append([[x0_m, x1_m, x2_l], [x0_r, x1_r, x2_m]])
# new_cex_list.append([[x0_m, x1_m, x2_m], [x0_r, x1_r, x2_r]])







