import cmath
import os.path
from dataclasses import dataclass
import torch
import numpy as np
import time
from pathlib import Path
from torch_geometric.data import HeteroData

from branch_and_bound_algorithm.solve_relaxation_efficient import ConvexOptimization
from models.setting import *
from models.gnn_policy import MyGNN


def solve_bb(H, r, theta, prune_policy, prune_model, max_steps=1000):
    """
    Solve the discrete-phase beamforming problem with branch and bound.

    @params:
        H: channel matrix, shape (M, N)
        r: received signal, shape (M,)
        theta: list of N sorted lists of candidate phases, one per variable
        prune_policy: {'default', 'gnn'} -- node-pruning strategy
        prune_model: GNN instance queried when prune_policy == 'gnn'
        max_steps: maximum number of B&B iterations (default 1000,
                   matching the previous hard-coded cap)

    @returns:
        (x_incumbent, objective value, elapsed seconds,
         number of relaxations solved)
    """
    t1 = time.time()
    tree = Tree(epsilon=0.001)
    tree.reset(H=H, r=r, theta=theta, prune_policy=prune_policy, prune_model=prune_model)
    timestep = 0

    def report_gap():
        # Relative optimality gap, printed on every termination path.
        print('|global_U-global_L|/|global_U|:{}'.format(abs(tree.global_upper_bound - tree.global_lower_bound)
                                                         / abs(tree.global_upper_bound)))

    while timestep < max_steps:
        node_id = tree.select_node()
        if node_id is None:
            report_gap()
            break
        tree.active_node = tree.nodes[node_id]

        # Evaluate the prune decision once: the original called tree.prune()
        # twice (print + branch), doubling GNN inference per iteration.
        should_prune = tree.prune()
        print('prune active node: {}'.format(should_prune))
        if should_prune:
            tree.delete_node(node_id)
        else:
            branching_var_id = tree.select_branching_variable()
            if branching_var_id is None:
                report_gap()
                break
            tree.push_children(branching_var_id, node_id)

        timestep += 1
        tree.fathom_nodes()
        if len(tree.nodes) == 0:
            report_gap()
            break
        if (abs(tree.global_upper_bound - tree.global_lower_bound) / abs(tree.global_upper_bound) < tree.epsilon):
            break

    return (tree.x_incumbent.copy(),
            tree.get_objective(tree.x_incumbent),
            time.time() - t1,
            tree.convex_optimization.num_solve_problem)


@dataclass
class Features:
    """Feature container for the heterogeneous graph fed to the GNN prune policy.

    All fields default to None and are filled by Tree.get_features().
    Annotations use np.ndarray (np.array is a function, not a type).
    """
    variable_features: np.ndarray = None
    constraint_features: np.ndarray = None
    edge_index: np.ndarray = None
    edge_features: np.ndarray = None
    # Fields below were previously assigned dynamically by Tree.get_features()
    # without being declared; declared here (appended last to preserve the
    # positional-constructor order of the original fields).
    metric_features: np.ndarray = None
    vc_edge_index: np.ndarray = None
    cv_edge_index: np.ndarray = None
    vm_edge_index: np.ndarray = None
    mv_edge_index: np.ndarray = None


class SolverException(Exception):
    """Exception type for solver failures.

    NOTE(review): not raised anywhere in this file; presumably used by
    callers or kept for future use -- confirm before removing.
    """
    pass


class Node:
    """One node of the B&B tree: a sub-problem of the original optimization.

    The feasible region of the original problem contains the feasible region
    of every node's sub-problem.
    """

    def __init__(self, depth, node_index, parent_node, upper_bound, lower_bound, x_relax, x_feasible, theta):
        """
        @params:
            depth: depth of the node from the root of the BB tree
            node_index: unique index assigned to the node in the BB tree
            parent_node: parent node of this node
            upper_bound: upper bound of sub-optimization problem
            lower_bound: lower bound of sub-optimization problem
            x_relax: the optimal solution of the relaxation optimization problem
            x_feasible: a solution of sub-optimization problem
            theta: parameter of the original optimization problem
        """
        # tree bookkeeping
        self.node_index = node_index
        self.parent_node = parent_node
        self.depth = depth
        # bounds of this sub-problem
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        # relaxed / feasible solutions and remaining candidate-phase sets
        self.x_relax = x_relax
        self.x_feasible = x_feasible
        self.theta = theta


class Tree(object):
    def __init__(self, epsilon=1e-3):
        '''
        Initializes a B&B search tree.
        Consider single-group multicast beamforming (SGMB) problem
        @params:
            epsilon: The maximum relative gap between the global upper bound
                     and global lower bound at which the search stops.
        '''

        self.epsilon = epsilon  # B&B algorithm solving accuracy.

        self.N = None  # number of antennas
        self.M = None  # dimensionality of receiving signal
        self.H = None  # wireless channel gain, size = (M,N)
        self.r = None  # receiving signal, size = (M,)

        self.nodes = []  # list of open problems (nodes)
        self.active_node = None  # current active node
        self.node_index_count = 0  # incremented by 1 on every create_children() call

        self.lower_bound_list = []  # lower bound of each open node (parallel to self.nodes)
        self.upper_bound_list = []  # upper bound of each open node (parallel to self.nodes)
        self.global_lower_bound = np.nan  # global lower bound
        self.global_upper_bound = np.nan  # global upper bound

        self.x_incumbent = None  # incumbent solution of the original optimization problem

        self.prune_policy = None  # {'default','gnn'}
        self.prune_model = None  # a gnn class instance used when prune_policy == 'gnn'
        self.convex_optimization = None  # relaxation solver, created in reset()

    def reset(self, H, r, theta, prune_policy, prune_model):
        '''
        Re-initializes the tree for a new problem instance, solves the root
        relaxation and creates the root node.
        @params:
            H: channel matrix, shape (M, N)
            r: received signal, shape (M,)
            theta: list of N sorted candidate-phase lists, one per variable
            prune_policy: {'default','gnn'}
            prune_model: GNN instance queried when prune_policy == 'gnn'
        '''

        # clear all variables
        self.N = None  # number of antennas
        self.M = None  # dimensionality of receiving signal
        self.H = None  # wireless channel gain, size = (M,N)
        self.r = None  # receiving signal, size = (M,)

        self.nodes = []  # list of problems (nodes)
        self.active_node = None
        self.node_index_count = 0

        self.lower_bound_list = []  # list of lower bounds on the problem
        self.upper_bound_list = []  # list of upper bounds on the problem
        self.global_lower_bound = np.nan  # global lower bound
        self.global_upper_bound = np.nan  # global upper bound

        # initialize variables
        self.M, self.N = H.shape
        self.H = H
        self.r = r

        self.prune_policy = prune_policy
        self.prune_model = prune_model
        self.convex_optimization = ConvexOptimization(N=self.N, M=self.M, H=self.H, r=self.r)

        # calculate lower and upper bounds of the root node
        (x_relax, fx_relax, is_solved) = self.convex_optimization.solve_relaxed_optimization(theta)
        assert (is_solved)
        x_feasible = self.get_feasible_solution(x_relax, theta)

        self.global_lower_bound = fx_relax
        self.global_upper_bound = self.get_objective(x_feasible)
        # sanity check: upper bound must dominate lower bound (small tolerance)
        assert self.global_upper_bound + 0.001 > self.global_lower_bound
        self.x_incumbent = x_feasible
        self.active_node = Node(depth=1, node_index=self.node_index_count, parent_node=None,
                                upper_bound=self.global_upper_bound,
                                lower_bound=self.global_lower_bound, x_relax=x_relax, x_feasible=x_feasible,
                                theta=theta)

        self.nodes.append(self.active_node)
        self.lower_bound_list.append(self.global_lower_bound)
        self.upper_bound_list.append(self.global_upper_bound)
        if DEBUG:
            print('reset\n')
            print('global_L:{} global_U{}\n L_list:{}  U_list:{} '.format(self.global_lower_bound,
                                                                          self.global_upper_bound,
                                                                          self.lower_bound_list, self.upper_bound_list))

    def push_children(self, branch_var_id, node_id):
        '''
        Delete a node, create two children, and append them to the node list.
        @params:
            branch_var_id: selected branching variable to branch on
            node_id: selected node to branch on
        '''
        theta = self.active_node.theta
        # Do not branch if the number of the elements of theta[branch_var_id] is smaller than 2.
        if len(theta[branch_var_id]) < 2:
            return

        self.delete_node(node_id)

        # divide theta[branch_var_id] into two lists (left/right halves).
        index_middle = int(len(theta[branch_var_id]) / 2)
        theta_left = []
        theta_left.extend(theta[:branch_var_id])
        theta_left.extend([theta[branch_var_id][:index_middle]])
        theta_left.extend(theta[branch_var_id + 1:])
        theta_right = []
        theta_right.extend(theta[:branch_var_id])
        theta_right.extend([theta[branch_var_id][index_middle:]])
        theta_right.extend(theta[branch_var_id + 1:])

        # create child nodes (each solves its own relaxation)
        left_child = self.create_children(theta_left)
        right_child = self.create_children(theta_right)

        # keep a child only if its lower bound cannot be fathomed immediately;
        # update the global bounds and the incumbent as children are added.
        if left_child.lower_bound <= self.global_upper_bound:
            self.nodes.append(left_child)
            self.upper_bound_list.append(left_child.upper_bound)
            self.lower_bound_list.append(left_child.lower_bound)
            self.global_lower_bound = min(self.lower_bound_list)
        if left_child.upper_bound < self.global_upper_bound:
            self.global_upper_bound = left_child.upper_bound
            self.x_incumbent = left_child.x_feasible
        if right_child.lower_bound <= self.global_upper_bound:
            self.nodes.append(right_child)
            self.upper_bound_list.append(right_child.upper_bound)
            self.lower_bound_list.append(right_child.lower_bound)
            self.global_lower_bound = min(self.lower_bound_list)
        if right_child.upper_bound < self.global_upper_bound:
            self.global_upper_bound = right_child.upper_bound
            self.x_incumbent = right_child.x_feasible

        assert self.global_lower_bound < self.global_upper_bound + 0.001
        if DEBUG:
            print('push_children\n')
            print('global_L:{} global_U{}\n L_list:{}  U_list:{} \n '.format(self.global_lower_bound,
                                                                             self.global_upper_bound,
                                                                             self.lower_bound_list,
                                                                             self.upper_bound_list))

    def create_children(self, theta):
        '''
        Create a child Node of the active node for the candidate-phase sets
        theta: solve the relaxation for its lower bound, round to a feasible
        point for its upper bound.
        '''
        (x_relax, fx_relax, is_solved) = self.convex_optimization.solve_relaxed_optimization(theta)
        assert (is_solved)
        x_feasible = self.get_feasible_solution(x_relax, theta)
        upper_bound = self.get_objective(x_feasible)
        assert fx_relax < upper_bound + 0.01, "upper_bound-fx_relax/|upper_bound|:{}".format(
            (upper_bound - fx_relax) / abs(upper_bound))
        # create and append node
        self.node_index_count += 1
        new_node = Node(depth=self.active_node.depth + 1,
                        node_index=self.node_index_count,
                        parent_node=self.active_node,
                        upper_bound=upper_bound,
                        lower_bound=fx_relax,
                        x_relax=x_relax,
                        x_feasible=x_feasible,
                        theta=theta)
        return new_node

    def get_objective(self, x):
        '''Objective value ||H @ x - r|| of the original problem at solution x.'''
        return np.linalg.norm(self.H @ x - self.r)

    def select_branching_variable(self):
        '''
        Returns the index at which x_relax and x_feasible differ most, among
        variables that still have more than one candidate phase; None if no
        such variable exists.
        '''
        x_relax = self.active_node.x_relax
        x_feasible = self.active_node.x_feasible
        # sort variables by decreasing |x_relax - x_feasible|
        indices = np.argsort(np.abs(x_relax - x_feasible))
        indices = indices[::-1]
        branching_variable = None
        for n in indices:
            if len(self.active_node.theta[n]) > 1:
                branching_variable = n
                break
        return branching_variable

    def select_node(self):
        '''
        Returns the index of the open node with the minimum lower bound, or
        None if the node list is empty.  (The caller updates active_node.)
        '''
        if len(self.lower_bound_list) == 0:
            return None
        else:
            node_id = np.argmin(self.lower_bound_list)
            return node_id

    def prune(self):
        '''
        Decide whether the active node should be pruned.  Always False under
        the 'default' policy; otherwise builds the heterogeneous feature graph
        of the active node and thresholds the GNN prune model's output at 0.5.
        '''
        if self.prune_policy == 'default':
            return False
        else:
            data = HeteroData()
            features = self.get_features()
            # add node feature information
            data['var'].x = torch.tensor(features.variable_features, dtype=torch.float)
            data['con'].x = torch.tensor(features.constraint_features, dtype=torch.float)
            data['met'].x = torch.tensor(features.metric_features, dtype=torch.float)
            # add edge index information
            data['var', 'v_c', 'con'].edge_index = torch.tensor(features.vc_edge_index, dtype=torch.int64)
            data['con', 'c_v', 'var'].edge_index = torch.tensor(features.cv_edge_index, dtype=torch.int64)
            data['var', 'v_m', 'met'].edge_index = torch.tensor(features.vm_edge_index, dtype=torch.int64)
            data['met', 'm_v', 'var'].edge_index = torch.tensor(features.mv_edge_index, dtype=torch.int64)

            self.prune_model.eval()
            # inference only: disable autograd bookkeeping
            with torch.no_grad():
                out = self.prune_model(data.x_dict, data.edge_index_dict, 1)
            if out > 0.5:
                return True
            else:
                return False

    def fathom_nodes(self):
        '''
        Delete every open node whose lower bound exceeds the global upper
        bound by more than epsilon (it cannot contain a better solution).
        '''
        # check if lower_bound_list is empty.
        if len(self.lower_bound_list) == 0:
            return
        del_ind = np.argwhere(np.array(self.lower_bound_list) > self.global_upper_bound + self.epsilon)
        if len(del_ind) > 0:
            # delete from the back so earlier indices stay valid
            del_ind = sorted(list(del_ind.squeeze(axis=1)))
            for i in reversed(del_ind):
                self.delete_node(i)

    def delete_node(self, node_id):
        '''Remove node node_id from the node list and both bound lists.'''
        del self.nodes[node_id]
        del self.lower_bound_list[node_id]
        del self.upper_bound_list[node_id]

    def is_terminal(self):
        '''
        Returns True when no open nodes remain or the relative gap between the
        global bounds is below epsilon.
        '''
        # Fix: the original second if/else unconditionally overwrote the gap
        # test, so a converged tree with open nodes was reported non-terminal.
        if len(self.nodes) == 0:
            return True
        return abs(self.global_upper_bound - self.global_lower_bound) / abs(self.global_upper_bound) < self.epsilon

    @staticmethod
    def get_feasible_solution(x_relax, theta):
        '''
        Round each entry of x_relax to the unit-modulus point whose candidate
        phase theta[n][i] is closest to the entry's phase.
        NOTE(review): closeness ignores 2*pi wrap-around -- confirm candidate
        phases and cmath.phase() values share the (-pi, pi] range.
        '''
        x_feasible = np.zeros(len(theta)) + 1j * np.zeros(len(theta))
        for n in range(len(theta)):
            # Fix: take the absolute phase difference.  Without abs() the
            # argmin always picked the smallest candidate phase (theta[n] is
            # sorted), ignoring x_relax entirely.
            index = np.argmin(np.abs(np.array(theta[n]) - cmath.phase(x_relax[n])))
            phase = theta[n][index]
            x_feasible[n] = np.cos(phase) + 1j * np.sin(phase)
        return x_feasible

    def get_features(self):
        '''
        Build the Features bundle of the active node for the GNN prune policy:
        variable/constraint/metric node features plus the edge indices of a
        fully-connected var-con bipartite graph and a var-met star graph.
        '''
        features = Features()
        # variable features: real/imag parts of the feasible solution
        features.variable_features = np.zeros((self.N, NUM_VARIABLE_FEATURES))
        features.variable_features[:, 0] = np.real(self.active_node.x_feasible.reshape(-1))
        features.variable_features[:, 1] = np.imag(self.active_node.x_feasible.reshape(-1))
        # constraint features: offset of the feasible point from the smallest
        # remaining candidate phase of each variable
        features.constraint_features = np.zeros((self.N, NUM_CONSTRAINT_FEATURES))
        for n in range(self.N):
            tmp_theta = min(self.active_node.theta[n])
            features.constraint_features[n, 0] = np.real(self.active_node.x_feasible[n]) - np.cos(tmp_theta)
            features.constraint_features[n, 1] = np.imag(self.active_node.x_feasible[n]) - np.sin(tmp_theta)

        # metric features: local/global bounds and node depth
        features.metric_features = np.zeros((1, NUM_METRIC_FEATURES))
        features.metric_features[0, 0] = self.active_node.upper_bound
        features.metric_features[0, 1] = self.active_node.lower_bound
        features.metric_features[0, 2] = self.global_upper_bound
        features.metric_features[0, 3] = self.global_lower_bound
        features.metric_features[0, 4] = self.active_node.upper_bound - self.global_upper_bound
        features.metric_features[0, 5] = self.active_node.depth

        # edge index: dense var<->con connectivity and var<->met star
        features.vc_edge_index = np.stack((np.repeat(np.arange(self.N), self.N), np.tile(np.arange(self.N), self.N)))
        features.cv_edge_index = np.stack((features.vc_edge_index[1], features.vc_edge_index[0]))
        features.vm_edge_index = np.stack((np.arange(self.N), np.repeat([0], self.N)))
        features.mv_edge_index = np.stack((features.vm_edge_index[1], features.vm_edge_index[0]))

        return features


if __name__ == '__main__':
    # np.random.seed(seed=100)

    # Random problem instance: complex channel H, received signal r, and
    # 8 candidate phases per antenna drawn uniformly from (-pi, pi).
    N = 8
    M = N + 1
    H = np.random.randn(M, N) + 1j * np.random.randn(M, N)
    r = np.random.randn(M) + 1j * np.random.randn(M)
    theta = np.random.rand(N, 8) * 2 * np.pi - np.pi
    theta = theta.tolist()  # idiomatic instance call (was np.ndarray.tolist(theta))
    # sort theta[n], n=1,2,...N
    for row in theta:
        row.sort()

    # Sort the matched checkpoints: Path.glob() yields files in arbitrary
    # order, so taking [-1] from an unsorted list was nondeterministic.
    model_dir = Path(MODEL_PATH) / 'M6_N5'
    prune_model_file = sorted(str(path) for path in model_dir.glob('gnn*.pth'))
    if not prune_model_file:
        raise FileNotFoundError('no gnn*.pth checkpoint found under {}'.format(model_dir))
    prune_policy = 'gnn'
    prune_model = MyGNN(HIDDEN_CHANNEL, NUM_LAYER)
    prune_model.load_state_dict(torch.load(prune_model_file[-1]))

    (x, fx, t, num_problem) = solve_bb(H=H, r=r, theta=theta, prune_policy=prune_policy, prune_model=prune_model)

    # print('x:{}'.format(x))
    print('objective: {} time: {}, num problems: {}'.format(fx, t, num_problem))
