"""
GFlowNet causal discovery main script: includes training and result testing.
"""

import pickle
import copy
import os
from itertools import count
import random

from tqdm import tqdm
import numpy as np
import torch
import gzip
from mindspore import context, Tensor, set_seed
from mindspore.nn.probability.distribution import Categorical
from mindspore.common import dtype as mstype
from mindspore.communication.management import init


from args import args
from env import CausalEnv
from loss_network import TrainNetWrapper
from network.model_ms import MsMLP
from castle.metrics import MetricsDAG
from utils import softmax_matrix, synthetic_data, synthetic_data_nonliear, Reward, get_graph_from_order, \
    pruning_by_coef, pruning_by_sortnregress, save_sample_batch, select_action_base_probability



# Fix all RNG seeds (MindSpore, stdlib random, numpy) for reproducibility.
set_seed(2022)
random.seed(2022)
np.random.seed(2022)

# Generate the synthetic benchmark: ground-truth DAG adjacency matrix,
# data samples X, and the generator object. Linear vs. non-linear SEM is
# selected through args.sem_type.
if args.sem_type == 'linear':
    print('linear!!')
    true_causal_matrix, X, data_generator = synthetic_data()
else:
    true_causal_matrix, X, data_generator = synthetic_data_nonliear()
print('true_causal_matrix1', true_causal_matrix)



def sample_batch(agent, envs, s):
    """Sample one batch of orders from the agent and score the induced graph.

    Args:
        agent: trained FlowNetAgent used to sample trajectories.
        envs: causal environment (unused directly; kept for interface parity).
        s: initial state Tensor of shape (mbsize, n_node**2).

    Returns:
        tuple: (reward, tpr, shd, fdr, p_tpr, p_shd, p_fdr) — reward of the
        sampled graph plus DAG metrics before and after coefficient pruning.
    """
    # BUG FIX: the original built the mask with undefined `tf(...)` and
    # `torch.where` (PyTorch leftovers) and called sample_many without the
    # required masked_matrix_ms argument. Build everything with numpy, matching
    # the construction used in main(): -1 on the diagonal marks forbidden
    # self-loop actions; *1e2 turns that into a large negative logit offset.
    masked_matrix = np.array(
        [np.diag(-np.ones(args.n_node)).reshape(-1) for _ in range(args.mbsize)])
    masked_list = [np.where(item == -1)[0] for item in masked_matrix]
    masked_matrix_ms = masked_matrix * 1e2

    # sample_many returns (batch, states, updated_order); [-1] is the list of
    # per-sample node orders (numpy int arrays).
    sample_results = agent.sample_many(args.mbsize, s, masked_list, masked_matrix_ms)[-1]
    order = np.asarray(sample_results[0], dtype=int)
    graph = np.array(get_graph_from_order(sequence=order))
    reward = agent.reward_.cal_ori(graph, order, X)  # GP

    # Compute metrics once per graph instead of rebuilding MetricsDAG per key.
    metrics = MetricsDAG(graph, true_causal_matrix).metrics
    tpr, shd, fdr = metrics['tpr'], metrics['shd'], metrics['fdr']

    pruned_matrix = pruning_by_coef(graph, X=X)
    p_metrics = MetricsDAG(pruned_matrix, true_causal_matrix).metrics
    p_tpr, p_shd, p_fdr = p_metrics['tpr'], p_metrics['shd'], p_metrics['fdr']

    return reward, tpr, shd, fdr, p_tpr, p_shd, p_fdr






class FlowNetAgent:
    """GFlowNet agent: samples node-order trajectories from the causal
    environment and trains the flow-matching model on collected transitions."""

    def __init__(self, args, envs, reward_):
        # Number of graph nodes; states are flattened d x d adjacency matrices.
        self.n_node = args.n_node
        self.xdim = args.n_node * args.n_node
        self.embed_dim = args.embed_dim
        self.num_heads = args.num_heads
        self.reward_ = reward_

        # MLP
        if args.model_name == 'MLP':
            self.model = MsMLP(self.xdim, args.n_hid, args.n_layers, self.xdim)

        self.envs = envs
        self.tau = args.bootstrap_tau

    def sample_many(self, mbsize, states, masked_list, masked_matrix_ms):
        """Roll out `mbsize` trajectories and collect flow-matching transitions.

        Args:
            mbsize: number of parallel trajectories.
            states: Tensor of flattened adjacency states, shape (mbsize, d*d).
            masked_list: per-sample index arrays of forbidden actions.
            masked_matrix_ms: additive logit mask (large negative at forbidden
                entries) applied to the model output before sampling.

        Returns:
            tuple: (batch, states, updated_order) where batch is
            [parents_, actions_, r_, sp_, done_] — lists of numpy arrays — and
            updated_order holds each sample's final node ordering.
        """
        d = args.n_node
        d_2 = d * d
        done = [False] * mbsize
        transitive_list = copy.deepcopy(masked_list)
        # Transitive mask starts with the initially forbidden entries set to 1.
        transitive_matrix = np.diag(np.zeros(d)).reshape(-1)
        transitive_matrix[transitive_list[0]] = 1
        transitive_matrix = [transitive_matrix.reshape(d, d)] * mbsize
        updated_order = [np.array([], dtype=int)] * mbsize
        parents_, actions_, r_, sp_, done_ = [], [], [], [], []
        self.model.set_train(False)
        while not all(done):

            if args.model_name == 'MLP':
                output = self.model(states)
            else:
                raise Exception('model selection error')

            # Adding the large-negative mask drives forbidden-action
            # probabilities to ~0 after the softmax below.
            output = (output + Tensor((masked_matrix_ms), mstype.float32)).asnumpy()
            output_norm = np.array([softmax_matrix(output[i, :]) for i in range(args.mbsize)])

            # acts = Categorical(output_norm).sample().asnumpy()
            acts = select_action_base_probability(d_2, output_norm)


            # step_new returns an 8-tuple:
            # (sp, r, done, masked_list, ini_done, transitive_matrix, true_step, updated_order)
            step_full = [self.envs.step_new(a, state, tm, d, order) for a, state, tm, d, order in
                         zip(acts, states, transitive_matrix, done, updated_order)]

            # Find the parent node of the next node, if it cannot be selected, select the parent node of the current node
            p_a = [
                self.envs.parent_transitions(sp, updated_order) if not done else
                self.envs.parent_transitions(true_step, updated_order)
                for a, (sp, r, done, m_list, ini_done, transitive_m, true_step, updated_order) in
                zip(acts, step_full) if not ini_done]

            # add trajectory; p denotes parent transitions, sp denotes the successor state
            for (p, a), (sp, r, d, m_list, ini_done, updated_mt, true_step, updated_order) in zip(p_a, step_full):
                if not ini_done:
                    if d:
                        # Terminal step: record the true final state instead of sp.
                        sp = true_step
                    parents_.append(p[0][np.newaxis, :])
                    actions_.append(np.array([a[0]]))
                    r_.append(np.array([r]))
                    sp_.append(sp[np.newaxis, :])
                    done_.append(np.array([d]))

            # m maps each still-active sample index to its position in step_full;
            # for already-done samples the `or` short-circuits before m[i] is read.
            c = count(0)
            m = {j: next(c) for j in range(mbsize) if not done[j]}
            done = [bool(d or step_full[m[i]][2]) for i, d in enumerate(done)]
            # extract data
            updated_order = []
            states = []
            masked_list_new = []
            transitive_matrix = []
            masked_matrix_ms_list = []
            for item in step_full:
                # node ordering is tuple element 8 (index 7); [0] unwraps its
                # single-element list container
                updated_order.append(item[7][0])
                # next state is tuple element 1 (index 0)
                states.append(np.array(item[0]))
                # masked-action index list is tuple element 4 (index 3)
                masked_list_new.append(item[3])
                # transitive matrix is tuple element 6 (index 5)
                transitive_matrix.append(item[5])
                # rebuild the additive logit mask from the new masked indices
                mask_temp = np.zeros(d_2)
                mask_temp[item[3]] = -1e2
                masked_matrix_ms_list.append(mask_temp)
            # update mask matrix
            masked_matrix_ms = np.array(masked_matrix_ms_list)
            states = Tensor((np.array(states)), mstype.float32)
            # Not used yet
            if args.replay_strategy == "top_k":
                for (sp, r, d, m_list, ini_done, updated_mt, true_step, order) in step_full:
                    self.replay.add(tuple(sp), r)
        batch = [parents_, actions_, r_, sp_, done_]
        return batch, states, updated_order

    def learn_train_one_step(self, batch, train_net):
        """Run one optimization step of the wrapped train network.

        Args:
            batch: [parents, actions, rewards, successor states, done flags]
                as produced by `sample_many` (lists of numpy arrays).
            train_net: TrainNetWrapper around self.model.

        Returns:
            the loss Tensor returned by train_net.
        """
        parents, actions, reward, parents_sub, done = map(np.concatenate, batch)
        # Build (row, action) index pairs used by the loss to gather the
        # log-flow of each taken action.
        index_ = np.expand_dims(np.arange(parents.shape[0]), 1)
        actions_ = np.expand_dims(actions, 1)
        index = Tensor(np.concatenate([index_, actions_], 1).reshape(2, -1), mstype.int64)
        parents = Tensor(parents, mstype.float32)
        actions = Tensor(actions, mstype.int64)
        reward = Tensor(reward, mstype.float32)
        parents_sub = Tensor(parents_sub, mstype.float32)
        done = Tensor(done, mstype.float32)
        parent_range = Tensor(np.arange(parents.shape[0]), mstype.int64)
        loss = train_net(parents, actions, reward, parents_sub, done, parent_range)
        return loss


def main():
    """Train the GFlowNet agent, then evaluate the learned causal order.

    Side effects: prints running loss statistics, periodically pickles a
    checkpoint (model, params, best order/reward, dataset) to args.save_dir,
    and finally calls test().
    """
    # init env, reward, agent
    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_platform)
    # init()
    reward_ = Reward(args, X)
    envs = CausalEnv(args.n_node, reward_)
    agent = FlowNetAgent(args, envs, reward_)

    d = args.n_node
    ttsr = max(int(args.train_to_sample_ratio), 1)  # train-to-sample ratio, >= 1
    sttr = max(int(1 / args.train_to_sample_ratio), 1)  # sample-to-train ratio

    matrix_list = []

    # Running loss buffers; flushed into mean/max/min every 50 epochs.
    loss_ = []
    mean_loss_ = []
    max_loss_ = []
    min_loss_ = []

    train_net = TrainNetWrapper(agent.model, args)

    for epoch in tqdm(range(args.epoches)):
        # Fresh episode: -1 on the flattened diagonal marks self-loop actions
        # as forbidden; *1e2 turns it into a large negative logit offset.
        masked_matrix = np.array([np.diag(-np.ones(d)).reshape(-1) for _ in range(args.mbsize)])
        masked_matrix_ms = masked_matrix * 1e2
        masked_list = [np.where(item == -1)[0] for item in masked_matrix]
        s = Tensor(np.zeros((args.mbsize, d ** 2)), mstype.float32)

        # sample data
        # NOTE(review): each iteration overwrites `data`, so only the last of
        # the sttr sampled batches is trained on — confirm this is intended.
        data = []
        for _ in range(sttr):
            data, new_s, updated_order = agent.sample_many(args.mbsize, s, masked_list, masked_matrix_ms)  # mbsize = 16

        # training
        for _ in range(ttsr):
            train_net.set_train()
            losses = agent.learn_train_one_step(data, train_net)
            # print("every loss", losses)
            if losses is not None:
                loss_.append(losses.asnumpy())
                if not epoch % 50:
                    mean_loss = np.mean(loss_)
                    max_loss = np.max(loss_)
                    min_loss = np.min(loss_)
                    print('********** loss：', mean_loss)
                    mean_loss_.append(mean_loss)
                    max_loss_.append(max_loss)
                    min_loss_.append(min_loss)
                    loss_ = []


            if not (epoch + 1) % 100:
                # Checkpoint every 100 epochs: model, weights, best order so far.
                max_reward, order = reward_.best_result()
                results = {'model': agent.model,
                           'params': [(key, value.asnumpy()) for key, value in agent.model.parameters_dict().items()],
                           'best_order': [order],
                           'best_reward': max_reward,
                           'ground_truth': true_causal_matrix,
                           'dataset': X,
                           'args': args}

                save_dir = args.save_dir.format(args.data_scheme, args.model_name, args.n_node, epoch)
                root = os.path.split(save_dir)[0]
                if len(root):
                    os.makedirs(root, exist_ok=True)

                # BUG FIX: the original passed gzip.open(...) directly to
                # pickle.dump and never closed it — with gzip that can leave
                # the archive unflushed/truncated. Use a context manager.
                with gzip.open(save_dir, 'wb') as f:
                    pickle.dump(results, f)
                print('save_model')

                loss_ = []
                mean_loss_ = []
                max_loss_ = []
                min_loss_ = []
    print('************* Finish train ****************')
    test(reward_, agent, envs, matrix_list)


def test(reward_, agent, envs, matrix_list):
    """Evaluate the best training result, then sample repeated batches.

    Args:
        reward_: Reward object tracking the best (reward, order) found.
        agent: trained FlowNetAgent.
        envs: causal environment, forwarded to sample_batch.
        matrix_list: list to which the best flattened graph is appended.

    Side effects: prints metrics and persists per-batch sampling results
    via save_sample_batch.
    """
    # ---- Best result found during training
    max_reward, order = reward_.best_result()

    graph = np.array(get_graph_from_order(sequence=order))
    var_ = reward_.varsortability(X, order)

    matrix_list.append(graph.reshape(-1))
    print((true_causal_matrix.shape))
    print(graph)
    # Compute metrics once per graph instead of rebuilding MetricsDAG per key.
    metrics = MetricsDAG(graph, true_causal_matrix).metrics
    tpr, shd, fdr = metrics['tpr'], metrics['shd'], metrics['fdr']

    pruned_matrix = pruning_by_sortnregress(order, graph, X)
    p_metrics = MetricsDAG(pruned_matrix, true_causal_matrix).metrics
    p_tpr, p_shd, p_fdr = p_metrics['tpr'], p_metrics['shd'], p_metrics['fdr']
    print('var_', var_ / 100)
    print('\n tpr {} , shd {} , fdr {} ,  p_tpr {} , p_shd {} ,p_fdr {} \n'.format(tpr, shd, fdr, p_tpr, p_shd, p_fdr))

    tpr_ = []
    shd_ = []
    fdr_ = []
    p_tpr_ = []
    p_shd_ = []
    p_fdr_ = []
    # Renamed from `reward_` — the original shadowed the Reward argument.
    rewards = []
    for _ in tqdm(range(args.sampling_size)):
        # BUG FIX: the original built `s` with undefined `tf(...)` (a PyTorch
        # leftover → NameError). Build the all-zero start state as a MindSpore
        # Tensor, matching the dtype used in main().
        s = Tensor(np.zeros((args.mbsize, args.n_node ** 2)), mstype.float32)
        reward, tpr, shd, fdr, p_tpr, p_shd, p_fdr = sample_batch(agent, envs, s)
        tpr_.append(tpr)
        shd_.append(shd)
        fdr_.append(fdr)
        p_tpr_.append(p_tpr)
        p_shd_.append(p_shd)
        p_fdr_.append(p_fdr)
        rewards.append(reward)
    save_sample_batch(rewards, tpr_, shd_, fdr_, p_tpr_, p_shd_, p_fdr_)


def test_():
    """Scratch sanity check of numpy fancy indexing (not part of the pipeline)."""
    matrix = np.arange(12).reshape((4, 3))
    print(matrix)
    repeated_row_head = matrix[0, [0, 0, 0, 0]]
    print(repeated_row_head)

# Script entry point: run training followed by evaluation.
if __name__ == '__main__':
    main()
    # test_()