import numpy as np
import treelib

from pritree.tree_model import TreeModel
from tools.general_tools import GeneralTools
from primarkov.mar_model import MarkovModel
from config.parameter_carrier import ParameterCarrier
import datetime


class Generator:

    #
    def __init__(self, cc: ParameterCarrier):
        """Create a generator bound to the given parameter carrier."""
        self.cc = cc
        # Threshold on (unique-state count / trajectory length): below it the
        # whole trajectory is considered to be circling and is discarded.
        self.lingering_if_all_circle_threshold = 0.2
        # If the most frequent states (the fraction above) carry more than this
        # share of the total visit frequency, the trajectory is lingering.
        self.lingering_weight_threshold = 0.6
        self.markov_model = MarkovModel(self.cc)
        # Filled in by level1_length_threshold(); -1 means "not computed yet".
        self.level1_length_threshold_value = -1
        self.total_in_degree = np.array([])

    #
    def load_generator(self, mar_model):
        """Attach a fitted Markov model and precompute generation helpers."""
        self.markov_model = mar_model
        # Recompute the level-1 length threshold for the new model's grid.
        self.level1_length_threshold()
        # In-degree of every real state: column sums of the noisy transition
        # matrix with the two virtual (start/end) states sliced away.
        real_block = self.markov_model.noisy_markov_matrix[:-2, :-2]
        self.total_in_degree = np.sum(real_block, axis=0)

    #
    def generate_no_gp_step(self, this_step, step_now, return_probability=False):
        """Draw the state after ``this_step`` from the first-order noisy
        Markov row, or return the raw probability row itself when
        ``return_probability`` is set.
        """
        tools = GeneralTools()
        model = self.markov_model
        state_count = model.all_state_number  # includes the two virtual states
        state_indices = np.arange(state_count)
        # Row `this_step` of the noisy Markov transition matrix.
        row = model.get_noisy_tran_pro_of_step_i(this_step)
        if return_probability:
            return row
        # The very first step may not jump straight to either virtual state;
        # later steps only exclude the virtual start state.
        excluded = [-2, -1] if step_now == 0 else -2
        return tools.draw_by_probability_without_an_element(state_indices, row, excluded)

    #
    def check_if_neighbor(self, this_step, step_to_check):
        """Look up the precomputed neighbour matrix for the two states."""
        return self.markov_model.neighbor_matrix[this_step, step_to_check]

    #
    def generate_one_step(self, this_step, last_step, step_number_now, return_probability=False):
        """Generate the step that follows ``this_step``.

        Sensitive states (flagged by the guidepost indicator) use the
        second-order model keyed on ``last_step``; every other state falls
        back to the first-order model.
        """
        model = self.markov_model
        if model.guidepost_indicator[this_step]:
            # Sensitive state: second-order Markov via its guidepost.
            guidepost = model.guidepost_set[model.index_dict[this_step]]
            next_step = guidepost.choose_direction(last_step, step_number_now,
                                                   return_probability=return_probability)
            # An all-zero answer means the guidepost holds no information for
            # this history; fall back to the first-order model.
            if np.sum(next_step) == 0:
                next_step = self.generate_no_gp_step(this_step, step_number_now,
                                                     return_probability=return_probability)
            return next_step
        # Non-sensitive state: first-order Markov.
        return self.generate_no_gp_step(this_step, step_number_now,
                                        return_probability=return_probability)

    #
    # def generate_no_guidepost_one_step(self, this_step, step_number_now, neighbor_check=False,
    #                                    return_probability=False):
    #     next_step = self.generate_no_gp_step(this_step, step_number_now, return_probability=return_probability)
    #     if return_probability:
    #         return next_step
    #     if neighbor_check:
    #         if next_step == self.markov_model.end_state_index:
    #             neighbor_check = True
    #         else:
    #             neighbor_check = self.check_if_neighbor(this_step, next_step)
    #         if neighbor_check:
    #             return next_step
    #         else:
    #             return False
    #     else:
    #         return next_step

    #
    def get_multilayer_neighbors(self, end_state):
        """Return the 1-, 2- and 3-hop neighbour index sets around ``end_state``."""
        tools = GeneralTools()
        neighbor_table = self.markov_model.grid.subcell_neighbors_real_level2_index
        layers = []
        frontier = np.array([end_state], dtype=int)
        for _ in range(3):
            # Expand one hop outwards from the previous layer.
            frontier = tools.neighbors_usable_indices_of_states(frontier, neighbor_table)
            layers.append(frontier)
        return layers

    #
    def level1_length_threshold(self):
        """Cache floor(0.8 * sqrt(level-1 cell count)), clamped at zero, as the
        model-wide level-1 length threshold."""
        side = np.sqrt(self.markov_model.grid.level1_cell_number)
        self.level1_length_threshold_value = max(int(np.floor(side * 0.8)), 0)

    #
    def keep_this_trajectory_with_level1_threshold(self, trajectory, level1_len_threshold, filtered_time):
        """Decide whether a partially generated trajectory should be kept.

        Returns False when the trajectory crosses too many level-1 cells for
        ``level1_len_threshold`` (subject to a random pass chance that grows
        with ``filtered_time``), True otherwise.  ``level1_len_threshold`` may
        be False, meaning "no per-start threshold": then only the grid-wide
        divide number bounds the level-1 length.
        """
        gt1 = GeneralTools()
        grid = self.markov_model.grid
        level1_divide_number = np.sqrt(grid.level1_cell_number)
        level1_step_number = gt1.level1_array_length(trajectory, grid)
        if level1_len_threshold is False:
            # BUGFIX: the original compared the threshold itself (known to be
            # False, i.e. 0) against the divide number, which always kept the
            # trajectory. Compare the trajectory's level-1 length instead.
            return not level1_step_number > level1_divide_number
        different_large_cell_number = np.unique(grid.subcell_index_to_large_cell_index_dict[trajectory]).size
        # Dispersed trajectories (many distinct large cells) are filtered at
        # the plain threshold; concentrated ones may be up to twice as long.
        if different_large_cell_number / level1_step_number > 0.6:
            try_to_drop = level1_step_number > level1_len_threshold
        else:
            try_to_drop = level1_step_number > 2 * level1_len_threshold
        if not try_to_drop:
            return True
        # Drop probability shrinks as more trajectories have already been
        # filtered; capped so at least a 5% drop chance always remains.
        pass_probability = min(filtered_time / level1_divide_number, 0.95)
        drop_probability = 1 - pass_probability
        drop = np.random.choice(np.array([True, False]),
                                p=np.array([drop_probability, 1 - drop_probability]))
        # BUGFIX: the original fell off the end (returning None) when the
        # random drop did not fire; return True explicitly. Callers compare
        # with `is False`, so this stays backward compatible.
        return not drop

    def get_level1_threshold_in_use(self, start_index):
        """Return the level-1 length threshold for trajectories starting at
        ``start_index``; fall back to the model-wide fixed threshold whenever
        the per-start value is non-positive."""
        per_start = self.markov_model.start_from_this_of_level1_cell_length_threshold[start_index]
        if per_start > 0:
            return per_start
        return self.level1_length_threshold_value

    # def generate_trajectory(self, neighbor_check=True):
    #     gt1 = GeneralTools()
    #     trajectory = []
    #     start_state = self.markov_model.start_state_index  # 虚拟头节点编号
    #     end_state = self.markov_model.end_state_index
    #     previous_step = start_state
    #     this_step = self.generate_no_gp_step(start_state, 0)  # 生成第一个状态（非虚拟头节点）
    #     real_end_state = self.choose_end(this_step)
    #     predicted_length = self.markov_model.calibrator.level2_shortest_path_cell_lengths[this_step, real_end_state]
    #     level1_len_threshold = self.get_level1_threshold_in_use(this_step)  # 返回以这个开始节点为起点的路径长度阈值
    #     filtered_time = 0
    #     level1_step_before = -1
    #     inner_step_in_this_large_cell = 1
    #     this_large_cell_inner_trajectory = []
    #     to_filter = False
    #     while this_step != end_state:
    #         trajectory.append(this_step)
    #         trajectory_length_now = len(trajectory)
    #         grid = self.markov_model.grid
    #         now_level1_cell_number = gt1.level1_array_length(trajectory, grid)  # 获得目前轨迹在level1上经过的网格数量
    #         this_step_large_cell = int(grid.subcell_index_to_large_cell_index_dict[this_step])
    #         this_large_cell_dividing_number = grid.level2_subdividing_parameter[this_step_large_cell]
    #         if now_level1_cell_number != level1_step_before:  # 如果生成的状态与前一个状态不在同一个大网格内
    #             if level1_step_before != -1:  # 新生成的状态已经不是第一个状态要进行过滤，第一个状态不进行过滤
    #                 to_filter = True
    #             level1_step_before = now_level1_cell_number  # 覆盖
    #             inner_step_in_this_large_cell = 1  # 重置为1
    #             this_large_cell_inner_trajectory = [this_step]
    #         else:
    #             if trajectory_length_now == 2:
    #                 to_filter = True
    #             this_large_cell_inner_trajectory.append(this_step)
    #             inner_step_in_this_large_cell = inner_step_in_this_large_cell + 1
    #
    #         if this_large_cell_dividing_number > 1:
    #             inner_this_large_cell_step_ratio = inner_step_in_this_large_cell / this_large_cell_dividing_number  # 在当前大网格单元内，已经生成的步数与该大网格单元内的最大步数比值
    #         else:
    #             inner_this_large_cell_step_ratio = 0
    #         if to_filter:
    #             if inner_this_large_cell_step_ratio > 0.4:
    #                 to_filter = False
    #                 filtered_time = filtered_time + 1
    #                 if self.keep_this_trajectory_with_level1_threshold(trajectory, level1_len_threshold,
    #                                                                    filtered_time) is False:
    #                     return False  # 防止生成的轨迹在 Level1网格中过长或过于集中。
    #                 # 若轨迹分散（经过多个大网格），允许稍长的路径,若轨迹集中（少量大网格），严格限制长度。
    #
    #         generating_result = self.end_neighbor_multiplied_next_step(trajectory, this_step, previous_step,
    #                                                                    trajectory_length_now,
    #                                                                    now_level1_cell_number,
    #                                                                    predicted_length)
    #         # 生成下一个状态
    #         if generating_result is False:
    #             return False
    #         this_step = generating_result
    #
    #
    #         previous_step = trajectory[-1]
    #         now_level1_cell_number = gt1.level1_array_length(trajectory, grid)
    #         if now_level1_cell_number <= 2:
    #             if len(trajectory) > 200:
    #                 print('生成的轨迹全挤在一个大网格里面')
    #                 return False
    #         else:
    #             if len(trajectory) > 100:
    #                 print('this trajectory generation cant stop')
    #                 return False
    #         if len(trajectory) > 8:
    #             if self.avoid_lingering(np.array(trajectory)):
    #                 pass
    #             else:
    #                 return False
    #         if neighbor_check:
    #             if (this_step < end_state - 2) and (previous_step < end_state - 2):
    #                 neighbor_indicator = self.check_large_neighbor(this_step, previous_step)
    #                 if neighbor_indicator is True:
    #                     pass
    #                 else:
    #                     return False
    #
    #     # while结束
    #     if len(trajectory) == 0:
    #         return False
    #     trajectory = np.array(trajectory, dtype=int)
    #     return trajectory

    def generate_trajectory(self, neighbor_check=True):
        """Generate one trajectory by sampling the Markov model step by step.

        The walk begins with a state drawn out of the virtual start state and
        is capped at the predicted shortest-path length towards a sampled end
        state; it stops early when the chain would stay in place.
        """
        gt1 = GeneralTools()
        start_state = self.markov_model.start_state_index  # virtual head state
        end_state = self.markov_model.end_state_index
        previous_step = start_state
        # First real (non-virtual) state of the trajectory.
        this_step = self.generate_no_gp_step(start_state, 0)
        real_end_state = self.choose_end(this_step)
        predicted_length = self.markov_model.calibrator.level2_shortest_path_cell_lengths[this_step, real_end_state]
        # Per-start length threshold (computed for parity with the tree variant).
        level1_len_threshold = self.get_level1_threshold_in_use(this_step)
        trajectory = [this_step]
        for _ in range(1, int(predicted_length)):
            current = trajectory[-1]
            candidate = self.sample_markov_next(current, previous_step, len(trajectory))
            if candidate == current:
                # Sampler signalled "stop here" by echoing the current state.
                break
            previous_step = current
            trajectory.append(candidate)
        return trajectory

    def generate_trajectory_by_tree(self, tree: treelib.Tree, tm: TreeModel):
        """Generate one trajectory by walking the prefix tree ``tree``.

        Returns the state trajectory as a list of ints, or False when the
        walk is rejected (too long, lingering, concentrated at level 1, or
        crossing non-neighbouring large cells).
        """
        cc1 = self.cc
        total_epsilon = cc1.total_epsilon * (cc1.epsilon_partition[1] + cc1.epsilon_partition[2])
        gt1 = GeneralTools()
        grid = self.markov_model.grid
        start_state = self.markov_model.start_state_index  # index of the virtual head state
        end_state = self.markov_model.end_state_index  # int index of the virtual end state
        previous_step = start_state
        this_step = self.generate_no_gp_step(start_state, 0)  # first real (non-virtual) state
        real_end_state = self.choose_end(this_step)
        predicted_length = self.markov_model.calibrator.level2_shortest_path_cell_lengths[this_step, real_end_state]
        level1_len_threshold = self.get_level1_threshold_in_use(this_step)  # length threshold for walks starting here
        filtered_time = 0
        level1_step_before = -1
        inner_step_in_this_large_cell = 1
        this_large_cell_inner_trajectory = []
        to_filter = False
        trajectory = [this_step]
        # Tree node identifiers look like "<level>_<state>".
        this_node = tree.get_node("1_" + str(this_step))
        while this_step != end_state:
            step_number_now = len(trajectory)
            level1_step_number = gt1.level1_array_length(trajectory, grid)
            this_step_large_cell = int(grid.subcell_index_to_large_cell_index_dict[this_step])
            this_large_cell_dividing_number = grid.level2_subdividing_parameter[this_step_large_cell]
            # Detect whether the walk has just entered a new level-1 (large) cell.
            if level1_step_number != level1_step_before:
                if level1_step_before != -1:
                    # Past the very first state: subsequent steps are filtered.
                    to_filter = True
                level1_step_before = level1_step_number
                inner_step_in_this_large_cell = 1  # reset for the new large cell
                this_large_cell_inner_trajectory = [this_step]
            else:
                if step_number_now == 2:
                    to_filter = True
                this_large_cell_inner_trajectory.append(this_step)
                inner_step_in_this_large_cell = inner_step_in_this_large_cell + 1
            # Ratio of steps taken inside the current large cell to its capacity
            # (its level-2 subdividing parameter).
            if this_large_cell_dividing_number > 1:
                inner_this_large_cell_step_ratio = inner_step_in_this_large_cell / this_large_cell_dividing_number
            else:
                inner_this_large_cell_step_ratio = 0
            if to_filter:
                if inner_this_large_cell_step_ratio > 0.4:
                    to_filter = False
                    filtered_time = filtered_time + 1
                    # Reject trajectories that are too long or too concentrated
                    # at level 1; dispersed walks are allowed to be longer.
                    if self.keep_this_trajectory_with_level1_threshold(trajectory, level1_len_threshold,
                                                                       filtered_time) is False:
                        return False



            is_retrieve_again = False
            this_level = int(this_node.identifier.split("_")[0])
            if this_node.tag == "&":
                print("遇到&节点：", this_node.identifier)

            if this_node.data['leaf']:
                if this_level == 1:
                    break;
                # Leaf below level 1: shorten the stored path from the front and
                # retry from the deepest node the tree still contains.
                path = this_node.data['path'].copy()
                length = len(path)
                for j in range(length - 1):
                    path.pop(0)
                    node = self.find_most_level_node(path, tree)
                    if not isinstance(node, bool):
                        this_node = node
                        is_retrieve_again = True
                        break
                if is_retrieve_again:
                    continue
            # Non-leaf node: pick the next step among its children.


            childrens = tree.children(this_node.identifier)
            probability = [child.data['value'] for child in childrens]
            probability = np.array(probability)
            candidates = [child.tag for child in childrens]
            # NOTE(review): index 0 appears to be the terminating "&" branch —
            # the damping below mirrors the probability[-1] (end-state) damping
            # in end_neighbor_multiplied_next_step; confirm the child ordering.
            if level1_step_number == 1:
                probability[0] = probability[0] * 0.5
                if len(trajectory) < predicted_length * 0.5:
                    probability[0] = probability[0] * 0.2
            probability[0] = probability[0] * 0.8
            if np.sum(probability) <= 0:
                # No usable counts on the children: fall back to the global
                # in-degree weights of this state's neighbours.
                neighbors_of_this_step = tm.neighbors[this_step]
                weights = self.total_in_degree[neighbors_of_this_step]
                if np.sum(weights) == 0:
                    # No in-degree information either: uniform over neighbours.
                    probability = np.zeros(neighbors_of_this_step.shape) + 1 / neighbors_of_this_step.size

                else:
                    # NOTE(review): assumes len(weights) == len(probability) - 1
                    # so the slice assignment broadcasts cleanly — confirm.
                    probability[1:] = weights[0:]
            probability = probability / np.sum(probability)

            if len(candidates) == 0:
                print("遇到空节点：", this_node.tag, this_node.identifier, this_node.data['leaf'],
                      this_node.data['path'], len(this_node.data['path']))
            next_step = self.get_by_probability(candidates, probability)
            index = candidates.index(next_step)
            next_node = childrens[index]
            if next_node.tag == "&":
                # Terminating branch chosen: the trajectory ends here.
                break

            this_node = next_node
            previous_step = this_step
            this_step = int(this_node.tag)
            trajectory.append(int(next_step))
            level1_step_number = gt1.level1_array_length(trajectory, grid)
            # Hard caps on the raw length to guarantee termination.
            if level1_step_number <= 2:
                if len(trajectory) > 200:
                    print('this trajectory generation cant stop')
                    return False
            else:
                if len(trajectory) > 100:
                    print('this trajectory generation cant stop')
                    return False
            if len(trajectory) > 8:
                # Discard walks that keep circling among a few states.
                if self.avoid_lingering(np.array(trajectory)):
                    pass
                else:
                    return False
            # Consecutive real states must lie in the same or adjacent large cells.
            if (this_step < end_state - 2) and (previous_step < end_state - 2):
                neighbor_indicator = self.check_large_neighbor(this_step, previous_step)
                if neighbor_indicator is True:
                    pass
                else:
                    return False
        return trajectory

    def find_most_level_node(self, path: list, tree: treelib.Tree):
        """Follow ``path`` down the tree and return the reached node.

        ``path[0]`` is a child index under the root; every later element is
        matched against child tags.  Returns False when any tag is absent.
        """
        root = tree.get_node("root")
        node = tree.children(root.identifier)[path[0]]
        for tag in path[1:]:
            matched = None
            # Scan every child; as in the original, a later duplicate tag wins.
            for child in tree.children(node.identifier):
                if child.tag == tag:
                    matched = child
            if matched is None:
                return False
            node = matched
        return node

    def get_by_probability(self, candidates, probability):
        """Randomly pick one element of ``candidates`` according to the
        non-negative weight vector ``probability``.

        Degenerate inputs are repaired rather than raised: an infinite sum is
        logged, a zero sum becomes uniform, NaN after normalisation falls back
        to the first candidate, and if ``np.random.choice`` still rejects the
        vector the highest-weight candidate is returned.

        Raises ValueError on an empty candidate list or negative weights.
        """
        if len(candidates) == 0:
            raise ValueError('candidates can not be empty')
        if np.min(probability) < 0:
            # BUGFIX: corrected the misspelled error message ("nagative").
            raise ValueError('probability should not be negative')

        pro_sum = np.sum(probability)
        if np.isinf(pro_sum):  # diagnose which entries blew up
            print('this is inf')
            print(probability)
            print(np.arange(probability.size)[probability > 1000000000])
        if pro_sum == 0:  # zero total weight: fall back to a uniform vector
            probability = probability + 1 / probability.size
        probability = probability / np.sum(probability)  # normalise to sum 1
        if np.isnan(probability).any():  # NaN after normalisation: take the first element
            result = candidates[0]
        else:
            try:
                # np.random.choice requires p to sum exactly to 1.
                result = np.random.choice(candidates, p=probability)
            except ValueError as e:
                print('probabilities do not sum to 1, sum is {}'.format(np.sum(probability)))
                print(pro_sum)
                print(probability)
                print('ValueError message:', str(e))
                print(len(candidates), len(probability))
                # Last resort: take the candidate with the highest probability.
                print("Using fallback method: selecting element with highest probability")
                max_index = np.argmax(probability)
                result = candidates[max_index]
        return result

    def sample_markov_next(self, this_step, previous_step, trajectory_length_now):
        """Sample the state that follows ``this_step``.

        Sensitive states (guidepost-flagged) use the second-order row keyed on
        ``previous_step``; others use the first-order noisy Markov row.
        Returns ``this_step`` itself when the row is numerically empty or when
        the last (virtual end) index is drawn — callers treat an echoed state
        as "stop here".  ``trajectory_length_now`` is accepted for interface
        compatibility but not used by the sampling itself.
        """
        mar_m = self.markov_model
        candidates = np.arange(mar_m.noisy_markov_matrix.shape[0])

        # PERF BUGFIX: the original recomputed this identical probability row
        # once per candidate inside a loop (O(n^2) guidepost/matrix lookups
        # just to copy the row elementwise); compute it once instead.
        if mar_m.guidepost_indicator[this_step]:
            # Sensitive state: second-order row from its guidepost.
            gp1 = mar_m.guidepost_set[mar_m.index_dict[this_step]]
            row = gp1.choose_direction(previous_step, 0, return_probability=True)
            if np.sum(row) == 0:
                # Empty guidepost row: fall back to the first-order model.
                row = self.generate_no_gp_step(this_step, 1, return_probability=True)
        else:
            row = self.generate_no_gp_step(this_step, 1, return_probability=True)
        candidates_probabilities = np.array(row[:len(candidates)], dtype=float)

        if candidates_probabilities.sum() < 0.00001:
            # Numerically empty row: signal termination.
            return this_step
        candidates_probabilities = candidates_probabilities / candidates_probabilities.sum()
        sample_id = np.random.choice(np.arange(len(candidates_probabilities)), p=candidates_probabilities)

        # The last index is the virtual end state: echo the current step back
        # to the caller to signal the end of the trajectory.
        if sample_id == len(candidates_probabilities) - 1:
            return this_step
        return sample_id

    def avoid_lingering(self, trajectory: np.ndarray):
        """Return True when the trajectory is NOT lingering (i.e. keep it).

        A trajectory lingers when it has too few distinct states for its
        length, or when its most-visited states (the top
        ``lingering_if_all_circle_threshold`` fraction of the length) carry
        more than ``lingering_weight_threshold`` of the total visit weight.
        """
        _, counts = np.unique(trajectory, return_counts=True)
        counts_desc = np.sort(counts)[::-1]  # visit counts, descending
        limited_length = int(np.floor(trajectory.size * self.lingering_if_all_circle_threshold))
        if counts_desc.size <= limited_length:
            # So few distinct states that the whole walk is circling.
            return False
        top_weight = np.sum(counts_desc[0:limited_length])
        return not top_weight > np.sum(counts_desc) * self.lingering_weight_threshold

    def check_large_neighbor(self, this_step, previous_step):
        """Return True when the two states' large cells are adjacent or identical."""
        relation = self.markov_model.grid.large_neighbor_or_same_by_subcell_index(this_step, previous_step)
        return relation is True or relation == 'same'

    def end_neighbor_multiplied_next_step(self, trajectory, this_step, previous_step, trajectory_length_now,
                                          level1_step_number, predicted_length):
        """Draw the next state while damping the end-state probability so
        that walks which are still short (relative to ``predicted_length``)
        or still inside their first level-1 cell rarely terminate early."""
        gt1 = GeneralTools()
        grid = self.markov_model.grid
        probability = self.generate_one_step(this_step, previous_step, trajectory_length_now,
                                             return_probability=True)
        candidates = np.arange(self.markov_model.noisy_markov_matrix.shape[0])
        # The last entry is the virtual end state; scale it down while the
        # walk is young.
        if level1_step_number == 1:
            probability[-1] *= 0.5
            if len(trajectory) < predicted_length * 0.5:
                probability[-1] *= 0.2
        probability[-1] *= 0.8

        if np.sum(probability) <= 0:
            # Degenerate row: step to a neighbour weighted by global
            # in-degree, or uniformly when no neighbour has any in-degree.
            neighbors_of_this_step = grid.subcell_neighbors_real_level2_index[this_step]
            weights = self.total_in_degree[neighbors_of_this_step]
            if np.sum(weights) == 0:
                return int(gt1.random_pick_element(neighbors_of_this_step))
            return int(np.random.choice(neighbors_of_this_step, p=weights / np.sum(weights)))
        # Draw from the damped row, excluding the virtual start state.
        return gt1.draw_by_probability_without_an_element(candidates, probability, -2)

    def choose_end(self, start):
        """Sample an end state for a trajectory beginning at ``start`` from
        the optimized start->end distribution."""
        gt1 = GeneralTools()
        # Row `start`: per-end counts of trajectories running from `start`.
        weights = self.markov_model.optimized_start_end_distribution[start, :]
        weights = weights / np.sum(weights)
        return gt1.draw_by_probability(np.arange(weights.size), weights)

    def generate_many(self, number, neighbor_check=False):
        """Generate ``number`` trajectories, retrying whenever a candidate is
        rejected (signalled by a False return from the per-trajectory call)."""
        trajectory_list = []
        if neighbor_check:
            while len(trajectory_list) < number:
                candidate = self.generate_trajectory(neighbor_check=True)
                if candidate is not False:
                    trajectory_list.append(candidate)
        else:
            print('begin generating')
            print(datetime.datetime.now())
            while len(trajectory_list) < number:
                candidate = self.generate_trajectory()
                if candidate is not False:
                    trajectory_list.append(candidate)
            print('end generating')
            print(datetime.datetime.now())
        return trajectory_list

    def generate_many_by_tree(self, number, tm: TreeModel):
        """Generate ``number`` trajectories from the prefix-tree model,
        retrying on rejected (False) candidates."""
        tree = tm.tree
        trajectory_list = []
        print('开始生成状态轨迹集')
        print(datetime.datetime.now())
        while len(trajectory_list) < number:
            candidate = self.generate_trajectory_by_tree(tree, tm)
            if candidate is not False:
                trajectory_list.append(candidate)
        print('结束生成状态轨迹集')
        print(datetime.datetime.now())
        return trajectory_list
