import numpy as np
import math
import networkx as nx
from discretization.grid import Grid
import config.folder_and_file_names as config
# from tools.object_store import ObjectStore
from tools.general_tools import GeneralTools
from config.parameter_carrier import ParameterCarrier
import torch
import torch.optim as optim
import cvxpy as cp


class StartEndCalibrator:

    #
    def __init__(self, cc: ParameterCarrier):
        """Hold the noisy start/end frequencies and path-length tables used for calibration."""
        self.cc = cc
        self.start_index_array = np.array([], dtype=int)
        # Row of the Markov transition matrix belonging to the virtual start node.
        self.start_values_array = np.array([])
        self.end_index_array = np.array([], dtype=int)
        self.end_values_array = np.array([])
        # Shortest-path length (in level-2 cells) between every pair of sub-grid cells.
        self.level2_shortest_path_cell_lengths = np.array([], dtype=int)
        # Length of the de-duplicated large-cell path corresponding to each shortest path.
        self.level2_shortest_path_cell_lengths_transform_unique_large_cell_length = np.array([], dtype=int)
        self.total_trajectory_number = -1
        self.geo_lengths = np.array([])
        # Boolean mask: whether a two-step (large-cell) path exists for a pair.
        self.large_trans_indicator = np.array([])

    #
    # def init_network(self, grid: Grid):
    #     # step1 get central points of states
    #     central_point_gps_array = grid.level2_cell_central_points()
    #     self.substate_number = grid.subcell_number
    #     for state_index in range(self.substate_number):
    #         neighbors = grid.subcell_neighbors_real_level2_index[state_index]
    #         for neighbor_index in neighbors:
    #             distance = self.distance_of_central_points(central_point_gps_array, state_index, neighbor_index)
    #             self.distance_network.add_edge(state_index, neighbor_index, weight=distance)

    #
    # def distance_of_central_points(self, central_points_gps, state_index1, state_index2):
    #     state1_central_point_gps = central_points_gps[state_index1]
    #     state2_central_point_gps = central_points_gps[state_index2]
    #     displacement = state1_central_point_gps - state2_central_point_gps
    #     displacement_square = displacement ** 2
    #     distance = np.sqrt(np.sum(displacement_square))
    #     return distance

    #
    def setup_calibrator(self, grid: Grid, noisy_matrix, large_trans_indicator):
        """Load noisy start/end frequencies and precompute pairwise path lengths.

        The last two rows/columns of ``noisy_matrix`` are virtual states: row -2
        holds the start frequencies, column -1 the end frequencies.
        """
        self.total_trajectory_number = grid.trajectory_number
        self.large_trans_indicator = large_trans_indicator
        self.start_index_array = np.arange(grid.subcell_number)
        self.end_index_array = np.arange(grid.subcell_number)
        self.start_values_array = noisy_matrix[-2, :-2]
        self.end_values_array = noisy_matrix[:-2, -1]
        self.calculate_shortest_path_length(grid)

    #
    def calculate_shortest_path_length(self, grid: Grid):
        """Fill the pairwise shortest-path length tables for all (start, end) state pairs.

        For each pair we store (a) the number of level-2 cells on the Dijkstra
        shortest path (endpoints included) and (b) the number of distinct large
        cells that path passes through.
        """
        n_starts = self.start_index_array.size
        n_ends = self.end_index_array.size
        self.level2_shortest_path_cell_lengths = np.full((n_starts, n_ends), -1.0)
        self.level2_shortest_path_cell_lengths_transform_unique_large_cell_length = np.full((n_starts, n_ends), -1.0)
        for src in range(n_starts):
            for dst in range(n_ends):
                # Path is a list of subcell indices, start and end points included.
                path = nx.dijkstra_path(grid.distance_network, source=src, target=dst)
                # De-duplicated large-cell sequence covering the same route.
                large_cell_path = grid.non_repeat_large_cell_array_from_shortest_path(path)
                self.level2_shortest_path_cell_lengths[src, dst] = len(path)
                self.level2_shortest_path_cell_lengths_transform_unique_large_cell_length[src, dst] = len(large_cell_path)


    #
    def break_constraints(self, start_end_trip_weights):
        """Return True when the candidate trip-weight matrix violates positivity.

        Accepts a torch tensor (detached to numpy first) or a numpy array.
        Every weight must be strictly positive; any value <= 0 breaks the
        constraint. (Removed the unused ``cc1 = self.cc`` local and collapsed
        the verbose if/else into a single boolean expression.)
        """
        if isinstance(start_end_trip_weights, torch.Tensor):
            start_end_trip_weights = start_end_trip_weights.detach().numpy()
        # Strict positivity: zero entries are rejected too.
        return bool((start_end_trip_weights <= 0).any())

    #
    def error_function(self, distribution):
        """Total squared calibration error: start-side plus end-side mismatch."""
        return self.start_distribution_error(distribution) + self.end_distribution_error(distribution)
    def start_distribution_error(self, distribution):
        """Sum of squared per-start-state errors.

        The accumulator type mirrors ``distribution`` so the result stays
        differentiable (torch), symbolic (cvxpy), or numeric (numpy).
        """
        if isinstance(distribution, torch.Tensor):
            total = torch.zeros(1, requires_grad=True)
        elif isinstance(distribution, cp.Variable):
            total = 0
        else:
            total = np.zeros(1)
        for start_idx in range(self.start_index_array.size):
            residual = self.error_of_inner_start_i(distribution, start_idx)
            total = total + residual * residual
        return total
    def end_distribution_error(self, distribution):
        """Sum of squared per-end-state errors.

        The accumulator type mirrors ``distribution`` so the result stays
        differentiable (torch), symbolic (cvxpy), or numeric (numpy).
        """
        if isinstance(distribution, torch.Tensor):
            total = torch.zeros(1, requires_grad=True)
        elif isinstance(distribution, cp.Variable):
            total = 0
        else:
            total = np.zeros(1)
        for end_idx in range(self.end_index_array.size):
            residual = self.error_of_inner_end_i(distribution, end_idx)
            total = total + residual * residual
        return total

    def expect_length_in_geometric_length_distribution(self, shortest_length):
        """Expected trip length under a geometric length model scaled by ``shortest_length``."""
        decay = np.exp(-(1 / shortest_length))
        head = 0.5 * shortest_length * (1 - decay)
        tail = np.exp(-((shortest_length + 1) / shortest_length)) / ((1 - decay) ** 2)
        return head + tail

    #
    def error_of_inner_start_i(self, distribution, inner_start_index):
        """Length-normalized trip count leaving one start state minus its noisy frequency."""
        noisy_frequency = self.start_values_array[inner_start_index]
        if isinstance(distribution, torch.Tensor):
            normalized_count = torch.zeros(1, requires_grad=True)
        elif isinstance(distribution, cp.Variable):
            normalized_count = 0
        else:
            normalized_count = np.zeros(1)
        for inner_end_index in range(self.end_index_array.size):
            # Convert the raw trip count into an expected per-step frequency.
            expected_length = self.expect_length_in_geometric_length_distribution(
                self.level2_shortest_path_cell_lengths[inner_start_index, inner_end_index])
            normalized_count = normalized_count + distribution[inner_start_index, inner_end_index] / expected_length
        return normalized_count - noisy_frequency

    def error_of_inner_end_i(self, distribution, inner_end_index):
        """Length-normalized trip count entering one end state minus its noisy frequency."""
        noisy_frequency = self.end_values_array[inner_end_index]
        if isinstance(distribution, torch.Tensor):
            normalized_count = torch.zeros(1, requires_grad=True)
        elif isinstance(distribution, cp.Variable):
            normalized_count = 0
        else:
            normalized_count = np.zeros(1)
        for inner_start_index in range(self.start_index_array.size):
            # Convert the raw trip count into an expected per-step frequency.
            expected_length = self.expect_length_in_geometric_length_distribution(
                self.level2_shortest_path_cell_lengths[inner_start_index, inner_end_index])
            normalized_count = normalized_count + distribution[inner_start_index, inner_end_index] / expected_length
        return normalized_count - noisy_frequency

    #
    def attractiveness_of_states(self, noisy_matrix):
        """Per-state attractiveness: mean of a state's outgoing and incoming noisy mass.

        The last two rows/columns (the virtual start/end states) are excluded.
        Replaces the original per-row Python loop with vectorized axis sums —
        identical result, one pass in C instead of n passes.
        """
        useful_degrees = noisy_matrix[:-2, :-2]
        # Row sum = outgoing mass, column sum = incoming mass; average the two.
        return (np.sum(useful_degrees, axis=1) + np.sum(useful_degrees, axis=0)) / 2

    def distribution_optimization_with_simple_gravity_model2(self, noisy_matrix, loose_parameter=20):
        """Gravity-model variant: optimize a scalar total, spread it over states by fixed weights.

        NOTE(review): this variant looks experimental; see the inline notes on
        suspected shape/axis problems before relying on it.
        """
        discrete_lengths = self.level2_shortest_path_cell_lengths
        row_distribution = cp.Variable(self.start_index_array.size)
        # Only the total trajectory count drives the objective; start/end fits are constraints.
        objective = cp.Minimize(cp.square(cp.sum(row_distribution) - self.total_trajectory_number))
        attractiveness = self.attractiveness_of_states(noisy_matrix)
        # Gravity weights: destination attractiveness divided by geographic pair distance.
        distribution_weights = np.tile(attractiveness, (attractiveness.shape[0], 1)) / self.geo_lengths
        # NOTE(review): dividing the (n, n) matrix by the (n,) row-norm vector
        # broadcasts over the LAST axis, i.e. column j is divided by row j's norm;
        # the sibling ...gravity_model() uses a transpose to normalize rows instead.
        # Confirm which orientation is intended.
        normalized_weights = distribution_weights / np.linalg.norm(distribution_weights, axis=1, ord=2)
        # row_distribution.T @ ones collapses to a scalar total, so `distribution`
        # below is the fixed weight matrix scaled by that one scalar.
        no_weights_distribution = row_distribution.T @ np.ones(attractiveness.shape[0])
        distribution = cp.multiply(normalized_weights, no_weights_distribution)
        # Dividing by path length converts trip counts to expected per-step frequencies.
        expected_divided_start = cp.sum(cp.multiply(distribution, 1 / discrete_lengths), axis=0)
        expected_divided_end = cp.sum(cp.multiply(distribution, 1 / discrete_lengths), axis=1)
        constraints = [distribution >= 0,
                       expected_divided_start <= self.start_values_array + loose_parameter,
                       expected_divided_start >= self.start_values_array - loose_parameter,
                       expected_divided_end <= self.end_values_array + loose_parameter,
                       expected_divided_end >= self.end_values_array - loose_parameter]
        prob = cp.Problem(objective, constraints)
        prob.solve(solver=cp.SCS)
        row_distribution_v = row_distribution.value
        if not isinstance(row_distribution_v, np.ndarray):
            # Solver produced no value.
            return None
        else:
            # NOTE(review): np.matmul of an (n, 1) matrix with this 1-D solution
            # vector only conforms when n == 1; np.outer was probably intended.
            all_distribution = np.matmul(np.ones((attractiveness.shape[0], 1)), row_distribution_v) * normalized_weights
            return all_distribution

    #
    def distribution_optimization_with_simple_gravity_model(self, noisy_matrix, loose_parameter=20):
        """Gravity-model calibration: fit start/end frequencies, loosely constrain the total.

        A single optimized (1, n) row vector is replicated down the rows and
        reweighted by row-normalized attractiveness/distance gravity weights.
        Returns the fitted (start, end) trip matrix value (None if unsolved).
        """
        discrete_lengths = self.level2_shortest_path_cell_lengths
        row_distribution = cp.Variable((1, self.start_index_array.size))
        attractiveness = self.attractiveness_of_states(noisy_matrix)
        # Gravity weights: destination attractiveness damped by sqrt of geographic distance.
        distribution_weights = np.tile(attractiveness, (self.start_index_array.size, 1)) / np.sqrt(self.geo_lengths)
        # Transpose/divide/transpose normalizes each ROW to unit L1 norm.
        normalized_weights = (distribution_weights.transpose() / np.linalg.norm(distribution_weights, axis=1, ord=1)).transpose()
        # Replicate the optimized row vector across all start rows.
        no_weights_distribution = np.ones((attractiveness.shape[0], 1)) @ row_distribution
        distribution = cp.multiply(normalized_weights, no_weights_distribution)
        # Dividing by path length converts trip counts to expected per-step frequencies.
        divided_weights = cp.multiply(distribution, 1 / discrete_lengths)
        # Row sums (axis=1) match per-start frequencies; column sums (axis=0) per-end.
        expected_divided_start = cp.sum(divided_weights, axis=1)
        expected_divided_end = cp.sum(divided_weights, axis=0)
        total_trajectory_number_error = cp.square(cp.sum(distribution) - self.total_trajectory_number)
        objective = cp.Minimize(cp.norm(expected_divided_start - self.start_values_array, 2) + cp.norm(expected_divided_end - self.end_values_array, 2))
        constraints = [distribution >= 0,
                       total_trajectory_number_error <= loose_parameter]
        prob = cp.Problem(objective, constraints)
        prob.solve(solver=cp.ECOS)
        distribution_v = distribution.value
        return distribution_v

    #
    def distribution_optimization_with_simple_gravity_model3(self, noisy_matrix, loose_parameter=20):
        """Gravity-model variant using L2 norms and the opposite start/end axes.

        NOTE(review): unlike ...gravity_model(), the normalization here divides
        the (n, n) weight matrix by the (n,) row-norm vector WITHOUT a transpose
        (so columns get scaled by row norms), and the start/end sums use the
        swapped axes (start=axis 0, end=axis 1). Confirm which orientation is
        intended before relying on this variant.
        """
        discrete_lengths = self.level2_shortest_path_cell_lengths
        row_distribution = cp.Variable((1, self.start_index_array.size))
        attractiveness = self.attractiveness_of_states(noisy_matrix)
        # Gravity weights: destination attractiveness damped by sqrt of geographic distance.
        distribution_weights = np.tile(attractiveness, (self.start_index_array.size, 1)) / np.sqrt(self.geo_lengths)
        normalized_weights = distribution_weights / np.linalg.norm(distribution_weights, axis=1, ord=2)
        # Replicate the optimized row vector across all start rows.
        no_weights_distribution = np.ones((attractiveness.shape[0], 1)) @ row_distribution
        distribution = cp.multiply(normalized_weights, no_weights_distribution)
        # Dividing by path length converts trip counts to expected per-step frequencies.
        divided_weights = cp.multiply(distribution, 1 / discrete_lengths)
        expected_divided_start = cp.sum(divided_weights, axis=0)
        expected_divided_end = cp.sum(divided_weights, axis=1)
        total_trajectory_number_error = cp.square(cp.sum(distribution) - self.total_trajectory_number)
        objective = cp.Minimize(cp.norm(expected_divided_start - self.start_values_array, 2) + cp.norm(expected_divided_end - self.end_values_array, 2))
        constraints = [distribution >= 0,
                       total_trajectory_number_error <= loose_parameter]
        prob = cp.Problem(objective, constraints)
        prob.solve(solver=cp.ECOS)
        distribution_v = distribution.value
        return distribution_v

    def distribution_optimization_torch(self):
        """Gradient-descent calibration of the trip matrix with Adam.

        Fixes a NameError in the original: ``Variable`` (the long-deprecated
        torch.autograd.Variable wrapper) was never imported. Plain tensors have
        carried autograd state since PyTorch 0.4, so the wrapper is dropped.
        The duplicated initialization code is factored into a local helper and
        the redundant second ``requires_grad = True`` assignment is removed.

        Returns the optimized (start, end) trip-count matrix as a numpy array.
        """
        def fresh_distribution():
            # Random positive matrix scaled so all entries sum to the trajectory total.
            d = torch.abs(torch.randn([self.start_index_array.size, self.end_index_array.size]))
            d = d / d.sum() * self.total_trajectory_number
            d.requires_grad = True
            return d

        distribution = fresh_distribution()
        # Resample until the starting point is strictly positive everywhere.
        while self.break_constraints(distribution):
            distribution = fresh_distribution()
        optimizer = optim.Adam([distribution], lr=0.0001)
        num_epochs = 10000
        for epoch in range(num_epochs):
            optimizer.zero_grad()
            loss = self.error_function(distribution)
            loss.backward()
            optimizer.step()
            # Stop as soon as a step pushes any entry out of the feasible region.
            if self.break_constraints(distribution):
                break
            if epoch % 50 == 0:
                print(loss)
        return distribution.detach().numpy()

    #
    def distribution_optimization_cvxpy(self):
        """Minimize squared start/end error with non-negativity and a fixed total count.

        Fixes the total-count constraint: the original wrapped the cvxpy
        Variable in ``np.sum(np.array(...))``, which bypasses cvxpy's
        expression building; ``cp.sum`` is the supported way to constrain the
        sum of a Variable. Returns the cvxpy Variable (callers read ``.value``),
        matching the original return contract.
        """
        distribution = cp.Variable((self.start_index_array.size, self.end_index_array.size))
        objective = cp.Minimize(self.error_function(distribution))
        constraints = [distribution >= 0,
                       cp.sum(distribution) == self.total_trajectory_number]
        prob = cp.Problem(objective, constraints)
        prob.solve()
        return distribution

    #
    def distribution_optimization_cvxpy12(self, loose_parameter=20):
        """Constraint-driven calibration: match the total count, box-constrain start/end fits.

        Fixes two defects in the original:
        * the shortest-path length table was clamped IN PLACE, silently mutating
          ``self.level2_shortest_path_cell_lengths`` for every later caller —
          a copy is clamped instead;
        * the start/end sums used swapped axes: start frequencies index rows of
          ``distribution``, so the start fit must sum each row over its end
          states (axis=1), consistent with error_of_inner_start_i and
          distribution_optimization_cvxpy2.
        The repeated cp.multiply expression is also built once.
        """
        distribution = cp.Variable((self.start_index_array.size, self.end_index_array.size))
        objective = cp.Minimize(cp.square(cp.sum(distribution) - self.total_trajectory_number))
        # Clamp a copy so the shared table is not corrupted.
        lengths = self.level2_shortest_path_cell_lengths.copy()
        lengths[lengths < 1] = 1
        # Dividing by path length converts trip counts to expected per-step frequencies.
        divided = cp.multiply(distribution, 1 / lengths)
        start_fit = cp.sum(divided, axis=1)
        end_fit = cp.sum(divided, axis=0)
        constraints = [distribution >= 0,
                       start_fit <= self.start_values_array + loose_parameter,
                       start_fit >= self.start_values_array - loose_parameter,
                       end_fit <= self.end_values_array + loose_parameter,
                       end_fit >= self.end_values_array - loose_parameter]
        prob = cp.Problem(objective, constraints)
        prob.solve(solver=cp.SCS)
        return distribution.value

    #
    def distribution_optimization_cvxpy2(self, loose_parameter=20):
        """Least-squares calibration of the trip matrix against noisy start/end frequencies.

        Minimizes the L2 error of the length-normalized start (row-sum, axis=1)
        and end (column-sum, axis=0) fits, keeping the total trip count within
        ``loose_parameter`` of the recorded trajectory number. Tries ECOS first
        and falls back to SCS.

        Fixes in this revision:
        * the bare ``except:`` (which also caught KeyboardInterrupt/SystemExit)
          is narrowed to ``except Exception``;
        * ``return`` inside ``finally`` silently discarded any exception raised
          by the SCS fallback itself — the retry and return now run after the
          handler instead.

        Returns the solved matrix value, or None if no solve attempt succeeded.
        """
        # Pairs with no two-step large-cell transition collapse to length 0;
        # clamp to 0.01 to avoid division by zero (multiplication copies, so the
        # shared table is untouched).
        lengths = self.level2_shortest_path_cell_lengths * self.large_trans_indicator
        lengths[lengths < 0.01] = 0.01
        distribution = cp.Variable((self.start_index_array.size, self.end_index_array.size))
        start_error = cp.norm(cp.sum(cp.multiply(distribution, 1 / lengths), axis=1) - self.start_values_array)
        end_error = cp.norm(cp.sum(cp.multiply(distribution, 1 / lengths), axis=0) - self.end_values_array)
        objective = cp.Minimize(start_error + end_error)
        constraints = [distribution >= 0,
                       cp.square(cp.sum(distribution) - self.total_trajectory_number) <= loose_parameter ** 2]
        prob = cp.Problem(objective, constraints)
        print(cp.installed_solvers())
        try:
            prob.solve(solver=cp.ECOS)
        except Exception:
            prob.solve(solver=cp.SCS)
            print("使用SCS求解")
            if hasattr(prob, 'solver_stats') and hasattr(prob.solver_stats, 'num_iters'):
                print(f"SCS iterations: {prob.solver_stats.num_iters}")
            else:
                print("Iteration count not available in this CVXPY version")
        # One more SCS attempt if the solve finished without producing a value.
        if distribution.value is None:
            prob.solve(solver=cp.SCS)
        return distribution.value


    def optimized_non_length_divided_distribution(self, divided_distribution):
        """Rescale the optimized matrix to the trajectory total, dropping near-zero entries.

        Entries below 0.8 after the first rescale are zeroed, then the matrix is
        rescaled again so it still sums to the total trajectory number.
        """
        total = self.total_trajectory_number
        rescaled = divided_distribution / np.sum(divided_distribution) * total
        rescaled[rescaled < 0.8] = 0
        return rescaled / np.sum(rescaled) * total

    #
    def distribution_calibration(self, grid: Grid, noisy_matrix, large_trans_indicator):
        """Top-level entry point: calibrate the (start, end) trip-count matrix.

        Runs the cvxpy least-squares optimization, retrying up to 10 times at
        the default tolerance and then with progressively looser total-count
        bounds until the solver returns a value, then rescales/thresholds the
        result. ``divided_distribution[i, j]`` is the (length-normalized)
        number of trajectories from start state i to end state j.
        (Removed the unused ``cc = self.cc`` local.)
        """
        self.setup_calibrator(grid, noisy_matrix, large_trans_indicator)
        divided_distribution = self.distribution_optimization_cvxpy2()
        retries = 0
        while divided_distribution is None and retries < 10:
            divided_distribution = self.distribution_optimization_cvxpy2()
            retries += 1
        # Still unsolved: progressively loosen the total-count constraint
        # (loose_parameter = 4, 9, 16, ...) until a solution appears.
        loose_multiplier = 2
        while divided_distribution is None:
            divided_distribution = self.distribution_optimization_cvxpy2(loose_parameter=loose_multiplier ** 2)
            loose_multiplier = loose_multiplier + 1
        return self.optimized_non_length_divided_distribution(divided_distribution)

