# -*- coding: utf-8 -*-

# File Name: tsp_state
# Description: state representation for TSP tour construction
# Author: lirui
# Create date: 2022/5/31
# Change Activity:
"""
tsp state describes the details of tsp diagrams
"""
import torch


class TSPState:
    """
    Mutable rollout state for a batch of TSP instances.

    For every graph in the batch it tracks which nodes have been visited,
    the first and most recent node of the partial tour, and the tour
    length accumulated so far. `update` is called once per decoding step
    with the newly selected node for each graph.
    """

    def __init__(self, points_location):
        """
        Args:
            points_location: node coordinates,
                shape = (batch_size, points_number, 2)
        """
        self.points_location = points_location
        # Pairwise Euclidean distances, shape (batch_size, n, n).
        self.distance = (self.points_location[:, :, None, :] - self.points_location[:, None, :, :]).norm(p=2, dim=-1)
        self.batch_size = self.distance.shape[0]
        self.num_location = self.distance.shape[1]
        device = self.points_location.device
        # Batch indices with a trailing step dimension, shape (batch_size, 1);
        # used for advanced indexing into per-graph tensors.
        self.ids = torch.arange(self.batch_size, dtype=torch.int64, device=device)[:, None]
        self.first_arrive = torch.zeros(self.batch_size, 1, dtype=torch.long, device=device)  # first node of each tour
        self.previous_arrive = torch.zeros(self.batch_size, 1, dtype=torch.long, device=device)  # most recent node
        # Visited mask, shape (batch_size, 1, n); uint8 so it doubles as a bool mask.
        self.visited_ = torch.zeros(self.batch_size, 1, self.num_location, dtype=torch.uint8, device=device)
        self.lengths = torch.zeros(self.batch_size, 1, device=device)  # accumulated tour length per graph
        self.cur_coord = None  # coords of the most recent node; None until the first step
        self.i = torch.zeros(1, dtype=torch.int64, device=device)  # number of steps taken so far

    @property
    def visited(self):
        """Return the visited mask, shape (batch_size, 1, n)."""
        if self.visited_.dtype == torch.uint8:
            return self.visited_
        # NOTE(review): mask_long2bool is not defined or imported in this file.
        # The branch is unreachable as long as visited_ keeps the uint8 dtype it
        # is created with here; if a packed-long mask is ever used, import the
        # helper from the project's utils module.
        return mask_long2bool(self.visited_, n=self.points_location.size(-2))

    def get_final_cost(self):
        """Return the total tour length, shape (batch_size, 1).

        Adds the closing leg (last node back to the first node) to the
        accumulated length. Must only be called once all nodes are visited.
        """
        assert self.all_finished()
        return self.lengths + (self.points_location[self.ids, self.first_arrive, :] - self.cur_coord).norm(p=2, dim=-1)

    def update(self, selected):
        """Advance the state by one decoding step.

        Args:
            selected: node index chosen for each graph, shape (batch_size,)

        Returns:
            self, with first/previous node, visited mask, lengths and step
            counter updated in place.
        """
        prev_a = selected[:, None]  # Add dimension for step

        # Coordinates of the newly selected node, shape (batch_size, 1, 2).
        cur_coord = self.points_location[self.ids, prev_a]
        lengths = self.lengths
        if self.cur_coord is not None:  # Don't add length for first action (selection of start node)
            lengths = self.lengths + (cur_coord - self.cur_coord).norm(p=2, dim=-1)  # (batch_dim, 1)

        # Update should only be called with just 1 parallel step, in which case
        # we can check this way if we should record the first visited node.
        first_a = prev_a if self.i.item() == 0 else self.first_arrive

        if self.visited_.dtype == torch.uint8:
            # Add one dimension since we write a single value per graph.
            visited_ = self.visited_.scatter(-1, prev_a[:, :, None], 1)
        else:
            # NOTE(review): mask_long_scatter is not defined or imported in this
            # file; unreachable while visited_ stays uint8 (see `visited`).
            visited_ = mask_long_scatter(self.visited_, prev_a)

        return self._replace(first_a=first_a, prev_a=prev_a, visited_=visited_,
                             lengths=lengths, cur_coord=cur_coord, i=self.i + 1)

    def _replace(self, first_a, prev_a, visited_, lengths, cur_coord, i):
        # Named after namedtuple._replace for API familiarity, but note it
        # mutates this instance in place and returns self.
        self.first_arrive = first_a
        self.previous_arrive = prev_a
        self.visited_ = visited_
        self.lengths = lengths
        self.cur_coord = cur_coord
        self.i = i
        return self

    def all_finished(self):
        """True once exactly n steps have been taken (all nodes visited)."""
        return self.i.item() >= self.points_location.size(-2)

    def get_current_node(self):
        """Return the most recently visited node index, shape (batch_size, 1)."""
        return self.previous_arrive

    def get_mask(self):
        """Boolean/uint8 mask of nodes that may no longer be selected."""
        return self.visited > 0  # Hacky way to return bool or uint8 depending on pytorch version

    def get_nn(self, k=None):
        """Return the indices of the k nearest unvisited neighbors per node.

        Args:
            k: number of neighbors; defaults to the number of remaining
               (unvisited) nodes.
        """
        if k is None:
            k = self.points_location.size(-2) - self.i.item()  # Number of remaining nodes
        # Visited nodes get a large penalty so they never rank among the nearest.
        return (self.distance[self.ids, :, :] + self.visited.float()[:, :, None, :] * 1e6).topk(k, dim=-1, largest=False)[1]

    @staticmethod
    def construct_solutions(actions):
        """The action sequence already is the tour; return it unchanged."""
        return actions
