"""Defines the main task for the VRPTW.

The VRPTW is defined by the following traits:
    1. Each customer has a demand in [1, 9], which must be serviced by the robot.
    2. Each customer has a time window in [2,8], that start from 0 and end at 48.
    3. Each robot has a capacity, they must visit all cities.
    4. Each robot must visit customer before upper time window.
    5. If robot arrive in customer before lower time window, it must wait until the customer open.
    6. When the vehicle load is 0, it must return to the depot to refill
    7. The depot will reset the load and vtime.
"""

import numpy as np
import torch
from torch.utils.data import Dataset
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt


class VRPTWDataset(Dataset):
    def __init__(self, num_samples,input_size,
                 max_load=20, max_demand=9,
                 min_TW=2,max_TW=8,TW_from=0,TW_to=48,ServiceTime=0,V_speed=0.7,
                 seed=None):
        super(VRPTWDataset, self).__init__()

        if max_load < max_demand:
            raise ValueError(':param max_load[{}]: must be > max_demand[{}]'.format(max_load,max_demand))
        if max_TW < min_TW:
            raise ValueError(':param max_TW[{}]: must be > min_TW[{}]'.format(max_TW,min_TW))
        if TW_to < TW_from:
            raise ValueError(':param TW_end[{}]: must be > TW_from[{}]'.format(TW_to,TW_from))
        if seed is None:
            seed = np.random.randint(1234567890)

        np.random.seed(seed)
        torch.manual_seed(seed)

        self.num_nodes=input_size
        self.num_samples = num_samples
        self.max_load = max_load
        self.max_demand = max_demand

        self.min_TW=min_TW
        self.max_TW=max_TW
        self.tw_from=TW_from
        self.tw_end=TW_to

        # The self.time_time for tw normalization
        self.time_time=TW_to-TW_from
        self.servicetime=ServiceTime/(TW_to-TW_from)

        # V_speed is a time weighted speed
        self.v_speed=V_speed*(TW_to-TW_from)

        # Depot location will be the first node
        self.locations = torch.rand((num_samples, 2, input_size + 1))

        # Note that we only use a variable between [0, 1]
        # to prevent large numbers entering the neural network
        tw_start=torch.randint(TW_from,TW_to-max_TW+1,(num_samples, 1, input_size + 1),dtype=torch.float)
        tw_span=torch.randint(min_TW,max_TW+1,(num_samples, 1, input_size + 1),dtype=torch.float)
        tw_end=(tw_start+tw_span-TW_from)/float(TW_to-TW_from)
        tw_start=(tw_start-TW_from)/float(TW_to-TW_from)
        # Robot can visit depot all the time
        tw_start[:,0,0]=0.
        tw_end[:,0,0]=1.2


        self.static = torch.cat((self.locations,tw_start,tw_end),1)

        dynamic_shape = (num_samples, 1, input_size + 1)
        # All states will broadcast the robot current load
        loads = torch.full(dynamic_shape, 1.)

        # vtime is different from step, it is a route related time.
        # All states will broadcast the robot current vtime
        vtime=torch.full(dynamic_shape,0.)

        # All states will have their own intrinsic demand in [1, max_demand)
        demands = torch.randint(1, max_demand + 1, dynamic_shape,dtype=torch.float)
        demands = demands / float(max_load)

        demands[:, 0, 0] = 0  # depot starts with a demand of 0
        self.dynamic = torch.cat((loads, demands,vtime), 1)

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        # (static, dynamic, start_loc)
        return (self.static[idx], self.dynamic[idx],
                self.static[idx, :, 0:1])

    def update_mask(self, dynamic, chosen_idx=None):
        """Mask the nodes that beyond load
        :param dynamic: torch.tensor of size (batch_size,dynamic_len,seq_len)
        :param chosen_idx: torch.tensor of size (batch_size)
        :return: mask: torch.tensor of size (batch_size,seq_len)
        """

        # Convert floating point to integers for calculations
        loads = dynamic.data[:, 0]  # (batch_size, seq_len)
        demands = dynamic.data[:, 1]  # (batch_size, seq_len)

        # If there is no positive demand left, we can end the tour.
        # Note that the first node is the depot, which always has a negative demand
        if demands.eq(0).all():
            return demands * 0.

        # Otherwise, we can choose to go anywhere where demand is > 0
        new_mask = demands.ne(0) * demands.lt(loads)

        # We should avoid traveling to the depot back-to-back
        repeat_home = chosen_idx.ne(0)

        if repeat_home.any():
            new_mask[repeat_home.nonzero(), 0] = 1.
        if ~repeat_home.any():
            new_mask[(~repeat_home).nonzero(), 0] = 0.

        # Unless we're waiting for all other samples in a minibatch to finish
        has_no_load = loads[:, 0].eq(0).float()
        has_no_demand = demands[:, 1:].sum(1).eq(0).float()

        combined = (has_no_load + has_no_demand).gt(0)
        if combined.any():
            new_mask[combined.nonzero(), 0] = 1.
            new_mask[combined.nonzero(), 1:] = 0.

        return new_mask.float()

    def update_time_mask(self,dynamic, chosen_idx,time_matrix,static):
        '''Mask the nodes that can't be serve before end time
        :param dynamic: torch.tensor of size (batch_size,dynamic_len,seq_len)
        :param chosen_idx: torch.tensor of size (batch_size)
        :param time_matrix: torch.tensor of size (batch_size,seq_len,seq_len)
        :param: static: torch.tensor of size (batch_size,dynamic_len,seq_len)
        :return: mask: torch.tensor of size (batch_size,seq_len)
        '''

        batch_size = len(chosen_idx)
        vtime = dynamic.data[:,2].clone()  # (batch_size, seq_len)
        endtime = static[:, -1]  # (batch_size,seq_len)

        # get the time matrix of chosen point from time_matrix
        chosen_time=time_matrix[range(batch_size),chosen_idx]   # (batch,seq_len)
        new_time=chosen_time+vtime

        # compare to end time windows
        time_mask=endtime.ge(new_time)
        time_mask[:,0]=1    # the vehicle can back to depot anytime
        return time_mask.float()

    def update_dynamic(self, dynamic, chosen_idx, pre_chosen_idx,time_matrix,static):
        '''Update the environment according to decision
        :param dynamic: torch.tensor of size (batch_size,dynamic_len,seq_len)
        :param chosen_idx: torch.tensor of size (batch_size)
        :param re_chosen_idx: torch.tensor of size (batch_size)
        :param time_matrix: torch.tensor of size (batch_size,seq_len,seq_len)
        :param: static: torch.tensor of size (batch_size,dynamic_len,seq_len)
        :return: dynamic: torch.tensor of size(batch_size,dynamic_size,seq_len)
        '''
        batch_size = len(chosen_idx)
        # Update the dynamic elements differently for if we visit depot vs. a city
        visit = chosen_idx.ne(0)
        depot = chosen_idx.eq(0)
        int_chosen_idx=chosen_idx

        # Clone the dynamic variable so we don't mess up graph
        all_loads = dynamic[:, 0].clone()
        all_demands = dynamic[:, 1].clone()
        all_vtime=dynamic[:,2].clone()

        load = torch.gather(all_loads, 1, chosen_idx.unsqueeze(1))
        demand = torch.gather(all_demands, 1, chosen_idx.unsqueeze(1))
        vtime=torch.gather(all_vtime,1,chosen_idx.unsqueeze(1))


        # Across the minibatch - if we've chosen to visit a city, try to satisfy
        # as much demand as possible
        if visit.any():
            new_load = torch.clamp(load - demand, min=0)
            new_demand = torch.clamp(demand - load, min=0)

            # change time include route time and service time
            rout_time=time_matrix[range(batch_size),pre_chosen_idx,chosen_idx].unsqueeze(1) # (batch_size)
            # TODO if vehicle arrived before the start time, the vehicle will leave at start time + service time

            startime = static[:, -2]
            startime=startime[range(batch_size),chosen_idx].unsqueeze(1) # (batch_size)
            temp_vtime=torch.clamp(vtime+rout_time,max=1) #(batch)
            temp_vtime=torch.where(temp_vtime<startime,startime,temp_vtime)
            new_vtime=torch.clamp(temp_vtime+self.servicetime,max=1)

            # Broadcast the load to all nodes, but update demand seperately
            visit_idx = visit.nonzero().squeeze()

            all_loads[visit_idx] = new_load[visit_idx]
            all_demands[visit_idx, chosen_idx[visit_idx]] = new_demand[visit_idx].view(-1)
            all_demands[visit_idx, 0] = -1. + new_load[visit_idx].view(-1)
            all_vtime[visit_idx]=new_vtime[visit_idx]

        # Return to depot to fill vehicle load
        if depot.any():
            all_loads[depot.nonzero().squeeze()] = 1.
            all_demands[depot.nonzero().squeeze(), 0] = 0.
            all_vtime[depot.nonzero().squeeze()]=0.

        new_dynamic = torch.cat((all_loads.unsqueeze(1), all_demands.unsqueeze(1),all_vtime.unsqueeze(1)), 1)
        return torch.tensor(new_dynamic.data, device=dynamic.device)


def reward(static, tour_indices):
    """Negative total Euclidean length of each tour in the batch.
    :param static: torch.tensor of size (batch_size,static_len,seq_len)
    :param tour_indices: torch.tensor of size (batch_size,tour_len)
    :return tour_len: torch.tensor of size (batch_size)
    """

    # Gather the visited columns of the static features in visit order.
    gather_idx = tour_indices.unsqueeze(1).expand(-1, static.size(1), -1)
    visited = torch.gather(static.data, 2, gather_idx)
    # Keep only the (x, y) rows and move them to the last axis.
    coords = visited[:, :2, :].permute(0, 2, 1)

    # Pad with the depot on both ends so every tour starts/ends there.
    depot = static.data[:, :2, 0].unsqueeze(1)
    path = torch.cat((depot, coords, depot), dim=1)

    # Per-segment Euclidean distance between consecutive points.
    segment = path[:, 1:] - path[:, :-1]
    seg_len = torch.sqrt(torch.sum(torch.pow(segment, 2).type(torch.float), dim=2))

    # For NN training the reward is the negated tour length.
    return -seg_len.sum(1)


def render(static, tour_indices, save_path):
    """Plot the tours of (up to 9) samples and save the figure to disk.
    :param static: torch.tensor of size (batch_size,static_len,seq_len)
    :param tour_indices: torch.tensor of size (batch_size,tour_len)
    :param save_path: str, path passed to ``plt.savefig``
    :return None
    """

    plt.close('all')

    # Use a 3x3 grid when the batch is large enough, otherwise a single plot.
    num_plots = 3 if int(np.sqrt(len(tour_indices))) >= 3 else 1

    _, axes = plt.subplots(nrows=num_plots, ncols=num_plots,
                           sharex='col', sharey='row')

    if num_plots == 1:
        axes = [[axes]]
    # Flatten the 2-D grid of axes into a flat list.
    axes = [a for ax in axes for a in ax]

    for i, ax in enumerate(axes):

        # Convert the indices back into a tour of static columns.
        idx = tour_indices[i]
        if len(idx.size()) == 1:
            idx = idx.unsqueeze(0)

        idx = idx.expand(static.size(1), -1)
        data = torch.gather(static[i].data, 1, idx).cpu().numpy()

        # Prepend/append the depot so every route starts and ends there.
        start = static[i, :, 0].cpu().data.numpy()
        x = np.hstack((start[0], data[0], start[0]))
        y = np.hstack((start[1], data[1], start[1]))
        startTW = np.hstack((start[2], data[2], start[2]))  # NOTE(review): unused below
        endTW = np.hstack((start[3], data[3], start[3]))    # NOTE(review): unused below

        # Assign each subtour (a depot-to-depot segment) its own colour and
        # label, in the order traveled.
        idx = np.hstack((0, tour_indices[i].cpu().numpy().flatten(), 0))
        where = np.where(idx == 0)[0]

        for j in range(len(where) - 1):

            low = where[j]
            high = where[j + 1]

            # Skip back-to-back depot visits (empty subtours).
            if low + 1 == high:
                continue

            ax.plot(x[low: high + 1], y[low: high + 1], zorder=1, label=j)

        ax.legend(loc="upper right", fontsize=3, framealpha=0.5)
        ax.scatter(x, y, s=4, c='r', zorder=2)
        ax.scatter(x[0], y[0], s=20, c='k', marker='*', zorder=3)  # depot marker
        ax.set_xlim(0, 1)
        ax.set_ylim(0, 1)

        # Annotate each customer point with its visiting order.
        # NOTE(review): this loop rebinds x, y and the outer index i; harmless
        # here because they are reassigned each outer iteration, but fragile.
        tour_number = 1
        for x, y, i in zip(x, y, idx):
            if i:
                tw_str = '%i' % (tour_number)
                ax.annotate(tw_str, (x, y), fontsize=3)
                tour_number += 1


    plt.tight_layout()
    plt.savefig(save_path, bbox_inches='tight', dpi=600)

