# -*- coding: utf-8 -*-
'''
Created on Apr 2, 2014

@author: rgrunitzki
'''

'''
Route-learning simulation: drives repeated SUMO episodes through TraCI,
letting each vehicle learn its route via en-route Q-learning or
whole-route choice learning.
'''

from copy import deepcopy
import os

from sumolib.net.edge import Edge
from sumolib.net.node import Node
import traci
import traci.constants as tc
from util import search
from vehicle import Vehicle
import xml.etree.ElementTree as ET


class RouteSimulation(object):

    '''
    Constructor
    '''
    def __init__(self, params):
        #a params object        
        self.params = params;
        #vehicles dictionary, the key is the vehicle's id 
        self.vehicles = self._create_vehicles_from_rou_file()
        #list containing the running vehicles' ids
        self.running_vehicles = {}
        #current travel time in minutes
        self._current_tt = 0
        #routes for the en-route mechanism
        self._en_routes = {}
        #self._create_routes_en_route()
    
    def _create_routes_en_route(self):
        i = 1
        for origin_node in self.params.network.getNodes():
            for outgoing_edge in origin_node.getOutgoing():
                print i,origin_node.getID(),outgoing_edge.getID(),self._create_route(origin_node.getID(), outgoing_edge.getToNode().getID())
                i+=1
                #print origin_node.getID(),'-', outgoing_edge.getToNode().getID()
    
    
    def _create_route(self, origin, destination):
        route_edges = search.dijkstra(self.params.network,
                                     self.params.network.getEdge(origin), 
                                     self.params.network.getEdge(destination))
        route = [e.getID().encode('utf-8') for e in route_edges]
        #final_route = [e.encode('utf-8') for e in route_ids]
        return route
    
    '''runs the route learning simulation
    '''
    def run(self):
        for episode in range(0, self.params.max_episode):
            self._load_simulator(episode)
            self._connect_traci()
            self._learning_process()
            self._create_statistics()
            self._reset_vehicles()
            self._disconnect_traci()
            self._update_epsilon(episode)
            
    def _create_statistics(self):
        tt = 0
        for veh in self.vehicles.keys():
            tt += self.vehicles[veh].get_travel_time()
            #print '\t\tvehicle:',veh,'tt:', self.vehicles[veh].get_travel_time(), 'route:',self.vehicles[veh].route 
        tt = tt/len(self.vehicles.keys())
        print '\t\t - vehicles\t', len(self.vehicles.keys()), '\n\t\t - avg tt\t', tt 
        
    def _update_epsilon(self, episode):
        self.params.epsilon = float(self.params.epsilon_initial*self.params.epsilon_decay_rate)**float(episode)
        
    def _reset_vehicles(self):
        for veh in self.vehicles.keys():
            self.vehicles[veh].reset()
            
    ''' 
                            SIMULATOR METHDOS
    '''   
            
                  
    '''
    loads the simulator following the parameters on config.xml
    '''    
    def _load_simulator(self, episode):
        command_line = self.params.get_sumo_command_line()
        print "\t", episode, "\t", command_line,
        if(os.system(command_line)==0):
            return True
        else:
            return False
    '''
    connects TraCI client to the server
    '''
    def _connect_traci(self):
        traci.init(self.params.traci_port)
    '''
    disconnects TraCI client to the server
    '''
    def _disconnect_traci(self):
        traci.close()
    
    '''
    counts the number of vehicles in the rou.xml file to be used during the simulation
    '''
    def _create_vehicles_from_rou_file(self):
        #markov decision process
        mdp = {}
        #verifies the kind of mdp to be created
        if self.params.en_route:
            #mdp for en route learning
            mdp = self._create_mdp_en_route()
        else:
            #mdp for route choice learning
            mdp = {}
        cfgtree = ET.parse(self.params.route_file)
        cfgtree = cfgtree.getroot()
        vehicles = {}
        for io_element in cfgtree:
            if io_element.tag == 'vehicle':
                veh_id = io_element.get('id')
                route = io_element.getchildren()[0].get('edges')
                route = route.split(' ')
                vehicles[veh_id] = Vehicle(veh_id, route[0], route[len(route)-1], deepcopy(mdp), 0)
        return vehicles
    
    ''' 
                            REINFORCEMENT LEARNING METHOD
    '''
    
    '''
    Create's the Markov Decision Process for the en route problem
    '''
    def _create_mdp_en_route(self):
        '''
        data structure - dictionary
        mdp['state']['action'][q_value, reward, f_value]
        state = node's name
        action = edge's name
        [0] q_value = q-learning value - float
        [1] reward = travel time on edge 'action' - float
        [2] f_value = not defined yet
        '''
        #Markov decision process structure
        mdp = {}
        
        #states
        states = self.params.network.getNodes()
        #iterate the list of states
        for state in states:
            #state's action
            state_actions = Node.getOutgoing(state)
            #dictionary of state's action used to populate the mdp
            actions = {}
            for action in state_actions:
                #create's an state and initialize q_value, reward and f_value
                
                actions[Edge.getID(action)] = ([self.params.random().uniform(0,-50), self.params.random().uniform(0,-50), self.params.random().uniform(0,-50)])
            #insert's the state's action on mdp
            mdp[Node.getID(state)] = actions
        return mdp
    
    
    '''
    runs the reinforcement learning algorithm for each vehicle in simulation
    '''
    def _learning_process(self):
        #number of vehicles that reaches their destination
        total_arrived_vehicle = 0
        
        #number of vehicles that departed in the simulation
        total_departed_vehicle = 0
        
        #step counter
        step = 0
        
        #this looping process all the vehicles
        while True:
            
            #updates the global variables
            self._update_current_travel_time()            
            
            #update the total arrived and departed vehicles
            departed_list = traci.simulation.getDepartedIDList()
            arrived_list = traci.simulation.getArrivedIDList()
            total_departed_vehicle += len(departed_list)
            total_arrived_vehicle += len(arrived_list)
            #stop condition
            if(total_arrived_vehicle == total_departed_vehicle) and (total_departed_vehicle >0):
                break
            
            #adds departed vehicles to the running vehicles list
            for departed in departed_list:
                self.running_vehicles[departed] = departed
                self.vehicles[departed].set_depart_time(self._current_tt)
                ''''
                adds a TraCI vehicle subscription
                [0] - Edge_ID - string
                [1] - Lane_ID - string
                '''
                traci.vehicle.subscribe(departed, ((tc.VAR_ROAD_ID, )))
            
            #removes the arrived vehicles from the running vehicles list
            for arrived in arrived_list:
                #update the travel time on last edge
                self.vehicles[arrived].set_travel_time_on_link(self.current_tt)
                #self.vehicles[arrived].update_travel_time(self.current_tt)
                
                #remove the vehicle from the running vehicles
                del self.running_vehicles[arrived]
                
            #verifies the king of learning
            if(self.params.en_route):
                self._en_route_learning()
            else:
                self._route_choice_learning()           
            
            #runs a new simulation step
            traci.simulationStep()
            #increments the step counter
            step+=1;
        
        '''
        Update the last vehicle
        
        this code updates the last vehicle. It is needed because when the last vehicle is out of simulation the simulator
        stops and there is no next step for update the running_vehicles list and the vehicle travel time
        '''
        if len(self.running_vehicles.keys()) > 0:
            for veh in self.running_vehicles.keys():
                #update the travel time on last edge
                if self.vehicles[veh].is_destination():
                    self.vehicles[veh].set_travel_time_on_link(self.current_tt)
                #remove the vehicle from the running vehicles
                del self.running_vehicles[veh]
        
    '''
    route choice method
    '''
    def _route_choice_learning(self):
        print 'route choice learning'
    
    '''
    en-route learning method
    '''
    def _en_route_learning(self):
        #process all the running vehicles    
        for veh in self.running_vehicles:
            #get the vehicle edge
            current_edge = self._vehicle_edge(veh)
            '''#verify if
                * the current edge is valid (not None)
                * the current edge is not the destination one
                * if the edge has changed
            '''
            if (current_edge != None) and (not self.vehicles[veh].is_destination()) and (current_edge != self.vehicles[veh].get_current_edge()) and not (self.vehicles[veh].removed):                                 
                                     
                #applies  the Q-Learning algorithm to create the new route
                self._q_learning_en_route(veh, current_edge)
            #does vehicle reach the max number of steps (actions)
            elif not self.vehicles[veh].removed and self.vehicles[veh].get_number_of_actions() > self.params.max_step:
                #removes the vehicle
                #print veh, 'saiu pq atingiu o número maximo de steps', self.vehicles[veh].get_number_of_actions()
                self.remove_vehicle(veh)
    
    def _q_learning_en_route(self, veh, current_edge):  
        #update the travel time on last edge
        self.vehicles[veh].set_travel_time_on_link(self.current_tt)
        #update the reward on the last action
        origin_node = self._origin_node(self.vehicles[veh].get_current_edge())
        self.vehicles[veh].mdp[origin_node][self.vehicles[veh].get_current_edge()][1] = -self.vehicles[veh].get_travel_time_on_link()
    
        #sets the current edge
        self.vehicles[veh].set_current_edge(current_edge)
        if veh == '1':
            True
        #update the route list
        self.vehicles[veh].route.append(current_edge)
        '''choose the new action'''
        
        #the next node
        next_node = self._destination_node(self.vehicles[veh].get_current_edge())
        #string with the action key
        new_action = ''      
        
        #exploration or exploitation?
        rand = self.params.random().random()
        if rand  < self.params.epsilon:#self.params.epsilon_initial:
            #choose an action randomly
            new_action = self.vehicles[veh].mdp[next_node].keys()[self.params.random().randint(0, len(self.vehicles[veh].mdp[next_node].keys())-1)]
            #print action_key
        else:
            #choose the best action
            new_action = self._choose_best_action_en_route(veh, next_node)
        
        ''''update Q-table'''
        #Q-value
        q_value = self.vehicles[veh].mdp[next_node][new_action][0]
        
        #verifies if this is the destination
        reward = 0
        future_node = ''
        future_q_value = 0
        if new_action == self.vehicles[veh].get_destination():
            #intermediate states receive the negative travel time on link
            reward = self.vehicles[veh].get_travel_time()
        else:
            #final state receive the positive veh's travel time
            reward = self.vehicles[veh].mdp[next_node][new_action][1]
            
            #node which maximizes the q-value on action
            future_node = self._destination_node(new_action)
            #Q-value for the best future action
            if self.vehicles[veh].mdp[future_node].keys()>0:
                try:
                    future_q_value = self.vehicles[veh].mdp[future_node][self._choose_best_action_en_route(veh, future_node)][0]
                except:
                    print 'veh', veh, 'future_node', future_node, 'new action', new_action
        #define the q-value following Q-learning algorithm
        new_q_value = (1-self.params.alpha)*q_value + self.params.alpha*(reward + self.params.gamma*future_q_value)
        #print 'reward', reward, 'q-value', q_value, 'future_q_value', future_q_value, 'new_q_value', new_q_value
        #update driver's Q-value
        self.vehicles[veh].mdp[next_node][new_action][0] = new_q_value
        
        ''''update the route'''
        new_route = self._create_route(self.vehicles[veh].get_current_edge(), new_action)
        
        #update the route
        traci.vehicle.setRoute(self.vehicles[veh].get_id(), new_route)
    
    def _choose_best_action_en_route(self, veh, next_node):
        actions = self.vehicles[veh].mdp[next_node].keys()
        best_action = ''
        maximum = -100000000
        for a in actions:
            if(self.vehicles[veh].mdp[next_node][a][0] > maximum):
                maximum = self.vehicles[veh].mdp[next_node][a][0]
                best_action = a
        return best_action
    
    
    ''' 
                            OTHER METHODS
    '''
    
    '''return the current vehicle's edge from TraCI subscriptions'''
    def _vehicle_edge(self, veh):
        result = traci.vehicle.getSubscriptionResults(veh)
        #
        if result != None and result[tc.VAR_ROAD_ID]!=None and self.params.edges_id.has_key(result[tc.VAR_ROAD_ID]):
            return result[tc.VAR_ROAD_ID]
        else:
            return None
    
    '''return the next vehicles's node'''
    def _destination_node(self, edge_id):
        node = (self.params.network.getEdge(edge_id).getToNode().getID())
        return node
    
    '''return the previous vehicle's node'''
    def _origin_node(self, edge_id):
        node = (self.params.network.getEdge(edge_id).getFromNode().getID())
        return node
    
    '''return the current simulation time'''
    def _update_current_travel_time(self):
        self.current_tt = traci.simulation.getCurrentTime()/1000
    '''
    trash code goes here
    '''    
    def print_mdp(self, veh):
        #True
        for state in self.vehicles[veh].mdp.keys():
            print 'state:', state
            for action in self.vehicles[veh].mdp[state].keys():
                print '\t action', action,': ', self.vehicles[veh].mdp[state][action]
        #print '\t\ttravel time: ', self.current_tt/60, ' route:', self.vehicles['1'].route
    '''
    removes the vehicle if it made more than max-steps actions
    '''
    def remove_vehicle(self, veh):
        #print 'traing to remove', veh
        #traci.vehicle.remove(veh, tc.REMOVE_PARKING)
        self.vehicles[veh].removed = True
        #print veh,'is removed'
       
