/**
 * Traffic-signal control experiments comparing a naive CRF joint policy
 * against a "sophisticated" CRF whose features are learned per-intersection
 * logistic regression classifiers.
 */
package cs534.crf.test;

import inference.traffic.CRFStructure;
import inference.traffic.LogisticRegression;
import inference.traffic.TrafficDisplay;
import inference.traffic.TrafficState;
import inference.traffic.TrafficState.LocalState;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NavigableMap;
import java.util.TreeMap;

import util.Pair;
import cs534.crf.ConditionalRandomField;
import cs534.crf.CrfBoltzmannPolicy;
import cs534.crf.CrfGibbsSampler;
import cs534.policy.Function2;
import cs534.policy.MyPolicy;
import cs534.policy.OnlinePolicyGradient;
import cs534.util.StringUtil;

/**
 * @author jhostetler
 *
 */
public class TrafficDomain
{
	
	// Logistic regression model for the untrained ("null") local policy.
	// Initialized in sophisticatedCrfExperiment() and used both as the
	// baseline for local-policy evaluation and to build the initial CRF.
	public static NavigableMap<Pair<Integer, Integer>, NavigableMap<Integer, ArrayList<Double> > > 
		null_lr_model;
	
	/**
	 * Computes summary statistics of one epoch's rewards.
	 * @param rewards per-step rewards collected during the epoch
	 * @return a two-element array { mean, variance }
	 */
	public static double[] computeEpochStatistics( final double[] rewards )
	{
		return new double[] {
			cs534.util.List.mean( rewards ),
			cs534.util.List.variance( rewards )
		};
	}
	
	/**
	 * Writes epoch statistics to 'out' as a single CSV line and returns them.
	 * @param out stream to append the comma-separated statistics to
	 * @param rewards per-step rewards collected during the epoch
	 * @return the array produced by computeEpochStatistics( rewards )
	 */
	public static double[] writeEpochStats( final PrintStream out, final double[] rewards )
	{
		final double[] stats = computeEpochStatistics( rewards );
		out.println( StringUtil.join( ",", cs534.util.List.toStringArray( stats ) ) );
		return stats;
	}
	
	/**
	 * Runs the "naive" experiment: a CRF with hand-coded features is optimized
	 * directly via online policy gradient, starting from state 's0'. Per-epoch
	 * reward statistics are appended to "stats_naive_&lt;experiment_id&gt;.csv".
	 * @param crf_structure describes the CRF topology of the traffic network
	 * @param s0 initial traffic state; copied each epoch, never mutated
	 */
	public static void naiveCrfExperiment( final CRFStructure crf_structure, final TrafficState s0 ) throws Exception
	{
		ConditionalRandomField<TrafficState> crf = crf_structure.createNaiveCrf();
		ArrayList<Function2<Double, TrafficState, int[]>> features = crf_structure.naiveFeatures();

		CrfGibbsSampler<TrafficState> sampler = new CrfGibbsSampler<TrafficState>( crf, gibbs_burn_in, seed );
		CrfBoltzmannPolicy<TrafficState> base_policy = new CrfBoltzmannPolicy<TrafficState>(
			new double[crf.numParameters()], features, crf, sampler, gibbs_samples );
		OnlinePolicyGradient<TrafficState, int[], CrfBoltzmannPolicy<TrafficState>> policy_gradient
			= new OnlinePolicyGradient<TrafficState, int[], CrfBoltzmannPolicy<TrafficState>>(
					base_policy, alpha, beta );
		
		// Only referenced by the commented-out viz.display() calls below.
		final TrafficDisplay viz = new TrafficDisplay( viz_sleep_ms );
		
		final int window = 100; // Moving-average window size (steps)
		final List<Double> moving_average = new LinkedList<Double>();
		final List<Double> averages = new LinkedList<Double>();
		
		final int epoch = joint_steps;
		final double[] epoch_rewards = new double[epoch];
		
		final ArrayList<double[]> statistics = new ArrayList<double[]>();
		final PrintStream stats_out = new PrintStream( new File( "stats_naive_" + experiment_id + ".csv" ) );
		// NOTE(review): model_out is opened but never written to; kept so the
		// (empty) output file is still produced as before.
		final PrintStream model_out = new PrintStream( new File( "model_naive_" + experiment_id + ".txt" ) );
		
		TrafficState s = s0.copy();
		int epoch_reward_idx = 0;
		
		// Evaluate baseline policy
		System.out.println( "Evaluating base policy" );
		// Evaluation
		for( int joint_step = 0; joint_step < joint_steps; ++joint_step ) {
			base_policy.enterObservation( s );
			final int[] a = base_policy.getAction();
			s.nextState( CRFStructure.flatActionToMap( crf_structure, a ) );
			final double r = s.getReward();
			epoch_rewards[epoch_reward_idx++] = r;
		}
		// Logging
		statistics.add( writeEpochStats( stats_out, epoch_rewards ) );
		
		// Evaluate policy gradient
		
		// Main loop
//		viz.display( s );
		for( int iteration = 0; iteration < iterations; ++iteration ) {
			System.out.println( "\n\n----------------------------------------" );
			System.out.println( "Iteration " + iteration );
			
			// Reset state
			s = s0.copy();
			epoch_reward_idx = 0;
			
			// Do policy gradient on the CRF
			for( int joint_step = 0; joint_step < joint_steps; ++joint_step ) {
				final TrafficState s_tminus1 = s.copy();
				policy_gradient.enterObservation( s_tminus1 );
				final int[] action = policy_gradient.getAction();
				final NavigableMap<Pair<Integer, Integer>, Pair<Boolean, Boolean>> action_map
					= CRFStructure.flatActionToMap( crf_structure, action );
				s.nextState( action_map );
				final double r = s.getReward();
				System.out.println( "Reward = " + r );
				// Store reward
				epoch_rewards[epoch_reward_idx++] = r;
				// Maintain a sliding window of the last 'window' rewards.
				if( moving_average.size() == window ) {
					moving_average.remove( 0 );
				}
				moving_average.add( r );
				double avg = 0.0;
				for( final double d : moving_average ) {
					avg += d;
				}
				averages.add( avg / moving_average.size() );
				System.out.println( "Average reward = " + (avg / moving_average.size()) );
				policy_gradient.actionResult( s_tminus1, action, s, r );
//				viz.display( s );
			}
			// Logging
			statistics.add( writeEpochStats( stats_out, epoch_rewards ) );
			
			// Extract improved base policy.
			base_policy = policy_gradient.basePolicy();
			crf = base_policy.crf();
			
			System.out.println( "Crf parameters:" );
			for( ConditionalRandomField.Clique<TrafficState> c : crf.cliques() ) {
				System.out.println( Arrays.toString( c.parameters() ) );
			}
		}
		
		// FIX: flush and release the log files (previously leaked / never closed).
		stats_out.close();
		model_out.close();
	}
	
	/**
	 * Runs the "sophisticated" experiment. Each iteration: (1) samples the
	 * joint CRF policy to collect (local state, local action) training pairs,
	 * (2) fits per-intersection logistic regression classifiers to that data,
	 * (3) evaluates the learned local policy, (4) rebuilds the CRF with the LR
	 * classifiers as features and optimizes it via online policy gradient.
	 * Joint statistics go to "stats_sophisticated_&lt;id&gt;.csv"; local-policy
	 * statistics go to "stats_sophisticated_local_&lt;id&gt;.csv".
	 * @param crf_structure describes the CRF topology of the traffic network
	 * @param s0 initial traffic state; copied each epoch, never mutated
	 */
	public static void sophisticatedCrfExperiment( final CRFStructure crf_structure, 
			final TrafficState s0 ) throws Exception
	{
		LogisticRegression.init( seed, Rpath );
		
		null_lr_model = crf_structure.makeNullLRModel( s0 );
		
		ConditionalRandomField<TrafficState> crf = null;
		ArrayList<Function2<Double, TrafficState, int[]>> features = null;
		if( use_local_policies ) {
			crf = crf_structure.createSophisticatedCrf( null_lr_model , crf_structure );
			features = crf_structure.sophisticatedFeatures( null_lr_model , crf_structure );
		}
		else {
			crf = crf_structure.createNaiveCrf();
			features = crf_structure.naiveFeatures();
		}
		CrfGibbsSampler<TrafficState> sampler = new CrfGibbsSampler<TrafficState>( crf, gibbs_burn_in, seed );
		CrfBoltzmannPolicy<TrafficState> base_policy = new CrfBoltzmannPolicy<TrafficState>(
			new double[crf.numParameters()], features, crf, sampler, gibbs_samples );
		OnlinePolicyGradient<TrafficState, int[], CrfBoltzmannPolicy<TrafficState>> policy_gradient
			= new OnlinePolicyGradient<TrafficState, int[], CrfBoltzmannPolicy<TrafficState>>(
					base_policy, alpha, beta );
	//	final MyPolicy<TrafficState, int[]> active_policy = policy_gradient;
		
		// Only referenced by the commented-out viz.display() calls below.
		final TrafficDisplay viz = new TrafficDisplay( viz_sleep_ms );
		
		final int window = 100; // Moving-average window size (steps)
		final List<Double> moving_average = new LinkedList<Double>();
		final List<Double> averages = new LinkedList<Double>();
		
		final int epoch = joint_steps;
		final double[] epoch_rewards = new double[epoch];
		final ArrayList<double[]> statistics = new ArrayList<double[]>();
		final ArrayList<double[]> local_statistics = new ArrayList<double[]>();
		final PrintStream stats_out = new PrintStream( new File( "stats_sophisticated_" + experiment_id + ".csv" ) );
		final PrintStream local_stats_out = new PrintStream( new File( "stats_sophisticated_local_" + experiment_id + ".csv" ) );
		
		TrafficState s = s0.copy();
		int epoch_reward_idx = 0;
		// Evaluate baseline policy
		System.out.println( "Evaluating base policy" );
		// Evaluation -- CRF
		for( int joint_step = 0; joint_step < joint_steps; ++joint_step ) {
			base_policy.enterObservation( s );
			final int[] a = base_policy.getAction();
			s.nextState( CRFStructure.flatActionToMap( crf_structure, a ) );
			final double r = s.getReward();
			epoch_rewards[epoch_reward_idx++] = r;
		}
		statistics.add( writeEpochStats( stats_out, epoch_rewards ) );
		// Reset state
		s = s0.copy();
		epoch_reward_idx = 0;
		// Evaluation -- LR (baseline: the untrained null model)
		for( int local_step = 0; local_step < local_steps; ++local_step ) {
			s.nextState( LogisticRegression.sampleLocalActions(
				LogisticRegression.getLocalActions( 
						s.getLocalStateDescriptions(), null_lr_model ,
						crf_structure 
				) 
			) );
			
			final double r = s.getReward();
			epoch_rewards[epoch_reward_idx++] = r;
		}
		// Logging
		local_statistics.add( writeEpochStats( local_stats_out, epoch_rewards ) );
		
		// Evaluate policy gradient
		
		// Main loop
//		viz.display( s );
		for( int iteration = 0; iteration < iterations; ++iteration ) {
			System.out.println( "\n\n----------------------------------------" );
			System.out.println( "Iteration " + iteration );
			
			// Reset state
			s = s0.copy();
			epoch_reward_idx = 0;
			
			// Get training samples for local policy
			
			final TreeMap<Pair<Integer, Integer>, ArrayList<Pair<LocalState, Pair<Boolean, Boolean>>>> instances
				= new TreeMap<Pair<Integer, Integer>, ArrayList<Pair<LocalState, Pair<Boolean, Boolean>>>>();
			for( final Pair<Integer, Integer> i : s.get_xions() ) {
				instances.put( i, new ArrayList<Pair<LocalState, Pair<Boolean, Boolean>>>() );
			}
			
			System.out.println( "Sampling joint policy" );
			for( int local_step = 0; local_step < local_steps; ++local_step ) {
				base_policy.enterObservation( s );
				final int[] joint_action = base_policy.getAction();
				// Record, per intersection, the (local state, local action) pair
				// induced by the joint action. Relies on 'instances' and the
				// local state map iterating in the same (sorted) key order.
				final Iterator<Map.Entry<Pair<Integer, Integer>, ArrayList<Pair<LocalState, Pair<Boolean, Boolean>>>>>
					instance_itr = instances.entrySet().iterator();
				final Iterator<Map.Entry<Pair<Integer, Integer>, LocalState>>
					state_itr = s.getLocalStateDescriptions().entrySet().iterator();
				for( final int ai : joint_action ) {
					// The two low bits of 'ai' encode the binary local action pair.
					instance_itr.next().getValue().add( 
						new Pair<LocalState, Pair<Boolean, Boolean>>(
							new LocalState( state_itr.next().getValue() ),
							new Pair<Boolean, Boolean>( (ai & (1<<1)) != 0, (ai & 1) != 0 ) ) );
				}
				assert( !instance_itr.hasNext() );
				assert( !state_itr.hasNext() );
				
				s.nextState( CRFStructure.flatActionToMap( crf_structure, joint_action ) );
//				viz.display(s);
				
			}
			
			// Train logistic regression classifier
			System.out.println( "Training LR classifier" );
			NavigableMap<Pair<Integer, Integer>, NavigableMap<Integer, ArrayList<Double>>>
				lr_model = LogisticRegression.learnLogisticRegression( crf_structure, instances );
			System.out.println( "LR model:" );
			for( Entry<Pair<Integer, Integer>, NavigableMap<Integer, ArrayList<Double>>> e : 
					lr_model.entrySet() ) {
				System.out.println( e.getKey() );
				System.out.println( e.getValue() );
			}
			
			// Evaluate LR classifier
			s = s0.copy();
			epoch_reward_idx = 0;
			// Evaluation -- LR
			for( int local_step = 0; local_step < local_steps; ++local_step ) {
//				viz.display( s );;
				// FIX: evaluate the newly trained 'lr_model' rather than
				// 'null_lr_model'; previously the untrained baseline was
				// re-evaluated every iteration, so the logged local statistics
				// never reflected the learned classifier (note the CRF built
				// below does use 'lr_model').
				s.nextState( LogisticRegression.sampleLocalActions(
					LogisticRegression.getLocalActions( s.getLocalStateDescriptions(), 
							lr_model, crf_structure ) ) );
				
				final double r = s.getReward();
				epoch_rewards[epoch_reward_idx++] = r;
			}
			// Logging
			local_statistics.add( writeEpochStats( local_stats_out, epoch_rewards ) );
			
			// Create a new CRF with the LR classifiers for features
			System.out.println( "Creating new CRF" );
			crf = crf_structure.createSophisticatedCrf( lr_model , crf_structure );
			sampler = new CrfGibbsSampler<TrafficState>( crf, gibbs_burn_in, seed );
			base_policy = new CrfBoltzmannPolicy<TrafficState>(
					base_policy.parameters(), 
					crf_structure.sophisticatedFeatures( lr_model, crf_structure ), 
					crf, sampler, gibbs_samples );
			policy_gradient	= new OnlinePolicyGradient<TrafficState, int[], CrfBoltzmannPolicy<TrafficState>>(
					base_policy, alpha, beta );
			
			// Do policy gradient on the CRF
			System.out.println( "Optimizing CRF" );
			s = s0.copy();
			epoch_reward_idx = 0;
			for( int joint_step = 0; joint_step < joint_steps; ++joint_step ) {
				final TrafficState s_tminus1 = s.copy();
				policy_gradient.enterObservation( s_tminus1 );
				final int[] action = policy_gradient.getAction();
				final NavigableMap<Pair<Integer, Integer>, Pair<Boolean, Boolean>> action_map
					= CRFStructure.flatActionToMap( crf_structure, action );
				s.nextState( action_map );
				final double r = s.getReward();
				System.out.println( "Reward = " + r );
				// Store reward
				epoch_rewards[epoch_reward_idx++] = r;
				// Maintain a sliding window of the last 'window' rewards.
				if( moving_average.size() == window ) {
					moving_average.remove( 0 );
				}
				moving_average.add( r );
				double avg = 0.0;
				for( final double d : moving_average ) {
					avg += d;
				}
				averages.add( avg / moving_average.size() );
				System.out.println( "Average reward = " + (avg / moving_average.size()) );
				policy_gradient.actionResult( s_tminus1, action, s, r );
//				viz.display( s );
			}
			statistics.add( writeEpochStats( stats_out, epoch_rewards ) );
			
			// Extract improved base policy.
			base_policy = policy_gradient.basePolicy();
			crf = base_policy.crf();
			
			System.out.println( "Crf parameters:" );
			for( ConditionalRandomField.Clique<TrafficState> c : crf.cliques() ) {
				System.out.println( Arrays.toString( c.parameters() ) );
			}
		}
		
		// FIX: flush and release the log files (previously leaked / never closed).
		stats_out.close();
		local_stats_out.close();
	}
	
	
	// Experiment parameters. These are assigned concrete values in main().
	public static int experiment_id = -1;
	public static long seed = 0;			// RNG seed
	public static int n = 0;				// # intersections per side
	public static int m = 0;				// Leg length
	public static double turn_prob = 0;	// Car turn probability
	public static double create_prob = 0;	// Car creation probability
	public static int H = 0; 				// Horizon
	public static double gamma = 0; 		// Reward discount
	public static int Delta = 0; 			// Simulator steps per decision epoch
	public static int gibbs_burn_in = 0;	// Gibbs sampler burn-in samples
	public static int gibbs_samples = 0;	// Gibbs samples per action selection
	
	public static boolean use_local_policies = false;	// Sophisticated vs. naive experiment
	
	public static double alpha = 0; 		// Policy gradient learning rate
	public static double beta = 0; 		// Policy gradient eligibility trace discount
	
	public static int joint_steps = 0;		// Steps per joint-policy epoch
	public static int local_steps = 0;		// Steps per local-policy epoch
	public static int iterations = 0;		// Outer training iterations
	
	public static int viz_sleep_ms = 0;	// Delay between visualization frames
	public static String Rpath = null;		// Path to the Rscript executable
	
	/**
	 * Entry point: sets the (hard-coded) experiment parameters, builds the
	 * initial traffic state and CRF structure from the RDDL domain files, and
	 * dispatches to the experiment selected by 'use_local_policies'.
	 * @param args unused
	 */
	public static void main( final String[] args ) throws Exception
	{
		experiment_id = 1569;
		seed = 42;			// RNG seed
		n = 2;				// # intersections per side
		m = 4;				// Leg length
		turn_prob = 0.3;	// Car turn probability
		create_prob = 0.1;	// Car creation probability
		H = 100; 				// Horizon
		gamma = 0.9; 		// Reward discount
		Delta = 2; 			// Simulator steps per decision epoch
		gibbs_burn_in = 100;
		gibbs_samples = 1000;
		
		use_local_policies = true;
		
		alpha = 0.01; 		// Policy gradient learning rate
		beta = 0.9; 		// Policy gradient eligibility trace discount
		
		joint_steps = 1000;
		local_steps = 100;
		iterations = 100;
		
		viz_sleep_ms = 300;
		Rpath = "C:/Program Files/R/R-2.14.1/bin/Rscript.exe";
		
		final TrafficState s0 = new TrafficState( n, n, m, m, seed, turn_prob, create_prob, H, gamma, Delta );
		final CRFStructure crf_structure = new CRFStructure( "./traffic_mdp.rddl", "./" + TrafficState.instance_name + ".rddl" );
		
		if( use_local_policies ) {
			sophisticatedCrfExperiment( crf_structure, s0 );
		}
		else {
			naiveCrfExperiment( crf_structure, s0 );
		}
	}

}
