#include "ai_monte_carlo.h"



void monte_carlo_algorithm::get_buy_commands( const game_state* gs )
{
	// Buying phase: this algorithm makes no purchases, it only ends the phase.
	// Start from an empty command queue (defensive; callers may have left entries).
	commands.clear();
	cout << "AI BEGIN" << endl;
	const time_t start_time = time( 0 );

	// The single buy-phase action produced is "END" (finish buying immediately).
	commands.push_back( "END" );

	// Wall-clock time spent here, rounded up to whole seconds.
	const unsigned int elapsed_sec = ( unsigned int )abs( ceil( difftime( time( 0 ), start_time ) ) );
	cout << "Elapsed time: " << ai_algorithms::sec_to_string( elapsed_sec ) << " (will not be added to graph)" << endl;
	cout << "AI END" << endl;
};



void monte_carlo_algorithm::get_commands( const game_state* gs )
{
	// Battle-phase entry point: builds a depth-bounded Monte Carlo search tree
	// from the current game state, propagates scores bottom-up through the
	// turn levels, and fills `commands` with the action sequence leading to
	// the best reachable state for this AI player.
	commands.clear();
	cout << "AI BEGIN" << endl;
	time_t start_time = time( 0 );

	original_state = gs;

	//init random seed
	srand( ( unsigned int )abs( ceil( ( double )time( 0 ) ) ) );

	// One node list per search depth; sized "depth" + 1 so index == depth.
	state_levels = new vector< list< ai_states_node_mc* > >( ( unsigned int)( ceil( parameters::get_instance()->ai_parameters[ "MONTE_CARLO" ][ "depth" ] ) + 1 ) );
	// Hash -> bucket of visited states, used by is_in_list() to prune duplicates.
	searched_states_list = new map< unsigned long long int, list< game_state* > >;

	//adding root node
	// Root is a synthetic "NXT" node wrapping a clone of the current state.
	search_tree = new ai_states_node_mc( 0, new ai_game_state( gs->clone(), "NXT", false ) );
	nodes_to_search = 1;
	searched_nodes = 0;
	cout << "Nodes searched: " << searched_nodes << "  -  Remaining: " << nodes_to_search << "          ";
	search_state( search_tree );

	cout << endl;
	
	//set expanded flag to parents of end level
	// Deepest-level nodes were never expanded by search_state; flag them (and
	// their level parents) so the scoring loops below will consider them.
	for ( list< ai_states_node_mc* >::iterator it = ( *state_levels )[ state_levels->size() - 1 ].begin(); it != ( *state_levels )[ state_levels->size() - 1 ].end(); ++it )
	{
		( *it )->expanded = true;
		( *it )->level_parent->expanded = true;
	}

	//compute scores for each level
	// Walk levels bottom-up (leaf-1 .. 1), each node adopting the scores of
	// its best-scoring level child (minimax-style propagation).
	for ( unsigned int i = state_levels->size() - 2; i > 0; --i )
	{
		//for each node in level compute his score
		//we predict, that each player will do actions leading to the best score, so we choose that states and try to avoid them with previous player
		for ( list< ai_states_node_mc* >::iterator it = ( *state_levels )[ i ].begin(); it != ( *state_levels )[ i ].end(); ++it )
		{
			ai_states_node_mc* found_node = find_max_score_curr( ( *it )->level_children );
			if ( found_node == 0 )
			{
				// No expanded level children: poison this branch with a huge
				// negative score so it can never be selected.
				( *it )->my_score = -1e50;
				( *it )->current_score = -1e50;
				continue;
			}
			( *it )->expanded = true;
			( *it )->my_score = found_node->my_score;
			( *it )->current_score = found_node->prev_score;
		}
	}

	//find best state for me
	ai_states_node_mc* best_node = find_max_score_me( &( *state_levels )[ 1 ] );
	
	if ( best_node != 0 )
	{
		//get all commands on way from best state
		// Walk parent links back to the root, prepending each command so the
		// queue ends up in execution order.
		for ( ai_states_node_mc* i = best_node; i->parent != 0; i = i->parent )
		{
			commands.push_front( i->state->command );
		}
	}
	else
	{
		// Nothing usable was found: just end the turn.
		commands.push_front( "NXT" );
	}
	
	// Deleting the root frees the whole tree recursively (see node destructor);
	// the hash map holds only non-owning pointers to states owned by tree nodes.
	delete search_tree;
	delete state_levels;
	delete searched_states_list;
	search_tree = 0;
	state_levels = 0;
	searched_states_list = 0;

	unsigned int elapsed_sec = ( unsigned int)( abs( ceil ( difftime( time( 0 ), start_time ) ) ) );
	time_cons.push_back( elapsed_sec );
	cout << "Elapsed time: " << ai_algorithms::sec_to_string( elapsed_sec ) << endl;
	cout << "AI END" << endl;
};



void monte_carlo_algorithm::add_to_list( game_state* state )
{
	// Record the state as visited: bucket it by hash, equal hashes share one
	// collision list. Stores a non-owning pointer only.
	list< game_state* >& bucket = ( *searched_states_list )[ state->get_hash() ];
	bucket.push_back( state );
};



bool monte_carlo_algorithm::is_in_list( game_state* state )
{
	// Returns true if an equal state has already been searched.
	// Lookup is two-stage: hash bucket first, then deep equality within it.
	map< unsigned long long int, list< game_state* > >::iterator found_pair = searched_states_list->find( state->get_hash() );
	if ( found_pair == searched_states_list->end() )
		return false;
	// BUGFIX(perf): bind the collision bucket by reference — the original
	// copied the whole list<game_state*> on every query.
	const list< game_state* >& found_states = found_pair->second;
	for ( list< game_state* >::const_iterator it = found_states.begin(); it != found_states.end(); ++it )
		if ( ( *it )->equals( state ) )
			return true;
	return false;
};



void monte_carlo_algorithm::search_state( ai_states_node_mc* state_node )
{
	// Recursive tree expansion: marks the state visited, performs level
	// bookkeeping for "NXT" (end-of-turn) nodes, generates candidate child
	// states (attack/move for every unit of the current player, plus "NXT"),
	// then recurses into the best-rated subset chosen by get_list_to_expand().
	add_to_list( state_node->state->state );
	--nodes_to_search;
	++searched_nodes;
	cout << "\rNodes searched: " << searched_nodes << "  -  Remaining: " << nodes_to_search << "          ";
	if ( state_node->state->command == "NXT" )
	{
		// An end-of-turn node starts a new level of the search.
		++state_node->depth;
		state_node->state->state->get_map_structures_from_state( original_state );
		state_node->state->state->update_score_structures();
		( *state_levels )[ state_node->depth ].push_back( state_node );

		//connect levels
		// The level parent is the nearest "NXT" ancestor (previous turn end).
		ai_states_node_mc* connect_node = state_node->parent;
		while ( ( connect_node != 0 ) && ( connect_node->state->command != "NXT" ) )
		{
			connect_node = connect_node->parent;
		}

		state_node->level_parent = connect_node;
		if ( connect_node != 0 )
		{
			connect_node->level_children->push_back( state_node );
			// Score of this state from the previous level's player viewpoint.
			state_node->prev_score = state_node->state->state->another_plrs_score( state_node->level_parent->state->state->current_player_id );
		}

		if ( state_node->depth >= parameters::get_instance()->ai_parameters[ "MONTE_CARLO" ][ "depth" ] )
		{
			//save my score to leafs
			state_node->my_score = state_node->state->state->another_plrs_score( get_owner()->get_id() );
			return;
		}
	}
	state_node->state->state->clear_all_structures();
	//generate all possible children
	ai_states_node_mc* generated_node;
	ai_game_state* generated_state;
	game_state* test_gs;
	player* test_pl;
	unit* test_u;
	unsigned int max_range_move;
	//for search only of possible square
	unsigned int search_top;
	unsigned int search_left;
	unsigned int search_bottom;
	unsigned int search_right;
	for ( unsigned int u = 0; u < state_node->state->state->plrs[ state_node->state->state->current_player_id ]->units.size(); ++u )
	{
		test_pl = state_node->state->state->plrs[ state_node->state->state->current_player_id ];
		test_u = test_pl->units[ u ];
		// NOTE(review): the ternary arms are asymmetric — the else arm divides
		// by 10 while the compared expression does not; confirm the "/ 10" is
		// intentional and not a leftover tweak.
		max_range_move = test_u->get_range() > ( test_pl->get_time() / test_u->get_cost_move() ) ? test_u->get_range() : ( test_pl->get_time() / test_u->get_cost_move() / 10 );
		//compute search ranges (square where to search for possibilities)
		// Clamp the search square to map bounds; the (int) casts guard against
		// unsigned underflow when the unit sits closer to the edge than range.
		search_top = ( ( ( int )test_u->get_y() - ( int )max_range_move ) > 0 ) ? ( test_u->get_y() - max_range_move ) : 0;
		search_left = ( ( ( int )test_u->get_x() - ( int )max_range_move ) > 0 ) ? ( test_u->get_x() - max_range_move ) : 0;
		search_bottom = ( ( test_u->get_y() + max_range_move ) < mapa::get_instance()->get_size_y() ) ? ( test_u->get_y() + max_range_move ) : ( mapa::get_instance()->get_size_y() - 1 );
		search_right = ( ( test_u->get_x() + max_range_move ) < mapa::get_instance()->get_size_x() ) ? ( test_u->get_x() + max_range_move ) : ( mapa::get_instance()->get_size_x() - 1 );
		for ( unsigned int x = search_left; x <= search_right; ++x )
			for ( unsigned int y = search_top; y <= search_bottom; ++y )
			{
				// Try each action on a throwaway clone first; only legal
				// actions materialize a child state in the tree.
				test_gs = state_node->state->state->clone();
				if ( ai_support_computations::ai_fire( test_gs->plrs[ test_gs->current_player_id ]->units[ u ], x, y, test_gs ) )
				{
					generated_state = new ai_game_state( state_node->state->state, game::create_command( "ATT", u, x, y ) );
					if ( is_in_list( generated_state->state ) )
					{
						// Duplicate of an already-searched state: discard.
						delete generated_state;
					}
					else
					{
						// The node constructor links itself into state_node's
						// children list, so the tree owns it from here on.
						generated_node = new ai_states_node_mc( state_node, generated_state );
					}
				}
				delete test_gs;
				test_gs = state_node->state->state->clone();
				if ( ai_support_computations::ai_move( test_gs->plrs[ test_gs->current_player_id ]->units[ u ], x, y, test_gs ) )
				{
					generated_state = new ai_game_state( state_node->state->state, game::create_command( "MOV", u, x, y ) );
					if ( is_in_list( generated_state->state ) )
					{
						delete generated_state;
					}
					else
					{
						generated_node = new ai_states_node_mc( state_node, generated_state );
					}
				}
				delete test_gs;
			}
	}

	//adding end of the turn as the last possibility to do
	generated_state = new ai_game_state( state_node->state->state, "NXT" );
	if ( is_in_list( generated_state->state ) )
	{
		delete generated_state;
	}
	else
	{
		generated_node = new ai_states_node_mc( state_node, generated_state );
	}

	// Rank all generated children and keep only the best few to recurse into;
	// unchosen children stay in the tree and are freed with it.
	list< ai_states_node_mc* >* nodes_to_search_list = get_list_to_expand( state_node->children );

	nodes_to_search += nodes_to_search_list->size();
	//recursive call to all new children
	for ( list< ai_states_node_mc* >::iterator it = nodes_to_search_list->begin(); it != nodes_to_search_list->end(); ++it )
		search_state( *it );

	delete nodes_to_search_list;
};



double monte_carlo_algorithm::rate_state( ai_game_state* state )
{
	// Heuristic Monte Carlo rating of a candidate state, used by
	// get_list_to_expand() to rank children for expansion.
	// Returns ( army_balance ^ pref_coef ) * rand_val * nxt_coef, where
	// rand_val is a random factor in < rand_coef ; 1 > and nxt_coef biases
	// "NXT" (end turn) commands.

	//compute random value from < rand_coef ; 1 >
	// BUGFIX: the original computed rand() / RAND_MAX with INTEGER division,
	// which is 0 for every draw except rand() == RAND_MAX — so this loop spun
	// until that rare draw and the "random" factor carried no randomness at
	// all. Cast to double before dividing.
	double rand_val = ( double )rand() / RAND_MAX;
	while( rand_val < parameters::get_instance()->ai_parameters[ "MONTE_CARLO" ][ "rand_coef" ] )
		rand_val = ( double )rand() / RAND_MAX;

	//compute nxt_coef
	double nxt_coef = ( state->command == "NXT" ) ? parameters::get_instance()->ai_parameters[ "MONTE_CARLO" ][ "nxt_coef" ] : 1;
	

	//estimated score

	//vector of mean power for each army
	vector< double > army_power( state->state->players_count );
	//vector of mean weapon strength
	vector< double > weapons_strength( state->state->players_count );
	//mean opponents weapon
	vector< double > mean_opponents_weapon( state->state->players_count );
	//sum opponents power
	double sum_opponents_power;
	//remaining turns assumption
	double remaining_turns;
	//expected harm caused by army
	double expected_harm;
	//army endurance
	vector< double > army_endurance( state->state->players_count );
	//sum opponents endurance
	double sum_opponents_endurance;
	//number of alive players in game
	unsigned int players_in_game;
	//state balance not considering position
	double army_balance = 0;

	//army_power, weapons_strength and players_in_game calculation
	players_in_game = 0;
	for ( unsigned int p = 0; p < state->state->players_count; ++p )
	{
		if ( state->state->plrs[ p ]->is_in_game() )
			++players_in_game;

		double power = 0;
		double weapons = 0;
		for ( vector< unit* >::iterator it = state->state->plrs[ p ]->units.begin(); it != state->state->plrs[ p ]->units.end(); ++it )
		{
			// Power ~ how much damage the unit can deal in the start-time budget.
			power += parameters::get_instance()->game_parameters[ "START_TIME" ] / ( *it )->get_cost_fire() * ( *it )->get_weapon();
			weapons += ( *it )->get_weapon();
		}
		if ( state->state->plrs[ p ]->units.empty() )
		{
			army_power[ p ] = 0;
			weapons_strength[ p ] = 0;
		}
		else
		{
			army_power[ p ] = power / state->state->plrs[ p ]->units.size();
			weapons_strength[ p ] = weapons / state->state->plrs[ p ]->units.size();
		}
	}

	//mean_opponents_weapon calculation
	for ( unsigned int p = 0; p < state->state->players_count; ++p )
	{
		double opponents_weapon = 0;
		for ( unsigned int op = 0; op < state->state->players_count; ++op )
		{
			if ( op == p )
				continue;
			opponents_weapon += weapons_strength[ op ];
		}
		if ( players_in_game == 1 )
		{
			// Sole survivor: no opponents to average over.
			mean_opponents_weapon[ p ] = 0;
		}
		else
		{
			mean_opponents_weapon[ p ] = opponents_weapon / ( players_in_game - 1 );
		}
	}

	//sum_opponents_power calculation
	sum_opponents_power = 0;
	for ( unsigned int op = 0; op < state->state->players_count; ++op )
	{
		if ( op == state->state->current_player_id )
			continue;
		sum_opponents_power += army_power[ op ];
	}

	//army_endurance calculation
	for ( unsigned int p = 0; p < state->state->players_count; ++p )
	{		
		double endurance = 0;
		double opp_harm;
		for ( vector< unit* >::iterator it = state->state->plrs[ p ]->units.begin(); it != state->state->plrs[ p ]->units.end(); ++it )
		{
			// Expected per-hit harm from an average opponent weapon, floored at 1.
			opp_harm = mean_opponents_weapon[ p ] - ( int )( *it )->get_shield() < 1 ? 1 : mean_opponents_weapon[ p ] - ( int )( *it )->get_shield();
			endurance += ( *it )->get_health() + ( *it )->get_health() / opp_harm * ( *it )->get_shield();
		}
		army_endurance[ p ] = endurance;
	}

	//remaining_turns calculation
	if ( players_in_game == 1 )
	{
		remaining_turns = 1e50; //HACK
	}
	else
	{
		remaining_turns = army_endurance[ state->state->current_player_id ] / ( sum_opponents_power / ( players_in_game - 1 ) );
	}
	
	//expected_harm calculation
	expected_harm = remaining_turns * army_power[ state->state->current_player_id ];
	
	//sum_opponents_endurance calculation
	sum_opponents_endurance = 0;
	for ( unsigned int op = 0; op < state->state->players_count; ++op )
	{
		if ( op == state->state->current_player_id )
			continue;
		sum_opponents_endurance += army_endurance[ op ];
	}
	if ( players_in_game == 1 )
	{
		army_balance = expected_harm;
	}
	else
	{
		army_balance = expected_harm - sum_opponents_endurance / ( players_in_game - 1 );
	}

	double score = army_balance;

	//compute whole score as ( score ^ pref_coef ) * nxt_coef * rand_val;
	double mc_score = pow( score, ( int )( parameters::get_instance()->ai_parameters[ "MONTE_CARLO" ][ "pref_coef" ] ) ) * rand_val * nxt_coef;
	
	return mc_score;
}



list< ai_states_node_mc* >* monte_carlo_algorithm::get_list_to_expand( list< ai_states_node_mc* >* all_children )
{
	// Selects which children to expand next: rates every child with
	// rate_state(), then keeps the best "width_plans" nodes among depth-0
	// children and the best "width" nodes among deeper children.
	// Caller owns (and must delete) the returned list.

	//separate nodes to those with depth 0 and others
	list< ai_states_node_mc* > all_children_d0;
	list< ai_states_node_mc* > all_children_dx;
	for ( list< ai_states_node_mc* >::iterator it = all_children->begin(); it != all_children->end(); ++it )
	{
		if ( ( *it )->depth == 0 )
			all_children_d0.push_back( *it );
		else
			all_children_dx.push_back( *it );
	}

	// Pair every candidate with its Monte Carlo rating.
	// BUGFIX: explicit template arguments on make_pair are ill-formed from
	// C++11 on (the parameters become rvalue references that cannot bind to
	// these lvalues); let the arguments be deduced instead.
	list< pair< ai_states_node_mc*, double > > temp_list_d0;
	list< pair< ai_states_node_mc*, double > > temp_list_dx;	
	for ( list< ai_states_node_mc* >::iterator it = all_children_d0.begin(); it != all_children_d0.end(); ++ it )
	{		
		temp_list_d0.push_back( make_pair( ( *it ), rate_state( ( *it )->state ) ) );
	}
	for ( list< ai_states_node_mc* >::iterator it = all_children_dx.begin(); it != all_children_dx.end(); ++ it )
	{
		temp_list_dx.push_back( make_pair( ( *it ), rate_state( ( *it )->state ) ) );
	}

	//find "width" best states to expand
	// Keep a descending-sorted list capped at the configured width; the worst
	// entry is popped whenever the cap is exceeded.
	list< pair< ai_states_node_mc*, double > > final_temp_list_d0;
	list< pair< ai_states_node_mc*, double > > final_temp_list_dx;
	for ( list< pair< ai_states_node_mc*, double > >::iterator it = temp_list_d0.begin(); it != temp_list_d0.end(); ++it )
	{
		insert_to_sorted_list( final_temp_list_d0, ( *it ) );
		if ( final_temp_list_d0.size() > parameters::get_instance()->ai_parameters[ "MONTE_CARLO" ][ "width_plans" ] )
			final_temp_list_d0.pop_back();
	};
	for ( list< pair< ai_states_node_mc*, double > >::iterator it = temp_list_dx.begin(); it != temp_list_dx.end(); ++it )
	{
		insert_to_sorted_list( final_temp_list_dx, ( *it ) );
		if ( final_temp_list_dx.size() > parameters::get_instance()->ai_parameters[ "MONTE_CARLO" ][ "width" ] )
			final_temp_list_dx.pop_back();
	};

	//move these nodes to list to return
	list< ai_states_node_mc* >* list_to_expand = new list< ai_states_node_mc* >;
	for ( list< pair< ai_states_node_mc*, double > >::iterator it = final_temp_list_d0.begin(); it != final_temp_list_d0.end(); ++it )
		list_to_expand->push_back( it->first );
	for ( list< pair< ai_states_node_mc*, double > >::iterator it = final_temp_list_dx.begin(); it != final_temp_list_dx.end(); ++it )
		list_to_expand->push_back( it->first );

	return list_to_expand;
};



void monte_carlo_algorithm::insert_to_sorted_list( list< pair< ai_states_node_mc*, double > >& slist, pair< ai_states_node_mc*, double >& sunit )
{
	// Insert `sunit` into `slist`, keeping it sorted by score descending.
	// Walk past every entry whose score is strictly greater, then insert
	// before the first that is not — equal scores keep insertion order.
	list< pair< ai_states_node_mc*, double > >::iterator pos = slist.begin();
	for ( ; pos != slist.end(); ++pos )
	{
		if ( !( sunit.second < pos->second ) )
			break;
	}
	slist.insert( pos, sunit );
};



ai_states_node_mc* monte_carlo_algorithm::find_max_score_me( list< ai_states_node_mc* >* node_list )
{
	// Scan all expanded nodes and return the one with the highest my_score.
	// Returns 0 when the list is empty or contains no expanded node.
	ai_states_node_mc* best = 0;
	for ( list< ai_states_node_mc* >::iterator it = node_list->begin(); it != node_list->end(); ++it )
	{
		ai_states_node_mc* node = *it;
		if ( !node->expanded )
			continue;
		if ( ( best == 0 ) || ( node->my_score > best->my_score ) )
			best = node;
	}
	return best;
};



ai_states_node_mc* monte_carlo_algorithm::find_max_score_curr( list< ai_states_node_mc* >* node_list )
{
	// Scan all expanded nodes and return the one with the highest
	// current_score. Returns 0 when the list is empty or contains no
	// expanded node.
	ai_states_node_mc* best = 0;
	for ( list< ai_states_node_mc* >::iterator it = node_list->begin(); it != node_list->end(); ++it )
	{
		ai_states_node_mc* node = *it;
		if ( !node->expanded )
			continue;
		if ( ( best == 0 ) || ( node->current_score > best->current_score ) )
			best = node;
	}
	return best;
};



ai_states_node_mc* monte_carlo_algorithm::find_max_score_prev( list< ai_states_node_mc* >* node_list )
{
	// Scan all expanded nodes and return the one with the highest prev_score.
	// Returns 0 when the list is empty or contains no expanded node.
	ai_states_node_mc* best = 0;
	for ( list< ai_states_node_mc* >::iterator it = node_list->begin(); it != node_list->end(); ++it )
	{
		ai_states_node_mc* node = *it;
		if ( !node->expanded )
			continue;
		if ( ( best == 0 ) || ( node->prev_score > best->prev_score ) )
			best = node;
	}
	return best;
};



ai_states_node_mc::ai_states_node_mc( ai_states_node_mc* p, ai_game_state* s )
{
	// Node takes ownership of `s`; a non-null parent takes ownership of this
	// node (it is registered into the parent's children list here).
	children = new list< ai_states_node_mc* >;
	level_children = new list< ai_states_node_mc* >;
	state = s;
	parent = p;
	level_parent = 0;
	my_score = 0;
	prev_score = 0;
	expanded = false;
	current_score = s->state->score;
	if ( p == 0 )
	{
		// Root node: start at -1 so the first "NXT" pass bumps depth to 0.
		depth = -1;
	}
	else
	{
		// Link into the parent's subtree and inherit its depth ("NXT"
		// handling in search_state increments it when a level starts).
		p->children->push_back( this );
		depth = p->depth;
	}
};



ai_states_node_mc::~ai_states_node_mc()
{
	// Recursively destroy the whole subtree rooted here. level_children only
	// mirrors nodes that live in the tree, so deleting the list itself (not
	// its members) is sufficient.
	for ( list< ai_states_node_mc* >::iterator it = children->begin(); it != children->end(); ++it )
		delete *it;
	delete children;
	delete level_children;
	delete state;
};