/*
  Juggle: userspace balancing of multithreaded apps according to thread progress.
  Ref: "Juggle: Proactive Load Balancing on Multicore Computers", by
  Hofmeyr, Colmenares, Iancu, Kubiatowicz. HPDC 2011.

  Copyright (C) 2009 Steven Hofmeyr, Lawrence Berkeley National Laboratory

  This program is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.

  ---

*/

#include <execinfo.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <dirent.h>
#include <signal.h>
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/stat.h>

#include "utils.h"
#include "my_numa.h"
#include "nl.h"
#include "config.h"

// which debug categories are printed; DSTARTUP only by default
enum dbg_flag_t dbg_flags = DSTARTUP;

// I refer to cores in presentations, but Linux always refers to cpus so I do the same here

// for storing data on which tasks to swap
typedef struct {
	// index into _balancers of the slow cpu currently holding the behind task
	// (an array index, not a hardware cpu id, despite the name - see do_swaps)
	int cid;
	// index into that balancer's tasks[] array
	int tid;
} behind_task_t;

// per task data
typedef struct {
	// index in the _tasks array
	int index;
	// thread id 
	int pid;
	// cumulative and per-round real, user and system time (seconds)
	double rtime, elapsed_rtime;
	double utime, elapsed_utime;
	double stime, elapsed_stime;
	// speed = cpu time / real time for the last round; progress = total cpu time
	double speed, progress;
	int num_updates;
	int num_migrations;
	// set when the task was moved this round, so it is not moved twice
	int is_swapped;
	int is_running;
} task_t;

#define MAX_TASKS 128

// balancer data - it is per cpu
typedef struct {
	pthread_t pthread;
	// index in the _cpus array
	int index;
	// thread id
	pid_t pid;
	// cpu id
	int cid;
	// idle ticks over the whole run - set at beginning and used at the end to compute utilization
	long long start_idle_ticks;
	// the idle ticks in the previous round - used for calculated the utilization each round
	long long prev_round_idle_ticks;
	int num_updates;
	// timestamp of the last measurement round
	double current_time;
	// average speed of the running tasks on this cpu (NOT_SET until measured)
	double speed;
	// average progress of all threads on this cpu
	double av_progress;
	// array of pointers to tasks - we could add or drop tasks at any time so we can't do this
	// dynamically with just one malloc to start with
	task_t* tasks[MAX_TASKS];
	// protects tasks[]/num_local_tasks against concurrent migration
	pthread_mutex_t mx;
	#ifdef USE_NETLINK
	// for netlink communication
	netlink_data_t netlink_data;
	#endif
	// per-balancer seed for rand_r-style randomness
	unsigned int rnd_seed;
	int num_local_tasks;
	// a list of which tasks this balancer needs to swap, if it is on a fast core
	behind_task_t behind_tasks[MAX_TASKS];
	int num_behind_tasks;
	// counters for idle-balance steals (for the summary)
	int num_steals;
	int num_stolen;
} balancer_t; 

// global variables - always prefixed with _
balancer_t _balancers[MAX_CPUS];
config_t _config;
// synchronizes the per-round phases of all balancer threads
pthread_barrier_t _barrier;
// pid of the app being balanced (forked child, or read from the FIFO)
pid_t _exec_pid;
double _start_time;
// clock ticks per second, from sysconf; set once in main
double _clk_tick = 0;
pthread_mutex_t _global_mx = PTHREAD_MUTEX_INITIALIZER;
// these globals are modified by all threads and so the global lock must always be acquired beforehand
int _idle_task_pid = NOT_SET;
int _setting_swaps_numa_node = NOT_SET;
// these are arrays, one for each numa node
double* _av_task_progress = NULL;
double* _av_cpu_speed = NULL;
#ifdef COMPUTE_STDDEV
double _av_stddev = 0;
#endif

void init_balancer(int bi);
void exec_app();
void init_tasks();
int get_tids_from_procfs(int* tids);
int get_tids_from_file(int* tids);
void init_task(task_t* tp, int pid, int index);
void distribute_task(task_t* tp);
void *run_balancer(void *arg);
char* task_list_to_str(balancer_t* balancer, char *buf);
void balance_round(balancer_t* balancer);
void wait_for_app();
void summary();
void update_balancer(balancer_t* balancer);
double update_task(task_t *task, balancer_t* balancer, double utime, double stime, double rtime);
void do_idle_balance(balancer_t* balancer);
void set_swaps(balancer_t* balancer);
double get_av_cpu_speed(int numa_node);
double get_av_task_progress(int numa_node);
int get_least_av_progress_cpu(int numa_node);
int get_most_av_progress_cpu(int numa_node);
int get_num_behind_tasks(int numa_node);
int is_fast_cpu(balancer_t* balancer, int numa_node);
int is_slow_cpu(balancer_t* balancer, int numa_node);
int is_behind_task(task_t* task, int numa_node);
int is_ahead_task(task_t* task, int numa_node);
int get_next_fast_cpu(int* cindex, int numa_node);
int get_next_slow_cpu(int* cindex, int numa_node);
int get_next_behind_task_on_cpu(balancer_t* balancer, int numa_node);
void do_swaps(balancer_t* balancer);
void migrate_task(balancer_t* src_b, balancer_t* dst_b, task_t* task);


// Emergency SIGSEGV handler (installation is currently disabled in main):
// report the fault, stop all other balancer threads and the balanced app,
// dump this balancer's state, then exit.
// NOTE: printf/pthread_cancel are not async-signal-safe; tolerated here only
// because we are about to exit anyway.
void segfault_sigaction(int sig, siginfo_t* si, void* arg) {
	(void)sig;
	(void)arg;
	pid_t pid = syscall(__NR_gettid);
	pthread_t pthread = pthread_self();

	printf("Thread %d, caught segfault at address %p\n", pid, si->si_addr);

	// cancel every other balancer thread; remember our own balancer, if any
	balancer_t* balancer = NULL;
	for (int i = 0; i < _config.num_cpus; i++) {
		if (pthread != _balancers[i].pthread) pthread_cancel(_balancers[i].pthread);
		else balancer = &_balancers[i];
	}

	// make sure the execd program does not keep running
	kill(_exec_pid, SIGKILL);

	// the faulting thread may not be a balancer (e.g. the main thread), in
	// which case balancer stays NULL - previously it was dereferenced blindly
	if (balancer) {
		printf("CPU %d @ %.3f P %.3f local t %d behind t %d steals %d stolen %d\n", 
			   balancer->cid, balancer->speed, balancer->av_progress,
			   balancer->num_local_tasks, balancer->num_behind_tasks, 
			   balancer->num_steals, balancer->num_stolen);
		for (int i = 0; i < _config.num_tasks; i++) {
			if (balancer->tasks[i]) printf("Has task %d %d\n", i, balancer->tasks[i]->index);
		}
	}

	exit(0);
}

// Startup sequence: read config, optionally fork/exec the app, set up the
// per-cpu balancer structs and per-numa-node aggregates, start one balancer
// thread per managed cpu, then wait for the app to finish and print a summary.
int main(int argc, char* argv[]) {
	/*
	struct sigaction sa;
	bzero(&sa, sizeof(struct sigaction));
	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = segfault_sigaction;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);
	*/

	// check that sched_compat_yield is set to 1
	check_sched_compat_yield();
	_clk_tick = init_starting_time();
	get_config(argc, argv, &_config);
	init_numa(_config.enable_numa_migrate);
	// don't spawn the app to be balanced if we are going to attach to a running process
	if (!IS_EMPTY(_config.pid_fname)) _exec_pid = NOT_SET;
	else exec_app();
	#ifdef USE_SCHED_RR
	// set the scheduling policy to be FIFO, do it after starting the child
	// so the child does not inherit the realtime policy
	struct sched_param p = {
		.sched_priority = 1,
	};
	if (sched_setscheduler(getpid(), SCHED_FIFO|SCHED_RESET_ON_FORK, &p) == -1) {
		//WARN("Cannot set scheduling to FIFO: %d, %s", errno, strerror(errno));
	} else {
		printf(_BLUE_ "Balancer is running %s\n", "SCHED_FIFO" _END_);
	}
	#endif
	DBG(DSTARTUP, "Clock tick %.0f", _clk_tick);
	DBG(DSTARTUP, "Clock resolution %.0e", get_clock_res());
	DBG(DSTARTUP, "Hyperthreads per core %d", get_hyperthreads_per_core());
	// one aggregate slot per numa node; with numa migration enabled only a
	// single slot is allocated - NOTE(review): this assumes the whole machine
	// is then treated as one node; confirm get_num_numa_nodes() agrees
	if (_config.enable_numa_migrate) {
		_av_task_progress = calloc(1, sizeof(double));
		_av_cpu_speed = calloc(1, sizeof(double));
	} else {
		_av_task_progress = calloc(get_num_numa_nodes(), sizeof(double));
		_av_cpu_speed = calloc(get_num_numa_nodes(), sizeof(double));
	}
	// set up barrier for all cpus
	CHECK_CALL(pthread_barrier_init(&_barrier, NULL, _config.num_cpus));
	// initialize the balancer structures - one per cpu
	for (int i = 0; i < _config.num_cpus; i++) init_balancer(i);
	init_tasks();
	if (_exec_pid == NOT_SET) FAIL("Could not get the app %s", "pid");
	printf(_BLUE_ "Balancer pid %d is balancing app pid %d" _END_ "\n", getpid(), _exec_pid);
	_start_time = get_current_time();
	// start the balancers going
	for (int i = 0; i < _config.num_cpus; i++) {
		CHECK_PTHREAD_CALL(pthread_create(&_balancers[i].pthread, NULL, run_balancer, 
										  (void*)&_balancers[i]));
	}
	wait_for_app();
	summary();
	return 0;
}

// Fork and exec the application to be balanced; sets _exec_pid to the child
// pid in the parent. The child never returns from here (execvp or FAIL).
void exec_app() {
	// set the affinity for the main juggle process before any memory is allocated in the 
	// exec'd child. 
	// This is relevant on NUMA systems: the goal is to ensure that the tasks start up on the 
	// correct set of cpus
	set_process_affinity(_config.cids, _config.num_cpus);
	CHECK_CALL_RET(fork(), _exec_pid);
	if (_exec_pid == 0) {
		// child: replace ourselves with the target application
		DBG(DSTARTUP, "forked child is now execing %s ", _config.exec_args_str);
		if (execvp(_config.exec_name, _config.exec_args) == -1) {
			FAIL("Cannot execute %s: %s", _config.exec_name, strerror(errno));
		}
	}
}

void init_balancer(int bi) {
	// initialize the cpus
	double current_time = get_current_time();
	_balancers[bi].index = bi;
	_balancers[bi].num_updates = 0;
	_balancers[bi].cid = _config.cids[bi];
	_balancers[bi].current_time = current_time;
	_balancers[bi].speed = NOT_SET;
	_balancers[bi].av_progress = NOT_SET;
	_balancers[bi].start_idle_ticks = 0;
	_balancers[bi].prev_round_idle_ticks = 0;
	#ifdef USE_NETLINK
	_balancers[bi].netlink_data.nl_sd = -1;
	_balancers[bi].netlink_data.nl_id = -1;
	#endif
	_balancers[bi].pid = NOT_SET;
	_balancers[bi].rnd_seed = bi;
	_balancers[bi].num_local_tasks = 0;
	mx_init(&_balancers[bi].mx, PTHREAD_MUTEX_RECURSIVE);
	_balancers[bi].num_behind_tasks = 0;
	_balancers[bi].num_steals = 0;
	_balancers[bi].num_stolen = 0;
}

// Discover all the tasks (threads) of the app - either by scanning procfs or
// by reading the FIFO - then allocate a task_t for each and distribute them
// over the managed cpus.
void init_tasks() {
	int num_tasks = NOT_SET;
	int tids[MAX_TASKS];
	if (IS_EMPTY(_config.pid_fname)) num_tasks = get_tids_from_procfs(tids);
	else num_tasks = get_tids_from_file(tids);
	if (num_tasks != _config.num_tasks) {
		FAIL("Number of tasks found (%d) does not match expected (%d). "
			 "You may need to change the expected number (-n) "
			 "or increase the initialization time (-s)", num_tasks, _config.num_tasks);
	}
	for (int i = 0; i < _config.num_tasks; i++) {
		// no cast needed in C; sizeof *task keeps the size tied to the type
		task_t* task = malloc(sizeof *task);
		// the original did not check the allocation before use
		if (!task) FAIL("Out of memory allocating task %d of %d", i, _config.num_tasks);
		init_task(task, tids[i], i);
		distribute_task(task);
	}
}

int get_tids_from_file(int* tids) {
	if (access(_config.pid_fname, F_OK) == 0) {
		if (remove(_config.pid_fname) != 0) {
			FAIL("Could not remove existing file %s", _config.pid_fname);
		}
	}
	if (mkfifo(_config.pid_fname, S_IRUSR|S_IWUSR) != 0) {
		FAIL("Could not make FIFO %s, errno %d", _config.pid_fname, errno);
	}
	// if we are running sudo, make sure the fifo is user readable
	if (getuid() == 0) {
		// setting to owner nobody, group users
		chown(_config.pid_fname, 65534, 100);
		chmod(_config.pid_fname, S_IROTH|S_IWOTH|S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP);
	}
	FILE* f = fopen(_config.pid_fname, "r");
	if (!f) FAIL("Cannot open FIFO %s, errno %d", _config.pid_fname, errno);
	int num_tasks = 0;
	// the reads are blocking on the pipe
	if (fscanf(f, "%d %d", &num_tasks, &_exec_pid) != EOF) {
		for (int i = 0; i < num_tasks; i++) {
			int tid;
			if (fscanf(f, "%d", &tid) == EOF) break;
			tids[i] = tid;
			DBG(DTASKS, "Found task %d, pid %d", i, tid);
		}
	}
	fclose(f);
	return num_tasks;
}  

// discovers the app's tasks by polling /proc/<pid>/task, until either the
// expected number of tasks (-n) has been found or the initialization time (-s)
// has expired. (The FIFO-based alternative is get_tids_from_file above.)
// Scan /proc/<exec_pid>/task repeatedly, collecting new thread ids into tids[]
// until the expected number of tasks is found or the init time runs out.
// Returns the number of tasks found.
int get_tids_from_procfs(int* tids) {
	DIR *dp;
	struct dirent *dentry;
	char procfs_fname[100];

	// reads tasks as pids under the main parent pid proc dir
	sprintf(procfs_fname, "/proc/%d/task", _exec_pid);
	double start_init_time = get_current_time();
	int num_tasks = 0;
	while (1) {
		if ((dp = opendir(procfs_fname)) == NULL) {
			FAIL("Cannot open directory to read tasks for process %d: %s", _exec_pid, strerror(errno));
		}
		while ((dentry = readdir(dp)) != NULL) {
			if (strcmp(dentry->d_name, ".") && strcmp(dentry->d_name, "..")) {
				int pid = atoi(dentry->d_name);
				if (pid > 0) {
					// must be >=: tids[num_tasks] is written below, so
					// num_tasks == MAX_TASKS would already overflow (the
					// original checked > and was off by one)
					if (num_tasks >= MAX_TASKS) {
						FAIL("Too many tasks, max is %d", MAX_TASKS);
					}
					// check for new task 
					// FIXME: this should probably be a hash function
					int new_task = 1;
					for (int i = 0; i < num_tasks; i++) {
						if (tids[i] == pid) {
							new_task = 0;
							break;
						}
					}
					// the designated idle task (if configured) is not balanced
					if (IS_SET(_idle_task_pid) && _idle_task_pid == pid) new_task = 0;
					if (new_task) {
						if (_idle_task_pid == NOT_SET && IS_SET(_config.expected_idle_task) && 
							num_tasks == _config.expected_idle_task) {
							_idle_task_pid = pid;
							DBG(DSTARTUP, "Found idle task at %d, pid %d", num_tasks, _idle_task_pid);
						} else {
							tids[num_tasks] = pid;
							DBG(DTASKS, "Found task %d, pid %d", num_tasks, pid);
							num_tasks++;
						}
					}
				} else {
					FAIL("Cannot get PID from %s", dentry->d_name);
				}
			}
		}
		CHECK_CALL(closedir(dp));
		// stop when we time out or when we have found all expected tasks
		if (get_current_time() - start_init_time > _config.init_time) break;
		if (num_tasks == _config.num_tasks) break;
		// sleep a little before trying again
		usleep(10000);
	} 
	return num_tasks;
}

// Initialize a freshly allocated task_t: record its identity, zero all the
// accounting fields, and assume it is running.
void init_task(task_t* tp, int pid, int index) {
	tp->pid = pid;
	tp->index = index;
	// rtime starts NOT_SET so the first measurement round only records a
	// baseline (see update_task)
	tp->rtime = NOT_SET;
	tp->utime = 0;
	tp->stime = 0;
	tp->elapsed_rtime = 0;
	tp->elapsed_utime = 0;
	tp->elapsed_stime = 0;
	tp->speed = 0;
	tp->progress = 0;
	tp->num_updates = 0;
	tp->num_migrations = 0;
	tp->is_swapped = 0;
	tp->is_running = 1;
}

// Assign a task to an initial cpu and pin it there.
void distribute_task(task_t* tp) {
	// pin the task and set the cpu
	// the tasks are initially distributed evenly over the cpus. 
	// This is quite complicated, making sure that we don't put all the tasks on the early or 
	// late stage cpus
	// threshold is the largest multiple of num_cpus <= num_tasks; tasks below
	// it are placed round-robin, the remainder are spread across the cpu range
	int threshold = _config.num_cpus * (int)(_config.num_tasks / _config.num_cpus);
	int bi = tp->index % _config.num_cpus;
	if (tp->index >= threshold) {
		// scale the leftover tasks over the cpus; the +0.5 biases placement -
		// NOTE(review): looks intentional, confirm the rounding is desired
		bi = (tp->index - threshold) * ((double)_config.num_cpus + 0.5) / 
			(_config.num_tasks - threshold);
		if (bi >= _config.num_cpus) bi = 0;
	}
	// option to start everything on one cpu instead of spreading
	if (_config.no_init_spread) bi = 0;
	_balancers[bi].tasks[tp->index] = tp;
	_balancers[bi].num_local_tasks++;
	CHECK_CALL(pin_thread(tp->pid, _balancers[bi].cid));
	if (_config.sanity_check) check_is_pinned(_exec_pid, tp->pid, _balancers[bi].cid);
	DBG(DTASKS, "Initial task distribution, %d is pinned to cpu %d", 
		_balancers[bi].tasks[tp->index]->index, _balancers[bi].cid);
	if (_config.balancing == BALANCE_LOAD) {
		// when measuring Linux load balancing, immediately unpin again so the
		// kernel is free to move the task over all managed cpus
		CHECK_CALL(pin_thread_multi(tp->pid, _config.cids, _config.num_cpus));
		DBG(DTASKS, "Linux load balancing, %d is unpinned", _balancers[bi].tasks[tp->index]->index);
	} 
}

// Entry point of each balancer pthread (one per managed cpu): pin itself to
// its cpu, record the starting idle tick count, then run balance rounds
// forever. The thread exits via pthread_exit deeper in the call chain when
// the balanced app goes away.
void *run_balancer(void *arg) {
	balancer_t* balancer = (balancer_t*)arg;
	if (balancer->index < 0 || balancer->index >= _config.num_cpus) {
		FAIL("attempting to start cpu thread for invalid index %d", balancer->index);
	}
	// sanity check 
	if (IS_SET(balancer->pid)) {
		FAIL("attempting to start already started balancer %d on cpu %d", 
			 balancer->index, balancer->cid);
	}
	balancer->pid = syscall(__NR_gettid);
	DBG(DCORES, "Got pid %d for balancer %d", balancer->pid, balancer->index);
	// now pin to the relevant cpu
	CHECK_CALL(pin_thread(balancer->pid, balancer->cid));
	DBG(DCORES, "Pinned balancer %d on %d", balancer->index, balancer->cid);
	if (_config.sanity_check) check_is_pinned(getpid(), balancer->pid, balancer->cid);
	#ifdef USE_NETLINK
	if (_config.use_netlink) {
		// establish netlink channel
		setup_netlink(&balancer->netlink_data, balancer->pid, balancer->index);
	}
	#endif
	// it doesn't matter if we use the slow method because we only do this once
	// and this means we don't require the kernel module if we are not using idle feedback
	balancer->start_idle_ticks = get_cpu_idle_ticks(balancer->cid, 0);
	balancer->prev_round_idle_ticks = balancer->start_idle_ticks;
	char task_list[1000];
	DBG(DSTARTUP, "Balancer %d started with pid %d, %s", balancer->cid, balancer->pid, 
		task_list_to_str(balancer, task_list));
	// loop forever; termination happens via pthread_exit/pthread_cancel
	while (1) balance_round(balancer);
	return NULL;
}

// convert a list of tasks to a string
char* task_list_to_str(balancer_t* balancer, char *buf) {
	char tnum_buf[1000], tlist_buf[10000] = "";
	int num_local_tasks = 0;
	MX_LOCK(&balancer->mx); {
		for (int i = 0; i < _config.num_tasks; i++) {
			if (balancer->tasks[i]) {
				if (num_local_tasks) strcat(tlist_buf, " ");
				num_local_tasks++;
				sprintf(tnum_buf, "%d:%s%.2f%s", i, _GREEN_, balancer->tasks[i]->progress, _BLUE_);
				strcat(tlist_buf, tnum_buf);
			}
		}
		sprintf(buf, "%d tasks: [%s]", num_local_tasks, tlist_buf);
	} MX_UNLOCK(&balancer->mx);
	return buf;
}

// One balancing round, run lockstep by all balancer threads:
//   sleep an interval -> measure (update_balancer) -> idle-steal ->
//   elect per-node planners (set_swaps) -> execute the swaps (do_swaps).
// The barriers separate the phases; their order is load-bearing.
void balance_round(balancer_t* balancer) {
	// don't do anything if we are not speed balancing
	if (_config.balancing != BALANCE_SPEED) {
		sleep(10000000);
		return;
	}
	if (_config.add_randomness && balancer->index == 0) {
		// add up to 10% randomness to the sleep to avoid synchronization with the app
		// only do this for one balancer
		// NOTE(review): the magnitude depends on get_rnd's semantics - confirm
		// get_rnd(interval * 200, ...) / 1e3 really is capped at 10%
		double rnd_interval = _config.interval + get_rnd(_config.interval * 200, 
														 &balancer->rnd_seed) / 1e3;
		struct timespec rnd_ts;
		rnd_ts.tv_sec = (time_t)rnd_interval;
		rnd_ts.tv_nsec = (rnd_interval - (time_t)rnd_interval) * 1e+9;
		CHECK_CALL(nanosleep(&rnd_ts, NULL));
	} else CHECK_CALL(nanosleep(&_config.interval_ts, NULL));
	// need the barrier here because balancers could sleep different amounts
	if (_config.add_randomness) {
		pthread_barrier_wait(&_barrier);
	}
	update_balancer(balancer);
	pthread_barrier_wait(&_barrier);
	// before we do swaps, check to see if this core is idle and needs to pull a task
	// the core could be idle because a thread has stopped running
	if (IS_SET(balancer->speed)) do_idle_balance(balancer);
	// done in serial
	if (balancer->index == 0) _setting_swaps_numa_node = 0;
	pthread_barrier_wait(&_barrier);
	// one balancer per numa node plans the swaps (see set_swaps)
	if (IS_SET(balancer->speed)) set_swaps(balancer);
	pthread_barrier_wait(&_barrier);
	// every balancer then executes the swaps assigned to it
	do_swaps(balancer);
	pthread_barrier_wait(&_barrier);
}

// Measure every task currently on this cpu and refresh the per-cpu aggregates
// (speed, av_progress, num_local_tasks). If any task's sample is unusable the
// whole round is skipped for this cpu (updated = 0).
void update_balancer(balancer_t* balancer) {
	double sum_speeds = 0, sum_progress = 0;
	double elapsed_time;
	double utilization = 1.0;
	long long idle_ticks = 0;
	elapsed_time = get_current_time() - balancer->current_time;
	// compute CPU utilization so we can better estimate task speeds
	if (_config.use_cpu_idle) {
		idle_ticks = get_cpu_idle_ticks(balancer->cid, 1);
		double delta_idle_ticks = idle_ticks - balancer->prev_round_idle_ticks;
		utilization = 1.0 - delta_idle_ticks / _clk_tick / elapsed_time;
	}

	// FIXME: this should be a linked list or something - inefficient to iterate through 
	// all tasks
	int num_local_tasks = 0;
	int updated = 1;
	for (int i = 0; i < _config.num_tasks; i++) {
		task_t* task = balancer->tasks[i];
		// the task is on this cpu
		if (task) {
			double stime = 0, utime = 0, rtime = 0;
			// If there is an error here, it means we are done balancing - the tasks no longer 
			// exist so we stop the thread
			#ifdef USE_NETLINK
			if (_config.use_netlink) {
				int err = 0;
				if ((err = get_task_stats_from_nl(&balancer->netlink_data, balancer->pid, 
												  task->pid, &utime, &stime, &rtime)) < 0) {
					if (err == -3) pthread_exit(NULL);
					else {
						// err is negative: negate it once to log the real errno
						// (the original negated twice, logging a negative value)
						WARN("Switching to using %s for task stats, errno %d: %s", "procfs",
							 -err, strerror(-err));
						// FIXME: this should probably be atomic
						_config.use_netlink = 0;
					}
				}
			} 
			if (!_config.use_netlink) {
				if (get_task_stats_from_proc(_exec_pid, task->pid, &utime, &stime, &rtime) == -1) {
					DBG(DTASKS, "task %d exits", i);
					pthread_exit(NULL);
				}
			}
			#else
			if (get_task_stats_from_proc(_exec_pid, task->pid, &utime, &stime, &rtime) == -1) {
				DBG(DTASKS, "task %d exits", i);
				pthread_exit(NULL);
			}
			#endif

			double task_speed = update_task(task, balancer, utime, stime, rtime);
			if (task_speed == NOT_SET) {
				// an unusable sample invalidates the whole round for this cpu
				balancer->speed = NOT_SET;
				updated = 0;
				break;
			}

			if (task->is_running) {
				num_local_tasks++;
				// adjusted for CPU utilization - this wouldn't be needed for yields
				task_speed /= utilization;
				if (task_speed > 1.0) task_speed = 1.0;
				sum_speeds += task_speed;
				sum_progress += balancer->tasks[i]->progress;
			}
		}
	}
	if (updated) {
		if (_config.use_cpu_idle) {
			balancer->prev_round_idle_ticks = idle_ticks;
		}
		balancer->current_time = get_current_time();
		balancer->num_local_tasks = num_local_tasks;
		if (num_local_tasks) {
			balancer->speed = sum_speeds / (double)num_local_tasks;
			balancer->av_progress = sum_progress / (double)num_local_tasks;
		} else {
			// NOTE(review): av_progress keeps its old value when the cpu has
			// no running tasks - confirm that is intended
			balancer->speed = 1.0;	// if we have no tasks on this cpu, set the speed to 1
		}
		balancer->num_updates++;
		if (dbg_flags & (1 << DCORES)) {
			char task_list[2000] = "";
			DBG(DCORES, "CPU %d @ %.3f P %.3f U %.3f, %s", balancer->cid, balancer->speed, 
				balancer->av_progress, utilization, task_list_to_str(balancer, task_list));
		}
	}
}

// Update one task's accounting from fresh utime/stime/rtime samples.
// Returns the task's speed for this round, or NOT_SET when the sample is
// unusable (first sample for this task, or too little real time elapsed).
double update_task(task_t *task, balancer_t* balancer, double utime, double stime, double rtime) {
	// first sample: just record the baseline, nothing to compute yet
	if (task->rtime == NOT_SET) {
		task->rtime = rtime;
		task->utime = utime;
		task->stime = stime;
		return NOT_SET;
	}
	// the elapsed realtime should be approx the same as an interval, anything else and we 
	// haven't updated fully, in which case we skip this round
	double elapsed_rtime = rtime - task->rtime;
	if (elapsed_rtime < _config.interval) {
		// log the freshly computed elapsed time (the original printed the
		// stale task->elapsed_rtime field here)
		DBG(DTASKS, "balancer pid %d, cpu %d, task %d NOT updated: elapsed rtime %.3f", 
			balancer->pid, balancer->cid, task->index, elapsed_rtime);
		return NOT_SET;
	}

	int was_running = task->is_running;
	// it may be too inefficient to do this every round
	task->is_running = is_task_running(task->pid);
	if (was_running && !task->is_running) {
		DBG(DTASKS, "Task %d has stopped running", task->index);
	}
	// make sure we clear all the swap info
	task->is_swapped = 0;
	task->elapsed_utime = utime - task->utime;
	// avoid an exactly-zero elapsed utime (keeps later divisions well-defined)
	if (task->elapsed_utime == 0) task->elapsed_utime = 1e-10;
	task->utime = utime;
	task->elapsed_stime = stime - task->stime;
	task->stime = stime;
	task->elapsed_rtime = elapsed_rtime;
	task->rtime = rtime;
	#ifdef RND_MIGRATION
	// random speeds/progress for exercising the migration logic. The seed must
	// be passed by address, as at every other get_rnd call site (the original
	// passed it by value here, which would not even compile consistently)
	task->progress = (double)(get_rnd(100, &balancer->rnd_seed) + 1.0) / 101.0;
	task->speed = (double)(get_rnd(100, &balancer->rnd_seed) + 1.0) / 101.0;
	#else
	double elapsed_time = task->elapsed_utime + task->elapsed_stime;
	task->speed = elapsed_time / task->elapsed_rtime;
	// task speed can never be more than one, if it is, this is due to measurement errors from the OS
	if (task->speed > 1) task->speed = 1;
	task->progress += elapsed_time;
	task->num_updates++;
	#endif
	DBG(DTASKS, "b pid %d, cpu %d, tsk %d spd %.3f, prg %.3f, ut %.3f st %.3f rt %.3f rnng %d",
		balancer->pid, balancer->cid, task->index, task->speed, task->progress, task->elapsed_utime, 
		task->elapsed_stime, task->elapsed_rtime, task->is_running);
	return task->speed;
}

// If this cpu has no tasks, steal the first running, not-yet-swapped task
// from any cpu on the same numa node that has more than one local task.
// The source balancer's mutex is held while inspecting and migrating.
void do_idle_balance(balancer_t* balancer) {
	if (balancer->num_local_tasks) return;
	DBG(DBALANCE, "Balancer %d is idle, looking for a task", balancer->index);
	// find the first cpu with more than one local task on this numa node
	int numa_node = get_numa_node(balancer->cid);
	for (int i = 0, found = 0; i < _config.num_cpus && !found; i++) {
		if (i == balancer->index) continue;
		balancer_t* src_balancer = &_balancers[i];
		if (!same_numa_node(numa_node, src_balancer->cid)) continue;
		MX_LOCK(&src_balancer->mx); {
			if (src_balancer->num_local_tasks > 1) {
				for (int ti = 0; ti < _config.num_tasks; ti++) {
					task_t* task = src_balancer->tasks[ti];
					if (task && task->is_running && !task->is_swapped) {
						migrate_task(src_balancer, balancer, task);
						found = 1;
						DBG(DMIGRATE, "Migrating task c%d t%d (%.1f, %d) --> c%d", 
							src_balancer->cid, task->index, src_balancer->speed, 
							src_balancer->num_local_tasks, balancer->cid);
						balancer->num_steals++;
						src_balancer->num_stolen++;
						break;
					}
				}
			}
		} MX_UNLOCK(&src_balancer->mx);
	}
}

// Plan the swaps for one numa node. Exactly one balancer per node does this
// work (elected via _setting_swaps_numa_node under the global lock); the
// others return immediately. The plan is recorded in each fast balancer's
// behind_tasks[] and executed later by do_swaps.
void set_swaps(balancer_t* balancer) {
	// the first balancer to get the lock does all this to work to determine how the tasks should be 
	// redistributed. We do this so that the fastest cpu to get here does all the work
	// for NUMA, we have a different balancer doing it for each NUMA node
	int numa_node = NOT_SET;
	MX_LOCK(&_global_mx); {
		if (_setting_swaps_numa_node < get_num_numa_nodes()) {
			numa_node = _setting_swaps_numa_node;
			_setting_swaps_numa_node++;
		}
	} MX_UNLOCK(&_global_mx);
	if (numa_node == NOT_SET) return;

	// below this is the work done by a single balancer per NUMA node
	_av_cpu_speed[numa_node] = get_av_cpu_speed(numa_node);
	_av_task_progress[numa_node] = get_av_task_progress(numa_node);
	if (_av_task_progress[numa_node] == NOT_SET) return;
	int start_slow_cid = get_least_av_progress_cpu(numa_node);
	int start_fast_cid = get_most_av_progress_cpu(numa_node);
	int num_behind_tasks = get_num_behind_tasks(numa_node);
	DBG(DBALANCE, "Balancer %d distributing %d tasks on NUMA node %d, "
		"av cpu speed %.3f, av task prog %.3f", 
		balancer->index, num_behind_tasks, numa_node, _av_cpu_speed[numa_node], 
		_av_task_progress[numa_node]);
	// redistribute all behind tasks until we run out of fast slots.
	// NOTE(review): if the slow cpu found has no behind task we 'continue'
	// without decrementing num_behind_tasks - looks like it relies on the
	// counts staying consistent; confirm this cannot spin if another thread
	// (e.g. do_idle_balance) changes task state concurrently
	while (num_behind_tasks) {
		int slow_cid = get_next_slow_cpu(&start_slow_cid, numa_node);
		if (slow_cid == NOT_SET) break;
		int behind_tid = get_next_behind_task_on_cpu(&_balancers[slow_cid], numa_node);
		if (behind_tid == NOT_SET) continue;
		// find a fast slot for this one
		int fast_cid = get_next_fast_cpu(&start_fast_cid, numa_node);
		if (fast_cid == NOT_SET) break;
		balancer_t* fast_balancer = &_balancers[fast_cid];
		fast_balancer->behind_tasks[fast_balancer->num_behind_tasks].cid = slow_cid;
		fast_balancer->behind_tasks[fast_balancer->num_behind_tasks].tid = behind_tid;
		fast_balancer->num_behind_tasks++;
		// make sure we don't try assigning this a second time
		_balancers[slow_cid].tasks[behind_tid]->is_swapped = 1;
		num_behind_tasks--;
		DBG(DBALANCE, "Assigned behind task %d from slow cpu %d to fast cpu %d",
			behind_tid, slow_cid, fast_cid);
	} // while
}

// Execute the swaps planned by set_swaps: for each behind task assigned to
// this (fast) balancer, exchange it with one of our ahead tasks. The slow
// balancer's mutex is held for the paired migration.
void do_swaps(balancer_t* balancer) {
	if (balancer->num_behind_tasks) {
		DBG(DBALANCE, "Balancer %d on cpu %d is doing %d swaps", balancer->index, balancer->cid,
			balancer->num_behind_tasks);
	}
	int numa_node = get_numa_node(balancer->cid);
	int next_task_i = 0;
	for (int i = 0; i < balancer->num_behind_tasks; i++) {
		balancer_t* slow_balancer = &_balancers[balancer->behind_tasks[i].cid];
		task_t* behind_task = slow_balancer->tasks[balancer->behind_tasks[i].tid];
		// find our next ahead task. The bounds check must come before the
		// array access: the original indexed tasks[next_task_i] first, reading
		// one past the end when no ahead task remained
		while (next_task_i < _config.num_tasks && 
			   !is_ahead_task(balancer->tasks[next_task_i], numa_node)) {
			next_task_i++;
		}
		if (next_task_i == _config.num_tasks) break;
		task_t* ahead_task = balancer->tasks[next_task_i];
		DBG(DMIGRATE, "Swapping tasks c%d t%d (%.1f, %d) <-> c%d t%d (%.1f, %d)", 
			balancer->cid, ahead_task->index, balancer->speed, balancer->num_local_tasks,
			slow_balancer->cid, behind_task->index, slow_balancer->speed, 
			slow_balancer->num_local_tasks);
		MX_LOCK(&slow_balancer->mx); {
			migrate_task(slow_balancer, balancer, behind_task);
			migrate_task(balancer, slow_balancer, ahead_task);
		} MX_UNLOCK(&slow_balancer->mx);
	}
	// the plan has been consumed (fully or as far as possible)
	balancer->num_behind_tasks = 0;
}

// Move a task from the src balancer to the dst balancer: repin its thread to
// the destination cpu and update the bookkeeping on both sides. If the thread
// can no longer be pinned it has gone away, and this balancer thread exits.
void migrate_task(balancer_t* src_b, balancer_t* dst_b, task_t* task) {
	const int dst_cid = dst_b->cid;
	if (pin_thread(task->pid, dst_cid) == -1) {
		// the task no longer exists - this is cause for thread termination
		pthread_exit(NULL);
	}
	if (_config.sanity_check) check_is_pinned(_exec_pid, task->pid, dst_cid);
	// mark it migrated so it cannot be moved again this round
	task->is_swapped = 1;
	task->num_migrations++;
	// hand the task over between the two task tables
	dst_b->tasks[task->index] = task;
	src_b->tasks[task->index] = NULL;
	dst_b->num_local_tasks++;
	src_b->num_local_tasks--;
}

// Scan this balancer's task slots in index order and return the index of the
// first behind task, or NOT_SET if it has none.
int get_next_behind_task_on_cpu(balancer_t* balancer, int numa_node) {
	int found = NOT_SET;
	for (int ti = 0; ti < _config.num_tasks && found == NOT_SET; ti++) {
		if (is_behind_task(balancer->tasks[ti], numa_node)) found = ti;
	}
	return found;
}

// Average the measured speed of all managed cpus on this numa node.
double get_av_cpu_speed(int numa_node) {
	double av_cpu_speed = 0;
	int num_cpus = 0;
	for (int i = 0; i < _config.num_cpus; i++) {
		if (get_numa_node(_balancers[i].cid) == numa_node) {
			// NOTE(review): cpus whose speed is still NOT_SET are folded into
			// the average - confirm callers only run this after a full round
			av_cpu_speed += _balancers[i].speed;
			num_cpus++;
		}
	}
	// a node with no managed cpus would otherwise divide by zero (the original
	// returned NaN in that case)
	if (num_cpus == 0) return 0;
	return av_cpu_speed / (double)num_cpus;
}

double get_av_task_progress(int numa_node) {
	double av_task_progress = 0;
	int num_active_tasks = 0;
	for (int i = 0; i < _config.num_cpus; i++) {
		if (get_numa_node(_balancers[i].cid) == numa_node) {
			for (int ti = 0; ti < _config.num_tasks; ti++) {
				if (_balancers[i].tasks[ti] && _balancers[i].tasks[ti]->is_running) {
					av_task_progress += _balancers[i].tasks[ti]->progress;
					num_active_tasks++;
				}
			}
		}
	}
	if (!num_active_tasks) return NOT_SET;
	av_task_progress /= (double)num_active_tasks;
	return av_task_progress;
}

// Return the index (into _balancers - not a hardware cpu id) of the cpu on
// this numa node with the lowest average task progress; 0 if no cpu on the
// node beats the large sentinel.
int get_least_av_progress_cpu(int numa_node) {
	int best_index = 0;
	double best_progress = 1e12;
	for (int i = 0; i < _config.num_cpus; i++) {
		if (!same_numa_node(numa_node, _balancers[i].cid)) continue;
		double p = _balancers[i].av_progress;
		if (p < best_progress) {
			best_progress = p;
			best_index = i;
		}
	}
	return best_index;
}

// Return the index (into _balancers - not a hardware cpu id) of the cpu on
// this numa node with the highest average task progress; 0 if none exceeds 0.
int get_most_av_progress_cpu(int numa_node) {
	int best_index = 0;
	double best_progress = 0;
	for (int i = 0; i < _config.num_cpus; i++) {
		if (!same_numa_node(numa_node, _balancers[i].cid)) continue;
		double p = _balancers[i].av_progress;
		if (p > best_progress) {
			best_progress = p;
			best_index = i;
		}
	}
	return best_index;
}

// Count the behind tasks sitting on slow cpus of this numa node. Under
// COMPUTE_STDDEV also accumulates the stddev of their speeds into the global
// _av_stddev (under the global lock). speeds[MAX_TASKS] is safe because the
// count is bounded by _config.num_tasks <= MAX_TASKS.
int get_num_behind_tasks(int numa_node) {
	// we get the standard deviation here too
	double mean_speed = 0;
	#ifdef COMPUTE_STDDEV
	double speeds[MAX_TASKS];
	#endif
	int num_behind_tasks = 0;
	for (int i = 0; i < _config.num_cpus; i++) {
		if (is_slow_cpu(&_balancers[i], numa_node)) {
			for (int ti = 0; ti < _config.num_tasks; ti++) {
				if (is_behind_task(_balancers[i].tasks[ti], numa_node)) {
					#ifdef COMPUTE_STDDEV
					speeds[num_behind_tasks] = _balancers[i].tasks[ti]->speed;
					#endif
					mean_speed += _balancers[i].tasks[ti]->speed;
					num_behind_tasks++;
				}
			}
		}
	}
	#ifdef COMPUTE_STDDEV
	if (num_behind_tasks) {
		mean_speed /= (double)num_behind_tasks;
		double sum_terms = 0;
		for (int i = 0; i < num_behind_tasks; i++) {
			double diff = speeds[i] - mean_speed;
			sum_terms += (diff * diff);
		}
		// population standard deviation of the behind tasks' speeds
		double stddev = sqrt(1.0 / (double)num_behind_tasks * sum_terms);
		MX_LOCK(&_global_mx); {_av_stddev += stddev;} MX_UNLOCK(&_global_mx);
	}
	#endif
	return num_behind_tasks;
}

// A behind task is a running, not-yet-swapped task whose progress is more
// than the configured threshold below the numa node's average progress.
int is_behind_task(task_t* task, int numa_node) {
	if (!task || task->is_swapped || !task->is_running) return 0;
	return task->progress < _av_task_progress[numa_node] - _config.task_progress_threshold;
}

// An ahead task is a running, not-yet-swapped task that is NOT behind, i.e.
// its progress is within the threshold of (or above) the node average.
int is_ahead_task(task_t* task, int numa_node) {
	if (!task || task->is_swapped || !task->is_running) return 0;
	return task->progress >= _av_task_progress[numa_node] - _config.task_progress_threshold;
}

// this is called by only one thread, so it doesn't have to be thread-safe
// Round-robin scan (starting at *cindex, wrapping at num_cpus) for the next
// fast cpu on this node that still has capacity for another behind task.
// Advances *cindex past the returned cpu; NOT_SET if none found after
// num_cpus+1 probes. Called by only one thread, so no locking is needed.
int get_next_fast_cpu(int* cindex, int numa_node) {
	for (int probes = 0; probes < _config.num_cpus + 1; probes++) {
		int ci = *cindex;
		// advance the cursor with wrap-around before deciding
		*cindex = (ci + 1 == _config.num_cpus) ? 0 : ci + 1;
		if (is_fast_cpu(&_balancers[ci], numa_node) && 
			_balancers[ci].num_behind_tasks < _balancers[ci].num_local_tasks) {
			return ci;
		}
	}
	return NOT_SET;
}

// Round-robin scan (starting at *cindex, wrapping at num_cpus) for the next
// slow cpu on this node. Advances *cindex past the returned cpu; NOT_SET if
// none found after num_cpus+1 probes.
int get_next_slow_cpu(int* cindex, int numa_node) {
	for (int probes = 0; probes < _config.num_cpus + 1; probes++) {
		int ci = *cindex;
		// advance the cursor with wrap-around before deciding
		*cindex = (ci + 1 == _config.num_cpus) ? 0 : ci + 1;
		if (is_slow_cpu(&_balancers[ci], numa_node)) return ci;
	}
	return NOT_SET;
}

// A cpu is fast if its speed has been measured, it lives on the requested
// numa node (when one is given), and its speed is above the node average.
int is_fast_cpu(balancer_t* balancer, int numa_node) {
	if (balancer->speed == NOT_SET) return 0;
	if (IS_SET(numa_node) && get_numa_node(balancer->cid) != numa_node) return 0;
	return balancer->speed > _av_cpu_speed[numa_node];
}

// A cpu is slow if its speed has been measured, it lives on the requested
// numa node (when one is given), and its speed is below the node average.
int is_slow_cpu(balancer_t* balancer, int numa_node) {
	if (balancer->speed == NOT_SET) return 0;
	if (IS_SET(numa_node) && get_numa_node(balancer->cid) != numa_node) return 0;
	return balancer->speed < _av_cpu_speed[numa_node];
}

// Block until the balanced app terminates: waitpid for the forked child, or,
// when attached to an external process, poll its /proc entry.
void wait_for_app() {
	if (IS_EMPTY(_config.pid_fname)) {
		// wait for the forked child 
		int wait_status;
		pid_t wait_pid;
		CHECK_CALL_RET(waitpid(_exec_pid, &wait_status, 0), wait_pid);
		if (WIFEXITED(wait_status)) {
			if (WEXITSTATUS(wait_status) != 0) {
				FAIL("%s, pid %d: exit status: %d", 
					 _config.exec_name, wait_pid, WEXITSTATUS(wait_status));
			}
		} else FAIL("Could not run '%s'", _config.exec_args_str);
	} else {
		// we cannot rely on the balancer threads always terminating when they can no 
		// longer read the proc file info: an occasional race occurs where a balancer 
		// thread opens the proc file successfully, but then the app terminates and the
		// fscanf following the open never aborts, so the thread just hangs waiting for 
		// input. To cope with this we need this master thread to loop and check that 
		// the main proc file still exists
		char procfs_fname[255];
		sprintf(procfs_fname, "/proc/%d", _exec_pid);
		do {
			// poll once a second (1e6 microseconds)
			usleep(1e6);
		} while (access(procfs_fname, F_OK) == 0);
	}
}

// Final report after the app exits: cancel remaining balancer threads, then
// print total utilization, migration counts, optional average stddev, and
// the balancer's own cpu cost; finally remove the FIFO if one was created.
void summary() {
	// kill all pthreads
	double tot_idle_time = 0;
	int total_migrations = 0;
	double elapsed_time = get_current_time() - _start_time;
	DBG(DSUMMARY, "Elapsed time %.3f", elapsed_time);
	if (_config.balancing != BALANCE_SPEED) {
		for (int i = 0; i < _config.num_cpus; i++) {
			// don't check the return because this may fail if the thread has already exited
			pthread_cancel(_balancers[i].pthread);
		}
	}
	for (int i = 0; i < _config.num_cpus; i++) {
		// get the difference in the idle time - use fast cpu idle method if possible
		long long idle_ticks = get_cpu_idle_ticks(_balancers[i].cid, 0);
		double delta_idle_ticks = idle_ticks - _balancers[i].start_idle_ticks;
		double idle_time = delta_idle_ticks / _clk_tick;
		tot_idle_time += idle_time;
		// sum migrations over the tasks that ended up on this cpu
		for (int t = 0; t < _config.num_tasks; t++) {
			if (_balancers[i].tasks[t]) {
				total_migrations += _balancers[i].tasks[t]->num_migrations;
			}
		}
	}
	DBG(DSUMMARY, "Total utilization %.3f", 
		1.0 - tot_idle_time / (elapsed_time * (double)_config.num_cpus));
	DBG(DSUMMARY, "Total migrations %d", total_migrations);
	#ifdef COMPUTE_STDDEV
	// NOTE(review): divides by balancer 0's update count - NaN if it never
	// completed a round; confirm that case cannot be reached here
	DBG(DSUMMARY, "Average stddev %.3f", _av_stddev / (double)_balancers[0].num_updates);
	#endif
	// report the balancer's own cpu consumption, averaged per managed cpu
	struct rusage usage;
	getrusage(RUSAGE_SELF, &usage);
	double ut = (double)usage.ru_utime.tv_sec + (double)usage.ru_utime.tv_usec / 1000000.0;
	double st = (double)usage.ru_stime.tv_sec + (double)usage.ru_stime.tv_usec / 1000000.0;
	DBG(DSUMMARY, "Balancer compute time per cpu: u %.3f, s %.3f, sum %.3f", 
		ut / _config.num_cpus, st / _config.num_cpus, (ut + st) / _config.num_cpus);
	// get rid of the pipe
	remove(_config.pid_fname);
}


