#!/usr/bin/perl
#
# usage:
#  solve problemType problemID node [node node ...]

# Installation root; can be overridden through the CHOPSET_HOME env var.
$HOME_DIR = "/home/one-tenth";
$HOME_DIR = $ENV{'CHOPSET_HOME'} if($ENV{'CHOPSET_HOME'} ne '');

require "$HOME_DIR/config.ph";

use IO::Pipe;
use IO::File;

# Install signal handlers as code refs.  The original bareword form
# ($SIG{'CHLD'} = sigchld_handler) only resolves to the sub name by
# accident under "no strict" and breaks the moment strict is enabled.
$SIG{'CHLD'} = \&sigchld_handler;
$SIG{'ALRM'} = \&sigAlarmHandler;

# Parse options: a leading -n sets $opt_n (dry-run style flag kept for
# compatibility with callers; its value is consumed elsewhere, if at all).
$opt_n = 0;
if($ARGV[0] eq '-n'){
	$opt_n = 1;
	shift(@ARGV);
}
($type, $problem, @nodesTemp) = @ARGV;

# Keep only the nodes that are currently alive.
@nodes = &cropAliveNodes(@nodesTemp);

# error check
#
# check for Chopper and Setter alive
$Chopper = "$PROBLEMS_DIR/$type/Chopper";
$Setter  = "$PROBLEMS_DIR/$type/Setter";
if(!(-x $Chopper) || !(-x $Setter)){
	print("error: $Chopper or $Setter can not execute.\n");
	exit(1);
}

# check for nodes
if($#nodes < 0){
	print("error: no node defined. ARGV: @ARGV\n");
	exit(1);
}

# check for problem file
$problemFile = "$PROBLEMS_DIR/$type/$problem";
# Quote for the shell: $problemFile is later embedded inside single quotes
# on a system() command line.  Inside single quotes the sequence \' does
# NOT escape anything; the portable idiom for a literal quote is '\''.
# BUG FIX: the old  s/'/\\'/  used the wrong escape AND lacked /g, so only
# the first quote was (incorrectly) rewritten.
$problemFile =~ s/'/'\\''/g;
#if(!(-f $problemFile)){
#	print("error: problem file $problemFile not found.\n");
#	exit(1);
#}
$nodeNum = $#nodes + 1;
$tmpDir = &createWorkingDirectory("${type}_$problem");
$ID = $tmpDir; $ID =~ s|.*/||;	# job ID = basename of the working directory
$SetterResultFile = "$tmpDir/result";

# try create working directory
if(!mkdir("$tmpDir/jobs") || !mkdir("$tmpDir/returns")){
	`rm -rf $tmpDir`;
	print("error: can not create $tmpDir/jobs or $tmpDir/returns\n");
	exit(1);
}

# run Chopper
# Split the problem into (up to) $nodeNum numbered chunk files in jobs/.
#print("$Chopper $tmpDir/jobs '$problemFile' $nodeNum\n");
$ret = system("$Chopper $tmpDir/jobs '$problemFile' $nodeNum");
if(($ret >> 8) != 0){
	`rm -rf $tmpDir`;
	print("error: $Chopper return error. (Chopper return $ret)\n");
	exit(1);
}

# check created chop list
if(!opendir(DIR, "$tmpDir/jobs")){
	`rm -rf $tmpDir`;
	print("error: can not readdir $tmpDir\n");
	exit(1);
}
# Pair each chunk file with a node.  Chunks are numerically named; "." and
# ".." evaluate to 0 in numeric context, so "$file > 0" skips them.
my(%queue);
while(defined($file = readdir(DIR))){
	if($file > 0 && $#nodes >= 0){
		$queue{"$tmpDir/jobs/$file"} = shift @nodes;
	}
}
closedir(DIR);
if($#nodes >= 0){
	`rm -rf $tmpDir`;
	# BUG FIX: the old message interpolated "$#nodes+1" as the value of
	# $#nodes followed by a literal "+1"; compute the count up front.
	my($shortage) = $#nodes + 1;
	print("error: chop queue num too small. more $shortage need.\n");
	exit(1);
}

# queueing!
print("process queueing. id: \"$ID\" node: ", join(', ', values(%queue)), "\n");
# become daemon.
&daemon();
#alarm 1;
# For every chunk, start one wget worker that POSTs the chunk to its node's
# worker CGI and writes the response under returns/<node>.
foreach $key (keys(%queue)){
	my($target) = $key;
	$target =~ s|/jobs/\d+|/returns/$queue{$key}|;
	# BUG FIX: "$type_${ID}" was parsed as the (undefined) variable $type_;
	# brace the name so the underscore stays a literal separator.
	my($pid, $outFile) = &createWorker(
		#&node2uri($queue{$key}). $type. ".cgi?${ID}_$queue{$key}", $key, $target);
		&node2uri($queue{$key}). "worker.cgi?${type}_${ID}_$queue{$key}", $key, $target);
	push(@outFiles, $outFile);
	$workers{$pid} = $outFile;	# BUG FIX: was $outfile (typo, always undef)
	$workNodes{$queue{$key}} = $pid;
	$workProblems{$pid} = $ID;
	&addNodeJob($queue{$key}, $ID);
}

#print("process queued. id: \"$ID\" node: ", join(', ', keys(%workNodes)), "\n");
# main loop.
# Poll once per second until every wget worker child has exited; the
# SIGCHLD handler removes entries from %workers as children are reaped.
while(1){
	sleep(1);
	if(&checkSetter() != 0){
		# success.
		# The Setter can already produce a result from the outputs
		# collected so far: ask every node still computing to abandon its
		# job.  The loop keeps running until the children actually exit.
		my($node);
		foreach $node (keys(%workNodes)){
			&killJob($ID, $node);
		}
	}
	my(@workerPids) = keys(%workers);
	if($#workerPids < 0){
		# all job finished.
		#print("all process done.\n");
		last;
	}
	# If the destroyed-nodes file was modified within the last 10 seconds,
	# SIGINT the wget processes whose nodes have vanished.
	# NOTE(review): if stat() fails the mtime is undef and this branch is
	# silently skipped — presumably intended; confirm the file always exists.
	if((stat($DESTROYED_NODES_FILE))[9] + 10 > time()){
		@destroyedNodes = listDestroyedNodes(keys(%workNodes));
		foreach $node (@destroyedNodes){
			kill(2, $workNodes{$node});
		}
	}
}

# set Setter result to resultFile
# Merge all worker outputs with the Setter, persist the merged result in
# the working directory, then echo it to our own stdout.
`$Setter @outFiles > $SetterResultFile`;
if(open(OUT, "<$SetterResultFile")){
	print <OUT>;
	close(OUT);
}

# normal exit
exit(0);


#
# sub routines.
#

# run Setter for test.
# Trial-run the Setter over the worker output files collected so far
# (global @outFiles) and return 1 if its output starts with "success.",
# else 0.  Used to detect when partial results are already sufficient.
sub checkSetter {
	#my($SetterResultFile, @outFiles) = @_;
	# NOTE(review): this chdir()s into the returns directory and never
	# chdirs back, so the process CWD changes as a side effect.  The rest
	# of the script appears to use absolute paths only — confirm.
	my($result);
	my(@files) = @outFiles;
	my($i, $dir);
	# Strip the common directory prefix off each file name (the s///
	# mutates the local copies in @files), remembering the directory.
	for($i = 0; $i <= $#files; $i++){
		$dir = $1 if($files[$i] =~ s|^(.*)/||);
	}
	chdir($dir);
	# Pipe-open the Setter and slurp its entire output.
	# NOTE(review): the close() is deliberately commented out; the child
	# ends up reaped by the global SIGCHLD handler instead — confirm this
	# interplay is intended before re-enabling it.
	if(open(SETTER, "$Setter @files|")){
		$result = join('', <SETTER>);
	#	close(SETTER);
	}
	#$result = join('', `(cd $dir; $Setter @files)`);
	#$result = join('', `$Setter @files`);
	if($result =~ m/^success./){
		return 1;
	}
	return 0;
}

# for wget process(reading for worker node process) exit.
# SIGCHLD handler: reap one exited child.  If it was one of our scheduled
# wget workers, drop its bookkeeping entries; then, if the Setter can
# already build a successful result, cancel the remaining nodes' jobs.
sub sigchld_handler {
	my($pid) = wait;
	return if($pid < 0);	# wait() failed: no child to reap
	delete($workers{$pid});
	my($key);
	my($sceduledProcess) = 0;	# set to 1 if $pid was a tracked worker
	foreach $key (keys(%workNodes)){
		if($workNodes{$key} == $pid){
			&deleteNodeJob($key, $workProblems{$pid});
			delete($workProblems{$pid});
			delete($workNodes{$key});
			$sceduledProcess = 1;
			last;
		}
	}
	if($sceduledProcess != 0 && &checkSetter() != 0){
		# success.
		# Partial results already suffice: tell every node still working
		# to abandon its job.
		my($node);
		foreach $node (keys(%workNodes)){
			&killJob($ID, $node);
		}
	}
}

# SIGALRM handler: periodically probe the Setter; once it reports success,
# ask every node that is still working to drop its job, then re-arm the
# one-second timer.  (Only active if "alarm 1" is enabled in the main flow.)
sub sigAlarmHandler{
	if(&checkSetter()){
		my($workNode);
		foreach $workNode (keys(%workNodes)){
			&killJob($ID, $workNode);
		}
	}
	alarm 1;	# schedule the next probe
}

# run one worker node (call wget process for worker CGI)
# Fork a wget child that POSTs $putFile to $url and writes the CGI
# response to $outFile.  Returns ($pid, $outFile) in the parent, or
# undef if fork() fails.
sub createWorker {
	my($url, $putFile, $outFile) = @_;
	my($pid);
	if($pid = fork()){
		# parent
		symlink($putFile, $outFile. ".link"); # for original request file.
	}elsif(defined($pid)){
		# child: replace ourselves with wget.  exec() only returns on
		# failure, so exit non-zero then — the old code exit(0)'d, which
		# made a failed exec indistinguishable from success, and carried
		# an unreachable "return undef" after the exit.
		#open(STDOUT, ">&=". $pipe->fileno());
		exec($WGET, "--timeout=9999999", "-q", "-O", $outFile, "--post-file=$putFile", $url);
		warn("createWorker: exec $WGET failed: $!\n");
		exit(1);
	}else{
		# fork failed
		return undef;
	}
	return ($pid, $outFile);
}

# Worker object.
# but not used.
{
	package Worker;

	# Simple read-only value object describing one spawned worker process:
	# its pid, its output file, and the node it runs on.

	sub new {
		my($pkg, $pid, $outFile, $nodeName) = @_;
		my $self = {
			pid      => $pid,
			outFile  => $outFile,
			nodeName => $nodeName,
		};
		return bless($self, $pkg);
	}

	# Accessors (read-only).
	sub Pid      { return $_[0]->{pid}; }
	sub OutFile  { return $_[0]->{outFile}; }
	sub NodeName { return $_[0]->{nodeName}; }

	sub DESTROY {
		# nothing to clean up
	}
}
