/***********************************************************************************************************************
*                                                                                                                      *
* SPLASH build system v0.1                                                                                             *
*                                                                                                                      *
* Copyright (c) 2013 Andrew D. Zonenberg                                                                               *
* All rights reserved.                                                                                                 *
*                                                                                                                      *
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the     *
* following conditions are met:                                                                                        *
*                                                                                                                      *
*    * Redistributions of source code must retain the above copyright notice, this list of conditions, and the         *
*      following disclaimer.                                                                                           *
*                                                                                                                      *
*    * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the       *
*      following disclaimer in the documentation and/or other materials provided with the distribution.                *
*                                                                                                                      *
*    * Neither the name of the author nor the names of any contributors may be used to endorse or promote products     *
*      derived from this software without specific prior written permission.                                           *
*                                                                                                                      *
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED   *
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL *
* THE AUTHORS BE HELD LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES        *
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR       *
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE       *
* POSSIBILITY OF SUCH DAMAGE.                                                                                          *
*                                                                                                                      *
***********************************************************************************************************************/

#include "splashcore.h"

using namespace std;

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// SlurmClusterJob

/**
	@brief Wraps an already-submitted SLURM job identified by its numeric job ID.

	The job starts out in the pending state; the cached status is only trusted
	once m_done has been set by GetStatus().

	@param jobid	Numeric SLURM job ID returned by sbatch
 */
SlurmClusterJob::SlurmClusterJob(uint32_t jobid)
	: m_jobid(jobid)
	, m_done(false)
	, m_cachedstatus(ClusterJob::JOB_STATE_PENDING)
{
}

//Nothing to clean up: this object does not own the SLURM job itself
SlurmClusterJob::~SlurmClusterJob()
{
}
	
/**
	@brief Queries SLURM for the current status of this job.

	Once the job has reached a terminal state (complete / canceled / failed),
	the result is cached and subsequent calls do not hit the SLURM API again.

	@return The job's status, mapped onto the ClusterJob state enum
 */
ClusterJob::JobStatus SlurmClusterJob::GetStatus()
{
	//Terminal states never change, so serve them from the cache
	if(m_done)
		return m_cachedstatus;
	
	//Get the status
	job_info_msg_t * job = NULL;
	if(slurm_load_job(&job, m_jobid, 0))
	{
		slurm_perror((char*)"Failed to load job info");
		FatalError("slurm_load_job() failed\n");
	}

	//If no records at all, job must not be running yet.
	//BUGFIX: free the message before returning so we don't leak it.
	if(job->record_count == 0)
	{
		slurm_free_job_info_msg(job);
		return ClusterJob::JOB_STATE_PENDING;
	}
	job_info_t* jobinfo = job->job_array + (job->record_count - 1);
	
	//Look up the job state (mask off transient flag bits), then free the
	//message before we touch anything derived from it
	int state = jobinfo->job_state & JOB_STATE_BASE;
	slurm_free_job_info_msg(job);
	switch(state)
	{
	case JOB_PENDING:
		return ClusterJob::JOB_STATE_PENDING;
	case JOB_RUNNING:
	case JOB_SUSPENDED:
		return ClusterJob::JOB_STATE_RUNNING;
	case JOB_COMPLETE:
		m_done = true;
		return (m_cachedstatus = ClusterJob::JOB_STATE_COMPLETE);
	case JOB_CANCELLED:
		m_done = true;
		return (m_cachedstatus = ClusterJob::JOB_STATE_CANCELED);
	case JOB_FAILED:
	case JOB_TIMEOUT:
	case JOB_NODE_FAIL:
	case JOB_PREEMPTED:
	default:
		//Any other state is treated as a failure
		m_done = true;
		return (m_cachedstatus = ClusterJob::JOB_STATE_FAILED);
	}
}

//Cancellation is not yet supported for SLURM jobs; aborts the process if called.
//TODO: implement via scancel or slurm_kill_job()
void SlurmClusterJob::Cancel()
{
	FatalError("SlurmClusterJob::Cancel() not implemented\n");
}

/**
	@brief Gets the SLURM job ID formatted as a decimal string.

	@return The numeric job ID as text (as used for sbatch dependency lists)
 */
string SlurmClusterJob::GetID()
{
	//Format the 32-bit job ID; 16 chars is plenty for any uint32 plus NUL
	char buf[16];
	snprintf(buf, sizeof(buf), "%u", m_jobid);
	return buf;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Construction / destruction

/**
	@brief Initializes the SLURM cluster interface.

	Logs the linked SLURM API version and sets the default partition names
	used for C++ and FPGA build jobs.
 */
SlurmCluster::SlurmCluster()
	: m_cppBuildPartition("cppbuild")	//Default partition names
	, m_fpgaBuildPartition("fpgabuild")
{
	DebugPrintfHeading(3, "Initializing SLURM cluster interface\n");

	//Report which SLURM API we were built against
	long apiver = slurm_api_version();
	DebugPrintfSilent(3, "    API version %d.%d.%d\n", 
		SLURM_VERSION_MAJOR(apiver),
		SLURM_VERSION_MINOR(apiver),
		SLURM_VERSION_MICRO(apiver));
}

//Nothing to clean up: no SLURM resources are held across calls
SlurmCluster::~SlurmCluster()
{
}

/**
	@brief Verifies that we can actually talk to the SLURM controller.

	Attempts to load the cluster control configuration and frees it again.

	@return true if the configuration loaded OK, false on any error
 */
bool SlurmCluster::SanityCheck()
{
	slurm_ctl_conf_t* conf = NULL;
	int err = slurm_load_ctl_conf(0, &conf);
	if(err != 0)
	{
		DebugPrintfSilent(3, "    SLURM cluster configuration could not be loaded\n");
		return false;
	}

	//Loaded fine; we only needed it as a connectivity probe, so release it
	DebugPrintfSilent(3, "    SLURM cluster configuration loaded OK\n");
	slurm_free_ctl_conf(conf);
	return true;
}

/**
	@brief Factory: creates a SlurmCluster if the local SLURM setup is usable.

	@return A heap-allocated cluster (caller owns it), or NULL if the sanity
	        check against the SLURM controller failed
 */
SlurmCluster* SlurmCluster::CreateDefaultCluster()
{
	SlurmCluster* cluster = new SlurmCluster;
	if(!cluster->SanityCheck())
	{
		delete cluster;
		return NULL;
	}
	return cluster;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Job submission

/**
	@brief Submits a batch job to SLURM by shelling out to sbatch.

	@param path			Script/binary to run
	@param partition	SLURM partition to submit to
	@param dir			Working directory for the job
	@param dependencies	Jobs that must complete OK before this one starts
	@param cpus			CPUs to request per task
	@param memory		Memory request in MB (<=0 means "don't specify")
	@param features		Node feature constraint string ("" means none)

	@return A SlurmClusterJob tracking the submission, or a FailedClusterJob
	        if any dependency has already failed. Calls FatalError() if the
	        sbatch output cannot be parsed.
 */
ClusterJob* SlurmCluster::SubmitBatchJob(
	string path,
	string partition,
	string dir,
	const vector<ClusterJob*>& dependencies,
	int cpus,
	int memory,
	string features)
{
	//If any prerequisite job already failed, fail immediately without submitting
	for(size_t dep=0; dep<dependencies.size(); dep++)
		if(NULL != dynamic_cast<FailedClusterJob*>(dependencies[dep]))
			return new FailedClusterJob;
	
	//For now, just call sbatch rather than doing raw API stuff.
	//TODO: run time
	
	//Basic settings: single task, chosen partition, all output to output.txt
	char scratch[2048];
	snprintf(
		scratch,
		sizeof(scratch),
		"sbatch --share --ntasks=1 --cpus-per-task=%d --partition=%s --error=output.txt --output=output.txt "
		"--workdir=%s ",
		cpus,
		partition.c_str(),
		dir.c_str()
		);
	string cmd(scratch);

	//Optional memory request
	if(memory > 0)
	{
		snprintf(scratch, sizeof(scratch), "--mem=%d ", memory);
		cmd += scratch;
	}

	//Optional node feature constraint
	if(features != "")
	{
		snprintf(scratch, sizeof(scratch), "--constraint=%s ", features.c_str());
		cmd += scratch;
	}
	
	//Dependencies: only run after every prerequisite job completes successfully
	if(!dependencies.empty())
	{
		cmd += "--dependency=afterok";
		for(size_t dep=0; dep<dependencies.size(); dep++)
			cmd += string(":") + dependencies[dep]->GetID();	
		cmd += " ";
	}

	//Finally, the thing to actually run
	cmd += path;
	
	//Submit and parse the job ID out of sbatch's stdout
	string retval = ShellCommand(cmd, true);
	unsigned int job_id = 0;
	if(1 != sscanf(retval.c_str(), "Submitted batch job %u", &job_id))
	{
		//DebugPrintfSilent(1, "    SLURM job submission failed: %s\n", retval.c_str());
		FatalError("SLURM job submission failed: %s\n", retval.c_str());
		return NULL;
	}
	DebugPrintfSilent(5, "    SLURM job ID is %u\n", job_id);
	
	return new SlurmClusterJob(job_id);
}
