/**
* This executable runs periodically (the interval is definable in the project's config.xml file) to check whether any new data sets have been dropped off by researchers. If new work exists, the data set is partitioned into pieces of approximately one hour's worth of floating point operations. That is, each result has approximately one hour's worth of linear regressions to perform. Since the science application (client application) depends on knowing where to begin and end to complete its piece, a scoping file is created.  Once created, the scoping file, along with the actual data file (.csv file), is copied to the appropriate directory (BOINC uses a special hierarchical directory structure - see http://boinc.berkeley.edu/trac/wiki/DirHierarchy ) on the server. Once the files have been copied, the workunits themselves are created and inserted into the BOINC database so that the scheduling server may create results based on them.
* 
* Current known issues: 
* 1) Since the memory to create the scoping files is allocated all at once, it has the ability to consume all the system resources. This should also be rewritten to allocate memory in small chunks, perform the work generation, free memory, and repeat until all work has been created.  This is a more professional approach and allows for truly arbitrary data sets.
* 2) Need to check if there is a memory leak in the for-loop or after it.
*
* @author Max Rupplin
* @date 4-28-08
* @file ddas_workgen.C
*/

//Long Integer Package
extern "C"
{
        #include "lip.h"
}

//DDAS related
#include "DDAS_Work_Handler.hpp"
#include "DDAS_DB_Handler.hpp"
#include "DDAS_XML_Handler.hpp"
#include "ddas_workgen.h"

//BOINC related
#include <sched_util.h>
#include <backend_lib.h> 
#include <boinc_db.h> 
#include <parse.h>
#include <config.h>

//C/C++ related
#include <string>
#include <stdlib.h>
#include <fstream>
#include <iostream>

//namespaces
using namespace std;


/**
* The main method instantiates and makes use of a DDAS_Work_Handler object to create work for DDAS
*/
/**
 * The main method instantiates and makes use of a DDAS_Work_Handler object to create work for DDAS.
 *
 * For each newly submitted problem it:
 *   1) determines the workunit iteration range [starting_value, ending_value) for this cycle,
 *   2) copies the problem's data file into the BOINC hierarchical download directory,
 *   3) generates and copies the per-workunit scoping files and (empty) state files,
 *   4) registers the workunits in the BOINC database, and
 *   5) advances the stored workgen iteration, marking the job complete once
 *      the final iteration has been reached.
 *
 * All per-job heap buffers are released on every path through the loop,
 * including the early-bailout (failure) paths, which previously leaked.
 *
 * @return 1 on both the "no new work" and the "work generated" paths
 *         (historical convention of this tool; preserved for callers/cron scripts)
 */
int main()
{
	DDAS_Work_Handler work_handler;
	DDAS_DB_Handler db_handler;

	//if no new problems have been submitted simply return
	if( !work_handler.exists_new_work() )
	{
		return 1;
	}

	//problem IDs of new jobs (buffer is allocated by getNewProblemIDs)
	int* pids = NULL;

	//get problem IDs
	int pid_count = db_handler.getNewProblemIDs(&pids);

	//for each new job
	for(int i=0; i<pid_count; i++)
	{
		//find out how many workunits have already been created for this job (if any)
		unsigned long starting_value = db_handler.getCurrentWorkgenIteration( pids[i] );

		//find out how many total iterations will be needed and store it for future reference
		if( db_handler.getTotalWorkunitsRequiredForJob( pids[i] )==0 )
		{
			db_handler.setTotalWorkunitsRequiredForJob( pids[i], db_handler.calculateTotalWorkunitsRequiredForJob(pids[i]) );
		}

		//find out what the final workunit's iteration should be for THIS cycle ( 0 < ending_value < DESIRED_CUSHION_VALUE )
		unsigned long ending_value = db_handler.getFinalWorkUnitIterationForCurrentWorkgenCycle( pids[i], starting_value );

		//guard BEFORE subtracting: both values are unsigned, so the old
		//post-subtraction "size <= 0" check could never catch a wrap-around
		if( ending_value <= starting_value ) continue;

		//number of workunits (and therefore files) to produce this cycle
		long size = (long)(ending_value - starting_value);

		//per-job file buffers; NULL-initialized so cleanup is safe even when
		//a step fails before (or while) filling them in
		char* data_file_content = NULL;
		char* data_file_name = NULL;
		char** scoping_file_contents = new char*[size];
		char** scoping_file_names = new char*[size];
		char** state_file_names = new char*[size];

		for(long j=0; j<size; j++)
		{
			scoping_file_contents[j] = NULL;
			scoping_file_names[j] = NULL;
			state_file_names[j] = NULL;
		}

//START DATA FILE
		//get the data file for a given job (data_file_xxx are initialized in the function)
		bool ok = work_handler.get_related_data_file( pids[i], &data_file_content, &data_file_name );

		if( ok )
		{
			//write data file to download directory using hierarchical scheme
			work_handler.write_data_file_to_download_dir( data_file_content, data_file_name );
//END DATA FILE

//START SCOPING FILES
			//create the scoping files for a given job and store them in scoping_file_xxxxx arrays
			ok = ( work_handler.get_related_scoping_files( pids[i], &scoping_file_contents, &scoping_file_names, starting_value, ending_value ) != 0 );
		}

		if( ok )
		{
			//write scoping files to download directory using hierarchical scheme
			work_handler.write_scoping_files_to_download_dir( scoping_file_contents, scoping_file_names, starting_value, ending_value );
//END SCOPING FILES

//START STATE FILES
			//create and store names for state files (they are based on scoping_file_names)
			ok = ( work_handler.get_related_state_files( &state_file_names, scoping_file_names, starting_value, ending_value ) != 0 );
		}

		if( ok )
		{
			//write the (empty) state files to download dirs
			work_handler.write_state_files_to_download_dir( state_file_names, starting_value, ending_value );
//END STATE FILES

			//create and add workunits to database for processing
			work_handler.create_workunits( pids[i], data_file_name, scoping_file_names, state_file_names, starting_value, ending_value );

			//update the current workgen iteration ( what work unit are we on e.g. 10,000 of 150,000 )
			db_handler.setCurrentWorkgenIteration( pids[i], ending_value );

			//if all work has been created for this job mark it as complete ( final_workgen_iteration==ending_value means we are done)
			if( db_handler.getTotalWorkunitsRequiredForJob(pids[i]) <= ending_value )
			{
				db_handler.setAllWorkCompleteForJob(pids[i]);
			}
		}

		//free memory — this now runs on failure paths too; the old code
		//'continue'd past it and leaked every buffer allocated above.
		//delete[] on NULL is a no-op, so partially-filled arrays are fine.
		delete[] data_file_content;
		delete[] data_file_name;

		for(long j=0; j<size; j++)
		{
			delete[] scoping_file_contents[j];
			delete[] scoping_file_names[j];
			delete[] state_file_names[j];
		}

		delete[] scoping_file_contents;
		delete[] scoping_file_names;
		delete[] state_file_names;
	}

	//release the ID buffer from getNewProblemIDs
	//NOTE(review): assumes it was allocated with new[] like the rest of this
	//codebase — confirm against DDAS_DB_Handler before shipping
	delete[] pids;

	return 1;
}
