import sys, os, cPickle
import tempfile
sys.path.append('../Configuration/')
import Configuration.epistasisconfiguration as configuration

def fragment_epistasis(job_size, values):
    """Partition *values* into sub-jobs of at most *job_size* elements.

    Every value is converted to ``str`` before being placed in its chunk.
    The final chunk may be shorter than *job_size* when the division is
    not exact.

    :param job_size: maximum number of values per sub-job. A value below 1
        degenerates to a single chunk holding everything (the historical
        behavior of the counter-based implementation).
    :param values: iterable of values to distribute over sub-jobs.
    :return: list of lists of strings; empty list for empty input.
    """
    as_strings = [str(v) for v in values]
    if job_size < 1:
        # Degenerate chunk size: preserve original behavior of returning
        # one all-inclusive chunk (or nothing for empty input).
        return [as_strings] if as_strings else []
    # Slice-based chunking replaces the manual counter/accumulator loop.
    return [as_strings[i:i + job_size]
            for i in range(0, len(as_strings), job_size)]


def create_epistasis_jobs(
        job_size,
        genotype_names,
        genotype_files,
        phenotype_names,
        phenotype_files,
        selection_variable,
        classes,
        analysis_name,
        run_local,
        significance_level,
        bonf_corr
        ):
    """Build one job dictionary per sub-job and pickle each to a temp file.

    The *classes* workload is partitioned into chunks of *job_size* by
    :func:`fragment_epistasis`; each chunk becomes one job dict holding the
    script name, status fields, per-job genotype/phenotype files, input and
    output file lists, and the command line to run on the grid resource.
    A pickled copy of each job dict (the "job data file") is written to the
    system temp directory and appended to the job's input files so it can
    be imported at runtime.

    NOTE(review): *run_local* is accepted but never used in this function —
    confirm whether callers depend on it elsewhere.

    :return: list of job dictionaries, one per sub-job.
    """
    # Partition the workload.
    init_jobs = fragment_epistasis(job_size, values=classes)

    jobs = []
    for j, job_class in enumerate(init_jobs):
        ser_number = j + 1

        job = {}
        job['main_script'] = configuration.main_script
        job["started"] = "---"
        job["finished"] = "---"
        job["status"] = "init"
        job["significance_level"] = significance_level
        job["bonf_corr"] = bonf_corr

        # Assumes one genotype/phenotype file per sub-job, i.e. that both
        # file lists are at least as long as init_jobs — TODO confirm.
        job["genotype_file"] = genotype_files[j]
        job["phenotype_file"] = phenotype_files[j]

        job["selection_file"] = selection_variable

        job["genotype_names"] = genotype_names
        job["phenotype_names"] = phenotype_names

        job['class'] = job_class
        job['analysis_name'] = analysis_name

        # Archive produced by the job on the resource.
        output_filename = 'epifiles' + str(ser_number) + '.tar.gz'
        job['output_files'] = [output_filename]

        input_files = list(configuration.program_files)
        input_files.append(genotype_files[j])
        input_files.append(phenotype_files[j])

        job['input_files'] = input_files
        job['resource_specs'] = configuration.resource_specs

        # Pickle the job dict BEFORE appending the pickle's own path and the
        # command list, so the stored copy does not reference itself.
        job_data_file = "job_file%s.pkl" % ser_number
        # BUG FIX: tempfile.tempdir is None until gettempdir() has been
        # called, which made os.path.join() fail; use gettempdir() directly.
        job_filepath = os.path.join(tempfile.gettempdir(), job_data_file)
        # BUG FIX: pickles are binary data — open in 'wb', not 'w'
        # (text mode corrupts the stream on Windows). Close on all paths.
        jf = open(job_filepath, 'wb')
        try:
            cPickle.dump(job, jf)
        finally:
            jf.close()

        job['input_files'].append(job_filepath)

        # The execution command on the grid resource.
        job['commands'] = ['$PYTHON ' + job['main_script'] + ' ' + job_data_file]

        jobs.append(job)

    return jobs
