#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, sys
import tarfile
import subprocess
import pickle
import time
import numpy as np
import epicalcP
import epi_output
import entropy_shannon
import entropy_shannon_weight

#sys.path.append('../Epicalc/')
#from Epicalc import epicalcP as epicalcP

def main():
    #"""Load pkl-file"""

    job = load_job_data()
    start_time= time.asctime()
    time_sec=time.time()
    proj_name = job["analysis_name"]
    genotype_file = job["genotype_file"]
    genotype_names = job["genotype_names"] # list of (snp, gene)-tuples
    phenotype_file = job["phenotype_file"]
    phenotype_names = job["phenotype_names"]
    
    genotype_array = np.fromfile(genotype_file, dtype=int, sep=" ")
    
    print genotype_array.shape
    print "len geno names", len(genotype_names)
    
    genotype_matrix = genotype_array.reshape((len(genotype_names), -1), order="C")
    
    phenotype_array = np.fromfile(phenotype_file, sep=" ")
    phenotype_matrix = phenotype_array.reshape((len(phenotype_names), -1), order="C")
    
    num_genotypes_before_filter = len(genotype_names)
    
    #Filter genotypes in epicalcP
    filtered_genotype_matrix, filtered_genotype_names, monomorph_genotypes = epicalcP.filter_genotypes(genotype_matrix, genotype_names)
    
	#Generate list of valid snps
    valid_pkl = [] 
    for i in range(0,len(filtered_genotype_names)):
        #NBNBNB This is just a rescue-operation as call for gene-names somtimes turn up 2 names. Has to be solved in eftech
        geneindex_select = filtered_genotype_names[i][1].split(',')
        val_out = str(filtered_genotype_names[i][0])+'\t' +str(geneindex_select[0])+"\n"        
        valid_pkl.append(val_out)#to epi_resultsXX.pkl
           
    ### Generate list of monopmorph snps
    monomorph_pkl = [] 
    for i in range(0,len(monomorph_genotypes)):
        #NBNBNB This is just a rescue-operation as call for gene-names somtimes turn up 2 names. Has to be solved in eftech
        monoindex_select = monomorph_genotypes[i][1].split(',')
        mo_out = str(monomorph_genotypes[i][0])+'\t'+str(monoindex_select[0]) +"\n"
        monomorph_pkl.append(mo_out)#to epi_resultsXX.pkl
    ###
    
    significance_level = job["significance_level"] 
    
    class_type_select = job["selection_file"]
    nom_sign_level = significance_level
    
    #Bonferoni correction
    perform_bonferoni_correction = job["bonf_corr"] # boolean
    bonf_correct = 0
    if filtered_genotype_names:
    #This is a correction in the extreme
        bonf_correct_extreme = nom_sign_level/(len(filtered_genotype_names)*len(phenotype_names))
    #Maybe the correction should only be done inside a trait? For now we will do this to get some working results
        bonf_correct = nom_sign_level/len(filtered_genotype_names)
	
	#Minimum allele frequences TODO Move to GUI
    minallfr = 0.1
	

    analysis_output = {}
    analysis_output["start_time"] = start_time
    analysis_output["time_sec"] = time_sec
    analysis_output["proj_name"] = proj_name
    analysis_output["num_snps"] = num_genotypes_before_filter
    analysis_output["valid_snps"] = valid_pkl   
    analysis_output["num_valid_snps"] = len(filtered_genotype_names)
    analysis_output["monomorph_snps"] = monomorph_pkl    
    analysis_output["num_monomorph_snps"] = len(monomorph_genotypes)
    analysis_output["significance_level"] = significance_level
    analysis_output["num_subjects"] = genotype_matrix.shape[1] 
    analysis_output["class"] = job["class"][0]
    analysis_output["class_type"] = class_type_select #change to import file    
    analysis_output["perform_bonf"] = perform_bonferoni_correction#boolean  
    analysis_output["bonferroni_correction"] = bonf_correct 
    analysis_output["phenotype_file"] = phenotype_file #change to import file
    analysis_output["genotype_file"] = genotype_file #change to import file
    analysis_output["minimumAllFre"] = minallfr
    
    print "Epistasis is calculated" 

    #Epistasis is calculated
    epistasis_data = epicalcP.epi_selection(filtered_genotype_names, filtered_genotype_matrix, phenotype_names, phenotype_matrix, significance_level, bonf_correct,analysis_output)
#   epistasis_data is a list of trait_results, which are saved as a dictionary

    analysis_output["epistasis_data"] = epistasis_data#this contains everything 
 


    #Entropy output,analysis_output to pickle
#    ent_out = analysis_output["epistasis_data"][0]["Entropy"]
#    entropy_data = entropy_shannon.entropy_shanon_init(allel_freq)
 #   analysis_output["Entropy_data"] =  ent_out
    
#    analysis_output["entropy_data"] = entropy_data#husk dict
    #Written output are now processed
#    epi_output.format_output(analysis_output,result_file)

    #All info and results are dumped in results...pkl
    result_file = "epi_results%s.pkl" % str(job["class"][0])    
    epi_output.format_output(analysis_output,result_file)    
    rf = open(result_file, "w")
    pickle.dump(analysis_output, rf)
    rf.close()
    
    
    #..and compressed in a tar.gz-file
    """this is the epifileX.tar.gz file"""
    archive_name = os.path.basename(job["output_files"][0]) # epifilesN.tar.gz

#

    #os.chdir("../")
    archive_output(archive_name) # archive output dir

def run_epistasis_job(job):  # apparently unused entry point -- kept for compatibility
    """Execute the epistasis program, then archive the output directory.

    NOTE(review): execute_epistasisP is neither defined nor imported in this
    module, so calling this function raises NameError -- confirm whether this
    entry point is still needed before relying on it.
    """
    # Run the calculation itself, relative to the parent directory.
    execute_epistasisP(job, "../")

    # job["output_files"][0] is the epifileN.tar.gz path; pack everything in
    # the current directory under that basename.
    archive_name = os.path.basename(job["output_files"][0])
    archive_output(archive_name)

#def extract_job_files(filepath, dest_dir):
#    prog_files = tarfile.open(dest_dir+filepath, "r")
#    prog_files.extractall(path=dest_dir)
#    prog_files.close()

def create_dir(dir_path, enter_directory=False):  # apparently unused helper
    """Ensure that dir_path exists; optionally chdir into it afterwards."""
    if not os.path.exists(dir_path):
        os.mkdir(dir_path)
    if enter_directory:
        os.chdir(dir_path)

def archive_output(dest_dir):  # = archive_name
    """Pack every entry of the current working directory into a gzipped tar.

    dest_dir is (despite the name) the archive *filename*, e.g.
    "epifilesN.tar.gz". The listing is taken before the archive is created,
    so the archive never contains itself. Directories found in the listing
    are added recursively by tarfile.add().
    """
    print("archiving")

    entries = os.listdir(".")

    # Bug fix: the tarfile handle used to leak if add() raised; the context
    # manager guarantees it is closed on every exit path.
    with tarfile.open(dest_dir, 'w:gz') as output_archive:
        for entry in entries:
            output_archive.add(entry)

def load_job_data():
    """Unpickle and return the job description named by sys.argv[1].

    SECURITY NOTE: pickle.load can execute arbitrary code from the file --
    only run this on trusted job files.
    """
    job_file = sys.argv[1]
    # Bug fix: pickle files must be opened in binary mode ("rb"); text mode
    # breaks on Windows and with binary pickle protocols. "with" guarantees
    # the handle is closed even if load() raises.
    with open(job_file, "rb") as pkl_file:
        return pickle.load(pkl_file)

        
       
if __name__ == "__main__":
    # To profile instead: import profile; profile.run(main)
    main()
