from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext, Row	
import os
import operator
import ConfigParser as configparser
import ntpath
from vina_utils import get_file_name_sorted_energy, get_directory_pdbqt_analysis, get_files_pdbqt, get_directory_pdb_analysis, get_files_pdb, loading_pdb_2_list, get_name_receptor_pdb, get_name_model_pdb, get_files_pdb_filter
from summary_statistics import get_summary_statistics, save_txt_summary_statistics
from pdbqt_io import split_pdbqt, pdbqt2pdb
from datetime import datetime
from pdb_io import replace_chain_atom_line

def save_analysis_log(finish_time, start_time):
	"""Write an execution log with start/finish timestamps and elapsed seconds.

	The log is written to 'vs_prepare_files_for_analysis.log' in the current
	working directory, overwriting any previous run's log.

	Args:
		finish_time (datetime): moment the analysis finished.
		start_time (datetime): moment the analysis started.
	"""
	log_file_name = 'vs_prepare_files_for_analysis.log'
	current_path = os.getcwd()
	path_file = os.path.join(current_path, log_file_name)

	diff_time = finish_time - start_time
	# 'with' guarantees the handle is closed even if a write fails
	# (the original version never closed the file).
	with open(path_file, 'w') as log_file:
		log_file.write('Starting ' + str(start_time) + '\n')
		log_file.write('Finishing ' + str(finish_time) + '\n')
		log_file.write('Time Execution (seconds): ' + str(diff_time.total_seconds()) + '\n')


def main():
	"""Prepare virtual-screening (VS) output files for analysis.

	Driver steps:
	  1. Read all working paths and tool locations from config.ini.
	  2. Compute summary statistics over the sorted-energies file.
	  3. Split every VS pdbqt structure into per-model pdbqt files (Spark).
	  4. Convert each model pdbqt to pdb via MGLTools (Spark).
	  5. Write an execution-time log.
	"""
	sc = SparkContext()

	config = configparser.ConfigParser()
	config.read('config.ini')

	#Broadcast
	#Path that contains all files for analysis
	path_analysis = config.get('DEFAULT', 'path_analysis')
	#Path where all pdbqt files from VS are
	path_save_structure = config.get('DEFAULT', 'path_save_structure')
	#Path where all pdb receptor are
	#NOTE(review): path_receptor_pdb is never used below — kept so a missing
	#'pdb_path' key still fails fast here; confirm whether it can be dropped.
	path_receptor_pdb = config.get('DEFAULT', 'pdb_path')
	#Path for saving pdbqt files that are splited from VS
	path_analysis_pdbqt = get_directory_pdbqt_analysis(path_analysis)
	#Path for saving pdb files of models generated by VS
	path_analysis_pdb = get_directory_pdb_analysis(path_analysis)
	#Path for drugdesign project
	path_spark_drugdesign = config.get('DRUGDESIGN', 'path_spark_drugdesign')
	#Running MGLTools for pdbqt to pdb conversion
	pythonsh = config.get('VINA', 'pythonsh')
	script_pdbqt_to_pdb = config.get('VINA', 'script_pdbqt_to_pdb')

	#Adding Python source files so Spark workers can import the helpers
	sc.addPyFile(os.path.join(path_spark_drugdesign, "vina_utils.py"))
	sc.addPyFile(os.path.join(path_spark_drugdesign, "summary_statistics.py"))
	sc.addPyFile(os.path.join(path_spark_drugdesign, "pdbqt_io.py"))
	sc.addPyFile(os.path.join(path_spark_drugdesign, "pdb_io.py"))
	sc.addPyFile(os.path.join(path_spark_drugdesign, "json_utils.py"))

	start_time = datetime.now()

	#File that contains sorted energies from all log files
	energy_file_name = os.path.join(path_analysis, get_file_name_sorted_energy())

	text_file = sc.textFile(energy_file_name)

	#Splitting the energy file by \t into (energy, pose) rows
	header = text_file.first()  #extract header
	rdd_vs_energies_sorted_split = text_file.filter(lambda x: x != header)  #filter out header
	rdd_vs_energies_sorted_split = rdd_vs_energies_sorted_split.map(lambda line: line.split("\t"))
	rdd_vs_energies_sorted = rdd_vs_energies_sorted_split.map(lambda p: Row(energy=float(p[0]), pose=str(p[1])))

	#Applying summary and descriptive statistics to the energies
	summary_statistics_out = get_summary_statistics(sc, rdd_vs_energies_sorted)
	save_txt_summary_statistics(path_analysis, summary_statistics_out)

	#Creating model pdbqt files from VS structures:
	#each task is a (structure file, output directory) pair
	all_structures = get_files_pdbqt(path_save_structure)
	list_pdbqt_model = [(structure, path_analysis_pdbqt) for structure in all_structures]

	pdbqtRDD = sc.parallelize(list_pdbqt_model)
	pdbqtRDD.foreach(split_pdbqt)

	#Creating pdb files from model pdbqt files:
	#each task carries the MGLTools interpreter and conversion script paths
	all_models = get_files_pdbqt(path_analysis_pdbqt)
	list_pdb_model = [(model, path_analysis_pdb, pythonsh, script_pdbqt_to_pdb) for model in all_models]

	pdb_modelRDD = sc.parallelize(list_pdb_model)
	pdb_modelRDD.foreach(pdbqt2pdb)

	finish_time = datetime.now()

	save_analysis_log(finish_time, start_time)

# Guard the entry point so importing this module does not launch the Spark job.
if __name__ == '__main__':
	main()