#!/usr/bin/python2.6
# -*- coding: utf-8 -*-
# Copyright (c) 2008 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. It is provided for educational
# purposes and is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.


import sys
#import pickle
#import string
import os
#import io
import numpy as np
import time
#import PartitionModifyConf
sys.path.append("../")
#import GUIconfig


#data_file_trim = "btsupClassPC.csv"#this is modified in spss  on PC
#partition_output_dir= "partition_modify"
#partition_threshold = 0.9




def partition_trim(path_to_file, partition_threshold, output_dir):
	"""Filter an MPlus classification export down to confidently-classified subjects.

	Reads the semicolon- or tab-separated file at *path_to_file*, keeps every
	subject whose class-membership probability reaches *partition_threshold*
	(delegated to part_trim), and writes a run summary plus the filtered
	subject/class table under ``<output_dir>/class_partition``.

	Returns the path of the filtered table ("class_output_file.csv").
	"""
	import string  # file keeps this import function-local (was at call site)

	summary_dir = os.path.join(output_dir, "class_partition")
	output_file = os.path.join(summary_dir, "class_output_file.csv")

	# makedirs (not mkdir) so a missing *output_dir* is created as well.
	if not os.path.exists(summary_dir):
		os.makedirs(summary_dir)

	jobGC = {}
	jobGC["DataFile"] = path_to_file
	jobGC["OutputDir"] = summary_dir
	jobGC["PartThreshold"] = partition_threshold
	jobGC["StartTime"] = time.asctime()

	# Two output streams: a human-readable summary and the filtered table.
	jobGC["PartModsum"] = open(jobGC["OutputDir"] + "/Partition modified summary " + str(jobGC["PartThreshold"]), "w")
	jobGC["PartMod"] = open(output_file, "w")

	try:
		# Column label for the subject table: the input file name stripped
		# down to alphanumerics so it is a single safe token.
		valid_chars = string.ascii_letters + string.digits
		partition_label = "".join(c for c in os.path.basename(path_to_file) if c in valid_chars)
		if not partition_label:
			partition_label = "subjectpartition"

		# Summary header.
		jobGC["PartModsum"].write("Partition modification" + "\n\n")
		jobGC["PartModsum"].write(jobGC["StartTime"] + "\n\n")
		jobGC["PartModsum"].write("Original file: " + jobGC["DataFile"] + "\n\n")
		jobGC["PartModsum"].write("Partition threshold: " + str(jobGC["PartThreshold"]) + "\n\n")
		jobGC["PartMod"].write("Subject \t %s \n" % partition_label)

		# Load the data file; accept either ";" or tab as field separator,
		# then strip the line terminator / stray whitespace from every cell.
		# (Stripping per-row avoids the IndexError the old fixed-width loop
		# raised on ragged rows.)
		with open(jobGC["DataFile"], "r") as data_in:
			data_raw = [[cell.strip() for cell in line.replace(";", "\t").split("\t")]
						for line in data_in]

		data_raw_matrix = np.matrix(data_raw)

		# NOTE: shape[0] includes the header row, matching the original count.
		num_subj = data_raw_matrix.shape[0]
		jobGC["PartModsum"].write("Number of subjects in file: " + str(num_subj) + "\n\n")

		# Find the partition (class assignment) for each subject.
		part_trim(jobGC, data_raw_matrix)
	finally:
		# Close (and thereby flush) the outputs so callers can immediately
		# read the returned path -- the old code leaked both handles.
		jobGC["PartModsum"].close()
		jobGC["PartMod"].close()

	return output_file  # used in program, but also in output, latter should be dropped
	

def part_trim(jobGC, data_raw_matrix):
	"""Assign each subject to the first class whose membership probability
	reaches jobGC["PartThreshold"], then write the filtered subject/class
	table to jobGC["PartMod"] and a per-class size summary to
	jobGC["PartModsum"].

	Expected data_raw_matrix layout (as produced by the caller): row 0 is a
	header; column 0 holds the subject id; the columns in between (final
	column excluded) hold the per-class probabilities.
	"""
	threshold = jobGC["PartThreshold"]
	kept_subjects = []   # "<id> \t <class>" lines for the output table
	class_numbers = []   # matched class per kept subject, for the size summary

	n_rows, n_cols = data_raw_matrix.shape
	for row in range(1, n_rows):               # row 0 is the header
		subject_id = data_raw_matrix[row, 0]
		for col in range(1, n_cols - 1):       # skip id column and last column
			prob = float(data_raw_matrix[row, col])
			# NOTE(review): the original scan started at list index 1, so the
			# value in column 1 can never produce a match.  That behavior is
			# preserved here (the `col > 1` guard) -- confirm whether the
			# first probability column is intentionally excluded.
			if col > 1 and prob >= threshold:
				kept_subjects.append("%s \t %s" % (subject_id, col))
				class_numbers.append(col)
				break

	summary = jobGC["PartModsum"]
	summary.write("Number of subjects after filtering: " + str(len(kept_subjects)) + "\n\n")
	summary.write("{0} {1} {2}\n\n".format("Class", "\t", "Size"))
	# Size of each class that actually received subjects.
	for class_no in set(class_numbers):
		summary.write("{0} {1} {2}\n".format(class_no, "\t", class_numbers.count(class_no)))

	jobGC["PartMod"].write("\n".join(kept_subjects))
	print("wrote partition file")
	
if __name__ == "__main__":
	# Ad-hoc manual run against the default MPlus export in the working dir.
	default_input = "btsupClassPC.csv"
	partition_trim(default_input, partition_threshold=0.9, output_dir="partition_summary")


