#!/usr/bin/python
'''
Evaluate the mapping results.
'''
import gzip
import os
import sys

# Parameters.
# Positional command-line arguments:
#   1-3: FASTA inputs (reads set 1, reads set 2, junction sequences)
#   4-6: mapping-result files for each of the above
read1_file = sys.argv[1]  # NOTE(review): parsed but never used in the visible code
read2_file = sys.argv[2]  # FASTA loaded into the global `reads` dict below
junct_file = sys.argv[3]  # junction FASTA, merged into `reads` below

map1_file = sys.argv[4]  # NOTE(review): parsed but never used in the visible code
map2_file = sys.argv[5]  # mapping results for read set 2
mapjunct_file = sys.argv[6]  # mapping results for the junction sequences

def read_file(filepath):
	"""Parse a FASTA file (optionally gzip-compressed) into a dict.

	filepath -- path to a FASTA file; a ".gz" suffix selects gzip decoding.
	Returns {header: sequence} where the header is the ">" line with all
	">" characters removed and the sequence is the concatenation of its
	stripped body lines.  Lines starting with "#" are skipped.

	Fixes vs. previous version: `gzip` is now actually imported at module
	level; the gzip check uses endswith() instead of matching ".gz"
	anywhere in the path; an empty or headerless file no longer injects a
	bogus {"": ""} entry.
	"""
	data = {}

	# Suffix check, not substring: "my.gz.backup.fa" is plain text.
	if filepath.endswith(".gz"):
		fin = gzip.open(filepath, "rt")  # text mode so lines are str
	else:
		fin = open(filepath, "r")

	seqheader = None  # None = no header seen yet
	seq = ""
	try:
		for line in fin:
			# Skip comments.
			if line.startswith("#"):
				continue

			# New record: flush the previous one (if any) and reset.
			if line.startswith(">"):
				if seqheader is not None:
					data[seqheader] = seq
				seqheader = line.strip().replace(">", "")
				seq = ""
				continue

			# Sequence body line (may be wrapped over several lines).
			seq += line.strip()

		# Flush the final record; guarded so an empty file returns {}.
		if seqheader is not None:
			data[seqheader] = seq
	finally:
		fin.close()
	return data


def get_sege_matches(matchstr, min_match=10):
	"""Tally matched vs. mismatched bases from a CIGAR-like match string.

	matchstr  -- iterable of segments such as "20M", "3I", "2D", "4S"
	             (typically produced by splitting the field on ";").
	min_match -- match segments shorter than this are treated as noise
	             and counted as mismatches (previously hard-coded to 10).

	Returns (match, mis): total bases in accepted "M" segments, and total
	bases in indel/clip segments plus short "M" segments.
	"""
	match = 0
	mis = 0
	for seg in matchstr:
		# Non-match segment (insertion/deletion/soft-clip): its length
		# counts toward the mismatch total.
		if "M" not in seg:
			digits = seg.replace("I", "").replace("S", "").replace("D", "")
			if digits == "":
				continue
			mis += int(digits)
			continue

		# Match segment length.
		val = int(seg.replace("M", ""))

		# Filter shorties: too short to trust, count as mismatch.
		if val < min_match:
			mis += val
		else:
			match += val

	return match, mis

# Backward-compat alias: callers elsewhere in this file use this name,
# which was previously undefined (NameError at runtime).
get_matches = get_sege_matches

def parse_sege_file(file_path):
	"""Parse a segemehl-style alignment file into per-read tuples.

	file_path -- tab-separated alignment file; "#" lines are comments.
	Reads the module-level `reads` dict for read lengths.
	Returns a list of (read_len, match, mis, aligned_span) float tuples,
	keeping only the best-scoring (highest match) alignment per read.

	Fixes vs. previous version: calls get_sege_matches (the old call to
	`get_matches` was a NameError); a better duplicate alignment now
	replaces the old entry in place instead of `del dat[idx]`, which
	shifted the list and left every later index stored in `used` stale;
	file opened in text mode.
	"""
	dat = []
	used = {}  # read id -> [best match score, index into dat]
	fin = open(file_path, "r")
	for line in fin:
		# Skip comments.
		if line[0] == "#":
			continue

		# Tokenize.
		tmp = line.strip().split("\t")
		read = tmp[0].replace(">", "")
		start = int(tmp[11])
		stop = int(tmp[12])
		rlen = len(reads[read])  # global FASTA dict; KeyError = bad input

		# Tokenize the ";"-separated match string.
		match, mis = get_sege_matches(tmp[14].split(";"))

		entry = (float(rlen), float(match), float(mis), float(stop - start))

		# Only keep one alignment per read: the one with the most matches.
		if read in used:
			if match > used[read][0]:
				# Replace in place so stored indices stay valid.
				dat[used[read][1]] = entry
				used[read][0] = match
			continue

		used[read] = [match, len(dat)]
		dat.append(entry)

	fin.close()
	return dat
	
def parse_blast_file(file_path):
	"""Parse tabular BLAST output (-m8 / outfmt 6) into per-read tuples.

	file_path -- 12-column tab-separated BLAST file:
	  queryId, subjectId, percIdentity, alnLength, mismatchCount,
	  gapOpenCount, queryStart, queryEnd, subjectStart, subjectEnd,
	  eVal, bitScore
	Reads the module-level `reads` dict for query lengths.
	Returns a list of (read_len, match, mis, aligned_span) float tuples,
	keeping only the best (highest match) alignment per query.

	Fixes vs. previous version: the old code referenced undefined
	`stop`/`start` and column 14 (copied from the segemehl parser and out
	of range for 12-column BLAST output) and called the undefined
	`get_matches`; duplicates are replaced in place instead of deleted,
	which corrupted the stored list indices.
	"""
	dat = []
	used = {}  # query id -> [best match score, index into dat]
	fin = open(file_path, "r")
	for line in fin:
		# Skip comments.
		if line[0] == "#":
			continue

		# Tokenize.
		tmp = line.strip().split("\t")
		read = tmp[0]
		alnlen = int(tmp[3])
		mismatches = int(tmp[4])
		qstart = int(tmp[6])
		qend = int(tmp[7])
		rlen = len(reads[read])  # global FASTA dict

		# Matched bases = aligned length minus mismatches.
		# NOTE(review): gap bases are not subtracted here — confirm
		# whether gapOpenCount (tmp[5]) should also reduce the match.
		match = alnlen - mismatches
		mis = mismatches

		entry = (float(rlen), float(match), float(mis), float(qend - qstart))

		# Only keep one alignment per read.
		if read in used:
			if match > used[read][0]:
				# Replace in place so stored indices stay valid.
				dat[used[read][1]] = entry
				used[read][0] = match
			continue

		used[read] = [match, len(dat)]
		dat.append(entry)

	fin.close()
	return dat


# Read in reads.
reads = read_file(read2_file)
tmp = read_file(junct_file)

# Merge dictionaries.
for n in tmp:
	if n in reads:
		print "Bad files"
		sys.exit()
	reads[n] = tmp[n]

# Read in results.
#junct_dat = parse_sege_file(mapjunct_file)
#map2_dat = parse_sege_file(map2_file)

junct_dat = parse_blast_file(mapjunct_file)

# Set up the parameter study: threshold grids for the identity (iness)
# and coverage (cness) cutoffs, stepping from 0.5 up to (not including)
# 1.0.  The float accumulation is kept so threshold values are
# bit-identical to what downstream dict lookups expect.
res = .05
iness = []
t = 0.5
while t < 1.0:
	iness.append(t)
	t += res
# Independent copy: the two lists are mutated (reversed) separately later,
# so they must not alias each other.
cness = list(iness)

# Cumulative count matrices, keyed [identity threshold][coverage threshold].
results_j = {x: {y: 0 for y in cness} for x in iness}
results_m2 = {x: {y: 0 for y in cness} for x in iness}

# For every junction alignment, compute its actual identity and coverage
# and bump the count of every (identity, coverage) threshold pair it beats.
# NOTE(review): this loop uses strict '<' while the map2 loop below uses
# '>=' — confirm which boundary semantics is intended.
for rlen, match, mis, span in junct_dat:
	i_act = 1.0 - (mis / span)  # fraction of aligned span not mismatched
	c_act = match / rlen        # fraction of the read covered by matches

	for i_min in iness:
		for c_min in cness:
			if i_min < i_act and c_min < c_act:
				results_j[i_min][c_min] += 1


# Same cumulative counting for the read-2 alignments.  The thresholds are
# walked high-to-low via reversed() views; this avoids mutating iness/cness
# in place and restoring them afterwards, and the counts are unaffected by
# traversal order.
# NOTE(review): this loop uses '>=' while the junction loop above uses
# strict '<' — confirm which boundary semantics is intended.
for rlen, match, mis, span in map2_dat:
	i_act = 1.0 - (mis / span)
	c_act = match / rlen

	for i_min in reversed(iness):
		for c_min in reversed(cness):
			if i_act >= i_min and c_act >= c_min:
				results_m2[i_min][c_min] += 1

# Print results as matrix.
print "total juncts", len(junct_dat)
row = ""
for c in cness:
	row += "\t" + str(c)
print row
result = results_j
ikeys = sorted(result.keys())
for ikey in ikeys:
	ckeys = sorted(result[ikey].keys())
	
	row = [str(ikey)]
	for ckey in ckeys:
		row.append( str(result[ikey][ckey]) )
	row = '\t'.join(row)
	print row
	

# Print results as matrix.
print "total map2", len(map2_dat)
row = ""
for c in cness:
	row += "\t" + str(c)
print row
result = results_m2
ikeys = sorted(result.keys())
for ikey in ikeys:
	ckeys = sorted(result[ikey].keys())
	
	row = [str(ikey)]
	for ckey in ckeys:
		row.append( str(result[ikey][ckey]) )
	row = '\t'.join(row)
	print row	
