#!/usr/bin/python
'''
Reads in a filtered gff, rna_fasta and sam file
and prints out individual batches of "utr" and mirna.
'''
import sys


# parameters (positional command-line arguments).
rna_file = sys.argv[1]       # FASTA of RNA (miRNA) sequences; filtered for complexity below
gff_file = sys.argv[2]       # filtered GFF linking scaffolds (col 1) to RNA ids (col 3)
utr_file = sys.argv[3]       # FASTA of "utr" / scaffold sequences
utr_dir = sys.argv[4]        # output dir for per-scaffold *_utr.fa files
rna_dir = sys.argv[5]        # output dir for per-scaffold *_microRNAs.fa files
collapse_file = sys.argv[6]  # output: tab-separated groups of RNAs sharing one sequence
k_size = 3                   # k-mer length used by the low-complexity filter in read_fasta
k_count = 8                  # minimum distinct k-mers for a sequence to be kept


def read_fasta(fasta_file, nosimple=False, k_size=3, k_count=8):
	"""Read a FASTA file into a dict mapping header -> sequence.

	Every stored sequence has lowercase 's' characters removed
	(presumably a masking marker — confirm with the data producer).
	When nosimple is True, low-complexity records are discarded: a
	record is kept only if it contains at least k_count distinct
	k-mers of length k_size.

	fasta_file -- path to the FASTA file.
	nosimple   -- apply the k-mer complexity filter when True.
	k_size     -- k-mer length for the filter (default matches the
	              module-level constant).
	k_count    -- minimum number of distinct k-mers (default matches
	              the module-level constant).

	Returns {} for an empty file (the original raised NameError).
	"""
	def _keep(seq):
		# Complexity gate shared by every record, including the last one
		# (the original code skipped the filter for the final record).
		if not nosimple:
			return True
		kmers = set()
		# +1 so the final k-mer is included (original loop dropped it).
		for i in range(len(seq) - k_size + 1):
			kmers.add(seq[i:i + k_size])
		return len(kmers) >= k_count

	data = {}
	head = None
	seq = ""
	# text mode ("r"): the file is line-oriented text; "rb" broke
	# the ">" comparison under Python 3.
	fin = open(fasta_file, "r")
	for line in fin:
		line = line.strip()
		if line.startswith(">"):
			# flush the previous record, if any, before starting a new one.
			if head is not None and _keep(seq):
				data[head] = seq.replace("s", "")
			head = line.replace(">", "")
			seq = ""
		else:
			# accumulate sequence lines.
			seq += line
	fin.close()

	# flush the final record through the same filter.
	if head is not None and _keep(seq):
		data[head] = seq.replace("s", "")
	return data

def parse_gff(gff_file, scafs, rnas):
	"""Map each scaffold to the list of RNAs annotated on it.

	Scans the (pre-filtered) GFF line by line; a line contributes when
	its first column is a key of scafs and its third column is a key
	of rnas.  NOTE(review): in standard GFF the third column is the
	feature type — this filtered file apparently carries RNA ids
	there; confirm against the upstream filtering step.

	gff_file -- path to the tab-separated GFF file.
	scafs    -- container (dict/set) of scaffold names to keep.
	rnas     -- container (dict/set) of RNA names to keep.

	Returns {scaffold: [rna, rna, ...]} in file order; duplicates are
	kept, matching the original behaviour.
	"""
	data = {}
	cnt = 0
	# text mode ("r"): "rb" yields bytes under Python 3 and breaks
	# the string comparisons below.
	fin = open(gff_file, "r")
	for line in fin:
		cnt += 1
		if cnt % 1000000 == 0:
			# progress marker for very large files.
			print(cnt)

		tmp = line.split("\t")
		# skip blank/malformed lines instead of raising IndexError.
		if len(tmp) < 3:
			continue

		# record the association.
		if tmp[0] in scafs and tmp[2] in rnas:
			data.setdefault(tmp[0], []).append(tmp[2])
	fin.close()

	return data

# read the RNA fasta, filtering out low-complexity entries.
print("Reading rna fasta.")
rna_seqs = read_fasta(rna_file, nosimple=True)

# group RNA names by identical sequence.
groups = {}
for name, seq in rna_seqs.items():
	groups.setdefault(seq, []).append(name)

# write one tab-separated group per line and drop every duplicate
# (all but the first name of each group) from the RNA database.
print("before slimming: %i" % len(rna_seqs))
fout = open(collapse_file, "wb")
for members in groups.values():
	fout.write("%s\n" % "\t".join(members))
	for dup in members[1:]:
		del rna_seqs[dup]
fout.close()
print("after slimming: %i" % len(rna_seqs))
	
# read the target (utr) fasta.
print("Reading utr fasta.")
utr_seqs = read_fasta(utr_file)

# associate each scaffold with the RNAs annotated on it.
print("Parsing rna gff.")
data = parse_gff(gff_file, utr_seqs, rna_seqs)

# emit one utr file and one microRNA file per scaffold, for
# parallel downstream execution.
print("Generating files.")
for scaf, rna_list in data.items():
	# nothing to do for scaffolds with no associated RNAs.
	if not rna_list:
		continue

	# collect fasta records for RNAs that survived slimming.
	rna_records = [">%s\n%s\n" % (rna, rna_seqs[rna]) for rna in rna_list if rna in rna_seqs]

	# scaffold sequence file.
	fout = open("%s/%s_utr.fa" % (utr_dir, scaf), "wb")
	fout.write(">%s\n%s\n" % (scaf, utr_seqs[scaf]))
	fout.close()

	# matching microRNA file.
	fout = open("%s/%s_microRNAs.fa" % (rna_dir, scaf), "wb")
	fout.write("".join(rna_records))
	fout.close()
