#!/usr/bin/python
'''
Translates SAM to HAM format.
'''
import sys
import ham
import h5py
import numpy as np
import time
import logging
from processing import Process, Queue

# parameters.
# usage: script.py <sam_file> <ham_file> <num_threads>
sam_file = sys.argv[1]
ham_file = sys.argv[2]
num_threads = int(sys.argv[3])


# select the 50-column ham dtype for all arrays created below.
ham.ham_dt = ham.ham_50_dt
# read-buffer size passed to readlines(); larger values mean fewer,
# bigger chunks flowing through the queues.
# NOTE(review): `buffer` shadows the Python 2 builtin of the same name;
# left as-is because read_sam() reads this global.
#buffer = 32212254720		# 30GB
#buffer = 10737418240		# 10GB
#buffer = 5368709120		# 5GB
#buffer = 1073741824		# 1GB
#buffer = 536870912			# 1/2 GB
buffer = 104857600			# 100 MB
#buffer = buffer / 10

# log with the process name so the pipeline stages are distinguishable.
logging.basicConfig(level=logging.INFO, format='[%(levelname)s] (%(threadName)-10s) %(message)s', )



# read in the data into line list.
# read in the data into line list.
def read_sam(fpath, lq, cq, chunk_bytes=104857600):
	'''
	Reads the SAM file at fpath in ~chunk_bytes-sized chunks.

	Each chunk (a list of whole lines) is put on the line queue lq.
	When the file is exhausted, the total number of chunks read is put
	on the count queue cq so the main process knows how many to expect.

	chunk_bytes replaces the old module-level `buffer` global; its
	default matches the previous 100 MB value.
	'''
	chunk_cnt = 0
	# BUGFIX: the original opened the global `sam_file` here, silently
	# ignoring the fpath argument it was given.
	fin = open(fpath, "rb")
	try:
		while True:
			# readlines(hint) returns whole lines totalling roughly
			# chunk_bytes bytes.
			logging.info("reading chunk %i" % chunk_cnt)
			lines = fin.readlines(chunk_bytes)

			# empty list means end of file.
			if not lines:
				break

			# enqueue the chunk for the tokenizer processes.
			lq.put(lines)
			chunk_cnt += 1
	finally:
		# close the file even if a queue operation blows up.
		fin.close()

	# report how many chunks were produced.
	logging.info("done reading.")
	cq.put(chunk_cnt)
				

# tokenizes lines into ham array.
# tokenizes lines into ham array.
def tokenize_lines(lq, hq):
	'''
	Consumes line chunks from lq, parses each SAM record, and puts the
	resulting ham arrays on hq.  Runs until the done token (the int
	187) is received; chunks are lists, so the equality test is safe.
	Header lines (starting with "@") are skipped and the array is
	shrunk to the number of records actually parsed.
	'''
	while True:
		# block until the reader hands us a chunk.
		logging.info("waiting for chunk.")
		lines = lq.get()

		# see if it's the done token.
		if lines == 187:
			break

		# allocate one ham record per line; shrunk below if any
		# header lines are skipped.
		sz = len(lines)
		logging.info("creating ham chunk of size %i." % sz)
		ham_data = ham.create_array(sz)

		# loop over the chunk, filling records in order.
		logging.info("tokenizing chunk.")
		i = 0
		for line in lines:

			# skip SAM header lines.
			if line[0] == "@": continue

			# the 11 mandatory SAM fields are tab separated.
			tmp = line.strip().split("\t")

			ham_data[i]['QNAME'] = tmp[0]
			ham_data[i]['FLAG'] = int(tmp[1])
			ham_data[i]['RNAME'] = tmp[2]
			ham_data[i]['POS'] = int(tmp[3])
			ham_data[i]['MAPQ'] = int(tmp[4])
			ham_data[i]['CIGAR'] = tmp[5]
			ham_data[i]['MRNM'] = tmp[6]
			ham_data[i]['MPOS'] = int(tmp[7])
			ham_data[i]['ISIZE'] = int(tmp[8])
			ham_data[i]['SEQ'] = tmp[9]
			ham_data[i]['QUAL'] = tmp[10]

			# optional tags (field 12 onward) kept as one tab-joined blob.
			# BUGFIX: was `len(tmp) > 10`, which also fired for exactly
			# 11 (all-mandatory) fields and stored an empty string.
			if len(tmp) > 11:
				ham_data[i]['OPTIONAL'] = '\t'.join(tmp[11:])

			i += 1

		# shrink the array if header lines were skipped.
		# NOTE(review): resize((i, 1)) makes the array 2-D; presumably
		# ham.create_array / HamFile.save_array accept that shape --
		# confirm before changing.
		if i < sz:
			ham_data.resize((i, 1))

		# never enqueue an empty chunk (e.g. all-header chunks).
		if ham_data.size != 0:
			hq.put(ham_data)


# takes ham arrays and writes to hdf5 file.
# takes ham arrays and writes to hdf5 file.
def write_ham(fpath, hq, cq):
	'''
	Consumes ham arrays from hq and appends them to the compressed
	HamFile at fpath.  Puts True on cq after each chunk is saved so the
	main process can count completions.  Runs until the done token (the
	int 187) is received.
	'''
	# create ham object (compressed).
	ham_obj = ham.HamFile(fpath, cprs=True)

	# loop till done token.
	chunk_cnt = 0
	while True:

		# wait for a ham array.
		ham_data = hq.get()

		# check for the done token.  BUGFIX: test the type first --
		# `numpy_array == 187` compares elementwise, which would make a
		# bare `if` on the result ambiguous/raise on modern numpy.
		if isinstance(ham_data, int) and ham_data == 187:
			break

		# save chunk of ham obj.
		logging.info("saving chunk %i to disk." % chunk_cnt)
		ham_obj.save_array(ham_data)

		# ack the chunk so the main process can count it.
		cq.put(True)
		chunk_cnt += 1

	# close ham object.
	logging.info("closing ham object.")
	ham_obj.close()
	logging.info("done writing.")

# setup parallel structures connecting reader -> tokenizers -> writer.
line_queue = Queue()	# raw line chunks from the reader
ham_queue = Queue()		# tokenized ham arrays for the writer
cnt1_queue = Queue()	# total chunk count reported by the reader
cnt2_queue = Queue()	# per-chunk acks from the writer

# start reading the file.
reader_p = Process(target=read_sam, name="reader", args=(sam_file, line_queue, cnt1_queue))
reader_p.start()

# start tokenizing processes.
toks = []
for i in range(num_threads):
	worker = Process(target=tokenize_lines, name="tokenizer_%i" % i, args=(line_queue, ham_queue))
	# daemonize: the tokenizers are never sent a done token on
	# line_queue, so they must die with the main process.
	worker.setDaemon(True)
	worker.start()
	toks.append(worker)

# start writing process.
# BUGFIX: was mis-named "reader", which made the log output ambiguous.
writer_p = Process(target=write_ham, name="writer", args=(ham_file, ham_queue, cnt2_queue))
writer_p.start()

# wait for the reader to finish and learn how many chunks it produced.
logging.info("waiting for reading finish.")
reader_p.join()

# get number of chunks created.
num_chunks = cnt1_queue.get()
logging.info("found %i chunks were read." % num_chunks)

# wait for the writer to ack one completion per chunk.
# NOTE(review): tokenize_lines drops empty (e.g. all-header) chunks, so
# the writer may ack fewer than num_chunks and this loop could block
# forever on pathological input -- confirm against real SAM files.
logging.info("waiting for writing finish.")
for i in range(num_chunks):
	cnt2_queue.get()

# tell the writer to shut down (187 is the done token), then reap it.
logging.info("writing is finished, closing thread.")
ham_queue.put(187)
writer_p.join()