#!/usr/bin/python
'''
Takes several pileup files and makes an h5 object from them.
'''
import sys
import h5py
import numpy as np

# parameters.
# Command-line parameters:
#   argv[1]  contig-size table ("<name> <length>" per line)
#   argv[2]  output HDF5 path
#   argv[3:] one or more pileup files to merge
ctg_sz_file = sys.argv[1]
out_file = sys.argv[2]
files = sys.argv[3:]  # slice replaces the manual range/append loop

# Numpy datatypes.
# Index record: contig name plus its [start, stop) span in the flat
# profile array.  The builtin `int` replaces `np.int`, which was
# deprecated in NumPy 1.20 and removed in 1.24; both map to the
# platform default integer dtype.
names_dt = np.dtype([
		('name', np.str_, 200),  # fixed-width unicode; longer names truncate
		('start', int),
		('stop', int),
		])

# Per-base coverage counts.
profile_dt = int

# Read contig sizes: whitespace-delimited lines of "<name> <length>".
# Builds ctg_sizes (name -> length), total_bases, and num_ctgs.
print("reading contig sizes.")
ctg_sizes = {}
total_bases = 0
# Text mode ("r", not "rb"): we parse decoded lines.  Streaming the file
# avoids loading it whole via readlines() and the later `del lines`.
with open(ctg_sz_file, "r") as fin:
	for line in fin:
		tmp = line.strip().split()
		sz = int(tmp[1])  # parse once instead of twice
		ctg_sizes[tmp[0]] = sz
		total_bases += sz
num_ctgs = len(ctg_sizes)

# Create arrays.
print("instantiating index array.")
names = np.zeros(num_ctgs, dtype=names_dt)

print("instantiating profile array.")
profiles = np.zeros(total_bases, dtype=profile_dt)

# Populate index info: each contig gets a half-open [start, stop) slice
# of the flat profile array, laid out in dict-iteration order.
print("populate index array.")
name_to_idx = {}
idx = 0
# enumerate + items() replaces the two hand-maintained counters and the
# repeated ctg_sizes[ctg] lookup.
for i, (ctg, size) in enumerate(ctg_sizes.items()):
	stop = idx + size
	names[i]['name'] = ctg  # NOTE(review): names > 200 chars truncate silently (dtype width)
	names[i]['start'] = idx
	names[i]['stop'] = stop

	# remember this contig's row in `names`.
	name_to_idx[ctg] = i

	idx = stop
	
# Gather data from pileups: accumulate per-base coverage into `profiles`.
# Expected pileup columns: contig name, position, coverage.
for f in files:
	# Loop over file.
	print("processing  %s" % f)
	# `with` fixes the original's leaked file handle (fin was never
	# closed); text mode so tokens are str, matching name_to_idx keys.
	with open(f, "r") as fin:
		line_no = 0
		for line in fin:
			# tokenize.
			tmp = line.strip().split()

			# skip contigs absent from the size table.
			if tmp[0] not in name_to_idx:
				continue
			ctg_idx = name_to_idx[tmp[0]]
			# NOTE(review): pileup positions are conventionally 1-based but
			# are used here as 0-based offsets -- confirm input convention.
			ctg_pos = int(tmp[1])
			cov = int(tmp[2])

			# add to this contig's slice of the flat profile.
			offset = names[ctg_idx]['start']
			profiles[offset + ctg_pos] += cov

			# progress heartbeat every 100k parsed lines.
			line_no += 1
			if line_no % 100000 == 0:
				print(line_no)


# Save index and profile arrays to the HDF5 output.  Mode 'w' truncates
# any existing file; the context manager guarantees the file is closed
# (and flushed) even if a write fails.
with h5py.File(out_file, 'w') as h5_out:
	h5_out['names'] = names
	h5_out['profiles'] = profiles

