from math import copysign
from PIL import Image
from numpy import array, zeros
from optparse import OptionParser
from string import ascii_lowercase, digits, letters, maketrans, punctuation
from sys import path
path.append(".//utils")

import netcdf_helpers
import os
import sys
import timeit


def read_dataset(path_trainset, inputFile):
	"""Read the set identifiers listed in <path_trainset><inputFile>.txt.

	Returns a set with one entry per line of the file, stripped of
	surrounding whitespace (a blank line yields the empty string, exactly
	as the original loop did).
	"""
	# 'with' guarantees the handle is closed even if reading raises;
	# iterating the file directly avoids materializing it via readlines().
	with open(path_trainset + inputFile + ".txt") as f:
		return set(line.strip() for line in f)

def save_binarized_pic(path, element, new_img):
	# save_img
	local_dir = ".//dataset//words-b//"
	if not os.path.exists(local_dir):
		os.mkdir(local_dir)
	list_dir = ((path.strip()).split("//"))[3:]
	for dirs in list_dir:
		if not os.path.exists(local_dir + dirs):
			os.mkdir(local_dir + dirs)
		local_dir += dirs + "//"
	new_img.save(local_dir + "//" + element)
	# end save

def read_words(path_words, inputset, to_lower = False, sw = False):
	"""Read IAM-style word metadata from <path_words>words.txt.

	Keeps only the words whose line id (word id minus its last "-" part)
	belongs to `inputset`.  Returns (paths, total_width, words_with_errors):
	  paths             -- dict: "<word id>.png" -> the remaining metadata
	                       fields (segmentation result, gray level, bounding
	                       box, tag, transcription)
	  total_width       -- sum of the accepted words' widths (field 5)
	  words_with_errors -- ids whose width field is not positive (bad
	                       segmentations, excluded from `paths`)

	to_lower lowercases the stored transcription; sw additionally drops
	words whose transcription is an English stopword (english.stop).
	"""
	stopwords = set()
	if sw:
		# Stopword list, one word per line.
		with open(path_words + "english.stop", "r") as sf:
			stopwords = set(x.strip() for x in sf)
	paths = {}
	words_with_errors = []
	total_width = 0
	with open(path_words + "words.txt") as f:
		for line in f:
			words = line.strip().split()
			lw = words[0].split("-")
			lines_word = "-".join(lw[:-1])
			if lines_word not in inputset:
				continue
			if int(words[5]) <= 0:
				# Non-positive width marks a broken segmentation.
				words_with_errors.append(words[0] + ".png")
				continue
			if to_lower:
				words[8] = words[8].lower()
			if sw and words[8].lower() in stopwords:
				continue
			paths[words[0] + ".png"] = words[1:]
			total_width += int(words[5])
	return paths, total_width, words_with_errors

def preprocess_dataset(path, paths, save=False):
	"""Walk `path` recursively and yield (filename, feature_list) for every
	image file whose name appears in `paths`.

	Each image is binarized with the per-word gray-level threshold stored
	in paths[element][1] (pixels at or below the threshold become 0/ink,
	the rest 255), optionally saved via save_binarized_pic, then reduced
	to one 9-value feature vector per pixel column by extract_features.
	Images that fail to process are reported and removed from `paths`.
	"""
	listing = os.listdir(path)
	for element in listing:
		if os.path.isdir(path + element):
			# Recurse into sub-directories, re-yielding their results.
			for e, f in preprocess_dataset(path + element + "//", paths, save):
				yield e, f
		elif element in paths:
			features = []
			try:
				img = Image.open(path + element)
				# Hoist the threshold lookup out of the per-pixel lambda.
				threshold = int(paths[element][1])
				new_img = img.point(lambda i: int((1 + copysign(1, (i - threshold - 1))) / 2) * 255)
				#for saving binarized png, just call save_binarized_pic here
				if save:
					save_binarized_pic(path, element, new_img)
				for i, j, k, t, u, v, w, x, y in extract_features(new_img):
					features.append([i, j, k, t, u, v, w, x, y])
				yield element, features
			except Exception:
				# BUGFIX: was a bare 'except', which also caught
				# GeneratorExit/KeyboardInterrupt raised at the yield above,
				# wrongly reporting an error and deleting a valid entry.
				print "There's an error with image", element
				del paths[element]
					
def extract_features(image):
	"""Yield one 9-tuple of normalized features per pixel column of `image`.

	The image is expected to be binarized beforehand: pixel value 0 is ink,
	anything else is background (see preprocess_dataset).  Vertical
	positions `m` are measured from the bottom of the column as h - row
	index (assumes getdata() returns pixels top row first, the usual PIL
	row-major order -- TODO confirm).  Per column, left to right, the yield is:

	  m_0/h, m_1/h^2, m_2/h^3        -- 0th/1st/2nd vertical moments of the ink
	  top_most/h, bottom_most/h      -- upper / lower contour positions
	  inclination_top/h,
	  inclination_bottom/h           -- contour change vs the previous column
	  gray_transition/h              -- ink/background transition count
	  m_0/(top-bottom+1)             -- ink density between the contours

	A column with no ink yields all zeros.  This resembles the classic
	sliding-window feature set used for offline handwriting recognition
	(presumably Marti & Bunke style -- verify against the training setup).
	"""
	width = image.size[0]
	height = image.size[1]
	h = float(height)  # float so every division below is true division
	# Previous column's state, needed for the inclination features.
	old_top_most, old_bottom_most = 0, 0
	old_m_0 = 0.
	for i in range(width):
		# One-pixel-wide vertical strip: column i.
		img_2 = image.crop((i, 0, i + 1, height))
		m_0, m_1, m_2 = 0., 0, 0
		top_most = 0
		bottom_most = 0
		inclination_top, inclination_bottom = 0, 0
		gray_transition = 0
		prev_m = h + 1
		# Positions (from the bottom) of the ink pixels; since the scan runs
		# top to bottom, m values appear in decreasing order.
		for m in [h - m for m, w in enumerate(img_2.getdata()) if w == 0]:
			m_0 += 1
			m_1 += m
			m_2 += m**2
			if m > top_most:
				top_most = m
			# Overwritten every iteration: ends at the lowest ink pixel.
			bottom_most = m
			# A gap larger than one pixel between consecutive ink pixels
			# means a background run: two extra transitions.
			if (prev_m - m) > 1:
				gray_transition += 2
			prev_m = m
		if old_m_0 != 0:
			# Contour slopes relative to the previous inked column.
			inclination_top = top_most - old_top_most
			inclination_bottom = bottom_most - old_bottom_most
		old_m_0 = m_0
		old_top_most = top_most
		old_bottom_most = bottom_most
		# Boundary corrections for transitions at the column's edges.
		if bottom_most > 1:
			gray_transition += 1
		if top_most < height:
			gray_transition -= 1
		if m_0 != 0:
			yield m_0 / h, m_1 / h**2, m_2 / h**3, top_most / h, bottom_most / h, inclination_top / h, inclination_bottom / h, gray_transition / h, m_0 / (top_most - bottom_most + 1)
		else:
			# Empty column: all features are zero.
			yield 0., 0., 0., 0., 0., 0., 0., 0., 0.

def pack_features(path, words, total_width, to_lower = False, save = False):
	"""Run feature extraction over every word image under `path` and
	collect the arrays needed by the .nc writer: the stacked input matrix
	plus per-sequence tags, dimensions, lengths, target strings and the
	label alphabet."""
	if to_lower:
		labels = list(ascii_lowercase) + list(digits) + list(punctuation)
	else:
		labels = list(letters) + list(digits) + list(punctuation)
	seqTags = []
	seqDims = []
	targetStrings = []
	seqLengths = []
	# One 9-feature row per pixel column across the whole dataset.
	inputs = zeros((total_width, 9), 'f')
	row = 0
	for element, features in preprocess_dataset(path, words, save):
		seqLengths.append(len(features))
		seqTags.append(str(element))
		seqDims.append((9, len(features)))
		# Transcription as space-separated characters (one label each).
		targetStrings.append(" ".join(list(words[element][7].strip())))
		for vector in features:
			for col in range(9):
				inputs[row][col] = vector[col]
			row += 1
	return inputs, seqLengths, seqTags, seqDims, targetStrings, labels

def prepare_dataset(seqLengths, inputs, labels, seqTags, seqDims, targetStrings, targetFile):
	#create a new .nc file
	outputFilename = targetFile + ".nc"
	file = netcdf_helpers.NetCDFFile(outputFilename, 'w')

	#create the dimensions
	netcdf_helpers.createNcDim(file,'numSeqs',len(seqLengths))
	netcdf_helpers.createNcDim(file,'numTimesteps',len(inputs))
	netcdf_helpers.createNcDim(file,'inputPattSize',len(inputs[0]))
	netcdf_helpers.createNcDim(file,'numLabels',len(labels))

	#create the variables
	netcdf_helpers.createNcStrings(file,'seqTags',seqTags,('numSeqs','maxSeqTagLength'),'sequence tags')
	netcdf_helpers.createNcStrings(file,'labels',labels,('numLabels','maxLabelLength'),'labels')
	netcdf_helpers.createNcStrings(file,'targetStrings',targetStrings,('numSeqs','maxTargStringLength'),'target strings')
	netcdf_helpers.createNcVar(file,'seqLengths',seqLengths,'f',('numSeqs',),'sequence lengths')
	netcdf_helpers.createNcVar(file,'inputs',inputs,'f',('numTimesteps','inputPattSize'),'input patterns')


	#write the data to disk
	print "Writing data to", outputFilename
	file.close()

def main():
	path = ".//dataset//words//"
	path_ascii = ".//dataset//ascii//"
	
	parser = OptionParser(usage = "usage: %prog [options]")
	parser.add_option("-f", "--file", dest="filename", help="make the nc file for a train/test/validation set", metavar="FILE")
	parser.add_option("-s", "--stopwords", action="store_true", dest="sw", default=False, help="unconsider stopwords from trainset")
	parser.add_option("-l", "--lowercase", action="store_true", dest="lc", default=False, help="case insensitive on trainset")
	parser.add_option("-b", "--bin-save", action="store_true", dest="bs", default=False, help="save binarized pics")
	(option, args) = parser.parse_args()
	if len(args) != 0 and option.filename:
		parser.print_help()
	else:
		print "Reading " + option.filename + "..."
		inputset = read_dataset(path_ascii, option.filename)
		print "Reading only the words in " + option.filename + "..."
		words, total_width, words_with_errors = read_words(path_ascii, inputset, option.lc, option.sw)
		print "The words in", option.filename, "are", len(words)
		if words_with_errors:
			print "The following " + str(len(words_with_errors)) + " pictures will not be taken into account as they have negative length:"
			for w in words_with_errors:
				print w
		print "Extracting features..."
		inputs, seqLengths, seqTags, seqDims, targetStrings, labels = pack_features(path, words, total_width, option.lc, option.bs)

#		print len(inputs), len(seqLengths), len(seqLengths)/9

		prepare_dataset(seqLengths, inputs, labels, seqTags, seqDims, targetStrings,  option.filename)

if __name__ == "__main__":
	# tempo di esecuzione
	time = timeit.Timer("main()", "from __main__ import main").timeit(1)	
	print "Executed in", int(time // 60), "minutes and", int(time % 60), "seconds."


