#!/usr/bin/env python
# encoding: utf-8
"""
NLP.py

Created by Pablo Ortega Mesa on 2011-04-16.
Copyright (c) 2011 Toeska Research Group. All rights reserved.
"""

import sys
import os
import getopt
import nltk
import re
from time import gmtime, strftime
from nltk.stem.porter import PorterStemmer

class NLP():
	"""Extract terms from a collection of ``*.txt`` documents via POS tagging.

	On construction the class scans *path* for text files, tokenizes and
	POS-tags each one with NLTK, and keeps the tag lists in ``self.tags``.
	``writeSpecificTag`` then appends the terms whose tag matches a glob
	(nouns by default) to *filename*, de-duplicating afterwards, and
	``stopwordsFilter`` can prune that file against a stopword list.
	"""

	def __init__(self, path, filename, logFile):
		# path     : directory holding the input .txt documents (must end
		#            with a path separator — paths are built by concatenation)
		# filename : output terms file (appended to, then de-duplicated)
		# logFile  : optional log file path, or None to disable logging
		self.path = path
		self.filename = filename
		self.logFile = logFile
		self.files = self.__getFileList__()
		self.tags = self.pos_tagger()

	def __writeLog__(self, txt):
		"""Append a timestamped message to the log file (no-op when disabled)."""
		if self.logFile is not None:
			stamp = strftime("%a, %d %b %Y %H:%M:%S", gmtime())
			# 'with' guarantees the handle is closed even if write() raises
			with open(self.logFile, 'a') as log:
				log.write('[' + stamp + '] ' + txt + '\n')

	def __getFileList__(self):
		"""Return the full paths of the text files found under ``self.path``.

		NOTE: mirrors the original loose match — any name *ending* in 'txt'
		qualifies, not only a '.txt' extension.
		"""
		files = [self.path + f for f in os.listdir(self.path)
				 if f.endswith('txt')]
		self.__writeLog__(str(len(files)) + ' files loaded')
		return files

	def __unique__(self, items):
		"""Return *items* with duplicates removed, preserving first-seen order."""
		seen = set()
		out = []
		for item in items:
			if item not in seen:
				seen.add(item)
				out.append(item)
		return out

	def __normalize__(self, tokens, stem=False):
		"""Lower-case/stem *tokens* and drop duplicates (first occurrence wins).

		Quirk kept from the original implementation: tokens containing a
		non-word character are split on ``\\W+`` and each piece is stemmed,
		while plain tokens are only lower-cased (and only kept when longer
		than 3 chars).  The *stem* flag is accepted for interface
		compatibility but — as before — is never consulted.
		"""
		self.__writeLog__('Start normalize tokens, size initial: ' + str(len(tokens)))
		# Hoisted out of the loop: one stemmer and one compiled pattern,
		# instead of a fresh PorterStemmer per sub-token.
		stemmer = PorterStemmer()
		splitter = re.compile(r'\W+')
		salida = []
		for t in tokens:
			if splitter.search(t):
				for part in splitter.split(t):
					if part != '':
						# .stem() is the stable API (stem_word was removed
						# from modern NLTK releases)
						salida.append(stemmer.stem(part.lower()))
			elif len(t) > 3:
				salida.append(t.lower())
		# Order-preserving de-duplication: O(n) instead of the old O(n^2) scan.
		final = self.__unique__(salida)
		self.__writeLog__('End normalize tokens, size final: ' + str(len(final)))
		return final

	def pos_tagger(self):
		"""POS-tag the normalized tokens of every loaded file.

		Returns one list of ``(token, tag)`` pairs per document, in the
		order the files were listed.
		"""
		pos_tagged = []
		self.__writeLog__('Pos tagged procesing start')
		for filename in self.files:
			with open(filename, 'r') as archivo:
				data = archivo.read()
			tokens = nltk.word_tokenize(data)
			pos_tagged.append(nltk.pos_tag(self.__normalize__(tokens)))
			self.__writeLog__('Write tags on ' + filename + ' complete')
		self.__writeLog__('Pos tagged procesing end')
		return pos_tagged

	def writeSpecificTag(self, tag='N*'):
		"""Append terms whose POS tag matches the *tag* glob to the terms file.

		Fix: the *tag* parameter used to be silently ignored (the pattern was
		hard-coded to nouns).  It is now honoured; the default ``'N*'``
		reproduces the old behaviour of selecting every noun tag.  Terms of
		length <= 3 are skipped, and duplicates are removed afterwards.
		"""
		self.__writeLog__('Start write specific tag')
		# Translate the simple glob (e.g. 'N*') into a start-anchored regex.
		pattern = re.compile(r'\A' + re.escape(tag).replace(r'\*', '.*'))
		contGlob = 0
		# Append mode kept from the original: repeated runs accumulate and
		# are merged by the duplicate-removal pass below.
		with open(self.filename, 'a') as archivo:
			for doc in self.tags:
				for k, v in doc:
					if pattern.search(v) and len(k) > 3:
						contGlob += 1
						archivo.write(k + '\n')
		self.__deleteDuplicates__()
		self.__writeLog__(str(contGlob) + ' terms write in ' + self.filename)

	def __deleteDuplicates__(self):
		"""Rewrite the terms file keeping only the first occurrence of each term."""
		self.__writeLog__('Start eliminate repeated terms')
		# Read the terms (one per line, trailing newline stripped).
		with open(self.filename, 'r') as termsFile:
			terms = [line.rstrip('\n') for line in termsFile]
		self.__writeLog__(str(len(terms)) + ' terms loaded')
		# Drop the repeated terms, preserving order (was an O(n^2) loop).
		terms_aux = self.__unique__(terms)
		self.__writeLog__(str(len(terms) - len(terms_aux)) + " repeated terms removed")
		# Write the terms back without the duplicates.
		with open(self.filename, 'w') as termsFile:
			for t in terms_aux:
				termsFile.write(t + '\n')
		self.__writeLog__(str(len(terms_aux)) + " write terms")

	def stopwordsFilter(self, stopwordsFilename):
		"""Remove terms listed in *stopwordsFilename* from the terms file.

		Stopwords are lower-cased on load; terms are compared verbatim
		(they are already lower-case after normalization).
		"""
		self.__writeLog__('Start stopword filter with list: ' + stopwordsFilename)
		# Read the terms.
		with open(self.filename, 'r') as termsFile:
			terms = [line.rstrip('\n') for line in termsFile]
		self.__writeLog__(str(len(terms)) + ' terms loaded')
		# Read the stopterms (list kept so the log count matches the file).
		with open(stopwordsFilename, 'r') as stopTermsFile:
			stopterms = [line.rstrip('\n').lower() for line in stopTermsFile]
		self.__writeLog__(str(len(stopterms)) + ' stopterms loaded')
		# Apply the filter: set membership is O(1) per term instead of the
		# old O(len(stopterms)) inner scan.
		stopset = set(stopterms)
		aux = [t for t in terms if t not in stopset]
		self.__writeLog__(str(len(terms) - len(aux)) + " stopterms removed")
		# Write the filtered terms back.
		with open(self.filename, 'w') as termsFile:
			for t in aux:
				termsFile.write(t + '\n')
		self.__writeLog__('End stopword filter, the results are: ' + str(len(aux)) + " write terms")
		
		
		

def main(argv=None):
	"""Command-line entry point: parse options and run the term extraction.

	Options: -p/--path (input directory, required), -t/--terms (terms output
	file, required), -s/--stopwords (stopword list, optional).
	Exits with status 2 and a usage message when a required option is missing.
	"""
	if argv is None:
		argv = sys.argv[1:]
	options, remainder = getopt.getopt(argv, 'p:t:s:',
									   ['path=', 'terms=', 'stopwords='])

	path = False
	terms = False
	stopwords = False
	for opt, arg in options:
		if opt in ('-p', '--path'):
			path = arg
		if opt in ('-t', '--terms'):
			terms = arg
		if opt in ('-s', '--stopwords'):
			stopwords = arg

	# Fail early with a clear message instead of crashing deep inside
	# os.listdir(False) when a required option was omitted.
	if not path or not terms:
		sys.stderr.write('usage: NLP.py -p <path> -t <termsFile> [-s <stopwordsFile>]\n')
		sys.exit(2)

	nlp = NLP(path, terms, None)
	nlp.writeSpecificTag()
	if stopwords:
		nlp.stopwordsFilter(stopwords)


if __name__ == '__main__':
	# Guard so importing this module no longer triggers the pipeline.
	main()