'''
Created on 17/09/2013

@author: Hugo
'''
import glob
import os
import re

class DocumentCollection:
    """Collects ``*.txt`` documents from a folder and tokenizes them,
    filtering out common English stop words loaded from
    ``common-english-words.txt``.

    Note: the original version kept ``docFolder``/``stopWords`` as
    class-level attributes; because ``__init__`` mutated the shared class
    list, stop words accumulated across every instance. They are now
    plain instance attributes.
    """

    def __init__(self, docFolder):
        """Initialize the collection.

        docFolder: path to the document folder; falls back to ``docs/``
        when it is not a string naming an existing directory.

        Raises OSError if ``common-english-words.txt`` cannot be read.
        """
        if isinstance(docFolder, str) and os.path.isdir(docFolder):
            self.docFolder = os.path.abspath(docFolder)
        else:
            self.docFolder = os.path.abspath("docs/")
        # Loading stop words. Instance attribute (not the shared class
        # list) so repeated construction does not duplicate entries.
        # Each file row is kept as a list of comma-separated words,
        # preserving the original list-of-rows structure.
        self.stopWords = []
        with open(os.path.abspath("common-english-words.txt"), 'r') as stopWordsFile:
            for row in stopWordsFile:
                self.stopWords.append(row.strip().split(','))

    def setDocumentFolder(self, docFolder):
        """Point the collection at a new folder.

        Returns True on success, False when ``docFolder`` is not a string
        naming an existing directory.
        """
        if isinstance(docFolder, str) and os.path.isdir(docFolder):
            # abspath for consistency with __init__ (the original stored
            # the raw relative path here, unlike the constructor).
            self.docFolder = os.path.abspath(docFolder)
            return True
        return False

    def collectDocuments(self):
        """Return the absolute paths of every ``*.txt`` file in the
        document folder."""
        pattern = os.path.join(self.docFolder, "*.txt")
        return [os.path.abspath(path) for path in glob.glob(pattern)]

    def getTokens(self, document):
        """Return the unique lower-cased word tokens of ``document`` in
        order of first appearance, or False if the file cannot be read.
        """
        try:
            tokens = []
            seen = set()  # O(1) membership test instead of scanning the list
            # with-block guarantees the handle is closed even on error
            # (the original leaked it on the exception path).
            with open(document, 'r') as documentFile:
                for line in documentFile:
                    # Checking the line for words and obtaining the tokens;
                    # keep a token only the first time it is seen.
                    for token in self.parseWords(line):
                        lowered = token.lower()
                        if lowered not in seen:
                            seen.add(lowered)
                            tokens.append(lowered)
            return tokens
        except OSError:
            # os.error is just a legacy alias of OSError.
            return False

    def parseWords(self, text):
        """Extract alphabetic words from ``text``, keeping whole
        contractions (e.g. ``they're``).

        The original pattern ``[a-zA-Z]+'?[a-zA-Z]?`` truncated
        contractions with more than one letter after the apostrophe
        ("they're" -> "they'r", "e").
        """
        return re.findall(r"[a-zA-Z]+(?:'[a-zA-Z]+)?", text)

    def normalizeTokens(self, tokens):
        """Lower-case ``tokens``, strip punctuation and drop stop words.

        Mutates ``tokens`` in place and also returns it, matching the
        original contract. Fixes two defects of the original:
        index-based ``pop`` removed the wrong elements once earlier
        indices had shifted, and stop words were matched before
        lower-casing so capitalized stop words ("The") survived.
        """
        # Add more symbols here for more precise punctuation removal.
        punctuations = "'"
        normalized = []
        for token in tokens:
            word = token.lower()
            # Skip stop words (compared lower-cased).
            if self.isStopWord(word):
                continue
            # Removing punctuation.
            for symbol in punctuations:
                word = word.replace(symbol, '')
            normalized.append(word)
        # Slice-assign so callers holding a reference see the update.
        tokens[:] = normalized
        return tokens

    def isStopWord(self, token):
        """Return True if ``token`` appears on any loaded stop-word row."""
        return any(token in row for row in self.stopWords)

    def writeSearchResult(self, fileName, documentNames):
        """Write the document names to ``fileName`` as HTML fragments,
        one per name, separated by ``<BR>``. An empty/falsy
        ``documentNames`` writes a (Spanish) "no results" message.
        """
        if not documentNames:
            documentNames = ["No se encontraron resultados"]
        with open(fileName, 'w') as f:
            for document in documentNames:
                f.write(document + "<BR>")