import re

from rai.search.searchenginemanager import *
from rai.web.webpreprocessor import *
from rai.tagger.sentenceparser import *
from rai.sem.FOL import *
import rai.logger.logger

class AskManager():
    """
        This class will do all the stuff that has to be done to answer a question
    """

    def __init__(self, question, max_results=3):
        # question: the natural-language question string to answer
        # max_results: maximum number of search-engine results to fetch
        self.question = question
        self.manager = SearchEngineManager()
        self.manager.setMaxResults(max_results)

        self.results = None  # the results of the SearchEngineManager

        self.parser = SentenceParser()  # syntactic sentence parser
        self.world = World()            # semantic world model (FOL facts)

    def search(self):
        """Execute the search engine query for the question and store the results."""
        self.manager.setSearchQuery(self.question)
        self.manager.executeQuery()
        self.results = self.manager.getResults()

    def getSummaries(self):
        """
            Returns a list of summaries list<String>
        """
        # BUG FIX: the original ended with a bare `return`, discarding the
        # collected summaries and always handing callers None.
        return [self.getSummary(result) for result in self.results]

    def getSummary(self, result):
        """Format one search result (Title/Summary/Url) as a printable string."""
        summary_format = "Title: %s\nSummary: %s\nURL: %s\n\n"
        return summary_format % (result.Title, result.Summary, result.Url)

    def processPage(self, result):
        """Fetch and preprocess the web page behind a search result.

        Returns the WebPreprocessor instance so callers can query its
        filtered/unfiltered line views.
        """
        return WebPreprocessor(result.Url)

    def debugFilterResult(self, wp):
        """
            Returns a list of strings with debug information about the wp filter
        """
        debug_lines = ["Filtered:\tText:\n\n"]
        for (filtered, line) in zip(wp.getWhichLinesAreFiltered(), wp.getUnfilteredLines()):
            try:
                debug_lines.append(str(filtered) + "\t" + line + "\n")
            except UnicodeDecodeError as e:
                # illegal characters, skip sentence
                print(e)
        return debug_lines

    def __splitSentences(self, filtered_lines):
        """Split each line into sentences; a line may contain multiple sentences."""
        split_sents = []
        sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
        for line in filtered_lines:
            split_sents.extend(sent_tokenizer.tokenize(line))
        return split_sents

    def cleanupSentences(self, result, sentences):
        """
            Clean up the sentence strings.
            For example if the source is wikipedia, remove all references
            e.g.: 'Lorem ipsum dolor[6] sit amet' becomes 'Lorem ipsum dolor  sit amet'
        """
        # NOTE(review): `result` (and its Url) is currently unused; kept for
        # interface compatibility and future source-specific cleanup rules.
        ref_pattern = re.compile(r'\[\d+\]')
        return [ref_pattern.sub(' ', sentence) for sentence in sentences]

    def getParsedSentences(self, result, filtered_lines):
        """Split, clean up and syntactically parse the filtered page lines.

        Sentences the parser cannot handle are logged and skipped
        (best-effort), so the returned list may be shorter than the input.
        """
        split_sents = self.__splitSentences(filtered_lines)
        sents = self.cleanupSentences(result, split_sents)
        parsedSentences = []
        for line in sents:
            try:
                parsedSentences.append(self.parser.parseSentence(line))
            except Exception as e:
                # deliberate best-effort: report and continue with next sentence
                print("Exception: %s" % e)
                print(e.args)
        return parsedSentences

    def parseQuestion(self):
        """
            Parses the question and returns a syntactic representation in the form of a Tree
        """
        sents = [self.question]
        gramfile = self.world.grammar_file_location
        data = []
        try:
            results = nltk.sem.batch_interpret(sents, gramfile, trace=5)
            for sent in sents:
                for (synrep, semrep) in results[sent]:
                    print(synrep)
                    data.append(str(synrep))
        except ValueError as e:
            print(e)
            data.append(str(e))
        return '\n'.join(data)

    def __semantics(self, parsed_sents):
        """Add each parsed sentence as a fact to a fresh World (semantic stage)."""
        # NOTE: removed a stray `pass` that preceded the live code (a no-op).
        world = World()
        for parsed_sentence in parsed_sents:
            try:
                world.addFact(parsed_sentence)
                print("Succes!")
            except Exception as e:
                print("Exception: %s" % e)
                print(e.args)