from types import *
from inspect import *
from sets import Set

import nltk
#from nltk import cfg, parse
from nltk.grammar import *
from nltk import parse
from nltk.parse import FeatureEarleyChartParser

from rai.util.util import *
from rai.tagger.sentenceparser import *
from rai.sem.verbhandler import *
from rai.sem.sentenceHandler import *
from rai.logger.logger import *
import rai.sem
from rai.file.FileManager import *

import production

#from test.wordnet.testwordnet import *

class DomainWrapper:
    """ This class is a wrapper class around the domain used in nltk.sem
    This class provides easy methods to create new (unused) variables in the domain

    Variable names are generated as <letter><number>: "a1" .. "a10",
    then "b1" .. "b10", and so on. Names already present in the domain
    are skipped, so getNewVar() always returns a fresh name.
    """

    def __init__(self):
        """ init the DomainWrapper """
        self.domain = Set()  # every variable name known to be in use
        # start_* record where generation begins; last_* track the most
        # recently generated name (last_int == 0 means nothing generated yet).
        self.start_char = 'a'
        self.start_int = 1
        self.last_char = 'a'
        self.last_int = 0

    def addDomain(self, existing_domain):
        """ Merge an existing domain (iterable of names) into this wrapper.

        @raise NameError: if any name is already tracked by this wrapper.
        """
        for x in existing_domain:
            if (x in self.domain):
                # use call form of raise: works in both Python 2 and 3
                # (the old "raise NameError, msg" statement is Py2-only)
                raise NameError("variable already exists in Domain")
            self.domain.add(x)

    def __nextVar(self):
        """ Return the next candidate name in sequence (may collide). """
        if self.last_int == 10:
            # ten names per letter: roll over to the next letter
            self.last_int = 1
            self.last_char = chr(ord(self.last_char)+1)
        else:
            self.last_int = self.last_int + 1

        return self.last_char + str(self.last_int)

    def getNewVar(self):
        """ Generate, register and return a name not yet in the domain. """
        new_var = self.__nextVar()
        while(new_var in self.domain):
            new_var = self.__nextVar()
        self.domain.add(new_var)
        return new_var

    def getDomain(self):
        """ Return the underlying set of all tracked variable names. """
        return self.domain

class World:
    """
        Represents the world of facts, domains, grammar and the model.

        Backed by two files: a feature-based context-free grammar (.fcfg)
        and a valuation (.val) holding the facts. New facts and grammar
        productions are accumulated in memory and written out with
        writeWorld(); the grammar must be saved and reloaded before new
        productions take effect.

        NOTE(review): written for Python 2 and an old NLTK API
        (text_evaluate / batch_evaluate / batch_parse) -- confirm the
        intended NLTK version before upgrading.
    """
    def __init__(self, grammar_file=None, val_file=None, isEvent=False):
        """
            Load the grammar and valuation and build the initial model.

            @param grammar_file: "file:" URL of the .fcfg grammar; defaults
                to world.test.fcfg inside the rai.file package directory.
            @param val_file: "file:" URL of the .val valuation; defaults to
                world.test.val inside the rai.file package directory.
            @param isEvent: isEvent is true when Davidson-style event semantics should be used
        """
        __dir__ = rai.file.__path__
        #print __dir__
        # NOTE(review): default paths use "\\" separators, so the defaults
        # only work on Windows -- confirm, consider os.path.join.
        if grammar_file == None:
            grammar_file = "file:" + __dir__[0] + "\\world.test.fcfg"
        if val_file == None:
            val_file = "file:" + __dir__[0] + "\\world.test.val"
        # remember the locations so answerQuestion() can reload them later
        self.grammar_file_location = grammar_file
        self.val_file_location = val_file
        self.isEvent = isEvent
        
        self.grammar = nltk.data.load(grammar_file,format="fcfg") # grammar
        self.val4 = nltk.data.load(val_file,format="val") # all the facts
        self.dom4 = self.val4.domain # domain: the variables representing NP's
        self.m4 = nltk.sem.Model(self.dom4, self.val4) # create model, model can reason about facts
        self.g = nltk.sem.Assignment(self.dom4) # This is a mapping from individual variables to entities in the domain

        # wrapper used to hand out fresh (unused) variable names
        self.domainwrapper = DomainWrapper()
        self.domainwrapper.addDomain(self.dom4)
        # grammar production strings added at runtime; written by writeWorld()
        self.newProductions = []

    def getVal(self):
        """ Return the current valuation (all known facts). """
        return self.val4
    
    def getDomainWrapper(self):
        """ Return the DomainWrapper used to allocate fresh variables. """
        return self.domainwrapper
    
    def getGrammar(self):
        """
            The grammar used by the World.
            Use addNewProduction to add new grammar rules. The grammar needs 
            to be saved and reloaded before these new rules have effect.
        """
        return self.grammar
    
    def addFact(self, parsed_sentence):
        """ Adds a parsed sentence to the world
            The SentenceHandler will analyze the parsed_sentence. 
            It will add any words that are not covered in the lexicon of the grammar to the grammar (generate new production rules)
            And it will convert the parsed sentence to a semantic valuation
            @param parsed_sentence: [['NP', ('Bob', 'NP', 'NNP')], ['VP', ('is', 'BEZ', 'VBZ')], ['NP', ('a', 'AT', 'DT'), ('kitchen', 'NN', 'NN')]]
            @return: whatever SentenceHandler.analyzeSentence() produces
        """
        sentHandler = SentenceHandler(parsed_sentence, self)
        information = sentHandler.analyzeSentence()
        return information

    def satifiers(self, logic_exp, var):
        """ var should be free in logic_exp

            Prints (trace level 5) the set of domain entities that satisfy
            logic_exp. NOTE(review): method name is a typo for "satisfiers"
            but renaming would break callers; also note the hard-coded 'z1'
            below is used instead of the var parameter -- confirm intent.
        """
        lp = nltk.LogicParser()
        fmla1 = lp.parse(logic_exp)
        valuation = self.getVal()
        dom = valuation.domain
        model = nltk.sem.Model(dom, valuation)
        assignment = nltk.sem.Assignment(dom)        
        #fmla1 = lp.parse('(girl(x) -> exists y.(dog(y) and chase(x, y)))')
        #fmla1 = lp.parse('\\x.loves(x,Mary)')
        #fmla1 = lp.parse('\\x.exists z1.(girl(z1) & loves(x,z1))')
        #some x.loves(x,y)
        print model.satisfiers(fmla1, 'z1', assignment, trace = 5)

    def answerLogic(self, logic_exp):
        """
            evaluates a logical expression

            Logs either a single boolean answer or, for a dict-like result,
            each entity that evaluates to True.
        """
        valuation = self.getVal()
        dom = valuation.domain
        
        model = nltk.sem.Model(dom, valuation)
        assignment = nltk.sem.Assignment(dom)
        entity_evaluation = model.evaluate(logic_exp, assignment, trace = 5)
        if entity_evaluation == "Undefined":
            log("Evaluation is Undefined.")
            pass
        elif type(entity_evaluation) is BooleanType:
            # anwser is single value
            log("Anwser is: " + str(entity_evaluation))
        else:
            # keep only the entities the model maps to True
            true_facts = filter(lambda n: entity_evaluation[n], entity_evaluation)
            #print "True: "+str(true_facts)
            for x in true_facts:
                log(" * " + str(x) + " : " + str(find_key(valuation, x)) + " = True")    
    
    def answerQuestion(self, question, trace=0):
        """
            Answers a question (normal sentence).

            Reloads the valuation from disk (to pick up facts written by
            writeWorld), parses/evaluates the question against the grammar
            file, and logs the answer(s).
            @param question: the sentence to evaluate.
            @param trace: NLTK trace level; > 0 also prints the parse trees.
        """
        grammar_file = self.grammar_file_location # reload the grammar from this file
        val_file = self.val_file_location # reload the valuation file
        self.val4 = nltk.data.load(val_file,format="val") # all the facts
                
        valuation = self.getVal()
        dom = valuation.domain # domain: the variables representing NP's
        model = nltk.sem.Model(dom, valuation) # create model, model can reason about facts
        assignment = nltk.sem.Assignment(dom) # This is a mapping from individual variables to entities in the domain
        #text_evaluate(inputs, grammar, model, assignment, semtrace=0)
        #result = nltk.sem.text_evaluate([question], self.getGrammar(), model, assignment, semtrace=trace)
        if trace > 0:
            for tree in nltk.sem.batch_parse([question], grammar_file, trace):
                print "Parse:\n %s" %tree
        # [0] = results for the first (only) input sentence
        result = nltk.sem.batch_evaluate([question], grammar_file, model, assignment, trace)[0]
        
        #print "Semantics: %s" %  root_semrep(tree)
        #print result
        #print question
        if result == []:
            # question has no answer
            log("Question: " + question)
            log("No result")
        for (syntree, semrep, value) in result:
            #print syntree
            #print "'%s' is %s in Model m\n" % (semrep, value)
            log("Question: " + question)            
            if value == 'Undefined':
                log("No answer")
            elif type(value) is BooleanType:
                # anwser is single value
                log("Anwser is: " + str(value))
            else:
                #print "ALL:"
                #for x in value:
                #    print x
                #    print value[x]
                # value maps entities to truth values; keep the True ones
                true_facts = filter(lambda n: value[n], value)
                #print "True: "+str(true_facts)
                log("Question has " + str(len(true_facts)) + " anwser(s).")
                for x in true_facts:
                    log(" * " + find_key(valuation, x))

    def addNewProduction(self, production_line):
        """
            Add a new production line (string) to the grammar.
            The grammar has to be saved and reloaded before these new rules can be used.
            When we write the world these new productions will be added to the .fcfg file
        """
        self.newProductions.append(production_line)
    
    
    def writeWorld(self, filename):
        """
            Writes grammar (with its new productions) and valuation to a file.
            The grammar is saved as filename+".fcfg" 
            The valuation is saved as filename+".val"
        """
        #print self.getGrammar().lexicon()#["VP[num=?n,sem=?vp] PP[-pred,sem=?pp]"]
        filestore = FileStore()
        filestore.createFile(filename+".fcfg")
        
        filestore.writeLine("% start S")
        # NOTE(review): if productions() returns a fresh list each call,
        # this in-place sort has no effect on the loop below -- confirm.
        self.getGrammar().productions().sort(production.prod_cmp)
        for prod in self.getGrammar().productions():
            #print prod._lhs
            #print prod._rhs            
            #lexline =  lex_key + " -> " + str(self.getGrammar().productions()[lex_key])
            #filestore.writeLine(str(prod.lhs()))
            
            # render each production as "LHS -> RHS"
            prodprinter = production.ProductionPrinter(prod)
            parts = []
            parts.append(prodprinter.getLeft())
            parts.append("->")
            parts.append(prodprinter.getRight())
            filestore.writeLine(' '.join(parts))
            
            
        # append the productions added at runtime via addNewProduction()
        for index, line in enumerate(self.newProductions):
            print line
            if line is not None:
                filestore.writeLine(line)
            else:
                log("line %s in self.newProductions is None" % index)
        filestore.closeFile()
        
        # second file: the valuation ("key => value" / "key => {items}")
        filestore = FileStore()
        filestore.createFile(filename+".val")
        #print self.getVal()
        for lex_key in self.getVal():
            line = None
            #filestore.writeLine(lex_key + " => " + str(self.getVal()[lex_key]))
            value = self.getVal()[lex_key]
            if type(value) == type(""):
                # value is a string not a set()
                # just write it
                line = lex_key + " => " + value
            elif type(value) == type(set()):
                val_items = []
                for setitem in value:
                    # setitem is a tuple
                    # convert tuple to printable val-item
                    # if tuple has length one, omit the parenthesis
                    if len(setitem) == 1:
                        val_items.append(str(setitem[0]))
                    else:
                        tuple_items = []
                        for tuple_item in setitem:
                            tuple_items.append(tuple_item)
                        
                        val_items.append("(" + ', '.join(tuple_items) + ")")
                line = lex_key + " => " + "{" + ', '.join(val_items) + "}"
            # NOTE(review): values that are neither str nor set leave line
            # as None and are passed to writeLine -- confirm that is handled.
            filestore.writeLine(line)
            
        filestore.closeFile()        
        #print getmembers(self.getGrammar(),ismethod)
        #print self.getGrammar().check_coverage()
        #print self.getGrammar().covers()
        #print self.getGrammar().productions()
        #print self.getGrammar().start()

    def getString(self):
        """ Return a human-readable dump of the lexicon and valuation. """
        result = []
        result.append("Lexicon:\n")
        for lex_key in self.getGrammar().lexicon():
            result.append(lex_key + " -> " + str(self.getGrammar().lexicon()[lex_key]) + "\n")
        result.append("\nValuation:\n")
        for lex_key in self.getVal():
            result.append(lex_key + " => " + str(self.getVal()[lex_key]) + "\n")
        return ''.join(result)






###############################################################33
# Evaluation methods

def evaluateSentences(sentences):
    """[sentence, sentence]"""
    grammar = nltk.data.load('grammars/sem4test.fcfg')
    val4 = nltk.data.load('grammars/valuation1test.val')
    dom4 = val4.domain
    m4 = nltk.sem.Model(dom4, val4)
    g = nltk.sem.Assignment(dom4)
    results = nltk.sem.text_evaluate(sentences, grammar, m4, g)
    #print sent
    #if result[sent] == []:
        # question has no answer
    #    print "No result"
    for sentence in results:
        print sentence
        for (syntree, semrep, value) in results[sentence]:
            #print syntree
            #print "'%s' is %s in Model m\n" % (semrep, value)
            #if 
            log("Question: " + sentence)            
            if value == 'Undefined':
                log("No answer")
            elif type(value) is BooleanType:
                # anwser is single value
                log("Anwser is: " + str(value))
            else:
                true_facts = filter(lambda n: value[n], value)
                #print "True: "+str(true_facts)
                log("Question has " + str(len(true_facts)) + " anwser(s).")
                for x in true_facts:
                    log(" * " + find_key(val4, x))

def evaluateSentenceCustomValuation(grammar, valuation, sent):
    """
        Evaluate one sentence against a caller-supplied grammar and
        valuation, logging the outcome via log().
        @param grammar: a loaded feature grammar.
        @param valuation: an nltk valuation holding the facts.
        @param sent: the sentence string to evaluate.
    """
    entities = valuation.domain  # the entities the valuation talks about
    model = nltk.sem.Model(entities, valuation)  # reasons over the facts
    assignment = nltk.sem.Assignment(entities)  # variable -> entity mapping
    outcome = nltk.sem.text_evaluate([sent], grammar, model, assignment)
    if outcome[sent] == []:
        # the sentence produced no parses/answers at all
        log("No result")
    for (tree, sem, value) in outcome[sent]:
        log("Question: " + sent)
        if value == 'Undefined':
            log("No answer")
        elif type(value) is BooleanType:
            # a single truth value
            log("Anwser is: " + str(value))
        else:
            # value maps entities to truth values; keep the True ones
            positives = [key for key in value if value[key]]
            log("Question has " + str(len(positives)) + " anwser(s).")
            for entity in positives:
                log(" * " + find_key(valuation, entity))


def evaluateSentence(sent):
    """
        Evaluate a single sentence against the world.test grammar and
        valuation files packaged with rai.file, logging the outcome
        (NLTK trace level 5).
        @param sent: the sentence string to evaluate.
    """
    pkg_path = rai.file.__path__
    grammar_file = "file:" + pkg_path[0] + "\\world.test.fcfg"
    val_file = "file:" + pkg_path[0] + "\\world.test.val"

    facts = nltk.data.load(val_file)
    entities = facts.domain  # the entities the valuation talks about
    model = nltk.sem.Model(entities, facts)  # reasons over the facts
    assignment = nltk.sem.Assignment(entities)  # variable -> entity mapping
    # [0] = results for the first (only) input sentence
    outcome = nltk.sem.batch_evaluate([sent], grammar_file, model, assignment, trace=5)[0]
    if outcome == []:
        # the sentence produced no parses/answers at all
        log("No result")
    for (tree, sem, value) in outcome:
        log("Question: " + sent)
        if value == 'Undefined':
            log("No answer")
        elif type(value) is BooleanType:
            # a single truth value
            log("Anwser is: " + str(value))
        else:
            # value maps entities to truth values; keep the True ones
            positives = [key for key in value if value[key]]
            log("Question has " + str(len(positives)) + " anwser(s).")
            for entity in positives:
                log(" * " + find_key(facts, entity))


def _repr(featDict, reentrances, reentrance_ids):
    segments = []
    prefix = ''
    suffix = ''

    # If this is the first time we've seen a reentrant structure,
    # then assign it a unique identifier.
    if reentrances[id(featDict)]:
        assert not reentrance_ids.has_key(id(featDict))
        reentrance_ids[id(featDict)] = `len(reentrance_ids)+1`

    # sorting note: keys are unique strings, so we'll never fall
    # through to comparing values.
    for (fname, fval) in sorted(featDict.items()):
        display = getattr(fname, 'display', None)
        if reentrance_ids.has_key(id(fval)):
            segments.append('%s->(%s)' %
                            (fname, reentrance_ids[id(fval)]))
        elif (display == 'prefix' and not prefix and
              isinstance(fval, (Variable, basestring))):
                prefix = '%s' % fval
        elif display == 'slash' and not suffix:
            if isinstance(fval, Variable):
                suffix = '/%s' % fval.name
            else:
                suffix = '/%r' % fval
        elif isinstance(fval, Variable):
            segments.append('%s=%s' % (fname, fval.name))
        elif fval is True:
            segments.append('+%s' % fname)
        elif fval is False:
            segments.append('-%s' % fname)
        elif isinstance(fval, Expression):
            #print str(type(fval))
            if isinstance(fval, nltk.sem.logic.ApplicationExpression):
                segments.append('%s=<app(%s)>' % (fname, fval))
            else:
                segments.append('%s=<%s>' % (fname, fval))
        elif not isinstance(fval, FeatStruct):
            segments.append('%s=%r' % (fname, fval))
        else:
            fval_repr = fval._repr(reentrances, reentrance_ids)
            segments.append('%s=%s' % (fname, fval_repr))
    # If it's reentrant, then add on an identifier tag.
    if reentrances[id(featDict)]:
        prefix = '(%s)%s' % (reentrance_ids[id(featDict)], prefix)
    return '%s[%s]%s' % (prefix, ', '.join(segments), suffix)