'''
dionysus - a web based dialogue management framework
Copyright (C) 2011  Eugeniu Costetchi <eugeniu.costetchi (at) tudor.lu>

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

Created on Dec 29, 2010
'''
from DialogueManager.spark import GenericScanner
from DialogueManager.Context import Context
from DialogueManager.utils.utils import trim

class TokenTypes:
    """Symbolic names for the two token categories produced by the scanner."""
    DELIM = 'delimiter'
    TOKEN = 'token'

class InputTypes:
    """Symbolic names for the recognised kinds of user answer.

    YN     -- a single yes/no keyword
    SINGLE -- a single non-keyword token
    LOW    -- a list-of-words answer (several tokens)
    NONE   -- no answer at all
    """
    YN = 'yn'
    SINGLE = 'single'
    LOW = 'low'
    NONE = 'none'
    
def input_compatible(expected, received):
    '''Compare the expected answer type with the received one and decide
    on acceptance.

    Returns True when an answer of type *received* satisfies a prompt
    expecting *expected*; False for any unknown or incompatible pairing.

    Bug fix: the original YN branch tested ``received in (InputTypes.YN)``
    -- the parentheses do not make a tuple, so it was a *substring* test
    against the string 'yn' (e.g. 'y' would match). The acceptance sets
    below are genuine tuples.
    '''
    # for each expected type, the received types that satisfy it
    accepted = {
        InputTypes.NONE: (InputTypes.YN, InputTypes.LOW, InputTypes.SINGLE, InputTypes.NONE),
        InputTypes.YN: (InputTypes.YN,),
        InputTypes.SINGLE: (InputTypes.YN, InputTypes.SINGLE),
        InputTypes.LOW: (InputTypes.LOW, InputTypes.SINGLE, InputTypes.YN),
    }
    return received in accepted.get(expected, ())

#===============================================================================
# Token
#===============================================================================
class Token(object):
    """A lexical token: a category (``type``) plus its text (``attribute``)."""

    # class-level defaults kept for backward compatibility with the
    # original definition
    type = ''
    attribute = ''

    def __init__(self, type='', attr=''):
        self.type = type
        self.attribute = attr

    def __str__(self):
        return self.type + ":" + self.attribute

    def __repr__(self):
        return self.__str__()

    def __cmp__(self, o):
        # Compares the token's *type* (not the whole token) against `o`,
        # which is usually a plain string such as TokenTypes.TOKEN.
        # Bug fix: the builtin `cmp` was removed in Python 3; the
        # expression below yields the identical -1/0/1 result.
        return (self.type > o) - (self.type < o)


#===============================================================================
# AST
#===============================================================================
#@DeprecationWarning
#class AST(object):
#    type = ''
#    left = ''
#    right = ''
#    
#    def __init__(self, type='', left='', right=''):
#        self.type = type
#        self.left = left
#        self.right = right
#    
#    def __str__(self):
#        return self.type + "(" + self.left + "," + self.right + ")"
#    
#    def __repr__(self):
#        return self.__str__()
    
#===============================================================================
# LowynScanner
#===============================================================================
class LowynScanner(GenericScanner):
    """Spark-based scanner splitting raw input into DELIM and TOKEN tokens.

    NOTE: the raw-string docstrings of the ``t_*`` methods are the actual
    scanning regular expressions interpreted by spark's GenericScanner;
    they are rules, not documentation, and must not be reworded.
    """

    def __init__(self):
        GenericScanner.__init__(self)
        # Bug fix: `rv` used to be a mutable class attribute shared by
        # every instance; it is now initialised per instance.
        self.rv = []

    def tokenize(self, input, removeDelimitors=True):
        """Scan *input* and return the resulting Token list.

        Delimiter tokens are dropped unless *removeDelimitors* is False.
        """
        self.rv = []
        # force-convert the input to str; does not handle unicode strings
        GenericScanner.tokenize(self, str(input))

        if removeDelimitors:
            self.removeDelimitors()

        return self.rv

    def removeDelimitors(self):
        """Keep only TOKEN-typed entries in self.rv."""
        self.rv = [t for t in self.rv if t.type == TokenTypes.TOKEN]

    def t_delimitor(self, s):
        r' , | ; '
        self.rv.append(Token(type=TokenTypes.DELIM, attr=s))

    def t_token(self, s):
        r' [ \w \* \? \- \\ \' \" \( \) \{ \} \[ \] \. \^ \$ \+ \| \< \> \= ~!@#%&_/:]+  '
        # trim() strips surrounding whitespace; purely-whitespace matches
        # produce an empty attribute and are discarded
        t = Token(type=TokenTypes.TOKEN, attr=trim(s))
        if t.attribute:
            self.rv.append(t)

#===============================================================================
# SyntaxException
#===============================================================================
#class SyntaxException(Exception):
#    def __init__(self, value):
#        self.value = value
#    
#    def __str__(self):
#        return repr(self.value)
    

#===============================================================================
# SyntaxParser
#===============================================================================
class SyntaxParser(object):
    """Classifies scanned token lists into the InputTypes categories.

    A single token whose text is one of the yes/no keywords is YN, any
    other single token is SINGLE, and a multi-token list is LOW.
    """

    def __init__(self, context):
        self.context = context
        # yes/no keywords taken from the context dictionary
        # (presumably stored lower-case -- detectType lower-cases before
        # comparing; TODO confirm against the dictionary definition)
        self.constants = context[Context.DICTIONARY].AFFIRMATIVE + context[Context.DICTIONARY].NEGATIVE

    def __isSingle(self, tokens):
        # exactly one entry, and it is a plain TOKEN
        if len(tokens) == 1:
            if tokens[0].type == TokenTypes.TOKEN:
                return True
        return False

    def isType(self, type, tokens):
        """Return True when *tokens* form an answer of input *type*.

        Bug fixes relative to the original:
        - ``tokens[0].attributes`` (nonexistent field, AttributeError)
          corrected to ``attribute``;
        - comparisons lower-cased, consistent with detectType;
        - the LOW branch compared Token objects against keyword strings
          (never equal), so keyword-containing lists were accepted; it
          now compares the token text.
        """
        if type == InputTypes.SINGLE:
            if self.__isSingle(tokens):
                if tokens[0].attribute.lower() not in self.constants:
                    return True
        if type == InputTypes.YN:
            if self.__isSingle(tokens):
                if tokens[0].attribute.lower() in self.constants:
                    return True
        if type == InputTypes.LOW:
            if not self.__isSingle(tokens):
                # a yes/no keyword may not appear inside a list answer
                for token in tokens:
                    if token.attribute.lower() in self.constants:
                        return False
                return True
        return False

    def detectType(self, tokens):
        """Map a token list to one of the InputTypes constants."""
        if not tokens:
            return InputTypes.NONE

        if self.__isSingle(tokens):
            if tokens[0].attribute.lower() in self.constants:
                return InputTypes.YN
            return InputTypes.SINGLE
        return InputTypes.LOW

    def parse(self, tokens):
        # placeholder: no parsing implemented yet
        return

def lowyn_type_and_tokens(string, context, detokenize=True):
    """Tokenize *string* and classify it, returning ``(type, tokens)``.

    *type* is one of the InputTypes constants. When *detokenize* is true
    the returned tokens are the plain attribute strings rather than
    Token objects.
    """
    parser = SyntaxParser(context)
    tokens = LowynScanner().tokenize(string)
    low_type = parser.detectType(tokens)

    if detokenize:
        tokens = [token.attribute for token in tokens]

    return (low_type, tokens)
    
### test bed

#print "starting to test"
#input = 'asd'
#s = LowynScanner()
#t = s.tokenize(input)
#print t
#sp = SyntaxParser()
#print sp.detectType(t)

#str1 = unicode(", , , , ,  asd , qwe, qwe-qwe, @#$54,365 $^, 324324; 432.54,6 5,46,. 'ds;fgfd?fgf ??, ?, *, ; {} [] (), ~!@#$%^&  =_+<>/:")
#str2 = ''
#print lowyn_type_and_tokens(str2)
