from UserList import UserList
import re
import Stemmer
from collections import deque

stemmer = Stemmer.Stemmer('english')


POS_list = (
        'CC',	#Coordinating conjunction
        'CD',  	#Cardinal number
        'DT',  	#Determiner
        'EX',  	#Existential there
        'FW',  	#Foreign word
        'IN',  	#Preposition or subordinating conjunction
        'JJ',  	#Adjective
        'JJR',  	#Adjective, comparative
        'JJS',  	#Adjective, superlative
        'LS',  	#List item marker
        'MD',  	#Modal
        'NN',  	#Noun, singular or mass
        'NNS',  	#Noun, plural
        'NNP',  	#Proper noun, singular
        'NNPS',  	#Proper noun, plural
        'PDT',  	#Predeterminer
        'POS',  	#Possessive ending
        'PRP',  	#Personal pronoun
        'PRP$',  	#Possessive pronoun
        'RB',  	#Adverb
        'RBR',  	#Adverb, comparative
        'RBS',  	#Adverb, superlative
        'RP',  	#Particle
        'SYM',  	#Symbol
        'TO',  	#to
        'UH',  	#Interjection
        'VB',  	#Verb, base form
        'VBD',  	#Verb, past tense
        'VBG',  	#Verb, gerund or present participle
        'VBN',  	#Verb, past participle
        'VBP',  	#Verb, non-3rd person singular present
        'VBZ',  	#Verb, 3rd person singular present
        'WDT',  	#Wh-determiner
        'WP',  	#Wh-pronoun
        'WP$',  	#Possessive wh-pronoun
        'WRB',  	#Wh-adverb
)

POS_size = 8
POS_dict = {
 'CC': 0,
 'CD': 1,
 'DT': 0,
 'EX': 0,
 'FW': 1,
 'IN': 0,
 'JJ': 2,
 'JJR': 2,
 'JJS': 2,
 'LS': 1,
 'MD': 3,
 'NN': 4,
 'NNP': 4,
 'NNPS': 4,
 'NNS': 4,
 'PDT': 0,
 'POS': 5,
 'PRP': 5,
 'PRP$': 5,
 'RB': 6,
 'RBR': 6,
 'RBS': 6,
 'RP': 0,
 'SYM': 1,
 'TO': 0,
 'UH': 0,
 'VB': 7,
 'VBD': 7,
 'VBG': 7,
 'VBN': 7,
 'VBP': 7,
 'VBZ': 7,
 'WDT': 0,
 'WP': 5,
 'WP$': 5,
 'WRB': 6
}

TRIVIALS = (
)

MARKED_PARAMETERS = {
    "Num": "marked",
    "VersionNum": "marked",
    "Symbol": "marked",
    "Space": "marked",
    "Word": "marked",
    "Sentence": "marked",
    "Entity": "marked",
}

RAW_PARAMETERS = {
}

RAW_S_PARAMETERS = {
    "Sentence_raw_strip": True,
}

DEFAULT_PARAMETERS = {
    "Num": "raw",
    "VersionNum": "raw",
    "Symbol": "empty",
    "Space": "single_space",
    "Space_check_previous": True,
    "Word": "custom",
    "Word_stem": False,
    "Word_case": "lower",
    "Word_trivials": TRIVIALS,
    "Sentence": "join",
    "Sentence_strip": True,
    "Sentence_ignore_allnum": False,
    "Entity": "marked",
}

DEFAULT_NOT_TRIVIALS_PARAMETERS = {
    "Num": "raw",
    "VersionNum": "raw",
    "Symbol": "empty",
    "Space": "single_space",
    "Space_check_previous": True,
    "Word": "custom",
    "Word_stem": True,
    "Word_case": "upper",
    "Word_trivials": (),
    "Sentence": "join",
    "Sentence_strip": True,
    "Sentence_ignore_allnum": True,
    "Entity": "marked",
}

CLEAN_PARAMETERS = {
    "Num": "raw",
    "VersionNum": "raw",
    "Symbol": "raw",
    "Space": "single_space",
    "Word": "raw",
    "Sentence": "marked",
    "Entity": "marked",
}

MAX_PERMUTES =  5 + 1
# Matches an optionally-signed integer or decimal number, per:
# http://www.regular-expressions.info/floatingpoint.html
# Raw string: the previous non-raw literal relied on "\." being passed through
# unchanged, which is deprecated (SyntaxWarning on modern Python).
is_num = re.compile(r"^[-+]?[0-9]*\.?[0-9]+$")


class LiveChunk:
    """Base node for analysed text fragments (Python 2 code: uses unicode()).

    A LiveChunk wraps one piece of the original text (``chunk``) and lives
    in a parent/children tree.  Rendering is parameter-driven: ``render``
    looks up this class's short name (e.g. "Word", "Space") in a parameter
    dict and dispatches to the matching ``render_<style>`` method.
    """

    def __init__(self, chunk, attrs=None, offset=-1, parent=None):
        # Decode byte strings as latin-1 so self.chunk is always unicode.
        # BUG FIX: the old ``isinstance(...) and unicode(...) or chunk`` idiom
        # left an *empty* byte string undecoded (u'' is falsy, so the ``or``
        # fell back to the original bytes).
        if isinstance(chunk, str):
            chunk = unicode(chunk, 'latin-1')
        self.chunk = chunk
        # Short class name with the "Live" prefix stripped, e.g. "Word".
        self.name = str(self.__class__).split('.')[-1][4:]
        self.attributes = {}
        self.__hash = None          # lazily computed cache for __hash__
        self.offset = offset        # char offset in the source text (-1 = unknown)
        self.children = []
        self.set_parent(parent)
        if attrs:
            self.attributes.update(attrs)

    def add_child(self, child):
        self.children.append(child)

    def set_parent(self, parent):
        """Attach self under parent, registering self in parent.children."""
        self.parent = parent
        if parent:
            parent.add_child(self)

    def get_path_to_root(self):
        """Return the node list from the tree root down to self, inclusive."""
        if not self.parent:
            return [self]
        res = self.parent.get_path_to_root()
        res.append(self)
        return res

    def path_length(self, other_node):
        """Number of edges on the tree path between self and other_node."""
        my = self.get_path_to_root()
        her = other_node.get_path_to_root()
        res = len(my) + len(her)
        # Discount the common ancestor prefix (counted once in each path).
        for i in range(min(len(my), len(her))):
            if my[i] == her[i]:
                res -= 2
            else:
                break
        return res

    def bfs(self, visit):
        """Breadth-first traversal from self, calling visit(node) once per node."""
        visited = set()
        queue = deque([self])
        while queue:
            curr_node = queue.popleft()         # Dequeue
            if curr_node in visited: continue   # Skip visited nodes
            visit(curr_node)                    # Visit the node
            visited.add(curr_node)
            # Enqueue the children
            queue.extend(curr_node.children)

    def render(self, parameters=DEFAULT_PARAMETERS):
        """Render via render_<style> chosen by parameters[self.name],
        falling back to render_raw when the class has no entry."""
        if self.name in parameters:
            return getattr(self, 'render_%s' % parameters[self.name])(parameters)
        return self.render_raw(parameters)

    def render_raw(self, parameters={}):
        # ``parameters`` is unused here but kept for a uniform render_* API;
        # the default dict is never mutated.
        return self.chunk

    def render_empty(self, parameters={}):
        return ''

    def render_marked(self, parameters):
        """Render as an XML-ish element named after the chunk class, with
        self.attributes serialised as element attributes."""
        tag = self.name.lower()
        attr_text = "".join([" %s='%s'" % (key, str(value))
                             for key, value in self.attributes.items()])
        return u'<%s%s>%s</%s>' % (tag, attr_text,
                                   self.render_raw(parameters), tag)

    def number_of_characters(self):
        return len(self.chunk)

    def __repr__(self):
        # Python 2: repr/str return UTF-8 encoded byte strings.
        return unicode(self).encode('utf-8')

    def __str__(self):
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        return u'<%s>' % self.render()

    def __hash__(self):
        # Cache the rendered hash on first use.  BUG FIX: test ``is None``
        # instead of truthiness, so a legitimate hash value of 0 is not
        # recomputed on every call.
        if self.__hash is None:
            self.__hash = self.render().__hash__()
        return self.__hash

class LiveSymbol(LiveChunk):
    """A symbol chunk; the debug form wraps the text in underscores."""

    def __unicode__(self):
        return u'_' + self.chunk + u'_'

class LiveNum(LiveChunk):
    """A numeric chunk; the debug form wraps the text in parentheses."""

    def __unicode__(self):
        return u'(' + self.chunk + u')'

class LiveVersionNum(LiveChunk):
    """A version-number chunk; the debug form wraps the text in brackets."""

    def __unicode__(self):
        return u'[' + self.chunk + u']'

class LiveSpace(LiveChunk):
    """Whitespace between chunks; remembers the chunk that precedes it."""

    def __init__(self, chunk, previous=None, attrs=None, *args, **args_dict):
        # BUG FIX: ``attrs`` used to be accepted here but never forwarded to
        # LiveChunk.__init__, so attributes passed to a LiveSpace were
        # silently dropped.  Forward it positionally so any further
        # positional args still map to offset/parent as before.
        LiveChunk.__init__(self, chunk, attrs, *args, **args_dict)
        self.previous = previous

    def __unicode__(self):
        return u"'%s'" % self.chunk

    def render_single_space(self, parameters):
        """Render as a single space; collapses to '' when the previous
        chunk's rendering is empty/whitespace (Space_check_previous)."""
        if parameters.get("Space_check_previous", True):
            if self.previous and not self.previous.render(parameters).strip():
                return ''
        return u' '

class LiveWord(LiveChunk):
    """A word token.  render_custom applies optional stemming, case folding
    and trivial-word substitution, driven by the parameter dict."""

    def render_custom(self, parameters):
        word = self.chunk
        if parameters.get("Word_stem", False):
            # Prefer a precomputed stem attribute, then the stem of the base
            # form, then stem the surface form itself.
            word = ("stem" in self.attributes and self.attributes["stem"]) or \
                   ("base" in self.attributes and
                    stemmer.stemWord(self.attributes["base"])) or \
                   stemmer.stemWord(word)
        case = parameters.get("Word_case", None)
        if case in ("upper", "lower", "title", "swapcase"):
            word = getattr(word, case)()
        for original, replacement in parameters.get("Word_trivials", ()):
            if word == original:
                return replacement
        return word

    def __unicode__(self):
        return u'"%s"' % self.chunk


class LiveSentence(UserList, LiveChunk):
    """A sentence: a UserList of chunk objects that is itself a LiveChunk.

    self.data holds LiveWord/LiveSpace/... chunks in document order;
    rendering joins the chunks' renderings.  Also supports an
    order-insensitive phrase search via prepare_for_unordered_find /
    find_unordered_subtext_all.
    """

    def __init__(self, initlist=None, attrs=None, *args, **args_dict):
        UserList.__init__(self, initlist)
        LiveChunk.__init__(self, '', attrs=attrs, *args, **args_dict)
        self.words = []  # lazy cache of the LiveWord chunks (see get_words)

    def add_dummy_spaces(self):
        """Point each LiveSpace at its preceding chunk and insert empty
        LiveSpace chunks between adjacent non-space chunks.  Returns self."""
        copy = self.data[:]
        self.data = []
        previous_chunk = None
        for chunk in copy:
            if chunk.__class__ == LiveSpace:
                chunk.previous = previous_chunk
            elif previous_chunk and previous_chunk.__class__ != LiveSpace and \
               chunk.__class__ != LiveSpace:
                  self.data.append(LiveSpace(u'', previous_chunk))
            self.data.append(chunk)
            previous_chunk = chunk
        return self

    def get_words(self):
        """Return (and cache) the LiveWord chunks in order."""
        if not self.words:
            for c in self.data:
                if isinstance(c, LiveWord):
                    self.words.append(c)
        return self.words

    def get_root(self):
        """Return the tree root reached by following parents from the
        first word.  Raises IndexError when the sentence has no words."""
        if not self.words:
            self.get_words()
        a = self.words[0]
        while a.parent: a = a.parent
        return a

    def chunks_offset_to_index(self, st_offset, end_offset, all=True):
        """Map a character-offset range to a (start, end) pair of chunk
        counts; when ``all`` is False only LiveWord chunks are counted.
        NOTE(review): the scan starts at index 1, so chunk 0's offset is
        never compared -- presumably deliberate, but confirm with callers.
        """
        i = 1
        ii = 0
        while i < len(self) and self[i].offset <= st_offset: 
            i += 1
            if all or isinstance(self[i-1], LiveWord): ii += 1

        jj = ii
        while i < len(self) and self[i].offset <= end_offset: 
            i += 1
            if all or isinstance(self[i-1], LiveWord): jj += 1
        
        return (ii, jj)

    def render_join(self, parameters):
        """Concatenate every chunk's rendering; optionally strip the result
        and drop sentences that are nothing but a single number."""
        res = u"".join([lc.render(parameters) for lc in self])
        if parameters.get("Sentence_strip", True):
            res = res.strip()
        if parameters.get("Sentence_ignore_allnum", True):
            if is_num.search(res):
                return u""
        return res

    def render_marked(self, parameters):
        # Like LiveChunk.render_marked, but the element body is the joined
        # rendering of the child chunks.
        return u'<%s%s>%s</%s>' % (self.name.lower(), 
                                 "".join([" %s='%s'" % (
                                            item[0], str(item[1])) for \
                                        item in self.attributes.items()]),
                                 self.render_join(parameters), self.name.lower())

    def render_raw(self, parameters={}):
        """Concatenate every chunk's *raw* rendering (no dispatch), with
        optional stripping / all-numeric suppression."""
        res = u"".join([lc.render_raw(parameters) for lc in self])
        if parameters.get("Sentence_raw_strip", False):
            res = res.strip()
        if parameters.get("Sentence_ignore_allnum", False):
            if is_num.search(res):
                return u""
        return res
    
    def __unicode__(self):
        return u'{%s}' % self.render()

    def __str__(self):
        return LiveChunk.__str__(self)

    def __repr__(self): return self.__unicode__()

    # def __hash__(self):
    #     return LiveChunk.__hash__(self)

    def __smart_call__(self, another_thing, function):
        # Dispatch dunder calls to UserList for plain lists, LiveChunk
        # otherwise.  NOTE(review): the first and last branches are
        # identical, so the initial LiveChunk isinstance test is redundant.
        if isinstance(another_thing, LiveChunk):
            return getattr(LiveChunk, function)(self, another_thing)
        if isinstance(another_thing, list):
            return getattr(UserList, function)(self, another_thing)
        return getattr(LiveChunk, function)(self, another_thing)

    # def __eq__(self, other): return self.__smart_call__(other, "__eq__")
    # def __lt__(self, other): return self.__smart_call__(other, "__lt__")
    # def __le__(self, other): return self.__smart_call__(other, "__le__")
    # def __ne__(self, other): return self.__smart_call__(other, "__ne__")
    # def __gt__(self, other): return self.__smart_call__(other, "__gt__")
    # def __ge__(self, other): return self.__smart_call__(other, "__ge__")

    def number_of_characters(self):
        """Total character count over all chunks."""
        res = 0
        for chunk in self.data:
            res += chunk.number_of_characters()
        return res


    def prepare_for_unordered_find(self, parameters=DEFAULT_PARAMETERS):
        """
        Precompute the normalized-parts index used by
        find_unordered_subtext_all, and the opt<->org index translators.

        >>> ref = analysis_sentence("HIV-1 Nefinteracts with ABCA1 and induces downregulation and redistribution of ABCA1 to the plasma membrane.")
        >>> ref.prepare_for_unordered_find()
        >>> for k in xrange(len(ref._ref_parts)):
        ...     if ref.org2opt(ref.opt2org(k)) != k:
        ...         print "Oh! %d -> %d -> %d :(" % (
        ...             k, ref.opt2org(k), ref.org2opt(ref.opt2org(k)))
        """
        self._unordered_parameters = parameters

        # _ref_parts: normalized rendering of each non-space, non-empty chunk.
        # _ref_parts_index: chunk index in self.data for each part ("opt->org").
        # _ref_parts_index_reverse: for each chunk, the index of the latest
        # part at or before it ("org->opt").
        self._ref_parts = []
        self._ref_parts_index = []
        self._ref_parts_index_reverse = []
        for i in xrange(len(self)):
            r = self[i].__class__ != LiveSpace and self[i].render(parameters)
            if r:
                self._ref_parts.append(r)
                self._ref_parts_index.append(i)
            self._ref_parts_index_reverse.append(len(self._ref_parts)-1)

        self._ref_parts = tuple(self._ref_parts)
        self._ref_parts_index = tuple(self._ref_parts_index)
        self._ref_parts_index_reverse = tuple(self._ref_parts_index_reverse)

        self.opt2org = lambda opt: self._ref_parts_index[opt]
        self.org2opt = lambda org: self._ref_parts_index_reverse[org]
        
    def find_unordered_subtext_all(self, subtext, ignore=0):
        """
        Finds all the occurrences of any variations of the normalized form of
        subtext in the normalized form of self.

        The prepare_for_unordered_find function should be called before this 
        function.

        Input:
            subtext: TextLive
            ignore: number of subtext parts allowed to be missing in a match
        Returns:
            a list of (mention, offset):
            mention is the original mention of the phrase in the sentence
            offset is the offset of the mention in the sentence, 
            starting from 0=start of the sentence
                
        >>> TESTS = (
        ...     ('B A', [('a b', 0)]),         # Be careful! You may not expect this
        ...     (' a ', [(u'a', 0), (u'a', 4)]),
        ... )
        >>> ref = analysis_sentence("a b a")
        >>> ref.prepare_for_unordered_find()
        >>> for input, output in TESTS:
        ...     actual_output = ref.find_unordered_subtext_all(analysis_sentence(input))
        ...     if not actual_output == output:
        ...         print "%s  !=  %s   for INPUT = %s" % (actual_output, output, input)

        >>> ref = analysis_sentence("HIV-1 Nefinteracts with ABCA1 and induces downregulation and redistribution of ABCA1 to the plasma membrane.")
        >>> ref.prepare_for_unordered_find()
        >>> ref.find_unordered_subtext_all(analysis_sentence("ABCA1"))
        [(u'ABCA1', 24), (u'ABCA1', 79)]
        >>> ref.find_unordered_subtext_all(analysis_sentence("Nefinteracts 2"))
        []
        >>> ref.find_unordered_subtext_all(analysis_sentence("Nefinteracts 2"), 1)
        [(u'Nefinteracts', 6)]
        """

        result = []
        indexes = []

        opt2org = self.opt2org
        org2opt = self.org2opt

        # Multiset (part -> remaining count) of the query's normalized parts.
        subtext_parts = {}
        for i in xrange(len(subtext)):
            r = subtext[i].__class__ != LiveSpace and subtext[i].render(self._unordered_parameters)
            if r:
                if r in subtext_parts:
                    subtext_parts[r] += 1
                else:
                    subtext_parts[r] = 1
        
        k = len(subtext_parts.keys())
        if not k: return []
        
        ref_parts = self._ref_parts
        n = len(ref_parts)

        # State machine scanning ref_parts:
        #   found_index   = part index where the current candidate match began
        #                   (-1 when not inside a candidate)
        #   subtext_counter = number of distinct query parts fully consumed
        #   s_parts       = working copy of the multiset, decremented as
        #                   sentence parts are matched
        # A match is accepted once subtext_counter >= k - ignore; on a
        # mismatch the scan restarts one part after the candidate's start.
        text_index = subtext_counter = 0
        found_index = -1
        
        s_parts = subtext_parts.copy()
        
        while text_index <= n:
            if found_index > -1:
                if subtext_counter >= (k - ignore) or \
                   (text_index < n and ref_parts[text_index] in s_parts and s_parts[ref_parts[text_index]] > 0):
                    if text_index < n and ref_parts[text_index] in s_parts and s_parts[ref_parts[text_index]] > 0:
                        s_parts[ref_parts[text_index]] -= 1
                        if s_parts[ref_parts[text_index]] == 0:
                            subtext_counter += 1
                        text_index += 1
                    if subtext_counter >= (k - ignore):
                        # Emit the match: raw text of the matched chunk span
                        # plus its character offset (length of the raw prefix).
                        f_index = opt2org(found_index)
                        if text_index < n:
                            e_index = opt2org(text_index)
                            indexes.append((f_index, e_index))
                            result.append((self[f_index:e_index].render(RAW_S_PARAMETERS),
                                           len(self[:f_index].render(RAW_PARAMETERS))))
                        else:
                            indexes.append((f_index, len(self)))
                            result.append((self[f_index:].render(RAW_S_PARAMETERS),
                                           len(self[:f_index].render(RAW_PARAMETERS))))
                        s_parts = subtext_parts.copy()
                        subtext_counter = 0
                        found_index = -1
                else:
                    # Mismatch: restart just after the candidate's first part.
                    text_index = found_index + 1
                    s_parts = subtext_parts.copy()
                    subtext_counter = 0
                    found_index = -1
            else:
                # Not inside a candidate: look for the first matching part.
                if text_index < n and ref_parts[text_index] in s_parts:
                    s_parts[ref_parts[text_index]] -= 1
                    found_index = text_index
                    subtext_counter += 1
                text_index += 1

        return result


def analysis_sentence(sentence_text, sentence_tokenized=None, offset=-1):
    """Build a LiveSentence of LiveWord/LiveSpace chunks from raw text.

    sentence_tokenized defaults to sentence_text.  Its whitespace-split
    tokens are located back in sentence_text so every LiveWord carries its
    character offset; the skipped-over text between consecutive tokens
    becomes LiveSpace chunks.
    """
    tokens = (sentence_tokenized or sentence_text).split()
    sentence = LiveSentence(offset=offset)
    cursor = 0
    previous_word = None
    for token in tokens:
        found_at = sentence_text.find(token, cursor)
        if found_at > cursor:
            # Whatever sits between the previous token and this one is space.
            sentence.append(LiveSpace(sentence_text[cursor:found_at],
                                      previous=previous_word, offset=cursor))
        previous_word = LiveWord(token, offset=found_at)
        sentence.append(previous_word)
        cursor = found_at + len(token)
    return sentence


def regulate(text):
    r"""For including in regular expressions
    
    >>> print regulate(r"c:\doc.txt\(.*)?\ ")
    c:\\doc\.txt\\\(\.\*\)\?\\ 

    """
    # Backslash-escape every metacharacter in a single pass.  The character
    # class lists exactly the characters the old .replace() chain handled;
    # a single pass is equivalent because that chain escaped backslashes
    # first, so introduced backslashes were never re-escaped either.
    return re.sub(r'[\\()?*+^${}\[\].]',
                  lambda match: '\\' + match.group(0),
                  text)

def _test():
    import doctest
    doctest.testmod()

# Run the module doctests when executed as a script.
if __name__ == "__main__":
    _test()
