#!/usr/bin/env python
# -*- coding: utf-8 -*-

__all__ = ['Tokenizer']
__version__ = '$Id$'

import re

class Tokenizer(object):
    """Tokenize strings as tuples of (type, value, start_offset, end_offset)."""

    class State:
        """Define all states the Tokenizer can recognize.

        Values are bit flags and are combined with ``|``/``&``, so several
        states may be active at once (e.g. VALUE while still inside RULE).
        """
        NONE = 0x0000
        COMMENT = 0x0001
        SCRIPT = 0x0002
        KEYWORD = 0x0004
        RULE = 0x0008
        PROPERTY = 0x0010
        VALUE = 0x0020
        ALL = 0xFFFF

    def __init__(self):
        """Set up Tokenizer and compile type checking regular expressions"""
        # Current state bitmask and the stack used by state "groups"
        # (a group saves the current state and starts over from NONE).
        self._state = None
        self._state_stack = None
        # Bitmask of token types currently accepted, plus its group stack.
        self._expecting = None
        self._expecting_stack = None
        # Characters accumulated since the last recognized token boundary,
        # and the string offset where that accumulation started.
        self._buffer = None
        self._offset = None
        # Offset of the most recent '<?' marker; scripts may span other
        # tokens, so this is tracked separately from self._offset.
        self._script_offset = None
        # Depth of nested '{...}' rule blocks.
        self._nesting_level = None

        # A keyword token ends at ';' or a newline.
        self._re_test_keyword = re.compile(r'[;\n]$', re.S | re.U)
        # A property name: letters plus the CSS-hack chars \ _ * ending in ':'.
        self._re_test_property = re.compile(r'[A-Z\-\\_\*]+:$', re.I | re.U)
        # A value token ends at ';', a newline, or '}'.
        self._re_test_value = re.compile(r'[;\n\}]$', re.S | re.U)
        # Strips embedded '<?...?>' scripts before the tests above are run,
        # so a script inside a keyword/value doesn't confuse the end tests.
        self._re_sub_scripts = re.compile(r'\<\?.*?\?\>', re.M | re.U)

    def tokenize_string(self, string):
        """Tokenize string and yield tokens where each token is a tuple of:

            (type, value, start_offset, end_offset)

        Scripts are parsed, but not tokenized.

        Token types yielded: COMMENT, SCRIPT, KEYWORD, RULE, PROPERTY,
        VALUE and END (the closing '}' of a rule block).
        """
        self._state = Tokenizer.State.NONE
        self._state_stack = []
        self._expecting = Tokenizer.State.NONE
        self._expecting_stack = []
        self._nesting_level = 0

        def flagged(state):
            """Check if the Tokenizer is in the given state"""
            return self._state & getattr(Tokenizer.State, state)

        def flag(state='NONE', **kwargs):
            """Turn the given Tokenizer state on. If **kwargs has group=True
            then the current state is pushed into the stack and set to
            NONE."""
            # NOTE: only the *presence* of 'group' is tested, not its value.
            if 'group' in kwargs:
                self._state_stack.append(self._state)
                self._state = Tokenizer.State.NONE
            self._state |= getattr(Tokenizer.State, state)

        def unset(state='NONE', **kwargs):
            """Turn the given Tokenizer state off. If **kwargs has group=True
            then the state is restored from the stack."""
            if 'group' in kwargs:
                self._state = self._state_stack.pop()
            self._state &= ~getattr(Tokenizer.State, state)

        def expecting(state):
            """Check if the Tokenizer is expecting the given state.
            NONE means 'no restriction', i.e. anything is accepted."""
            return self._expecting == Tokenizer.State.NONE or \
                   self._expecting & getattr(Tokenizer.State, state)

        def expect(*args, **kwargs):
            """Allow the Tokenizer to accept states in *args. If **kwargs has
            group=True then the current state is pushed into the stack and set
            to NONE."""
            if 'group' in kwargs:
                self._expecting_stack.append(self._expecting)
                self._expecting = Tokenizer.State.NONE
            for state in args:
                self._expecting |= getattr(Tokenizer.State, state)

        def stop(*args, **kwargs):
            """Deny the Tokenizer to accept states in *args. If **kwargs has
            group=True then the state is restored from the stack."""
            if 'group' in kwargs:
                self._expecting = self._expecting_stack.pop()
            for state in args:
                self._expecting &= ~getattr(Tokenizer.State, state)

        def reset(position):
            """Reset last recognized position and buffer"""
            self._buffer, self._offset = u'', position

        def append(char):
            """Append data to the buffer"""
            # Plain concatenation instead of the old unicode(char) call:
            # identical on Python 2 (u'' + str does the same implicit ASCII
            # decode) and works on Python 3, where unicode() does not exist.
            self._buffer += char

        def conforms(token_type):
            """Check the current buffer (with embedded scripts stripped out)
            against the given token type's regular expression"""
            return getattr(self, '_re_test_' + token_type).search(self._re_sub_scripts.sub('', self._buffer))

        reset(0)
        # While in the global namespace, accept only the below
        expect('COMMENT', 'SCRIPT', 'KEYWORD', 'RULE')
        for i in range(self._offset, len(string)):
            append(string[i])

            # If we are not already in a comment and we have just found one,
            # start a new state group
            if '/*' == string[i:i + 2] and not flagged('COMMENT') and expecting('COMMENT'):
                flag('COMMENT', group=True)
                expect('COMMENT', group=True)
                reset(i)
            # If we are inside a comment and we have just found the end of it,
            # restore to previous state and yield token. The buffer holds the
            # characters after the opening '/', so [1:-2] strips '*' and '*/'.
            elif '*/' == string[i - 1:i + 1] and flagged('COMMENT'):
                yield ('COMMENT', self._buffer[1:-2], self._offset, i + 1, )
                unset(group=True)
                stop(group=True)
                reset(i)

            # If we are not already in a script and we have just found one,
            # start a new state group. This is needed to allow multi-line
            # scripts within keywords or property values. Note: the buffer is
            # NOT reset, so the surrounding token keeps accumulating.
            if '<?' == string[i:i + 2] and not flagged('SCRIPT') and expecting('SCRIPT'):
                flag('SCRIPT', group=True)
                expect('SCRIPT', group=True)
                self._script_offset = i
            elif '?>' == string[i - 1:i + 1] and flagged('SCRIPT'):
                # Negative slice start locates the script inside the shared
                # buffer; +1/-2 strip the '<?' and '?>' delimiters.
                yield ('SCRIPT', self._buffer[self._script_offset - i + 1:-2], self._script_offset, i + 1, )
                unset(group=True)
                stop(group=True)

            # Capture keywords such as @import, @font-face, etc.
            if '@' == string[i] and not flagged('KEYWORD') and expecting('KEYWORD'):
                flag('KEYWORD')
                expect('KEYWORD')
                reset(i)
            elif flagged('KEYWORD') and conforms('keyword'):
                # [:-1] drops the terminating ';' or newline.
                yield ('KEYWORD', self._buffer[:-1].strip(), self._offset, i, )
                unset('KEYWORD')
                stop('KEYWORD')
                reset(i)

            # If we encounter an opening bracket, store current state and
            # start a new state group. Allow PROPERTIES and deny KEYWORDS
            # while processing rules.
            if '{' == string[i] and expecting('RULE'):
                yield ('RULE', self._buffer[:-1].strip(), self._offset, i, )
                flag('RULE', group=True)
                expect('RULE', 'PROPERTY', 'COMMENT', 'SCRIPT', group=True)
                reset(i)
                self._nesting_level += 1

            # If while within a rule we encounter a colon, assume property
            # definition and expect value to follow
            if ':' == string[i] and expecting('PROPERTY') and conforms('property'):
                yield ('PROPERTY', self._buffer[:-1].strip(), self._offset, i, )
                flag('VALUE')
                expect('VALUE')
                reset(i)
            elif flagged('VALUE') and conforms('value'):
                yield ('VALUE', self._buffer[:-1].strip(), self._offset, i, )
                unset('VALUE')
                stop('VALUE')
                reset(i)

            # If we encounter a closing bracket and we are back in the global
            # namespace, restore to previous state
            if '}' == string[i] and flagged('RULE'):
                yield ('END', self._buffer[:-1], self._offset, i, )
                self._nesting_level -= 1
                if self._nesting_level == 0:
                    unset(group=True)
                    stop(group=True)
                reset(i)

    def __str__(self):
        # A duplicate, earlier __str__ definition (which this one silently
        # overrode) has been removed; this is the effective representation.
        return '<jsCSS.tokenizer.%s>' % (self.__class__.__name__, )
