"""dprparser

Parses a .dpr/.pas "uses" clause (and nothing more!) 
"""

import io
import os
from itertools import takewhile

from pygments.lexers import get_lexer_by_name
from pygments.token import Text, Comment, Operator, Keyword, Name, String, Number, Punctuation

from .cpinfo import getwindowsencoding


class UnitInUses:
    """One entry of a Delphi ``uses`` clause.

    The parser fills the attributes piecewise while scanning tokens:
    head  -- whatever text precedes the unit name (newlines, comments)
    name  -- the unit name itself
    path  -- source file name from an ``in 'file.pas'`` part, if present
    form  -- form class name (including the braces ``{...}``), if present
    tail  -- everything else up to the final ``,`` or ``;``
    """

    def __init__(self, name=u'', path=u''):
        self.head = u''    # whatever is before the unit name
        self.name = name   # unit name
        self.path = path   # source file name, if there is an 'in' clause
        self.form = u''    # form class name (including {}), if present
        self.tail = u''    # everything up to the final , or ;

    def finish(self):
        """Normalize the collected pieces once the entry is complete."""
        if u'\n' in self.head:
            # drop blanks left over before the first newline
            self.head = self.head.lstrip(u' \t')
        self.tail = self.tail.strip(u' \t')
        self.path = self.path.strip(u"'")

    def __unicode__(self):
        result = self.head + self.name
        # .dcu references get no 'in' clause in the rendered output
        if self.path and self.path[-4:].lower() != '.dcu':
            result += u' in ' + u"'" + self.path + u"'"
        if self.form:
            result += u' ' + self.form
        if self.tail:
            result += u' ' + self.tail
        return result

    # Python 3's str() never calls __unicode__; this alias keeps the
    # entry rendering correctly on both interpreter versions.
    __str__ = __unicode__

class UnitList(list):
    """A list of UnitInUses entries parsed from a ``uses`` clause."""

    element_type = UnitInUses

    @classmethod
    def from_uses_clause(cls, uses_clause):
        """Build a UnitList from an iterable of (tokentype, value) pairs.

        Runs a small state machine over the tokens: before the unit
        name, after it (an ``in`` may follow), and after ``in``
        (expecting the quoted source path and an optional form comment).
        """
        WANT_NAME, AFTER_NAME, AFTER_IN = 0, 1, 2

        units = []
        current = cls.element_type()
        state = WANT_NAME
        for tokentype, value in uses_clause:
            if tokentype == Operator and value == u',':
                # a comma closes the current entry and starts a new one
                current.finish()
                units.append(current)
                current = cls.element_type()
                state = WANT_NAME
            elif state == WANT_NAME:
                if tokentype == Name:
                    current.name = value
                    state = AFTER_NAME
                else:
                    current.head += value
            elif state == AFTER_NAME:
                if tokentype == Keyword and value.lower() == u'in':
                    state = AFTER_IN
                    current.tail = u''
                else:
                    current.tail += value
            else:  # AFTER_IN
                if tokentype == String:
                    current.path += value
                    current.tail = u''
                elif tokentype == Comment.Multiline:
                    # the {FormName} annotation appears as a comment token
                    current.form += value
                    current.tail = u''
                else:
                    current.tail += value

        if current.name:  # flush the final entry (no trailing comma)
            current.finish()
            units.append(current)

        return cls(units)


def _tokenize(filename, source=u''):
    """Tokenize Delphi source with the Pygments 'delphi' lexer.

    If *source* is empty, the contents of *filename* are read using the
    local Windows code page (bad bytes are replaced, never raised).
    Returns an iterable of (tokentype, value) pairs.
    """
    lexer = get_lexer_by_name('delphi')
    if not source:
        # io.open decodes while reading, so this works on Python 2 and 3
        # alike (the previous str.decode() call broke under Python 3,
        # where text-mode read() already returns str).
        encoding = getwindowsencoding()
        with io.open(filename, 'r', encoding=encoding, errors='replace') as f:
            source = f.read()
    return lexer.get_tokens(source)


def parse_dpr_uses(filename):
    """Split a .dpr file around its ``uses`` clause.

    Returns a (before, uses_list, after) triple where *before* is all
    source text up to and including the ``uses`` keyword, *uses_list*
    is a UnitList, and *after* is everything from the terminating ``;``
    onward.  (The unused inner helper ``not_uses`` was removed.)
    """
    itokens = iter(_tokenize(filename))

    # store whatever comes *before* (and including) the `uses` keyword
    before_list = []
    for (tokentype, value) in itokens:
        before_list.append(value)
        if tokentype == Keyword and value.lower() == u'uses':
            break
    before = u''.join(before_list)

    # this is the interesting part: the clause runs up to the ';'
    uses_clause = takewhile(lambda item: item != (Operator, u';'), itokens)
    uses_list = UnitList.from_uses_clause(uses_clause)

    # store whatever comes *after* the `uses` clause; takewhile consumed
    # the ';' token, so re-emit it here
    after = u';' + u''.join(value for (tokentype, value) in itokens)

    return before, uses_list, after

        
def parse_pas_uses(filename):
    """Return (uses_interface, uses_implementation) for a .pas file.

    Each element is a UnitList, or a plain empty list when that section
    has no ``uses`` clause.
    """
    itokens = iter(list(_tokenize(filename)))

    # skip until 'uses' (in interface section); hitting 'implementation'
    # first means the interface has no uses clause
    value = u''  # guard: the loop may not run at all (empty token stream)
    for (tokentype, value) in itokens:
        if tokentype == Keyword and value.lower() in (u'uses', u'implementation'):
            break
    # this is the interesting part
    if value.lower() == u'uses':
        uses_clause = takewhile(lambda item: item != (Operator, u';'), itokens)
        uses_interface = UnitList.from_uses_clause(uses_clause)
    else:
        uses_interface = []

    # skip until 'uses' (in implementation section)
    value = u''  # reset: a stale 'uses' from above must not leak through
    for (tokentype, value) in itokens:
        if tokentype == Keyword and value.lower() == u'uses':
            break
    # this is the interesting part
    if value.lower() == u'uses':
        uses_clause = takewhile(lambda item: item != (Operator, u';'), itokens)
        uses_implementation = UnitList.from_uses_clause(uses_clause)
    else:
        uses_implementation = []

    return uses_interface, uses_implementation

def remove_unnecesary_whitespace(filename, source=u''):
    """Collapse whitespace runs in Delphi source.

    Every whitespace token containing a newline becomes a single '\\n';
    any other whitespace token collapses to a single space.  All other
    tokens pass through unchanged.
    """
    def _squeezed():
        for tokentype, text in _tokenize(filename, source):
            if tokentype == Text:
                if '\n' in text:
                    # a run with a newline collapses to just one newline
                    yield '\n'
                else:
                    stripped = text.strip()
                    # pure spaces/tabs collapse to a single space
                    yield stripped if stripped else ' '
            else:
                yield text
    return u''.join(_squeezed())
            
