# coding: utf-8

import re
from fluent import Fluent



def read_data(filename):
    """Parse *filename* and return its contents as a dictionary.

    Each key is a sequence name and each value is the list of fluents
    belonging to that sequence.
    """
    return all_sequences(tokenize(filename))
    
    
    
def tokenize(filename):
    """Yield ``(kind, value)`` tokens from the named file.

    Token kinds: ``'lparen'``/``'rparen'`` for parentheses, ``'int'`` for
    runs of digits (value converted to ``int``), and ``'str'`` for
    double-quoted strings (value with the surrounding quotes stripped).
    Any other characters in the file are ignored.
    """
    # '[^"]+' keeps a quoted string from greedily spanning two strings on
    # one line: the old '".+"' matched '"a" "b"' as a single token.
    # Compiled once, outside the per-line loop.
    token_re = re.compile(r'\(|\)|\d+|"[^"]+"')
    # open() instead of the Python-2-only file() builtin (works on 2 and 3).
    with open(filename) as f:
        for line in f:
            for match in token_re.finditer(line):
                tkn = match.group()
                if tkn == '(':
                    yield ('lparen', tkn)
                elif tkn == ')':
                    yield ('rparen', tkn)
                elif tkn.isdigit():
                    # Came from the \d+ alternative: all digits.
                    yield ('int', int(tkn))
                else:
                    # Only remaining alternative: a quoted string.
                    yield ('str', tkn[1:-1])



def all_sequences(tokens):
    """Read all named sequences from a token generator.

    Returns a dict mapping each sequence's name (the string token that
    follows its opening paren) to the list of fluents read for it.
    Stops when the token stream is exhausted.
    """
    sequences = {}
    while True:
        try:
            next(tokens)                     # opening '(' of the entry
            name = next(tokens)[1]           # the entry's name token
            sequences[name] = sequence(tokens)
        except StopIteration:
            # End of input (possibly mid-entry, as before). The old bare
            # 'except:' also silenced unrelated errors; those now propagate.
            break
    return sequences
    
    
    
def sequence(tokens):
    """Read one sequence's fluent list from a token generator.

    Expects the stream to be positioned just after the sequence's name
    token. Consumes the '(' opening the fluent list, then one
    '('-started fluent at a time; on the first non-'(' token it consumes
    one more token (presumably the ')' closing the enclosing entry —
    mirrors the original consumption order exactly) and stops.
    """
    # Renamed from 'sequence' so the local no longer shadows this function.
    fluents = []
    next(tokens)  # '(' opening the fluent list
    while True:
        tkn = next(tokens)
        if tkn[1] == '(':
            fluents.append(fluent(tokens))
        else:
            # tkn closed the fluent list; also discard the next token.
            next(tokens)
            break
    return fluents



def fluent(tokens):
    """Read one fluent from a token generator.

    Expects the stream to be positioned just after the fluent's opening
    '('. Consumes the name, start, and end tokens plus one trailing
    token (the closing ')'), and returns the constructed Fluent.
    """
    # next(tokens) instead of the Python-2-only tokens.next().
    name = next(tokens)[1]    # name token value
    start = next(tokens)[1]   # start token value
    end = next(tokens)[1]     # end token value
    next(tokens)              # discard the closing ')'
    return Fluent(name, start, end)
