"""
ArffBarfer.py - Parses reviews and creates ARFF files for use with Weka.

Author: Miles Malerba
"""

import codecs
import os
import random
import re

################################################################################
# ArffBarfer                                                                   #
# Generates ARFF feature files for use with Weka.                              #
################################################################################
class ArffBarfer(object):
  """Generates sparse-format ARFF feature files for use with Weka."""

  def __init__(self, data_fetcher):
    """
    Constructor.

    Parameters:
      data_fetcher - DataFetcher object. Given a file name return a list of
                     tagged data (tagged data = tuple of data and list of tags
                     (name-value pairs)).
    """
    self._data_fetcher = data_fetcher
    # Directory where the generated .arff files (and the temp file) live.
    self._ARFF_DIR = 'arff/'
    # Matches the quoted tag-index placeholders written by _write_feature_set
    # (e.g. '"0"'); rewritten to absolute attribute indices in _add_header.
    self._PLACEHOLDER_RE = re.compile('"([0-9]+)"')


  def generate_arffs(self, label, fgen, train_percent=0.7):
    """
    Generate a pair of arff files (train and test) from the given data files.

    Parameters:
      label         - The name to give to the arff files.
      fgen          - FeatureGenerator object.
                      Given raw data return a feature set.
      train_percent - The percent of the data used for the train file.
    """
    feature_indices = dict()
    feature_types = dict()
    tag_sets = dict()

    # Data lines go to a temp file first: the header can only be written
    # once every feature and tag value has been seen.
    with codecs.open('%s%s.arff.tmp' % (self._ARFF_DIR, label), 'w',
                     'utf-8') as tmp:
      for (data, tags) in self._data_fetcher.fetch():
        # Update the feature types and indices.
        feature_set = fgen.gen_features(data)
        for f, val in feature_set.items():
          self._update_types(feature_types, f, val)
          if f not in feature_indices:
            # Features are numbered in first-seen order.
            feature_indices[f] = len(feature_indices)

        # Keep track of the tag values we've seen.
        for (n, t) in tags:
          tag_sets.setdefault(n, set()).add(t)

        self._write_feature_set(tmp, feature_set, feature_indices, tags)

    self._add_header(label, feature_types, feature_indices, tag_sets,
                     train_percent)


  def _update_types(self, types, feature, val):
    """
    Update the types dictionary for the specified feature.

    A feature whose first value is an int or float is declared NUMERIC;
    otherwise its type is the set of string forms of every value seen
    (a nominal attribute in the header).

    Parameters:
      types   - The types dictionary.
      feature - The name of the feature to update the dictionary with.
      val     - The value of the feature.
    """
    if feature not in types:
      # bool is a subclass of int but must stay nominal ('True'/'False'
      # is not a valid ARFF numeric value), so exclude it explicitly.
      if isinstance(val, (int, float)) and not isinstance(val, bool):
        types[feature] = 'NUMERIC'
      else:
        types[feature] = set()
    if isinstance(types[feature], set):
      # Always store the string form; adding raw values here (as the old
      # code did) made the header's ','.join fail on non-string values.
      types[feature].add(str(val))


  def _add_header(self, label, types, indices, tag_sets, train_percent):
    """
    Add header information and convert the temp file to two real files
    (train and test).

    Parameters:
      label         - The label to use for the arff files.
      types         - A dictionary of features to their types.
      indices       - A dictionary of the indices for the features.
      tag_sets      - A dictionary of tag names to set of possible values.
      train_percent - The percent of the data to use for the training file.
    """
    tmp_path = '%s%s.arff.tmp' % (self._ARFF_DIR, label)
    # Tag attributes come after all feature attributes, so the placeholder
    # offset is the number of features.
    with codecs.open(tmp_path, 'r', 'utf-8') as tmp:
      lines = [self._fill_tag_placeholders(ln, len(types))
               for ln in tmp.readlines()]
    random.shuffle(lines)
    split = int(len(lines) * train_percent)

    for suffix, chunk in (('train', lines[:split]), ('test', lines[split:])):
      with codecs.open('%s%s_%s.arff' % (self._ARFF_DIR, label, suffix), 'w',
                       'utf-8') as out:
        out.write('@RELATION %s_%s\n\n' % (label, suffix))
        # Features in index order, then tags in (sorted) name order --
        # this must match the column order used by _write_feature_set.
        for feature in sorted(types.keys(), key=lambda x: indices[x]):
          if isinstance(types[feature], set):
            # Collapse a nominal value set to its '{a,b,...}' declaration
            # once; the second pass sees the already-built string.
            types[feature] = '{' + ','.join(types[feature]) + '}'
          out.write('@ATTRIBUTE %s %s\n' % (feature, types[feature]))
        for tag in sorted(tag_sets.keys()):
          if isinstance(tag_sets[tag], set):
            tag_sets[tag] = '{' + ','.join(tag_sets[tag]) + '}'
          out.write('@ATTRIBUTE %s %s\n' % (tag, tag_sets[tag]))
        out.write('\n@DATA\n')
        out.writelines(chunk)
    os.remove(tmp_path)


  def _fill_tag_placeholders(self, line, base_index):
    """Replace the tag index placeholders now that we know all the values.

    Parameters:
      line       - The line to replace the placeholders in.
      base_index - The index of the first tag attribute (== feature count).

    Return:
      The line with the placeholders replaced.
    """
    # Single pass over the line: each quoted placeholder becomes its
    # absolute attribute index (replacements cannot create new matches
    # since the output digits are unquoted).
    return self._PLACEHOLDER_RE.sub(
      lambda m: str(int(m.group(1)) + base_index), line)


  def _write_feature_set(self, fp, fset, indices, tags):
    """
    Writes a single feature set as one sparse ARFF data line.

    Tag attribute indices are not known yet (the full tag set is only known
    after every item has been seen), so tags are written with quoted
    placeholder indices ("0", "1", ...) that _fill_tag_placeholders fixes up.

    Parameters:
      fp      - A pointer to the file to write to.
      fset    - The feature set to write.
      indices - The indices dictionary for the features.
      tags    - The tags for this feature set.
    """
    features = sorted(((indices[k], v) for k, v in fset.items()),
                      key=lambda x: x[0])
    # Sort tags by name only (stable), matching the header's tag order.
    tag_vals = [t for (_, t) in sorted(tags, key=lambda x: x[0])]
    parts = ['%d %s' % (i, v) for (i, v) in features]
    parts += ['"%d" %s' % (i, t) for (i, t) in enumerate(tag_vals)]
    # A single join avoids the stray leading comma the old code emitted
    # when the feature set was empty.
    fp.write('{' + ','.join(parts) + '}\n')
