"""
DataFetchers.py - Provide methods to fetch data from the Pang & Lee corpus as
well as the Rotten Tomatoes scraped data.

Author: Miles Malerba
"""

import os
import random

from nltk.corpus.reader import XMLCorpusReader

################################################################################
# ScaleDataFetcher                                                             #
# Data Fetcher for Bo Pang and Lillian Lee rating scale corpus.                #
################################################################################
class ScaleDataFetcher(object):
  def __init__(self, num_files=None):
    """Constructor.

    Parameters:
      num_files - The number of files to include in the corpus. If None, all
                  files are used.
    """
    self._num_files = num_files
    # Metadata (ids/labels/ratings) and the full review text live in two
    # parallel directory trees, both keyed by reviewer name.
    self._meta_root = os.path.join('data', 'scaledata')
    self._data_root = os.path.join('data', 'scale_whole_review')
    self._tag_dict = None  # reviewer -> review id -> [(scale_name, tag), ...]
    self._files = None     # list of (reviewer, filename) pairs to serve


  def fetch(self):
    """Read in the scaledata corpus. The corpus is returned as an iterator
    rather than a list for performance reasons.

    Return:
      An iterator of tagged data for the scaledata corpus; each item is a
      (review_text, tags) pair.
    """
    # Lazily load metadata and pick the file sample on first use.
    if not self._tag_dict:
      self._read_meta()
    if not self._files:
      self._select_files()
    return (self._read_file(*x) for x in self._files)


  def reselect_files(self, num_files=None):
    """Reselect the file set to use for the corpus.

    Parameters:
      num_files - The number of files to include. If None, all files are used.
    """
    self._num_files = num_files
    self._files = None  # forces _select_files() on the next fetch()

  # Backward-compatible alias: the method was originally published under this
  # misspelled name (and was unusable -- it lacked the `self` parameter).
  reselect_fies = reselect_files


  def _select_files(self):
    """Randomly select some files to include in the corpus."""
    self._files = []
    for reviewer in os.listdir(self._data_root):
      self._files += [(reviewer, f) for f in os.listdir(
        os.path.join(self._data_root, reviewer, 'txt.parag'))]

    # Shuffle so truncating below yields a random sample, not a prefix.
    random.shuffle(self._files)
    if self._num_files:
      self._files = self._files[:self._num_files]


  def _read_meta(self):
    """Read in the meta data for the scaledata corpus.

    Builds self._tag_dict mapping reviewer -> review id -> list of
    (scale_name, tag) pairs, from the four line-aligned metadata files
    each reviewer directory contains.
    """
    self._tag_dict = dict()
    for reviewer in os.listdir(self._meta_root):
      ids = self._read_lines(reviewer, 'id.' + reviewer)
      tags_3scale = self._read_lines(reviewer, 'label.3class.' + reviewer)
      tags_4scale = self._read_lines(reviewer, 'label.4class.' + reviewer)
      tags_0_1scale = self._read_lines(reviewer, 'rating.' + reviewer)

      self._tag_dict[reviewer] = dict()
      # The four files are line-aligned: line i of each describes review i.
      for rid, t3, t4, t01 in zip(ids, tags_3scale, tags_4scale,
                                  tags_0_1scale):
        self._tag_dict[reviewer][rid.strip()] = [
          ('3_SCALE', t3.strip()),
          ('4_SCALE', t4.strip()),
          ('0_1_SCALE', t01.strip())
        ]


  def _read_lines(self, reviewer, fname):
    """Read one metadata file from a reviewer's directory.

    Parameters:
      reviewer - The reviewer whose directory contains the file.
      fname    - The name of the metadata file to read.

    Return:
      The list of lines in the file (newlines included).
    """
    with open(os.path.join(self._meta_root, reviewer, fname)) as f:
      return f.readlines()


  def _read_file(self, reviewer, fname):
    """Read in a file from the scaledata corpus.

    Parameters:
      reviewer - The author of the review (indicates the folder the file is in).
      fname    - The name of the file to open.

    Return:
      The tagged data for the file as a (review_text, tags) pair.
    """
    with open(os.path.join(self._data_root, reviewer, 'txt.parag', fname)) as f:
      # Metadata is keyed by the review id, i.e. the filename minus '.txt...'.
      return (f.read(), self._tag_dict[reviewer][fname[:fname.find('.txt')]])


################################################################################
# RottenTomatoesDataFetcher                                                    #
# Data Fetcher for the Rotten Tomatoes data we scraped.                        #
################################################################################
class RottenTomatoesDataFetcher(object):
  def __init__(self, num_files=None):
    """Constructor.

    Parameters:
      num_files - The number of files to include in the corpus. If None, all
                  files are used.
    """
    self._num_files = num_files
    self._corpus = XMLCorpusReader(
      os.path.join('data', 'RottenTomatoesScrape'), '.*')
    self._files = None  # list of corpus file ids to serve; chosen lazily


  def fetch(self):
    """Read in the Rotten Tomatoes corpus. The corpus is returned as an
    iterator rather than a list for performance reasons.

    Return:
      An iterator of tagged data for the Rotten Tomatoes corpus; each item
      is a (review_text, tags) pair.
    """
    if not self._files:
      self._files = self._corpus.fileids()
      # Shuffle so truncating below yields a random sample, not a prefix.
      random.shuffle(self._files)
      if self._num_files:
        self._files = self._files[:self._num_files]
    return (self._read_file(x) for x in self._files)


  def _read_file(self, fname):
    """Read in a file from the Rotten Tomatoes corpus.

    Parameters:
      fname    - The name of the file to open.

    Return:
      The tagged data for the file as a (review_text, tags) pair.
    """
    d = dict()
    # Iterating an Element yields its direct children.
    # (Element.getchildren() was deprecated and removed in Python 3.9.)
    for node in self._corpus.xml(fname):
      d[node.tag] = node.text.strip()
    return (d['review_text'], self._create_tags(d['rating']))


  def _create_tags(self, rating):
    """Convert the rating to the different tag scales we're interested in.

    Parameters:
      rating - The 0.5-5 rating scraped from rotten tomatoes.

    Return:
      A list of tags for the scales we're interested in.
    """
    # Map half-star ratings 0.5..5.0 onto indices 0..9, then bucket into the
    # coarser 3-class / 4-class scales used by the scaledata corpus.
    index = int(float(rating) * 2) - 1
    three = [0, 0, 0, 1, 1, 1, 1, 2, 2, 2]
    four = [0, 0, 1, 1, 1, 2, 2, 2, 3, 3]
    return [
      ('3_SCALE', str(three[index])),
      ('4_SCALE', str(four[index])),
      ('0_1_SCALE', str(index / 10.0))
    ]
