#!/usr/bin/python
#
# Copyright (c) 2009 Josef Hardi. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -*- coding: utf-8 -*-
# NOTE(review): a coding declaration only takes effect on line 1 or 2 of the
# file (PEP 263); down here it is inert and the file is parsed as ASCII.

"""
    Robo Review Parser
    
    This robot crawls the Chromium code review comment logs and parses
    the message exchange between a code owner and reviewers.
    
    @copyright: 2009 Josef Hardi
    @license: GNU GPL
"""

__author__ = 'Josef Hardi <josef.hardi@gmail.com>'


import calendar
import os, sys
import re
import urllib
import time, datetime

import StringIO
from optparse import OptionParser

from lxml.html import fromstring
from lxml import etree

reload(sys)
sys.setdefaultencoding('utf-8')

USAGE = os.path.basename(sys.argv[0]) + ' [options]'
DESCRIPTION = '''
Collect the interaction data from the message exchange in the Chromium
code review between a code owner and reviewers.
'''
ISO8601_FORMAT = '%Y-%m-%dT%H:%M:%S'
CSV_HEADER = 'issue,owner,owner_timestamp,reviewer,reviewer_timestamp'
CSV_TEMPLATE = '%(issue)s,"%(owner)s",%(otime)s,"%(reviewer)s",%(rtime)s'

NAMESPACE = {'t': 'http://www.w3.org/2005/Atom'}


def print_csv(issue_urls, header=True):
  """Print the interaction data in CSV format"""
  if header:
    print CSV_HEADER
  
  for issue_url in issue_urls:
    issue, discussion = run_parser(issue_url)
    
    if discussion is not None:
      for dyad in discussion:
        entry = {'issue' : issue,
                 'owner' : dyad[0][0],
                 'otime' : dyad[0][1],
                 'reviewer' : dyad[1][0],
                 'rtime' : dyad[1][1]}
        
        print CSV_TEMPLATE % entry


def run_parser(url):
  """Take the review page URL and parse the comment body.

  Examples of the URLs:
    http://codereview.chromium.org/rss/issue/12859
    http://codereview.chromium.org/rss/issue/13052
    http://codereview.chromium.org/rss/issue/13059

  It should be noticed that the URL connects to the RSS-XML page of the
  review instead of to the plain HTML page. You can modify the common URL
  by inserting 'rss/issue/' string.

  Return (issue_number, discussion); on a parse failure an error is
  printed and both values are None.
  """
  # Pre-initialize so the return statement is safe when the handler
  # fires; previously both names were unbound after an IndexError,
  # turning the final return into an UnboundLocalError.
  issue, discussion = None, None
  try:
    document = read_remote_document(url)
    issue, discussion = parse_document(document)
  except IndexError:
    print "Error parsing at: " + url

  return issue, discussion


def parse_document(doc):
  """Get the logic of the discussion.

  *doc* is a parsed Atom feed of a code review.  The feed title carries
  the issue number; each <entry> is either a reviewer/owner message
  ("Message from <user>") or a patch-set upload ("PatchSet ...").
  Patch-set entries are skipped.  The author of the first message is
  taken to be the code owner; every later message from a different
  person completes an (owner message, reviewer message) pair -- a
  "dyad" -- appended to the discussion, while a later message from the
  owner just refreshes the pending owner message.

  Returns (issue_number, discussion) where issue_number is a string and
  discussion is a list of [[owner, owner_ts], [reviewer, reviewer_ts]]
  pairs (timestamps are ints, see create_timestamp).

  An example of review discussion (I omitted several markups for
  simplicity):

  $
  <feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us">
    <title>Code review - Issue 13052: Cleanup: move base/platform_test.h</title>
    <updated>2008-12-02T19:16:01+00:00</updated>
    <entry>
      <title>Message from jeremy@chromium.org</title>
      <updated>2008-12-02T16:01:52+00:00</updated>
      <author>
        <name>jeremy</name>
      </author>
      <summary type="html">
        some comments...
      </summary>
    </entry>
    <entry>
      <title>PatchSet : patch really split this time</title>
      <updated>2008-12-02T17:56:22+00:00</updated>
      <author>
        <name>jeremy</name>
      </author>
      <summary type="html">
        Download raw patch set
      </summary>
    </entry>
    <entry>
      <title>Message from mark@chromium.org</title>
      <updated>2008-12-02T18:23:13+00:00</updated>
      <author>
        <name>Mark Mentovai</name>
      </author>
      <summary type="html">
        some comments...
      </summary>
    </entry>
  </feed>
  $
  """
  # Get the issue number from the feed title.
  # NOTE(review): feed_title[0] raises IndexError on an empty feed,
  # which run_parser's handler catches.
  feed_title = doc.xpath('/t:feed/t:title', namespaces=NAMESPACE)
  feed_title = feed_title[0].text.strip()
  issue_number = lookup_issue_pattern(feed_title)
  
  # Get the discussion
  owner = None
  reviewer = None
  entries = doc.xpath('/t:feed/t:entry', namespaces=NAMESPACE)
  
  discussion = []
  for entry in entries:
    entry_title = entry.xpath('./t:title', namespaces=NAMESPACE)
    entry_title = entry_title[0].text.strip()
    
    updated_date = entry.xpath('./t:updated', namespaces=NAMESPACE)
    updated_date = updated_date[0].text.strip()
    
    # Patch-set uploads are not messages; skip them entirely.
    if not is_patch(entry_title):
      if owner is None:
        # Typically the first message contains the owner name.
        owner = lookup_user_pattern(entry_title)
        owner_message = [owner, create_timestamp(updated_date)]
      else:
        dyad = []
        someone = lookup_user_pattern(entry_title)
        if someone != owner:
          # If someone is not an owner then he is a reviewer
          reviewer = someone
          reviewer_message = [reviewer, create_timestamp(updated_date)]
          
          # The dyad is complete and append to the discussion entry.
          # Note the same pending owner_message may be paired with
          # several consecutive reviewer messages.
          dyad.append(owner_message)
          dyad.append(reviewer_message)
          discussion.append(dyad)
        else:
          # If someone is an owner then just update his information.
          owner_message = [owner, create_timestamp(updated_date)]
  
  return issue_number, discussion


def is_patch(string):
  """Return whether *string* is a patch-set entry title (not a message).

  Patch-set entries in the feed start with the literal text 'PatchSet';
  a plain prefix test replaces the regex that was re-compiled on every
  call.  Callers only test the result's truthiness, so returning a bool
  instead of a match object stays compatible.
  """
  return string.startswith('PatchSet')


def lookup_issue_pattern(string):
  """Extract the issue number from a feed title.

  string -- e.g. 'Code review - Issue 13052: Cleanup: ...'

  Return the issue number as a string.  Raise IndexError when no
  'Issue <number>' fragment is present so that run_parser's existing
  `except IndexError` handler also covers a malformed title; the old
  code raised AttributeError (None.group) which nothing caught.
  """
  match = re.search(r'Issue (\d+)', string)
  if match is None:
    raise IndexError('no issue number found in: %r' % string)
  return match.group(1)


def lookup_user_pattern(string):
  """Extract the sender from an entry title such as
  'Message from jeremy@chromium.org'.

  Return the sender's alias (the part before '@', see get_alias)."""
  match = re.match('Message from (.*)', string)
  return get_alias(match.group(1))


def create_timestamp(date):
  """Create a POSIX timestamp from an ISO-8601 date string.

  date -- e.g. '2008-12-02T16:01:52+00:00'.  The feed always reports
          UTC (+00:00, see the example in parse_document), so the
          fields are interpreted as UTC; the numeric offset itself is
          split off and discarded.

  Return the timestamp as an int.
  """
  # Split on every non-digit run; [:-1] drops the trailing offset field.
  dt = datetime.datetime(*map(int, re.split(r'\D', date)[:-1]))
  # calendar.timegm treats the tuple as UTC.  The previous time.mktime
  # call applied the *local* timezone to these UTC feed times, skewing
  # every timestamp by the machine's UTC offset.
  return int(calendar.timegm(dt.timetuple()))


def get_alias(string):
  """Return the local part of an e-mail address (the text before the
  first '@'); a string without '@' is returned unchanged."""
  alias, _sep, _domain = string.partition('@')
  return alias


def read_remote_document(url):
  """Download the resource at *url* and parse it as XML.

  Return the parsed markup document (an lxml ElementTree)."""
  raw = urllib.urlopen(url).read()
  stream = StringIO.StringIO(raw)
  return etree.parse(stream)


def read_input_file(filename):
  """Read issue URLs from *filename*, one URL per line.

  Surrounding whitespace is stripped (readlines() used to keep the
  trailing newline on every URL, which was then fed to urlopen) and
  blank lines are skipped.  The file handle is closed deterministically
  via the with-statement; the old code leaked it.

  Return a list of URL strings.
  """
  with open(filename, "r") as infile:
    return [line.strip() for line in infile if line.strip()]


def get_options(argv):
  """Parse *argv* with optparse.

  Return a (options, script, args) triple where script is argv[0] and
  args holds the remaining positional arguments."""
  parser = OptionParser(usage=USAGE)
  parser.description = DESCRIPTION

  parser.add_option("-u", dest="url", default=None,
                    help="input URL for the web page.")
  parser.add_option("-b", dest="batch_file", default="url.txt",
                    help="if you need to parse a batch of URLs, you can \
                          put the URLs into a file separated by a newline \
                          for each URL. The default name for the file is \
                          url.txt")
  parser.add_option("--no-header", dest="header", default=True,
                    action="store_false",
                    help="print without CSV header.")

  parsed_options, leftover = parser.parse_args(argv)
  return parsed_options, leftover[0], leftover[1:]


def main(argv=None):
  """Entry point: collect the issue URLs and print the interaction CSV.

  A single URL given with -u wins; otherwise every URL listed in the
  batch file (default url.txt) is processed.
  """
  argv = argv or sys.argv
  options, script, args = get_options(argv)

  # Get the URLs.  The original referenced the undefined names `batch`
  # and `batch_file` (NameError whenever -u was absent); both values
  # live on the parsed options object.
  issue_urls = []
  if options.url:
    issue_urls = [options.url]
  elif options.batch_file:
    issue_urls = read_input_file(options.batch_file)

  print_csv(issue_urls, options.header)


if __name__ == "__main__":
  # main() returns None, so sys.exit reports a zero (success) status.
  sys.exit(main())
