#!/usr/bin/python2.7
#
# Copyright 2012 Twist & Shout Inc.  All Rights Reserved.
#
# A pipeline stage that runs a mapreduce to convert logs to csv files.

from csv import DictWriter
from cStringIO import StringIO
import datetime
import logging
import time

from google.appengine.api import app_identity
from google.appengine.api import logservice

from mapreduce import base_handler, mapreduce_pipeline
from pytz.gae import pytz

# Timezone used to render the human-readable 'local_time' column; all raw
# timestamps from the Logs API are UTC epoch seconds.
_TIMEZONE = pytz.timezone('America/Los_Angeles')

# Output schema: one entry per CSV column, in column order.  The
# name/type/mode shape looks like a BigQuery field definition — presumably
# this doubles as the schema for a downstream BigQuery load job.
# TODO(review): confirm against the table this CSV is loaded into.
FIELDS = [
      {'name': 'start_time', 'type': 'FLOAT', 'mode': 'REQUIRED'},
      {'name': 'method', 'type': 'STRING', 'mode': 'REQUIRED'},
      {'name': 'resource', 'type': 'STRING', 'mode': 'REQUIRED'},
      {'name': 'status', 'type': 'INTEGER', 'mode': 'REQUIRED'},
      {'name': 'latency', 'type': 'FLOAT', 'mode': 'REQUIRED'},
      {'name': 'response_size', 'type': 'INTEGER', 'mode': 'REQUIRED'},
      {'name': 'loading_request', 'type': 'BOOLEAN', 'mode': 'REQUIRED'},
      {'name': 'cost', 'type': 'FLOAT', 'mode': 'REQUIRED'},
      {'name': 'user_agent', 'type': 'STRING'},
      {'name': 'nickname', 'type': 'STRING'},
      {'name': 'version_id', 'type': 'STRING'},
      {'name': 'location_city', 'type': 'STRING'},
      {'name': 'location_region', 'type': 'STRING'},
      {'name': 'location_country', 'type': 'STRING'},
      {'name': 'all_logs', 'type': 'STRING'},
      {'name': 'local_time', 'type': 'STRING'},
]


def parse_app_logs(l):
    """Split a RequestLog's app logs into properties and plain log lines.

    Debug-level lines of the form 'Property: key=value' are parsed into a
    dict; every other log record is collected verbatim.

    Args:
        l: a Logs API RequestLog; must expose .app_logs, each with
            .level and .message.

    Returns:
        (properties, all_logs): properties maps stripped property names to
        stripped string values; all_logs is the list of non-property
        AppLog records, in original order.
    """
    properties = {}
    all_logs = []
    for log in l.app_logs:
        if (log.level == logservice.LOG_LEVEL_DEBUG
                and log.message.startswith('Property:')):
            # Strip exactly the prefix matched above.  The original sliced
            # len('Property: ') (with a trailing space), which dropped the
            # first character of the key whenever no space followed the
            # colon.  The .strip() below absorbs an optional space.
            key, _, value = log.message[len('Property:'):].partition('=')
            properties[key.strip()] = value.strip()
        else:
            all_logs.append(log)

    return properties, all_logs


def log2csv(l):
    """Convert a Logs API RequestLog object to one CSV row.

    Mapper function: yields a single CSV-formatted string whose columns
    follow the order of FIELDS.

    Args:
        l: a Logs API RequestLog with app logs included.

    Yields:
        One CSV line (str), terminated by the csv module's line ending.
    """
    values = StringIO()
    csv_writer = DictWriter(values, [field['name'] for field in FIELDS])
    log_properties, all_logs = parse_app_logs(l)

    # struct_time[:6] is (year, month, day, hour, minute, second) — exactly
    # the positional args datetime() expects.  The original used [:7], which
    # passed tm_wday (0-6) as the microsecond argument, producing spurious
    # microseconds in 'local_time'.
    start_date = datetime.datetime(*tuple(time.gmtime(l.start_time))[:6])
    # Shift the naive UTC datetime into the configured local timezone.
    start_date = start_date + _TIMEZONE.utcoffset(start_date)

    csv_writer.writerow(
         {'start_time': l.start_time,
          'method': l.method,
          'resource': l.resource,
          'status': l.status,
          'latency': l.latency,
          'response_size': l.response_size,
          'loading_request': l.was_loading_request,
          'cost': l.cost,
          'user_agent': l.user_agent,
          'nickname': l.nickname,
          'version_id': l.version_id,
          # Location columns come from 'Property:' debug log lines, if any.
          'location_city': log_properties.get('location-city'),
          'location_region': log_properties.get('location-region'),
          'location_country': log_properties.get('location-country'),
          'all_logs': '\n'.join([log.message for log in all_logs]),
          'local_time': str(start_date),
         })
    yield values.getvalue()


class Log2Gs(base_handler.PipelineBase):
  """A pipeline that ingests request logs as CSV files in Google Storage."""

  @staticmethod
  def start_date_from_time(start_time):
      """Return the local-timezone calendar date for a UTC epoch time.

      Args:
          start_time: UTC epoch seconds (int or float).

      Returns:
          datetime.date in the _TIMEZONE local time.
      """
      # struct_time[:6] is (year, month, day, hour, minute, second).  The
      # original sliced [:7], passing tm_wday (0-6) as the microsecond
      # argument of datetime(); harmless for .date() but still wrong.
      start_date = datetime.datetime(*tuple(time.gmtime(start_time))[:6])
      return (start_date + _TIMEZONE.utcoffset(start_date)).date()

  @staticmethod
  def file_prefix(start_time):
      """Build the output file prefix: app id, local date, and epoch time."""
      start_date = Log2Gs.start_date_from_time(start_time)
      return 'log2csv-%s-%s-%s' % (app_identity.get_application_id(), start_date, start_time)

  def run(self, gsbucketname, start_time, end_time, version_ids):
    """Start a mapper that writes matching request logs as CSV to GS.

    Args:
      gsbucketname: destination Google Storage bucket name.
      start_time: inclusive lower bound of the log window, epoch seconds.
      end_time: upper bound of the log window, epoch seconds.
      version_ids: list of app version ids whose logs are read.
    """
    # One shard per 4-hour slice of the window, with a minimum of one.
    shards = max(1, int((end_time - start_time) / (3600 * 4)))
    logging.info('Using %s shards', shards)
    yield mapreduce_pipeline.MapperPipeline(
        Log2Gs.file_prefix(start_time),
        'log2gs.log2csv',
        'mapreduce.input_readers.LogInputReader',
        output_writer_spec='mapreduce.output_writers.FileOutputWriter',
        params={
            'input_reader': {
                'start_time': start_time,
                'end_time': end_time,
                'version_ids': version_ids,
                'include_app_logs': True,
            },
            'output_writer': {
                'filesystem': 'gs',
                'gs_bucket_name': gsbucketname,
            },
            'root_pipeline_id': self.root_pipeline_id,
            },
        shards=shards)
