"""
A pipeline to generate and store statistics.
"""
import logging

from google.appengine.ext import deferred, ndb

from dsbq.config import config
from dsbq.pipeline import bigquery
from dsbq.models import Stat, VALID_CONTEXT

__all__ = ['execute_bigquery_query']

# Query execution modes, mirroring the BigQuery job priority options.
BATCH_MODE        = bigquery.BQ_BATCH_MODE
INTERACTIVE_MODE  = bigquery.BQ_INTERACTIVE_MODE
VALID_MODES       = [BATCH_MODE, INTERACTIVE_MODE]
# Number of result rows fetched per ingestion task.
PAGE_SIZE         = 5000
# Column-name prefixes that mark dimension and stat columns in query results.
DIMENSION_PREFIX  = 'DIMENSION__'
STAT_PREFIX       = 'STAT__'
# Seconds to wait between job-status polls, keyed by query mode (batch jobs
# are scheduled more slowly, so they are polled less frequently).
COUNTDOWN = {
    BATCH_MODE: 60*15,
    INTERACTIVE_MODE: 60,
}

def execute_bigquery_query(query, context, query_mode=INTERACTIVE_MODE):
    """
    Starts a pipeline to run a BigQuery query and ingest the results.

    Args:
        query: the BigQuery SELECT statement to run, must contain at least
            one STAT__* column name.
        context: the stat context, e.g., a serialized report date like
            '2013-03-09'; must match VALID_CONTEXT.
        query_mode: BATCH_MODE or INTERACTIVE_MODE; controls both the BigQuery
            job priority and the delay between status polls.

    Raises:
        ValueError: if any argument is missing or invalid.
    """
    if not query:
        raise ValueError('query is required.')
    if not context:
        raise ValueError('context is required.')
    if not VALID_CONTEXT.match(context):
        raise ValueError('context must match %s' % VALID_CONTEXT)
    if query_mode not in VALID_MODES:
        raise ValueError('query_mode must be in %s' % VALID_MODES)
    if STAT_PREFIX not in query:
        raise ValueError('query must have at least one column name starting with %s' % STAT_PREFIX)
    # Lazy %-args (instead of pre-formatting with '%') so the message is only
    # built when the log record is actually emitted.
    logging.info('Submitting BigQuery %s query.\n\n%s\n\n', query_mode, query)
    job_id = bigquery.submit_query_job(query, query_mode=query_mode)
    logging.info('BigQuery job_id "%s" submitted.', job_id)
    # Hand off to the status poller after a mode-appropriate delay.
    deferred.defer(poll_for_bigquery_status, job_id, COUNTDOWN[query_mode], context,
                   _queue=config.DEFERRED_QUEUE,
                   _url=config.DEFERRED_URL_PREFIX + '/poll_for_bigquery_status',
                   _countdown=COUNTDOWN[query_mode])

def poll_for_bigquery_status(job_id, countdown, context, iteration=1):
    """
    Checks to see if the BigQuery job is complete, loops and sleeps if not.

    Args:
        job_id: the BigQuery job identifier to poll.
        countdown: seconds to wait before the next poll attempt.
        context: the stat context, passed through to ingestion.
        iteration: 1-based poll attempt counter, used for logging and to give
            each deferred poll task a distinct URL.
    """
    logging.info('Checking to see if BigQuery job "%s" is complete (attempt %s).',
                 job_id, iteration)
    if bigquery.is_job_complete(job_id):
        logging.info('BigQuery job "%s" is complete.', job_id)
        deferred.defer(ingest_bigquery_results, job_id, context,
                       _queue=config.DEFERRED_QUEUE,
                       _url=config.DEFERRED_URL_PREFIX + '/ingest_bigquery_results')
    else:
        logging.info('BigQuery job "%s" is not complete. Sleeping.', job_id)
        # BUGFIX: '%' binds tighter than '+', so the original
        # "'...%s' % iteration+1" evaluated as "(str) + 1" and raised
        # TypeError on every re-poll; the addition must be parenthesized.
        deferred.defer(poll_for_bigquery_status, job_id, countdown, context,
                       iteration=iteration+1,
                       _queue=config.DEFERRED_QUEUE,
                       _url=config.DEFERRED_URL_PREFIX + '/poll_for_bigquery_status/%s' % (iteration + 1),
                       _countdown=countdown)

def ingest_bigquery_results(job_id, context, start_index=0):
    """
    Reads in a BigQuery result set and creates stats from it.

    Args:
        job_id: the completed BigQuery job whose results to ingest.
        context: the stat context under which Stat entities are created.
        start_index: zero-based row offset of the page to fetch; successive
            pages are handled by re-deferring this task.
    """
    result = bigquery.get_results_page(job_id, start_index=start_index, page_size=PAGE_SIZE)
    if len(result) == 0:
        if start_index == 0:
            # An empty first page means the query produced no rows at all.
            logging.warning('Found no rows in result set, nothing to ingest. (job_id "%s")', job_id)
        else:
            # An empty continuation page just means the total row count was an
            # exact multiple of PAGE_SIZE; this is the normal end of paging,
            # not a problem worth a warning.
            logging.info('No more rows to ingest. (job_id "%s")', job_id)
        return
    logging.info('Starting at row %s of %s rows.', start_index, result.total_rows)
    process_bigquery_page(result, context, start_index)
    # A completely full page may be followed by more rows; defer another task
    # to fetch the next page.
    if len(result) == PAGE_SIZE:
        logging.info('Deferred to get next page of results.')
        deferred.defer(ingest_bigquery_results, job_id, context,
                       start_index=start_index+len(result),
                       _queue=config.DEFERRED_QUEUE,
                       _url=config.DEFERRED_URL_PREFIX + '/ingest_bigquery_results/%s' % start_index)

def process_bigquery_page(result, context, row_num):
    """
    Reads through the page of BigQuery rows and creates a stat entry for each row.

    Args:
        result: a page of BigQuery rows exposing `column_names` and row iteration.
        context: the stat context passed to Stat.create.
        row_num: the absolute index of the first row in this page.

    Raises:
        ValueError: if no column name starts with STAT__.
    """
    # Build lists of (column index, bare name) for dimension and stat columns
    # (columns starting with DIMENSION__ and STAT__ respectively).
    dimension_names = []
    stat_names = []
    for index, column_name in enumerate(result.column_names):
        if column_name.startswith(DIMENSION_PREFIX):
            # BUGFIX: str.strip(prefix) removes a *character set* from both
            # ends, not the prefix — e.g. 'DIMENSION__DATE'.strip('DIMENSION__')
            # yields 'AT', corrupting names. Slice off the prefix instead.
            dimension_names.append((index, column_name[len(DIMENSION_PREFIX):]))
        elif column_name.startswith(STAT_PREFIX):
            stat_names.append((index, column_name[len(STAT_PREFIX):]))
    if not stat_names:
        raise ValueError('Result must have at least one column starting with "STAT__".')

    entities = []
    for row in result:
        # Encode this row's dimensions once; they are shared by every stat
        # column in the row.
        dimensions = []
        for index, dimension_name in dimension_names:
            dimension = Stat.encode_dimension(dimension_name, row[index])
            dimensions.append(dimension)
        for index, stat_name in stat_names:
            # put=False: collect entities and write them in one batch below.
            entity = Stat.create(context, stat_name, row_num, row[index], dimensions=dimensions, put=False)
            entities.append(entity)
        row_num += 1
    # Single batched datastore write for the whole page.
    ndb.put_multi(entities)
    logging.info('Added %s Stat entities.', len(entities))
