from __future__ import with_statement

from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.datastore import entity_pb
from google.appengine.api import datastore

import pipeline
 
from mapreduce.lib import files
from mapreduce.lib.pipeline import common as pipeline_common
from mapreduce import base_handler
from mapreduce.mapreduce_pipeline import *
from mapreduce import operation as op
from mapreduce import shuffler
from mapreduce import input_readers

import time
from jobTime import *

TIMESPAN_FILTER_VALUE = 1.0
FETCH_SIZE = 5000   # 5000 for measurement

def wrapperEncoder(bytesStr):
    """Hex-encode a raw byte string: two lowercase hex digits per byte.

    e.g. 'ab' -> '6162'.  Used to make protobuf-encoded entity bytes safe
    to pass through pipeline arguments as plain text.

    Replaces the old tab/space-mixed manual nibble loop; '%02x' zero-pads
    exactly as the high/low-nibble concatenation did.
    """
    return ''.join('%02x' % ord(ch) for ch in bytesStr)

def wrapperDecoder(utf8Str):
    """Inverse of wrapperEncoder: turn a hex string back into raw bytes.

    Raises AssertionError on odd-length input (kept as an assert for
    backward compatibility with existing callers).
    """
    assert len(utf8Str) % 2 == 0
    # Consume the string two hex digits (one byte) at a time.
    return ''.join(chr(int(utf8Str[i:i + 2], 16))
                   for i in range(0, len(utf8Str), 2))


class PrintRst(pipeline.Pipeline):
    """Debug pipeline stage: logs the length and contents of a map result."""

    def run(self, items):
        # 'items' renamed from 'list' (shadowed the builtin); the pipeline
        # framework passes run() arguments positionally, so callers are safe.
        import logging
        # Lazy %-style args avoid building strings when INFO is disabled.
        logging.info('-------------- map result len: %s', len(items))
        logging.info('-------------- map result: %s', items)

###############################################
# Start of Pipeline
# FilterTimespan
###############################################

class FilterResult(db.Model):
    """Datastore entity holding one filtered log row (action + source IP)."""
    action = db.IntegerProperty() # 0:click, 1:read, 2:change
    sourceIp = db.IntegerProperty()

''' this works fine, but use mapreduce api instead
class FilterOnTimespan(pipeline.Pipeline):
    def run(self, bin_log_list):
	result_list = []
	for bin_str in bin_log_list:
	    log_proto = entity_pb.EntityProto(wrapperDecoder(bin_str))
	    log = db.model_from_protobuf(log_proto)
	    if log.timespan > TIMESPAN_FILTER_VALUE:
		filterRt = FilterResult(action=log.action, sourceIp=log.sourceIp)
		result_list.append(filterRt)
	db.put(result_list)

    def finalized(self):
	# record start time
        endTime = JobTime(name='pipeline_logFilter', time=time.time())
        endTime.put()

class FetchAllLog(pipeline.Pipeline):
    def run(self):
    	query = db.GqlQuery('SELECT * FROM LogAction')
    	while True:
            result = query.fetch(FETCH_SIZE)
	    if len(result) > 0:
		bin_result = []
		for log in result:
		    bin_str = db.model_to_protobuf(log).Encode()
		    bin_result.append(wrapperEncoder(bin_str))
                yield FilterOnTimespan(bin_result)
            if len(result) < FETCH_SIZE:
                break
            cursor = query.cursor()
            query.with_cursor(cursor)

    def finalized(self):
        # record end time
        endTime = JobTime(name='pipeline_logFilter', time=time.time())
        endTime.put()
'''

def mapTimeFilter(data):
    """Map step: emit (sourceIp, action) for each LogAction entity whose
    timespan exceeds TIMESPAN_FILTER_VALUE.

    Entities at or below the threshold produce nothing.  Indentation
    normalized (the yield line was tab-indented).
    """
    import logging
    logging.info('---------------- data: ' + str(data))
    if data.timespan > TIMESPAN_FILTER_VALUE:
        yield (data.sourceIp, data.action)

def reduceTimeFilter(key, value):
    """Identity reduce: log the (key, value) pair and emit it unchanged."""
    import logging
    msg = '---------------- key: ' + str(key) + ' value: ' + str(value)
    logging.info(msg)
    pair = (key, value)
    yield pair

class FetchAllLog(base_handler.PipelineBase):
    """Root pipeline: run the timespan-filter MapReduce over LogAction."""

    def run(self):
        # The child pipeline future was previously bound to an unused
        # local ('map_time_filter'); yield it directly instead.
        yield MapreducePipeline('time_filter',
                                'analytics_pl.mapTimeFilter',
                                'analytics_pl.reduceTimeFilter',
                                'mapreduce.input_readers.DatastoreInputReader',
                                'mapreduce.output_writers.BlobstoreOutputWriter',
                                mapper_params=dict(entity_kind='logAction.LogAction'),
                                reducer_params=dict(mime_type='text/plain'),
                                shards=8)

    def finalized(self):
        """Record the job end time once the whole pipeline completes."""
        endTime = JobTime(name='pipeline_logFilter', time=time.time())
        endTime.put()
	
###############################################
# End of Pipeline
# FilterTimespan
###############################################

class LogFilter(webapp.RequestHandler):
    """HTTP handler that kicks off the timespan-filter pipeline.

    Mixed tab/space indentation normalized to 4-space.
    """

    def post(self):
        # Clear timing entities left over from any previous run.
        time_entities = JobTime.all().fetch(10)
        db.delete(time_entities)
        # Record the job start time.
        startTime = JobTime(name='pipeline_logFilter', time=time.time())
        startTime.put()

        job = FetchAllLog()
        job.start()
        self.redirect('/_ah/pipeline/status?root=%s' % job.pipeline_id)


###############################################
# Start of Pipeline
# AggregateTimespan
###############################################

def map_ip_time(data):
    """Map step: emit one (sourceIp, timespan) pair per LogAction entity."""
    pair = (data.sourceIp, data.timespan)
    yield pair

def reduce_ip_time(key, values):
    """Reduce step: emit (key, total) where total is the float sum of values.

    Values arrive from the shuffle stage as strings, so each is converted.
    The old loop shadowed the builtin 'sum' and mixed tab/space indentation;
    the 0.0 start value preserves a float result for empty input.
    """
    total = sum((float(v) for v in values), 0.0)
    yield (key, total)

class FetchAllLog_aggr(base_handler.PipelineBase):
    """Root pipeline: total timespan per source IP over all LogAction
    entities, computed with a single MapreducePipeline.

    (An earlier hand-rolled Map/Shuffle/Reduce composition lived here as a
    dead-code docstring; removed — it lacked cleanup and had a syntax error.)
    """

    def run(self):
        yield MapreducePipeline("timespan_mapreduce",
                                "analytics_pl.map_ip_time",
                                "analytics_pl.reduce_ip_time",
                                "mapreduce.input_readers.DatastoreInputReader",
                                "mapreduce.output_writers.BlobstoreRecordsOutputWriter",
                                mapper_params={'entity_kind': 'logAction.LogAction'},
                                reducer_params={'mime_type': 'text/plain'},
                                shards=8)

    def finalized(self):
        """Record the job end time once the pipeline completes."""
        endTime = JobTime(name='pipeline_logAggr', time=time.time())
        endTime.put()

''' my own pipeline without mapreduce api
class TotalTimePerIp(db.Model):
    sourceIp = db.IntegerProperty()
    timespan = db.FloatProperty()

class Shuffle(pipeline.Pipeline):
   def run(self, *part_timespan_sum_list):
	ip_dict = {}
	for sub_ip_dict in part_timespan_sum_list:
	    for ip, timeSum in sub_ip_dict.items():
		if ip in ip_dict:
                    ip_dict[ip] += timeSum
                else:
                    ip_dict[ip] = timeSum
	timePerIp_list = []
	for ip, time in ip_dict.items():
	    if len(timePerIp_list) == FETCH_SIZE:
		db.put(timePerIp_list)
		timePerIp_list = []
	    timePerIp = TotalTimePerIp(sourceIp=int(ip), timespan=time)
	    timePerIp_list.append(timePerIp)
        if len(timePerIp_list) > 0:
	    db.put(timePerIp_list)
	return
	
class AddOnTimespan(pipeline.Pipeline):
    def run(self, log_bin_list):
	ip_dict = {}
	for bin_str in log_bin_list:
	    log_proto = entity_pb.EntityProto(wrapperDecoder(bin_str))
            log = db.model_from_protobuf(log_proto)
	    if log.sourceIp in ip_dict:
		ip_dict[int(log.sourceIp)] += log.timespan
	    else:
		ip_dict[int(log.sourceIp)] = log.timespan
	return ip_dict
	
class FetchAllLog_aggr(pipeline.Pipeline):
    def run(self):
        query = db.GqlQuery('SELECT * FROM LogAction')
	part_timespan_sum_list = []
        while True:
            result = query.fetch(FETCH_SIZE)
            if len(result) > 0:
                bin_result = []
                for log in result:
                    bin_str = db.model_to_protobuf(log).Encode()
                    bin_result.append(wrapperEncoder(bin_str))
                part_timespan_sum = yield AddOnTimespan(bin_result)
		part_timespan_sum_list.append(part_timespan_sum)

            if len(result) < FETCH_SIZE:
                break
            cursor = query.cursor()
            query.with_cursor(cursor)

	yield Shuffle(*part_timespan_sum_list)
'''
###############################################
# End of Pipeline
# AggregateTimespan
###############################################

class LogAggr(webapp.RequestHandler):
    """HTTP handler that kicks off the per-IP timespan aggregation pipeline.

    Mixed tab/space indentation normalized to 4-space.
    """

    def post(self):
        # Clear timing entities left over from any previous run.
        time_entities = JobTime.all().fetch(10)
        db.delete(time_entities)
        # Record the job start time.
        startTime = JobTime(name='pipeline_logAggr', time=time.time())
        startTime.put()
        job = FetchAllLog_aggr()
        job.start()
        self.redirect('/_ah/pipeline/status?root=%s' % job.pipeline_id)


###############################################
# Start of Pipeline
# JoinOnUser (log/user join)
###############################################

def mapUserTimespan(data):
    """Map step: key each LogAction by its own entity id; the value carries
    the owning user's city and sex plus the action's timespan."""
    entity_id = data.key().id_or_name()
    user = data.user
    yield (entity_id, [user.city, user.sex, data.timespan])

def mapUser(data):
    """Map step: key each WebUser by its entity id; value is [city, sex]."""
    uid = data.key().id_or_name()
    yield (uid, [data.city, data.sex])

def mapLog(data):
    """Map step: key each LogAction by its user's entity id; value is the
    action's timespan."""
    user_key = data.user.key()
    yield (user_key.id_or_name(), data.timespan)

def ReduceTimespan(key, value):
    """Identity reduce: discard the key and emit the value unchanged."""
    yield value

class Extend(pipeline.Pipeline):
    """No-op pipeline stage: accepts a list and produces nothing."""

    def run(self, list):
        # Intentionally empty; kept as a placeholder sink.
        return None

##################################
# join version 2
class JoinOnUser(base_handler.PipelineBase):
    """Map WebUser and LogAction entities to user-id-keyed pairs, append the
    two map outputs, and shuffle them so records sharing a user id group
    together.

    Mixed tab/space indentation normalized; behavior unchanged.
    """

    def run(self):
        with pipeline.InOrder():
            user_map = yield MapPipeline('user_map',
                                         'analytics_pl.mapUser',
                                         'mapreduce.input_readers.DatastoreInputReader',
                                         params=dict(entity_kind='logAction.WebUser'),
                                         shards=16)
            log_map = yield MapPipeline('log_map',
                                        'analytics_pl.mapLog',
                                        'mapreduce.input_readers.DatastoreInputReader',
                                        params=dict(entity_kind='logAction.LogAction'),
                                        shards=16)
            # NOTE(review): per the original author, a real join cannot be
            # implemented with Append alone; the shuffle only groups by key.
            yield PrintRst(user_map)
            yield PrintRst(log_map)
            join = yield pipeline_common.Append(user_map, log_map)
            yield ShufflePipeline(join)

class TimespanPipeline_2(base_handler.PipelineBase):
    """Join logs with users via JoinOnUser, then reduce the shuffled output
    to the Blobstore."""

    def run(self):
        joined_data = yield JoinOnUser()
        with pipeline.After(joined_data):
            # The result was previously bound to a local named 'reduce'
            # (shadowing the builtin) and never used; yield directly.
            yield ReducePipeline('reduce',
                                 'analytics_pl.ReduceTimespan',
                                 'mapreduce.output_writers.BlobstoreRecordsOutputWriter',
                                 {'mime_type': 'text/plain'},
                                 joined_data)

class LogJoin_2(webapp.RequestHandler):
    """HTTP handler that launches the join-version-2 pipeline."""

    def post(self):
        # Drop timing records left over from earlier runs.
        stale_times = JobTime.all().fetch(10)
        db.delete(stale_times)
        # Persist the start timestamp for this run.
        start_record = JobTime(name='pipeline_logjoin2', time=time.time())
        start_record.put()
        root_job = TimespanPipeline_2()
        root_job.start()
        self.redirect('/_ah/pipeline/status?root=%s' % root_job.pipeline_id)

# end of join version 2
######################################

class TimespanPipeline(base_handler.PipelineBase):
    """Root pipeline: MapReduce over LogAction keyed by entity id, emitting
    [city, sex, timespan] per record to the Blobstore.

    Tab-indented run() body normalized to 4-space.
    """

    def run(self):
        yield MapreducePipeline("timespan_mapreduce",
                                "analytics_pl.mapUserTimespan",
                                "analytics_pl.ReduceTimespan",
                                "mapreduce.input_readers.DatastoreInputReader",
                                "mapreduce.output_writers.BlobstoreRecordsOutputWriter",
                                mapper_params={'entity_kind': 'logAction.LogAction'},
                                reducer_params={'mime_type': 'text/plain'},
                                shards=8)

    def finalized(self):
        """Record the job end time once the pipeline completes."""
        endTime = JobTime(name='pipeline_logjoin', time=time.time())
        endTime.put()

class LogJoin(webapp.RequestHandler):
    """HTTP handler that kicks off the single-MapReduce join pipeline.

    Mixed tab/space indentation normalized to 4-space.
    """

    def post(self):
        # Clear timing entities left over from any previous run.
        time_entities = JobTime.all().fetch(10)
        db.delete(time_entities)
        # Record the job start time.
        startTime = JobTime(name='pipeline_logjoin', time=time.time())
        startTime.put()
        job = TimespanPipeline()
        job.start()
        self.redirect('/_ah/pipeline/status?root=%s' % job.pipeline_id)

