<?php
/**
 * Celebrio Kernel
 *
 * @copyright  Copyright (c) 2012 Celebrio Software, s.r.o.
 * @package    Kernel
 */
namespace KernelModule\LogsModule\PerformanceModule;

use Nette\Object;

use \Microsoft_WindowsAzure_Storage_DynamicTableEntity;

use KernelModule\LogsModule\PerformanceModule\IPerformanceLogsProcessor;

use Celebrio\WindowsAzure\AzureStorage\AzureStorageTable;
use Celebrio\Core\Config;
use Celebrio\Diagnostics\Timer;
use Celebrio\Diagnostics\TimerLevel;

/**
 * Azure performance logs in Celebrio
 * 
 * It stores statistics data in Azure storage table.
 * In case of failure of update, the system recovers itself on the next update.
 * 
 * @author Martin Novák
 */
class PerformanceLogsProcessorAzure extends Object implements IPerformanceLogsProcessor {
    /** Base name of the statistics table; a unix timestamp is appended on every rebuild. */
    private static $statisticsTableName = "CelebrioKernelTimers";

    /** Name of the Azure storage table holding the raw log4php Timer entries. */
    private static $timerTableName = "log4phpTimer";

    /**
     * Deletes hard logs older than a set number of months.
     *
     * @param int $numberOfMonths number of months to keep; 0 means "delete everything"
     * @return bool TRUE when all hard logs were deleted, FALSE when month-based
     *              deletion was requested (not implemented yet)
     */
    public static function deleteHardLogsOlderThan($numberOfMonths) {
        if ((int) $numberOfMonths === 0) {
            self::deleteAllHardLogs();
            return TRUE;
        }
        // TODO: implement deleting based on months
        return FALSE;
    }

    /**
     * Deletes all performance hard logs by dropping the raw Timer table.
     *
     * The "ProcessedTimers" config is reset to partition 0.0 while keeping the
     * current statistics table name, so the next processLogs() run starts
     * reading from the first log again.
     */
    private static function deleteAllHardLogs() {
        $configArray = Config::current()->loadConfigArray("ProcessedTimers");
        // loadConfigArray() may return a falsy value when the config entry was
        // never written (see processLogs()); fall back to a fresh table name
        $oldStatisticsTableName = $configArray
                ? $configArray["tablename"]
                : self::$statisticsTableName . time();
        if (AzureStorageTable::connection()->tableExists(self::$timerTableName)) {
            AzureStorageTable::connection()->deleteTable(self::$timerTableName);
        }
        Config::current()->saveConfigArray(array("processed" => 0.0, "tablename" => $oldStatisticsTableName), "ProcessedTimers");
    }

    /**
     * Deletes the statistics table and resets the processed-logs marker to 0.0.
     *
     * A new (timestamped) statistics table name is stored in the config so the
     * next processLogs() run creates the table from scratch.
     */
    public static function deleteStatistics() {
        $configArray = Config::current()->loadConfigArray("ProcessedTimers");
        // guard against a missing config entry; tableExists("") is simply FALSE
        $oldStatisticsTableName = $configArray ? $configArray["tablename"] : "";
        $statisticsTableName = self::$statisticsTableName . time();
        if (AzureStorageTable::connection()->tableExists($oldStatisticsTableName)) {
            AzureStorageTable::connection()->deleteTable($oldStatisticsTableName);
        }
        Config::current()->saveConfigArray(array("processed" => 0.0, "tablename" => $statisticsTableName), "ProcessedTimers");
    }

    /**
     * Process performance logs.
     *
     * Reads raw Timer entities from the timer table (20 s read budget),
     * aggregates them per row key, merges them into the previously stored
     * statistics, and rewrites the statistics table (delete + recreate is
     * faster than updating in place). The processed-partition marker is saved
     * last; whenever a time limit is hit the marker is reset to 0.0 so that
     * the next run reprocesses everything and no data is corrupted.
     *
     * @return int number of processed logs
     */
    public static function processLogs()
    {
        $processedLogs = array();
        $lastProcessedLogPartition = 0.0;
        $processed = 0.0;
        $statisticsTableExists = FALSE;
        $numberOfProcessedLogs = 0;
        $oldStatisticsTableName = "";
        $statisticsTableName = self::$statisticsTableName . time();

        /**
         * we load the configuration with the last processed partition of timers
         */
        $configArray = Config::current()->loadConfigArray("ProcessedTimers");
        if ($configArray) {
            $processed = $configArray["processed"];
            $oldStatisticsTableName = $configArray["tablename"];
            if (AzureStorageTable::connection()->tableExists($oldStatisticsTableName)) {
                $statisticsTableExists = TRUE;
            }
        } else {
            Config::current()->saveConfigArray(array("processed" => 0.0, "tablename" => $statisticsTableName), "ProcessedTimers");
            $processed = 0.0;
        }

        /**
         * We retrieve Timer logs starting from the last processed partition with
         * a reading time limit of 20 seconds
         */
        $logs = AzureStorageTable::connection()->retrieveEntities(
                        self::$timerTableName,
                        "",
                        "Microsoft_WindowsAzure_Storage_DynamicTableEntity",
                        $processed);
        $start = time(); // we have a time limit for reading stored Timer logs
        Timer::start("Loading Timer logs");
        foreach ($logs as $log) {
            // we let the loop go for a maximum of 20 seconds
            if ((time() - $start) >= 20) {
                \Logger::getRootLogger()->error("Performance Logs Processor for Azure skipped some Timer logs because of time limit. There is no data curruption but you need to update the statistics again to cover all available logs.");
                break;
            }
            // TODO: use whole partition/row key combination
            $lastProcessedLogPartition = $log->getPartitionKey();
            $rowKey = $log->getRowKey();
            $time = $log->time;

            // accumulate (total time, hit count) per row key
            if (isset($processedLogs[$rowKey])) {
                $processedLogs[$rowKey] = array(
                        ($processedLogs[$rowKey][0] + $time),
                        ($processedLogs[$rowKey][1] + 1));
            } else {
                $processedLogs[$rowKey] = array($time, 1);
            }
            $numberOfProcessedLogs++;
        }
        unset($logs);
        Timer::stop("Loading Timer logs");

        /**
         * We retrieve old statistics and merge the freshly aggregated logs into them
         */
        $start = time(); // we count a time limit for inserting data into the azure storage table
        Timer::start("Updating performance statistics");
        $updatedStatistics = array();
        if ($statisticsTableExists) {
            $statistics = AzureStorageTable::connection()->retrieveEntities($oldStatisticsTableName);
            foreach ($statistics as $statistic) {
                // we let the loop go for a maximum of 30 seconds
                if ((time() - $start) >= 30) {
                    $lastProcessedLogPartition = 0.0;
                    \Logger::getRootLogger()->error("Performance Logs Processor run out of time limit while updating Timer statistics. Last proccessed partition set to 0.0 so that the logs are not currupted. No statistics were saved.");
                    break;
                }
                $block = $statistic->getRowKey();
                if (isset($processedLogs[$block])) {
                    $processedLog = $processedLogs[$block];
                    $statistic->time    = ($statistic->time + $processedLog[0]);
                    $statistic->count   = ($statistic->count + $processedLog[1]);
                    $statistic->average = ($statistic->time / $statistic->count);
                    // merged entries must not be inserted again as "new" below
                    unset($processedLogs[$block]);
                }
                // we have to create a new object for some unknown reason...
                $timerEntity = new Microsoft_WindowsAzure_Storage_DynamicTableEntity("timers", $statistic->getRowKey());
                $timerEntity->time = $statistic->time;
                $timerEntity->count = $statistic->count;
                $timerEntity->average = $statistic->average;
                $updatedStatistics[] = $timerEntity;
            }
        }

        /**
         * we delete and recreate the statistics table (it is faster than updating) and set
         * the processed partition to 0.0 so that in case of failure the next update starts
         * again from the first log and there is no ongoing data corruption
         */
        if ($statisticsTableExists) {
            AzureStorageTable::connection()->deleteTable($oldStatisticsTableName);
        }
        Config::current()->saveConfigArray(array("processed" => 0.0, "tablename" => $statisticsTableName), "ProcessedTimers");
        AzureStorageTable::connection()->createTable($statisticsTableName);

        // starting batch
        $batch = AzureStorageTable::connection()->startBatch();
        $counter = 0;

        /**
         * we store the updated statistics in Azure
         */
        if ($statisticsTableExists) {
            foreach ($updatedStatistics as $statistic) {
                // we let the loop go for a maximum of 30 seconds
                if ((time() - $start) >= 30) {
                    $lastProcessedLogPartition = 0.0;
                    \Logger::getRootLogger()->error("Performance Logs Processor for Azure run out of time limit while inserting updated Timer statistics. Last proccessed partition set to 0.0 so that the logs are not currupted.");
                    break;
                }
                // there is a limit in Azure storage for the number of entities in one batch, the limit is 100 but I use 90 just in case...
                if ($counter == 90) {
                    $batch->commit();
                    $batch = AzureStorageTable::connection()->startBatch();
                    $counter = 0;
                }
                AzureStorageTable::connection()->insertEntity($statisticsTableName, $statistic);
                $counter++;
            }
            unset($updatedStatistics);
        }

        /**
         * we process the rest of the logs (row keys with no previous statistics entry)
         */
        $counter = 0;
        foreach ($processedLogs as $block => $log) {
            // we let the loop go for a maximum of 30 seconds
            if ((time() - $start) >= 30) {
                $lastProcessedLogPartition = 0.0;
                \Logger::getRootLogger()->error("Performance Logs Processor for Azure run out of time limit while inserting new unique Timer statistics. Last proccessed partition set to 0.0 so that the logs are not currupted.");
                break;
            }
            // there is a limit in Azure storage for the number of entities in one batch, the limit is 100 but I use 90 just in case...
            if ($counter == 90) {
                $batch->commit();
                $batch = AzureStorageTable::connection()->startBatch();
                $counter = 0;
            }
            $timerEntity = new Microsoft_WindowsAzure_Storage_DynamicTableEntity("timers", $block);
            $timerEntity->time = $log[0];
            $timerEntity->count = $log[1];
            $timerEntity->average = $log[0] / $log[1];
            AzureStorageTable::connection()->insertEntity($statisticsTableName, $timerEntity);
            $counter++;
        }
        unset($processedLogs);

        // finishing batch
        $batch->commit();
        Timer::stop("Updating performance statistics");

        /**
         * we persist the last processed partition of the Timers log
         */
        Config::current()->saveConfigArray(array("processed" => $lastProcessedLogPartition, "tablename" => $statisticsTableName), "ProcessedTimers");

        return $numberOfProcessedLogs;
    }

}