#!/usr/bin/perl

###############################################################################
# Description:
#	This Perl script will read the data set from server A to G
#	and will take two arguments "start_time" and "time_period"
#	to identify the target logs within a decided period. It will
#	grab the data for each line of the log files and connect it to the
#	hashtables of references files. It will then work out the
#	detailed usage and centile information for each device, market
#	and application respectively. Finally it will write the result
#	to the result files according to the three fields mentioned
#	above. The output format will be like (taking device name as an example):
#	"DeviceName|Bytes|CentileByDevice|CentileOfTotalBytes"
#	and it's sorted according to the usage of each device among all
#	the devices in desc order.
# Author:
#	djia
# Time:
#	08/09/2013
###############################################################################

use strict;
use warnings;

# Set to a true value to enable per-record debug output
my $DEBUG_FLAG = 1;

# Requires exactly two arguments: start_time and time_period
die "Need 2 arguments: start_time time_period" unless @ARGV == 2;

# Prepare the filename strings used to traverse every target log.
# NOTE: this must be a plain list in parens; the original used
# [ ... ], which creates a one-element array holding an array
# reference, so $ndc[$i] was undef for every $i > 0.
my $dataset_file_prefix = 'mob_sub_agg';
my @ndc = ( '_A_', '_B_', '_C_', '_D_', '_E_', '_F_', '_G_' );

# Command-line arguments come from @ARGV; @_ is empty at file scope,
# so the original assignment left both values undefined.
my ( $start_time, $time_period ) = @ARGV;

# Round the period down to a whole number of 5-minute (300 s) log slots
$time_period -= $time_period % 300;
my $final_time = $start_time + $time_period;

# Build the lookup hash tables for the reference files with default
# key 'missing' if there's no key that matches
my %app_map        = ( 'missing' => 'N/A' );
my %device_tac_map = ( 'missing' => 'N/A' );
my %device_id_map  = ( 'missing' => 'N/A' );
my %market_map     = ( 'missing' => 'N/A' );

# The totals hash tables for device name, market and app respectively
my %usage_device_name;
my %usage_market;
my %usage_app;

# The running total of downlink plus uplink bytes across all records
my $usage_total = 0;

# Preparation work - load the reference files. See the subroutine
# load_reference_data below for details.
load_reference_data();

# Here we assume the log files' timestamps are the same from server _A_
# to _G_, and that the given time matches the logs' timestamps exactly;
# otherwise we would need a bisect search to find, per server, the real
# start time nearest to the given start time.

# Traverse all 7 servers, A to G
foreach my $server (@ndc) {

	# Handle the target logs for this server, one 300-second slot at a
	# time.  A C-style for loop is used deliberately: its increment runs
	# even when we 'next' past a missing log file.  The original
	# while-loop skipped the '$past_time += 300' on 'next' and looped
	# forever whenever a single log was absent.
	for (
		my $past_time = $start_time ;
		$past_time <= $final_time ;
		$past_time += 300
	  )
	{
		my $input_file_name =
		  $dataset_file_prefix . $server . $past_time . '.gz';

		# Skip this slot if the log is missing.  List-form pipe open
		# avoids passing the filename through the shell.
		open( my $input_fh, '-|', 'gunzip', '-c', $input_file_name )
		  or next;

		# Process each item (line) in the log file
		while ( my $line = <$input_fh> ) {

			# chomp/split must act on $line: the original read into
			# $line but then chomped and split $_, which held nothing.
			chomp $line;

			# The delimiter must be escaped as /\|/ - a bare '|' is
			# regex alternation and splits between every character.
			my @data_field = split( /\|/, $line );

			# Enter the subroutine to accumulate the usage information
			process_item(@data_field);
		}
		close $input_fh;
	}
}

# Process the accumulated data and write the three result files
output_as_device_name();
output_as_market_name();
output_as_app_name();

#################################################################
### 					Subroutines 						  ###
#################################################################

### Subroutine for loading the reference data into the four file-scope
### lookup hash tables (%app_map, %device_tac_map, %device_id_map,
### %market_map).  Dies if any reference file cannot be opened.
sub load_reference_data {

	# Identify the reference file names
	my $app_map_file        = 'APPLICATION_Mapping.dat';
	my $device_tac_map_file = 'Device_TAC_Mapping.dat';
	my $device_id_map_file  = 'Device_ID_Mapping.dat';
	my $market_map_file     = 'Market_Mapping.dat';

	# If the reference files are not in the current folder, set this to
	# the folder path (with trailing separator); it is prefixed to every
	# file name below.
	my $reference_folder = '';

	# Process the application lookup hashtable: field 0 -> field 1.
	# NOTE: split needs /\|/ everywhere in this sub - the original
	# bare '|' is regex alternation and split between every character.
	# chomp is required so the last field does not keep its newline.
	open( my $app_fh, '<', $reference_folder . $app_map_file )
	  or die "Cannot open APPLICATION_Mapping.dat";
	while ( my $line = <$app_fh> ) {
		chomp $line;
		my @fields = split( /\|/, $line );
		$app_map{ $fields[0] } = $fields[1];
	}
	close $app_fh;

	# Process the device TAC lookup hashtable: field 0 -> field 1
	open( my $tac_fh, '<', $reference_folder . $device_tac_map_file )
	  or die "Cannot open Device_TAC_Mapping.dat";
	while ( my $line = <$tac_fh> ) {
		chomp $line;
		my @fields = split( /\|/, $line );
		$device_tac_map{ $fields[0] } = $fields[1];
	}
	close $tac_fh;

	# Process the device id lookup hashtable: field 2 -> field 3
	open( my $id_fh, '<', $reference_folder . $device_id_map_file )
	  or die "Cannot open Device_ID_Mapping.dat";
	while ( my $line = <$id_fh> ) {
		chomp $line;
		my @fields = split( /\|/, $line );
		$device_id_map{ $fields[2] } = $fields[3];
	}
	close $id_fh;

	# Process the market lookup hashtable: the key is field 0
	# concatenated with field 1 (LAC.CID), the value is field 3
	open( my $market_fh, '<', $reference_folder . $market_map_file )
	  or die "Cannot open Market_Mapping.dat";
	while ( my $line = <$market_fh> ) {
		chomp $line;
		my @fields = split( /\|/, $line );
		$market_map{ $fields[0] . $fields[1] } = $fields[3];
	}
	close $market_fh;
}

### Subroutine to digest one parsed record of a dataset file and
### accumulate its byte usage into the per-device, per-market and
### per-app totals plus the grand total.
sub process_item {
	print "@_\n" if $DEBUG_FLAG;

	# The first field is the phone number; it is not needed, so drop it
	my ( undef, $imei, $app_id, $down_bytes, $up_bytes, $lac, $cid ) = @_;

	# Resolve the application name, falling back to the 'missing'
	# entry ("N/A") for unknown application ids
	my $final_app_name = $app_map{'missing'};
	if ( exists $app_map{$app_id} ) {
		$final_app_name = $app_map{$app_id};
	}

	# Resolve the device name in two steps: the first 8 digits of the
	# IMEI (the TAC) map to a device id, which maps to a device name
	my $tac             = substr( $imei, 0, 8 );
	my $final_device_id = $device_tac_map{"missing"};
	if ( exists $device_tac_map{$tac} ) {
		$final_device_id = $device_tac_map{$tac};
	}
	my $final_device_name = "N/A";
	if ( $final_device_id ne "N/A" ) {
		$final_device_name =
		    exists $device_id_map{$final_device_id}
		  ? $device_id_map{$final_device_id}
		  : $device_id_map{"missing"};
	}

	# Resolve the market name, keyed by LAC concatenated with CID
	my $lac_cid = $lac . $cid;
	my $final_market_name =
	    exists $market_map{$lac_cid}
	  ? $market_map{$lac_cid}
	  : $market_map{"missing"};

	# Total bytes for this record: downlink plus uplink
	my $single_usage = $down_bytes + $up_bytes;

	# Accumulate into each per-category total and the grand total
	$usage_device_name{$final_device_name} += $single_usage;
	$usage_market{$final_market_name}      += $single_usage;
	$usage_app{$final_app_name}            += $single_usage;
	$usage_total += $single_usage;
}

# Subroutine to output the device-based result in the format
# 	"DeviceName|Bytes|CentileByDevice|CentileOfTotalBytes"
# "CentileByDevice" is the device's rank centile among all devices;
# "CentileOfTotalBytes" is its share of the total bytes as a centile.
# Rows are sorted by usage in descending order.
sub output_as_device_name {

	# Keys of %usage_device_name sorted by their usage, highest first
	my @device_name_keys =
	  sort { $usage_device_name{$b} <=> $usage_device_name{$a} }
	  keys %usage_device_name;

	# Build the output file name from the requested time window
	my $output_device_file =
	  'result_device_name' . "_$start_time" . "_$final_time" . '.dat';

	# Running rank used to compute each item's centile
	my $item_count = 0;

	# Get the output file handle (3-arg open, lexical handle)
	open( my $device_fh, '>', $output_device_file )
	  or die "Cannot write to DEVICE result file!";

	# Result file header.  The trailing newline was missing in the
	# original, which glued the first data row onto the header line.
	print {$device_fh}
	  "DeviceName|Bytes|CentileByDevice|CentileOfTotalBytes\n";

	# Traverse the devices from heaviest to lightest usage
	foreach my $key (@device_name_keys) {

		# Byte usage for this device
		my $each_bytes = $usage_device_name{$key};

		# Rank centile among all devices, rounded up to a whole percent
		my $centile_in_device =
		  int( ( ++$item_count / scalar(@device_name_keys) ) * 100 + 0.9999 );

		# Share of the total bytes, rounded up to a whole percent
		my $centile_in_total_bytes =
		  int( ( $each_bytes / $usage_total ) * 100 + 0.9999 );

		# Output one row to the target file
		print {$device_fh}
		  "$key|$each_bytes|$centile_in_device|$centile_in_total_bytes\n";
	}
	close $device_fh;
}

### Subroutine to output the market-based result; the structure mirrors
### output_as_device_name:
### 	"MarketName|Bytes|CentileByMarket|CentileOfTotalBytes"
sub output_as_market_name {

	# Markets sorted by usage, highest first
	my @market_name_keys =
	  sort { $usage_market{$b} <=> $usage_market{$a} }
	  keys %usage_market;

	my $output_market_file =
	  'result_market_name' . "_$start_time" . "_$final_time" . '.dat';

	# Running rank used to compute each item's centile
	my $item_count = 0;

	open( my $market_fh, '>', $output_market_file )
	  or die "Cannot write to MARKET result file!";

	# Header row; the trailing newline was missing in the original,
	# which glued the first data row onto the header line
	print {$market_fh}
	  "MarketName|Bytes|CentileByMarket|CentileOfTotalBytes\n";

	foreach my $key (@market_name_keys) {
		my $each_bytes = $usage_market{$key};

		# Rank centile among all markets, rounded up to a whole percent
		my $centile_in_market =
		  int( ( ++$item_count / scalar(@market_name_keys) ) * 100 + 0.9999 );

		# Share of the total bytes, rounded up to a whole percent
		my $centile_in_total_bytes =
		  int( ( $each_bytes / $usage_total ) * 100 + 0.9999 );

		print {$market_fh}
		  "$key|$each_bytes|$centile_in_market|$centile_in_total_bytes\n";
	}
	close $market_fh;
}

### Subroutine to output the application-based result; the structure
### mirrors output_as_device_name:
### 	"AppName|Bytes|CentileByApp|CentileOfTotalBytes"
sub output_as_app_name {

	# Applications sorted by usage, highest first
	my @app_name_keys =
	  sort { $usage_app{$b} <=> $usage_app{$a} }
	  keys %usage_app;

	my $output_app_file =
	  'result_app_name' . "_$start_time" . "_$final_time" . '.dat';

	# Running rank used to compute each item's centile
	my $item_count = 0;

	open( my $app_fh, '>', $output_app_file )
	  or die "Cannot write to APPLICATION result file!";

	# Header row; the trailing newline was missing in the original,
	# which glued the first data row onto the header line
	print {$app_fh} "AppName|Bytes|CentileByApp|CentileOfTotalBytes\n";

	foreach my $key (@app_name_keys) {
		my $each_bytes = $usage_app{$key};

		# Rank centile among all apps, rounded up to a whole percent
		my $centile_in_app =
		  int( ( ++$item_count / scalar(@app_name_keys) ) * 100 + 0.9999 );

		# Share of the total bytes, rounded up to a whole percent
		my $centile_in_total_bytes =
		  int( ( $each_bytes / $usage_total ) * 100 + 0.9999 );

		print {$app_fh}
		  "$key|$each_bytes|$centile_in_app|$centile_in_total_bytes\n";
	}
	close $app_fh;
}

