
# coding: utf-8

# In[ ]:

"""Part 1: Apache Web Server Log file format"""
"""The log files that we use for will look something like this:
   127.0.0.1 - - [01/Aug/1995:00:00:01 -0400] "GET /images/launch-logo.gif HTTP/1.0" 200 1839
   Each part of this log entry is described below.
   127.0.0.1
   This is the IP address (or host name, if available) of the client (remote host) which made the request to the server.
   -
   The "hyphen" in the output indicates that the requested piece of information (user identity from local logon) is not available.
   [01/Aug/1995:00:00:01 -0400]
   The time that the server finished processing the request. The format is:
   [day/month/year:hour:minute:second timezone]
   day = 2 digits
   month = 3 letters
   year = 4 digits
   hour = 2 digits
   minute = 2 digits
   second = 2 digits
   zone = (+ | -) 4 digits 
   "GET /images/launch-logo.gif HTTP/1.0"
   This is the first line of the request string from the client. It consists of three components: the request method (e.g., GET, POST, etc.), 
   the endpoint (a Uniform Resource Identifier), and the client protocol version.
   200
   This is the status code that the server sends back to the client. This information is very valuable, because it reveals whether the request 
   resulted in a successful response (codes beginning in 2), a redirection (codes beginning in 3), an error caused by the client (codes beginning in 4), 
   or an error in the server (codes beginning in 5). The full list of possible status codes can be found in the HTTP specification (RFC 2616 section 10).
   1839
   The last entry indicates the size of the object returned to the client, not including the response headers. If no content was returned to 
   the client, this value will be "-" (or sometimes 0).
   Note that log files contain information supplied directly by the client, without escaping. Therefore, it is possible for malicious clients 
   to insert control-characters in the log files, so care must be taken in dealing with raw logs.
"""


# In[ ]:

"""(1a) Parsing Each Log line"""

import re
import datetime

from pyspark.sql import Row

# Map three-letter month abbreviations (as used in Apache timestamps)
# to month numbers 1-12, for parse_apache_time below.
# Fix: the original had a stray quote after 12 ("12'}") -- a syntax error.
month_map = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
    'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}

def parse_apache_time(s):
    """Convert a timestamp in Apache log format to a Python datetime.

    Args:
        s (str): date and time in Apache format, e.g. '01/Aug/1995:00:00:01 -0400'
    Returns:
        datetime.datetime: the parsed timestamp (the timezone offset is ignored)
    """
    # Fixed-width layout: dd/Mon/yyyy:hh:mm:ss +zzzz
    year = int(s[7:11])
    month = month_map[s[3:6]]
    day = int(s[0:2])
    hour = int(s[12:14])
    minute = int(s[15:17])
    second = int(s[18:20])
    return datetime.datetime(year, month, day, hour, minute, second)
              
def parseApacheLogLine(logline):
    """Parse a line in the Apache Common Log format.

    Args:
        logline (str): a line of text in the Apache Common Log format
    Returns:
        tuple: either (Row of parsed fields, 1) on success, or
        (original invalid log line, 0) when the line does not match
        APACHE_ACCESS_LOG_PATTERN
    """
    # Fix: pattern name was misspelled (PATIERN) and the statement had a
    # stray trailing colon.
    match = re.search(APACHE_ACCESS_LOG_PATTERN, logline)
    if match is None:
        return (logline, 0)
    # Per the header notes, a missing content size is logged as "-";
    # treat it as 0 bytes.  (Fix: original tested `= '_'`.)
    size_field = match.group(9)
    if size_field == '-':
        size = long(0)
    else:
        size = long(size_field)
    return (Row(
        host = match.group(1),
        client_identd = match.group(2),
        user_id = match.group(3),
        date_time = parse_apache_time(match.group(4)),
        method = match.group(5),
        endpoint = match.group(6),
        protocol = match.group(7),
        response_code = int(match.group(8)),
        content_size = size), 1)

# A regular expression pattern to extract the nine fields from a log line:
# host, client identd, user id, timestamp, method, endpoint, protocol,
# status code, content size.  Raw string so backslash escapes (\S, \d, \[)
# reach the regex engine untouched.
APACHE_ACCESS_LOG_PATTERN = r'^(\S+) (\S+) (\S+) \[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S+)\s*(\S*)\s*" (\d{3}) (\S+)'



# In[ ]:

"""(1b) Configuration and Initial RDD Creation"""
"""We are ready to specify the input log file and create an RDD containing the parsed log file data
   Note : you need to access your data file based on your filePath; I will use the symbol '#' to represent the filePath
   To create the primary RDD that we'll use in the rest of this assignment, we first load the file using sc.textFile(logFile) to convert each line
   of the file into an element in an RDD
   Next, we use map(parseApacheLogLine) to apply the parse function to each element ( that is, a line from the log file) in the RDD and turn each
   line into a pair Row object
   Finally, we cache the RDD in memory since we'll use it throughout this assignment
"""

import sys
import os
from test_helper import Test

# Build the path to the input log file.  '#' is a placeholder -- replace it
# with the actual file name / relative path of your log data (see the note
# above about filePath).
baseDir = os.path.join('data')
inputPath = os.path.join('#')
logFile = os.path.join(baseDir, inputPath)

def parseLogs():
    """Read and parse log file"""
    parsed_logs = (sc.textFile(logFile).map(parseApacheLogLine).cache())
    access_logs = (parsed_logs.filter(lambda s:s[1] == 1).map(lambda s:s[0]).cache())
    failed_logs = (parsed_logs.filter(lambda s:s[1] == 0).map(lambda s:s[0]))
    failed_logs_count = failed_logs.count()
    if failed_logs_count > 0:
        print 'Number of invalid logline: %d' % failed_logs.count()
        for line in failed_logs.take(20):
            print 'Invalid logline: %s' % line
            
    print 'Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (parsed_logs.count(), access_logs.count(), failed_logs.count())
    return parsed_logs, access_logs, failed_logs

# Build the primary RDDs used throughout the rest of this assignment.
parsed_logs, access_logs, failed_logs = parseLogs()


# In[ ]:

"""Part 2: Sample Analyses on the Web Server Log File"""

"""(2a) Example: Content Size Statistics"""
"""
   Let's compute some statistics about the sizes of content being returned by the web server. In particular, we'd like to know what are the average, 
   minimum, and maximum content sizes.
"""
# Calculate statistics based on the content size
content_sizes = access_logs.map(lambda log:log.content_size).cache()
print 'Content Size Avg: %i, Min: %i, Max: %s' % (
    content_sizes.reduce(lambda a, b: a+b)/content_sizes.count(),
    content_sizes.min(),
    content_sizes.max())


# In[ ]:

"""(2b) Example: Response Code Analysis"""
"""
   Next, let's look at the response codes that appear in the log. As with the content size analysis, first we create a new RDD by using a lambda
   function to extract the response_code field from the access_logs RDD. Using a pair tuple consisting of the response code and 1 will let us count
   how many records have a particular response code.
"""
# Response Code to Count
responseCodeCount = (access_log.map(lambda log:(log.response_code, 1).reduceByKey(lambda a, b: a+b).cache())
responseCodeToCountList = responseCodeToCount.take(100)
print 'Found %d response codes' % len(responseCodeToCountList)
print 'Response Code Counts: %s' % responseCodeToCountList
                     


# In[ ]:

"""(2c) Example: Response Code Graphing With matplotlib"""
"""
    First we need to extract the labels and fractions for the graph. We do this with two separate map functions with a lambda functions.Next 
    we create a figure with figure() constructor and use the pie() method to create the pie plot.
    
"""
labels = responseCodeToCount.map(lambda (x, y): x).collect()
print labels
count = access_logs_count()
fracs = responseCodeToCount.map(lambda (x, y): (float(y)/count)).collect()
print fracs

import matplotlib.pyplot as plt

def pie_pct_format(value):
    """Determine the appropriate format string for the pie chart percentage label
    Args:
       value: value of the pie slice
    Returns:
       str: formated string label; if the slice is too small to fit, returns an empty string for label
    """
    return '' if value < 7 else '%.Of%%' % value

fig = plt.figure(figsize=(4.5,4.5), facecolor='white', edgecolor='white')
colors = ['yellowgreen', 'lightskyblue', 'gold', 'purple', 'lightcoral', 'yellow', 'black']
explode = (0.05, 0.05, 0.1, 0, 0, 0, 0)
patches, texts, autotexts = plt.pie(fracs, labels=labels, colors=colors, explode=explode, autopct=pie_pct_format,
                                    shadow=False, startangle=125)
for text, autotext in zip(texts, autotexts):
    if autotext.get_text() == '':
        text.set_text('')
plt.legend(labels, loc=(0.80, -0.1), shadow=True)
pass


# In[ ]:

"""(2d) Example: Frequent Hosts"""
# Any hosts that has accessed the server more than 10 times
hostCountPairTuple = access_logs.map(lambda log: (log.host, 1))

hostSum = hostCountPairTuple.reduceByKey(lambda a, b: a+b)

hostMoreThan10 = hostSum.filter(lambda s: s[1] > 10)

hostsPick20 = (hostMoreThan10.map(lambda s: s[0].take(20)))

print 'Any 20 hosts that have accessed more than 10 times: %s' % hostsPick20


# In[ ]:

"""(2e) Example: Visualizing Endpoints"""

# Hits per endpoint; cached because it is collected twice below.
endpoints = (access_logs
             .map(lambda log: (log.endpoint, 1))
             .reduceByKey(lambda a, b: a + b)
             .cache())
# keys()/values() are equivalent to mapping out the pair elements.
ends = endpoints.keys().collect()
counts = endpoints.values().collect()

# Line plot of every endpoint's hit count, in collection order.
fig = plt.figure(figsize=(8, 4.2), facecolor='white', edgecolor='white')
plt.axis([0, len(ends), 0, max(counts)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Endpoints')
plt.ylabel('Number of Hits')
plt.plot(counts)
pass


# In[ ]:

"""(2f) Example: Top Endpoints"""
# Top Endpoints
endpointCounts = (access_logs.map(lambda log: (log.endpoint, 1)).reduceByKey(lambda a, b: a+b))

topEndpoints = endpointCounts.takeOrdered(10, lambda s: -1*s[1])

print 'Top Ten Endpoints: %s' % topEndpoints


# In[ ]:

"""Part 3: Analyzing Web Server Log File"""

"""(3a) Exercise: Top Ten Error Endpoints"""
"""
   Create a sorted list containing the top ten endpoints and the number of times that they were accessed with a non-200 return code.
"""
not200 = access_logs.filter(lambda log: log.response_code != 200)

endpointCountPairTuple = not200.mao(lambda log:(log.endpoint, 1))

endpointSum = endpointCountPairTuple.reduceByKey(lambda x, y: x+y)

topTenErrURLs = endpointSum.takeOrdered(10, lambda s: -1*s[1])

print 'Top Ten failed URLs: %s' % topTenErrURLs


# In[ ]:

"""(3b) Exercise: Number of Unique Hosts"""
hosts = access_logs.map(lambda log: log.host)

uniqueHosts = hosts.distinct()

uniqueHostCount = uniqueHosts.count()

print 'Unique hosts: %d' % uniqueHostCount


# In[ ]:

"""(3c) Exercise: Number of Unique Daily Hosts"""
"""
   Let's determine the number of unique hosts in the entire log on a day-by-day basis
"""
dayToHostPairTuple = access_logs.map(lambda log: (log.date_time.day, log.host)).distinct()

dayGroupedHosts = dayToHostPairTuple.map(lambda (x, y): (x, 1))

dayHostCount = dayGroupedHosts.reduceByKey(lambda x, y: x+y)

dailyHosts = (dayHostCount.sortByKey()).cache()

dailyHostsList = dailyHosts.take(30)

print 'Unique hosts per day: %s' % dailyHostsList
print dayToHostPairTuple


# In[ ]:

"""(3d) Exercise: Visualizing the Number of Unique Daily Hosts"""

# x-axis: day numbers; y-axis: unique-host counts for those days.
daysWithHosts = dailyHosts.keys().collect()
hosts = dailyHosts.values().collect()

fig = plt.figure(figsize=(8, 4.5), facecolor='white', edgecolor='white')
plt.axis([min(daysWithHosts), max(daysWithHosts), 0, max(hosts) + 500])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Hosts')
plt.plot(daysWithHosts, hosts)
pass


# In[ ]:

"""(3e) Exercise: Average Number of Daily Requests per Hosts"""
"""
   Let's determine the average number of requests on a day-by-day basis
"""
dayAndHostTuple = access_logs.map(lambda log: (log.date_time.day, log.host))

groupedByDay = dayAndHostTuple.map(lambda (x, y): (x, 1)).reduceByKey(lambda x, y : x+y)

sortedByDay = groupedByDay.sortByKey()

avgDailyReqPerHost = (sortedByDay.join(dailyHosts).map(lambda (x, (y,z)):(x, y/z)).sortByKey()).cache()

avgDailyReqPerHostList = avgDailyReqPerHost.take(30)

print 'Average number of daily requests per Hosts is %s' % avgDailyReqPerHostList


# In[ ]:

"""(3f) Exercise: Visualizing the Average Daily Requests per Unique Host"""

# x-axis: day numbers; y-axis: average requests per unique host.
daysWithAvg = avgDailyReqPerHost.keys().collect()
avgs = avgDailyReqPerHost.values().collect()

fig = plt.figure(figsize=(8, 4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithAvg), 0, max(avgs) + 2])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Average')
plt.plot(daysWithAvg, avgs)
pass


# In[ ]:

"""Part 4: Exploring 404 Response Codes"""

"""(4a) Exercise: Counting 404 Response Codes"""
"""
   Create a RDD containing only log records with a 404 response code
"""

badRecords = (access_logs.filter(lambda log: log.response_code == 404)).cache()
print 'Found %d 404 URLs' % badRecords.count()


# In[ ]:

"""(4b) Exercise: Listing 404 Response Code Records"""

badEndpoints = badRecords.map(lambda log:log.endpoint)

badUniqueEndpoints = badEndpoints.distinct()

badUniqueEndpointsPick40 = badUniqueEndpoints.take(40)

print '404 URLS: %s' % badUniqueEndpointsPick40


# In[ ]:

"""(4c) Exercise: Listing the Top Twenty 404 Response Code Endpoints"""
badEndpointsCountPairTuple = badRecords.map(lambda log:(log.endpoint, 1))

badEndpointsSum = badEndpointsCountPairTuple.reduceByKey(lambda x, y: x+y)

badEndpointsTop20 = badEndpointsSum.takeOrdered(20, lambda s:-1*s[1])
print 'Top Twenty 404 URLs: %s' % badEndpointsTop20


# In[ ]:

"""(4d) Exercise: Listing the Top Twenty-five 404 Response Code Hosts"""
"""
   Instead of looking at the endpoints that generated 404 errors, let's look at the hosts that encountered 404 errors
"""
errHostsCountPairTuple = badRecords.map(lambda log: (log.host, 1))
errHostsSum = errHostsCountPairTuple.reduceByKey(lambda x, y: x+y)
errHostsTop25 = errHostsSum.takeOrdered(25, lambda s: -1*s[1])

print 'Top 25 hosts that generated errors: %s' % errHostsTop25


# In[ ]:

"""(4e) Exercise: Listing 404 Response Codes per Day"""

errDateCountPairTuple = badRecords.map(lambda log:(log.date_time.day, 1))

errDateSum = errDateCountPairTuple.reduceByKey(lambda x, y: x+y)

errDateSorted = (errDateSum.sortByKey()).cache()

errByDate = errDataSorted.collect()

print'404 Errors by day: %s' % errByDate


# In[ ]:

"""(4f) Exercise: Visualizing the 404 Response Codes by Day"""

# x-axis: day numbers; y-axis: 404 counts for those days.
daysWithErrors404 = errDateSorted.keys().collect()
errors404ByDay = errDateSorted.values().collect()

fig = plt.figure(figsize=(8, 4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithErrors404), 0, max(errors404ByDay)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('404 Errors')
plt.plot(daysWithErrors404, errors404ByDay)
pass


# In[ ]:

"""(4g) Exercise: Top Five Days for 404 Response Codes"""

topErrDate = errDateSorted.takeOrdered(5, lambda s:-1*s[1])
print 'Top Five dates for 404 requests: %s' % topErrDate


# In[ ]:

"""(4h) Exercise: Hourly 404 Response Codes"""

hourCountPairTuple = badRecords.map(lambda log: (log.date_time.hour, 1))

hourRecordsSum = hourCountPairTuple.reduceByKey(lambda x, y: x+y)

hourRecordsSorted = (hourRecordsSum.sortByKey()).cache()

errHourList = hourRecordsSorted.collect()

print 'Top hours for 404 requests: %s' % errHourList


# In[ ]:

"""(4i) Exercise: Visualizing the 404 Response Codes by Hour"""

hoursWithErroes404 = hourRecordsSorted.map(lambda (x, y): x).collect()
errors404ByHours = hourRecordsSorted.map(lambda (x, y): y).collect()

fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(hoursWithErrors404), 0, max(errors404ByHours)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Hour')
plt.ylabel('404 Errors')
plt.plot(hoursWithErrors404, errors404ByHours)
pass

