# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Norman Kosmal (n.kosmal -at- gmx.de)
# All rights reserved.
#
# This software is licensed under Creative Commons 
# Attribution-Noncommercial-No Derivative Works 3.0 Unported License.
# See also: http://creativecommons.org/licenses/by-nc-nd/3.0/

from datetime import date

from constants import cs_User_Agent,cs_Referer,c_ip
import traceback

__toolname__ = "Logfileparser v1.0"
__author__   = "Norman Kosmal"

class Logfilecontainer(object):
    """
    Container that stores exactly one separated logfile entry.

    Logfile entries are separated at each line that starts with
    "date time ..." etc., e.g. this would be the first entry:
    date time s-sitename cs-method cs-uri-stem cs-uri-query s-port c-ip etc...
    2009-01-18 00:28:57 W3SVC194 GET /robots.txt - 80 85.88.36.37 etc...

    And this the second entry:
    date time s-sitename cs-method cs-uri-stem cs-uri-query s-port c-ip etc...
    2009-01-18 00:57:32 W3SVC194 GET /index.asp - 80 208.80.194.54 etc...

    See also the W3C extended log file format documentation:
    http://www.microsoft.com/technet/prodtechnol/WindowsServer2003/Library
    /IIS/ffdd7079-47be-4277-921f-7a3a6e610dcb.mspx?mfr=true

    The line that starts with "date time ..." is stored in the discriptor
    member; all lines below it go into the loglines list.
    """

    def __init__(self):
        # Field-descriptor line; the parser later assigns a string here.
        self.discriptor = []
        # Every data line belonging to this descriptor.
        self.loglines = []
        # Date taken from the logfile's "#Date: " header line, if any.
        self.logfile_date = None

    
class Useragents(object):
    """Collects every user agent string found in a logfile."""

    def __init__(self):
        # All user agents, in order of appearance (duplicates included).
        self.useragents = []

class Referer(object):
    """Collects every referer found in a logfile."""

    def __init__(self):
        # All referers, in order of appearance (duplicates included).
        self.referer = []

class Ip(object):
    """
    Collects the Ip addresses found in a logfile.

    iplist keeps every occurrence (including duplicates) and can be used
    to determine how many times your page got viewed.
    ipset contains only unique Ip addresses and can be used to determine
    how many distinct hits your page has got.
    """

    def __init__(self):
        self.iplist = []
        self.ipset  = None
             
def clean_log(file = None, dir = None):
    """
    Read a logfile and split it into Logfilecontainer objects.

    Lines starting with "#Software: ", "#Version: " or "#Date: " are
    removed (the date from the "#Date: " line is remembered), the
    "#Fields: " prefix is sliced off the field-descriptor lines and "&"
    is escaped as "&amp;".  Each remaining "date ..." descriptor line
    then starts a new Logfilecontainer; all data lines below it are
    collected into that container's loglines list.  In the descriptor
    "-" and "(" become "_" and ")" is dropped, because the tokens are
    later used as constant/variable names.

    Params:
    file = path to the logfile that's going to be parsed.
    dir  = directory the file exists in (optional, joined with file).

    clean_log(file = None, dir = None) -> container_list
    """
    search_terms = ("#Software: ", "#Version: ", "#Date: ")
    container_list = []
    j = -1
    # Fix: initialise the date up front.  Previously it was only assigned
    # inside the "#Date: " branch, so a logfile without a "#Date: " line
    # raised UnboundLocalError further down.  Containers now get None
    # instead.  (Renamed from "date" to stop shadowing datetime.date.)
    logfile_date = None

    if dir:
        file = dir + "\\" + file

    in_file = open(file, "r")
    try:
        lines = in_file.readlines()
    finally:
        in_file.close()

    # Clear lines that start with the above specified terms; if a line
    # starts with "#Fields: " slice the "#Fields: " part off.
    for i in range(len(lines)):
        if lines[i].startswith(search_terms):
            if lines[i].startswith("#Date: "):
                logfile_date = lines[i].split()[1]
            lines[i] = ""
        elif lines[i].startswith("#Fields: "):
            lines[i] = lines[i][9:]

        if lines[i].find("&") != -1:
            lines[i] = lines[i].replace("&", "&amp;")
        # TODO: handle "++" and "+" chars in lines (regex), see IIS
        # space-encoding in cs(User-Agent)/cs(Referer) fields.

    # For each line that starts with "date" create a new Logfilecontainer
    # and store the line in its discriptor member.  Following lines are
    # added to that same object's loglines list until a new "date" line
    # shows up.  Empty lines are ignored.
    for line in lines:
        if line == "" or line == "\n" or line == "\r\n":
            continue
        elif line.startswith("date"):
            container = Logfilecontainer()
            container.discriptor = line
            container.logfile_date = logfile_date
            container_list.append(container)
            j += 1
        else:
            container_list[j].loglines.append(line)

    # Replace "-" and "(" with "_" and drop ")" in the discriptor string,
    # since we can't use these chars in variable names (they are used for
    # naming the constants).
    for obj in container_list:
        obj.discriptor = obj.discriptor.replace("-", "_").replace(
                                                "(", "_").replace(")", "")
    return container_list
       
def create_logfile(container = None, bulk = False):
    """
    Write the cleaned logfile entries back to disk.

    The output goes to ../output/logs/single/<today>/ or, with the bulk
    option, to ../output/logs/bulk/<today>/ and is named
    clean_log_<logfile_date>.log.

    Params:
    container = container list that contains Logfilecontainer objects.
    bulk      = the bulk option.

    create_logfile(container = None, bulk = False) ->
    """
    import os

    logfile_date = container[0].logfile_date
    today = date.today().strftime("%d-%m-%Y")

    if bulk:
        out_dir = "../output/logs/bulk/" + today + "/"
    else:
        out_dir = "../output/logs/single/" + today + "/"

    # makedirs (instead of mkdir) also creates missing parent directories.
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    out_path = out_dir + "clean_log_" + logfile_date + ".log"

    out_file = open(out_path, "w")
    try:
        for obj in container:
            out_file.write(obj.discriptor)
            for log in obj.loglines:
                out_file.write(log)
            # Blank line between logfile entries.
            out_file.write("\r\n")
    finally:
        # Close the file even if a write fails.
        out_file.close()
    return

def create_html_file(container = None, bulk = False):
    """
    Create a non formated html file from a container list.

    Writes the standard doctype/header stored in
    ../files/html_header.txt and renders the logfile entries as one html
    table.  The output goes to ../output/html/single/html/<today>/ or,
    with the bulk option, to ../output/html/bulk/html/<today>/.

    Params:
    container = container list that contains Logfilecontainer objects.
    bulk      = the bulk option.

    create_html_file(container = None, bulk = False) ->
    """
    import os

    logfile_date = container[0].logfile_date
    today = date.today().strftime("%d-%m-%Y")

    if bulk:
        out_dir = "../output/html/bulk/html/" + today + "/"
    else:
        out_dir = "../output/html/single/html/" + today + "/"

    # makedirs (instead of mkdir) also creates missing parent directories.
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    out_path = out_dir + "html_log_" + logfile_date + ".html"

    out_file = open(out_path, "w")
    try:
        header_in = open("../files/html_header.txt", "r")
        try:
            out_file.write(header_in.read())
        finally:
            header_in.close()

        # Build a html table from the objects in the container list.
        for obj in container:
            # Header row: one bold cell per descriptor token.
            out_file.write('<tr bgcolor="red">' + "\r\n")
            for column in obj.discriptor.split():
                out_file.write("<td><b>" + column + "</b></td>")
            out_file.write("\r\n" + '</tr>' + "\r\n")

            # A new table row for each logline, one cell per field.
            for entry in obj.loglines:
                out_file.write("<tr>" + "\r\n")
                for field in entry.split():
                    out_file.write("<td>" + field + "</td>")
                out_file.write("\r\n" + '</tr>' + "\r\n")
        out_file.write("</table>\r\n</body></html>")
    finally:
        # Close the file even if a write fails.
        out_file.close()
    return
   
def create_yui_html_file(container = None, bulk = False):
    """
    Create a html file whose table is rendered by javaScript (YUI) and
    supports column sort and pagination.

    The html header comes from ../files/yui_html_header.txt and the
    javaScript from ../files/yui_script.txt.  The output goes to
    ../output/html/single/yui/<today>/ or, with the bulk option, to
    ../output/html/bulk/yui/<today>/.

    Params:
    container = a container list that contains Logfilecontainer objects.
    bulk      = the bulk option.

    create_yui_html_file(container = None, bulk = False) ->
    """
    import os

    logfile_date = container[0].logfile_date
    today = date.today().strftime("%d-%m-%Y")

    if bulk:
        out_dir = "../output/html/bulk/yui/" + today + "/"
    else:
        out_dir = "../output/html/single/yui/" + today + "/"

    # makedirs (instead of mkdir) also creates missing parent directories.
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    file_name = out_dir + "yui_log_" + logfile_date + ".html"

    out_file = open(file_name, "w")
    try:
        # Write the html header for the yui powered html file.
        yui_html_in = open("../files/yui_html_header.txt", "r")
        try:
            out_file.write(yui_html_in.read())
        finally:
            yui_html_in.close()

        # Table head: the column names stored in the discriptor of the
        # first Logfilecontainer object.
        for column in container[0].discriptor.split():
            out_file.write('<th>' + column + '</th>\n')
        out_file.write('</tr>\n</thead>\n')

        # One table row per logline, one cell per field.
        for obj in container:
            for line in obj.loglines:
                out_file.write('<tr>\n')
                for field in line.split():
                    out_file.write('<td>' + field + '</td>\n')
                out_file.write('</tr>\n')
        out_file.write('</table>\n</div>\n<center><div id="my_pagination"></div></center>\n')

        # Append the javaScript that makes the table columns sortable and
        # adds pagination to the html file.
        yui_script_in = open("../files/yui_script.txt", "r")
        try:
            out_file.write(yui_script_in.read())
        finally:
            yui_script_in.close()
        out_file.write("</body>\n</html>\n")
    finally:
        # Close the file even if a write fails.
        out_file.close()
    return

def get_useragents(container = None):
    """
    Collect the user agent column of every logline in the container
    list and return it wrapped in a Useragents object.

    Params:
    container = container list that contains Logfilecontainer objects.

    get_useragents(container = None) -> agents
    """
    result = Useragents()
    for logcontainer in container:
        result.useragents.extend(
            logline.split()[cs_User_Agent]
            for logline in logcontainer.loglines)
    return result

def get_referer(container = None):
    """
    Collect the referer column of every logline in the container list
    and return it wrapped in a Referer object.

    Params:
    container = container list that contains Logfilecontainer objects.

    get_referer(container = None) -> referer
    """
    result = Referer()
    for logcontainer in container:
        result.referer.extend(
            logline.split()[cs_Referer]
            for logline in logcontainer.loglines)
    return result

def get_ips(container = None):
    """
    Collect the client ip column of every logline in the container list
    and return it wrapped in an Ip object.

    iplist keeps every occurrence (page views); ipset holds the unique
    addresses (distinct visitors).

    Params:
    container = container list that contains Logfilecontainer objects.

    get_ips(container = None) -> ip_addresses
    """
    ip_addresses = Ip()
    for obj in container:
        for entry in obj.loglines:
            ip_addresses.iplist.append(entry.split()[c_ip])
    # Build the unique-ip set once at the end instead of rebuilding it
    # for every single logline (was accidental O(n^2) work).
    # NOTE: for an empty container ipset is now an empty set rather than
    # the None left by the old code; both are falsy.
    ip_addresses.ipset = set(ip_addresses.iplist)
    return ip_addresses

def path():
    """Return the current working directory."""
    import os
    return os.getcwd()

def advanced_help():
    print ""
    print "[ADVANCED HELP]"
    print ""
    print "Usage of Logfileparser using the command line:"
    print ""
    print "If you want to parse a single logfile you have to"
    print ""
    print "a)Specify the path to the logfile:"
    print 'IIS_6_0.py -l "C:/Docs/myLogfile.log"'
    print ""
    print "b)If you want to create a standard HTML file:"
    print 'IIS_6_0.py -l "C:/Docs/myLogfile.log" -s'
    print ""
    print "c)If you want to create a YUI powered HTML file:"
    print 'IIS_6_0.py -l "C:/Docs/myLogfile.log" -y'
    print ""
    print "d)For bulkoperation on a logfile directory:"
    print 'IIS_6_0.py -l "C:/Docs/" -b'
    print ""
    print ""
    print "[EXAMPLES]"
    print ""
    print '1.)IIS_6_0.py -l "C:/Docs/" -b -y'
    print "This command will attempt to parse all logfiles found in the logfile directory"
    print "and create a YUI powered HTML file for each logfile."
    print ""
    print '2.)IIS_6_0.py -l "C:/Docs/logfile.log" - y'
    print "This command attempts to parse a single logfile and create a YUI powered HTML file."
    print ""
    print "Parsed logfiles and HTML files can be found in the output directory."
    return

def process_bulk(logfile = None, yui = False, sta = False):
    """
    Parse every "*.log" file found in a directory.

    For each logfile a cleaned copy is written via create_logfile();
    optionally a YUI powered HTML file (yui) and/or a standard HTML file
    (sta) is created as well.  Progress and errors are printed to stdout.

    Params:
    logfile = path of the directory that contains the logfiles.
    yui     = also create YUI powered HTML files.
    sta     = also create standard HTML files.
    """
    logfiles = []
    suffix   = ".log"
    error_list = []
    bulk = True
    dir = logfile
    
    #Get all logs in directory
    if logfile != None:
        import os
        files = os.listdir(logfile)
        for file in files:
            if file.endswith(suffix):
                logfiles.append(file)
    
    print ""
    print "-[START]-"
    print ""
    print "Directory to parse ->", logfile
    print ""
    print "-[STATUS]-"

    if len(logfiles) > 0:
        print ""
        print "Found ", len(logfiles), " logfiles in directory."
        print ""
        for log in logfiles:
            try:
                container = clean_log(log,dir)
                create_logfile(container, bulk)
                print "Parsed: " + dir + "\\" + log
                if yui:
                    create_yui_html_file(container, bulk)
                    print "Created YUI powered HTML file."
                if sta:
                    create_html_file(container, bulk)
                    print "Created HTML file."
                print ""
            #Collect the error plus a traceback but keep processing the
            #remaining logfiles.
            except Exception, e:
                error_list.append(e)
                error_list.append(traceback.format_exc(10))
            
        if error_list != []:
            print ""
            #NOTE(review): despite this message the loop above already
            #continued past the failing logfile(s).
            print "Aborted because of following error message:"
            for error in error_list:
                print error
        else:
            print ""
            print "-[END]-"
            exit()
    else:
        print ""
        print "No logfiles found in directory."
        print ""
        print "-[END]-"
        exit()

def process_single_logfile(logfile = None, yui = False, sta = False):
    """
    Parse one single logfile.

    A cleaned copy is written via create_logfile(); optionally a YUI
    powered HTML file (yui) and/or a standard HTML file (sta) is created
    as well.  Progress and errors are printed to stdout.

    Params:
    logfile = path to the logfile that's going to be parsed.
    yui     = also create a YUI powered HTML file.
    sta     = also create a standard HTML file.
    """
    error_list = []
    
    print ""
    print "-[START]-"
    print ""
    print "Logfile to parse ->", logfile
    print ""
    print "-[STATUS]-"
    print ""
    try:
        container = clean_log(logfile)
        create_logfile(container)
        print "Parsed logfile."
        if yui:
            create_yui_html_file(container)
            print "Created YUI powered HTML file."
        if sta:
            create_html_file(container)
            print "Created standard HTML file."
    #Collect the error plus a traceback; it is reported in the finally
    #block below.
    except Exception, e:
        error_list.append(e)
        error_list.append(traceback.format_exc(10))
    finally:
        if error_list != []:
            print ""
            print "Aborted because of following error message:"
            for error in error_list:
                print error
        else:
            print ""
            print "-[END]-"
            exit()
            
def main():
    """
    Command line entry point.

    Parses the options and dispatches to advanced_help(), process_bulk()
    or process_single_logfile().  Prints an error message when the given
    logfile/directory does not exist or options are missing.
    """
    from optparse import OptionParser

    usage = "usage: %prog [options] arg1 arg2"
    parser = OptionParser(usage=usage)
    
    parser.add_option("-l", "--logfile", type="string", dest="logfile",
                      help="Path to a directory or filename, with option --bulk on assuming it is" +
                      " a directory name ", metavar="LF")
    parser.add_option("-b", "--bulk", action="store_true", dest="bulk",
                      help="Parse all logfiles in the directory specified in --logfile", 
                      metavar="B")
    parser.add_option("-a", "--ah", action="store_true", dest="advanced_help",
                      help="Prints advanced help informations", metavar="AH")
    parser.add_option("-y", "--yui", action="store_true", dest="yui",
                      help="Creates YUI powered HTML files", metavar="YUI")
    parser.add_option("-s", "--sta", action="store_true", dest="sta",
                      help="Creates standard HTML files", metavar="STA")
    (options, args) = parser.parse_args()
    
    if options.advanced_help:
        advanced_help()
        exit()
        
    elif options.bulk:
        #Bulk mode: --logfile must name an existing directory.
        if options.logfile != None:
            import os.path
            if os.path.isdir(options.logfile):
                process_bulk(options.logfile, options.yui, options.sta)
                exit()
            else:
                print "[ERROR]"
                print "Could not find directory."
                print options.logfile
                exit()
        else:
            print "Logfile directory is missing"
            exit()
                 
    elif options.logfile:
        #Single mode: --logfile must name an existing file (not a dir).
        if options.logfile != None:
            import os.path
            if os.path.exists(options.logfile):
                if os.path.isdir(options.logfile):
                    print ""
                    print "[ERROR]"
                    print "Logfile is a directory."
                    print "If you want to parse logfiles in that directory please"
                    print "provide the -b [bulk] option."
                    exit()
                else:
                    process_single_logfile(options.logfile, options.yui, options.sta)
                    exit()
            else:
                print "[ERROR]"
                print "File not found." 
                print options.logfile
                exit()
        else:
            print "Logfile name is missing!"
            exit()
    else:
        #No usable options given: show the advanced help.
        print ""
        print "Please provide Options"
        print ""
        advanced_help()
      
if __name__ == "__main__":
    """
    Info:
    The clean_log() method is called first. Argument is the path to the
    logfile that needs to be parsed. That method returns a container list containing 
    Logfilecontainer objects that can be used for further processing.
    
    Example [Asuming you imported this module]:
    First get the container list containing Logfilecontainer objects.
    container = clean_log(file)

    If you want to create a YUI powered HTML file from this container you call:
    create_yui_html_file(container)
    which creates the HTMl file and stores it in the output directory.
    
    If you only want to save the parsed logfile call:
    create_logfile(container)
     
    If this file is executed from a shell, pass in the desired options.
    Type 'IIS_6_0.py -h' for more info on available options.
    """
    
    main()