import fileinput
import sys
import time
import cPickle
import pickle
import csv
import datetime

# Indices of the fields produced by getlogfields() for one log record.
# Index 2 is unused here — presumably the remote-user field; verify against
# the actual log format.
DOMAIN_NAME = 0
CLIENT_IP = 1
COMPLETION_TIME = 3    # [day/Mon/year:HH:MM:SS zone]
HTTP_REQUEST = 4       # "METHOD resource protocol"
STATUS_CODE = 5
RETURNED_SIZE = 6
REFERRER = 7
USER_AGENT = 8
# Global session table filled by hashing():
#   user_id -> {'1': (last_ip, last_access_time), ip: (last_datetime, [domain ids])}
dic={}

def hashing():
    """
    End-to-end log cleaning and sessionisation pipeline (Python 2 script).

    Reads a raw access log, an ad-server blacklist and a previously pickled
    domain->id map (all hard-coded D:\\ paths), filters out static resources
    (jpg/png/gif/css), non-2xx responses and ad-server hits, then groups the
    surviving page views into per-user sessions stored in the module-global
    `dic`.  Finally writes per-session CSVs, prints filter statistics and
    pickles the updated domain map.  Calls sys.exit() at the end.
    """
    start = time.clock()
    count =0
    totalcount = 0
    jpgcount = 0
    pngcount = 0
    csscount = 0
    gifcount =0
    ad_count=0
    unsuccesscount =0
    nonjpgcount = 0
    cleaned_count=0
    duplicate_aceess=0
    else_count=0
    domains = {}
    ad_hash = {}
    rest = main()  # public-suffix tree, used by lookup() below
    # NOTE(review): outfile is opened but never written to before close()
    outfile = open('D:\\format11.txt', "w")
     
    """
    log records created by Adservers need to be removed by loading the well known adservers 
    into a dictionary and match them 
    """
    for line in fileinput.input(['D:\\adservers.txt']):
        try:
#            print line
            line1=line.split()[0]
            ad_hash[line1]="*"  # value unused; ad_hash acts as a set of hostnames
        except:
            # NOTE(review): bare except silently skips unparseable blacklist lines
            continue
    
    print "got it back"
    # load the previously saved domain -> id mapping (sequence 16)
    pkl_file = open('D:\\domain_mapping_seq_16.p', 'rb')
    domains = pickle.load(pkl_file)
    pkl_file.close()
    print "loaded"
    
    for line in fileinput.input(['D:\\access_log.14']): # loading the log file into memory line by line
        try:         
            row = getlogfields(line)
            expandedrow = expandedfields(row)
            fields = expandedrow[2].split(".")  # resource path split on '.' to get the extension

            length = len(fields)
            print totalcount
            print expandedrow[6]
            # keep only records with hour in [7, 11]; break past 11 — assumes
            # the log is time-ordered (TODO confirm)
            if int(expandedrow[6])< 7:
                continue
            if int(expandedrow[6])> 11:
                break      
#            if int(totalcount)>= 105000:
#                break
            totalcount+=1
#            if fields[length-1].startswith("jpg") or fields[length-1].startswith("png") or fields[length-1].startswith("gif") or fields[length-1].startswith("css"):
            if fields[length-1]=="jpg":  #removing entries with jpg extensions 
                jpgcount +=1
                continue
            elif fields[length-1]=="png": #removing entries with png extensions 
                pngcount +=1
                continue
            elif fields[length-1]=="gif": #removing entries with gif extensions 
                gifcount +=1
                continue
            elif fields[length-1]=="css": #removing entries with css extensions 
                csscount +=1
                continue
            elif int(row[5])<200 or int(row[5])>299: #removing entries with unsuccessfull attempts 
                unsuccesscount +=1
                continue
            else:
                if ad_hash.has_key(row[0]):  # drop hits on known ad servers
                    ad_count+=1
                    continue
                domainname = lookup(row[0], rest) # will return the domain name from the host name eg: m.facebbok.com---> facebook.com
                # require a real second label; single-label results raise
                # IndexError and are swallowed by the outer except
                if not (len(domainname.split('.')[1])>0):
                    continue
                cleaned_count+=1
                domainnumber = domainmapper(domainname, domains) # for fast matching assign a unique number to every distinct domain
                # NOTE(review): 'log' is assembled but never written anywhere
                log = str(domainnumber)+" "+expandedrow[0]+" "+row[2]+" "+expandedrow[1]+" \n"
#            5

#            if totalcount>=100000: # loop terminating control point 
#                break 
###            

            """
            Session creation........
            A hash table structure is used to create sessions:
                The user ID will be the Key for the outer hashtable
                The value will contain another hashtable with IP_add as key and a list of domains accessed as value
            Same userID with different IP_add is considered as different session
            Maximum time difference between two page access in a session would be 25.5 minutes
            CDNs are removed using the referrer fields
                
            """
            if dic.has_key(row[2]): # if outer hastable contains the user_ID
                ipinfo = dic.get(row[2]) # retrieving the value object(hash table)
                # '1' is a reserved key holding [last_ip, last_access_time]
                last_updateinfo=ipinfo.get('1')              
                last_ipaccess=last_updateinfo[0]
                last_accesstime=last_updateinfo[1]
                record_time =datetime.time(int(expandedrow[6]), int(expandedrow[7]),int(expandedrow[8]))
                last_access_datetime=datetime.datetime(int(expandedrow[5]),int(getmonth(expandedrow[4])),int(expandedrow[3]),int(expandedrow[6]),int(expandedrow[7]),int(expandedrow[8]))
                # gap in seconds since this user's previous access (time-of-day only,
                # so records spanning midnight would go negative — TODO confirm)
                time_differnce=(record_time.hour-last_accesstime.hour)*3600+(record_time.minute-last_accesstime.minute)*60+(record_time.second-last_accesstime.second)
               
                if ipinfo.has_key(expandedrow[0]): # check for the availability of IP_add
                    sess_tup=ipinfo.get(expandedrow[0])
                    list=sess_tup[1]
                    if has_element(list,domainnumber)==1: 
                        duplicate_aceess+=1 
                        continue
                    list.append(domainnumber) # if IP_add already exist append the domain number to the list
                    ipinfo[expandedrow[0]]=(last_access_datetime,list)
                    last_updateinfo[0]=expandedrow[0]
                    last_updateinfo[1]=record_time
                    count+=1
                
                # NOTE(review): docstring above says a 25.5-minute session gap,
                # but the threshold here is 600 s (10 min) — confirm intent
                elif((not last_ipaccess== expandedrow[0]) and(time_differnce<600)):
                    sess_tup=ipinfo.get(last_ipaccess)
                    list=sess_tup[1]
                    if has_element(list,domainnumber)==1: 
                        duplicate_aceess+=1 
                        continue
                    list.append(domainnumber)
                    ipinfo[last_ipaccess]=(last_access_datetime,list)
                    last_updateinfo[1]=record_time
                    count+=1
                    
                else:
                    listip=[] # if IP_add not exist create a new list and insert it into the hashtable
                    listip.append(domainnumber)
                    ipinfo[expandedrow[0]]=(last_access_datetime,listip)
                    count+=1
                    
            else:
                # first time this user id is seen: create the inner table
                diction={}
                listip=[]
                last_ipaccess=expandedrow[0]
                last_accesstime=datetime.time(int(expandedrow[6]),int(expandedrow[7]),int(expandedrow[8]))
                last_access_datetime=datetime.datetime(int(expandedrow[5]),int(getmonth(expandedrow[4])),int(expandedrow[3]),int(expandedrow[6]),int(expandedrow[7]),int(expandedrow[8]))
                user_tup=(last_ipaccess,last_accesstime)
                listip.append(domainnumber)
                sess_tup=(last_access_datetime,listip)
                diction['1']=user_tup
                diction[expandedrow[0]]=sess_tup
                dic[row[2]]=diction
                count+=1
                else_count+=1
        except:
            # NOTE(review): bare except drops every malformed record silently,
            # so the counters above can hide real parse errors
            continue
   
    """
    Statistics about the data needs to be collected....
    """
    print "Totalcount : ",totalcount
    print "Jpgcount : ", jpgcount
    print "Pngcount : ", pngcount
    print "Csscount : ", csscount
    print "GifCount : ", gifcount
    print "Unsuccess access : ", unsuccesscount
    print "Ad Count : ",ad_count
    print "NonJpg : ", nonjpgcount
    print "Cleaned Count  : ", cleaned_count
    print 'duplicate_aceess',duplicate_aceess
    print "elsecount",else_count
    
    
#    print "DICTIONARY  "
#    print len(domains)
#    print domains.items() 
    
            
    length=0
    list_count=0
    morethan_two=0
    no_of_user=0
    myfile_1 = open('D:\\log_14_seq_session.csv', 'wb')
    # NOTE(review): myfile_2 is only used by the commented-out code below
    myfile_2 = open('D:\\domain_14_seq.csv', 'wb')
    myfile_3 = open('D:\\log_14_seq_time.csv', 'wb')
    wr_1 = csv.writer(myfile_1)
    wr_2 = csv.writer(myfile_3)

    for key in dic.keys():
        value=dic.get(key)
        no_of_user+=1
        # NOTE(review): inner loop reuses 'key', shadowing the outer user key
        for key in value.keys():
#            print value.get(key)
            if(key=='1'):
                continue
            length+=len(value.get(key))
            if (len(value.get(key))>=2):
                morethan_two+=1
            sess_tup=value.get(key)
            last_time=sess_tup[0]
            list=sess_tup[1]
            wr_2.writerow(sess_tup)
            wr_1.writerow(list)      
            list_count+=1
 
#            if expandedrow[6]== "4" :
#                wr_1.writerow(value.get(key))
#            if expandedrow[6]== "5" :
#                wr_2.writerow(value.get(key))
#    for key in domains.keys():
#        myfile_2.writelines(str(key)+","+str(domains.get(key))+"\n")
        

#    obj = cPickle.load(open('D:\\save13.p', 'rb')) # to retrieve the saved dictionary 
#    print "got it back"
#    for key in obj.keys():
#        value=dic.get(key)
#        print value.values()
    
    outfile.close()
    elapsed = (time.clock() - start)
    print elapsed

    print "length:", length
    print "list_count:", list_count
    print "More then two:", morethan_two
    print "count:", count
    print "dupilcate_count:",duplicate_aceess
    print "no_of _user=",no_of_user
    # assumes domain ids are dense 0..N-1 (see domainmapper)
    print"No of domains:",max(domains.values())

#    for key in dic.keys():
#        value=dic.get(key)
#        print "U_ID:" ,key ,"IP:",value.keys(), "SESSION:", value.values()

    # persist the (possibly grown) domain -> id map for the next run
    output = open('D:\\domain_mapping_seq_14.p', 'wb')
    pickle.dump(domains, output)
    output.close()
#    cPickle.dump(domains, open('D:\\domain_mapping.p', 'wb'))     
#    obj = cPickle.load(open('D:\\domain_mapping.p', 'rb')) # to retrieve the saved dictionary 
#    print "got it back"
#    pkl_file = open('D:\\domain_mapping.p', 'rb')
#    obj = pickle.load(pkl_file)
#    pkl_file.close()
#    for key in obj.keys():
#        print key,",", obj.get(key)
    sys.exit()  # terminates the interpreter; nothing after this runs
 
                    
def has_element(list, test):
    """
    Return 1 if *test* occurs in *list*, else 0.

    Uses the built-in membership operator instead of a manual scan; result
    stays an int (not bool) because callers compare with ``== 1``.
    """
    return 1 if test in list else 0

def domainmapper(domain, domains):
    """
    Map each distinct domain name to a stable integer id.

    Known domains return their existing id; new domains are assigned the
    next dense id (``len(domains)``) and stored.  Mutates *domains*.
    Replaces Py2-only ``has_key`` + double lookup with a single ``in`` test.
    """
    if domain in domains:
        return domains[domain]
    size = len(domains)   # ids are dense: 0, 1, 2, ...
    domains[domain] = size
    return size
    
def test():
    """
    Debug helper: print one known user's session entry from the global
    table and pickle the whole table to D:\\save13.p.  The hard-coded key
    is presumably one user's hashed id — verify against the log data.
    """
    print "Success testing "
    if dic.has_key("861ee80996964b5c1a01a8d34a2f31ed"):
        free = dic.get("861ee80996964b5c1a01a8d34a2f31ed")
        print "Codeeeeeeeeeeee", free.items() 
    cPickle.dump(dic, open('D:\\save13.p', 'wb'))
         
         
"""
The method getlogfields are used split the fields in the log record 
"""      
def getlogfields(s): 
    """
    Tokenise one raw log line into fields.

    Whitespace-separated tokens are regrouped so that "double-quoted",
    'single-quoted' and [bracketed] runs become single fields with their
    delimiters stripped.  Returns the field list; an empty list when the
    line starts with a space (treated as malformed).
    """
    fields = []
    # error! no leading spaces allowed!
    if s.startswith(" "): 
       return fields

    # state machine: 0 = outside any group, 2 = inside "...",
    # 3 = inside '...', 4 = inside [...]
    state = 0
    quoteds = ""
    n = 0  # NOTE(review): incremented below but never used
    for f in s.split():
        if state == 0:
           if f.startswith("\""):
               if f.endswith("\""):
                   # one-token quoted field: strip both quotes
                   fields.append(f[1:-1])
               else:
                   quoteds = f[1:] + " "
                   state = 2
               continue
           elif f.startswith("'"):
               # NOTE(review): this closing test checks '"' not "'" — likely a
               # bug; one-token single-quoted fields fall into state 3. Confirm.
               if f.endswith("\""):
                   fields.append(f[1:-1])
               else:
                   quoteds = f[1:] + " "
                   state = 3
               continue
           elif f.startswith("["):
               if f.endswith("]"):
                   fields.append(f[1:-1])
               else:
                   blockeds = f[1:] + " "
                   state = 4
               continue
           else:
               fields.append(f)
 
        if state == 2 :
            quoteds += (f + " ")
            if f.endswith("\"") :
               # drop the 2 trailing chars: closing quote + appended space
               quoteds = quoteds[:-2]
               fields.append(quoteds)
               n += 1
               state = 0
 
        if state == 3 :
            quoteds += (f + " ")
            if f.endswith("'") :
               quoteds = quoteds[:-2]
               fields.append(quoteds)
               n += 1
               state = 0
 
        if state == 4 :
            blockeds += (f + " ") 
            if f.endswith("]") :
               blockeds = blockeds[:-2]
               fields.append(blockeds)
               n += 1
               state = 0
    return fields

def logsplittimefield(timefield):
    """
    Parse an Apache-style time field like ``14/Jul/2013:10:30:45 +0530``.

    Returns (day, month, year, hour, minute, second, zone) as strings, or
    a 7-tuple of Nones when the field is missing or malformed.  The bare
    ``except`` is narrowed to the exceptions the parsing can actually
    raise (AttributeError for None/non-str, ValueError for bad splits);
    the redundant ``stime[0:]`` slice is dropped.
    """
    try:
        stime, zone = timefield.split()
        caldate, hour, minute, second = stime.split(":")
        day, month, year = caldate.split("/")
    except (AttributeError, ValueError):
        # malformed field: callers treat an all-None tuple as "skip record"
        return None, None, None, None, None, None, None
    return (day, month, year, hour, minute, second, zone)

def logsplitrequestfield(requestfield):
    """
    Split an HTTP request field like ``GET /path HTTP/1.1`` into
    (method, resource, protocol).

    Returns (None, None, None) for a missing or malformed field.  The
    bare ``except`` is narrowed to what the unpacking can raise.
    """
    try:
        method, resource, protocol = requestfield.split()
    except (AttributeError, ValueError):
        return None, None, None
    return method, resource, protocol
 
 
def logsplituseragentfield(useragentfield):
    """
    Split the user-agent field on whitespace and return the token list.

    Note: despite the original docstring, this returns a list of tokens,
    not a (browser, url) pair; callers treat token 0 as the browser and
    the remaining tokens as URLs.
    """
    return useragentfield.split()

def expandedfields(ninefields):
    """
    Expand the raw field list from getlogfields() into a 14-tuple:

        (ip, timestamp, resource, day, month, year, hour, minute, second,
         zone, method, protocol, browser, urls)

    The CLIENT_IP field is expected to hold two comma-separated addresses;
    '-' marks a missing first address, in which case the second is used.
    On any malformed input a uniform 14-tuple of Nones is returned
    (previously the error paths returned 2- or 7-None tuples, so the error
    shape was inconsistent with the success shape; callers guard every
    access in try/except, so the unified shape is safe).
    """
    try:
        if len(ninefields) == 0:
            return (None,) * 14
        ip1, ip2 = ninefields[CLIENT_IP].split(",")
        # prefer the second address when the first is the '-' placeholder
        ip = ip2 if ip1 == '-' else ip1
        day, month, year, hour, minute, second, zone = logsplittimefield(ninefields[COMPLETION_TIME])
        timestamp = year + "-" + getmonth(month) + "-" + day + " " + hour + ":" + minute + ":" + second
        method, resource, protocol = logsplitrequestfield(ninefields[HTTP_REQUEST])
        # status/size are not returned, but indexing them validates that the
        # record has enough fields (a short record raises IndexError here)
        status = ninefields[STATUS_CODE]
        size = ninefields[RETURNED_SIZE]
        browserandurls = ninefields[USER_AGENT].split()
        browser = browserandurls[0]
        if len(browserandurls) == 1:
            urls = []
        else:
            # strip the surrounding '(' ... ')' from the URL list
            urls = browserandurls[1:]
            urls[0] = urls[0][1:]
            urls[-1] = urls[-1][:-1]

        return (ip, timestamp, resource, day, month, year, hour, minute,
                second, zone, method, protocol, browser, urls)
    except Exception:
        # malformed record: silent skip by design — callers count/continue
        return (None,) * 14

def getmonth(month):
    """
    Map an English three-letter month abbreviation ('Jan'..'Dec') to a
    zero-padded month number string ('01'..'12'); '00' for anything else.

    A dict lookup replaces the original twelve-branch if/elif chain.
    """
    return {
        "Jan": "01", "Feb": "02", "Mar": "03", "Apr": "04",
        "May": "05", "Jun": "06", "Jul": "07", "Aug": "08",
        "Sep": "09", "Oct": "10", "Nov": "11", "Dec": "12",
    }.get(month, "00")


''' Public Suffix List support.
        www.foo.bar.baz.com -> baz.com
        www.m.facebook.com -> facebook.com
    effective_tld_names.dat retrieved from:
        http://publicsuffix.org/list/
'''

def find_node(parent, parts):
    """
    Descend/create the suffix-trie path for *parts* (labels consumed
    right-to-left via pop, so the TLD ends up nearest the root) and return
    the node for the final label.

    Node shape: ``[negate]`` for a leaf, ``[negate, children_dict]`` once
    the node has children.  Mutates both the trie and *parts*.

    The bare ``except`` around the ascii normalisation is narrowed to
    UnicodeError (covers both the Py2 encode and decode failure modes).
    """
    if not parts:
        return parent

    if len(parent) == 1:
        parent.append({})          # promote leaf to interior node

    assert len(parent) == 2
    negate, children = parent

    child = parts.pop()
    try:
        # normalise unicode labels to plain ascii keys where possible
        child = child.encode('ascii')
    except UnicodeError:
        pass

    child_node = children.get(child, None)

    if not child_node:
        children[child] = child_node = [0]

    return find_node(child_node, parts)

def add_rule(root, rule):
    """
    Insert one public-suffix rule into the trie rooted at *root*.

    A leading '!' marks an exception rule and sets the node's negate flag.
    """
    negate = 0
    if rule.startswith('!'):
        negate = 1
        rule = rule[1:]

    node = find_node(root, rule.split('.'))
    node[0] = negate

def simplify(node):
    """
    Convert a mutable trie node into its immutable final form.

    ``[negate]`` collapses to the bare int *negate*; ``[negate, children]``
    becomes ``(negate, {label: simplify(child)})``.  Uses ``items()``
    instead of the Python-2-only ``iteritems()`` — identical results on
    both interpreters.
    """
    if len(node) == 1:
        return node[0]

    return (node[0], dict((k, simplify(v))
                          for (k, v) in node[1].items()))

def mini_pformat(o):
    """
    Render the simplified suffix tree as a compact Python literal string.

    Handles the leaf ints 0/1, strings, dicts and tuples produced by
    simplify(); any other type is a programming error (assert).

    BUG FIX: the 1-tuple branch referenced ``o2[0]`` with ``o2`` undefined,
    raising NameError whenever a 1-tuple was formatted; it now formats
    ``o[0]``.  ``u"".__class__`` stands in for the Py2-only ``unicode``
    name (it is ``unicode`` on Py2 and ``str`` on Py3), preserving the
    original type check portably.
    """
    if o in (0, 1):
        return str(o)
    elif type(o) in (str, u"".__class__):
        return repr(o)
    elif type(o) is dict:
        return '{' + ','.join((mini_pformat(k) + ':' + mini_pformat(v))
                              for (k, v) in o.items()) + '}'
    else:
        assert type(o) == tuple
        if len(o) == 1:
            # was: mini_pformat(o2[0]) — NameError, o2 is undefined here
            return '(%s,)' % mini_pformat(o[0])
        else:
            return '(' + ','.join(mini_pformat(item) for item in o) + ')'


def build_structure(fp):
    """
    Build the public-suffix trie from an open rule file.

    Blank lines and '//' comment lines are skipped; only the first
    whitespace-separated token of each remaining line is used as a rule.
    """
    root = [0]

    for raw in fp:
        rule = raw.decode('utf-8').strip()
        if not rule or rule.startswith('//'):
            continue
        add_rule(root, rule.split()[0])

    return root


def main():
    """
    Build and simplify the public-suffix tree from the hard-coded rules
    file, dump it via write_module(), and return the simplified tree.

    FIX: the original passed ``file(path)`` straight into build_structure
    and never closed it (Py2-only ``file`` builtin, leaked handle); the
    ``with open(...)`` form closes the file deterministically.
    """
    with open('D:\\effective_tld_names.txt') as fp:
        root = build_structure(fp)
    root = simplify(root)
    write_module(root)
    return root

def write_module(root): 
    """Emit the simplified suffix tree as a Python assignment on stdout."""
    print 'root =', mini_pformat(root)

def lookup_node(matches, depth, parent, parts):
    """
    Recursive walk of the (simplified) suffix tree.

    *parts* are the domain labels; *depth* counts labels from the right
    (1 = TLD).  For every tree node reached, the negate flag is OR-folded
    into ``matches[-depth]``; both the exact label and the '*' wildcard
    child are followed.  Mutates *matches* in place.
    """
    if parent in (0, 1):
        negate, children = parent, None
    else:
        negate, children = parent

    # record this node's flag for the label at the current depth
    matches[-depth] = matches[-depth] or negate

    if children and depth < len(parts):
        for label in (parts[-depth], '*'):
            subtree = children.get(label)
            if subtree is not None:
                lookup_node(matches, depth + 1, subtree, parts)

def lookup(domain, root):
    """
    Reduce a hostname to its registered domain using the suffix tree,
    e.g. ``m.facebook.com`` -> ``facebook.com``.

    Returns the joined labels from the leftmost matched position onward;
    None (implicitly) when nothing in the tree matched.
    """
    labels = domain.split('.')
    hits = [None for _ in labels]

    lookup_node(hits, 1, root, labels)

    for idx, hit in enumerate(hits):
        if hit is not None:
            return '.'.join(labels[idx:])

if __name__ == "__main__": hashing()