import psyco
import os
import gzip
import re
import time
import sys
import logging
import cProfile
import pstats
import settings 
import urlparse 
import bsddb
import struct
import json

class Model:
    """Crawl data model.

    Owns the persistent url<->id maps (Berkeley DB btrees), the binary
    webgraph/hostgraph output files (sequences of struct-packed native
    'i' ints), an in-memory transpose graph and the run statistics.
    """

    class Stat:
        """Accumulator for run statistics; getlog() dumps them as JSON."""

        def __init__(self):
            self.time_usage = ''                # run duration string, set by main()
            self.total_host = 0                 # hosts processed
            self.total_webpage = 0              # pages accepted as valid HTML
            self.total_webpage_in_webgraph = 0  # pages written as webgraph sources
            self.total_hyperlink = 0            # edges written to the webgraph
            self.total_uniqurl = 0              # unique urls seen == next id to assign
            self.cum_pagesize = 0               # cumulative page content size (bytes)

            self.maxoutlink = 0                 # largest out-degree observed
            self.minoutlink = sys.maxint        # smallest out-degree (sys.maxint: Python 2 only)
            self.all = {}                       # cache of the dict written by getlog()

        def getlog(self):
            """Serialize all statistics to settings.db_stat as JSON and
            cache the dict in self.all for later logging.

            NOTE(review): the average computations divide by
            total_webpage / total_host, so this raises
            ZeroDivisionError when nothing was crawled.
            """
            f = open(settings.db_stat,'w')
            db = {}
            db['time_usage'] = self.time_usage
            db['total_hosts'] = self.total_host
            db['total_webpages_found'] = self.total_webpage
            # NOTE(review): key below has a space instead of '_' -- kept
            # as-is since downstream readers may rely on the exact key.
            db['total webpages_in_webgraph'] = self.total_webpage_in_webgraph
            db['total_hyperlink'] = self.total_hyperlink
            db['total_unique_url'] = self.total_uniqurl
            # NOTE(review): "dangling" here is edges minus source nodes --
            # confirm this matches the intended definition.
            db['total_dangling'] = self.total_hyperlink-self.total_webpage_in_webgraph
            db['total_non_dangling'] = self.total_webpage_in_webgraph
            db['max_outlink'] = self.maxoutlink
            db['min_outlink'] = self.minoutlink
            db['average_urls_page'] = self.total_hyperlink*1.0/self.total_webpage
            db['average_pages_site'] = self.total_webpage*1.0/self.total_host
            db['average_bytes_page'] = self.cum_pagesize*1.0/self.total_webpage
            db['total_content_size'] = self.cum_pagesize
            f.write(json.dumps(db))
            self.all = db
            f.close()

    def __init__(self):
        self.stat = self.Stat()
        # url -> id and id -> url persistent btree maps ('c': create if absent)
        self.urlid = bsddb.btopen(settings.db_urlid,'c')
        self.idurl = bsddb.btopen(settings.db_idurl,'c')
        # binary graph output files (struct-packed 'i' records, native byte order)
        self.webgraph = open(settings.db_webgraph,'wb')
        self.hostgraph = open(settings.db_hostgraph,'wb')
        # dest id -> [src ids]; built by add2webgraph(), flushed to disk
        # and discarded by createtransposegraph()
        self.transposegraph = {}

    def addurl(self,url):
        """Return the integer id of *url*, assigning (and persisting)
        a new sequential id on first sight."""
        if self.urlid.has_key(url) :
            return int(self.urlid[str(url)])
        else:
            # bsddb stores only strings, so both keys and ids are str()'d
            self.urlid[str(url)] = str(self.stat.total_uniqurl)
            self.idurl[str(self.stat.total_uniqurl)] = str(url)

            # update stat.total_uniqurl
            self.stat.total_uniqurl += 1

            # flush both DBs on every insert: durable, but one sync per new url
            self.urlid.sync()
            self.idurl.sync()

            return self.stat.total_uniqurl-1

    def add2webgraph(self,src,dest):
        """Append one adjacency record to the webgraph file and mirror
        the reversed edges into the in-memory transpose graph."""
        # src id | out degree | list of destination id 
        self.webgraph.write(struct.pack('i',src))
        self.webgraph.write(struct.pack('i',len(dest)))
        self.stat.total_webpage_in_webgraph += 1
        for d in dest:
            self.webgraph.write(struct.pack('i',d))
            # add node to transpose graph
            # self.transposegraph[ <int> ] = [ <int> , ... ]
            if self.transposegraph.has_key(d) :
                if src not in self.transposegraph[d]:
                    self.transposegraph[d].append(src)
            else:
                self.transposegraph[d] = [src]

    def add2hostgraph(self,hostid,pageid_list):
        """Append one host record (host id | page count | page ids) to
        the hostgraph file."""
        self.hostgraph.write(struct.pack('i',hostid))
        self.hostgraph.write(struct.pack('i',len(pageid_list)))
        for p in pageid_list:
            self.hostgraph.write(struct.pack('i',p))

    def createtransposegraph(self):
        """Write the accumulated transpose graph to disk, then release
        the (potentially large) in-memory dict."""
        # dest | indegree | list of source id 
        tpgraph = open(settings.db_transposegraph,'wb')
        for destid in self.transposegraph.keys() :
            tpgraph.write(struct.pack('i',destid))
            tpgraph.write(struct.pack('i',len(self.transposegraph[destid])))
            for srcid in self.transposegraph[destid]:
                tpgraph.write(struct.pack('i',srcid))

        tpgraph.close()
        del self.transposegraph

    def postprocessing(self):
        """Finalize the run: dump the transpose graph, then the stats."""
        self.createtransposegraph()
        self.stat.getlog()

    def close(self):
        """Close the url maps and graph files.  (The transpose graph
        file is already closed inside createtransposegraph().)"""
        self.urlid.close()
        self.idurl.close()
        self.webgraph.close()
        self.hostgraph.close()

########################################################################################

def GetInHMS(seconds):
    """Format a duration given in whole seconds as 'HH:MM:SS', or
    'MM:SS' when the duration is under one hour.

    Fix: the original used true division ('/') and relied on Python 2
    integer-division semantics; divmod/floor division gives the same
    result on Python 2 and stays correct on Python 3.
    """
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    if hours == 0:
        return "%02d:%02d" % (minutes, seconds)
    return "%02d:%02d:%02d" % (hours, minutes, seconds)

def urlsplit(url):
    """ Extend on urlparse.urlsplit() by further parsing the
        network location into userinfo and host.
        NOTE: the query and fragment ARE parsed but deliberately
        discarded -- both are returned as empty strings (see the
        commented-out return below).
        @returns scheme, userinfo, host, path, '', ''
    """
    scheme, netloc, path, query, frag = urlparse.urlsplit(url)
    # netloc may be 'user:pass@host'; split off the userinfo part once
    if '@' in netloc:
        userinfo, host = netloc.split('@',1)
    else:
        userinfo, host = '', netloc
    #return scheme, userinfo, host, path, query, frag
    return scheme, userinfo, host, path, '','' 

def canonicalize(root,url):
    """ Canonicalize an url: lower-case the host, strip a ':80' port,
    resolve '.'/'..' path segments, drop userinfo/query/fragment, and
    resolve the result relative to *root*.  Returns None for schemes
    other than http/https/relative. """

    # 1. Convert host to lower case
    # 2. Do not include port number for port 80
    # 3. normalize '.' and '..' parts
    # 4. drop userinfo
    # 5. drop fragment
    # 6. url decode. (not doing because it cannot be used in request anymore.)
    scheme, userinfo, host, path, query, frag = urlsplit(url)
    # reject mailto:, javascript:, ftp:, ... ; '' covers relative urls
    if scheme not in ['','http','https']:
        return None
    else:
        host = host.lower()
        if host.endswith(':80'):        # todo: what if there are 2 or more colons?
            host = host[:-3]

        pparts = path.split('/')        # todo: make sure path start with '/'
        # Resolve '.' and '..' segments in place; deletions shift the
        # list, so the index only advances on a plain segment.
        i = 0
        while i < len(pparts):
            if pparts[i] == '.':
                del pparts[i]
            elif pparts[i] == '..':
                if i > 1:               # note: pparts[0] is always the '' before the initial '/'. Never pop it.
                    del pparts[i-1:i+1]
                    i -= 1
                else:
                    del pparts[i]
            else:
                i += 1

        if len(pparts) > 1:
            path = '/'.join(pparts)
        else:
            path = '/'

        # Rebuild without userinfo/fragment; urlsplit() above already
        # blanked query/frag, so 'query' here is always ''.
        return urlparse.urljoin(root,urlparse.urlunsplit((scheme, host, path, query, None)))

def remove_extra_spaces(content):
    """Collapse each run of whitespace to a single space and trim the ends."""
    collapsed = re.sub(r'\s+', ' ', content)
    return collapsed.strip()

def remove_html_tags(content):
    """Strip HTML/XML tags from *content* and normalize the remaining
    whitespace via remove_extra_spaces().

    Fix: the original compiled the pattern with re.M, which only changes
    the meaning of the absent ^/$ anchors, while '.' still refused to
    cross newlines -- so a tag spanning lines was never removed.  re.S
    makes '.' match newlines, stripping multi-line tags as intended.
    """
    tag_pattern = re.compile(r'<.*?>', re.S)
    return remove_extra_spaces(tag_pattern.sub('', content))

def decodeHTML(s):
    """Decode a handful of common HTML entities in *s*.

    Fixes vs. the original:
    - '&amp;' is now decoded LAST, so double-escaped text such as
      '&amp;lt;' correctly yields the literal '&lt;' instead of being
      unescaped twice all the way to '<'.
    - the em-dash entity is matched with its closing semicolon
      ('&mdash;'), so decoding no longer leaves a stray ';' behind.
    """
    return s.replace('&lt;','<') \
            .replace('&gt;','>') \
            .replace('&quot;','"') \
            .replace('&#039;','\'') \
            .replace('&mdash;','--') \
            .replace('&amp;','&')

def ExtractSites(hostname,content,model): #O(n)   -
    ''' Parse one host's crawl archive and record its pages and links
    into *model* (url ids, webgraph, hostgraph, statistics).

    *content* is a concatenation of crawl records separated by
    settings.docSeperator; each record is a header block followed by
    the HTML body (sample record below).

    Feature:
      - Ignored robot.txt
    '''

    # ==P=>>>>=i===<<<<=T===>=A===<=!Junghoo!==>
    # URL: http://www.klainfo.com/
    # Date: 
    # Position: 0
    # DocId: 0
                                                # step1
    # HTTP/1.1 200 OK
    # Content-Length: 74
    # Content-Type: text/plain
    # Last-Modified: Thu, 01 May 2008 16:47:16 GMT
    # Accept-Ranges: bytes
    # Date: Tue, 16 Sep 2008 01:36:26 GMT
    # Connection: close
                                                # step2
    # <html>...</html>

    # Add hostid +1
    model.stat.total_host += 1

    # List of webpageid for add hostgraph 
    webpageid_list = []

    # Get hostid for add hostgraph
    hostid = model.addurl(hostname)

    for page in re.split(settings.docSeperator,content): # split for each page by docSeperator
        # Ignore if page size is less than 10 bytes
        if len(page) > 10 :
            # Finding the URL for each page
            html_start = page.find('<')
            # 2nd line of the record is 'URL: <url>' (see sample above);
            # the [5:] slice drops the 'URL: ' prefix.
            webpageurl = remove_extra_spaces(page[:200].split('\n')[1][5:])
            # Skip robots files, urls with query/fragment characters,
            # and records containing no '<' (i.e. no HTML) at all.
            if 'robot' not in webpageurl and '?' not in webpageurl and '#' not in webpageurl and html_start != -1:
            #if 'robot' not in pageurl :


                # Valid HTML Content
                # Find pagecontent w/o header
                webpagecontent = page[html_start:]                 # page content that start with html tag

                ####################################################################
                # Start Extract for each webpage
                # For create webgraph
                webpageid = model.addurl(webpageurl)
                webpageid_list.append(webpageid)
                destid = []

                # Stat: update pagesize and webpageid_list
                model.stat.cum_pagesize += len(webpagecontent)
                model.stat.total_webpage += 1 

                # List the hyperlinks from pagecontent
                hreflist = re.findall(r'href=[\'"]?([^\'" >]+)', webpagecontent)

                # Parse href tag
                for href in hreflist:
                    # URL Canonicalize
                    url = canonicalize(webpageurl,href)

                    # Add unique destid into list
                    if url != None :
                        # NOTE(review): 'id' shadows the builtin of that name
                        id = model.addurl(url)
                        if id not in destid: destid.append(id)

                # Add to webgraph with non-dangling node
                # (pages with zero outlinks are never written as sources)
                if destid != [] :
                    model.add2webgraph(webpageid,destid) 
                    model.stat.total_hyperlink += len(destid)
                    model.stat.maxoutlink = max(model.stat.maxoutlink, len(destid))
                    model.stat.minoutlink = min(model.stat.minoutlink, len(destid))

                # End Extract for each webpage
                ####################################################################

    # add host graph
    if len(webpageid_list) > 0:
        model.add2hostgraph(hostid,webpageid_list)

    # End ExtractSite
    ####################################################################
    

def main(): # Initial Logging System 
    """Top-level driver: configure logging, feed every gzipped host file
    in settings.dirInput through ExtractSites(), then post-process the
    graphs and log the summary statistics."""
    logging.basicConfig(filename=settings.logfilename, level=logging.INFO)
    Log = logging.getLogger('Main')
    model = Model()
    stat = model.stat

    # Start time main
    time_main = time.time()
   
    # Start logging
    Log.info('.....................Start..........................\n')
    Log.info(str(time.ctime()))
    Log.info(settings.dirInput)

    ############################################################################################
    # Start Main Program
    
    # List file from DataTest Directory
    # zip() caps the run at settings.maximumHost-1 files; this hostid is
    # only used in log messages (model.addurl assigns the real ids).
    for hostid , hostfile in zip(range(1,settings.maximumHost),os.listdir(settings.dirInput)):

        # assumes filenames end in a 3-char suffix ('.gz') -- TODO confirm
        hostname = 'http://'+hostfile[:-3]+'/'

        try:
            # read gzip to mem
            time_extractfile = time.time()
            gz = gzip.open(settings.dirInput + hostfile ,'rb')
            content = gz.read()
            gz.close()
            time_extractfile = time.time()-time_extractfile

            Log.info('Hostid %5d:\t%s',hostid,hostname)
            
            #extract site and extract url
            time_extractsite = time.time()
            ExtractSites(hostname,content,model)
            time_extractsite = time.time()-time_extractsite

            Log.info('\t\tFile in %5.4f s. \tSite in %6.4f s.',time_extractfile,time_extractsite)

        except IOError :
            # unreadable/corrupt file: log it and continue with the next host
            Log.error('Cannot open %s',hostname)
            pass

    # Finish Process
    time_postprocessing = time.time()
    model.postprocessing()
    time_postprocessing = time.time() - time_postprocessing
    Log.info('Postprocessing time usage : %6.4f s.',time_postprocessing)

    # snapshot process/system resource usage into the dataset directory
    os.system('ps aux | grep main.py > '+settings.dataset+'/memoryusage.txt')
    os.system('uptime >> '+settings.dataset+'/memoryusage.txt')
    os.system('free >> '+settings.dataset+'/memoryusage.txt')

    stat.time_usage = str(GetInHMS(int(time.time()-time_main)))

    Log.info('')
    Log.info('>> Summary')
    Log.info(str(time.ctime()))
    # dump every stat key/value produced by Stat.getlog()
    for k in model.stat.all.keys() :
        Log.info('%s : %s',k,model.stat.all[k])
    Log.info('......................END...........................\n')

    print '\n'
    print 'Load Average'
    print os.system('uptime')

    model.close()
    # End Main

if __name__ == "__main__" :
    try:
        # enable the psyco JIT (Python 2 only) with logging/profiling,
        # then run main() under cProfile for a hot-spot profile
        psyco.log(logfile=settings.dataset+'/psyco-log.txt')
        psyco.profile()
        cProfile.run('main()')
        #main()
        # drop compiled bytecode so the next run starts clean
        os.system('rm *.pyc')
    except KeyboardInterrupt:
        # allow Ctrl-C to end the run quietly
        print 'End'
