import time
import re,gzip
import urlparse 
import logging
import sgmllib
import chardet

import settings
import model

def GetInHMS(seconds):
    """Format a duration in whole seconds as "MM:SS", or "HH:MM:SS" when
    the duration is an hour or longer.

    Uses divmod/floor arithmetic so the result is correct under both
    Python 2 and Python 3 (the original relied on Python 2's integer
    division semantics for `/`).
    """
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    if hours == 0:
        return "%02d:%02d" % (minutes, secs)
    return "%02d:%02d:%02d" % (hours, minutes, secs)

def ConvertURL(siteurl, url):
    """Normalize a (possibly relative) href into an absolute http(s) URL.

    siteurl -- URL of the page the link was found on.
    url     -- raw href value, relative or absolute.

    Relative links are resolved against siteurl.  Absolute links whose
    scheme is not http/https (mailto:, ftp:, javascript:, ...) are
    rejected.  Any query string and fragment are stripped.

    Returns the cleaned absolute URL, or None for unsupported schemes.
    """
    parts = urlparse.urlsplit(url)

    if parts[0] == '':
        # No scheme: a relative link -- resolve against the page URL.
        url = urlparse.urljoin(siteurl, url)
    elif parts[0] not in ('http', 'https'):
        # BUG FIX: the original tested `!= 'http' or != 'https'`, which
        # is always true, so every absolute URL was rejected.
        return None
    else:
        url = urlparse.urlsplit(url).geturl()

    # Strip query string and fragment.
    # BUG FIX: the original sliced to pos-1, which also dropped the
    # character immediately before the '?' / '#'.
    if '?' in url:
        url = url[:url.find('?')]
    if '#' in url:
        url = url[:url.find('#')]
    return url

def remove_html_tags(content):
    """Strip anything that looks like an HTML/XML tag from content.

    Non-greedy, single-line match: tags broken across newlines are left
    untouched (same as the original).
    """
    return re.sub(r'<.*?>', '', content)

def remove_extra_spaces(content):
    """Collapse each run of whitespace (spaces, tabs, newlines) in
    content into a single space."""
    return re.sub(r'\s+', ' ', content)

def ExtractTitle(src, page, db=None):
    """Extract the page's <title> text and store it via db.addtitle.

    src  -- database id of the page URL (as returned by db.addurl).
    page -- raw HTML text.
    db   -- database handle exposing addtitle(src, title); when None
            the function is a no-op.

    Falls back to the whole tag-stripped page text when no <title>
    element is found.
    """
    if db is None:
        return
    # BUG FIX: the original greedy '<title>.*</title>' could swallow
    # everything between the first <title> and the LAST '</title>' on
    # the page, and (without DOTALL) missed titles spanning lines.
    matches = re.findall(r'<title>.*?</title>', page,
                         re.IGNORECASE | re.DOTALL)
    if matches:
        title = remove_extra_spaces(remove_html_tags(matches[0]))
    else:
        title = remove_extra_spaces(remove_html_tags(page))
    db.addtitle(src, title.strip())

def encodeHTML(s=""):
    """encodeHTML(s) -> str

    Encode HTML special characters from their ASCII form to
    HTML entities.

    '&' is replaced first so the later substitutions are not
    double-escaped; a double hyphen becomes an em-dash entity.
    """
    return s.replace("&", "&amp;") \
            .replace("<", "&lt;") \
            .replace(">", "&gt;") \
            .replace("\"", "&quot;") \
            .replace("'", "&#039;") \
            .replace("--", "&mdash;")  # BUG FIX: entity was missing ';'

def ExtractURL(page, pageurl=None, db=None):  # O(n)
    """Find every href in `page`, normalize each link, and register the
    page's outgoing edges with `db`.

    page    -- raw HTML text.
    pageurl -- absolute URL of the page (base for relative links).
    db      -- database handle (addurl/addnode); skipped when None.

    Returns (src, count): src is the db id of pageurl (0 when db is
    None) and count is the raw number of href attributes found,
    including duplicates and rejected links.
    """
    src = 0
    dest_ids = []

    if db != None:
        src = db.addurl(pageurl)
        ExtractTitle(src, page, db)

    hrefs = re.findall(r'href=[\'"]?([^\'" >]+)', page)

    seen = []
    for href in hrefs:
        # Resolve relative paths against the page URL; non-http(s)
        # schemes come back as None and are dropped.
        absolute = ConvertURL(pageurl, href)
        if absolute is None or absolute in seen:
            continue
        seen.append(absolute)
        if db != None:
            dest_ids.append(db.addurl(absolute))

    # Record this page's outgoing links in the web graph.
    if dest_ids != []:
        db.addnode(src, dest_ids)

    return src, len(hrefs)

def ExtractSites(hostname,content,db,stat): #O(n)   -
    '''Split a crawl-archive blob into pages and index each one.

    hostname -- site URL, registered via db.addurl.
    content  -- concatenated page records separated by
                settings.docSeperator; each record starts with a small
                header (the page URL on its second line, after a 5-char
                "URL: " prefix -- see the sample record below), followed
                by HTTP headers and the HTML body.
    db       -- database handle (addurl / addhost, plus whatever
                ExtractURL uses).  NOTE(review): the per-page code
                guards on db != None, but db.addhost at the bottom is
                called unconditionally -- passing db=None for a site
                with more than one page would raise AttributeError.
    stat     -- mutable stats object; cum_pagesize, tot_webpage and
                tot_hyperlink are updated in place.

    Returns (totalpage, totalhref) for this site.

    Feature:
      - Ignored robot.txt
    '''

    # Sample record layout (one per docSeperator-delimited chunk):
    # ==P=>>>>=i===<<<<=T===>=A===<=!Junghoo!==>
    # URL: http://www.klainfo.com/
    # Date: 
    # Position: 0
    # DocId: 0
                                                # step1
    # HTTP/1.1 200 OK
    # Content-Length: 74
    # Content-Type: text/plain
    # Last-Modified: Thu, 01 May 2008 16:47:16 GMT
    # Accept-Ranges: bytes
    # Date: Tue, 16 Sep 2008 01:36:26 GMT
    # Connection: close
                                                # step2
    # <html>...</html>

    totalpage = 0
    totalhref = 0

    hostid = 0
    pageid_list = []
    if db != None : 
        hostid = db.addurl(hostname)
        
    for page in re.split(settings.docSeperator,content): # split for each page by docSeperator

        # change encoding
        #char = chardet.detect(page)
        #if char['encoding'] not in [None,'ascii']:
            ##print char['encoding']
            #page = page.decode(chardet.detect(page)['encoding'])
            #print remove_html_tags(page)
        stat.cum_pagesize += len(page)

        if len(page) > 10 :
            # Second line of the record header minus the "URL: " prefix
            # (5 chars) -- presumably always present; TODO confirm the
            # archive format guarantees this.
            pageurl = remove_extra_spaces(page[:100].split('\n')[1][5:])
            # Skip robots files and any URL carrying a query/fragment.
            if 'robot' not in pageurl and '?' not in pageurl and '#' not in pageurl:
            #if 'robot' not in pageurl :
                # Drop the header block: keep from the first '<' onward
                # when one exists (conditional-expression-via-tuple idiom).
                html_start = page.find('<')
                page = (page,page[html_start:])[html_start != -1]


                if html_start != -1:     # ignore if small content or no html tag
                    pageid,thref = ExtractURL(page,pageurl,db)

                    pageid_list.append(str(pageid))
                    totalhref += thref
                    totalpage += 1


    # Stat
    stat.tot_webpage += totalpage
    stat.tot_hyperlink += totalhref

    # add site graph (only when the site yielded more than one page)
    if len(pageid_list) > 1:
        db.addhost(hostid,pageid_list)
        #print hostid,pageid_list
    
    return totalpage,totalhref

if __name__ == "__main__" :
    # Disabled ad-hoc driver kept for reference: it loaded a gzipped
    # crawl archive and timed ExtractSites / ExtractURL against it.
    #gz = gzip.open('../data/big/www.beaucoup.com.gz','rb')
    #gz = gzip.open('../data/big/adsabs.harvard.edu.gz','rb')
    
    #content = gz.read()
    pass
    #content = open('../data/adsabs.harvard.edu','r').read()
    #db = model.Database()
    #start = time.time()
    #page_list = ExtractSites(content,db)
    #print ExtractURL(content,'http://adsabs.harvard.edu',db)

    #print time.time()-start
    #db.close()