import time
import re,gzip
import htmlentitydefs
import urlparse 
import htmldata

import settings
import model


def ConvertURL(siteurl,url):
    '''Normalize a link found on *siteurl* to an absolute URL.

    siteurl -- base URL of the page the link was found on
    url     -- raw href value (absolute or relative)

    Returns:
      - for absolute http/https links: scheme://netloc/path with the
        query string and fragment stripped (crawler canonical form)
      - for scheme-less (relative) links: the result of joining them
        onto *siteurl*
      - None for every other scheme (mailto:, ftp:, javascript:, ...)
    '''
    scheme, netloc, path = urlparse.urlsplit(url)[:3]
    if scheme in ('http', 'https'):
        # Rebuild without query/fragment so equivalent pages dedupe.
        return '%s://%s%s' % (scheme, netloc, path)
    if scheme == '':
        # Relative link: resolve against the page it appeared on.
        return urlparse.urljoin(siteurl, url)
    # Unsupported scheme -- not crawlable.
    return None

def ExtractURL(content,siteurl=None,db=None):
    '''Scan an HTML document for anchor links.

    content -- raw HTML text to scan
    siteurl -- base URL used to absolutize relative hrefs (via ConvertURL)
    db      -- optional store; db.addurl(url) is called once per new URL

    Returns (totalurl, uniqurl): the number of <a ...>...</a> elements
    found, and the number of distinct usable URLs extracted from them.
    Fragment-only links (href="#...") and anchors without an href are
    counted in the total but yield no URL.
    '''
    # Non-greedy '.*?' so two anchors on one line are not merged into a
    # single match (the previous greedy '.*' did exactly that), and '\s'
    # after '<a' so tags such as <abbr ...> no longer match.
    anchor_re = re.compile(r'<a\s[^>]*>.*?</a\s*>', re.IGNORECASE)
    # Extract the href value directly: group(1) is the (optional) quote
    # character, group(2) the URL itself.  Replaces the fragile hand
    # slicing around the first '=' in the tag, which broke whenever any
    # other attribute preceded href.
    href_re = re.compile(r'href\s*=\s*(["\']?)([^"\'>\s]+)\1', re.IGNORECASE)

    anchors = anchor_re.findall(content)
    urllist = []

    for anchor in anchors:
        m = href_re.search(anchor)
        if m is None:
            continue                # anchor without a usable href
        href = m.group(2)
        if href.startswith('#'):
            continue                # in-page fragment link, never a new page

        # Convert relative to absolute path
        url = ConvertURL(siteurl, href)
        if url != None and url not in urllist:
            urllist.append(url)
            if db != None:
                db.addurl(url)

    # return totalurl, uniqurl
    return len(anchors), len(urllist)
    #return len(hreflist)

def ExtractSites(content,db):
    '''Split a concatenated crawl archive into per-page records.

    content -- the whole archive: a sequence of records, each introduced
               by settings.docSeperator, followed by crawler metadata
               lines, an HTTP response header and the page's HTML
               (see the sample layout in the comments below).
    db      -- optional store; db.addurl(value) is called for every
               record's "URL:" metadata line (skipped when db is None).

    Returns a list of [html_start, html_end, header] triples: the two
    offsets delimit the <html>...</html> span inside *content*, and
    header is a dict of lower-cased header keys/values.  Records are
    rejected when they:
      - have no <html> tag within 5000 bytes of the separator
        (logged to the 'bad-pages' file),
      - have no content-type header, or
      - have 'robot' in their URL (robots.txt and the like).
    '''

    # split document into list [start, end, header dictionary]
    page_list = []   # return value
    url_count = 0    # NOTE(review): never incremented -- the ExtractURLs
                     # call that fed it is commented out at the bottom
    # Rejected records are appended here for offline inspection.
    f = open('bad-pages','a')
    for m in re.finditer(settings.docSeperator,content): # docid per content
       
        # Identify start and end position of header and html
        header_start = m.start()
        # Search for '<html' only within 5000 bytes of the separator;
        # -1 means this record carries no HTML at all.
        header_end = content[header_start:header_start+5000].lower().find('<html')
        # NOTE(review): this re.search result is never used, and it
        # clobbers the loop variable m (harmless here, since m.start()
        # was already captured above) -- looks like leftover debug code.
        m = re.search('</?\w+\s+[^>]*>',content[header_start:header_start+2500],re.IGNORECASE)

        if header_end == -1 :   # IF no <html> tag THEN skip it out
            # Log the record's second line -- presumably the "URL:"
            # metadata line (TODO confirm against the archive format).
            # NOTE(review): raises IndexError if the first 1000 bytes
            # contain no newline.
            f.write(content[header_start:header_start+1000].split('\n')[1]+'\n')
            pass
        else:
            # Make the header/html offsets absolute within *content*.
            header_end += header_start
            html_start = header_end
            # +7 keeps the closing '</html>' inside the span.
            html_end = html_start+content[html_start:].lower().find('</html>')+7
        
            # Parse header to dictionary
            header = {}
            # [1:] drops the separator line itself.
            header_txt =  content[header_start : header_end].split('\n')[1:]
 
            # ==P=>>>>=i===<<<<=T===>=A===<=!Junghoo!==>
            # URL: http://www.klainfo.com/
            # Date: 
            # Position: 0
            # DocId: 0
                                                        # step1
            # HTTP/1.1 200 OK
            # Content-Length: 74
            # Content-Type: text/plain
            # Last-Modified: Thu, 01 May 2008 16:47:16 GMT
            # Accept-Ranges: bytes
            # Date: Tue, 16 Sep 2008 01:36:26 GMT
            # Connection: close
                                                        # step2
            # <html>...</html>

            step = 0    # Counting the number of '' or '\r' for stop header
            for h in header_txt :  # omit document seperator
                if h in ['','\r'] :
                    # Blank line: first one ends the crawler metadata,
                    # second one ends the HTTP header (see layout above).
                    step += 1
                    if step == 2:
                        break
                else:
                    # find the key and value
                    # [:-1] strips the trailing ':' from e.g. "URL:";
                    # both key and value are lower-cased for lookup.
                    key = h[0:h.find(' ')][:-1].lower()
                    value = h[h.find(' ')+1:].strip().lower()

                    # Insert document url to DB
                    if db != None and key == 'url' : 
                        db.addurl(value)
                   
                    # recompile http
                    # Any HTTP status line ("HTTP/1.1 200 OK", "HTTP/1.0
                    # ...") is normalized under the fixed key 'http/1.1'.
                    if 'http' in key :  
                        header['http/1.1'] = value
                    else:
                        header[key] = value

            # Finally Process
            # Append parameters to 'page_list'
            # Rejected file
            if not header.has_key('content-type') or 'robot' in header['url']: 
                pass
            else:
                page_list.append([html_start,html_end,header])

            ######################################################################
            # ExtractURLs
            #url_count += ExtractURLs(content[html_start:html_end],header['url'],db) ######################################################################

    #END
    f.close()
    return page_list
    # return len(page_list),urlcount
    # return len(page_list),urlcount

if __name__ == "__main__" :
    # Manual smoke test: run the URL extractor over a locally saved
    # crawl page and show (total anchors, unique URLs).
    sample = open('../data/myhero.com','r').read()
    print(ExtractURL(sample,'http://myhero.com/myhero/'))
