import os, gzip, re, time, bsddb
from urllister import URLLister

# __INPUT__ = 'WebBase.small'
__INPUT__ = 'WebBase.medium'

# Separator line that WebBase inserts between archived documents.
doc_sep = '==P=>>>>=i===<<<<=T===>=A===<=!Junghoo!==>>'

# BUG FIX: the old code opened the database first and then ran
# `rm -r docid.db` on the already-open file, so every id written was lost
# when the handle was closed.  Delete any stale database BEFORE opening so
# each run starts fresh AND the result actually persists on disk.
if os.path.exists('docid.db'):
    os.remove('docid.db')
db = bsddb.btopen('docid.db', 'c')
# Iterate over the files in the input directory (each filename is the hostname, gzipped)
for hostname in os.listdir(os.path.join(os.path.split(os.getcwd())[0],__INPUT__)):
    # reset
    doc_content = [] # [start_pos, end_pos, doc_url]
    parser = URLLister()
    urllist = set([])

    # start performance evaluation
    start_t = time.time()

    # open file with gzip and
    # read content which be extracted from .gz
    f = gzip.open(os.path.join(os.path.split(os.getcwd())[0],__INPUT__,hostname),'rb')  
    content = f.read() 
    f.close()

    # split document
    for docid , m in zip(range(1000) , re.finditer(doc_sep,content)): # docid per content
        # parse url from header URL: http://...
        tmp = re.search(r'URL:.*.\n',content[m.end():m.end()+500])
        doc_url = tmp.group(0).split(' ')[1][:-1] 

        # parse doc_content into position
        if docid == 0 :
            doc_content.append([m.start(),0,doc_url])
        else:
            doc_content.append([m.start(),0,doc_url])
            doc_content[docid-1][1] = m.start()

    # extract url per page
    count =0
    for start,end,doc_url in doc_content:
        parser.feed(content[start:end])
        for u in parser.urls :
            if u != "" :
                key = ('http://' + hostname[:-3] + u , ('/'+u,u)[ u[0] == '/'] )['http://' in u]
                if not db.has_key(key) :
                    if len(db.keys()) == 0:
                        db[key] = '0'
                    else :
                        db[key] = str( int( db.last()[1] )+1 )
                    count = count + 1
    print count,hostname
print db.last()    
db.close() 
