import os
import gzip
import re
import time
import bsddb
import struct
import sys
from util import *

# Output files produced by this run: the URL key/value store and the
# (currently unused) binary web-graph file.
__DB__ = 'db.url'
__WEBGRAPH__ = 'db.webgraph'

# Input corpus lives next to the project root (parent of the cwd).
# Index 0 selects the small 1000-host sample, index 1 the full data set.
_DATA_DIRS = ('data/1000/', 'data/big/')
__INPUT__ = os.path.join(os.path.split(os.getcwd())[0], _DATA_DIRS[1])

# Only documents whose Content-Type is one of these get their URLs parsed.
validContentTypes = ('text/html', 'text/plain', 'text', 'application/xhtml+xml')

def main():
    # open connection
    webgraph = open(__WEBGRAPH__,'wb')
    DB = bsddb.btopen(__DB__,'c')
    
    page_no = 0

    # List file in DataTest (hostname == filename)
    for id,hostname in zip(range(1000),os.listdir(__INPUT__)):
        start_t = time.time()

        # extract from .gz 
        try:
            site_content = gzip.open(__INPUT__+hostname,'rb').read()
            print '>>> ' + hostname
        except IOError :
            print 'ERROR> ' + hostname + ' is not a gzip format.'
        
        # split_document into list [start, end, header dictionary]
        parsedoc_time = time.time()
        doc_list = ParseDoc(site_content, DB)
        page_no += len(doc_list)
        print '>>> ParseDoc in %f' % ( time.time()-parsedoc_time )

        # parse url
        for start,end,header in doc_list:
            # ParseURL only text file
            # print header['Content-Type'],header['Content-Type'].find('text')
            if header.has_key('Content-Type') :
                if header['Content-Type'] in validContentTypes : 
                    print '>>> ParseURL '+header['URL'] ,
                    parseurl_time = time.time()
                    urllist = list(ParseURL( site_content[start:end] , hostname , DB ))
                    print 'Done'
                    print '\t%d urls \t %d bytes \t %f s' % ( len(urllist) , end-start , time.time()-parseurl_time )

                    #webgraph.write(struct.pack('i', int(DB[header['URL']]) ))
                    #webgraph.write(struct.pack('i', len(urllist) ))
                    #for url in urllist:
                        #webgraph.write(struct.pack('i', int( DB[url] ) ))

            else:
                print 'ERROR> '+header['URL'] + ' : No Content-Type'

        print '>>> time usage:',time.time()-start_t,hostname,'has',len(doc_list),'pages'
    print len(DB),'url in DB'
    print page_no,' pages'

    DB.close()

if __name__ == "__main__" :
    os.system('rm db.*')
    start_t = time.time()
    main()
    print 'Overall time usage:',time.time()-start_t,'sec'
    os.system('rm *.pyc')

