import os 

########################################################################################

# PATH configuration.
# Selects the project root and input-data directory for the current platform,
# then derives `dataset` — the name of the per-dataset output directory that
# every db_* path below is placed under.
dirProject = ''
dirInput = ''
dataset = ''

if os.name == 'nt' :
    # Windows: hard-coded project drive/path.
    dirProject = 'I:\\kla_webgraph\\'
    dirInput = os.path.join(dirProject,
        ('',
         'data\\wbCt100Sep08\\',
         'data\\crawl-2009-10\\',
        )[1]        # tuple index selects the active dataset
    )
    # BUGFIX: `dataset` was previously left empty on Windows, which made all
    # output paths below resolve to the filesystem root (e.g. '/webgraph.bin').
    # Take the last non-empty backslash-separated component instead.
    dataset = dirInput.rstrip('\\').split('\\')[-1]
else:
    # POSIX: project root is the parent of the current working directory.
    dirProject = '/'.join(os.getcwd().split('/')[:-1])
    dirInput = os.path.join(dirProject,
        ('',
         'data/wbCt100Sep08/',
         'data/wbCt100Oct08/',
         'data/wbCt1000Sep08/',
         'data/th-test/',
         'data/big/',
         'data/crawl-2009-10/',
        )[1]        # tuple index selects the active dataset
    )
    # Last non-empty path component, e.g. 'wbCt100Sep08' (trailing '/' in the
    # tuple entries means the reversed split is ['', '<dataset>', ...]).
    dataset = dirInput.split('/')[::-1][1]
    # BUGFIX: create the output directory first, then clear stale outputs.
    # The original order (rm before mkdir) always failed on a fresh checkout;
    # -p / -f make both steps idempotent across reruns.
    os.system('mkdir -p ' + dataset)
    os.system('rm -f *.pyc ' + dataset + '/*')


########################################################################################


# Database file locations — every artifact lives inside the per-dataset
# output directory named by the module-global `dataset`.

# pre processing  -> binary storage: src id | outdegree | list of dest id
db_webgraph = '%s/webgraph.bin' % dataset

# post processing -> binary storage: dest id | indegree | list of src id
db_transposegraph = '%s/transposegraph.bin' % dataset

# website id | number of webpage | webpage id list
db_hostgraph = '%s/hostgraph.bin' % dataset

# total | list of dangling node
db_danglingnode = '%s/danglingnode.bin' % dataset

# pre processing -> bsddb: url | id
db_urlid = '%s/urlid.bdb' % dataset

# pre processing -> bsddb: id | url
db_idurl = '%s/idurl.bdb' % dataset

# Aggregate run statistics, serialized as JSON.
db_stat = '%s/stat.json' % dataset

# Plain-text run log.
logfilename = '%s/log.txt' % dataset

# Marker string separating documents in the raw crawl dump.
# (Name spelling 'docSeperator' kept as-is: other modules may reference it.)
docSeperator = '==P=>>>>=i===<<<<=T===>=A===<=!Junghoo!==>'

# Content-Type values accepted when ingesting crawled pages.
validContentTypes = ('text/html', 'text', 'text/plain', 'application/xhtml+xml')

# maximum tar .gz — presumably an upper bound on hosts handled per archive;
# TODO(review): confirm the exact meaning against the consumer of this value.
maximumHost = 1000000

# Echo the selected input directory at import time (diagnostic).
# Parenthesized form prints identically under Python 2 (parens are grouping
# around a single argument) and is valid Python 3, unlike the old statement form.
print(dirInput)
