# -*- coding: utf-8 -*-

#This script prints the top 1000 domains with their category according to http://www.alexa.com/topsites
#Purpose of the script: if it makes sense, insert the domains with their category into the db

#Note: for now it is not possible to retrieve the category

#import os
import urllib2
#import sys
#import re
#import string
from BeautifulSoup import BeautifulSoup

from httplib import BadStatusLine

cont = 1

myURL = "http://www.alexa.com/topsites/countries" 

data = urllib2.urlopen(myURL).read()

#print data #stampa di test

soup = BeautifulSoup(data)
for tag in soup.findAll("a", href=True):
    if tag["href"].find("/topsites/countries/") == 0:
        print cont, tag["href"]#stampa di test
        cont += 1

        i = 0
        cont_b = 1
        while i < 20:
            
            line = tag["href"]
            line_a = line[:-3]
            #print line_a #stampa di test
            line_b = line[19:]
            #print line_b #stampa di test
            myURL_b = "http://www.alexa.com" + line_a + ";%s" % (i,) + line_b
            #http://www.alexa.com/topsites/countries;1/AU
            #print myURL_b #stampa di test
            '''user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
            headers = { 'User-Agent' : user_agent }
            r = urllib2.Request(myURL_b, headers=headers)
            data_b = urllib2.urlopen(r)'''
            try:
                data_b = urllib2.urlopen(myURL_b)
            except BadStatusLine, e:
                print e, e.line
                print "could not fetch %s" % myURL_b

            soup_b = BeautifulSoup(data_b)
            
            for tag_b in soup_b.findAll("a", href=True):
                if tag_b["href"].find("/siteinfo/") == 0 and tag_b["href"].find("#") < 0:
                    print "    ",cont_b, tag_b["href"].replace("/siteinfo/","")
                    cont_b += 1
            i += 1


