__author__="Henry Heberle"
__date__ ="$25/10/2010 20:34:29$"
import pg
import feedparser
import nltk
import random
# Mapping from numeric category id to the RSS feed URLs for that category.
# Category codes: 0 technology, 1 entertainment/arts, 2 health,
# 3 business, 4 sports, 5 science, 6 politics.
# (Several wn.com keyword feeds were tried previously and are disabled.)
urls = {
    0: [  # technology
        "http://rss.cnn.com/rss/edition_technology.rss",
        "http://feeds.bbci.co.uk/news/technology/rss.xml",
        "http://feeds.nytimes.com/nyt/rss/Technology",
    ],
    1: [  # entertainment and arts
        "http://rss.cnn.com/rss/edition_entertainment.rss",
        "http://feeds.bbci.co.uk/news/entertainment_and_arts/rss.xml",
        "http://feeds.nytimes.com/nyt/rss/Arts",
        "http://feeds.reuters.com/reuters/artNews",
        "http://feeds.reuters.com/reuters/entertainment",
    ],
    2: [  # health
        "http://feeds.bbci.co.uk/news/health/rss.xml",
        "http://feeds.nytimes.com/nyt/rss/Health",
        "http://feeds.reuters.com/reuters/healthNews",
    ],
    3: [  # business
        "http://rss.cnn.com/rss/edition_business.rss",
        "http://feeds.bbci.co.uk/news/business/rss.xml",
        "http://feeds.nytimes.com/nyt/rss/Business",
    ],
    4: [  # sports
        "http://rss.cnn.com/rss/edition_sport.rss",
        "http://newsrss.bbc.co.uk/rss/sportonline_world_edition/front_page/rss.xml",
        "http://feeds1.nytimes.com/nyt/rss/Sports",
        "http://feeds.reuters.com/reuters/sportsNews",
    ],
    5: [  # science
        "http://feeds.bbci.co.uk/news/science_and_environment/rss.xml",
        "http://rss.cnn.com/rss/edition_space.rss",
        "http://feeds.nytimes.com/nyt/rss/Science",
        "http://feeds.reuters.com/reuters/environment",
        "http://feeds.reuters.com/reuters/scienceNews",
    ],
    6: [  # politics
        "http://feeds.nytimes.com/nyt/rss/Politics",
        "http://feeds.reuters.com/Reuters/PoliticsNews",
    ],
}
# Counters for the run summary printed at the end of the script.
total = 0         # feed entries seen across all feeds
inseridos = 0     # entries successfully inserted into the database
unicodeError = 0  # entries skipped because of a UnicodeError
duplicados = 0    # failed inserts (treated as duplicate news items)
# Open the PostgreSQL connection (PyGreSQL classic interface).
# NOTE(review): credentials are hardcoded in source — consider moving
# them to configuration or environment variables.
db = pg.connect("rssdb", "localhost", 5432, None, None, "rssuser", "password")
for category in urls:
    urls_of_category = urls[category]
    random.shuffle(urls_of_category)
    print urls_of_category
    for url in urls_of_category:
        #print "#url: "+ url
        feed = feedparser.parse(url)
        #print "total no feed "+ url + ": "+ str(len(feed.entries))
        n_item = 0
        for item in feed.entries:
            total+=1
            n_item +=1
            try:
                title = item.title
                updated = item.updated
                link = item.link
                summary = item.summary
                summary = nltk.clean_html(summary)
                text = title + summary;
                text = text.replace(" ","")
                text = text.replace("\n","")
                list = [text, title, summary, category, updated, link]
                try:
                    db.inserttable('rss', [list])
                    inseridos+=1
                except Exception:                  
                    pass                    
                    duplicados+=1                    
                    #print "Erro ao inserir. Provavelmente é notícia duplicada. ###"                    
            except UnicodeError:
                unicodeError+=1
                print "Erro de caractere invalido (UnicodeError)"
                #raw_input()
                pass
            except:
                print "********** Erro desconhecido ***********"
        #print "\n\n url= " + url + " " + str(n_item)
        #raw_input()
db.close()
print
print
print "       total: " + str(total)
print "   inseridos: " + str(inseridos)
print "unicodeError: " + str(unicodeError)
print "  duplicados: " + str(duplicados)
print
punctuations = [']','[','.',',',';','"','\'','?','(',')',':','-','_','`']

