#!/usr/bin/env python
import BeautifulSoup
from PyRSS2Gen import RSSItem, Guid
import ScrapeNFeed
import re
import urllib2
url = "http://dagospia.excite.it/"

class DagospiaFeed(ScrapeNFeed.ScrapedFeed):
    """Scrape the Dagospia front page into an RSS feed.

    Each anchor whose href looks like an article link becomes one
    RSSItem; the article page is fetched and its 'articolo' div used
    as the item description.
    """

    def HTML2RSS(self, headers, body):
        """Parse the front-page HTML and register the resulting RSS items.

        headers -- HTTP response headers of the front-page fetch (unused).
        body    -- raw HTML of the front page.
        """
        soup = BeautifulSoup.BeautifulSoup(body)
        # NOTE(review): 'articolo*' means "articol" plus zero or more "o"s;
        # the plain substring 'articolo' was probably intended -- confirm
        # against the site's URL scheme before tightening the pattern.
        articoli = soup.findAll('a', href=re.compile('articolo*'))
        items = []
        for articolo in articoli:
            art_link = unicode(self.baseURL + articolo['href'])
            try:
                art_html = urllib2.urlopen(art_link)
            except (urllib2.URLError, ValueError):
                # Bug fix: the original tuple was
                # (urllib2.URLError, urllib2, ValueError) -- a stray comma
                # put the urllib2 *module* in the tuple, which breaks the
                # except clause at match time. urlopen raises URLError for
                # network failures and ValueError for malformed URLs.
                art_content = 'Impossibile scaricare sito'
            else:
                try:
                    art_soup = BeautifulSoup.BeautifulSoup(art_html)
                finally:
                    # Close the HTTP response so the socket isn't leaked.
                    art_html.close()
                art_content = art_soup.find('div', {"class": "articolo"})
            title = unicode(articolo.contents[0])
            items.append(RSSItem(title=title,
                                 description=unicode(art_content),
                                 link=art_link))
        self.addRSSItems(items)

# Build (or refresh) the feed: title, source URL, description, then the
# output XML path, the state pickle path, and the maximum item count.
DagospiaFeed.load("Articoli Dagospia",
                  url,
                  "Tutti gli articoli di Dagospia",
                  '/home/mrivo/public_html/dagospia.xml',
                  '/home/mrivo/public_html/dagospia.pickle',
                  25)
