from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import urlfetch
from StringIO import StringIO 



import BeautifulSoup
from PyRSS2Gen import RSSItem, Guid
import ScrapeNFeed
import re
#import urllib2

from datetime import date
import sys
import pickle

# Datastore model definitions (Google App Engine db API)
from google.appengine.ext import db

class PickleFile(db.Model):
    """Cached feed state, keyed by site name (key_name).

    Stores the generated RSS XML and the pickled scraper state so a later
    request can resume from the previous run instead of rescraping everything.
    """
    # FIX: the original mixed tabs and spaces in this class body, which only
    # parses under Python 2's tab==8-spaces assumption; normalized to spaces.
    xmlfile = db.BlobProperty()  # generated RSS feed XML
    pfile = db.BlobProperty()    # pickled ScrapeNFeed state
    date = db.DateTimeProperty(auto_now_add=True)  # time of last save

class JobRapidoLinks(db.Model):
	# Job-ad link already emitted in a feed, keyed by its URL (key_name).
	origin = db.StringProperty()  # which feed found it ("JobrapidoErp" / "JobrapidoSap")
	date = db.DateTimeProperty(auto_now_add=True)  # when the link was first seen

class DagospiaLinks(db.Model):
	# Dagospia article link already emitted in the feed, keyed by its URL (key_name).
	date = db.DateTimeProperty(auto_now_add=True)  # when the link was first seen


# Module-level scratch state shared between the feed classes and the request
# handlers: the feeds append newly seen links here, the handlers persist them
# and then reset the lists at the end of each request.
# NOTE(review): module globals persist across requests on a reused App Engine
# instance and are not request-safe -- confirm this is acceptable here.
joblinks = []  # job-ad links newly seen during the current request
artlinks = []  # article links newly seen during the current request
openlink = False  # when True, JobrapidoErpFeed also fetches each ad's own page


class DagospiaFeed(ScrapeNFeed.ScrapedFeed):
    """Scrapes article links from the Dagospia front page into RSS items.

    Links already stored as DagospiaLinks entities are skipped; newly seen
    links are appended to the module-level ``artlinks`` list so the request
    handler can persist them after a successful run.
    """

    def HTML2RSS(self, headers, body):
        """Parse the front-page HTML in ``body`` and add unseen articles as RSS items.

        headers: HTTP response headers from the fetch (unused here).
        body: HTML of the page this feed was loaded from.
        """
        global artlinks
        soup = BeautifulSoup.BeautifulSoup(body)
        # NOTE(review): 'articolo*' matches "articol" plus zero or more 'o's;
        # 'articolo' or 'articolo.*' was probably intended. Kept as-is to
        # preserve the existing matching behavior.
        articoli = soup.findAll('a', href=re.compile('articolo*'))
        items = []
        for articolo in articoli:
            art_link = unicode(self.baseURL + articolo['href'])
            if DagospiaLinks.get_by_key_name(art_link) is not None:
                continue  # already emitted in a previous run
            try:
                art_html = urlfetch.fetch(art_link).content
            # FIX: narrowed the bare 'except:' (which also swallowed
            # KeyboardInterrupt/SystemExit) to Exception.
            except Exception:
                art_content = 'Impossibile scaricare sito'
            else:
                art_soup = BeautifulSoup.BeautifulSoup(art_html)
                art_content = art_soup.find('div', {"class": "articolo"})
            title = unicode(articolo.contents[0])
            items.append(RSSItem(title=title,
                                 description=unicode(art_content),
                                 link=art_link))
            artlinks.append(art_link)
        self.addRSSItems(items)


class JobrapidoErpFeed(ScrapeNFeed.ScrapedFeed):
    """Scrapes job-ad links from a Jobrapido search-results page into RSS items.

    Ads already stored as JobRapidoLinks entities are skipped; newly seen
    links are appended to the module-level ``joblinks`` list so the request
    handler can persist them after a successful run. When the module-level
    ``openlink`` flag is True, each ad's own page is fetched for the item body.
    """

    def HTML2RSS(self, headers, body):
        """Parse the results-page HTML in ``body`` and add unseen ads as RSS items.

        headers: HTTP response headers from the fetch (unused here).
        body: HTML of the search-results page this feed was loaded from.
        """
        global joblinks
        global openlink
        soup = BeautifulSoup.BeautifulSoup(body)
        annunci = soup.findAll('a', id=re.compile('SearchResultSlot'))
        items = []
        for annuncio in annunci:
            annuncio_link = unicode(annuncio['href'])
            annuncio_title = annuncio.contents[0]
            if annuncio_link.find("/") == 0:
                # Relative link: resolve Jobrapido's intermediate page and
                # follow its first anchor to the real ad URL.
                annuncio_link = self.baseURL + annuncio_link
                try:
                    html = urlfetch.fetch(annuncio_link).content
                # FIX: narrowed the bare 'except:' to Exception; on fetch
                # failure we keep the intermediate URL (original behavior).
                except Exception:
                    pass
                else:
                    soup_middle = BeautifulSoup.BeautifulSoup(html)
                    annuncio_link2 = soup_middle.find('a')
                    if annuncio_link2 is not None:
                        annuncio_link = unicode(annuncio_link2['href'])
            if JobRapidoLinks.get_by_key_name(annuncio_link) is None:
                annuncio_html = ''
                if openlink:
                    # Optionally fetch the ad page itself for the description.
                    try:
                        html = urlfetch.fetch(annuncio_link).content
                    except Exception:
                        annuncio_html = 'Impossibile scaricare il sito'
                    else:
                        annuncio_html = BeautifulSoup.BeautifulSoup(html)
                items.append(RSSItem(title=unicode(annuncio_title),
                                     description=unicode(annuncio_html),
                                     link=unicode(annuncio_link)))
                joblinks.append(annuncio_link)
        self.addRSSItems(items)

class Dagospia(webapp.RequestHandler):
    """Serves the Dagospia RSS feed; the '/dagospia/update' path also persists state."""

    def get(self):
        global artlinks
        sitename = "Dagospia"
        url = "http://dagospia.excite.it/"
        # Restore previously saved feed XML and pickled scraper state, if any.
        mypickleclass = PickleFile.get_by_key_name(sitename)
        if mypickleclass:
            myxml = StringIO(mypickleclass.xmlfile)
            mypickle = StringIO(mypickleclass.pfile)
        else:
            myxml = StringIO()
            mypickle = StringIO()
        try:
            DagospiaFeed.load(sitename, url, sitename, myxml, mypickle, 50)
            self.response.out = myxml
        # FIX: narrowed the bare 'except:' to Exception; still best-effort --
        # serve an empty response rather than a 500 on scrape failure.
        except Exception:
            pass
        else:
            if self.request.path == '/dagospia/update':
                # BUG FIX: the buffers are (presumably) positioned at EOF
                # after load() wrote to them, so read() returned '' and empty
                # blobs were stored. getvalue() returns the full contents
                # regardless of the stream position.
                PickleFile(key_name=sitename,
                           xmlfile=myxml.getvalue(),
                           pfile=mypickle.getvalue()).put()
                for art in artlinks:
                    DagospiaLinks(key_name=art).put()
        artlinks = []

class JobrapidoErp(webapp.RequestHandler):
    """Serves the Jobrapido ERP and SAP RSS feeds; '/.../update' paths also persist state."""

    def get(self):
        global joblinks
        global openlink
        sitename = "JobrapidoErp"
        url = "http://www.jobrapido.it/?w=erp&l=lombardia"
        if self.request.path in ('/jobrapidosap', '/jobrapidosap/update'):
            openlink = False  # set to True to fetch each ad's page for its body
            sitename = "JobrapidoSap"
            url = "http://www.jobrapido.it/?w=sap&l=lombardia"
        # Forward the requested result-page number via the 'p' query parameter.
        url = url + "&p=" + self.request.get("p")
        # Restore previously saved feed XML and pickled scraper state, if any.
        mypickleclass = PickleFile.get_by_key_name(sitename)
        if mypickleclass:
            myxml = StringIO(mypickleclass.xmlfile)
            mypickle = StringIO(mypickleclass.pfile)
        else:
            myxml = StringIO()
            mypickle = StringIO()
        try:
            JobrapidoErpFeed.load(sitename, url, sitename, myxml, mypickle, 50)
            self.response.out = myxml
        # FIX: narrowed the bare 'except:' to Exception; still best-effort --
        # serve an empty response rather than a 500 on scrape failure.
        except Exception:
            pass
        else:
            # CONSOLIDATED: the erp/update and sap/update branches were
            # byte-identical duplicates; only one can match per request.
            if self.request.path in ('/jobrapidoerp/update', '/jobrapidosap/update'):
                # BUG FIX: the buffers are (presumably) positioned at EOF
                # after load() wrote to them, so read() returned '' and empty
                # blobs were stored. getvalue() returns the full contents
                # regardless of the stream position.
                PickleFile(key_name=sitename,
                           xmlfile=myxml.getvalue(),
                           pfile=mypickle.getvalue()).put()
                for link in joblinks:
                    JobRapidoLinks(key_name=link, origin=sitename).put()
        joblinks = []
        openlink = False


# URL routing: each feed has a plain path and an '/update' variant that also
# persists scraped state; both variants map to the same handler class.
_ROUTES = [
    ('/dagospia', Dagospia),
    ('/dagospia/update', Dagospia),
    ('/jobrapidoerp', JobrapidoErp),
    ('/jobrapidoerp/update', JobrapidoErp),
    ('/jobrapidosap', JobrapidoErp),
    ('/jobrapidosap/update', JobrapidoErp),
]

application = webapp.WSGIApplication(_ROUTES, debug=True)


def main():
    """CGI entry point: hand the WSGI application to the App Engine runner."""
    run_wsgi_app(application)


if __name__ == "__main__":
    main()

