#
#   api_html.py
#
#   David Janes
#   2009.01.14
#
#	Copyright 2008, 2009 David Janes
#

import os
import sys
import urllib
import types
import pprint
import types

import bm_extract
import bm_uri
import api
import bm_work
import bm_io

from bm_log import Log

class Downloaders(api.APIReader):
	"""Base class for readers that fetch a document (or accept one
	pre-downloaded via the 'value' property) and convert it to a WORK
	dictionary using the class's _convert2work converter.
	"""

	_loader_class = bm_uri.URILoader

	def _set_item_path(self, path):
		self._item_path = path

	#
	#	write-only property: dotted path to the items within the
	#	converted document (was a Py2-only '@apply' property)
	#
	item_path = property(fset = _set_item_path)

	def _set_value(self, value):
		self._raw = value

	#
	#	write-only property: raw document text, bypassing the download
	#
	value = property(fset = _set_value)

	def CustomizeValidate(self):
		"""Skip URI validation when raw document text was supplied directly."""

		if self._raw is not None:
			return

		api.APIReader.CustomizeValidate(self)

	def CustomizeDownloadPage(self):
		"""Called by _IterPage to get the next page"""

		try:
			loader = bm_uri.URILoader(
				#
				#	use a placeholder URI when feeding raw text; the old
				#	'and/or' idiom misbehaved if ConstructPageURI() was falsy
				#
				self.ConstructPageURI() if self._raw is None else "file:///",
				referer = self._http_referer,
				user_agent = self._http_user_agent,
				## authenticate = self._authenticate,
				**self._http_ad
			)
			loader.Load(raw = self._raw)
		except bm_uri.NotModified:
			#
			#	nothing new fetched -- presumably the loader still holds
			#	previously cached content; TODO confirm against bm_uri
			#
			pass
		except bm_uri.DownloaderError:
			raise

		return	self._convert2work.FeedString(loader.GetRaw())

class HTML(Downloaders):
	"""Download an HTML page and convert it to a WORK dictionary.

	The default item_path ("body") points at the document body.
	"""

	_convert2work = bm_work.HTML2WORK(keep_attributes = True)
	_item_path = "body"
	_raw = None

	def __init__(self, **ad):
		Downloaders.__init__(self, **ad)

	def CustomizeDownloadPage(self):
		"""See api.APIReader.CustomizeDownloadPage"""

		try:
			loader = bm_uri.HTMLLoader(
				#	placeholder URI when raw text was supplied via 'value'
				self.ConstructPageURI() if self._raw is None else "file:///",
				referer = self._http_referer,
				user_agent = self._http_user_agent,
				**self._http_ad
			)
			loader.Load(raw = self._raw)
		except bm_uri.NotModified:
			pass
		except bm_uri.DownloaderError:
			#	unlike the base class, a download failure here is not fatal
			return	None

		try:
			d = self._convert2work.FeedString(loader.GetCooked())
			d["headers"] = loader.GetHeaders()

			return	d
		except Exception:
			#
			#	was a bare 'except:' -- narrowed so interpreter-exit
			#	exceptions are not logged as conversion failures;
			#	still logged and re-raised
			#
			Log(exception = True, data = loader.GetCooked())
			raise

class JSON(Downloaders):
	"""Download a JSON document and convert it to a WORK dictionary."""

	_item_path = ""
	_convert2work = bm_work.JSON2WORK()

	def __init__(self, **kwd):
		Downloaders.__init__(self, **kwd)

class XML(Downloaders):
	"""Download an XML document, only using the text between tags as data"""

	_item_path = ""
	_convert2work = bm_work.XML2WORK(keep_attributes = False)

	def __init__(self, **kwd):
		Downloaders.__init__(self, **kwd)

class XMLAttributes(Downloaders):
	"""Download an XML document, preserving attributes (as '@' attributes)"""

	_item_path = ""
	_convert2work = bm_work.XML2WORK(keep_attributes = True)

	def __init__(self, **kwd):
		Downloaders.__init__(self, **kwd)

class XMLAttributesAsText(Downloaders):
	"""Download an XML document, treating attributes as nodes"""

	_item_path = ""
	_convert2work = bm_work.XML2WORK(keep_attributes = True, attribute_prefix = '')

	def __init__(self, **kwd):
		Downloaders.__init__(self, **kwd)

if __name__ == '__main__':
	Log.verbose = True

	#	which of the ad-hoc smoke tests below to run
	to_test = "html_static"

	def _dump_items(apio):
		#	print every item the reader parses out of the document
		for item in apio.items:
			pprint.pprint(item)

	if to_test == "html":
		_dump_items(HTML(
			uri = "http://www.toronto.ca/fire/cadinfo/livecad.htm", 
			item_path = "body.table[2].tr.td[2].table.tr[4].td.table.tr",
		))
	elif to_test == "json_list":
		apio = JSON(
			uri = "http://labs.adobe.com/technologies/spry/data/json/array-03.js",
		)
		_dump_items(apio)
		pprint.pprint(apio.response)
	elif to_test == "json_path":
		_dump_items(JSON(
			uri = "http://www.google.com/calendar/feeds/c4o4i7m2lbamc4k26sc2vokh5g%40group.calendar.google.com/public/full?alt=json-in-script",
			item_path = "feed.entry",
		))
	elif to_test == "html_static":
		_dump_items(HTML(
			value = bm_io.readfile("data/fire.html"),
			item_path = "body.table[2].tr.td[2].table.tr[4].td.table.tr",
		))