from BeautifulSoup import BeautifulSoup
from google.appengine.api import urlfetch
import re
import copy

VALID_TAGS = ['html','table','tr','th','td']

def FRCLinks_URL_Loader(URL):
	"""Fetch URL, follow its javascript redirect and return the final page body.

	Admittedly, this is a hack. Pat uses window.location= to redirect instead
	of a proper HTTP response, so the easiest way is to scrape the target URL
	out of the first response's markup and fetch that.

	Returns "" when the redirect cannot be found or the second fetch fails.
	"""
	Content = urlfetch.fetch(URL).content
	#Raw string + escaped dots so we only match a literal "window.location".
	match = re.search(r"window\.location.*?=.*?\"(.*)\"", Content)
	#If the page layout changed and there is no redirect, give up quietly
	#instead of raising AttributeError on None.group(1).
	if match is None:
		return ""
	try:
		return urlfetch.fetch(match.group(1)).content
	except Exception:
		#Narrowed from a bare except: still best-effort, but no longer
		#swallows SystemExit/KeyboardInterrupt.
		return ""

class HorizontalParser:
	"""Parses the table-based layout of a 4FX-generated page into dicts.

	The constructor scrubs the raw HTML, runs it through BeautifulSoup and
	collects every <table>; the parse* methods then turn table rows into
	lists of {header: cell-text} dictionaries.
	"""

	def __init__(self, Content):
		#Because 4FX doesn't know how to make clean HTML code we have to beat
		#its brains in with a bat before BeautifulSoup can work its magic.
		myNewMassage = copy.copy(BeautifulSoup.MARKUP_MASSAGE)
		#Drop the doctype declaration entirely.
		Content = re.sub(r"<\!D.*?>", "", Content)
		#Unwrap HTML comments so their contents survive as plain text.
		Content = re.sub(r"<!--(.*?)-->", lambda match: match.group(1), Content)
		#Turn stray "<!" leftovers into "<" and <br> tags into newlines.
		dataMassage = [(re.compile(r'<!'), lambda match: '<'),
		               (re.compile(r'<br.*?>'), lambda match: '\n')]
		myNewMassage.extend(dataMassage)
		soup = BeautifulSoup(Content, markupMassage=myNewMassage)
		#NOTE(review): `tag not in VALID_TAGS` compares Tag objects against
		#strings, so it is True for EVERY tag and all tags get hidden (they
		#render only their inner text). tableByName and getHeaders compare
		#str(cell) against bare text and rely on that tags-stripped
		#rendering, so this expression is deliberately left as-is.
		#(An explicit loop replaces map-for-side-effects, which would be a
		#silent no-op under Python 3's lazy map.)
		for tag in soup.findAll(True):
			tag.hidden = tag not in VALID_TAGS
		#hidden only affects rendering, so findAll still locates the tables.
		self.tables = soup.findAll('table')

	def tableByName(self, name):
		"""Return the first table whose first cell's text equals name, or None."""
		for table in self.tables:
			#Tags are hidden (see __init__), so str() yields just the cell text.
			if str(table.tr.td).strip() == name:
				return table
		return None

	def tableByIndex(self, index):
		"""Return the index+1th table on the page (index 0 is the 1st table)."""
		return self.tables[index]

	def parseByName(self, name, headers=None, skip=0):
		"""Parse the named table; use this one when the table has a title row.

		Returns [] when no table with that name exists (previously this
		crashed with AttributeError).
		"""
		table = self.tableByName(name)
		if table is None:
			return []
		#Because this table has a title we start at row 1 instead of row 0.
		index = 1 + skip
		#If the headers aren't supplied we assume the first non-title row
		#contains them, and skip that row so it isn't processed twice.
		if not headers:
			headers = self.getHeaders(table, index)
			index += 1
		return self.parse(table, index, headers)

	def parseByIndex(self, index, headers=None, skip=0):
		"""Parse the indexed table; use this one when the table lacks a title row."""
		table = self.tableByIndex(index)
		#Because this table lacks a title we start at row 0.
		index = 0 + skip
		#As above: headers default to the first remaining row.
		if not headers:
			headers = self.getHeaders(table, index)
			index += 1
		return self.parse(table, index, headers)

	def getHeaders(self, table, index):
		"""Return the stripped text of each cell in the table's index-th row."""
		rows = table.findAll('tr')
		return [str(cell).strip() for cell in rows[index].findAll('td')]

	def parse(self, table, index, headers):
		"""Zip each row from index onward against headers into a dict per row."""
		rows = table.findAll('tr')
		#Strip any residual markup out of the cell text.
		strip_tags = re.compile(r"<.*?>")
		return [dict(zip(headers,
		                 [strip_tags.sub("", str(cell).strip())
		                  for cell in row.findAll('td')]))
		        for row in rows[index:]]

	def parseAll(self):
		"""Parse every table on the page, in document order."""
		return [self.parseByIndex(i) for i in range(len(self.tables))]