# -*- coding: utf-8 -*-
#import gtk
import comm
import config
import classes
import mc
from BeautifulSoup import BeautifulStoneSoup
import json
#from mechanize._beautifulsoup import BeautifulStoneSoup

def parse_config(soup):
	"""	There are lots of goodies in the config we get back from the ABC.
		In particular, it gives us the URLs of all the other XML data we
		need.
	"""

	soup = soup.replace('&amp;', '&#38;')

	xml = BeautifulStoneSoup(soup)

	# should look like "rtmp://cp53909.edgefcs.net/ondemand"
	rtmp_url = xml.find('param', attrs={'name':'server_streaming'}).get('value')
	categories_url = str(xml.find('param', attrs={'name':'categories'}).get('value'))
	highlights_url = str(xml.find('param', attrs={'name':'highlights'}).get('value'))
	print "RTMP URL: " + str(rtmp_url)
	categories_url = "http://www.abc.net.au/iview/" + categories_url
	print "CAtegories: " + categories_url
	rtmp_chunks = rtmp_url.split('/')

	return {
		'rtmp_url'  : rtmp_url,
		'rtmp_host' : rtmp_chunks[2],
		'rtmp_app'  : rtmp_chunks[3],
		'categories_url' : categories_url,
		'highlights_url' : highlights_url,
	}

def parse_auth(soup):
	"""	Parse the auth handshake response. Of everything it returns
		we only care about the RTMP streaming details, the auth token,
		and whether the connection is unmetered ('free').
	"""

	xml = BeautifulStoneSoup(soup)

	# should look like "rtmp://203.18.195.10/ondemand"
	rtmp_url = xml.find('server').string

	playpath_prefix = ''

	if rtmp_url is None:
		# A bland generic ISP served by Akamai, or iiNet: fall back to
		# the streaming details from the global iView config.
		if not comm.iview_config:
			comm.get_config()

		playpath_prefix = config.akamai_playpath_prefix

		rtmp_url = comm.iview_config['rtmp_url']
		rtmp_host = comm.iview_config['rtmp_host']
		rtmp_app = comm.iview_config['rtmp_app']
	else:
		# Directed to a custom streaming server (i.e. unmetered services).
		# Currently this is Hostworks for all unmetered ISPs except iiNet.
		pieces = rtmp_url.split('/')
		rtmp_host = pieces[2]
		rtmp_app = pieces[3]

	token = xml.find("token").string
	token = token.replace('&amp;', '&') # work around BeautifulSoup bug

	return {
		'rtmp_url'        : rtmp_url,
		'rtmp_host'       : rtmp_host,
		'rtmp_app'        : rtmp_app,
		'playpath_prefix' : playpath_prefix,
		'token'           : token,
		'free'            : (xml.find("free").string == "yes")
	}

def parse_index(xml, useJSON=False):
	"""	Parse the index: the overall listing of every programme
		available in iView. The index is divided into 'series' and
		'items' -- series are things like 'beached az', items are
		things like 'beached az Episode 8'. Only series with at least
		one watchable episode are kept.
	"""
	programme = []

	if useJSON:
		for entry in json.loads(xml):
			show = classes.Series()
			# The current (messy) ABC feed uses single-letter keys:
			# a = series id, b = title, e = keywords, f = episode list.
			# (The original feed used 'id'/'title'/'keywords'/'episodes'.)
			show.title = str(entry["b"])
			show.keywords = str(entry["e"])
			show.seriesID = str(entry["a"])

			if len(entry["f"]) > 0:
				programme.append(show)
	else:
		soup = BeautifulStoneSoup(str(xml))

		for node in soup('series'):
			show = classes.Series()
			show.title = strip_CDATA(str(node('title')[0].contents[0]))
			show.keywords = str(node('keywords')[0].contents[0])
			show.seriesID = str(node['id'])

			# We're only interested in shows with watchable episodes.
			if len(node('item')) > 0:
				programme.append(show)

	return programme

def parse_series_items(raw_json):
	"""	Parse a single series' JSON listing and return its episodes
		as a list of classes.Program objects. The episode list lives
		under key 'f'; per episode: a = id, b = series title,
		c = episode title (may be absent), d = description.
	"""
	decoded = json.loads(raw_json)
	episodes = []

	for episode in decoded[0]["f"]:
		prog = classes.Program()
		prog.episodeID = str(episode["a"])

		series_title = strip_CDATA(str(episode["b"]))
		# Not all items have an episode title; fall back to empty.
		episode_title = str(episode.get("c", ""))
		prog.title = series_title + ": " + episode_title

		prog.description = str(episode["d"])
		# No direct video asset in the feed -- link to the programme page.
		prog.videoAsset = 'http://www.abc.net.au/iview/#/program/' + prog.episodeID
		prog.thumbnailURL = ""

		episodes.append(prog)

	return episodes

def parse_highlights(xml):
	"""	Parse the highlights XML into a list of classes.Series,
		filling in title, thumbnail URL, keywords and series id.
	"""
	soup = BeautifulStoneSoup(xml)
	highlightList = []

	for node in soup('series'):
		entry = classes.Series()
		entry.title = str(strip_CDATA(node('title')[0].contents[0]))
		entry.thumbURL = str(node('thumb')[0].contents[0])
		# NOTE: this stores the BeautifulSoup result list, not a string,
		# matching how callers have always received it.
		entry.keywords = node('keywords')
		entry.seriesID = str(node['id'])
		highlightList.append(entry)

	return highlightList

def parse_categories(xml):
	# soup = BeautifulStoneSoup(xml)
	xml = xml.replace("\n", "")
	xml = xml.replace("\t", "")
	xml = xml.replace('\^M', "")
	xml = xml.replace("\^M", "")
	print "length of xml: " + str(len(xml))
	xml = xml.replace(xml[38], "")
	
	from xml.dom.minidom import parseString
	
	doc = parseString(xml)
	

	categories = {}
	subcategories = {}
	subIDs = []

	for category in doc.getElementsByTagName("category"):
		if (not category.getAttribute("id") == "test") and (not category.getAttribute("id") in subIDs):
			#print category.getAttribute("id")
			tempCategory = classes.Category()
			tempCategory.categoryID = str(category.getAttribute("id"))
			if category.getAttribute("genre") == "true": tempCategory.isGenre = True
			tempCategory.name = category.firstChild.firstChild.nodeValue
			#tempCategory.series = []
			#tempCategory.children = []

			if tempCategory.isGenre:
				for subCategory in category.getElementsByTagName("category"):
					tempSubCategory = classes.Category(isSub = True)
					tempSubCategory.categoryID = str(subCategory.getAttribute("id"))
					tempSubCategory.name = str(subCategory.firstChild.firstChild.nodeValue)
					tempSubCategory.parent = tempCategory
					tempCategory.children.append(tempSubCategory)

					#print "\tFound a sub-category: " + tempSubCategory.name
					subIDs.append(subCategory.getAttribute("id"))
					subcategories[tempSubCategory.categoryID] = tempSubCategory

			categories[tempCategory.categoryID] = tempCategory
			
	return (categories, subcategories)

def parse_handshake(xml):
	
	soup = BeautifulStoneSoup(xml)

	token = soup.find("token").string
	token = token.replace('&amp;', '&') # work around BeautifulSoup bug

	print "token: " + str(token)
	return str(token)

def strip_CDATA(string):
	"""	Return *string* with any CDATA wrapper markers
		('<![CDATA[' and ']]>') removed.
	"""
	return string.replace('<![CDATA[', '').replace(']]>', '')