#!/usr/bin/env python

import urllib2
import cookielib
import time
import html5lib
#from lxml import etree
import xml.etree.cElementTree as etree
import BeautifulSoup

"""
Example usage:
print "Getting tema 20 (Dokumentar)"
doc = get_parsed_doc(get_url_project(806))
for prog in get_projects(doc):
	print prog["name"]
"""


#config
host = "www1.nrk.no"
host2 = "www.nrk.no"
speed = 2000

def get_url_base():
	"""Return the root URL of the NRK nett-tv site."""
	return "http://" + host + "/nett-tv"

def get_url_theme(themeId):
	"""Return the URL listing projects under theme *themeId*."""
	return "%s/tema/%i" % (get_url_base(), themeId)

def get_url_letter(letter="@"):
	"""Return the URL listing projects whose name starts with *letter*."""
	return "%s/bokstav/%s" % (get_url_base(), letter)

def get_url_project(projectId):
	"""Return the URL of the project (program) page for *projectId*."""
	return "%s/prosjekt/%i" % (get_url_base(), projectId)

def get_url_clip(clipId):
	"""Return the URL of the clip page for *clipId*."""
	return "%s/klipp/%i" % (get_url_base(), clipId)

def get_url_clip_index(indexId):
	"""Return the URL of the clip-index page for *indexId*."""
	return "%s/indeks/%i" % (get_url_base(), indexId)

def get_url_podcasts():
	"""Return the URL of the podcast overview page."""
	return "http://" + host2 + "/podkast/"

def get_url_mediaxml(clipId):
	"""Return the getmediaxml URL for *clipId* at the configured speed."""
	query = "id=%s&hastighet=%i&vissuper=True" % (clipId, speed)
	return get_url_base() + "/silverlight/getmediaxml.ashx?" + query

class fragment:
	"""Values accepted by the ``type`` query parameter of
	menyfragment.aspx (see get_url_menufragment)."""
	project = "project"
	category = "category"
	clip = "broadcast"  # note: the site calls clip fragments "broadcast"

class menu_item:
	"""Item types returned in the "type" field of get_menu_item_info,
	derived from the item's link URL."""
	category = "category"
	clip = "clip"
	index = "index"

def get_url_menufragment(fragmentType, fragmentId):
	"""Return the menu-fragment URL for *fragmentId* of the given
	fragment type (one of the ``fragment`` class constants)."""
	query = "type=%s&id=%i" % (fragmentType, fragmentId)
	return get_url_base() + "/menyfragment.aspx?" + query

#Common routines for all nrk docs
def get_raw_doc(url):
	"""Fetch *url* and return the response body decoded as UTF-8
	(undecodable bytes are replaced).

	Raises RuntimeError when the server returns an empty document.
	Side effect: installs a process-wide urllib2 opener whose cookie
	jar is pre-loaded with cookies that make the site skip its
	speed test."""
	cj = cookielib.LWPCookieJar()
	opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
	# NOTE(review): install_opener is global -- every later
	# urllib2.urlopen call in the process will carry these cookies
	urllib2.install_opener(opener)
	
	#Add cookies to skip speed-test
	#print "Setting cookies"
	# NOTE(review): the cookie values repeat the cookie name
	# ("NetTV2.0Speed=2000", "UseSilverlight=0") -- presumably what the
	# site expects, but looks odd; confirm against live traffic
	cj.set_cookie(create_cookie(name="NetTV2.0Speed", value="NetTV2.0Speed=%s" % speed))
	cj.set_cookie(create_cookie(name="agtest", value="tested"))
	cj.set_cookie(create_cookie(name="UseSilverlight", value="UseSilverlight=0"))

	#get document
	result = urllib2.urlopen(url)
	resultText = unicode(result.read(), "utf-8", "replace")
	if len(resultText) == 0:
		raise RuntimeError("Url %s return empty document" % (url,))

	return resultText

def get_parsed_doc(url):
	"""Download *url* (see get_raw_doc) and return it parsed as HTML."""
	return parse_doc(get_raw_doc(url))

def parse_doc(docText):
	"""Parse the HTML text *docText* with html5lib into a
	BeautifulSoup tree."""
	builder = html5lib.treebuilders.getTreeBuilder("beautifulsoup")
	return html5lib.HTMLParser(tree=builder).parse(docText)


#podcasts
def get_menu_podcasts():
	"""Scrape the podcast overview page and return
	{"video": [...], "audio": [...]} where each list holds feed infos
	in the format of get_podcast_rss_list.  The first "pod" div on the
	page is video, the second audio."""
	doc = get_parsed_doc(get_url_podcasts())
	pods = doc.findAll("div", attrs={'class': 'pod'})
	return {
		"video": get_podcast_rss_list(pods[0]),
		"audio": get_podcast_rss_list(pods[1]),
		}

def get_podcast_rss_list(podNode):
	"""Extract podcast feed entries from one "pod" div.

	Each <tbody> row yields a dict with name, description, programUrl
	and rssUrl; rows without a pod-rss-url link are skipped.  summary
	and imageUrl are always None (not available on this page)."""
	entries = []
	for row in podNode.findAll("tbody"):
		name = unicode(row.find("th").renderContents(), "utf-8")

		descriptNode = row.find("p")
		anchor = descriptNode.find("a")
		programUrl = anchor["href"] if anchor else u""
		#description is the bare text of the <p>, skipping child tags
		textParts = [c for c in descriptNode.contents
				if c.__class__ == BeautifulSoup.NavigableString]
		description = u"".join(textParts)

		#rows without a feed link are useless -- drop them
		linkRow = row.find("tr", attrs={"class": "pod-rss-url"})
		if not linkRow:
			continue

		entries.append({
			"name": name,
			"description": description,
			"summary": None,
			"imageUrl": None,
			"programUrl": programUrl,
			"rssUrl": linkRow.find("a")["href"],
			})
	return entries


#themes (temaer)
def get_menu_themes():
	"""
	List all themes (temaer).  Fetches the letter page (letter "-1"
	shows everything) and scrapes the theme menu from it.
	"""
	doc = get_parsed_doc(get_url_letter(letter="-1"))
	return get_themes(doc)

def get_themes(doc):
	"""
	Return theme infos (see get_theme_info) scraped from the theme
	menu node of *doc*.  Works on any page carrying the standard
	top menu.
	"""
	themeRoot = doc.find(id="ctl00_ucTop_themes")
	return [get_theme_info(li) for li in themeRoot.findAll("li", recursive=False)]

def get_theme_info(themeNode):
	"""
	Parse one theme <li> into {"name", "url", "id"}.  The theme id is
	the last path segment of the link's relative href.
	"""
	anchor = themeNode.a
	relurl = anchor["href"]
	return {
		"name": unicode(anchor.renderContents(), "utf-8"),
		"url": "http://%s%s" % (host, relurl),
		"id": int(relurl.split("/")[-1]),
		}


#projects (program)
def get_menu_projects(letter="@"):
	"""
	List all projects (program) starting with *letter*.
	If letter == "@", all programs are returned
	"""
	doc = get_parsed_doc(get_url_letter(letter))
	return get_projects(doc)

def get_menu_theme_projects(theme):
	"""
	List all projects (program) in theme (tema) *theme*
	"""
	doc = get_parsed_doc(get_url_theme(theme))
	return get_projects(doc)

def get_projects(doc):
	"""Return project infos (see get_project_info) scraped from a
	project listing page."""
	projectRoot = doc.find(id="live-content2")
	return [get_project_info(node) for node in projectRoot.findAll("div", recursive=False)]
		
def get_project_info(programNode):
	"""
	Parse one project <div> into {"name", "description", "image", "id"}.
	The project id is the last path segment of the link's relative href.
	"""
	link = programNode.div.a
	relurl = link["href"]
	return {
		"name": unicode(programNode.h2.a.renderContents(), "utf-8"),
		"description": link["title"],
		"image": link.img["src"],
		"id": int(relurl.split("/")[-1])
		}


#project menu
def get_menu_categories(projectId):
	"""
	Returns a list of category infos for a given program
	"""
	doc = get_parsed_doc(get_url_menufragment(fragment.project, projectId))
	return get_menu_items(doc)


def get_menu_clips(categoryId):
	"""
	Returns a list of program clips for the given category
	"""
	doc = get_parsed_doc(get_url_menufragment(fragment.category, categoryId))
	return get_menu_items(doc)


def get_menu_clip_indices(clipId):
	"""
	Returns a list of clip indices for the given clip
	"""
	doc = get_parsed_doc(get_url_menufragment(fragment.clip, clipId))
	return get_menu_items(doc)


def get_menu_items(menuDoc):
	"""
	Parse a menu-fragment document (from get_url_menufragment) into a
	list of menu item dicts (see get_menu_item_info).  Categories,
	clips and indices are similar enough to share one format.
	"""
	nodes = menuDoc.html.body.findAll("li", recursive=False)
	return [get_menu_item_info(node) for node in nodes]

def get_menu_item_info(node):
	"""
	Parse one menu <li> into a dict with keys name, type, mediaType,
	isLeaf, description and id.

	type is one of the menu_item constants, derived from the link URL;
	id is the last path segment of that URL.  mediaType is "video" or
	"audio" for clips and indices, None for categories.  isLeaf is True
	when the node carries the "noexpand" class.

	Raises RuntimeError when the item URL or the clip's media type
	cannot be recognized.
	"""
	anchor = node.findAll("a", recursive=False)[-1]
	name = unicode(anchor.renderContents(), "utf-8")
	description = anchor["title"]
	relurl = anchor["href"]
	isLeaf = node["class"] == "noexpand"

	#locals renamed from `type`/`id` to avoid shadowing builtins
	if "/kategori/" in relurl:
		itemType = menu_item.category
	elif "/klipp/" in relurl:
		itemType = menu_item.clip
	elif "/indeks/" in relurl:
		itemType = menu_item.index
	else:
		#previously this fell through and crashed later with an
		#UnboundLocalError; fail with a descriptive error instead
		raise RuntimeError("Unknown menu item url: %s" % (relurl,))
	itemId = int(relurl.split("/")[-1])

	mediaType = None
	if itemType == menu_item.clip or itemType == menu_item.index:
		classNames = anchor["class"].split(" ")
		#BUGFIX: the original tested `or "icon-videoindex-black"` -- a
		#non-empty string literal that is always true -- so every item
		#was classified as video and the audio branch was unreachable
		if "icon-video-black" in classNames or "icon-videoindex-black" in classNames:
			mediaType = "video"
		elif "icon-sound-black" in classNames:
			mediaType = "audio"
		else:
			raise RuntimeError("Unknown clip type (classnames: %s)" % (classNames,))

	return {
		"name": name,
		"type": itemType,
		"mediaType": mediaType,
		"isLeaf": isLeaf,
		"description": description,
		"id": itemId,
		}




#---- get url of actual clips -----
def get_asx_url_clip(clipId):
	"""Return the asx playlist URL embedded in the player object of the
	clip page for *clipId*."""
	doc = get_parsed_doc(get_url_clip(clipId))
	return doc.find("object", id='ctl00_ucPlayer_Player')["url"]

def get_asx_url_clip_index(clipIndexId):
	"""Return the asx playlist URL embedded in the player object of the
	clip-index page for *clipIndexId*."""
	doc = get_parsed_doc(get_url_clip_index(clipIndexId))
	return doc.find("object", id='ctl00_ucPlayer_Player')["url"]

def get_asx(asxUrl):
	"""
	Fetch and parse an asx playlist.  Returns a dict with the entry's
	name, description, starttime and duration (None when the playlist
	lacks them) and the stream url.
	"""
	doc = get_parsed_doc(asxUrl)
	entry = doc.entry
	name = unicode(entry.title.renderContents(), "utf-8")
	description = unicode(entry.abstract.renderContents(), "utf-8")

	starttime = entry.starttime["value"] if entry.starttime else None
	duration = entry.duration["value"] if entry.duration else None

	return {
		"name": name,
		"description": description,
		"starttime": starttime,
		"duration": duration,
		"url": entry.ref["href"],	#.replace("mms://", "http://")
		}
	


	
#---- utilities -----
def create_cookie(name, value, domain=None, expires=None):
	"""
	Build a cookielib.Cookie for *name*/*value*.

	domain defaults to the nrk host; expires defaults to a unix
	timestamp 24 hours from now.
	"""
	domain = domain or host
	expires = expires or int(time.time()) + 24*3600

	return cookielib.Cookie(
		version=0,
		name=name,
		value=value,
		port=None,
		port_specified=False,
		domain=domain,
		domain_specified=False,
		domain_initial_dot=False,
		path="/",
		path_specified=True,
		secure=False,
		expires=expires,
		discard=False,
		comment=None,
		comment_url=None,
		rest={'HttpOnly': None},
		rfc2109=False)



