# -*- coding: utf-8 -*-

import re
import sys
import datetime
from PMS import Plugin, Log, XML, Utils, HTTP
from PMS.MediaXML import MediaContainer, DirectoryItem, VideoItem, \
                         SearchDirectoryItem
from PMS.Shorthand import _D, _E, _L, _R

AREENA_PLUGIN_PREFIX = "/video/areena"
AREENA_ROOT = "http://areena.yle.fi/"
AREENA_ASX_DISPATCHER = "http://www.yle.fi/java/areena/dispatcher/"
VER = "&v=t" # fetch text versions of pages
FILTER_VIDEO = "&filter=1,1"
CACHE_HIGH = 86400 # one day
CACHE_MED = 3600 # an hour
CACHE_LOW = 900 # fifteen minutes
CACHE_NONE = 0

dirs = [ ['Kategoriat', 'KATEGORIAT'], [u'Ohjelmat A-Ö', 'OHJELMAT'] ]

##############################################################################
def Start():
    """Plugin entry point: register the request handler and view groups."""
    Plugin.AddRequestHandler(
        AREENA_PLUGIN_PREFIX, HandleVideosRequest,
        "YLE Areena", "icon-default.png", "bg.jpg")
    Plugin.AddViewGroup("InfoList", viewMode="InfoList", contentType="items")
    Plugin.AddViewGroup("List", viewMode="List", contentType="items")

##############################################################################
def utf8decode(s):
    """Re-interpret a latin-1-misdecoded string as UTF-8.

    Areena's pages are UTF-8 but arrive decoded as ISO-8859-1; round-
    tripping through latin-1 bytes recovers the intended text.
    A None argument is returned unchanged.
    """
    if s is None:
        return None
    return s.encode("iso-8859-1").decode("utf-8")

def checkChildCount(dir, pathNouns):
    """
        If dir ended up with no children (and no message was already
        set), attach a user-facing "no content" message appropriate
        for the request type encoded in pathNouns[0].
    """
    # Nothing to do when content exists or a message is already present.
    if dir.ChildCount() != 0 or dir.GetAttr("message") is not None:
        return
    noun = pathNouns[0]
    if noun.startswith("SEARCH"):
        dir.SetMessage("Ei osumia", "Ei osumia annetuilla hakusanoilla.")
    elif noun.startswith("OHJELMAT"):
        dir.SetMessage(u"Ei sisältöä",
                       u"Tällä hetkellä valinnalla ei löydy sisältöä Areenasta. Yritä myöhemmin uudelleen.")
    elif noun.startswith("KATEGORIAT"):
        dir.SetMessage(u"Ei sisältöä",
                       u"Tällä hetkellä valitsemallasi kategorialla ei löydy sisältöä Areenasta. Yritä myöhemmin uudelleen.")

def appendItems(dir, rePattern, path, command, xpath, xpath_item):
    """
        Fetches category or program items and appends them to the
        MediaContainer as DirectoryItems.

        dir        -- MediaContainer the DirectoryItems are appended to.
        rePattern  -- regex whose group(1) captures the item id from an
                      item link's href (a cid or pid number).
        path       -- listing page path relative to AREENA_ROOT.
        command    -- query prefix (e.g. 'hae?cid=') used to fetch each
                      item's info page.
        xpath      -- XPath selecting candidate item nodes on the
                      listing page.
        xpath_item -- XPath, relative to a candidate node, selecting the
                      <a> element carrying the href and display name.

        Unresolvable items are logged and skipped; a missing thumbnail
        or summary falls back to "" / the summary title.
    """
    Log.Add("appendItems: path: %s, command: %s, xpath: %s, xpath_item: %s" %
            (path, command, xpath, xpath_item))
    regexp = re.compile(rePattern, re.IGNORECASE+re.DOTALL+re.MULTILINE)
    # Listing pages change rarely; cache for a full day.
    element = XML.ElementFromString(HTTP.GetCached(AREENA_ROOT+path+VER,
                                                   CACHE_HIGH), True)
    try:
        # Python 2 unicode(): the supplied xpath may contain non-ASCII
        # characters (program names), so decode it before querying.
        xpath = unicode(xpath, "iso-8859-1")
        items = element.xpath(xpath)
    except AttributeError:
        # element is None when the page failed to parse.
        Log.Add("appendItems: Unable to create element from: "+ 
                "%s" % (AREENA_ROOT+path+VER))
        return
        
    for item in items:
        
        try:
            item = item.xpath(xpath_item)[0]
        except IndexError:
            Log.Add("appendItems: Unable to locate item")
            continue
        else:
            uri = item.get('href')
            # Page text arrives latin-1-misdecoded; repair to UTF-8.
            item_name = utf8decode(item.text)
        
        try:
            matches = regexp.search(uri)
        except TypeError:
            # uri is None when the <a> element had no href attribute.
            Log.Add("appendItems: Item URI was not found")
            continue
        
        if matches is None:
            Log.Add('appendItems: Could not add item from: %s' % 
                    (AREENA_ROOT+uri))
            continue
            
        item_id = matches.group(1)
        
        # Fetch item info.
        try:
            info = XML.ElementFromString(HTTP.GetCached(AREENA_ROOT+command+
                                                        item_id+FILTER_VIDEO+
                                                        VER, CACHE_MED),
                                         True)[0]
            # NOTE(review): result is discarded — presumably probes that
            # the expected fragment exists so a broken page is caught
            # here; confirm before removing.
            info.xpath("//div[contains(@class,'fragment1-1')]/div")
        except (IndexError, AttributeError), e:
            Log.Add("appendItems: Unable to fetch info for item: %s." % e)
            continue # Skip this item.
            
        # Fetch thumbnail.
        try:
            thumb = info.xpath("//div[contains(@class,'visio-box')]/img")[0]
        except IndexError:
            Log.Add("appendItems: Unable to fetch thumb for item with id:"+
                    " %s" % item_id)
            thumb = ""
        else:
            thumb = thumb.get("src")
        
        # Fetch summary title.
        try:
            summary_title = info.xpath("//div[contains(@class,"+ 
                                       "'visio-box')]/div/h4")[0].text
        except IndexError:
            Log.Add("appendItems: Unable to fetch summary title for item with"+
                    " id: %s" % item_id)
            summary_title = ""
        else:
            summary_title = utf8decode(summary_title)
        
        # Fetch summary.
        try:
            summary = info.xpath("//div[contains(@class,'visio-box')]"+ 
                                 "/div/p[1]")[0].text
        except IndexError:
            Log.Add("appendItems: Unable to fetch summary for item with id: "+
                    "%s" % item_id)
            summary = summary_title
        else:
            summary = utf8decode(summary)
        
        # Directory key encodes "NAME$ID$PAGE" for the later video
        # listing request (see HandleVideosRequest).
        page = 1
        d = DirectoryItem(item_name+"$"+item_id+"$"+str(page), item_name,
                          thumb, summary)
        d.SetAttr("subtitle", summary_title)
        dir.AppendItem(d)

def appendGroupings(dir, path="selaa"):
    """
        Append the alphabetical program groupings from the browse page
        to the MediaContainer as DirectoryItems.

        Each grouping becomes a DirectoryItem keyed "GROUPID$GROUPNAME".
        Returns None when the listing table cannot be located.
    """
    markup = HTTP.GetCached(AREENA_ROOT+path+VER, CACHE_HIGH)
    element = XML.ElementFromString(markup, True)

    try:
        cells = element.xpath("//table[contains(@class,'productlist')]"+ 
                              "/tbody/tr/td[1]/strong")
    except AttributeError:
        # element is None when the page failed to parse.
        Log.Add("Could not get groupings from: %s" % (AREENA_ROOT+path+VER))
        return None

    for cell in cells:

        name_nodes = cell.xpath(".")
        if not name_nodes:
            Log.Add("appendGroupings: Could not get group name")
            continue
        # Page text arrives latin-1-misdecoded; repair to UTF-8.
        group = utf8decode(name_nodes[0].text)

        anchors = cell.xpath("../a")
        if not anchors:
            Log.Add("appendGroupings: Could not get group id")
            continue
        group_id = utf8decode(anchors[0].get("name"))

        dir.AppendItem(DirectoryItem(group_id+"$"+group, group))

def appendVideos(dir, name, command, id, page=1, filter=FILTER_VIDEO):
    """
        Fetches video information and appends them to the
        MediaContainer as VideoItems.
        
        Also appends DirectoryItems for browsing further content when
        necessary.

        dir     -- MediaContainer the VideoItems are appended to.
        name    -- display name (UTF-8 encoded str) of the category,
                   program or search this listing belongs to.
        command -- query prefix: 'hae?cid=', 'hae?pid=' or 'hae?keyword='.
        id      -- category id, program id or search keyword string.
        page    -- 1-based result page number to fetch.
        filter  -- extra query-string filter; defaults to video-only.

        Side effects: sets dir's "title2" attribute to the listing name
        plus a "(page/pages)" suffix, and appends a "Lisää..." item
        when further result pages exist.
    """
    requestURL = AREENA_ROOT + command + id + filter + "&s=" + str(page) + VER
    Log.Add("appendVideos: %s" % requestURL)
    # Result listings change frequently; use the short-lived cache.
    elString = HTTP.GetCached(requestURL, CACHE_LOW)
    element = XML.ElementFromString(elString, True)
    try:
        items = element.xpath("//li[contains(@class, 'video')]")
    except AttributeError:
        # element is None when the page failed to parse.
        Log.Add("appendVideos: could not get video list for %s." % name)
        return None
    if len(items) == 0:
        # Check if the search query needs to be refined (over 100 results).
        regexp = re.compile(".*Hakuosumia yli 100 kpl.*", re.IGNORECASE+
                             re.MULTILINE+re.DOTALL)
        matches = regexp.search(elString)
        if matches:
            dir.SetMessage("Tarkenna hakua", u"Hakua täytyy tarkentaa, koska "+
                           u"osumia löytyi yli 100 kpl.")
            return

    # Hoisted out of the per-item loop: compile each pattern once.
    video_id_re = re.compile(r'toista\?id=(\d+)', re.IGNORECASE)
    duration_re = re.compile(r'(\d{2,2}\:\d{2,2}\:\d{2,2})')

    for v in items:
                
        # Fetch URI.
        try:
            uri = v.xpath('span[4]/a')[0]
        except IndexError:
            Log.Add("appendVideos: Could not find video page URI.")
            continue # Not much to do without an URI.
        uri = uri.get('href').strip('/')
                    
        # Check whether the item needs a license.
        # Skip this content for now.
        try:
            tvlicense = v.xpath("div/span[contains(@class, 'tvlicense')]")[0]
        except IndexError:
            pass # expected
        else:
            Log.Add("appendVideos: Skipped locked content: "+AREENA_ROOT+uri)
            continue
            
        # We need the video page.
        # This shouldn't change often (at all), thus high cache.
        video_page = XML.ElementFromString(HTTP.GetCached(AREENA_ROOT+uri+VER,
                                                          CACHE_HIGH),
                                           True)
        if video_page is None:
            Log.Add("appendVideos: Unable to fetch data from video page: "+ 
                    AREENA_ROOT+uri+VER)
            continue
        
        # Fetch video id.
        matches = video_id_re.search(uri)
        if matches:
            video_id = matches.group(1)
        else:
            Log.Add("appendVideos: Could not resolve video id from: %s" % uri)
            continue
        
        # Fetch stream URL.
        asx_url = AREENA_ASX_DISPATCHER + video_id + ".asx?bitrate=1000000"
        asx_file = XML.ElementFromURL(asx_url)
        try:
            key = asx_file.xpath("/ASX/ENTRY/REF")[0].get("HREF")
        except (IndexError, AttributeError):
            Log.Add("appendVideos: Could not fetch stream url from: %s" %
                    asx_url)
            continue
        key = key.replace("http", "mms")
        
        Log.Add("appendVideos: Stream URL: %s" % key)
        
        video_note = ""
        # Bug fix: compare with "!= -1" instead of the identity test
        # "is not -1" — small-int identity is a CPython implementation
        # detail, not a language guarantee.
        if key.find("ondemand.asx") != -1:
            video_note = u" (Tämä video ei toimi)"
            Log.Add("appendVideos: Unaccessable stream: %s" % key)
            
        # Fetch thumbnail.
        try:
            thumb = v.xpath('span/a/img')[0]
        except IndexError:
            thumb = None
        else:
            thumb = thumb.get('src')
        
        # Fetch series title.
        try:
            series_title = v.xpath("span[contains(@class, 'title')]/"+
                                   "a")[0].text
        except IndexError:
            series_title = ""
        else:
            series_title = utf8decode(series_title)
            
        # Fetch video title.
        try:
            video_title = v.xpath("span[contains(@class, 'link-to-media')]"+
                                  "/a")[0].text
        except IndexError:
            video_title = ""
        else:
            video_title = utf8decode(video_title)
            
        title = series_title + ' - ' + video_title
            
        # Fetch summary.
        summary = video_page.xpath("//p[@class='clip-description']")
        try:
            summary = summary[0].text
        except IndexError:
            summary = ""
        else:
            summary = utf8decode(summary)
            
        # Fetch duration: scrape an HH:MM:SS timestamp out of the
        # serialized info paragraphs.
        p1 = video_page.xpath("//div[@id='content']/div"+ 
                              "[contains(@class, 'fragment1')]/div[2]/div/p")
        p1_to_string = ""
        for block in p1:
            p1_to_string += XML.ElementToString(block)
        matches = duration_re.search(p1_to_string)
        if matches:
            # Calculate duration in milliseconds.
            duration = matches.group(0)
            duration = duration.split(':')
            duration = int(duration[0]) * 3600 + \
                       int(duration[1]) * 60 + \
                       int(duration[2])
            duration *= 1000
        else:
            duration = 0
        
        video = VideoItem(key, title+video_note, summary, str(duration), thumb)
        video.SetAttr("subtitle", series_title)
        dir.AppendItem(video)
    
    # Check to see if further video pages exist for this category or program.
    pages = page
    try:
        element.xpath("//div[@class='page-numbers']"+
                      "/following-sibling::a[@class='next']")[0]
    except IndexError:
        # Further pages do not exist.
        next_page = page
    else:
        # Further pages exist.
        next_page = page + 1
        dir.AppendItem(DirectoryItem(name.decode("utf-8")+"$"+id+"$"+
                       str(next_page), u"Lisää..."))
                       
    # Fetch total number of pages if necessary.
    if pages != next_page:
        try:
            pages = element.xpath("//div[@class='page-numbers']/a[last()]")[0]
        except IndexError:
            pages = 0
        else:
            pages = int(pages.text)
    if pages > 1:
        pagination = " ("+str(page)+"/"+str(pages)+")"
    else:
        pagination = ""
    dir.SetAttr("title2", name.decode("utf-8")+pagination)    

def HandleVideosRequest(pathNouns, count):
    """
        Main request dispatcher for the Areena plugin.

        pathNouns -- path components after the plugin prefix; most
                     nouns are '$'-separated composites: "VALUE$NAME"
                     for menu items, "NAME$ID$PAGE" for video listings.
        count     -- number of path components.

        Returns the resulting MediaContainer serialized to XML.
    """
        
    # Bugfix: Will force the dictionary (and the HTTP cache) to be saved.
    Plugin.Dict["Now"] = datetime.datetime.now()
    
    Log.Add("pathNouns: %s count: %d" % (pathNouns, count))
    dir = MediaContainer(art="bg.jpg", viewGroup="List", title1="YLE Areena")
    
    # Top level menu.
    if count == 0:
        for (name, val) in dirs:
            dir.AppendItem(DirectoryItem(val+"$"+name, name))
        dir.AppendItem(SearchDirectoryItem("SEARCH$"+_L("Search"),
                                           _L("Haku"),
                                           _L("YLE Areena -haku"),
                                           _R("search.png")))
        
    # Program groupings by first character.
    elif count == 1 and pathNouns[0].startswith("OHJELMAT"):
        dir.SetAttr("title2", u'Ohjelmat A-Ö')
        appendGroupings(dir)
    
    # Category or program listing.
    # NOTE: precedence makes this "count == 1 or (count == 2 and
    # OHJELMAT)"; a count==1 OHJELMAT request was already consumed by
    # the branch above, so count==1 here means KATEGORIAT (or SEARCH,
    # which falls into the bare "return" below — presumably
    # unreachable in practice; confirm).
    elif count == 1 or count == 2 and pathNouns[0].startswith("OHJELMAT"):
        
        # Noun is encoded "VALUE$NAME".
        (v,n) = pathNouns[count-1].split('$')
        
        if v.startswith('KATEGORIAT'):
            Log.Add("KATEGORIAT")
            rePattern = '^/hae\?cid=(\d+)$'
            path = ''
            command = 'hae?cid='
            xpath = "//div[contains(@class,'fragment2')]/div[1]/p[1]/a"
            xpath_item = "."
        elif pathNouns[0].startswith("OHJELMAT"):
            Log.Add("OHJELMAT")
            rePattern = '^/hae\?pid=(\d+)$'
            path = 'selaa'
            command = 'hae?pid='
            # Selects all programs.
            #xpath = "//table[contains(@class,'productlist')]/tbody" \
            #        "/tr/td[1]/a[@name='" + v + "']"
            # Selects only those programs that have videos available.
            xpath = "//table[contains(@class,'productlist')]/tbody" \
                    "/tr/td[position() = 3 and . != '0']/../td[1]" \
                    "/a[@name='" + v + "']"
            xpath_item = "../following-sibling::td/a"
        else:
            # Unknown noun: returns None instead of a container.
            return
        
        title2_prefix = ""
        if pathNouns[0].startswith("OHJELMAT"):
            title2_prefix = "Ohjelmat "
            
        dir.SetAttr("title2", title2_prefix+n.decode("utf-8"))
        dir.SetViewGroup ("InfoList")
        
        appendItems(dir, rePattern, path, command, xpath, xpath_item)
        
        checkChildCount(dir, pathNouns)
    # Video listing.
    elif count >= 2:
        
        dir.SetViewGroup ("InfoList")
        
        if pathNouns[0].startswith('SEARCH') and count < 3:
            # First search request: the raw query is the second noun.
            query = pathNouns[1]
            Log.Add("Search query: %s" % query)
            page = 1
            name = "Haku: " + query
            id = query.replace(" ", "+")
        else:    
            # Paging request: noun is encoded "NAME$ID$PAGE".
            (name, id, page) = pathNouns[count-1].split('$')
            page = int(page)
            
        # NOTE(review): command stays unbound if pathNouns[0] matches
        # none of these prefixes — presumably unreachable; confirm.
        if pathNouns[0].startswith('KATEGORIAT'):
            command = 'hae?cid='
        elif pathNouns[0].startswith('OHJELMAT'):
            command = 'hae?pid='
        elif pathNouns[0].startswith('SEARCH'):
            command = 'hae?keyword='
            
        appendVideos(dir, name, command, id, page)
        
        checkChildCount(dir, pathNouns)
    return dir.ToXML()
