# -*- coding: utf-8 -*-

import re
import sys
import datetime
from PMS import Plugin, Log, XML, Utils, HTTP
from PMS.MediaXML import MediaContainer, DirectoryItem, VideoItem
from PMS.Shorthand import _E, _D

# URL prefix under which this plugin's requests are registered.
AREENA_PLUGIN_PREFIX = "/video/areena"
# Base URL of the YLE Areena site; all scraped paths are relative to it.
AREENA_ROOT = "http://areena.yle.fi"
# Dispatcher that serves the per-video .asx playlist files.
AREENA_ASX_DISPATCHER = "http://www.yle.fi/java/areena/dispatcher"
# HTTP cache lifetimes in seconds, by how often the content changes.
CACHE_HIGH = 86400 # one day
CACHE_MED = 3600 # an hour
CACHE_LOW = 900 # fifteen minutes

# Top-level menu entries: [display name, path-noun prefix].
dirs = [ ['Kategoriat', 'KATEGORIAT'], [u'Ohjelmat A-Ö', 'OHJELMAT'] ]

##############################################################################
def Start():
    """
        Plugin entry point: registers the request handler and the two
        view groups ("InfoList" and "List") used by the containers.
    """
    Plugin.AddRequestHandler(AREENA_PLUGIN_PREFIX, HandleVideosRequest,
                             "YLE Areena", "icon-default.png", "bg.jpg")
    for group in ("InfoList", "List"):
        Plugin.AddViewGroup(group, viewMode=group, contentType="items")

##############################################################################
def utf8decode(s):
    """
        Repairs mojibake: the site's pages hold UTF-8 bytes that the XML
        layer decoded as ISO-8859-1.  Re-encoding to Latin-1 recovers the
        original bytes, which are then decoded as UTF-8.  None is passed
        through unchanged.
    """
    if s is None:
        return None
    return s.encode("iso-8859-1").decode("utf-8")

def appendItems(dir, rePattern, path, command, xpath, xpath_item):
    """
        Fetches category or program items and appends them to the
        MediaContainer as DirectoryItems.

        dir        -- MediaContainer the DirectoryItems are appended to.
        rePattern  -- regex whose first group captures the item id from
                      the item link's href attribute.
        path       -- listing page path, relative to AREENA_ROOT.
        command    -- query prefix used to fetch each item's info page.
        xpath      -- XPath selecting the candidate item nodes.
        xpath_item -- XPath, relative to each candidate, to its <a> link.
    """
    Log.Add("appendItems: %s" % (path+command))
    # NOTE(review): flags combined with '+' rather than '|'; works only
    # because the flag values are distinct bits.
    regexp = re.compile(rePattern, re.IGNORECASE+re.DOTALL+re.MULTILINE)
    element = XML.ElementFromString(HTTP.GetCached(AREENA_ROOT+path,
                                                   CACHE_HIGH), True)
    try:
        items = element.xpath(xpath.decode('utf-8'))
    except AttributeError:
        # element lacks .xpath when the page could not be fetched/parsed.
        Log.Add("appendItems: Unable to create element from: "+ 
                "%s" % (AREENA_ROOT+path))
        return
    
    for item in items:
        
        # Resolve the actual <a> element for this candidate node.
        try:
            item = item.xpath(xpath_item)[0]
        except IndexError:
            Log.Add("appendItems: Unable to locate item")
            continue
        else:
            url = item.get('href')
            item_name = utf8decode(item.text)
        
        # url is None when the link has no href attribute; regexp.search
        # then raises TypeError.
        try:
            matches = regexp.search(url)
        except TypeError:
            Log.Add("appendItems: Item URL was not found")
            continue
        
        if matches is None:
            Log.Add('appendItems: Could not add item from: %s' % 
                    (AREENA_ROOT+url))
            continue
            
        # First regex group holds the category/program id.
        item_id = matches.group(1)
        
        # Fetch item info.
        try:
            info = XML.ElementFromString(HTTP.GetCached(AREENA_ROOT+command+
                                                        item_id+"&filter=1,1",
                                                        CACHE_MED),
                                         True)[0]
            info.xpath("//div[contains(@class,'fragment1-1')]/div")
        except (IndexError, AttributeError), e:
            Log.Add("appendItems: Unable to fetch info for item: %s." % e)
            continue # Skip this item.
            
        # Fetch thumbnail.
        try:
            thumb = info.xpath("//div[contains(@class,'visio-box')]/img")[0]
        except IndexError:
            Log.Add("appendItems: Unable to fetch thumb for item with id:"+ 
                    " %s" % item_id)
            thumb = ""
        else:
            thumb = thumb.get("src")
        
        # Fetch summary title.
        try:
            summary_title = info.xpath("//div[contains(@class,"+ 
                                       "'visio-box')]/div/h4")[0].text
        except IndexError:
            Log.Add("appendItems: Unable to fetch summary title for item with"+ 
                    " id: %s" % item_id)
            summary_title = ""
        else:
            summary_title = utf8decode(summary_title)
        
        # Fetch summary.  Falls back to the summary title when missing.
        try:
            summary = info.xpath("//div[contains(@class,'visio-box')]"+ 
                                 "/div/p[1]")[0].text
        except IndexError:
            Log.Add("appendItems: Unable to fetch summary for item with id: "+ 
                    "%s" % item_id)
            summary = summary_title
        else:
            summary = utf8decode(summary)
        
        # Directory key encodes "name$id$page", decoded again by
        # HandleVideosRequest when the item is opened.
        page = 1
        d = DirectoryItem(item_name+"$"+item_id+"$"+str(page), item_name,
                          thumb, summary)
        d.SetAttr("subtitle", summary_title)
        dir.AppendItem(d)

def appendGroupings(dir, path="/selaa"):
    """
        Fetches groupings and appends them to the
        MediaContainer as DirectoryItems.
    """
    listing = XML.ElementFromString(HTTP.GetCached(AREENA_ROOT+path,
                                                   CACHE_HIGH), True)

    try:
        cells = listing.xpath("//table[contains(@class,'productlist')]"
                              "/tbody/tr/td[1]/strong")
    except AttributeError:
        Log.Add("Could not get groupings from: %s" % (AREENA_ROOT+path))
        return None

    for cell in cells:

        # Group display name comes from the <strong> cell itself.
        name_nodes = cell.xpath(".")
        if not name_nodes:
            Log.Add("appendGroupings: Could not get group name")
            continue
        group = utf8decode(name_nodes[0].text)

        # Group id is the 'name' attribute of the sibling anchor.
        anchors = cell.xpath("../a")
        if not anchors:
            Log.Add("appendGroupings: Could not get group id")
            continue
        group_id = utf8decode(anchors[0].get("name"))

        dir.AppendItem(DirectoryItem(group_id+"$"+group, group))

def appendVideos(dir, name, command, id, filter="1,1", page=1):
    """
        Fetches video information and appends them to the
        MediaContainer as VideoItems.

        Also appends a DirectoryItem for browsing further result pages
        when more content exists, and sets the container's "title2"
        attribute to include pagination information.

        dir     -- MediaContainer the VideoItems are appended to.
        name    -- UTF-8 encoded display name of the category/program.
        command -- query prefix, '/hae?cid=' or '/hae?pid='.
        id      -- category or program id.
        filter  -- site-side listing filter (default "1,1").
        page    -- 1-based result page number to fetch.
    """
    Log.Add("appendVideos: %s" % 
            (AREENA_ROOT+command+str(id)+"&filter="+filter+"&s="+str(page)))
    
    element = XML.ElementFromString(HTTP.GetCached(AREENA_ROOT+command+str(id)+
                                                   "&filter="+filter+"&s="+
                                                   str(page), CACHE_LOW),
                                    True)
    
    try:
        items = element.xpath("//li[contains(@class, 'video')]")
    except AttributeError:
        # element lacks .xpath when the listing could not be fetched/parsed.
        Log.Add("appendVideos: could not get video list for %s." % name)
        return None

    for v in items:
                
        # Fetch URL.
        try:
            url = v.xpath('span[4]/a')[0]
        except IndexError:
            Log.Add("appendVideos: Could not find video page URL.")
            continue # Not much to do without an URL.
        url = url.get('href')
                    
        # Check whether the item needs a license. Skip this content for now.
        try:
            v.xpath("div/span[contains(@class, 'tvlicense')]")[0]
        except IndexError:
            pass # expected: no license marker, content is playable
        else:
            Log.Add("appendVideos: Skipped locked content: "+AREENA_ROOT+url)
            continue
            
        # We need the video page. This does not change often, thus high cache.
        video_page = XML.ElementFromString(HTTP.GetCached(AREENA_ROOT+url,
                                                          CACHE_HIGH),
                                           True)
        if video_page is None:
            Log.Add("appendVideos: Unable to fetch data from video page: "+ 
                    AREENA_ROOT+url)
            continue
        
        # Fetch video id (raw string so the regex escapes stay literal).
        matches = re.compile(r'/toista\?id=(\d+)', re.IGNORECASE).search(url)
        if matches:
            video_id = matches.group(1)
        else:
            Log.Add("appendVideos: Could not resolve video id from: %s" % url)
            continue
        
        # Fetch stream URL from the video's .asx playlist.
        asx_url = AREENA_ASX_DISPATCHER + "/" + video_id + ".asx?bitrate=1000000"
        asx_file = XML.ElementFromURL(asx_url)
        try:
            key = asx_file.xpath("/ASX/ENTRY/REF")[0].get("HREF")
        except (IndexError, AttributeError):
            Log.Add("appendVideos: Could not fetch stream url from: %s" % asx_url)
            continue
        # The player wants the mms:// scheme instead of http://.
        key = key.replace("http", "mms")
        
        Log.Add("appendVideos: Stream URL: %s" % key)
        
        video_note = ""
        # BUGFIX: was "is not -1" -- an identity comparison against an
        # int, which is implementation-dependent; use value comparison.
        if key.find("ondemand.asx") != -1:
            video_note = u" (Tämä video ei toimi)"
            Log.Add("appendVideos: Unaccessable stream: %s" % key)
            
        # Fetch thumbnail.
        try:
            thumb = v.xpath('span/a/img')[0]
        except IndexError:
            thumb = None
        else:
            thumb = thumb.get('src')
        
        # Fetch series title.
        try:
            series_title = v.xpath("span[contains(@class, 'title')]/a")[0].text
        except IndexError:
            series_title = ""
        else:
            series_title = utf8decode(series_title)
            
        # Fetch video title.
        try:
            video_title = v.xpath("span[contains(@class, 'link-to-media')]"+ 
                                  "/a")[0].text
        except IndexError:
            video_title = ""
        else:
            video_title = utf8decode(video_title)
            
        title = series_title + ' - ' + video_title
            
        # Fetch summary.
        try:
            summary = video_page.xpath("//p[@class='clip-description']")[0].text
        except IndexError:
            summary = ""
        else:
            summary = utf8decode(summary)
            
        # Fetch duration: scan the info paragraphs for an hh:mm:ss stamp.
        p1 = video_page.xpath("//div[@id='content']/div"+ 
                              "[contains(@class, 'fragment1')]/div[2]/div/p")
        p1_to_string = ""
        for block in p1:
            p1_to_string += XML.ElementToString(block)
        regexp = re.compile(r'(\d{2,2}\:\d{2,2}\:\d{2,2})')
        matches = regexp.search(p1_to_string)
        if matches:
            # Calculate duration in milliseconds.
            duration = matches.group(0)
            duration = duration.split(':')
            duration = int(duration[0]) * 3600 + \
                       int(duration[1]) * 60 + \
                       int(duration[2])
            duration *= 1000
        else:
            duration = 0
        
        video = VideoItem(key, title+video_note, summary, str(duration), thumb)
        video.SetAttr("subtitle", series_title)
        dir.AppendItem(video)
    
    # Check to see if further video pages exist for this category or program.
    pages = page
    try:
        element.xpath("//div[@class='page-numbers']"+
                      "/following-sibling::a[contains(@class, 'inactive')]")[0]
    except IndexError:
        # No inactive "next" link, so further pages exist.
        next_page = page + 1
        dir.AppendItem(DirectoryItem(name.decode("utf-8")+"$"+str(id)+"$"+
                       str(next_page), u"Lisää..."))
    else:
        next_page = page
        
    # Fetch total number of pages if necessary.
    if pages != next_page:
        try:
            pages = element.xpath("//div[@class='page-numbers']/a[last()]")[0]
        except IndexError:
            pages = 0
        else:
            pages = int(pages.text)
    # BUGFIX: was "pages >> 1" (a right shift); the intent is "more than
    # one page".  The shift happened to be truthy for pages >= 2, but the
    # explicit comparison says what is meant.
    if pages > 1:
        pagination = " ("+str(page)+"/"+str(pages)+")"
    else:
        pagination = ""
    dir.SetAttr("title2", name.decode("utf-8")+pagination)

def HandleVideosRequest(pathNouns, count):
    """
        Dispatches plugin requests by the number of path nouns:

        0 nouns  -- top level menu (Kategoriat / Ohjelmat).
        1 noun   -- programme groupings, or a category listing.
        2 nouns  -- programme listing, or a video page for a category.
        3+ nouns -- video listing pages.

        Path nouns are '$'-separated: "PREFIX$NAME" for listings and
        "NAME$ID$PAGE" for video pages.  Returns the container XML.
    """
    # Bugfix: Will force the dictionary (and the HTTP cache) to be saved.
    Plugin.Dict["Now"] = datetime.datetime.now()
    
    Log.Add("pathNouns: %s count: %d" % (pathNouns, count))
    dir = MediaContainer(art="bg.jpg", viewGroup="List", title1="YLE Areena")
    
    # Top level menu.
    if count == 0:
        for (name, val) in dirs:
            dir.AppendItem(DirectoryItem(val+"$"+name, name))
        
    # Program groupings by first character.
    elif count == 1 and pathNouns[0].startswith("OHJELMAT"):
        dir.SetAttr("title2", u'Ohjelmat A-Ö')
        appendGroupings(dir)
            
    # Category or program listing.  Parentheses make the (original)
    # operator precedence explicit: a bare count==1 also matches here.
    elif count == 1 or (count == 2 and pathNouns[0].startswith("OHJELMAT")):
        
        (v,n) = pathNouns[count-1].split('$')
        
        if v.startswith('KATEGORIAT'):
            Log.Add("KATEGORIAT")
            rePattern = r'^/hae\?cid=(\d+)$'
            path = ''
            command = '/hae?cid='
            xpath = "//div[contains(@class,'fragment2')]/div[1]/p[1]/a"
            xpath_item = "."
        elif pathNouns[0].startswith("OHJELMAT"):
            Log.Add("OHJELMAT")
            rePattern = r'^/hae\?pid=(\d+)$'
            path = '/selaa'
            command = '/hae?pid='
            # Selects only those programs that have content available.
            xpath = "//table[contains(@class,'productlist')]/tbody" \
                    "/tr/td[position() = 3 and . != '0']/../td[1]" \
                    "/a[@name='" + v + "']"
            xpath_item = "../following-sibling::td/a"
        else:
            # Unknown path noun prefix; nothing sensible to build.
            return
        
        title2_prefix = ""
        if pathNouns[0].startswith("OHJELMAT"):
            title2_prefix = "Ohjelmat "
            
        dir.SetAttr("title2", title2_prefix+n.decode("utf-8"))
        dir.SetViewGroup ("InfoList")
        
        appendItems(dir, rePattern, path, command, xpath, xpath_item)
    
    # Video listing.
    elif count >= 2:
        
        (name, id, page) = pathNouns[count-1].split('$')
        
        dir.SetViewGroup ("InfoList")
        
        page = int(page)
        id = int(id)
        if pathNouns[0].startswith('KATEGORIAT'):
            command = '/hae?cid='
        elif pathNouns[0].startswith('OHJELMAT'):
            command = '/hae?pid='
        else:
            # BUGFIX: 'command' was left unbound for an unknown prefix,
            # raising NameError below.  Log it and return the (empty)
            # container instead.
            Log.Add("HandleVideosRequest: unknown path noun: %s" % 
                    pathNouns[0])
            return dir.ToXML()
        filter = "1,1"
        
        appendVideos(dir, name, command, id, filter, page)
        
    return dir.ToXML()
