from datetime import datetime, timedelta
from google.appengine.api import urlfetch
from urllib import urlencode, unquote
from ext.BeautifulSoup import BeautifulSoup
from random import randrange, seed
import looptube.models
import re

def unichar_fromhex(otxt):
    """Regex-sub callback: turn a matched '%uXXXX' escape into its character.

    `otxt` is a match object whose 'bigchar' group holds '%uXXXX'; the
    leading '%u' is stripped and the four hex digits are decoded.
    """
    hexdigits = otxt.group('bigchar')[2:]
    return unichr(int(hexdigits, 16))

def unescape(txt):
    """Decode non-standard '%uXXXX' escapes plus ordinary URL escapes in txt.

    '%uXXXX' sequences are replaced by their unicode characters, the result
    is URL-unquoted, and any leftover literal '%20' becomes a space.
    """
    # BUG FIX: the original class [0-9A-Za-z]{4} also matched non-hex
    # letters (e.g. '%uZZZZ'), making int(..., 16) raise ValueError inside
    # the substitution callback.  Only hex digits form a valid %u escape.
    # (re.VERBOSE was dropped: the pattern contains no whitespace or '#',
    # so the flag had no effect.)
    p = re.compile(r'(?P<bigchar>%u[0-9A-Fa-f]{4})')
    return unquote(p.sub(unichar_fromhex, txt)).replace('%20', ' ')

def fetchWebPage(url, type='GET', payload=None, headers=None):
    """Fetch a URL via App Engine urlfetch.

    Args:
        url: absolute URL to fetch.
        type: HTTP method; 'POST' sends payload/headers, anything else GETs.
        payload: POST body (only used when type == 'POST').
        headers: dict of request headers (only used when type == 'POST').

    Returns:
        The response body on HTTP 200, or '' on any error or non-200 status.
    """
    try:
        if ('POST' == type):
            result = urlfetch.fetch(url, method=urlfetch.POST, payload=payload, headers=headers)
        else:
            result = urlfetch.fetch(url)
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit.  Exception still covers urlfetch's DownloadError etc.
    except Exception:
        return ''
    if (result.status_code == 200):
        return result.content
    return ''

lastFmAPIKey = '5b5d67f42da03711365eeb44032cb35b'
def fetchFromLastFmAPI(methodName, params):
    """Call a Last.fm 2.0 API method and return the raw response body.

    Args:
        methodName: Last.fm method name, e.g. 'track.getSimilar'.
        params: dict of method parameters; NOT mutated.

    Returns:
        The response body string, or None when the fetch failed.
    """
    # BUG FIX: the original wrote method/api_key into the caller's dict,
    # mutating the argument as a side effect.  Work on a copy instead.
    query = dict(params)
    query['method'] = methodName
    query['api_key'] = lastFmAPIKey
    apiURL = 'http://ws.audioscrobbler.com/2.0/'
    result = fetchWebPage(apiURL + '?' + urlencode(query))
    # fetchWebPage signals failure with ''; map that to None for callers.
    return result or None
    
def fetchSimilarSongs(songName, artistName, limit=10):
    """Return up to `limit` tracks similar to the given song, per Last.fm.

    Each entry is {'track': unicode, 'artist': unicode, 'match': float};
    returns None when the API call failed.
    """
    xml = fetchFromLastFmAPI('track.getSimilar', {'track':songName, 'artist':artistName})
    if xml is None:
        return None
    doc = BeautifulSoup(xml)
    similar = []
    # Slicing handles both the short and the over-limit case.
    for node in doc.findAll('track')[:limit]:
        similar.append({
            'track': unicode(node.find('name').string),
            'artist': unicode(node.find('artist').find('name').string),
            'match': float(node.find('match').string),
        })
    return similar

def fetchSimilarArtists(artistName, limit=10):
    """Return up to `limit` artists similar to artistName, per Last.fm.

    Each entry is {'artist': unicode, 'match': float}; returns None when
    the API call failed.
    """
    result = fetchFromLastFmAPI('artist.getSimilar', {'artist':artistName})
    if result is None:
        return None
    soup = BeautifulSoup(result)
    artistList = []
    artists = soup.findAll('artist')
    if len(artists) > limit:
        artists = artists[:limit]
    for artistTag in artists:
        # BUG FIX: the original rebound `artist` to the name *string* before
        # looking up <match>, so str.find() returned an int index and the
        # similarity score was silently wrong (the `int == __class__` check
        # was a workaround for that bug).  Read <match> from the tag.
        name = unicode(artistTag.find('name').string)
        matchTag = artistTag.find('match')
        match = float(matchTag.string) if matchTag is not None else 0.0
        artistList.append({'artist' : name, 'match' : match })

    return artistList

def fetchArtistTrack(artistName, limit=50):
    """Pick one random track from an artist's top `limit` Last.fm tracks.

    Returns {'track': unicode, 'artist': <attr value>}, or None on API
    failure, missing <toptracks> tag, or an empty track list.
    """
    result = fetchFromLastFmAPI('artist.getTopTracks', {'artist':artistName})
    if result is None:
        return None
    soup = BeautifulSoup(result)
    topTag = soup.find('toptracks')
    if topTag is None:
        # BUG FIX: subscripting None raised TypeError on malformed payloads.
        return None
    artist = topTag['artist']
    tracks = soup.findAll('track')
    if len(tracks) > limit:
        tracks = tracks[:limit]
    if not tracks:
        # BUG FIX: randrange(0) raises ValueError on an empty track list.
        return None

    seed()
    track = tracks[randrange(len(tracks))]
    trackName = unicode(track.find('name').string)

    return {'track' : trackName, 'artist' : artist}
    
def fetchYouTubeVideoData(videoID='', params=None, escapedQueryString=None, relatedVideoID=None, similarVideoID=None,
                          start_index=1, getDetails=False, onlyLoopable=False):
    """Query the YouTube GData (v2) API and return parsed video entries.

    Exactly one query selector is used, first match wins, in this order:
    escapedQueryString (pre-escaped q= value), params (urlencoded as-is),
    relatedVideoID (the /related feed), similarVideoID (searches for that
    video's title), videoID (searches for the quoted ID).

    Returns a list of dicts (videoID/title/description/thumbURL/duration/
    sourceURL/author/views/rating), or, when getDetails is True, a dict
    wrapping that list with totalResults/startIndex/itemsPerPage.
    NOTE(review): failures return None in most paths but [] in the
    similarVideoID path -- looks accidental; confirm against callers
    before unifying.  With onlyLoopable, entries whose cached status is
    missing or 'fail' are skipped.
    """
    # Build the feed URL from whichever selector was supplied.
    if (escapedQueryString):
        url = 'http://gdata.youtube.com/feeds/api/videos?v=2&q='+escapedQueryString
    elif (params):
        url = 'http://gdata.youtube.com/feeds/api/videos?v=2&'+urlencode(params)
    elif (relatedVideoID):
        url = 'http://gdata.youtube.com/feeds/api/videos/'+relatedVideoID+'/related?v=2'
    elif (similarVideoID):
        # "Similar" = look up that video's title, then search for the title.
        videoResult = fetchYouTubeVideoData(similarVideoID);
        if (len(videoResult) == 0):
            return []
        title = videoResult[0]['title']
        url = 'http://gdata.youtube.com/feeds/api/videos?v=2&'+urlencode({'q' : title})
    elif (len(videoID) > 0):
        # Searches for the quoted ID instead of fetching /videos/<id>
        # directly -- presumably to keep the same feed shape; confirm.
        url = 'http://gdata.youtube.com/feeds/api/videos?v=2&'+urlencode({'q' : '"'+videoID+'"'})
    else:
        return None
    if (start_index > 1):
        url = url + '&start-index=' + str(start_index)
    
    page = fetchWebPage(url)
    if ('' == page):
        return None
    soup = BeautifulSoup(page)
    entries = soup.findAll('media:group')
    returnList = []
    
    # Indexed loop on purpose: yt:statistics / gd:rating are looked up
    # globally and matched to entries by position i.
    for i in range(0, len(entries)):
        entry = entries[i]
        videoID = unicode(entry.find('yt:videoid').string)
        if (onlyLoopable):
            videoStatus = fetchYouTubeVideoStatus(videoID)
            if ((None == videoStatus) or ('fail' == videoStatus['status'])):
                continue
        try:
            dict =  {'videoID' : videoID,
                     'title' : unicode(entry.find('media:title').string),
                     'description' : unicode(entry.find('media:description').string),
                     'thumbURL' : unicode(entry.find('media:thumbnail')['url']),
                     'duration' : int(entry.find('yt:duration')['seconds']),
                     'sourceURL' : unicode(entry.find('media:player')['url']),
                     'author' : unicode(entry.find('media:credit').string),
                     'views' : int(soup.findAll('yt:statistics')[i]['viewcount']),
                     'rating' : float(soup.findAll('gd:rating')[i]['average']),
                     }
            returnList.append(dict)
        except:
            # Entries missing any field (e.g. unrated videos) are skipped
            # wholesale.  NOTE(review): bare except also hides real bugs.
            pass
    if (getDetails):
        # .lower() because BeautifulSoup lowercases tag names when parsing.
        return {'totalResults' : int(soup.find('openSearch:totalResults'.lower()).string),
                'startIndex' : int(soup.find('openSearch:startIndex'.lower()).string),
                'itemsPerPage' : int(soup.find('openSearch:itemsPerPage'.lower()).string),
                'results' : returnList
                }
    else:
        return returnList

def searchYouTubeFirstPlayableVideo(searchString):
    """Search YouTube for searchString; return the first hit whose cached
    status is 'ok', or None when the search failed or nothing is playable."""
    hits = fetchYouTubeVideoData(params={'q' : searchString})
    if hits is None:
        return None

    for hit in hits:
        status = fetchYouTubeVideoStatus(hit['videoID'], default='fail')
        if status['status'] == 'ok':
            return hit

    return None
    
def fetchYouTubeVideoFLVURL(videoID):
    """Resolve the FLV download URL for a YouTube video via kej.tw.

    Returns {'flvURL': unicode} or None when the page fetch failed.
    """
    page = fetchWebPage('http://kej.tw/flvretriever/?videoUrl=http://www.youtube.com/watch?v='+videoID)
    if not page:
        return None
    return {'flvURL' : unicode(BeautifulSoup(page).find('textarea').string)}

def fetchYouTubeVideoStatus(videoID, default='ok'):
    """Look up the cached playability status for videoID, falling back to
    `default` when no record exists (thin wrapper over the datastore model)."""
    return looptube.models.VideoStatus.getVideoStatus(videoID, default)

# Tuning knobs for digg_paginator (Digg-style pagination window).
LEADING_PAGE_RANGE_DISPLAYED = TRAILING_PAGE_RANGE_DISPLAYED = 10  # pages listed when near an edge
LEADING_PAGE_RANGE = TRAILING_PAGE_RANGE = 8  # how close to an edge counts as "in range"
NUM_PAGES_OUTSIDE_RANGE = 0  # extra edge pages shown outside the window (currently disabled)
ADJACENT_PAGES = 4  # pages listed on each side of the current page (middle case)

def digg_paginator(paginator, page, results_per_page, base_url):
    """Build a Digg-style pagination context dict for templates.

    Computes the window of page numbers to display around `page`, plus
    flags saying whether we are near the leading/trailing edge, and the
    optional edge pages shown outside that window.
    """
    total = paginator.num_pages
    in_leading_range = in_trailing_range = False
    pages_outside_leading_range = pages_outside_trailing_range = range(0)
    paginator_page = paginator.page(page)

    def _valid(candidates):
        # Keep only page numbers that actually exist.
        return [n for n in candidates if n > 0 and n <= total]

    if total <= LEADING_PAGE_RANGE_DISPLAYED:
        # Few enough pages to show them all.
        in_leading_range = in_trailing_range = True
        page_numbers = _valid(range(1, total + 1))
    elif page <= LEADING_PAGE_RANGE:
        # Near the front: anchor the window at page 1.
        in_leading_range = True
        page_numbers = _valid(range(1, LEADING_PAGE_RANGE_DISPLAYED + 1))
        pages_outside_leading_range = [n + total for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]
    elif page > total - TRAILING_PAGE_RANGE:
        # Near the back: anchor the window at the last page.
        in_trailing_range = True
        page_numbers = _valid(range(total - TRAILING_PAGE_RANGE_DISPLAYED + 1, total + 1))
        pages_outside_trailing_range = [n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]
    else:
        # Middle: center the window on the current page.
        page_numbers = _valid(range(page - ADJACENT_PAGES, page + ADJACENT_PAGES + 1))
        pages_outside_leading_range = [n + total for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]
        pages_outside_trailing_range = [n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]

    return {
        "base_url": base_url,
        "is_paginated": True,
        "previous": paginator_page.previous_page_number(),
        "has_previous": paginator_page.has_previous(),
        "next": paginator_page.next_page_number(),
        "has_next": paginator_page.has_next(),
        "results_per_page": results_per_page,
        "page": page,
        "pages": total,
        "page_numbers": page_numbers,
        "in_leading_range" : in_leading_range,
        "in_trailing_range" : in_trailing_range,
        "pages_outside_leading_range": pages_outside_leading_range,
        "pages_outside_trailing_range": pages_outside_trailing_range
    }

def fetchEntriesFromAtomFeed(url):
    """Fetch an Atom feed and return its entries, or None on fetch failure.

    Each entry is {'title': unicode, 'published': 'YYYY-MM-DD' prefix of the
    timestamp, 'url': the rel="alternate" link href}.
    """
    page = fetchWebPage(url)
    # BUG FIX: fetchWebPage signals failure with '' (never None), so the old
    # `None == page` check was dead and failed fetches silently produced []
    # from parsing an empty document.  Treat any falsy page as failure.
    if not page:
        return None
    soup = BeautifulSoup(page)
    resultList = [ {'title' : unicode(entry.find('title').string),
                    'published' : unicode(entry.find('published').string)[:10],
                    'url' : unicode(entry.find('link', {'rel' : 'alternate'})['href'])
                    } for entry in soup.findAll('entry') ]
    return resultList

def getYouTubeMovieID(movieURL):
    """Extract the video ID from a YouTube '/watch?v=' URL.

    Returns the ID string, or None when the URL has no '/watch?v=' part or
    its host does not end in 'youtube.com'.
    """
    parts = movieURL.split('/watch?v=')
    if len(parts) < 2 or not parts[0].endswith('youtube.com'):
        return None
    # Drop any trailing query parameters after the ID.
    return parts[1].split('&')[0]