#!/usr/bin/python2.5

# Copyright 2011 Google Inc. All Rights Reserved.

"""A utility module for fetching from Youtube GData API."""

__author__ = 'pluu@google.com (Phong Luu)'

import urllib
import logging
from operator import itemgetter
import re
import logging
import datetime
import simplejson

DEV_KEY = ('AI39si6fXrpH0sxfh9Q4jsnAklZXfAaZQB0O9DJ6i6Kw'
           'HUbUDmyMInrRr-oCejRjg8F8noxky9SNAMQ7hoSyxa-RNZUPMVYVMA')

YT_BASE_API_URL = 'https://gdata.youtube.com/feeds/api/'

YT_VIDEO_URL = (YT_BASE_API_URL +
                'users/%s/uploads?alt=json&max-results=50&key=' + DEV_KEY)

YT_VIDEO_DATA_URL = (YT_BASE_API_URL +
                     'videos/%s?v=2&alt=json&key=' + DEV_KEY)


def FetchVideoData(videoid):
  """Fetches metadata for a single YouTube video.

  Args:
    videoid: YouTube video id string.

  Returns:
    Result of YTContentExtractor on a single-entry feed: a tuple of
    (video entry dicts, invalid videoids).
  """
  request_url = YT_VIDEO_DATA_URL % videoid
  logging.info('fetching url - %s', request_url)
  response = simplejson.loads(urllib.urlopen(request_url).read())
  # Wrap the single entry so it looks like a normal multi-entry feed.
  wrapped_feed = {'feed': {'entry': [response['entry']]}}
  return YTContentExtractor(wrapped_feed)


def FetchPlaylists(username, start_index=1, result_store=None):
  """Recursively fetch all playlists belonging to a YT username.

  Args:
    username: YouTube username whose playlists are fetched.
    start_index: 1-based start-index of the YT API page (50 items per page).
    result_store: Accumulator list used by the recursion; callers normally
        omit it.

  Returns:
    List of {'title': ..., 'playlistid': ...} dicts, one per playlist.
  """
  # Fresh list per top-level call: the old mutable default ([]) was shared
  # across calls, so results from earlier fetches leaked into later ones.
  if result_store is None:
    result_store = []
  url = (
      'https://gdata.youtube.com/feeds/api/users/%s/playlists?v=2'
      '&max-results=50&key=%s&start-index=%d&alt=json' % (
          username, DEV_KEY, start_index))
  logging.info('fetching url - %s', url)
  content = urllib.urlopen(url).read()
  feed_entry = simplejson.loads(content)
  total_results = feed_entry['feed']['openSearch$totalResults']['$t']
  # Use the module-level extractor; the previous nested copy duplicated it.
  result_store += _PlaylistDataExtractor(feed_entry)
  # Page forward in steps of 50 until start_index passes the total count.
  if total_results >= start_index:
    return FetchPlaylists(username, start_index + 50, result_store)
  return result_store

def GetCountryRestrictions(videoid):
  """Gets video restriction meta for a Youtube video.

  Args:
    videoid: YouTube videoid.

  Returns:
    dict (or json branch) containing the media:restriction meta from
    youtube, or {} when the video carries no restriction element.
  """
  url = (
      'http://gdata.youtube.com/feeds/api/videos/%s?'
      'fields=media:group/media:restriction&alt=json&key=%s') % (
          videoid, DEV_KEY)
  logging.info('fetching url - %s', url)
  content = urllib.urlopen(url).read()
  feed = simplejson.loads(content)
  try:
    country_restrictions = feed['entry']['media$group']['media$restriction']
  except (KeyError, TypeError):
    # Missing keys simply mean the video is unrestricted; the previous bare
    # except also hid unrelated errors.
    country_restrictions = {}

  return country_restrictions


def _PlaylistDataExtractor(feed):
  """Extracts feed entries into dictionary items."""
  youtube_entries = []
  try:
    for i in xrange(0, len(feed['feed']['entry'])):
      playlistid= feed['feed']['entry'][i]['yt$playlistId']['$t']
      title = feed['feed']['entry'][i]['title']['$t']
      youtube_entries.append({'title': title, 'playlistid': playlistid})
  except:
    pass
  return youtube_entries

def GetVideoFeed(playlistid, start_index=1, result_store=None):
  """Recursively gets all video entries of a YT playlist.

  Args:
    playlistid: YouTube playlist id.
    start_index: 1-based start-index of the YT API page (50 items per page).
    result_store: Accumulator list used by the recursion; callers normally
        omit it.

  Returns:
    List of video entry dicts from YTContentExtractor.
  """
  # Fresh list per top-level call: the old mutable default ([]) accumulated
  # results across separate calls.
  if result_store is None:
    result_store = []
  # NOTE: fixed a duplicated '&' in the query string ('?v=2&&max-results').
  url = ('http://gdata.youtube.com/feeds/api/playlists/%s?v=2'
         '&max-results=50&key=%s&start-index=%d&alt=json' % (
             playlistid, DEV_KEY, start_index))
  logging.info('fetching url - %s', url)
  content = urllib.urlopen(url).read()
  feed_entry = simplejson.loads(content)
  total_results = feed_entry['feed']['openSearch$totalResults']['$t']
  videos_list, _ = YTContentExtractor(feed_entry)
  result_store += videos_list

  # Page forward in steps of 50 until start_index passes the total count.
  if total_results >= start_index:
    return GetVideoFeed(playlistid, start_index + 50, result_store)
  return result_store

def YTVideoFetcher(yt_username):
  """Gets all videos based on a username.

  Args:
    yt_username: Youtube author username.

  Returns:
    Tuple (videos, invalid_videoids) from YTContentExtractor over the
    user's uploads feed.
  """
  url = YT_VIDEO_URL % yt_username
  # Debug 'print url' removed: it leaked the developer key to stdout.
  logging.info('fetching url - %s', url)
  yt_video_feed = urllib.urlopen(url).read()
  return YTContentExtractor(simplejson.loads(yt_video_feed))


def FetchYTStream(yt_username, manual_page_break=None,
                  start_index=1, result_store=None, loop_count=1,
                  invalid_vids=None):
  """Recursive YT page grabber to fetch contents.

  Args:
    yt_username: Youtube author username.
    manual_page_break: Optional page count at which to stop early.
    start_index: Start-index param to request specific page from YT-API
        request.
    result_store: Per page level result container (recursion only).
    loop_count: 1-based page counter (recursion only).
    invalid_vids: Accumulator for non-embeddable videoids (recursion only).

  Returns:
    Tuple (result_store, invalid_vids): list of items containing
    youtube_entries -> YTContentExtractor items, and list of invalid
    videoids.
  """
  # Fresh accumulators per top-level call: the old mutable defaults ([])
  # were shared across calls and leaked results between fetches.
  if result_store is None:
    result_store = []
  if invalid_vids is None:
    invalid_vids = []
  url = (YT_VIDEO_URL % yt_username) + '&start-index=%d' % start_index
  logging.info('fetching url - %s', url)
  content = urllib.urlopen(url).read()
  feed_entry = simplejson.loads(content)
  total_results = feed_entry['feed']['openSearch$totalResults']['$t']
  items_per_page = feed_entry['feed']['openSearch$itemsPerPage']['$t']
  no_of_indexes = total_results / items_per_page
  videos_list, error_list = YTContentExtractor(feed_entry)
  result_store += videos_list
  invalid_vids += error_list
  if manual_page_break == loop_count:
    return result_store, invalid_vids

  if loop_count < no_of_indexes:
    try:
      return FetchYTStream(yt_username, manual_page_break,
                           start_index + 50, result_store, loop_count + 1,
                           invalid_vids)
    except Exception:
      # Best effort: the YT API fails after start index reaches 1000; log
      # and return what we have instead of silently swallowing all errors.
      logging.exception('stopping pagination at start-index %d', start_index)
      return result_store, invalid_vids
  return result_store, invalid_vids

def YTContentExtractor(feed_entry):
  """Extracts all video details from a YT-API feed page.

  Args:
    feed_entry: Parsed json contents of a YT-API page.

  Returns:
    A tuple (videos, invalid_videoids) where videos is a list of detailed
    content dicts per YT video, e.g.
    [{'title': ..., 'videoid': ..., 'thumbnail': ..., 'view_count': ...}],
    and invalid_videoids is the list of videoids that are not embeddable.
  """
  def _IsEmbeddable(media_content):
    """Returns True when any media:content item has yt:format 5 (embed)."""
    for video_content in media_content:
      if video_content.get('yt$format') == 5:
        return True
    return False

  youtube_entries = []
  invalid_videoids = []

  entries = feed_entry.get('feed', {}).get('entry') or []
  for entry in entries:
    media_group = entry.get('media$group', {})

    # Prefer the explicit yt:videoid; fall back to the tail of the id URI.
    try:
      video_id = media_group['yt$videoid']['$t']
    except (KeyError, TypeError):
      video_id = entry['id']['$t'].split('/')[-1]

    try:
      views = entry['yt$statistics']['viewCount']
    except (KeyError, TypeError):
      views = 0

    if not _IsEmbeddable(media_group.get('media$content', [])):
      # Debug 'print' removed; logging alone records the skip.
      logging.info('not embedable - %s', video_id)
      invalid_videoids.append(video_id)
      continue

    try:
      country_restrictions_raw = media_group['media$restriction']
      # The API returns either a single dict or a list of them.
      if isinstance(country_restrictions_raw, list):
        country_restrictions_raw = country_restrictions_raw[0]
      country_list = country_restrictions_raw['$t']
      relationship = country_restrictions_raw['relationship']
    except (KeyError, IndexError, TypeError):
      country_restrictions_raw = ''
      country_list = ''
      relationship = ''

    try:
      published_date = entry['published']['$t']
    except (KeyError, TypeError):
      published_date = ''

    try:
      title = entry['title']['$t']
    except (KeyError, TypeError):
      # A video without a title is unusable downstream; drop it.
      continue

    try:
      keywords = media_group['media$keywords']['$t']
    except (KeyError, TypeError):
      keywords = ''

    try:
      description = media_group['media$description']['$t']
    except (KeyError, TypeError):
      description = ''

    try:
      uploader = entry['author'][0]['name']['$t']
    except (KeyError, IndexError, TypeError):
      continue

    try:
      alias = entry['author'][0]['uri']['$t'].split('/')[-1]
    except (KeyError, IndexError, TypeError):
      alias = None
    # HACK: YT api now returns the full name in the author name field, so
    # prefer the username taken from the author uri when available.
    if alias:
      uploader = alias

    try:
      thumb = media_group['media$thumbnail'][0]['url']
    except (KeyError, IndexError, TypeError):
      # BUG FIX: the fallback previously referenced the undefined name
      # 'videoid', raising NameError whenever a thumbnail was missing.
      thumb = 'http://i.ytimg.com/vi/%s/0.jpg' % video_id

    youtube_entries.append(
        {'country_restriction': country_restrictions_raw,
         'country_list': country_list.split(' '),
         'relationship': relationship,
         'title': title,
         'published': published_date,
         'keywords': keywords,
         'description': description,
         'uploader': uploader,
         'view_count': views,
         'thumbnail': thumb,
         'videoid': video_id})
  return youtube_entries, invalid_videoids

