#!/usr/bin/env python2.4
# Copyright Ken Faulkner 2007.

import sys
import urllib
from Common.logRoutine import *
import os
import os.path
import datetime
from Framework.Parser import HTMLParse
from Datatypes.AudioDetails import AudioDetails
from Datatypes.DayDetails import DayDetails
import traceback
import Queue
import time
from configobj import ConfigObj
from Framework.CacheManager.CacheManager import CacheManager
from Common.MiscConst import *
from Framework.Downloader.DownloaderQueue import DownloaderQueue
import re

# Map weekday index (0 == monday ... 6 == sunday) to its lowercase day name.
indexToDay = dict(enumerate(('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday')))

    
class LinkDownloader( object ):
  """
  Scrapes the BBC listings pages (BaseURL/LinkURL from config) and builds
  a sorted, de-duplicated list of AudioDetails entries for the episodes
  currently available to download.
  """

  def __init__(self, config ):
    """
    config: ConfigObj-style mapping with a ['BBC'] section supplying
    BaseURL, LinkURL and link_download_num (parallel downloader threads).
    """
    self.log = getLogger()
    self.config = config
    # NOTE(review): os.getenv("HOME") can be None (e.g. on Windows) -- confirm.
    self.home_dir = os.path.join( os.getenv("HOME"), ".bbc" )
    self.userConfig = os.path.join( self.home_dir, "user.cfg")
    
    self.downloadDays = {}
    self.indexToDay = indexToDay
    
    self.baseURL = self.config['BBC']['BaseURL']
    self.linkURL = self.config['BBC']['LinkURL']

    self.link_download_num = int( self.config['BBC']['link_download_num'] )
    
    # Cache for data!!
    self.cacheManager = CacheManager( self.config )
    
    # True once getAllUniqueEntries has completed a fresh scrape.
    self.have_fresh_results = False
    
    # days processed so far (one per entry in indexToDay).
    self.day_count = 0

    # percent complete (this is just pure guess work)
    self.percent_complete = 0
    
  def _download_all( self, entries ):
    """
    Fetch every URL in 'entries' using link_download_num parallel
    DownloaderQueue threads and return the list of downloaded page bodies.
    Shared by get_series_entries and generate_audio_details (was duplicated).
    """
    requestQ = Queue.Queue()
    resultQ = Queue.Queue()

    for entry in entries:
      requestQ.put( entry )

    workers = []
    for i in xrange( self.link_download_num ):
      workers.append( DownloaderQueue( requestQ, resultQ ) )

    for worker in workers:
      worker.start()

    for worker in workers:
      worker.join()

    results = []
    while not resultQ.empty():
      results.append( resultQ.get() )

    return results

  # fudge urls, so they are complete
  # also remove anything in the description between (and including) [ ]
  def manipulateData(self, date, day, audioList ):
    """
    Rewrite each AudioDetails in audioList in place: absolute link,
    description truncated at the first '[', date stamped, size estimated.
    """
    try:
      for i in audioList:
        i.link = self.baseURL +  day + "/" + i.link

        # keep only the text before any '[...]' annotation.
        desc = i.description.split('[')
        i.description = desc[0]
        i.date = date
        
        i.estimated_size = BYTES_PER_MINUTE * i.time
        
    except Exception:
      self.log.error("fudgeURLS exception " + traceback.format_exc() )
      
      
  def titleSorter( self, d1, d2):
    """
    cmp-style comparator for AudioDetails: order by title, then by
    episode_no within the same title. Returns -1, 0 or 1 (0 on error).
    """
    res = 0
    try:
      if d1.title == d2.title:
      
        self.log.debug("checking log " + str( d1 ) + ":" + str( d2) )
      
        if d1.episode_no < d2.episode_no:
          res = -1
        elif d1.episode_no > d2.episode_no:
          res = 1
      elif d1.title < d2.title:
        res = -1
      elif d1.title > d2.title:
        res = 1

    except Exception:
      # erm, DOH!
      self.log.error("LinkDownloader::titleSorter ex " + traceback.format_exc() )

    return res

  def get_current_entries( self ):
    """
    Get the list of "current" entries from: http://www.bbc.co.uk/radio7/programmes/a-z/

    Returns the series URLs that advertise something available on iPlayer.
    """
    self.log.info("LinkDownloader::get_current_entries start")

    entries = []
    
    try:
    
      self.log.debug("base url " + self.baseURL )
      
      main_data = urllib.urlopen( self.baseURL ).read()
      sp = main_data.split("withsynopsis")
     
      # go through each 'series'
      for i in sp[1:]:
        idx = i.find('href="')
        if idx != -1:
          href = i[idx:].split('"')[1]
          
          # this should only get ones that have *something* to download.
          idx = i.find("Available on BBC iPlayer")
          if idx != -1:
            entries.append( self.linkURL + href )     
      
    except Exception:
      self.log.error("LinkDownloader::get_current_entries ex " + traceback.format_exc() )

    return entries

  def get_series_entries( self, entries ):
    """
    Download each series page and return the unique episode URLs: a line
    tagged 'url"' supplies the href, and the episode is kept only if a
    later "time-remaining" marker shows it is still available.
    """
    self.log.info("LinkDownloader::get_series_entries start")

    episode_set = set()
    
    try:

      new_entries = self._download_all( entries )

      # half way through process?  
      self.percent_complete = 50
      
      for main_data in new_entries:
      
        full_url = None
        
        # go through each 'series'
        for i in main_data.split("\n"):
        
          # make sure "URL" is listed in line.
          idx = i.find('url"')
          if idx != -1:
          
            idx = i.find('href="')
            if idx != -1:
              url = i[idx:].split('"')[1]
              
              full_url = self.linkURL + url
          
          idx = i.find("time-remaining")
          if idx != -1:
            if full_url != None:
              episode_set.add( full_url )
              full_url = None

    except Exception:
      self.log.error("LinkDownloader::get_series_entries ex " + traceback.format_exc() )

    return list( episode_set )


  def get_ra_link( self, url ):
    """
    Get the RealAudio link from the contents of the given url.
    
    Should do far better parsing.
    """
    self.log.info("LinkDownloader::get_ra_link start")

    ra_file = None
    
    try:
    
      main_data = urllib.urlopen( url ).read()
     
      # back up 30 chars so the opening quote of the href is inside the slice.
      idx = main_data.find("iplayer/aod")
      if idx != -1:
        ra_file = main_data[idx-30:].split('"')[1]
          
    except Exception:
      self.log.error("LinkDownloader::get_ra_link ex " + traceback.format_exc() )

    return ra_file
           

  def get_series_name( self, data ):
    """
    Extract the series name from an episode page: the text of the anchor
    marked class="tleo". Returns "" when not found.
    Should definitely write a non-brittle parser....  just too lazy.
    """
    self.log.info("LinkDownloader::get_series_name start")

    series_name = ""
    
    try:

      idx = data.find('class="tleo"')
      if idx != -1:
        idx2 = data[idx:].find("</a>")
        if idx2 != -1:
          series = data[idx:idx+idx2]
          # the name is whatever follows the last '>' before </a>.
          idx = series.rfind(">")
          
          series_name = series[idx+1:]
          
    except Exception:
      self.log.error("LinkDownloader::get_series_name ex " + traceback.format_exc() )

    return series_name
           
  def generate_audio_details( self, entries ):
    """
    Download each episode page and scrape title, description, duration and
    episode number into AudioDetails objects.
    Should definitely write a non-brittle parser....  just too lazy.
    """
    self.log.info("LinkDownloader::generate_audio_details start")

    final_list = []
    
    try:

      new_entries = self._download_all( entries )

      self.percent_complete = 100
      
      start = time.time()
      
      for main_data in new_entries:
      
        # first check if this is downloadable. (will have the word "console" in  it)
        idx = main_data.find("iplayer/console")
        if idx != -1:
        
          idx = main_data.find("href", idx-30)
          endIdx = main_data[idx:].find("\n")
          
          line = main_data[idx:endIdx+idx]
            
          url = line.split('"')[1]
          
          duration = int( line.split("Duration:")[1].split()[0] )
          self.log.debug("link " + url )
          self.log.debug("duration " + str( duration ) )
          
          # sooooo brittle. Should write a proper parser.         
          title_line = main_data.split("title>")[1][:-2]
          try:
          
            title_line = title_line.split("Programmes -")[1]
            
            description_line = main_data.split('meta name="description"')[1].split("\n")[0]
            description_line = description_line.split('"')[1]
            
            idx = title_line.find("Episode")
            if idx != -1:
              # NOTE(review): fails (and drops the entry) if anything follows
              # the number, e.g. "Episode 3 of 5" -- confirm intended.
              episode = int( title_line.split("Episode")[1] )
            else:
            
              # try regex
              m = re.search("Episode ([0-9]+) of ([0-9]+)", main_data)
              
              if m:
                # bug fix: was left as a string, breaking episode_no ordering
                # against the int produced by the branch above.
                episode = int( m.group(1) )
              else:
                episode = 0
              
            audio_entry = AudioDetails()
            audio_entry.title = title_line.strip()
            audio_entry.description = description_line
            audio_entry.time = duration  # yeah, should rename the member variable.
            audio_entry.episode_no = episode
            audio_entry.estimated_size = BYTES_PER_MINUTE * audio_entry.time
            audio_entry.album = self.get_series_name( main_data )
             
            # dont get final RA entry until we're ready to download.
            audio_entry.link = url
            
            self.log.debug("added " + str( audio_entry ) )
            
            final_list.append( audio_entry )
          except Exception:
            # probably not radio 7.
            # bug fix: previously logged the stale loop variable 'entry'.
            self.log.warning("LinkDownloader::generate_audio_details exception, not added to list " + url )
      
      end = time.time()
         
      self.log.debug("inner loop %f"%(end-start))
         
    except Exception:
      self.log.error("LinkDownloader::generate_audio_details ex " + traceback.format_exc() )

    return final_list
       
  def getAllUniqueEntries( self ):
    """
    Get all entries, convert into 1 list.
    Firstly generate a dict with the key being combined titles+description.
    This will remove the dupes.

    Then put into a list and sort based on title.

    """

    self.log.info("LinkDownloader::getAllUniqueEntries start")

    uniqueList = []

    try:

      start = time.time()
      
      self.have_fresh_results = False
      
      entries = self.get_current_entries()
      
      episodes = self.get_series_entries( entries )
      
      final_list =  self.generate_audio_details( episodes )
      
      uniqueList = final_list
      
      uniqueList.sort( cmp = self.titleSorter )
      
      self.have_fresh_results = True
      
      self.log.debug("unique list " + str( uniqueList ) )
      end = time.time()
      self.log.debug("time taken to get list %f"%(end-start) )
      
    except Exception:
      self.log.error("LinkDownloader::getAllUniqueEntries ex " + traceback.format_exc() )

    return uniqueList




 

if __name__ == "__main__":
  
  c = ConfigObj("/Users/faulkner/.bbc/bbc.cfg")
  
  app = LinkDownloader( c )
  
  entries = app.getAllUniqueEntries( )
  
  for i in entries:
    print i
    
    
  



