#!/usr/bin/env python

# Copyright 2012 Aaron Ciuffo 

'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
 along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''

#### Global Vars ####
# version banner printed when the -V/--version flag is given (see main)
version="""NPR Podcast Downloader V2.1.1
by Aaron Ciuffo (txoof.com)
released without warranty under GPLV3: 
http://www.gnu.org/licenses/gpl-3.0.html
"""

#### Imports ####
# handle regular expressions
import re
# handle os file requests
import os
import sys
import shutil

# handle command line arguments
import ConfigParser
import argparse
# handle date requests
import datetime
import time

# load optional and non-standard modules
def load_modules(options):
  """Import optional/non-standard modules at runtime.

  requests is mandatory: if it cannot be imported the script exits with
  status 2.  mutagen's EasyID3 is optional: if it cannot be imported,
  id3 tagging is disabled by forcing options['notag'] to True.

  Args:
    options: merged options dict (see merge_options)
  Returns:
    the (possibly modified) options dict
  """
  try:
    # requests does all HTTP work; expose it as a module global
    global requests
    import requests
  except Exception as e:
    # bug fix: message read 'Faled'
    print('Failed to load requests module - ' + str(e))
    print('Please install requests module: http://docs.python-requests.org/')
    print('exiting')
    exit(2)

  # try to load the id3 tagging system by default
  if not options['notag']:
    try:
      global EasyID3
      from mutagen.easyid3 import EasyID3
    except Exception as e:
      print('Error: failed to load id3 tagger - ' + str(e))
      print('Please install the mutagen module or specify --notag/notag=True')
      print('Mutagen module: http://code.google.com/p/mutagen/')
      print('Disabling tagging')
      options['notag'] = True

  return (options)

#### Classes ####
# Exception Class
class RQFail(Exception):
  """Raised when an HTTP request or NPRML document yields unusable data."""


#### Helper Functions ####
# Generate a YYYY-MM-DD string from timedata
def ymd (timedata):
  """Return *timedata* as a 'YYYY-M-D' string (no zero padding)."""
  fields = (timedata.year, timedata.month, timedata.day)
  return ('-'.join(str(f) for f in fields))

# add ID3 tags
def id3_tag(segment,outpath):
  """Write basic id3 tags (title/track/album/genre) on a downloaded mp3.

  Relies on the module-global EasyID3 loaded by load_modules(); tag
  failures are reported but never fatal.
  """
  # decode so special characters don't break the id3 module
  title = segment['title'].decode('utf-8')

  try:
    target = outpath + segment['filename'] + '.mp3'
    tags = EasyID3(target)
    tags['title'] = title
    tags['tracknumber'] = segment['segnum']
    tags['album'] = segment['program']
    tags['genre'] = 'podcast'
    tags.save()
  except Exception as e:
    print('Invalid ID3 tag: ' + str(e))
  return ()

# Write a manifest of every file created by the script 
def write_manifest(outpath,downloaded,m3ufile):
  """Record every file created for an episode in outpath/manifest.log.

  The manifest (one filename per line, playlist last) is used by
  clean_up() to delete old episodes, and its presence marks an episode
  as fully downloaded.

  Args:
    outpath: directory path (with trailing /) to write into
    downloaded: list of downloaded segment filenames
    m3ufile: name of this episode's playlist file
  """
  manifest='manifest.log'
  try:
    # with-block guarantees the handle is closed; the old code's bare
    # "f.close" (missing parentheses) never actually called close()
    with open(outpath+manifest, 'w') as f:
      for name in downloaded:
        f.write(name+'\n')
      f.write(m3ufile+'\n')
  except Exception as e:
    # bug fix: on open() failure the old code still tried f.write and
    # crashed with NameError; now we just report and return
    print('problem writing manifest file: ' + str(e))
  return ()

# Write a unique M3U playlist for each program downloaded
def make_m3u(date,program,outpath,downloaded):
  """Write an m3u playlist for one episode; return the playlist name.

  The playlist is named <date>-<show>.m3u from the NPR program id
  (ATC=2, ME=3, WESat=7, WESun=10) and lists the downloaded segment
  filenames in sorted order.

  Note: sorts the caller's `downloaded` list in place (original
  behavior, relied on by download_program).
  """
  # map NPR program ids to a short show suffix
  suffixes = {2: '-ATC', 3: '-MorningEd', 7: '-WESat', 10: '-WESun'}
  m3ufile = date + suffixes.get(program, '-Unknown') + '.m3u'
  downloaded.sort()
  try:
    # with-block closes the handle; the old "f.close" (no parens) never did
    with open(outpath+m3ufile, 'w') as f:
      for name in downloaded:
        f.write(name+'\n')
  except Exception as e:
    print('problem writing m3u file: ' + str(e))
  return (m3ufile)

# Write an anonymous M3U playlist in the root of the output directory
# This is useful for some mp3 players
def make_genericm3u(program,basepath,outpath,downloaded,options):
  """Write/overwrite a fixed-name playlist in the output root for a show.

  Produces e.g. <outpath>/ATC.m3u containing relative
  ./<basepath><file> entries for the latest episode; some mp3 players
  need this fixed name.  Returns the playlist path.

  Note: sorts the caller's `downloaded` list in place (original
  behavior).
  """
  # map NPR program ids to a short fixed playlist name
  names = {2: 'ATC', 3: 'ME', 7: 'WESa', 10: 'WESu'}
  m3ufile = str(options['outpath']) + '/' + names.get(program, 'Unknown') + '.m3u'

  downloaded.sort()

  # playlist entries are relative to the output root
  m3upath = './' + basepath
  try:
    # with-block closes the handle; the old "f.close" (no parens) never did
    with open(m3ufile, 'w') as f:
      for name in downloaded:
        f.write(m3upath+name+'\n')
  except Exception as e:
    print('problem writing m3u file: ' + str(e))
  return (m3ufile)

# Clean up old episodes and delete empty directories
def clean_up(options):
  """Delete old episode directories until only options['keep'] remain.

  Only directories directly under options['outpath'] that contain a
  manifest.log (i.e. were created by this script) are candidates; the
  lexicographically smallest (oldest, given date-based names) go
  first.  Files listed in the manifest are deleted, then the manifest,
  then the directory itself - but only if nothing except dot-files
  remains in it.
  """
  basepath=options['outpath']
  keep=options['keep']
  # all subdirectories of the output path
  rawdirs=[ name for name in os.listdir(basepath) if os.path.isdir(os.path.join(basepath, name)) ]

  # only directories holding a manifest.log were created by this script
  deldirs=[ d for d in rawdirs if os.path.exists(basepath+d+'/manifest.log') ]

  # put them in order smallest to largest so the oldest can be removed
  deldirs.sort()

  # remove the oldest until only `keep` remain (max() guards against a
  # negative slice bound when keep >= len(deldirs))
  for target in deldirs[:max(0, len(deldirs)-keep)]:
    delpath=basepath+target+'/'
    # read the manifest; with-block closes the handle (the old code
    # leaked it)
    with open(delpath+'manifest.log') as f:
      files=[line.strip() for line in f]
    # delete each file recorded in the manifest
    for name in files:
      try:
        os.remove(delpath+name)
      except Exception as e:
        print('could not remove: ' + delpath+name)
        print('error: ' + str(e))
    # remove the manifest itself
    try:
      os.remove(delpath+'manifest.log')
    except Exception as e:
      # bug fix: the old code reported the wrong filename here (the
      # last manifest entry instead of manifest.log)
      print('could not remove: ' + delpath + 'manifest.log')
      print('error: ' + str(e))
    # only remove the directory if it holds nothing but dot-files
    dircheck=os.listdir(delpath)
    empty=all(re.match('(^\..*)', entry) for entry in dircheck)
    if empty:
      try:
        shutil.rmtree(delpath)
      except Exception as e:
        print('could not remove directory: ' + delpath)
        print('error: ' + str(e))
    else:
      print('directory not empty: ' + delpath)

  return ()

#### Configuration Functions ####
# Parse commandline options
def parse_args():
  """Define and parse the command line; return the argparse namespace.

  Command line options later override config-file settings (see
  merge_options).
  """
  # derive the default config location from this script's own name
  stem = re.search('(^.*)\.', os.path.basename(__file__))
  defaultconf = os.path.expanduser('~/.' + stem.group(1) + '/config.ini')

  cli = argparse.ArgumentParser(description='Fetch all segments for the most recent episodes of NPR programs.')

  cli.add_argument('--apikey', action='store', type=str, metavar='<str>',
                   help='NPR API access key')
  cli.add_argument('-c', '--config', action='store', type=str, metavar='<path>',
                   default=defaultconf,
                   help='default configuration file: ' + defaultconf)
  # dry run - report what would happen without downloading anything
  cli.add_argument('-d', '--dryrun', action='store_true', default=False,
                   help='dry run - do not download mp3s.')
  cli.add_argument('-g', '--genericm3u', action='store_true', default=False,
                   help='create generic m3u play lists in the root of the output directory; useful for some media players.')
  cli.add_argument('-k', '--keep', action='store', type=int, metavar='<i>',
                   help='maximum number of old episodes to keep')
  cli.add_argument('-m', '--maxeps', action='store', type=int, metavar='<i>',
                   help='maximum number of episodes to download')
  cli.add_argument('-o', '--outpath', action='store', type=str, metavar='<path>',
                   help='output path for downloaded mp3s')
  cli.add_argument('-s', '--minsegments', action='store', type=int, metavar='<i>',
                   help='minimum number number of segments to try to locate.')
  cli.add_argument('-t', '--notag', action='store_true', default=False,
                   help='turn off id3 tagging of segments')
  cli.add_argument('-u', '--baseurl', action='store', type=str, metavar='<url>',
                   help='base url for NPR query engine')
  cli.add_argument('-V', '--version', action='store_true', default=False,
                   help='display version and exit')

  return (cli.parse_args())

# Read configuration file
def read_config(args): 
  """Load the ini-style config file named by args.config.

  Returns a ConfigParser instance.  A missing/unreadable file is
  silently skipped by ConfigParser.read, leaving the parser empty so
  merge_options falls back to its built-in defaults.
  """
  config=ConfigParser.ConfigParser() 
  config.read(args.config) 
  return(config)

# Merge configuration and commandline. Commandline trumps config file
def _config_get(config, key, cast, default):
  """Fetch [main]/<key> from the config, cast it; fall back to default."""
  try:
    return cast(config.get('main', key))
  except Exception:
    return default

def _config_bool(value):
  """Interpret an ini-file string as a boolean.

  Bug fix: config.get returns a string, and any non-empty string -
  including 'False' - is truthy, so notag=False in the config used to
  behave like True."""
  return str(value).strip().lower() in ('1', 'true', 'yes', 'on')

def merge_options(args, config):
  """Merge command line arguments and config-file settings into a dict.

  Command line options win; the config file's [main] section is the
  fallback; hard-coded defaults are the last resort.

  Returns a dict with keys: maxeps, apikey, baseurl, keep, outpath,
  notag, genericm3u, minsegments, dryrun.
  """
  options = {}

  # command line options override configuration file options
  options['maxeps'] = args.maxeps if args.maxeps else _config_get(config, 'maxeps', int, 3)
  options['apikey'] = args.apikey if args.apikey else _config_get(config, 'apikey', str, '')
  options['baseurl'] = args.baseurl if args.baseurl else _config_get(config, 'baseurl', str, 'http://api.npr.org/query?')
  options['keep'] = args.keep if args.keep else _config_get(config, 'keep', int, 4)
  options['outpath'] = args.outpath if args.outpath else _config_get(config, 'outpath', str, './')

  # append a final / just in case, then expand ~ etc.
  options['outpath'] = os.path.expanduser(options['outpath'] + '/')

  # sanity checks
  # don't try to download a negative number of episodes
  if options['maxeps'] < 0:
    options['maxeps'] = 0
  # if keep < maxeps we would delete stuff that was just downloaded
  if options['keep'] < options['maxeps']:
    options['keep'] = options['maxeps']

  # boolean flags: the command line flag forces True; otherwise parse
  # the config value as a real boolean (see _config_bool bug fix)
  options['notag'] = True if args.notag else _config_get(config, 'notag', _config_bool, False)
  options['genericm3u'] = True if args.genericm3u else _config_get(config, 'genericm3u', _config_bool, False)

  # try to get at least <i> segments (0 on the command line is honored).
  # bug fix: cast the config value to int - it used to stay a string,
  # so the range check below always reset it to 23
  if not args.minsegments and args.minsegments != 0:
    options['minsegments'] = _config_get(config, 'minsegments', int, 23)
  else:
    options['minsegments'] = args.minsegments

  # set a sane value if insanity is specified for the infill value
  if not (0 <= options['minsegments'] <= 99):
    options['minsegments'] = 23

  # dry run - only pretend to download
  options['dryrun'] = bool(args.dryrun)
  return (options)


#### Download Functions ####
# Generate a list of potential new episodes
def download_list(options): 
  """Work out which episodes should currently be available.

  Starting from (roughly) US eastern time now, step backwards one hour
  at a time; each hour that matches a show's publication slot becomes
  one episode entry, until options['maxeps'] entries exist.

  Returns {index: {'date': 'YYYY-M-D', 'program': <NPR program id>}}.
  """
  wanted = options['maxeps']
  one_hour = datetime.timedelta(hours=1)
  # subtract 5 hours from UTC to roughly get eastern time
  cursor = datetime.datetime.utcnow() - datetime.timedelta(hours=5)

  episodes = {}
  while len(episodes) < wanted:
    program = None
    if cursor.hour == 13:
      # 1:00 pm eastern (ish): Morning Edition on weekdays,
      # Weekend Edition on saturday/sunday
      weekday = cursor.weekday()
      if weekday < 5:
        program = 3
      elif weekday == 5:
        program = 7
      else:
        program = 10
    elif cursor.hour == 19:
      # 7:00 pm eastern (ish): All Things Considered
      program = 2
    if program is not None:
      # same unpadded YYYY-M-D format that ymd() produces
      datestr = '-'.join(str(part) for part in
                         (cursor.year, cursor.month, cursor.day))
      episodes[len(episodes)] = {'date': datestr, 'program': program}
    # count backwards one hour and test again
    cursor = cursor - one_hour

  return (episodes)

# Download the NPRML data sheet listing the available segments
def get_nprml (baseurl,payload):
  """Fetch the NPRML index document for one program/date.

  Args:
    baseurl: NPR query engine url
    payload: dict of query parameters (program id, api key, date, ...)
  Returns:
    the requests response holding the NPRML document
  Raises:
    RQFail: on connection errors, non-ok status, non-xml content-type,
      or an NPR-reported error embedded in the document
  """
  try:
    nprml=requests.get(baseurl,params=payload)
    print('nprml url: ' + nprml.url)
  except requests.ConnectionError:
    # bug fix: was requests.exeptions (typo) -> AttributeError
    raise RQFail(requests.exceptions.ConnectionError)
  else:
    if not nprml.status_code==requests.codes.ok:
      # bug fix: was r.status_code, an undefined name
      raise RQFail(nprml.status_code)
  # check the headers to see if the page looks valid
  if not re.search('.*(xml).*', nprml.headers['content-type']):
    error_data='non xml content-type received: '+nprml.headers['content-type']
    raise RQFail(error_data)
  # NPR reports problems inside the document; collect and raise them
  if re.search('error',nprml.content):
    match_error=re.findall('\=\"(\d+)\"',nprml.content)
    match_text=re.findall('<text>(.*)</text>',nprml.content)
    error_data=''
    for code in match_error:
      # build the error and append it to the previous error
      error_data=error_data+'error: '+code+'='+match_text[match_error.index(code)]+'\n'
    raise RQFail(error_data)
  return (nprml)

# Attempt to guess missing segments not listed in the NPRML 
def fill_missing(segments,options):
  """Guess urls for segments NPR advertised fewer than minsegments of.

  Segment mp3 urls follow a '<base>/<name>_<NN>.mp3' pattern; any
  segment number up to options['minsegments'] that is not already
  present gets a synthesized entry (marked infill=1, titled
  'Title Unavailable') appended after the real ones.

  Returns the segments dict with synthesized entries added.  If no url
  matches the expected pattern, returns it unchanged (bug fix: `match`
  could be None or unbound, crashing below).
  """
  pattern = re.compile(r'(.*\/)(\w+_.*_)(\d+).mp3')
  available = []
  match = None
  # note which segment numbers we already have, and learn the url scheme
  for key in segments:
    available.append(int(segments[key]['segnum']))
    m = pattern.search(segments[key]['mp3url'])
    if m:
      match = m
  if match is None:
    # no url matched the naming scheme; nothing sane to synthesize
    return (segments)
  basename = match.group(2)

  # segment numbers are 1-based
  missing = [n for n in range(1, options['minsegments'] + 1)
             if n not in available]

  # take the program name from the first real segment if it has one
  # (bug fix: segments[0] used to raise KeyError if key 0 was absent)
  program = segments.get(0, {}).get('program') or 'Unknown'

  # insert missing entries after the last real one
  index = len(segments)
  for n in missing:
    # zero-pad single digit segment numbers to match NPR's scheme
    segnum = str(n).zfill(2)
    title = 'Segment: ' + segnum + ' - ' + 'Title Unavailable'
    mp3url = match.group(1) + basename + segnum + '.mp3'
    filename = basename + segnum
    segments[index] = {'title': title, 'url': '', 'mp3url': mp3url,
                       'filename': filename, 'infill': 1,
                       'segnum': segnum, 'program': program}
    index += 1
  return (segments)


# Parse the NPRML for list of advertised segments
def parse_nprml(nprml,options):
  """Extract segment titles and mp3 urls from an NPRML document.

  Each advertised m3u link is fetched to resolve the actual mp3 url.
  Returns {index: {title,url,mp3url,filename,infill,segnum,program}}.
  Raises RQFail when the audio is incomplete or nothing can be parsed.
  """
  # parse and prepare links for downloading
  title=re.findall('<title><.*\[.*\[(.*)\]\]></title>',nprml.content)
  url=re.findall('<mp3 type="m3u">(.*)</mp3>',nprml.content)
  segments={}

  # bug fix: an empty title list used to crash on title[0] below
  if not title:
    raise RQFail('No data parsed from NPRML')

  # Check the number of urls against the number of titles
  # if there are fewer urls than titles, not all the audio is available.
  # Some shows have more than one link; this may cause problems later
  # with matching titles to urls
  if len(url) < len(title)-1:
    raise RQFail('Complete audio not available at this time')

  # strip off NPR program description title
  # bug fix: `program` was left unbound when the first title didn't
  # match, causing a NameError in the loop below
  program='Unknown'
  match=re.search('.*NPR Programs: (.*)',title[0])
  if match:
    program = match.group(1)
    title.pop(0)

  # initialize the dictionary and insert the titles and urls
  # bug fix: enumerate instead of title.index(), which returns the
  # first occurrence and mismatches urls when two segments share a title
  for j, seg_title in enumerate(title):
    mp3url=requests.get(url[j])
    match=re.search('.*\/(\w+)\.mp3$',mp3url.content)
    segnum=re.search('.*_(\d+).mp3$',mp3url.content)
    segments[j]={'title':seg_title,'url':url[j],'mp3url':mp3url.content,'filename':match.group(1),'infill':0,'segnum':segnum.group(1),'program':program}
  if len(segments.keys()) <= 0:
    raise RQFail('No data parsed from NPRML')
  # it appears that sometimes NPR "expires" programs after a few days
  # there are typically 19 segments; attempt to add in any missing ones
  if len(segments.keys()) < options['minsegments']:
    segments=fill_missing(segments,options)

  return (segments)

# Download an individual segment
def dl_segment(url,filename,infill,outpath):
  """Download one segment mp3 into outpath; return its filename.

  Raises RQFail on connection problems, a non-ok status, or a
  non-audio content-type.  A failed infill download raises a milder
  message, since guessed segments may legitimately not exist.
  """
  print('-'*20)
  print('downloading: ' + url)
  # download the segment and get the file name from the URL
  try:
    response=requests.get(url)
  except requests.ConnectionError:
    raise RQFail(requests.exceptions.ConnectionError)
  else:
    if not response.status_code==requests.codes.ok:
      if infill==1:
        raise RQFail('attempted to download missing segment.\ninfill segment does not exist, skipping')
      raise RQFail(response.status_code)
  # check the content type downloaded for audio/mpeg type
  if not re.search('.*(audio).*', response.headers['content-type']):
    raise RQFail('non audio content-type recieved: '+response.headers['content-type'])
  if not filename:
    print('no filename; assigning temporary.')
    # create a filename out of epoch seconds - lame but effective
    filename='temporary'+str(round(time.time(),2))
  filename=filename+'.mp3'
  with open(outpath+filename, 'wb') as code:
    code.write(response.content)
  return (filename)


# Download segments for program
def download_program(program,date,options):
  """Download all segments of one episode into a dated directory.

  Skips episodes whose directory already holds a manifest.log (the
  marker that the episode completed earlier).  On success writes a
  per-episode m3u playlist and manifest, and optionally a generic
  playlist in the output root.

  Args:
    program: NPR program id (ATC=2, ME=3, WESa=7, WESu=10)
    date: 'YYYY-M-D' episode date string
    options: merged options dict
  """
  # npr api query engine url
  baseurl=options['baseurl']
  apiKey=options['apikey']

  # morning shows = 01, evening shows = 02 for directory listings by date
  if program in (3, 7, 10):
    basepath=date+'_01/'
  else:
    basepath=date+'_02/'
  outpath=options['outpath']+basepath
  # an existing manifest means this episode was already downloaded
  if os.path.exists(outpath+'manifest.log'):
    print('program is up to date, nothing to download\n')
    return ()

  # create the output directory
  if not os.path.exists(outpath):
    try:
      os.makedirs(outpath)
    except Exception as e:
      print('problem creating directory for output: ' + str(e))

  # see http://www.npr.org/api/mappingCodes.php for codes
  # ATC=2, ME=3, WESa=7, WESu=10
  payload={'id': program,'apiKey': apiKey,'fields':'title,audio','dateType':'story','output':'NPRML','date':date}

  # get the nprml document
  # bug fix: "except RQFail as (error_value)" is invalid python 3 syntax
  try:
    nprml=get_nprml(baseurl,payload)
  except RQFail as error_value:
    print('Failed to download program index from NPR')
    print('Errors Follow\n'+'='*20+'\n'+str(error_value))
    return ()

  # parse the nprml
  try:
    segments=parse_nprml(nprml,options)
  except RQFail as error_value:
    print('Error parsing NPRML: ' + str(error_value))
    return ()

  # attempt to download each segment and record the filenames
  downloaded=[]
  # bug fix: m3ufile could be unbound at write_manifest below if the
  # loop never assigned it
  m3ufile=None
  for i in segments:
    url=segments[i]['mp3url']
    filename=segments[i]['filename']
    infill=segments[i]['infill']

    # don't actually download on a dry run
    if not options['dryrun']:
      try:
        downloaded.append(dl_segment(url,filename,infill,outpath))
      except RQFail as error_value:
        if infill == 1:
          # guessed segments may legitimately not exist; keep going
          print(str(error_value))
        else:
          print('Failed to download appropriate data at: '+url)
          print('Returned error: '+str(error_value))
          return ()
      else:
        if not options['notag']:
          try:
            id3_tag(segments[i], outpath)
          except Exception as e:
            print('problem writing tag: ' + str(e))
      # rewritten after every segment so a partial run still has a playlist
      m3ufile=make_m3u(date,program,outpath,downloaded)
    else:
      print('dry run - download: '+str(filename)+' infill: '+str(infill))
  if not options['dryrun']:
    # currently not cleaning this up, just leaving it to rot
    if options['genericm3u']:
      make_genericm3u(program,basepath,outpath,downloaded,options)

    if m3ufile is not None:
      write_manifest(outpath,downloaded,m3ufile)
  return ()


#### main Function ###
def main():
  """Entry point: merge options, then fetch new episodes and prune old ones."""
  # parse the command line arguments
  args=parse_args()
  # parse the configuration file
  config=read_config(args)
  # merge the command line and configuration file
  options=merge_options(args,config)

  if args.version:
    print(version)
    exit(0)

  # load non-standard modules (requests, optionally mutagen)
  options=load_modules(options)

  # generate a dictionary of new episodes to download
  episodes=download_list(options)

  # recurse the episodes dictionary and attempt to download each one
  for key in episodes:
    print('*'*30)
    print('download program '+str(episodes[key]['program'])+' for date: '+episodes[key]['date'])
    # download all the segments in the program
    download_program(episodes[key]['program'],episodes[key]['date'],options)
    # bug fix: message read 'downloding'
    print('done downloading program')

  # prune episodes beyond options['keep']
  clean_up(options)
  return ()

main()
