#!/usr/bin/env python
#
# tv_grab_uk_rt3.py - XMLTV Grabber for Radio Times data
#
# Copyright (C) 2012 Adam Sutton <dev@adamsutton.me.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
"""
Noddy attempt at a RadioTimes XMLTV grabber as tv_grab_uk_rt is slow.

I have now discovered that the cause of the slow own of tv_grab_uk_rt
is in fact its date/time processing. While I intend to provide some
suggested workarounds, this script is still MUCH faster and provides
a useful testing ground for other features.

The script doesn't yet provide all the features of the original however
it has most of them. The main thing missing is some of the more esoteric
title processing routines.
"""

import os, sys, re, time, shutil
from datetime import datetime, timedelta, tzinfo
from optparse import OptionParser

# ###########################################################################
# Configuration
# ###########################################################################

#
# Globals
#
# Module-level state.  (The original used a `global` statement here,
# which is only meaningful inside a function body; at module scope the
# assignments below already create globals, so it has been dropped.)
START   = time.time()  # script start time, for the final timing report
DEBUGON = False        # verbose logging flag (see debug_enable)
CONF    = {}           # parsed configuration key/values (see load_config)

#
# App info
#
# Grabber identity: name, human description, version and the XMLTV
# baseline capabilities this script advertises via --capabilities.
app_name = 'tv_grab_uk_rt_aps'
app_desc = 'United Kingdom/Republic of Ireland (Radio Times Fast)'
app_vers = '0.0.1'
app_caps = [
  'baseline',
  'lineups',
  'cache',
  'manualconfig',
]

#
# Paths/URLS
#
data_url   = 'http://xmltv.radiotimes.com/xmltv'              # Radio Times listings feed
supp_url   = 'http://supplement.xmltv.org/tv_grab_uk_rt'      # shared grabber supplements
conf_root  = os.path.expanduser('~/.xmltv')                   # per-user xmltv directory
conf_path  = os.path.join(conf_root, app_name + '.conf')      # this grabber's config file
cache_path = os.path.join(conf_root, 'cache2')                # root of all cached data
supp_path  = os.path.join(conf_root, 'supplement', app_name)  # local supplement copies
web_path   = os.path.join(cache_path, 'www')                  # raw HTTP fetch cache
xml_path   = os.path.join(cache_path, app_name)               # generated output cache

#
# Load config file
#
def load_config ():

  # Copy existing config
  if not os.path.exists(conf_path):
    old_path = conf_path.replace('_aps', '')
    if os.path.exists(old_path):
      shutil.copy(old_path, conf_path)
    else:
      print 'No configuration available'
      sys.exit(1)

  global CONF
  CONF = { 'channel' : [] }
  for l in open(conf_path):
    pts = l.strip().split('=')
    if len(pts) != 2: continue
    if pts[0] != 'channel':
      CONF[pts[0]] = pts[1]
    else:
      CONF['channel'].append(pts[1])

#
# Update config
#
def set_config ( key, val ):
  # Record a configuration entry, replacing any previous value.
  global CONF
  CONF.update({key: val})

#
# Get config
#
def get_config ( key, defa = None ):
  # Fetch a configuration value, falling back to 'defa' when unset.
  try:
    return CONF[key]
  except KeyError:
    return defa

# ###########################################################################
# Debug
# ###########################################################################

#
# Enable debug
#
def debug_enable ():
  # Switch on verbose diagnostic output (see debug()) for the whole run.
  global DEBUGON
  DEBUGON = True

#
# Logging
#
def log ( msg ):
  # Write a message to stderr so it never mixes with XML on stdout.
  sys.stderr.write('%s\n' % (msg,))

#
# Debug
#
def debug ( msg ):
  # Forward to log() only when debugging has been switched on.
  if not DEBUGON:
    return
  log(msg)

# ###########################################################################
# Data Fetching
# ###########################################################################

#
# Calculate md5sum
#
def md5sum ( s ):
  # Return the uppercase hex MD5 digest of str(s).
  #
  # Uses hashlib (available since Python 2.5) instead of the long
  # deprecated 'md5' module, and hexdigest() instead of manually
  # formatting the raw digest bytes.
  import hashlib
  data = str(s)
  if not isinstance(data, bytes):  # Python 3: hashing requires bytes
    data = data.encode('utf-8')
  return hashlib.md5(data).hexdigest().upper()

#
# Fetch data from URL
#
# Will cache the data and only fetch as required
#
def fetch_url ( url, force = False ):
  # Fetch a URL, using an on-disk cache keyed by the content-length and
  # last-modified headers from a cheap HEAD request, so unchanged data
  # is never re-downloaded.
  #
  #   url   - the URL to fetch
  #   force - when True, skip the cache check and always download
  #
  # Returns the raw response body.
  import httplib, urllib
  import urllib2, urlparse
  log('    fetch %s ...' % url)

  # Derive the cache file path from the URL (host + path under web_path)
  up    = urlparse.urlparse(url)
  cpath = os.path.join(web_path, up.netloc + up.path)
  mpath = cpath + '.meta'

  # Ensure cache directory exists
  if not os.path.exists(os.path.dirname(cpath)):
    os.makedirs(os.path.dirname(cpath))

  # Load cached meta data (repr/eval round-trips the header dict)
  # NOTE(review): eval() of a local cache file is trusted-input only
  cmeta = None
  if os.path.exists(mpath):
    try:
      cmeta = eval(open(mpath).read())
    except: pass

  # HEAD request for the remote headers (cheap freshness check)
  con  = httplib.HTTPConnection(up.netloc)
  con.request("HEAD", up.path)
  rsp  = con.getresponse()
  head = {}
  for l in rsp.getheaders(): head[l[0]] = l[1]
  con.close()

  # Get remote meta (the two headers used as the cache key)
  rmeta = {}
  rmeta['content-length'] = int(head['content-length'])
  rmeta['last-modified']  = head['last-modified']

  # Use the cache only when not forced, meta matches and file size agrees
  data = None
  if not force and not get_config('fetch') and cmeta:
    if cmeta == rmeta:
      if cmeta['content-length'] == os.stat(cpath).st_size:
        debug('      have cached data')
        data = open(cpath).read()

  # Retrieve remote (and refresh both cache files)
  if not data:
    up = urllib2.urlopen(url)
    debug('      get remote data')
    data = up.read()
    up.close()
    open(cpath, 'w').write(data)
    open(mpath, 'w').write(repr(rmeta))

  # Return raw data
  return data

#
# Process file
#
def process_file ( data, delim ):
  # Split raw feed text into rows: one list of fields per line, fields
  # separated by 'delim'.  Lines starting with '#' (after stripping
  # surrounding whitespace) are treated as comments and skipped.
  rows = []
  for line in data.splitlines():
    line = line.strip()
    if not line.startswith('#'):
      rows.append(line.split(delim))
  return rows

#
# Supplement loading
#
def fetch_supplement ( name, delim = '|' ):
  # Download a supplement file and split it into rows of fields.
  raw = fetch_url(supp_url + '/' + name)
  return process_file(raw, delim)

#
# Data fetching
#
def fetch_data ( name, delim = '~' ):
  # Download a Radio Times data file and split it into rows of fields.
  raw = fetch_url(data_url + '/' + name)
  return process_file(raw, delim)

#
# Fetch a programme
#
# Attempt to split data by day and only process what has changed
#
def fetch_programmes ( name ):
  # Fetch a channel's raw programme rows and group them by day.
  #
  # Returns a dict: day key (yyyymmdd) -> (cached, payload), where
  # payload is a list of already-parsed objects when the output cache
  # validated (cached True), or the raw field rows otherwise (False).

  # Fetch
  data = fetch_data(name)

  # Split by day (field 19 is dd/mm/yyyy; rewrite to a yyyymmdd key)
  days = {}
  for d in data: 
    if len(d) < 23: continue
    day = re.sub('(\d+)/(\d+)/(\d+)', r'\3\2\1', d[19])
    if day not in days: days[day] = []
    days[day].append(d)

  # Only return days we've not already generated
  ret = {}
  for d in days:
    data  = None

    # Use cache?
    if get_config('ocache') and not get_config('force'):
      cpath = os.path.join(cache_path, app_name, d + '_' + name)
      mpath = cpath + '.md5'

      # Create directory
      if not os.path.exists(os.path.dirname(cpath)):
        os.makedirs(os.path.dirname(cpath))

      # Check cache (the .md5 file holds "input_md5 output_md5")
      if os.path.exists(cpath) and os.path.exists(mpath):
        debug('    checking output cache')
        try:
          md5 = open(mpath).read().split(' ')
          dat = open(cpath).read()
          #debug('      %s' % md5[0])
          #debug('      %s' % md5[1])

          # Validate both the raw input and the pickled output digests
          imd5 = md5sum(repr(days[d]))
          omd5 = md5sum(dat)
          #debug('      %s' % imd5)
          #debug('      %s' % omd5)
          if md5[0] == imd5 and md5[1] == omd5:
            import pickle
            debug('    have output cache')
            # NOTE(review): pickle.loads of a local cache file -- only
            # acceptable because the cache is written by this script
            data = pickle.loads(dat)
        except Exception, e:
          log('ERROR: failed to check cache %s' % e)

    # Return data
    if data:
      ret[d] = (True, data)
    else:
      ret[d] = (False, days[d])

  # Return
  return ret
  

# ###########################################################################
# Text Processing
# ###########################################################################

#
# Unescape some HTML encodings
#
def html_unescape ( s ):
  # Replace numeric character references (&#65; / &#x41;) with the
  # corresponding character and decode the &amp; entity.  Other named
  # entities (&lt; &gt; &apos; &quot;) are deliberately left encoded so
  # the later html_escape() pass need not re-escape them.
  #
  # Fixes two defects in the original:
  #   - hex references tested t[3] == 'x' instead of t[2], so every
  #     &#x..; reference fell through to the decimal branch, raised
  #     ValueError and was merely logged
  #   - named entities were compared against '&amp;' after the '&'/';'
  #     delimiters had already been stripped, so '&amp;' never matched
  #     and was double-escaped by the later html_escape() pass
  try:
    _unichr = unichr      # Python 2
  except NameError:
    _unichr = chr         # Python 3: chr covers all code points
  def fixup1 ( m ):
    t = m.group(0)
    if t[:2] == "&#":
      try:
        if t[2] in ('x', 'X'):
          t = _unichr(int(t[3:-1], 16))
        else:
          t = _unichr(int(t[2:-1]))
      except ValueError:
        log('ERROR: could not process %s' % t)
    else:
      n = t[1:-1]           # entity name without the '&' and ';'
      if n == 'amp': t = '&'
    return t
  return re.sub(r"&#?\w+;", fixup1, s)

#
# Escape HTML
#
def html_escape ( s ):
  # Escape bare ampersands for XML output.  Angle brackets and quotes
  # are intentionally left untouched, mirroring html_unescape() which
  # never decodes them in the first place.
  return s.replace('&', '&amp;')

#
# Fix UTF8
#
def utf8_fixup ( s, fixups ):
  # Apply each (pattern, replacement) pair from the utf8_fixups
  # supplement to the string, in order.
  for pair in fixups:
    s = s.replace(pair[0], pair[1])
  return s

#
# Get unicode
#
def to_unicode ( s ):
  # Convert a raw byte string from the feed into a unicode object,
  # applying the supplement's utf8 fixups first and falling back to
  # latin1 (then a lossy decode) when the configured encoding fails.
  enc = get_config('encoding', 'utf8')
  # Only bother when the string actually contains non-ASCII bytes
  f = False
  for c in s:
    if ord(c) >= 128: f = True
  if f and enc in [ 'utf8', 'utf-8' ]:
    s = utf8_fixup(s, get_config('utf8_fixups'))
  if f:
    try:
      s = unicode(s, enc)
    except Exception, e:
      debug('problem encoding %s' % repr(s))
      debug('  %s' % e)
      try:
        s = s.decode('latin1')
      except:
        # Last resort: drop undecodable bytes entirely
        s = s.decode(enc, 'ignore').encode(enc)
  return s

#
# Text to number
#
# Number-word to digit-string lookup, used for 'Part One' style text.
# Fixes the original table, where 'eight' mapped to '9' and 'nine' was
# missing entirely.
TEXT_TO_NUMBER = {
  'one'   : '1',
  'two'   : '2',
  'three' : '3',
  'four'  : '4',
  'five'  : '5',
  'six'   : '6',
  'seven' : '7',
  'eight' : '8',
  'nine'  : '9',
  'ten'   : '10'
}

#
# Text to number
#
# TODO: expand to handle larger numbers?
# 
def text_to_number ( t ):
  # Map a spelled-out number word onto its digit string; anything not
  # in the lookup table is returned unchanged.
  return TEXT_TO_NUMBER.get(t.lower(), t)

# ###########################################################################
# Time processing
# ###########################################################################

# Cache of (start, end) DST datetimes keyed by year
DST = {}

# Get dst start stop for specified year
#
# DST begins at 01:00 GMT on the last sunday of March
# it ends 01:00 GMT (02:00 BST) on the last sunday of October
#
# Fix: the computed result is now stored into DST so the memoisation
# actually works -- the original checked the cache but never wrote it.
def dst_times ( year ):
  ret = None
  if year in DST: ret = DST[year]
  else:
    b = None
    e = None
    # The last Sunday always falls within the final 7 days of the month
    for i in range(7):
      t = datetime(year, 3, 31-i, 1)
      if not b and t.weekday() == 6: b = t
      t = datetime(year, 10, 31-i, 2)
      if not e and t.weekday() == 6: e = t
    ret = (b, e)
    DST[year] = ret
  return ret

# Time Zone
class TZHR ( tzinfo ):
  def __init__ ( self, of = None ):
    if of is not None:
      self.of = of
  def utcoffset ( self, dt ):
    return timedelta(minutes=self.of)
  def dst ( self, dt ):
    return timedelta(0)
  def tzname ( self, dt ):
    r = ''
    if self.of >= 0: r = '+'
    else:            r = '-'
    r = r + '%02d%02d' % (self.of / 60, self.of % 60)
    return r
  def __cmp__ ( self, other ):
    return cmp(self.of, other.of)

# Format time
def format_time ( tm ):
  # Render a datetime in XMLTV form: YYYYMMDDHHMMSS followed by the
  # numeric UTC offset (empty for naive datetimes).
  xmltv_fmt = '%Y%m%d%H%M%S %z'
  return tm.strftime(xmltv_fmt)

# Adjust time for DST
def dst_adjust ( tm ):
  # Attach the appropriate fixed-offset tzinfo to a naive local time:
  # +0100 while UK DST is in force for that year, +0000 otherwise.
  begin, end = dst_times(tm.year)
  offset = 60 if begin <= tm < end else 0
  return tm.replace(tzinfo=TZHR(offset))

# ###########################################################################
# Programmes
# ###########################################################################

#
# RegExps
#
EPISODE_EXP = re.compile('(\d+(/\d+)?)?,?\s*(series (\d+))?')  # 'N[/M][, series S]' episode field
SUBTEP_EXP  = re.compile('((\d+)/(\d+))-')                     # leading 'N/M-' prefix in sub-titles

#
# Class to hold programme data
#
class Programme:
  """A single Radio Times programme entry.

  Built from one raw feed row of at least 23 '~'-separated fields; the
  p_* helpers below parse the individual fields, some of them setting
  attributes as side effects (e.g. p_episode may set sub_title).
  """

  # Initialise
  def __init__ ( self, data = None, chn = None ):
    # Initialise fields the parsers may fill in as side effects, so
    # they exist before parsing starts
    self.sub_title   = None
    self.title       = None
    self.episode     = None
    self.timezone    = None

    # Store the owning Channel (provides id/quality for output)
    if chn:
      self.channel = chn

    # Extract fields -- NOTE: parse order matters: p_episode can set
    # sub_title, which p_sub_title picks up, which p_title may extend
    if data:
      self.episode     = self.p_episode(data[1])
      self.sub_title   = self.p_sub_title(data[2])
      self.title       = self.p_title(data[0])
      self.year        = data[3]
      self.director    = self.p_string(data[4])
      self.cast        = self.p_cast(data[5])
      self.premiere    = data[6]  == 'true'
      self.film        = data[7]  == 'true'
      self.repeat      = data[8]  == 'true'
      self.subtitles   = data[9]  == 'true'
      self.widescreen  = data[10] == 'true'
      self.new         = data[11] == 'true'
      self.deaf        = data[12] == 'true'
      self.baw         = data[13] == 'true'
      self.star        = data[14]
      self.cert        = data[15]
      self.genre       = self.p_genre(data[16])
      self.description = self.p_description(data[17])
      self.rtchoice    = data[18] == 'true'
      (self.start, self.stop, self.dur) = self.p_time(data[19], data[20], data[21], data[22])

  # Parse string (html unescape, unicode translation, re-escape)
  def p_string ( self, s ):

    # Decode html entities
    s = html_unescape(s)

    # Convert to unicode
    s = to_unicode(s)

    # Escape bare ampersands again for XML output
    s = html_escape(s)

    # A few "basic" fixes: trim and collapse repeated spaces
    s = s.strip()
    s = re.sub(' +', ' ', s)

    return s

  # Parse title
  def p_title ( self, s ):

    # Get title-processing config (keyed by supplement type number:
    # 1 = prefixes to strip, 2 = title/sub-title splits, 5 = renames)
    title_proc = get_config('prog_titles', {})
    title_pre  = []
    title_sub  = []
    title_adj  = {}
    if 1 in title_proc:
      title_pre = title_proc[1]
    if 2 in title_proc:
      title_sub = title_proc[2]
    if 5 in title_proc:
      title_adj = title_proc[5]

    # Extract a leading timezone marker, e.g. '(GMT) News'
    r = re.search('^\((GMT|UTC|BST|UTC\+1)\)', s)
    if r:
      self.timezone = r.group(1)
      s             = s.replace(r.group(0), '')

    # Remove trailing :|
    s = re.sub('[|:]$', '', s)

    # Pre-fix strip (non-title info)
    for p in title_pre:
      r = re.search(p+'\s*:\s*', s)
      if r:
        s = s.replace(r.group(0), '')
        break

    # Title/sub-title split for known 'Title: Sub' style entries
    if title_sub and (':' in s or '-' in s):
      for p in title_sub:
        if s.startswith(p):
          e = '^' + p + '\s*[:-]\s*'
          r = re.search(e, s)
          if r:
            st = s.replace(r.group(0), '')
            s  = p
            if self.sub_title: st = st + ': ' + self.sub_title
            self.sub_title = st
            break

    # Fix titles (direct renames from the supplement)
    if s in title_adj: s = title_adj[s]

    # Done
    return self.p_string(s)

  # Parse sub-title
  def p_sub_title ( self, s ):
    # A leading 'N/M-' prefix is really episode numbering
    r = SUBTEP_EXP.search(s)
    if r:
      self.episode = self.p_episode(r.group(1))
      s            = s.replace(r.group(0), '')

    # Use existing (parsed from episode)
    if not s and self.sub_title: s = self.sub_title

    # Extract part numbering, e.g. ' - Part One' -> ' (Part 1)'
    r = re.search('\s*-\s*Part (\w+)', s)
    if r:
      s = s.replace(r.group(0), ' (Part %s)' % text_to_number(r.group(1)))

    return self.p_string(s)

  # Parse Episode
  #
  # Returns an xmltv_ns style (series, episode, part) tuple of
  # zero-based number strings, or None when the field doesn't parse
  # (in which case the raw text is kept as the sub-title instead).
  def p_episode ( self, s ):
    ret = None
    if s:
      r   = EPISODE_EXP.search(s)
      if r:
        e = r.group(1)
        if not e: e = ''
        else:
          # 'N/M' -> zero-based 'N-1/M'; drop the total when invalid
          p = e.split('/')
          if len(p) == 2:
            o = int(p[1])
            e = int(p[0]) - 1
            if e >= o: o = None
            e = str(e)
            if o: e = e + '/' + str(o)
          else:
            e = str(int(e) - 1)
        se = r.group(4)
        if not se: se = ''
        else:
          se = str(int(se) - 1)
        if e or se:
          ret = (se, e, '')

      # Assume subtitle
      if not ret:
        self.sub_title = s
    return ret

  # Parse Cast
  def p_cast ( self, s ):
    ret = []
    c   = '|'
    if c not in s: c = ',' # Old style?
    for a in s.split(c):
      # Keep only the text after a '*' marker
      # NOTE(review): presumably 'character*actor' pairs -- confirm
      i = a.find('*')
      if i != -1: a = a[i+1:]
      if a: ret.append(self.p_string(a))
    return ret

  # Parse Genre
  def p_genre ( self, s ):
    # NOTE(review): unlike p_title this passes no default for the
    # 'prog_titles' config, so an unset value would raise TypeError on
    # the 'in' test below -- in practice main() always sets it
    title_genre = None
    title_proc  = get_config('prog_titles')
    if 6 in title_proc: 
      title_genre = title_proc[6]

    # Films always get the 'Film' category
    if self.film:
      s = 'Film'

    # Otherwise apply per-title genre overrides from the supplement
    elif title_genre:
      if self.title in title_genre:
        s = title_genre[self.title]

    return s

  # Parse description
  def p_description ( self, s ):

    # Remove update info
    i = s.find('UPDATED LISTING')
    if i != -1: s = s[:i]

    # Strip 'New series' prefix and mark the programme as a premiere
    if s.startswith('New series'):
      self.premiere = True
      s = re.sub('^New series\s*(\(\d+\/\d+\))?\.\s*', '', s)
      # TODO: extract the series info?
 
    return self.p_string(s)

  # Parse time data
  #
  #   day 'dd/mm/yyyy', start 'HH:MM', dur in minutes; the 'stop'
  #   field is unused -- the end time is derived from the duration
  #
  # Returns (start, stop, duration) with DST-aware tzinfo attached.
  def p_time ( self, day, start, stop, dur ):

    # Duration
    d = int(dur)

    # Parse start
    f = datetime.strptime(day + ' ' + start, '%d/%m/%Y %H:%M')

    # DST?
    f = dst_adjust(f)

    # Create end from duration
    t = f + timedelta(minutes=d)

    return (f, t, d)

  # Output (debug)
  def __str__ ( self, extended = False ):
    b = self.f_time(self.start)
    ret = '%s - %s' % (b, self.title)
    return ret

  # Format a datetime in XMLTV form
  def f_time ( self, tm ):
    return format_time(tm)

  # Convert to the dict structure expected by the python-xmltv module
  def to_pyxmltv ( self ):
    ret = { 'channel' : self.channel.id, 'title' : [ (self.title, '') ] }
   
    # Basic info
    if self.sub_title:
      ret['sub-title']   = [ (self.sub_title, '') ]
    if self.episode:
      ret['episode-num'] = [ ('%s . %s . %s' % self.episode, 'xmltv_ns') ]
    if self.genre:
      ret['category']    = [ (self.genre, '' ) ]
    if self.description:
      ret['desc']        = [ (self.description, '') ]

    # Credits
    if self.director or self.cast:
      ret['credits']  = {}
      if self.director: ret['credits']['director'] = [ self.director ]
      if self.cast:
        ret['credits']['actor'] = []
        for a in self.cast:
          ret['credits']['actor'].append(a)

    # Dates/Times
    if self.year:
      ret['date']  = self.year
    if self.start and self.stop:
      ret['start'] = self.f_time(self.start)
      ret['stop']  = self.f_time(self.stop)
    ret['length']   = { 'units' : 'minutes', 'length' : self.dur }

    # Content info
    if self.subtitles:
      ret['subtitles'] = [ {'type': 'teletext', 'language': ('English', '')} ]
    if self.new:
      ret['new'] = True
    ret['video'] = { 'colour'  : not self.baw,
                     'aspect'  : '4:3',
                     'present' : True,
                     'quality' : self.channel.quality }
    if self.widescreen: ret['video']['aspect'] = '16:9'

    # Done
    return ret

  # Render this programme directly as an XMLTV <programme> element
  def to_xmltv ( self ):
    ret = '  <programme start="%s" stop="%s" channel="%s">\n'\
        % (self.f_time(self.start), self.f_time(self.stop), self.channel.id)
    ret = ret + '    <title>%s</title>\n' % self.title
    if self.sub_title:
      ret = ret + '    <sub-title>%s</sub-title>\n' % self.sub_title
    if self.description:
      ret = ret + '    <desc lang="en">%s</desc>\n' % self.description
    if self.director or self.cast:
      ret = ret + '    <credits>\n'
      if self.director:
        ret = ret + '      <director>%s</director>\n' % self.director
      for a in self.cast:
        ret = ret + '      <actor>%s</actor>\n' % a
      ret = ret + '    </credits>\n'
    if self.year:
      ret = ret + '    <date>%s</date>\n' % self.year
    if self.genre:
      ret = ret + '    <category lang="en">%s</category>\n' % self.genre
    if self.episode:
      ret = ret + '    <episode-num system="xmltv_ns">%s.%s.%s</episode-num>\n' % self.episode
    ret = ret + '    <video>\n'
    if self.baw:
      ret = ret + '      <colour>no</colour>\n'
    if self.widescreen or self.channel.quality == 'HDTV':
      ret = ret + '      <aspect>16:9</aspect>\n'
    ret = ret + '      <quality>%s</quality>\n' % self.channel.quality
    ret = ret + '    </video>\n'
    if self.repeat:
      ret = ret + '    <previously-shown />\n'
    if self.premiere:
      ret = ret + '    <premiere />\n'
    if self.new:
      ret = ret + '    <new />\n'
    if self.subtitles:
      ret = ret + '    <subtitles type="teletext" />\n'
    if self.deaf:
      ret = ret + '    <subtitles type="deaf-signed" />\n'
    if self.cert:
      ret = ret + '    <rating system="BBFC">\n'
      ret = ret + '      <value>%s</value>\n' % self.cert
      ret = ret + '    </rating>\n'
    if self.star:
      ret = ret + '    <star-rating system="Radio Times Film Rating">\n'
      ret = ret + '      <value>%s/5</value>\n' % self.star
      ret = ret + '    </star-rating>\n'
    if self.rtchoice:
      ret = ret + '    <star-rating system="Radio Times Recommendation">\n'
      ret = ret + '      <value>1/1</value>\n'
      ret = ret + '    </star-rating>\n'
    ret = ret + '  </programme>\n'
    return ret

# ###########################################################################
# Channel Data
# ###########################################################################

#
# Class to define channel
#
class Channel:
  """A broadcast channel: identity/presentation data plus the parsed
  Programme list loaded from the Radio Times feed."""

  # Load data from a 7 field channel_ids row:
  #   id, RT numeric id, name, icon URL, timeshift, hours, quality
  def __init__ ( self, data = None ):
    if data:
      self.id      = data[0]
      self.rtid    = int(data[1])
      self.name    = self.p_string(data[2])
      self.icon    = data[3]
      self.shift   = self.p_shift(data[4])
      self.hours   = self.p_hours(data[5])
      self.quality = data[6]
      self.progs   = []

  # Parse string (html format and unicode translation)
  def p_string ( self, s ):
    s = html_unescape(s)
    s = to_unicode(s)
    s = html_escape(s)
    return s

  # Parse shift: '+Nhour' -> N hours, anything else -> 0
  def p_shift ( self, s ):
    r = re.search('^\+(\d)hour', s)
    if r:
      s = int(r.group(1))
    else:
      s = 0
    return s

  # Parse hours: 'HHMM-HHMM' -> (start, end) as minutes past midnight,
  # or None when the field is not in that form
  def p_hours ( self, s ):
    r = re.search('^(\d\d)(\d\d)-(\d\d)(\d\d)', s)
    if r:
      s = (int(r.group(1)) * 60 + int(r.group(2)), int(r.group(3)) * 60 + int(r.group(4)))
    else:
      s = None
    return s

  # Apply time shifting (for '+1' style repeat channels)
  def prog_shift ( self, p ):
    if self.shift:
      p.start = p.start + timedelta(hours=self.shift)
      p.stop  = p.stop  + timedelta(hours=self.shift)
    return p

  # Fix programme scheduling
  #
  # Returns False when the programme should be dropped (zero duration,
  # or overlapping the previous entry).  May adjust p's times in place
  # to correct DST discontinuities.
  def prog_validate ( self, p, prev ):
    ret = True

    # Blank
    if not p.dur: return False

    # TODO: Check on air?

    # Check against previous
    if prev:

      # Timing mismatch
      if prev.stop != p.start:

        # DST boundary: re-anchor this programme at the previous stop
        if prev.stop.tzinfo != p.start.tzinfo:
          debug('DST error, will correct')
          p.stop  = prev.stop + (p.stop - p.start)
          p.start = prev.stop

        # Overlap
        if prev.stop > p.start:
          debug('overlap detected for %s and %s' % (prev.title, p.title))
          ret = False

        # Missing
        else:
          debug('missing scheduling')
          # TODO: insert TBC entry

        # Output prev/cur times
        debug('  prev.stop = %s' % format_time(prev.stop))
        debug('  p.start   = %s' % format_time(p.start))

    return ret

  # Load a channels programmes, day by day via the output cache where
  # possible, into self.progs
  def load_programmes ( self ):
    self.progs   = []
    prev = None

    # Load data
    name = '%d.dat' % self.rtid
    data = fetch_programmes(name)

    # Process each day in date order (sorted() instead of the
    # Python 2 only keys()/sort() combination)
    for day in sorted(data):
      cached = data[day][0]
      tmp    = []

      # Each programme
      for d in data[day][1]:
        p = None

        # Cached entries are already Programme objects
        if cached:
          p = d

        # Process a raw feed row
        else:
          if len(d) < 23: continue
          try:
            p = Programme(d, self)
          except Exception as e:
            log('ERROR: %s' % e)
            # Fix: the original logged the undefined name 'pts' here,
            # raising NameError and aborting the whole channel load
            log('ERROR: data was %s' % d)
            continue

        # Time shift
        p = self.prog_shift(p)

        # Validate
        if not self.prog_validate(p, prev): continue

        # Store
        tmp.append(p)
        self.progs.append(p)
        prev = p

      # Cache (directories should already exist) -- best effort only,
      # but failures are now logged in debug mode rather than being
      # silently swallowed by a bare except
      if get_config('ocache') and not cached and tmp:
        cpath = os.path.join(cache_path, app_name, day + '_' + name)
        mpath = cpath + '.md5'
        try:
          import pickle
          tmp = pickle.dumps(tmp)
          open(cpath, 'w').write(tmp)
          open(mpath, 'w').write('%s %s' % (md5sum(repr(data[day][1])), md5sum(tmp)))
        except Exception as e:
          debug('failed to write output cache: %s' % e)

  # Output (debug)
  def __str__ ( self, extended = False ):
    ret = '%04d %-15s %s' % (self.rtid, self.id, self.name)
    if extended:
      ret = ret + '\n  icon    = %s' % self.icon
      ret = ret + '\n  quality = %s' % self.quality
    return ret

  # To pyxmltv format
  def to_pyxmltv ( self ):
    ret = { 'display-name' : [ ( self.name, '' ) ],
            'id'           : self.id }
    if self.icon:
      ret['icon'] = [ { 'src' : self.icon } ]
    return ret

  # To xmltv format
  def to_xmltv ( self ):
    ret = '  <channel id="%s">\n' % self.id
    ret = ret + '    <display-name>%s</display-name>\n' % self.name
    if self.icon:
      ret = ret + '    <icon src="%s" />\n' % self.icon
    ret = ret + '  </channel>\n'
    return ret

#
# Load channel data
#
def get_channels ( data, _filter ):
  # Build Channel objects from raw channel_ids rows (exactly 7 fields
  # each), keeping only those whose id appears in _filter -- or all of
  # them when no filter is configured.
  loaded = [Channel(row) for row in data if len(row) == 7]
  if not _filter:
    return loaded

  loaded = [c for c in loaded if c.id in _filter]

  # Emit channels in the filter's order
  # TODO: only needed so output ordering matches the original
  #       tv_grab_uk_rt script, for comparison purposes
  ordered = []
  for cid in _filter:
    for chn in loaded:
      if chn.id == cid:
        ordered.append(chn)
        break
  return ordered

# ###########################################################################
# Command Line options
# ###########################################################################

# Command line options: the XMLTV baseline capability switches plus
# internal debug and caching controls.  (Fixes two user-visible typos
# in the original help text: 'rengerate' and 'pthon-xmltv'.)
optp = OptionParser()
optp.add_option('--configure', action='store_true',
                help='Configure the system')
optp.add_option('--debug',     action='store_true',
                help='Enable debugging output')
optp.add_option('--fetch',     action='store_true',
                help='Force fetching of remote data')
optp.add_option('--force',     action='store_true',
                help='Ignore cached data and regenerate')
optp.add_option('--pyxmltv',   action='store_true',
                help='Use python-xmltv module to output XML')
optp.add_option('--ocache',  action='store_true',
                help='Use the output cache')
optp.add_option('--capabilities', action='store_true',
                help='Display supported capabilities')
optp.add_option('--description', action='store_true',
                help='Display title of this program')
optp.add_option('--version', action='store_true',
                help='Display version information')

# ###########################################################################
# Main
# ###########################################################################

# TODO:
#   precompile regexps for better performance
#   add missing title processing

# Let's go!
if __name__ == '__main__':

  # Parse command line
  (opts,args) = optp.parse_args()

  # Enable debug
  if opts.debug:
    debug_enable()

  # Configure
  if opts.configure:
    print 'TODO: not currently supported, using tv_grab_uk_rt to configure'
    sys.exit(0)

  # Description
  if opts.description:
    print app_desc
    sys.exit(0)

  # Capabilities
  if opts.capabilities:
    for c in app_caps: print c
    sys.exit(0)

  # Version
  if opts.version:
    print 'This is %s version %s' % (app_name, app_vers)
    sys.exit(0)

  # Load configuration
  load_config()

  # Add command line switches
  set_config('fetch',    opts.fetch or opts.force)
  set_config('force',    opts.force)
  set_config('pyxmltv',  opts.pyxmltv)
  set_config('ocache',   opts.ocache)

  # Fetch supplementary data
  log('loading supplements ...')
  supp = {}
  for s in [ 'channel_ids', 'prog_titles_to_process', 'utf8_fixups' ]:
    log('  load %s' % s)
    supp[s] = fetch_supplement(s)

  # Convert fixups
  utf8_fixups = []
  for f in supp['utf8_fixups']:
    t = eval("'" + f[1] + "'")
    f = eval("'" + f[0] + "'")
    utf8_fixups.append([f, t])

  # Convert title processing
  prog_titles = {}
  if get_config('title-processing') == 'enabled':
    for pts in supp['prog_titles_to_process']:
      if len(pts) != 2: continue
      typ = int(pts[0])
      dat = pts[1].split('~')
      if typ in [ 1, 2 ]:
        if typ not in prog_titles: prog_titles[typ] = []
        prog_titles[typ].append(dat[0])
      if typ in [ 5, 6 ]:
        if typ not in prog_titles: prog_titles[typ] = {}
        prog_titles[typ][dat[0]] = dat[1]

  # Convert supplements to config
  set_config('prog_titles', prog_titles)
  set_config('utf8_fixups', utf8_fixups)

  # Get list of channels
  log('load channel data ...')
  chn_filt = get_config('channel')
  channels = get_channels(supp['channel_ids'], chn_filt)

  # Get programmes
  log('load programme data ...')
  beg = time.time()
  idx = 0
  num = len(channels)
  for chn in channels:
    idx = idx + 1
    dur = (time.time() - beg)
    eta = (dur / idx) * (num - idx)
    log('  %03d/%03d - %-50s ... eta %0.2fs' % (idx, num, chn.id, eta))
    chn.load_programmes()

  # Store to file?
  op = sys.stdout
  if len(args) > 0: op = open(args[0], 'w')

  # Generator info
  gen_encoding  = get_config('encoding')
  gen_date      = str(datetime.now())
  gen_src_name  = 'Radio Times XMLTV Service'
  gen_src_url   = 'http://www.radiotimes.com'
  gen_info_name = 'XMLTV fast RT grabber'
  gen_info_url  = 'TBD'

  # python-xmltv output
  if opts.pyxmltv:
    import xmltv
    log('generating xml with python-xmltv ...')
    wr = xmltv.Writer(encoding            = gen_encoding,
                      date                = gen_date,
                      source_info_name    = gen_src_name,
                      source_info_url     = gen_src_url,
                      generator_info_name = gen_info_name,
                      generator_info_url  = gen_info_url);
    for c in chns:
      wr.addChannel(c.to_pyxmltv())
      for p in c.progs:
        wr.addProgramme(p.to_pyxmltv())
    op.write('<?xml version="1.0" encoding="utf-8"?>\n')
    op.write('<!DOCTYPE tv SYSTEM "xmltv.dtd">\n')
    op.write('')
    wr.write(op)

  # internal generator
  else:
    log('generating xml output ...')

    # Head
    op.write('<?xml version="1.0" encoding="%s"?>\n' % gen_encoding)
    op.write('<!DOCTYPE tv SYSTEM "xmltv.dtd">\n')
    op.write('\n')
    head = ('<tv date="%s" source-info-url="%s" source-info-name="%s" '
         + ' generator-info-name="%s" generator-info-url="%s">\n')\
         % (gen_date, gen_src_url, gen_src_name, gen_info_name, gen_info_url)
    op.write(head)

    # Channels
    for chn in channels:
      op.write(chn.to_xmltv())

    # Programmes
    for chn in channels:
      for p in chn.progs:
        op.write(p.to_xmltv().encode(gen_encoding))

    # Footer
    op.write('</tv>\n')

  # Complete
  log('complete, took %0.2f seconds' % (time.time() - START))
