#! /usr/bin/python
# Copyright (C) 2010  Johan Jordaan (www.johanjordaan.co.za)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# Todo List
#
# 1) Add better error handling.
    
import sys
sys.path.append('..\pylibs')
sys.path.append('..\pycommon')
import urllib
import os
import os.path
import unicodecsv
from xml.sax.saxutils import unescape
from optparse import OptionParser   
import zipfile
import traceback

from BeautifulSoup import BeautifulSoup 

import filemappings as FM

def parse_set(string):
  """Parse a gatherer text-spoiler HTML page into card-attribute groups.

  string -- raw HTML of a gatherer "text spoiler" search result page.
  Returns a list of dicts, one per card, mapping attribute labels
  (e.g. 'Name', 'Cost') to cleaned-up string values.  Returns [] and
  prints the traceback if parsing fails for any reason.
  """
  try:
    # Use the soup to extract the data from the spoiler table
    #
    soup = BeautifulSoup(string)
    table = soup.find("div", { "class" : "textspoiler" }).table
    trs = table.findAll('tr')
    data = []
    for tr in trs:
      tds = tr.findAll('td')
      row_data = []
      for td in tds:
        a = td.findAll('a')
        if len(a)>0:
          # The first anchor links to the card detail page; the value
          # after the last '=' in the href is the gatherer multiverse id.
          data.append([FM.CSV['ID'],a[0]['href'].rpartition('=')[-1]])
        row_data.append(''.join([x.string.strip() if x.string is not None else '' for x in td.contents]))
      data.append(row_data)

    # Now group the rows into one dict per card; a row consisting of a
    # single empty cell separates consecutive cards.
    #
    groups = []
    group = {}
    for row in data:
      if len(row) == 1 and row[0] == '':
        groups.append(group)
        group = {}
      else:
        key = row[0].replace(':','').strip()
        # Normalise typographic characters (em-dash, curly quote,
        # ae-ligature, registered sign) that upset the csv output.
        value = row[1].replace(u'\u2014','-').replace(u'\u2019','\'').replace(u'\u00E6','Ae').replace(u'\xae','<<').strip()
        group[key] = value
  except Exception:
    # Fix: traceback.print_exc() returns None, so the old
    # 'print traceback.print_exc()' also printed a stray 'None'.
    traceback.print_exc()
    groups = []

  return groups

def parse_set_list(cache_path, force_index):
    """Return the list of card-set names offered on the gatherer front page.

    The downloaded index page is kept as a zip archive inside cache_path;
    pass force_index=True to re-download it even when a cached copy exists.
    """
    cache_file_name = os.path.join(cache_path, 'index.html.zip')
    use_cache = os.path.exists(cache_file_name) and not force_index
    if use_cache:
        print('Using existing cached index ...')
        archive = zipfile.ZipFile(cache_file_name)
        page = archive.read('index.html')
        archive.close()
    else:
        print('Connecting to gatherer to get the set list ...')
        page = urllib.urlopen('http://gatherer.wizards.com').read()
        archive = zipfile.ZipFile(cache_file_name, 'w', zipfile.ZIP_DEFLATED)
        archive.writestr('index.html', page)
        archive.close()

    print('Parsing received set list ...')
    # The set names live in the <select> that follows the
    # 'Filter Card Set:' label on the front page.
    soup = BeautifulSoup(page)
    label = soup.find(text=lambda text: text.strip() == 'Filter Card Set:')
    options = label.findNext(name='select').findAll('option')
    entities = {"&apos;": "'", "&quot;": '"'}
    return [unescape(opt.string.strip(), entities) for opt in options if opt.string]
    

def strip(force_index,clear_cache,force_data,output_root,get_set_only):
  """Download every card set from gatherer and write one csv per set.

  force_index  -- re-download the set index even if it is cached.
  clear_cache  -- re-download set pages even if they are cached.
  force_data   -- regenerate csv files even if they already exist.
  output_root  -- root directory; 'cache' and 'csv' subdirs are created.
  get_set_only -- if not None, only process sets whose name contains
                  this substring.
  """
  # If the cache directory does not exist then create it.
  # Fix: makedirs also creates output_root itself when it is missing,
  # where os.mkdir would raise.
  #
  cache_path = os.path.join(output_root,'cache')
  if not os.path.exists(cache_path):
    print('Creating cache directory [%s] ...'%(cache_path))
    os.makedirs(cache_path)

  # If the csv directory does not exist then create it
  #
  csv_path = os.path.join(output_root,'csv')
  if not os.path.exists(csv_path):
    print('Creating data directory [%s] ...'%(csv_path))
    os.makedirs(csv_path)

  # Get the sets from the sets definition. This is simply a ripped portion from the gatherer frontend
  #
  set_list = parse_set_list(cache_path,force_index)

  # If get_set_only then reduce the set list to the sets whose name
  # contains the requested substring.
  #
  if get_set_only is not None:
    set_list = [s for s in set_list if s.find(get_set_only) >= 0]

  # Iterate over the sets and process them.  Fix: the loop variable is
  # named set_name instead of 'set', which shadowed the builtin.
  #
  for set_name in set_list:
    print('-'*30+'Processing set [%s] ...'%(set_name))

    # Check if the file is already in the cache directory if it is then don't download it
    #
    cache_file_name = os.path.join(cache_path,set_name+'.html.zip').replace(':','_').replace('"','\'')
    if not os.path.exists(cache_file_name) or clear_cache:
      print('Retrieving file [%s] from gatherer ...'%(set_name))
      params = urllib.urlencode({'output': 'spoiler', 'method': 'text', 'set': '["'+set_name+'"]'})
      url = urllib.urlopen("http://gatherer.wizards.com/Pages/Search/Default.aspx?%s" % params)
      # Fix: close the archive even if the download/write raises, so a
      # failed retrieval does not leak the handle.
      f = zipfile.ZipFile(cache_file_name,'w',zipfile.ZIP_DEFLATED)
      try:
        f.writestr(set_name+'.html',url.read())
      finally:
        f.close()
    else:
      print('Cached File [%s] found ...'%(set_name))

    # Check if the file is already in the data directory if it is not then create it else skip it
    #
    csv_file_name = os.path.join(csv_path,set_name+'.csv').replace(':','_').replace('"','\'')
    if not os.path.exists(csv_file_name) or force_data:
      print('Processing file [%s] ...'%(set_name))

      # Open the cached archive and read the set page
      #
      f = zipfile.ZipFile(cache_file_name)
      try:
        d = f.read(set_name+'.html')
      finally:
        f.close()

      # Parse the file
      #
      card_data = parse_set(d)

      # Write the data into a csv file.  Fix: 'with' guarantees the
      # file is closed even when a write raises (the original leaked
      # the handle on error).
      #
      with open(csv_file_name,'wt') as out:
        writer = unicodecsv.UnicodeWriter(out,lineterminator='\n')
        writer.writerow([set_name])
        writer.writerow(FM.CSV.values())
        for card in card_data:
          # Missing attributes become empty cells; ''.strip() is ''.
          row_data = [card.get(FM.CSV[key],'').strip() for key in FM.CSV.keys()]
          writer.writerow(row_data)
    else:
      print('Data file [%s] found, skipping ...'%(set_name))
    
if __name__ == '__main__':
  # Command line front end: parse the options and run the stripper.
  # Fix: corrected typos in the user-facing help strings
  # (retreival/alread/cachje/retriev).
  parser = OptionParser()
  parser.add_option('-I', action='store_true', dest='force_index',default=False,help='Force index retrieval. Do this if you already have an index file but there might be new sets.')
  parser.add_option('-C', action='store_true', dest='clear_cache',default=False,help='Ignore the existing cache and retrieve all files again')
  parser.add_option('-F', action='store_true', dest='force_data',default=False,help='Overwrite all data files')
  parser.add_option('-g', '--get',dest='get_set_only',help='Force a clean get of the specified set')
  parser.add_option('-o', '--output',dest='output_root',default='..\data\gatherer',help='Root directory for the data files and the cache')

  (options, args) = parser.parse_args()

  strip(options.force_index,options.clear_cache,options.force_data,options.output_root,options.get_set_only)
  
  
  
  
  
    

