#!/usr/bin/env python
# encoding: utf-8
"""
site_plugin.py

Created by Gabriel Harrison on 2008-12-15.
Copyright (c) 2008 __MyCompanyName__. All rights reserved.
"""

import sys
import os
import re
import unittest
import wsgiref.handlers

from google.appengine.api.urlfetch import fetch
from google.appengine.ext import webapp
from urlparse import urlparse, urlunparse, urljoin

from BeautifulSoup import BeautifulSoup as Soup
from BeautifulSoup import SoupStrainer as Strainer
from BeautifulSoup import PageElement


class AbstractSitePlugin:
  """Base class used to rip image links from a URL.

  Subclasses must override calculatePageCount() and produceLinks();
  the base implementations raise NotImplementedError.

  Attributes:
    site_name: Human-readable name of the site.
    site_url: Base URL of the site.
    page_count: Number of pages discovered, or -1 when unknown.
    links: Cached link results (not used by the base class itself).
  """

  site_name = "Unnamed"
  site_url = "Unknown"
  page_count = -1
  links = None

  def __init__(self, name, url):
    """Store the site's display name and base URL.

    Args:
      name: Display name for the site.
      url: Base URL of the site (no trailing slash expected).
    """
    self.site_name = name
    self.site_url = url

  def calculatePageCount(self, request, content):
    """Return the number of pages available on the site.

    Subclasses must override this; the base implementation raises.
    """
    # Fail loudly instead of silently returning None (original `pass`).
    raise NotImplementedError('calculatePageCount must be overridden')

  def produceLinks(self, request, content):
    """Return a list of image-link dicts scraped from the site.

    Subclasses must override this; the base implementation raises.
    """
    raise NotImplementedError('produceLinks must be overridden')
    

class LumraxBase(AbstractSitePlugin):
  """Site plugin that scrapes image links from a *.lumrax.com sub-site."""

  # Name of the lumrax sub-domain this instance scrapes.
  sub_site = None

  def __init__(self, sub_site):
    """Configure the plugin for http://<sub_site>.lumrax.com.

    Args:
      sub_site: The lumrax sub-domain name (e.g. 'foo' for foo.lumrax.com).
    """
    AbstractSitePlugin.__init__(self, 'iLumrax',
        'http://%s.lumrax.com' % sub_site)
    self.sub_site = sub_site

  def subsite(self, sub_site):
    """Re-point this instance at a different lumrax sub-site."""
    self.__init__(sub_site)

  def calculatePageCount(self, request=None, content=None):
    """Fetch the site's last-page marker and cache/return the page count.

    Returns:
      The page count as an int, or -1 when the marker page could not be
      fetched or the expected 'index_<N>.html' link was not found.
    """
    page = fetch('%s%s' % (self.site_url, '/meta/last_page'))
    if page.content:
      soup = Soup(page.content)
      # The link whose text contains 'newer' points at index_<N>.html;
      # N is the total number of pages.
      url = soup.find(text=re.compile('newer')).parent.parent.get('href')
      # BUG FIX: original used re.sub + int(), which raised ValueError
      # whenever the href had any text around 'index_<N>.html' (e.g. a
      # leading '/'); extract the number explicitly instead.
      match = re.search(r'index_(\d+)\.html', url)
      self.page_count = int(match.group(1)) if match else -1
    else:
      self.page_count = -1
    return self.page_count

  def produceLinks(self, page=0):
    """Return image-link dicts for one page, or for all pages.

    Args:
      page: 0-based page index; -1 means gather every page; the empty
        string requests the '/meta/last_page' marker page.

    Returns:
      A list of dicts with 'href' (full-size image URL) and 'thumb'
      (thumbnail URL). Empty when the fetch returned no content.
    """
    results = []
    if page == -1:
      # Recurse once per page and concatenate the per-page results.
      for i in range(0, self.calculatePageCount()):
        results.extend(self.produceLinks(i))
      return results
    # BUG FIX: original selected the URL with `[a, b][page is '']` — an
    # identity test against a string literal, which is implementation-
    # dependent and always False for the int default. Use equality.
    if page == '':
      site_page = '/meta/last_page'
    else:
      site_page = '/index_%s.html' % (page + 1)
    site_response = fetch('%s%s' % (self.site_url, site_page))
    if site_response.content:
      # Only parse the 'padder' div to keep the soup small.
      site_strainer = Strainer('div', {'class': 'padder'})
      site_soup = Soup(site_response.content, parseOnlyThese=site_strainer)
      hlinks = site_soup.findAll('div')[2].findAll('a',
          {'class': 'picaout hastooltip'})
      for link in hlinks:
        src = link.find('img')['src']
        # Full-size images live under /612/, thumbnails under /300/ —
        # presumably src arrives as the /300/ variant; TODO confirm.
        results.append({
          'href': '%s%s' % (self.site_url, src.replace('/300/', '/612/')),
          'thumb': '%s%s' % (self.site_url, src.replace('/612/', '/300/')),
        })
    return results

if __name__ == '__main__':
  # NOTE(review): this module defines no unittest.TestCase subclasses, so
  # running it directly executes zero tests — confirm whether the tests
  # live elsewhere or were never written.
  unittest.main()