#-*- coding: utf-8 -*-

#_______________________ Simple As FucK Image Downloader _______________________
#
#                                by newfagpower

"""
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.

    © 2011 newfagpower

"""

import os
import re
import imp
import sys
import zipfile
import shutil
import urllib2
import HTMLParser
import logging
log = logging.getLogger(__name__)

import data


def get_for_domain(url):
    """ find automatically the right downloader class for «url» """

    # to add support for a site (ex: images.google.com), write a
    # subclass of 'Base' named 'ImagesGoogle', drop it in the
    # plugins folder, and that's it!
    for downloader_cls in Base.__subclasses__():
        if any(url.find(domain) >= 0 for domain in downloader_cls.DOMAIN_LIST):
            return downloader_cls(url)
    raise NotFound()


class NotFound(Exception): pass


class Base(object):
    """ base downloader, its main target is imageboards.

    Subclasses set the class attributes below; callers then drive the
    download with prepare(), repeated download_image() calls, and
    finally save() or remove().
    """

    # url substrings (domains) this downloader handles
    DOMAIN_LIST = []
    # fallback name when neither a title nor a number is found
    _DEFAULT_NAME = ''
    # the thread title is truncated to this many characters
    _MAX_TITLE_LENGTH = 0
    # regexes applied to the raw page html; example values:
    #   _DEFAULT_NAME = 'thread'
    #   _MAX_TITLE_LENGTH = 20
    #   _THREAD_TITLE_RE = '<p>([^<]*)'
    #   _THREAD_NUMBER_RE = '([0-9]*)'
    #   _IMAGE_LINK_RE = '<a[^href]*href="([^"]*)"[^>]*><img'
    _THREAD_TITLE_RE = ''
    _THREAD_NUMBER_RE = ''
    _IMAGE_LINK_RE = ''

    def __init__(self, url):
        """ remember «url»; no network access happens until prepare() """
        self._name = []
        self._downloaded_list = []
        self._waiting_list = []
        self._url = url
        self._tmp_dir = ''

    def prepare(self):
        """ fetch the page, fill the waiting list and create the
        temporary download directory under data.WORKING_DIR. """
        # raises os-exception
        # raises plugins-exception
        (self._name, image_list) = self._retrieve_data()
        self._waiting_list = image_list
        self._downloaded_list = []
        self._tmp_dir = os.path.join(data.WORKING_DIR, self._name)
        if not os.path.exists(self._tmp_dir):
            os.mkdir(self._tmp_dir)

    def get_nb_pending(self):
        """ number of images still waiting to be downloaded """
        return len(self._waiting_list)

    def get_nb_done(self):
        """ number of images already processed (see FIXME below:
        failed downloads are counted as done too) """
        return len(self._downloaded_list)

    def get_name(self):
        """ name of the download, computed by _retrieve_data() """
        return self._name

    def download_image(self):
        """ download the next waiting image into the temporary directory.

        When the waiting list is empty, reload the page looking for new
        images; returns False once the page is gone (download finished),
        True otherwise. """
        ok = True
        if self._waiting_list:
            image_url = self._waiting_list[0]
            web_file = None
            local_file = None
            try:
                filename = os.path.basename(image_url)
                dest_filename = os.path.join(self._tmp_dir, filename)
                web_file = urllib2.urlopen(image_url)
                local_file = open(dest_filename, 'wb')
                local_file.write(web_file.read())
            except Exception as e:
                # best-effort: a failed image is logged, not fatal
                log.warning('«{0}» cannot download image «{1}»: {2}'.
                                format(self._name, image_url, e))
            finally:
                local_file and local_file.close()
                web_file and web_file.close()
            # FIXME: when the current image download fails it is ignored
            self._waiting_list.remove(image_url)
            self._downloaded_list.append(image_url)
        else:
            if self._can_continue():
                try:
                    (_, image_url_list) = self._retrieve_data()
                    # BUG FIX: the waiting list is always empty on this
                    # branch, so filtering against it re-queued the whole
                    # thread on every reload; filter against the images
                    # already processed instead
                    new_url_list = [x for x in image_url_list
                                    if x not in self._downloaded_list]
                    self._waiting_list += new_url_list
                except Exception as e:
                    # BUG FIX: '{2}' referenced a missing format argument
                    # and raised IndexError inside this handler
                    log.warning('«{0}» reload data: {1}'.format(self._name, e))
            else:
                ok = False
        return ok

    def remove(self):
        """ discard the temporary download directory and its content """
        # raises shutil-exception
        shutil.rmtree(self._tmp_dir)

    def save(self, destination_dir, zip_it):
        """ move the downloaded images to «destination_dir», either as a
        plain directory or, when «zip_it» is true, as a single .zip. """
        # raises zip-exception
        # raises shutil-exception
        EXT = '.zip'
        if zip_it:
            destination_filename = os.path.join(destination_dir,
                                    os.path.basename(self._tmp_dir) + EXT)
            compress = zipfile.ZipFile(destination_filename, 'w')
            try:
                for filename in os.listdir(self._tmp_dir):
                    # arcname keeps the archive flat (no tmp-dir prefix)
                    compress.write(os.path.join(self._tmp_dir, filename),
                        arcname=filename)
            finally:
                compress.close()
            shutil.rmtree(self._tmp_dir)
        else:
            shutil.move(self._tmp_dir, destination_dir)

    def _retrieve_data(self):
        """ gives informations about the download,
        returns the download «name» and the images to download urls. """
        # raises urllib2-exception

        html = ''
        log.debug('«{0}» retrieving new page'.format(self._url))
        f = urllib2.urlopen(self._url)
        # remove windows endlines
        html = f.read().replace('\r', '')
        f.close()
        log.debug('«{0}» new page retrieved'.format(self._url))

        title_list = re.findall(self.__class__._THREAD_TITLE_RE, html)
        number_list = re.findall(self.__class__._THREAD_NUMBER_RE, html)
        image_url_list = re.findall(self.__class__._IMAGE_LINK_RE, html)

        title = title_list and title_list[0].strip() or ''
        number = number_list and number_list[0].strip() or ''
        # unescape html entities, truncate, and remove any directory
        # separator so the name is safe to use as a directory
        sanitized_title = (HTMLParser.HTMLParser().unescape(title)[: self.
                            __class__._MAX_TITLE_LENGTH].replace(os.sep, '|'))
        if sanitized_title or number:
            name = '{0}_{1}'.format(sanitized_title, number)
        else:
            name = self.__class__._DEFAULT_NAME
        return (name, image_url_list)

    def _can_continue(self):
        """ test when all images are downloaded if new images are available,
        if False the download is stopped. """
        ok = False
        try:
            f = urllib2.urlopen(self._url)
            code = f.getcode()
            f.close()
            if code == 200: # "code" must be "!= 404", but "== 200" is better
                ok = True
        except Exception as e:
            log.warning('«{0}» page check failed {1}'.format(self._name, e))
        return ok


# load all downloader plugins
# can only be done after the definition of Base
from plugins import *

