# -*- coding: utf-8 -*-
'''Copyright 2010 Giordano Tamburrelli. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.
3. Neither the name of Giordano Tamburrelli nor the names of his contributors may be used
   to endorse or promote products derived from this software without specific
   prior written permission.
   
THIS SOFTWARE IS PROVIDED BY GIORDANO TAMBURRELLI "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL GIORDANO TAMBURRELLI OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Created on May 24, 2010

@author: Giordano Tamburrelli
'''

import re
import random
import time
import os
import urllib2
import urllib
from UnicodeTable import replace_unicode
from DBLP.PaperHandlers import url_handler_factory
from Utils import UrlOpener
import net.socks as socks 

DBLP_URL = 'http://www.informatik.uni-trier.de/~ley/db/indices/a-tree/'

def add(x, y):
    """Join two name fragments with a single space (used to fold name tuples)."""
    return u' '.join((x, y))

class Downloader():
    
    def __init__(self, min_delay = 1, max_delay = 2, verbose = True, save = False, path = None, proxies = {}, socks_proxy_url=None, socks_proxy_type=socks.PROXY_TYPE_SOCKS5, socks_proxy_port=80):
        self.verbose = verbose
        self.proxies = proxies
        self.min_delay = min_delay
        self.max_delay = max_delay
        self.save = save
        self.opener = UrlOpener(proxies = proxies, socks_proxy_type=socks_proxy_type, socks_proxy_url = socks_proxy_url, socks_proxy_port= socks_proxy_port)
        
        if path == None:
            self.base_path = os.path.join(os.path.dirname(__file__), '..', '..')
        
    def __get_long_name__(self, name, surname, middle=None):   
        if middle != None:
            long_name = (name,).__add__(middle).__add__((surname,))
        else:
            long_name = (name,).__add__((surname,))
        long_name = reduce(add, long_name)
        return long_name
    
    def get_DBLP_url(self, name, surname, middle=None):
        long_name = self.__get_long_name__(name, surname, middle)
        if self.verbose: print 'Retrieving papers by ' + long_name
        initial = surname[0].lower()
        if not 'abcdefghilmnopqrstuvzwyjkx'.__contains__(initial):
            initial = '='
        initial += '/'
        name = replace_unicode(name)
        surname = replace_unicode(surname)
        url = DBLP_URL + initial + surname + ':' + name 
        if middle != None:
            for mid in middle:
                current = replace_unicode(mid)
                url += '_'+current
        url+='.html'
        return url
            
    def __clean_url__(self, url):
        res = url['class="ee" href="'.__len__(): url.__len__()-1] 
        return res
    
    def __clean_title__(self, title):
        title = title.replace('/', '_')
        return title
            
    def __get_title__(self, url, html):
        url = url.replace('.', '\.')
        url = url.replace('?', '\?')
        url = url.replace('-', '\-')
        url = url.replace('(', '\(')
        url = url.replace(')', '\)')
        exp = re.compile(url+'.*\n')
        bag = exp.findall(html)[0]
        exp = re.compile('http://citeseerx\.ist\.psu\.edu/search\?q=%22.*?%22')
        found_title = exp.findall(bag)[0]
        found_title = found_title.lstrip('http://citeseerx.ist.psu.edu/search?q=%22')
        found_title = found_title.rstrip('.%22')
        found_title = urllib.unquote_plus(found_title)
        return unicode(found_title, errors='ignore')
        
        
    def recognize_urls_in_DBLP(self, html):
        res = {}
        exp = re.compile('class="ee" href=".+?"')
        found_urls = exp.findall(html)
        found_urls = map(self.__clean_url__, found_urls)
        self.recognized = found_urls.__len__()
        for url in found_urls:
            title = self.__get_title__(url, html)
            res[url] = title
        if self.verbose: print 'Recognized %d papers' % found_urls.__len__()
        return res
    
    def save_pdf(self, name, pdf, path):
        print name
        out_file = open(os.path.join(path, (name +u".pdf")),"w")
        out_file.write(pdf)
        out_file.close()
        
    def get_paper_from_url(self, url, handler, name, path):
        if self.verbose: print 'Downloading paper "%s": %s' % (name, url)
        try:
            pdf = handler.get_paper(url, self.opener)
        except urllib2.HTTPError:
            pdf = None
            
        if pdf != None and str(pdf).startswith("%PDF-"):
            if self.save:
                self.save_pdf(self.__clean_title__(name), pdf, path)
        elif self.verbose:
            print 'Missing PDF: skipping paper %s' % url
        return pdf
    
    def __create_directory__(self, name, surname, middle):
        long_name = self.__get_long_name__(name, surname, middle)
        path = os.path.join(self.base_path, long_name)
        os.mkdir(os.path.normpath(path))
        return path
    
    def __get_papers__(self, dblp_url, path):
        try:
            dblp_page = self.opener.get_page(url = dblp_url).read()
        except urllib2.HTTPError:
            if self.verbose: print 'DBLP page not found: Http Error' 
            return () 
        urls = self.recognize_urls_in_DBLP(dblp_page)
        total_pdfs = ()
        for url in urls:
            handler = url_handler_factory(url)
            if handler != None:
                pdf = self.get_paper_from_url(url, handler(), urls[url], path)
                if pdf != None:
                    total_pdfs = total_pdfs.__add__((pdf,))
                    timer = random.randint(self.min_delay, self.max_delay)
                    time.sleep(timer)
            elif self.verbose: print 'Missing handler: skipping paper %s' % url
        return total_pdfs
    
    def get_papers(self, name, surname, middle=None):
        dblp_url = self.get_DBLP_url(name, surname, middle)
        if self.save:
            path = self.__create_directory__(name, surname, middle)
        else:
            path = None
            
        papers = self.__get_papers__(dblp_url, path) 
        if self.verbose: print 'Retrieved %d over %d papers by %s' % (papers.__len__(), self.recognized, self.__get_long_name__(name, surname, middle) )
        return papers
    
    def get_papers_list(self, authors):
        res = {}
        for author in authors:
            name = author['name']
            surname = author['surname']
            try:
                middle = author['middle']
            except KeyError:
                middle = None
            long_name = self.__get_long_name__(name, surname, middle) 
            pdfs = self.get_papers(name, surname, middle)
            res[long_name] = pdfs
        return res
    
    def get_papers_from_url(self, dblp_url, directory_name):
        self.__get_papers__(dblp_url, directory_name)
        
    def get_papers_from_url_list(self, dblp_urls):
        res = {}
        for url in dblp_urls:
            pdfs = self.get_papers_from_url(url, dblp_urls[url])
            res[url, pdfs]
        return res
        
    