# -*- coding: utf-8 -*-

import urllib
import re
import logging
import threading

from onlinebrary.lib.aggregator.abstract_aggregator import AbstractAggregator
from onlinebrary.model.agg_data import AggregatedData

log = logging.getLogger(__name__)

# Endpoint of lib.ru's CGI search script; the urlencoded query string is
# appended directly after the trailing '?'.
SEARCH_URL = 'http://lib.ru/search/librusearch.cgi?'
# Query-string parameter names understood by the search script.
KEYWORDS_PARAM = 'query'
PAGE_PARAM = 'page'
# Result pages are served in KOI8-R, while the query itself is sent in
# CP1251 -- NOTE(review): asymmetry looks deliberate, confirm against a
# live request.
CONTENT_ENCODING = 'koi8-r'
REQUEST_ENCODING = 'cp1251'

class LibRuAggregator(AbstractAggregator):
    
    def __init__(self):
        self.name = "Lib.ru"

    def search_by_title(self, title):
    
        log.info('Searching %s in %s', title, self.name)
        
        data = AggregatedData(self.name)
    
        title_encoded = urllib.quote(title.encode(REQUEST_ENCODING))
        try:
            query = urllib.urlencode(((KEYWORDS_PARAM, title_encoded),))
        except Exception, e:
            log.error(e)
            return e
            
        url = SEARCH_URL + query
        
        data.source_search = url
        
        log.info('Requesting search URL: %s', url)
        
        page = urllib.urlopen(url)
        
        content = unicode(page.read(), CONTENT_ENCODING)
        
        #print content.encode('utf-8')

        #apSearch daemon: 76 documents
        search_results = re.search("apSearch\sdaemon:\s(?P<count>\d*)\sdocuments", content, re.M | re.I | re.S)
        
        if not search_results or search_results.group('count') == 0:
            log.info('No search results. Sorry')
            return data
        else:
            results_count = search_results.group('count')
            content = content[search_results.end():]
            
        offset = 0
        pages_count = re.search("&page=(?P<count>\d)", content, re.M | re.I | re.S)
        p_count = 0
        
        while pages_count:
            content = content[pages_count.end():]
            p_count = max(p_count, int(pages_count.group('count')))
            pages_count = re.search("&page=(?P<count>\d)", content, re.M | re.I | re.S)

        log.info('All results pages count = %d' % p_count)

        results = dict()
        results[""] = []
        
        parsers = []
        for i in range(0, p_count + 1):
            parser = LibRuPagesParser(title_encoded, i, results[""])
            parser.start()
            parsers.append(parser)

        for parser in parsers:
            parser.join()
        
        data.count = results_count
        data.data = results

        return data
        

class LibRuPagesParser(threading.Thread):
    
    def __init__(self, keywords, page, basket):
        threading.Thread.__init__(self)
        self.page = page
        self.basket = basket
        self.keywords = keywords
        
    def run(self):
        log.info('Page parser started!')
        query = urllib.urlencode(((KEYWORDS_PARAM, self.keywords),
                                   (PAGE_PARAM, self.page)))
        url = SEARCH_URL + query
        log.info('Requesting search URL: %s', url)
        
        try:
            page = urllib.urlopen(url)
        except Exception, e :
            self.basket.extend(e)
            
        content = unicode(page.read(), CONTENT_ENCODING)
        
        results = []
        offset = 0
        while offset < len(content):
            link = re.search("\.&nbsp;\s<a\s(?P<link>.*?)</a>", content[offset:], re.M | re.I | re.S)
            if link:
                offset += link.end()
                link = "<a target='_blank' " + link.group('link') + "</a>"
                log.debug('Link founded: %s', link)        
                results.append(link)
            else:
                if len(results) == 0:
                    log.warning('No search results found in results page. It\'s wierd.')
                offset = len(content)
        
        self.basket.extend(results)
        log.info('Page parser finished its work!')
