# -*- coding: utf-8 -*-
import logging
import concurrent.futures
import datetime
import re

import requests
from bs4 import BeautifulSoup

from django.core.management.base import BaseCommand, CommandError
from django.core.cache import cache

from common.models import ViewModel, SiteTaxonomy

from book import dao

logger = logging.getLogger('book.allitebook-com')


class Command(BaseCommand):
    """Scrape book listings and per-book details from allitebooks.com.

    Listing pages in the range ``--start``..``--end`` are fetched through a
    shared :class:`requests.Session` with a 1-hour Django-cache layer, parsed
    with BeautifulSoup, enriched with each book's detail page, and persisted
    via ``dao.save_books``.  Pages are processed concurrently with either a
    thread or a process pool (``--m thread|process``, ``--c`` workers).
    """
    help = 'fetch the specified book from allitebook.com'
    # Shared HTTP session, created lazily on the first get_html() call.
    s = None
    # Extracts a 4-digit year from the free-form "Year" metadata field.
    year_re = re.compile(r'\d{4}')

    def add_arguments(self, parser):
        """Register CLI options: page range, worker count and pool type."""
        parser.add_argument('--start', type=int, default=1,
                            help='start page num')
        parser.add_argument('--end', type=int, default=3, help='end page num')
        parser.add_argument('--c', type=int, default=4,
                            help='concurrent workers count')
        parser.add_argument('--m', type=str, default='thread',
                            help='thread or process')

    def get_html(self, url):
        """Return the HTML body of *url*, consulting the Django cache first.

        Successful responses are cached for one hour.  Raises
        ``requests.HTTPError`` on a non-2xx response; error pages are never
        cached because ``raise_for_status()`` fires before ``cache.set()``.
        """
        if not self.s:
            self.s = requests.Session()
        logger.debug('GET  %s', url)
        html = cache.get(url)
        if html:
            logger.debug('HIT  %s', url)
        else:
            r = self.s.get(url)
            html = r.text
            logger.debug('GET  %s %s', url, r.status_code)
            r.raise_for_status()
            cache.set(url, html, 3600)
        return html

    def load_book_detail(self, book):
        """Fill *book* in place from its detail page.

        Populates sub_title, summary, isbn, year, page_cnt, language,
        download_urls and tags.  Returns early (book left partially filled)
        when the page cannot be fetched.
        """
        html = self.get_html(book.source_url)
        if not html:
            # was logging.error — route through the module logger
            logger.error(
                "load book detail failed, can't get url " + book.source_url)
            return
        soup = BeautifulSoup(html, "html.parser")
        main_div = soup.find('main', id="main-content")
        book_article = main_div.find('article')
        sub_title_h = book_article.find('h4')
        book.sub_title = str(sub_title_h.string) if sub_title_h else ''
        book.summary = str(book_article.find(
            'div', class_='entry-content')).replace('<h3>Book Description:</h3>', '')
        download_link = soup.find('span', class_='download-links').find('a')

        # Metadata <dd> order (observed): 1=ISBN, 2=Year, 3=Pages,
        # 4=Language, 5=File size, 6=Format, 7=Category — TODO confirm
        # against a live page if the site layout changes.
        meta_dd_list = book_article.find(
            'div', class_='book-detail').find_all('dd')
        # Guard an empty <dd>: str(None) would yield the literal text 'None'.
        isbn = meta_dd_list[1].string
        book.isbn = str(isbn).replace('-', '').strip() if isbn else ''

        book.year = str(meta_dd_list[2].string)
        year = self.year_re.search(book.year)
        if year:
            book.year = int(year.group(0))
        else:
            # logger.warn is deprecated; warning() is the supported name
            logger.warning('parse year %s failed from %s',
                           book.year, book.source_url)
            book.year = 0
        try:
            book.page_cnt = int(str(meta_dd_list[3].string))
        except (ValueError, AttributeError) as e:
            book.page_cnt = 0
            logger.error('parse page_cnt failed from  %s', book.source_url)
            logger.error(e)
        book.language = str(meta_dd_list[4].string)

        file_size = str(meta_dd_list[5].string)
        file_format = str(meta_dd_list[6].string)
        download_url = download_link.get('href')

        book.download_urls = [(download_url, file_format, file_size)]
        book.tags = [str(meta_dd_list[7].find('a').string)]

    def parse_book_list(self, url):
        """Parse one listing page into a list of ``ViewModel`` books.

        Returns ``None`` when the page could not be fetched.
        """
        html = self.get_html(url)
        if not html:
            # was logging.error — route through the module logger
            logger.error("can't get url " + url)
            return None
        soup = BeautifulSoup(html, "html.parser")
        main_div = soup.find('main', id="main-content")
        books = []
        for article in main_div.find_all('article'):
            title_link = article.find('h2', class_='entry-title').find('a')
            author_links = article.find('h5', class_='entry-author').find_all('a')
            cover_img = article.find('img')
            book = ViewModel()
            book.source_url = title_link.get('href')
            book.title = str(title_link.string)
            # article id looks like "post-12345"
            book.book_id = int(article.get('id').replace('post-', ''))
            book.cover_url = cover_img.get('src')
            if author_links:
                book.authors = [str(link.string) for link in author_links]
            books.append(book)
        return books

    def handle_page(self, url):
        """Fetch one listing page, enrich every book with detail-page data
        and persist the batch via ``dao.save_books``."""
        cache_key = url + '.books'
        books = cache.get(cache_key)
        if books:
            logger.debug('HIT  %s', cache_key)
        else:
            books = self.parse_book_list(url)
            if not books:
                # Bail out: the original cached None for an hour and then
                # crashed iterating it.
                logger.error("parse failed " + url)
                return
            cache.set(cache_key, books, 3600)
        for book in books:
            self.load_book_detail(book)
        logger.debug('SAVE %s', url)
        dao.save_books((self.host, SiteTaxonomy.Book), books)

    def get_pages(self, start, end):
        """Return listing-page URLs for pages *start*..*end*, highest page
        first.  Page 1 is the site root, not ``/page/1/``."""
        urls = []
        for page in range(end, start - 1, -1):
            urls.append(self.host if page == 1 else self.page_url % (page,))
        return urls

    def handle(self, *args, **options):
        """Entry point: fan the page URLs out to a worker pool.

        Pending URLs are recorded in the cache under ``start-end-c:i`` keys
        and deleted as each page completes, so an interrupted run leaves a
        trace of what remains.
        """
        self.host = 'http://www.allitebooks.com'
        self.page_url = self.host + '/page/%d/'
        start = options['start']
        end = options['end']
        c = options['c']
        m = options['m']
        logger.debug('start:%d end:%d c:%d', start, end, c)
        urls = self.get_pages(start, end)
        cache_key = '%d-%d-%d' % (start, end, c)
        for i, url in enumerate(urls):
            cache.set('%s:%d' % (cache_key, i), url)
        if m == 'thread':
            executor_class = concurrent.futures.ThreadPoolExecutor
        elif m == 'process':
            # NOTE(review): bound methods may not pickle cleanly under a
            # process pool — confirm 'process' mode actually works.
            executor_class = concurrent.futures.ProcessPoolExecutor
        else:
            # self.style.error does not exist; Django style names are
            # upper-case (ERROR), so the original raised AttributeError here.
            self.stdout.write(self.style.ERROR('invalid mode %s' % (m,)))
            return
        with executor_class(max_workers=c) as executor:
            future_to_url = {executor.submit(
                self.handle_page, url): url for url in urls}
            for future in concurrent.futures.as_completed(future_to_url):
                url = future_to_url[future]
                try:
                    future.result()
                except Exception:
                    # One failed page must not abort the whole crawl.
                    logger.exception('page %s failed', url)
                    continue
                try:
                    i = urls.index(url)
                    cache.delete('%s:%d' % (cache_key, i))
                except ValueError:
                    logger.info('remove %s failed', url)
                logger.debug('OK   %s', url)
