# -*- coding: utf-8 -*-
import logging
import concurrent.futures
import datetime
import re
import math
import json

import requests
from bs4 import BeautifulSoup

from django.core.management.base import BaseCommand, CommandError
from django.core.cache import cache

from common.models import ViewModel, SiteTaxonomy

from book import dao

logger = logging.getLogger('book.itebooks-info')


class Command(BaseCommand):
    """Management command: crawl book metadata from it-ebooks.info.

    Usage: ``manage.py <command> <q> [--start N] [--end N] [--c N]
    [--m thread|process] [--api raw|baidu|bing]``
    """
    help = 'fetch the specified book from it-ebooks.info'

    def add_arguments(self, parser):
        parser.add_argument('q', type=str, help='search keywords')
        parser.add_argument('--start', type=int, default=1,
                            help='start page num')
        parser.add_argument('--end', type=int, default=3, help='end page num')
        parser.add_argument('--c', type=int, default=4,
                            help='concurrent workers count')
        parser.add_argument('--m', type=str, default='thread',
                            help='thread or process')
        parser.add_argument('--api', type=str, default='raw',
                            help='search api backend(raw or baidu or bing)')

    def handle(self, *args, **options):
        start = options['start']
        end = options['end']
        c = options['c']
        m = options['m']
        api = options['api']
        q = options['q']
        logger.debug('search %s start:%d end:%d c:%d with %s' %
                     (q, start, end, c, api))
        option = SearchOption(q, start, end, m, c)
        # Dispatch table: one place to register new search backends.
        backends = {
            'raw': RawItEbookRobot,
            'baidu': BaiduItEbookRobot,
            'bing': BingItEbookRobot,
        }
        robot_class = backends.get(api)
        if robot_class is None:
            # Fixed: was self.style.error (no such attribute -> would raise
            # AttributeError) and it reported the mode `m` instead of the
            # backend `api` that actually failed validation.
            self.stdout.write(self.style.ERROR('invalid api %s' % (api,)))
            return
        robot_class().execute(option)


class SearchOption:
    """Value object holding the parameters of one crawl run.

    Attributes:
        q: search keywords.
        s: first result page number (inclusive).
        e: last result page number (inclusive); -1 means auto-detect.
        m: execution mode, ``'thread'`` or ``'process'``.
        c: number of concurrent workers.
    """

    # Maps mode name -> the concurrent.futures executor implementing it.
    _EXECUTORS = {
        'thread': concurrent.futures.ThreadPoolExecutor,
        'process': concurrent.futures.ProcessPoolExecutor,
    }

    def __init__(self, q, s, e, m, c):
        self.q = q
        self.s = s
        self.e = e
        self.m = m
        self.c = c

    def get_executor_class(self):
        """Return the executor class selected by ``self.m``.

        Raises:
            ValueError: if ``self.m`` is neither 'thread' nor 'process'.
        """
        try:
            return self._EXECUTORS[self.m]
        except KeyError:
            # Was a bare ValueError(); include the offending value so the
            # failure is diagnosable from the traceback alone.
            raise ValueError(
                "invalid mode %r, expected 'thread' or 'process'" % (self.m,)
            ) from None


class ItEbookRobot:
    """Base crawler: fetches search-result pages concurrently and saves books.

    Subclasses override ``api`` with concrete URL templates and implement
    ``handle_page`` (and ``get_max_page`` if open-ended crawls are needed).
    HTTP responses are cached for one hour via the Django cache.
    """

    # Lazily-created requests.Session (set on first get_html/get_json call).
    s = None
    # URL templates keyed by role; filled in by subclasses.
    api = {
        'search_url': None,
        'page_url': None,
        'item_url': None
    }
    host = 'http://www.it-ebooks.info'
    # Matches a four-digit year anywhere in a string.
    year_re = re.compile(r'\d{4}')

    def get_html(self, url, allow_redirects=False):
        """GET *url* and return the response body text.

        Results are cached for 3600s keyed by URL. Raises on non-2xx
        status (after logging it). NOTE(review): a cached empty body is
        indistinguishable from a cache miss because of the truthiness
        test — presumably acceptable here, but confirm.
        """
        if not self.s:
            self.s = requests.Session()
        logger.debug('GET  ' + url)
        html = cache.get(url)
        if html:
            logger.debug('HIT  ' + url)
        else:
            r = self.s.get(url, allow_redirects=allow_redirects)
            html = r.text
            logger.debug('GET  %s %s' % (url, r.status_code))
            r.raise_for_status()
            cache.set(url, html, 3600)
        return html

    def get_json(self, url, allow_redirects=False):
        """GET *url* and return its decoded JSON payload.

        Returns None when the response body is not valid JSON (the error
        is logged, not raised). Successful results are cached for 3600s.
        """
        if not self.s:
            self.s = requests.Session()
        logger.debug('GET  ' + url)
        result = cache.get(url)
        if result:
            logger.debug('HIT  ' + url)
        else:
            r = self.s.get(url, allow_redirects=allow_redirects)
            try:
                result = r.json()
                logger.debug('GET  %s %s' % (url, r.status_code))
                r.raise_for_status()
                cache.set(url, result, 3600)
            except json.decoder.JSONDecodeError as e:
                logger.error('decode json failed %s %s' % (url, r.text))
                logger.error(e)
        return result

    def get_pages(self, start, end):
        """Return page URLs for pages start..end, ordered last page first."""
        return [self.get_page_url(page) for page in range(end, start - 1, -1)]

    def get_page_url(self, page_no):
        """Build the URL for one result page (page 1 uses ``search_url``)."""
        if page_no == 1:
            url = self.api['search_url'].format(q=self.option.q)
        else:
            url = self.api['page_url'].format(
                q=self.option.q, page_no=page_no)
        return url

    def get_max_page(self, url):
        """Return the last available page number; subclasses may implement."""
        pass

    def execute(self, option):
        """Run the crawl described by *option*, fanning pages out to workers.

        When ``option.e`` is -1 the end page is auto-detected via
        get_max_page. Each page URL is handled by ``handle_page`` in a
        thread or process pool; ``future.result()`` re-raises any worker
        exception in this (the submitting) thread.
        """
        self.option = option
        end_page = self.option.e
        if self.option.e == -1:
            end_page = self.get_max_page(self.get_page_url(self.option.s))
        urls = self.get_pages(self.option.s, end_page)
        executor_class = self.option.get_executor_class()
        with executor_class(max_workers=self.option.c) as executor:
            future_to_url = {executor.submit(
                self.handle_page, url): url for url in urls}
            for future in concurrent.futures.as_completed(future_to_url):
                url = future_to_url[future]
                future.result()
                logger.debug('OK   %s' % (url, ))

    def handle_page(self, url):
        """Process one result page URL; implemented by subclasses."""
        pass

    def save_books(self, books, url=None):
        """Persist a batch of book view-models through the dao layer."""
        logger.debug('SAVE %s' % (url,))
        dao.save_books((self.host, SiteTaxonomy.Book), books)


class RawItEbookRobot(ItEbookRobot):
    """Robot that talks to the it-ebooks-api.info JSON API directly, then
    scrapes each book's detail page on it-ebooks.info for metadata."""

    api = {
        'search_url': 'http://it-ebooks-api.info/v1/search/{q}',
        'page_url': 'http://it-ebooks-api.info/v1/search/{q}/page/{page_no}',
        'item_url': 'http://it-ebooks.info/book/{book_id}/'
    }
    # Extracts the numeric page id from a "/book/<id>" link in search HTML.
    book_id_re = re.compile(r'/book/(\d+)')

    def handle_page(self, url):
        """Fetch one API result page, enrich every book, and persist them.

        Parsed book lists are cached per page URL for an hour so that a
        re-run can skip the parse step.
        """
        m = self.get_json(url)
        if m is None:
            # Was logging.error (root logger); use the module logger for
            # consistency with the rest of the file.
            logger.error("request %s failed" % (url, ))
            return
        if m['Error'] != '0':
            logger.error("request %s failed %s" % (url, m['Error']))
            return
        cache_key = url + '.books'
        books = cache.get(cache_key)
        if books:
            logger.debug('HIT  %s' % (cache_key, ))
        else:
            books = self.map_to_vm(m['Books'])
        if not books:
            logger.error("parse failed " + url)
            return
        cache.set(cache_key, books, 3600)
        for book in books:
            self.load_book_detail(book)
        cache.set(cache_key + '-2', books, 3600)
        self.save_books(books, url)

    def map_to_vm(self, book_src):
        """Map raw API book dicts to ViewModel instances.

        Books whose it-ebooks.info page id cannot be resolved from their
        ISBN are skipped. Returns the list of populated view-models.
        """
        books = []
        for book_raw in book_src:
            book = ViewModel()
            book.title = book_raw['Title']
            book.sub_title = book_raw.get('SubTitle', '')
            book.isbn = book_raw['isbn'].replace('-', '').strip()
            # The API's own 'ID' is not the site page id, so resolve the
            # real id by searching the site for the ISBN. (The previous
            # assignment from book_raw['ID'] was a dead store.)
            book.book_id = self.get_book_id(book.isbn)
            if book.book_id is None:
                continue
            book.source_url = self.get_book_url_byid(book.book_id)
            book.cover_url = book_raw['Image']
            book.authors = []
            books.append(book)
        return books

    def get_book_id(self, isbn):
        """Return the it-ebooks.info page id (string) for *isbn*, or None."""
        html = self.get_html(
            'http://it-ebooks.info/search/?q=%s&type=isbn' % (isbn,))
        # Renamed from the misleading `year`: this matches a book id.
        match = self.book_id_re.search(html)
        if match:
            return match.group(1)
        logger.error('Cannot find book id by isbn %s' % (isbn,))
        return None

    def get_book_url_byid(self, book_id):
        """Build the book detail-page URL from a known page id."""
        return self.api['item_url'].format(book_id=book_id)

    def get_book_url(self, isbn):
        """Return the book detail-page URL for *isbn*, or None if not found.

        Delegates to get_book_id instead of duplicating its search logic.
        """
        book_id = self.get_book_id(isbn)
        if book_id is None:
            return None
        return self.get_book_url_byid(book_id)

    def load_book_detail(self, book):
        """Scrape the book's detail page and fill summary/authors/etc.

        Mutates *book* in place. Year and page count fall back to 0 when
        the page content is not numeric.
        """
        html = self.get_html(book.source_url)
        if not html:
            logger.error(
                "load book detail failed, can't get url " + book.source_url)
            return
        soup = BeautifulSoup(html, "html.parser")

        summary_span = soup.find(
            'span', itemprop='description')
        book.tags = []
        book.summary = ''.join(summary_span.stripped_strings)
        # Links inside the description are the site's tag anchors.
        for tag_a in summary_span.find_all('a'):
            book.tags.append(str(tag_a.string))
        authors = str(soup.find('b', itemprop='author').string)
        for author in authors.split(','):
            author_name = author.strip()
            if author_name:
                book.authors.append(author_name)

        book.publisher = str(soup.find('a', itemprop='publisher').string)
        try:
            book.year = int(
                str(soup.find('b', itemprop='datePublished').string))
        except ValueError as e:
            book.year = 0
            logger.error('parse year failed from  %s' % (book.source_url,))
            logger.error(e)
        try:
            book.page_cnt = int(
                str(soup.find('b', itemprop='numberOfPages').string))
        except ValueError as e:
            book.page_cnt = 0
            logger.error('parse page_cnt failed from  %s' % (book.source_url,))
            logger.error(e)
        book.language = str(soup.find('b', itemprop='inLanguage').string)
        file_format_b = soup.find('b', itemprop='bookFormat')
        file_format = str(file_format_b.string)
        # The file size sits in the <b> immediately before the format.
        file_size = str(file_format_b.find_previous('b').string)

        book.download_urls = []
        book.download_urls.append(('', file_format, file_size))

    def get_max_page(self, url):
        """Derive the last page number from the API's total hit count
        (10 results per page)."""
        m = self.get_json(url)
        return math.ceil(int(m['Total']) / 10)


class SearchEngineItEbookRobot(ItEbookRobot):
    """Base class for robots that locate books through an external search
    engine. No behavior is implemented here; subclasses inherit the no-op
    ``handle_page`` from ItEbookRobot until they provide their own."""
    pass


class BaiduItEbookRobot(SearchEngineItEbookRobot):
    """Baidu-backed search robot.

    NOTE(review): ``search_url``/``page_url`` below still point at the raw
    it-ebooks API rather than Baidu, and no ``handle_page`` is implemented,
    so ``execute()`` currently does nothing useful — presumably this backend
    is a work in progress; confirm before relying on it.
    """
    api = {
        'search_url': 'http://it-ebooks-api.info/v1/search/{q}',
        'page_url': 'http://it-ebooks-api.info/v1/search/{q}/page/{page_no}',
        'item_url': 'http://it-ebooks-api.info/v1/book/{id}'
    }


class BingItEbookRobot(SearchEngineItEbookRobot):
    """Bing-backed search robot. Not yet implemented: inherits the no-op
    ``handle_page``, so selecting ``--api bing`` crawls nothing."""
    pass
