# -*- coding: utf-8 -*-
import logging
import concurrent.futures
import re
import json
import os
import codecs
import base64
import queue
# import asyncio
import random
from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup


from django.core.cache import cache
from django.core.cache import caches


from common.models import ViewModel, SiteTaxonomy
from common.ebook import EPubFactory

from novel import dao

from .robot_mixin import HttpMixin, PersistenceMixin, SiteParseMixin
# from .robot_mixin import AsyncHttpMixin

class ItEbookRobot(HttpMixin, PersistenceMixin):
    """Base crawler that downloads books from several novel sites and
    persists them as epub/txt/md files or database rows.

    Subclasses must implement :meth:`parse_book` (single book page) and
    :meth:`get_book_dict` (listing page -> {book_id: url}).
    """
    # Optional per-site API endpoints; subclasses may override.
    api = {
        'dir_url': None,
        'dir_page_url': None,
    }
    log = logging.getLogger('book.robot')
    host = 'http://www.it-ebooks.info'
    year_re = re.compile(r'\d{4}')

    def get_executor_class(self, m):
        """Map a mode string to a concurrent.futures executor class.

        :param m: 'thread' or 'process'
        :raises ValueError: for any other mode string
        """
        if m == 'thread':
            return concurrent.futures.ThreadPoolExecutor
        if m == 'process':
            return concurrent.futures.ProcessPoolExecutor
        raise ValueError('invalid executor mode %r, expected "thread" or "process"' % (m,))

    def compose_urls(self, **options):
        """Build the set of start URLs and the handler action for them.

        If ``options['book_id']`` is given (an iterable of ids), compose
        per-book index URLs handled by ``handle_book``; otherwise compose
        listing-page URLs for pages ``end`` down to ``start`` handled by
        ``handle_page`` (page mode currently only has a hetushu template).

        :returns: {'action': handler name, 'urls': {key: url}}
        :raises ValueError: for an unknown site code
        """
        urls = {
            'action': None,
        }
        site = options['site']
        keys = options['book_id']
        if keys:
            ep_urls = {
                '99lib': 'http://www.99lib.net/book/{key}/index.htm',
                'hetushu': 'http://www.hetushu.com/book/{key}/index.html',
                'csw': 'http://book.99csw.com/book/{key}/index.htm'
            }
            urls['action'] = 'handle_book'
        else:
            # Listing pages are crawled from newest to oldest.
            keys = range(options['end'], options['start']-1, -1)
            ep_urls = {
                'hetushu': 'http://www.hetushu.com/book/index.php?page={key}&state=2',
            }
            urls['action'] = 'handle_page'

        if site == '99lib' or site == '9':
            urls['urls'] = {book_id: ep_urls['99lib'].format(key=book_id)
                            for book_id in keys}
            self.host = 'http://www.99lib.net'
        elif site == 'hetushu' or site == 'h':
            urls['urls'] = {book_id: ep_urls['hetushu'].format(
                key=book_id) for book_id in keys}
            self.host = 'http://www.hetushu.com'
        elif site == 'csw' or site == 'w':
            urls['urls'] = {book_id: ep_urls['csw'].format(
                key=book_id) for book_id in keys}
            self.host = 'http://book.99csw.com'
        else:
            raise ValueError('invalid site %s' % (site,))

        return urls

    def execute(self, option):
        """Entry point: crawl everything described by ``option``.

        Submits one task per start URL to ``pool1``; those tasks queue
        per-chapter download futures into ``self.chapter_fn`` (on
        ``pool2``) as a side effect.
        """
        self.images = []
        self.option = option
        self.cache = caches[option.get('cache', 'default')]

        urls = self.compose_urls(**option)
        executor_class = self.get_executor_class(option['m'])

        self.allow_redirects = option['allow_redirects']

        self.pool1 = executor_class()
        self.pool2 = executor_class()
        self.chapter_fn = {}
        action = urls['action']
        future_to_url = {self.pool1.submit(
            getattr(self, action), book_id, urls['urls'][book_id]): book_id
            for book_id in urls['urls']}

        # Wait for the book/page tasks FIRST: they are the producers that
        # populate self.chapter_fn, so iterating the chapter futures before
        # the producers finish would race and miss chapters.
        for future in concurrent.futures.as_completed(future_to_url):
            url = future_to_url[future]
            future.result()  # re-raise any worker exception
            self.log.debug('OK   %s', url)

        for chapter in concurrent.futures.as_completed(self.chapter_fn):
            title = self.chapter_fn[chapter]
            chapter.result()
            self.log.debug('OK %s', title)

        # All futures are done; release the worker pools.
        self.pool1.shutdown()
        self.pool2.shutdown()

    def handle_book(self, book_id, url):
        """Fetch and parse one book, then persist it to the target
        format chosen in ``self.option['target']``."""
        book = self.parse_book(book_id, url)
        target = self.option['target']
        if target == 'epub':
            self.save_to_epub(book, url)
        elif target == 'txt':
            self.save_to_txt(book, url)
        elif target == 'md':
            self.save_to_md(book, url)
        elif target == 'db':
            self.save_to_db([book, ], url)
        self.log.debug('OK   %s', url)

    def parse_book(self, book_id, url):
        """Parse a book index page into a ViewModel. Subclass hook."""
        raise NotImplementedError()

    def handle_page(self, page, url):
        """Handle one listing page: resolve its book map and crawl each
        book sequentially."""
        book_dict = self.get_book_dict(page, url)
        if not book_dict:
            return
        # Renamed loop variable so it does not shadow the `url` parameter.
        for book_id, book_url in book_dict.items():
            self.handle_book(book_id, book_url)
            self.log.debug('OK   %s', book_url)

    def get_book_dict(self, page, url):
        """Parse a listing page into {book_id: url}. Subclass hook."""
        raise NotImplementedError()


class Net99libEbookRobot(ItEbookRobot):
    """Crawler for 99lib.net-style book pages (also hetushu/99csw hosts).

    These pages shuffle paragraph <div>s client-side; the true order is
    recovered from a base64-encoded permutation stored in a
    ``<meta name="client">`` tag (see :meth:`load_chapter_detail`).
    """
    host = 'http://www.99lib.net'
    # Separator tokens inside the base64-decoded paragraph-order string.
    chapter_sort_no_re = re.compile(r'[A-Z]+%')
    img_re = re.compile(r'<img src="([\s\S]+?)"')
    chapter_div_re = re.compile(r'<div>([\s\S]+?)</div>')
    sorted_chapter_div_re = re.compile(
        r'<div class="chapter">([\s\S]+?)</div>')
    book_inner_link_re = re.compile(
        r'<a href="/book/\d+/index.htm">([\s\S]+?)</a>')
    # Inline formatting tags that are stripped together with their content
    # before the chapter HTML is parsed.
    remove_tags_re = [re.compile(r'<%s>[\s\S]+?</%s>' % (x, x))
                      for x in "acronym, bdo, big, cite, code, "
                      "dfn, kbd, q, s, samp, strike, tt, u, var".split(', ')]
    content_re = re.compile(r'<div id="content">([\s\S]+)</div>')

    def get_book_dict(self, page, url):
        """Parse a listing page into {book_id: absolute_book_url}.

        The result is cached for one hour under ``url + '.page'``.
        Returns None when the request or the parse fails.
        """
        m = self.get_html(url)
        if not m:
            self.log.error('request %s failed', url)
            return
        cache_key = url + '.page'
        bookid_dict = self.cache.get(cache_key)
        host = self.get_host(url)
        if not bookid_dict:
            soup = BeautifulSoup(m, "html.parser")
            ul = soup.find('ul', class_='book_list')
            href_list = [x.get('href') for x in ul.find_all('a')]
            # '/book/123/index.html' -> '123'
            id_list = [x.replace('book', '').replace(
                'index.html', '').replace('/', '') for x in href_list]

            bookid_dict = dict(zip(id_list, href_list))
            for k, v in bookid_dict.items():
                bookid_dict[k] = host + v
        else:
            self.log.debug('HIT  %s', cache_key)
        if not bookid_dict:
            self.log.error("parse failed " + url)
            return
        self.cache.set(cache_key, bookid_dict, 3600)
        return bookid_dict

    def parse_book(self, book_id, url):
        """Fetch and parse a book index page into a ViewModel.

        Builds the book metadata and chapter list (cached for an hour),
        creates the output folder, and schedules one chapter-download
        future per chapter on ``self.pool2``.

        Returns None when the request fails.
        """
        m = self.get_html(url)
        if m is None:
            self.log.error("request %s failed", url)
            return
        cache_key = url + '.book'
        site_name = self.get_site_name(url)

        book = self.cache.get(cache_key)
        if not book:
            book = ViewModel()
            book.host = self.host
            book.isbn = site_name + '-' + str(book_id)
            book.source_url = url
            soup = BeautifulSoup(m, "html.parser")
            book.title, book.author, book.cover_url, book.cat = self.get_book_info(
                soup)
            if book.cover_url:
                book.cover_url = urljoin(url, book.cover_url)
            if book.cat:
                book.tags = [book.cat, ]
            book.chapters = self.get_chapters(soup)
        else:
            self.log.debug('HIT  %s', cache_key)
        if not book:
            self.log.error("parse failed " + url)
            return
        self.cache.set(cache_key, book, 3600)
        book.uuid = '%s-%s' % (site_name, book_id)
        folder_path = os.path.abspath(os.path.join('books-output', book.uuid))
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        book.folder = folder_path
        for chapter in book.chapters:
            # Make chapter links absolute; '#' marks a section header whose
            # real pages live in chapter.children.
            if chapter.href and chapter.href != '#':
                chapter.href = urljoin(url, chapter.href)
            if hasattr(chapter, 'children') and chapter.children:
                for child in chapter.children:
                    if not child.href or child.href == '#':
                        continue
                    child.href = urljoin(url, child.href)
            chapter.book = book
            if chapter.href == '#':
                for child in chapter.children:
                    child.book = chapter.book
                    self.chapter_fn[self.pool2.submit(
                        self.load_chapter_detail, child)] = child.title
            else:
                self.chapter_fn[self.pool2.submit(
                    self.load_chapter_detail, chapter)] = chapter.title
        return book

    def get_book_info(self, soup):
        """Extract (title, author, cover_url, category) from a book page.

        Newer page layouts put the author in an <h4>; older ones put it in
        the <div> following the title, with the category in the next <div>.
        """
        title_h = soup.find('h2')
        author_h = soup.find('h4')
        cat = None
        if author_h:
            author = str(author_h.find('a').string)
        else:
            author_div = title_h.find_next_sibling('div')
            author = str(author_div.find('a').string)
            cat_div = author_div.find_next_sibling('div')
            if cat_div:
                cat = str(cat_div.string).replace('类型', '')
        return (str(title_h.string), author,
                soup.find('img').get('src'), cat)

    def get_chapters(self, soup):
        """Build the chapter list from the <dl id="dir"> table of contents.

        <dt> items become section headers (href '#', children collected
        from the following <dd> items); <dd> items become real chapters.
        """
        chapters = []
        dir_dl = soup.find('dl', id='dir')

        for sort_no, item in enumerate(dir_dl.children):
            if item.name == 'dt':
                chapter = ViewModel(href='#', children=[], sort_no=sort_no)
                chapter.title = str(item.string)
                chapters.append(chapter)
            elif item.name == 'dd':
                chapter_a = item.find('a')
                chapter = ViewModel()
                chapter.sort_no = sort_no
                chapter.title = str(chapter_a.string)
                chapter.href = chapter_a.get('href')
                if chapters and hasattr(chapters[-1], 'children'):
                    chapters[-1].children.append(chapter)
                else:
                    chapters.append(chapter)
            else:
                self.log.warning('unkown tag name %s', item.name)

        return chapters

    def load_chapter_detail(self, chapter):
        """Download one chapter and store its text on ``chapter.content``.

        The site serves paragraph <div>s in shuffled order; the permutation
        is base64-encoded in ``<meta name="client">``. Results are cached
        for an hour per chapter URL. On any failure the content is set to
        '' rather than raising.
        """
        if chapter.href == '#':
            # Section header: recurse into the real chapter pages.
            for child in chapter.children:
                child.book = chapter.book
                self.load_chapter_detail(child)
            return
        cache_key = '%s.self' % (chapter.href,)
        chapter_c = self.cache.get(cache_key)
        if chapter_c:
            self.log.debug('HIT  %s', cache_key)
            chapter.__dict__.update(chapter_c.__dict__)
            return
        try:
            html = self.get_html(chapter.href)
        except requests.exceptions.HTTPError:
            html = None
        if not html:
            self.log.error(
                "load book detail failed, can't get url " + chapter.href)
            chapter.content = ''
            return
        for x in self.remove_tags_re:
            html = x.sub('', html)
        # Unwrap intra-book links, keeping only the link text.
        html = self.book_inner_link_re.sub(r'\1', html)
        soup = BeautifulSoup(html, "html.parser")
        content_match = self.content_re.search(html)
        if content_match is None:
            # Fix: guard the search; previously a missing content div
            # raised AttributeError ('NoneType' has no attribute 'group').
            self.log.error('no content div found in %s', chapter.href)
            chapter.content = ''
            return
        content_body = content_match.group(0)
        p_div = [x.group(1).replace('<div class="imgbox center">', '')
                 for x in self.chapter_div_re.finditer(content_body)]

        meta_content = soup.find(
            'meta', attrs={'name': 'client'})
        if meta_content:
            meta_content = meta_content.get('content')
        else:
            # No permutation metadata: paragraphs are already in order.
            chapter.content = '\n'.join(p_div)
            self.cache.set(cache_key, chapter, 3600)
            return chapter

        chapter_sort_no = [int(x) for x in self.chapter_sort_no_re.split(
            base64.b64decode(meta_content).decode('utf-8'))]
        if len(chapter_sort_no) != len(p_div):
            self.log.warning('cannot sort, len not match')
            self.log.warning('url:%s,sort_len:%d,p_len:%d', chapter.href,
                             len(chapter_sort_no), len(p_div))
            chapter.content = ''
        else:
            sorted_div = [None] * len(chapter_sort_no)
            j = 0
            for i, sort_no in enumerate(chapter_sort_no):
                if sort_no < 5:
                    sorted_div[sort_no] = p_div[i]
                    j = j + 1
                else:
                    sorted_div[sort_no - j] = p_div[i]
            if sorted_div.count(None):
                self.log.warning('sort p failed,has None item')
                # Fix: was self.log.warnging(...), an AttributeError.
                self.log.warning(json.dumps(chapter.__dict__))
                chapter.content = ''
            else:
                sorted_div2 = [x.group(1)
                               .replace('<div class="imgbox center">', '')
                               for x in self.sorted_chapter_div_re
                               .finditer(html)]
                sorted_content = '\n'.join(sorted_div2)
                chapter.content = sorted_content + \
                    '\n'.join([x for x in sorted_div])
                self.download_images(chapter)

        self.cache.set(cache_key, chapter, 3600)
        self.log.debug('OK   %s', chapter.title)
        return chapter

    def download_img(self, url, file_name):
        """Download one image to ``file_name``; log (don't raise) on
        HTTP errors."""
        try:
            with open(file_name, 'wb') as fd:
                content = self.get_content(url)
                fd.write(content)
        except requests.exceptions.HTTPError:
            self.log.exception('download image failed')

    def download_images(self, chapter):
        """Download every image referenced by the chapter and rewrite its
        <img> tags to the local ``images/`` folder used in the epub."""
        sub_list = []

        for x in self.img_re.finditer(chapter.content):
            img_url = x.group(1)
            file_name = img_url[img_url.rfind('/') + 1:]
            if file_name:
                sub_list.append((x.group(0), file_name))
                self.images.append((file_name, img_url))
                self.download_img(img_url, os.path.join(chapter.book.folder, file_name))
        content = chapter.content
        for x in sub_list:
            content = content.replace(
                x[0], '<img src="images/%s" ' % (x[1],))
        chapter.content = content
