# -*- coding: utf-8 -*-
import re
import codecs
import os
import concurrent.futures
import requests
from bs4 import BeautifulSoup
from ebook import epub, txt

# Scrape configuration for a single book on 555zw.com.
host = 'http://www.555zw.com/book/27/27908/'  # book index page (chapter list)
name = '阳神'  # book title (used by the ebook generation step)
slug = 'ys'  # short id; also the local output directory name
author = '梦入神机'  # book author


def download_chapter(i, url):
    """Download one chapter page and save it as <slug>/html/NNN.html.

    Args:
        i: zero-based chapter index, zero-padded to 3 digits for the filename.
        url: chapter path relative to the book index page (``host``).
    """
    absolute_url = host + url
    raw = requests.get(absolute_url).content
    # The site serves GBK; gb18030 is a strict superset of gbk and also
    # decodes characters that plain 'gbk' rejects.  Decode once here and
    # write the file explicitly as UTF-8 (the original decoded, re-encoded,
    # and decoded again, and relied on the platform default encoding).
    text = raw.decode('gb18030')
    with open(slug + '/html/' + str(i).zfill(3) + '.html', 'w',
              encoding='utf-8') as f:
        f.write(text)


def normalize_body(body):
    """Strip HTML artifacts from a chapter body and drop blank lines.

    Removes ``&nbsp;`` entities and ``<br>``/``<br/>`` tags, then joins the
    remaining non-empty lines with single newlines.
    """
    cleaned = body
    for token in ('&nbsp;', '<br/>', '<br>'):
        cleaned = cleaned.replace(token, '')
    kept = (line for line in cleaned.strip().splitlines() if line.strip())
    return '\n'.join(kept)


def normalize_title(title):
    """Remove boilerplate markers and escaped angle brackets from a title."""
    result = title
    for junk in ('正文', '&lt;', '&gt;', '章节目录'):
        result = result.replace(junk, '')
    return result.strip()


def parse_chapter():
    """Extract title and body from each saved HTML page into <slug>/txt/.

    Reads every file under <slug>/html, applies the body/title regexes,
    normalizes the matches, and writes one <slug>/txt/NNN.txt per chapter.
    Pages that fail to parse are reported and skipped.
    """
    body_re = re.compile(
        r'<div id="content" align="center">([\s\S]+?)<span></span>')
    title_re = re.compile(r'<div class="article_listtitle">([\s\S]+?)</div>')
    for root, dirs, files in os.walk(slug + "/html", topdown=False):
        # 'fname' instead of 'name' — the original shadowed the
        # module-level book-title constant of the same name.
        for fname in files:
            # Downloaded pages are written as UTF-8; read them back
            # explicitly instead of relying on the platform default.
            with open(os.path.join(root, fname), 'r', encoding='utf-8') as hf:
                html = hf.read()
            d = body_re.search(html)
            if d is None:
                print(fname + ' body parse error')
                continue
            t = title_re.search(html)
            if t is None:
                print(fname + ' title parse error')
                continue
            body = normalize_body(d.group(1))
            title = normalize_title(t.group(1))
            txtname = slug + '/txt/' + fname.replace('.html', '') + ".txt"
            with open(txtname, 'w', encoding='utf-8') as tf:
                tf.write(title)
                tf.write("\n")
                tf.write(body)


def get_links():
    """Fetch the book index page and return the chapter href list.

    Returns:
        list[str]: chapter URLs relative to ``host``, in page order.
    """
    raw = requests.get(host).content
    # The index page is GBK-encoded.  Decode to str once and hand str to
    # BeautifulSoup: the original re-encoded to UTF-8 bytes, which lets
    # the parser's encoding sniffing trip over the page's gbk <meta> tag.
    soup = BeautifulSoup(raw.decode('gb18030'), 'html.parser')
    table = soup.find('table', class_='acss')
    return [a.get('href') for a in table.find_all('a')]


def init_dir():
    """Create the working directory layout <slug>/{html,txt,epub}.

    ``os.makedirs(..., exist_ok=True)`` creates any missing parents and is
    race-free, unlike the exists()-then-mkdir pattern it replaces.
    """
    for sub in ('html', 'txt', 'epub'):
        os.makedirs(os.path.join(slug, sub), exist_ok=True)


def thread_open(links):
    """Download every chapter in *links* concurrently (5 worker threads).

    Reports per-chapter success or the exception that a download raised.
    """
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        futures = {}
        for index, link in enumerate(links):
            futures[executor.submit(download_chapter, index, link)] = index
        for done in concurrent.futures.as_completed(futures):
            index = futures[done]
            try:
                done.result()
            except Exception as exc:
                print('%r generated an exception: %s' % (index, exc))
            else:
                print('%r page is ok' % (index,))


if __name__ == '__main__':
    # Full scrape pipeline, currently disabled; uncomment stages to run.
    # links = get_links()
    # init_dir()
    # thread_open(links)
    # parse_chapter()
    # txt.combine_file()
    # epub.generate_epub()
    # prepare_epub()
    # test_epub()
    # An if-body consisting only of comments is a SyntaxError, so keep an
    # explicit no-op while the pipeline is commented out.
    pass
