# coding=utf-8
#!/usr/bin/python

# Thread pool
from concurrent.futures import ThreadPoolExecutor
# HTTP client library
import requests
# File-handling library
import codecs
import util.os_utils as os
import random
from bs4 import BeautifulSoup
import sys
import importlib
importlib.reload(sys)

# Pool of User-Agent strings; one is chosen at random so requests look like
# they come from a regular browser rather than a script.
random_headers = [
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 "
    "Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 "
    "Safari/537.75.14",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)",
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
    'Opera/9.25 (Windows NT 5.1; U; en)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
    'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
    "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 "
    "Chrome/16.0.912.77 Safari/535.7",
    "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0",
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'
]

# Request headers used on every fetch to mimic a real browser.
# NOTE: the original `global headers` / `global base_path` statements were
# removed — `global` at module scope is a no-op in Python.
headers = {'User-Agent': random.choice(random_headers)}

# Site root; relative chapter links from the page are joined onto this.
domain = 'http://www.xbiquge.la'
# Local directory where downloaded books are stored.
base_path = '/Users/liujun/Documents/xbiquge'


def download(url):
    """Download every chapter of the book at *url* into its own .txt file.

    The book page is expected to have the site's layout: the title inside
    ``div#info > h1`` and one ``<a>`` per chapter inside ``div#list``.
    Files are written under ``base_path/<book title>/<chapter title>.txt``.

    :param url: absolute URL of a book's table-of-contents page
    """
    # timeout keeps a stalled connection from hanging the whole worker.
    res = requests.get(url, headers=headers, timeout=30)
    html_doc = res.content.decode('utf8')
    soup = BeautifulSoup(html_doc, 'html.parser')
    main_title = soup.find('div', id='info').find('h1').text
    book_path = base_path + '/' + main_title
    # Project helper (util.os_utils): creates the book directory.
    os.createFile(book_path)
    # Every chapter link on the table-of-contents page.
    chapter_links = soup.find('div', id='list').find_all('a')
    print('总章节数: %d ' % len(chapter_links))
    for link in chapter_links:
        try:
            chapter_url = domain + link.get('href')
            content = get_contents(chapter_url)
            file_path = book_path + "/" + link.text + ".txt"
            write_txt(file_path, content, 'utf8')
        except Exception as e:
            # Best-effort: a single bad chapter is logged and skipped so the
            # rest of the book still downloads.
            print(e)


# 获取章节内容
def get_contents(chapter):
    """Fetch a single chapter page and return its plain-text body.

    :param chapter: absolute URL of the chapter page
    :return: chapter text with paragraph breaks restored as newlines
    """
    # Pass the browser-like headers and a timeout, consistent with download().
    req = requests.get(url=chapter, headers=headers, timeout=30)
    html_doc = req.content.decode('utf8')
    bf = BeautifulSoup(html_doc, 'html.parser')
    # Strip <p> tags (site notices/ads) before extracting the body text.
    for tag in bf('p'):
        tag.extract()
    texts = bf.find_all('div', id="content")
    # '\xa0' is &nbsp;; four in a row mark a paragraph break on this site.
    content = texts[0].text.replace('\xa0' * 4, '\n')
    return content


# 写入文件
def write_txt(chapter, content, code):
    """Append *content* to the file at path *chapter*.

    :param chapter: destination file path
    :param content: text to append
    :param code: text encoding to write with (e.g. ``'utf8'``)
    """
    # Built-in open() supersedes the legacy codecs.open() in Python 3.
    with open(chapter, 'a', encoding=code) as f:
        f.write(content)


# 主方法
def main():
    """Scrape the site's front page and download every recently updated book.

    Each book found in ``div#newscontent`` is handed to :func:`download` on a
    small thread pool, so several books fetch concurrently (I/O-bound work).
    """
    # timeout keeps a stalled connection from hanging the program.
    res = requests.get(domain, headers=headers, timeout=30)
    html_doc = res.content.decode('utf8')
    soup = BeautifulSoup(html_doc, 'html.parser')
    # One <li> per recently-updated book.
    li = soup.find('div', id='newscontent').find_all('li')
    print('更新总数: %d ' % len(li))
    with ThreadPoolExecutor(max_workers=5) as t:
        for each in li:
            url = each.find('span', class_='s2').find('a', target="_blank").attrs['href']
            # One download task per book; the pool joins on exit of the with-block.
            t.submit(download, url)


# Entry point: downloads one specific book directly.
# NOTE(review): main() (batch download of the front-page update list) is
# defined but never called — presumably this direct call was for testing a
# single book; confirm which entry point is intended before cleanup.
if __name__ == '__main__':
    download('http://www.xbiquge.la/79/79417/')
