#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018-8-15 12:00 
# @Author : By Joker
# @File : getBook2.py 
# @Software: PyCharm

from urllib import request

from bs4 import BeautifulSoup


# 获取指定url中的正文
def download_specified_chapter(txt_url, chapter_url, desc, header, coding, chapter_name):
    # 生成request对象
    dowload_req = request.Request(chapter_url, headers=header)
    # 打开request对象中的url网址,并获得对应内容
    response = request.urlopen(dowload_req)
    # 解码之后,获取页面的html
    download_html = response.read().decode(coding, 'ignore')
    # 获取html的beautifulSoup
    origin_soup = BeautifulSoup(download_html, 'lxml')
    # 获取正文部分
    content = origin_soup.find(class_='content')
    # 整理正文格式
    txt = content.text.replace('\n' * 8, '\n').replace('nrgg360();', '')
    # write不能完全打印所有的unicode字符,所以改变目标文件的编码
    print("正在下载:{}----链接:{}".format(chapter_name, chapter_url))
    with open(txt_url, 'a', encoding=coding) as f:
        if chapter_name is None:
            f.write('\n')
        else:
            f.write('\n\n' + chapter_name + '\n')
        f.write(desc + txt)


# 获取指定url中的目录列表
def download_all_url(header, coding, i, *url, **book):
    url_req = request.Request(url[1] + book[str(i)][3], headers=header)
    response = request.urlopen(url_req)
    html = response.read().decode(coding, "ignore")
    html_soup = BeautifulSoup(html, "lxml")
    print("开始下载: {}--->>>>>>>目录链接: {}".format(book[str(i)][0], url[1] + book[str(i)][3]))
    for element in html_soup.find_all("li", class_="line2"):
        chapter_name = element.string
        chapter_url = url[1] + element.a.get('href')
        download_specified_chapter(url[0] + book[str(i)][0] + "--" + book[str(i)][1] + ".txt", chapter_url,
                                   book[str(i)][2],
                                   header, coding,
                                   chapter_name)
    for element in html_soup.find_all("li", class_="line3"):
        chapter_name = element.string
        chapter_url = url[1] + element.a.get('href')
        download_specified_chapter(url[0] + book[str(i)][0] + "--" + book[str(i)][1] + ".txt", chapter_url,
                                   book[str(i)][2],
                                   header, coding,
                                   chapter_name)


# 指定url中的所有书籍
def download_all_book(header, coding, *url):
    url_req = request.Request(url[1], headers=header)
    response = request.urlopen(url_req)
    html = response.read().decode(coding, "ignore")
    html_soup = BeautifulSoup(html, "lxml")
    div = html_soup.find_all("div", class_="m-box")
    book_num = div.__len__()
    i = 0
    while i < book_num:
        mtitle = html_soup.select(".m-title h1")[i].text
        author = html_soup.select(".m-author")[i].text
        desc = html_soup.select(".m-intro")[i].text
        next = html_soup.find_all("a", attrs="m-diralinks", text="目录")[i]["href"]
        book = {str(i): [mtitle, author, desc, next]}
        download_all_url(header, "utf-8", i, *url, **book)
        i = i + 1


if __name__ == "__main__":
    # Site root and local output directory for the downloaded books.
    base_url = "http://www.75xs.cc"
    save_dir = "E:/books/long/2/"
    locations = [save_dir, base_url]
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36 Maxthon/5.2.3.4000'}
    download_all_book(request_headers, "utf-8", *locations)
