# coding:utf-8
import requests
from bs4 import BeautifulSoup
import re
import os
import time

# req_header = {
#     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
#     'Accept-Encoding': 'gzip, deflate',
#     'Accept-Language': 'zh-CN',
#     'Cache-Control': 'max-age=0',
#     'Connection': 'keep-alive',
#     'Cookie': '''dblck=1; imgHost=0; skin=0; read=%E7%AC%AC1%E5%86%8C%7C325%7C%E7%AC%AC1%E5%86%8C%7C325%7C1%7C1531099093%7C%2Fmanhua%2Flongzu1%2F325.html%7C%2Fmanhua%2Flongzu1%2F325.
#                     html; __tins__19026477=%7B%22sid%22%3A%201532412977000%2C%20%22vd%22%3A%207%2C%20%22
#                     expires%22%3A%201532415416306%7D; __51cke__=; __51laig__=7; bdshare_ty=0x18; current-font=1; current-back=1; current-fimaly=5''',
#     'DNT': '1',
#     'Host': 'www.75xs.cc',
#     'Referer': 'http://www.75xs.cc/book/longzu1/',
#     'Upgrade-Insecure-Requests': '1',
#     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36 Maxthon/5.2.3.4000',
#     'X-DevTools-Emulate-Network-Conditions-Client-Id': '046c20e2-b2a6-4d08-808f-e4b3966b9be9'
# }

# HTTP headers captured from a real browser session so the site serves the
# normal desktop pages; sent with every request the scraper makes.
req_header = dict([
    ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'),
    ('Accept-Encoding', 'gzip, deflate'),
    ('Accept-Language', 'zh-CN'),
    ('Cache-Control', 'max-age=0'),
    ('Connection', 'keep-alive'),
    # Session cookie copied verbatim from the recorded browser visit.
    ('Cookie', 'current-font=1; current-back=1; current-fimaly=5; bdshare_ty=0x18; __tins__19026477=%7B%22sid%22%3A%201534304263900%2C%20%22vd%22%3A%204%2C%20%22expires%22%3A%201534306106967%7D; __51cke__=; __51laig__=4'),
    ('DNT', '1'),
    ('Host', 'www.75xs.cc'),
    ('If-Modified-Since', 'Wed, 05 Apr 2017 07:30:54 GMT'),
    ('If-None-Match', '48a98fdeadd21:0'),
    ('Referer', 'http://www.75xs.cc/'),
    ('Upgrade-Insecure-Requests', '1'),
    ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36 Maxthon/5.2.3.4000'),
    ('X-DevTools-Emulate-Network-Conditions-Client-Id', '861135d3-9d44-4678-a6d8-b9cdb4bb105f'),
])

# Base URL of the novel site; the book id is appended to form the index page.
req_url_base = 'http://www.75xs.cc/book/'


# Novel download function.
def get_txt(txt_id):
    """Download one novel from www.75xs.cc into a UTF-8 text file.

    txt_id: the site's book id (the path segment after /book/), e.g. 'longzu1'.

    The output is written as '<id>-<title>.txt.download' while in progress,
    renamed to '.txt' on success and to '.txt.error' on failure.  Failures
    are also appended to 'dowload.log' (sic — original log file name kept).
    """
    txt = {}
    txt['title'] = ''
    txt['id'] = str(txt_id)
    try:
        req_url = req_url_base + txt['id'] + '/'  # book index page URL
        print("小说编号：" + txt['id'])
        # BUGFIX: the browser headers must be sent via `headers=`; the
        # original passed them as `params=`, i.e. query-string arguments.
        res = requests.get(req_url, headers=req_header)
        soups = BeautifulSoup(res.text, "html.parser")
        # NOTE(review): 'clearfix' below is a tag selector; if it is actually
        # a CSS class on .g-mn this should read '.g-mn.clearfix' — confirm
        # against the live page before changing (pattern repeats below).
        # Novel title.
        txt['title'] = soups.select('.g-doc .g-bd .g-mn clearfix .m-fm-l .m-book_info .m-infos h3')[0].text
        # Novel author (first <span> in the info box).
        txt['author'] = soups.select('.g-doc .g-bd .g-mn clearfix .m-fm-l .m-book_info .m-infos span')[0].text
        # Short synopsis paragraph.
        txt['intro'] = soups.select('.g-doc .g-bd .g-mn clearfix .m-fm-l .m-book_info p')[0].text.strip()
        print("编号：" + '{0:0>8}   '.format(txt['id']) + "小说名：《" + txt['title'] + "》  开始下载。")
        print("正在寻找第一章页面。。。")
        # All chapter links from the index page.
        chapter_links = soups.select('.g-doc .g-bd .g-mn clearfix .m-fm-l .m-book-list .play_0 ul li a')
        # Total number of chapter pages.
        section_ct = len(chapter_links)
        # Relative URL of the first chapter; updated each loop iteration.
        txt_section = chapter_links[0]['href']
        print("小说章节页数：" + str(section_ct))
        print("第一章地址寻找成功：" + txt_section)
        download_name = '{0:0>8}-{1}.txt.download'.format(txt['id'], txt['title'])
        # `with` guarantees the file is closed even if an exception escapes,
        # so the rename in the error handler below never hits an open handle.
        with open(download_name, "ab+") as fo:
            # File header: title, author and synopsis.
            fo.write((txt['title'] + "\r\n").encode('UTF-8'))
            fo.write((txt['author'] + "\r\n").encode('UTF-8'))
            fo.write(("*******简介*******\r\n").encode('UTF-8'))
            fo.write(("\t" + txt['intro'] + "\r\n").encode('UTF-8'))
            fo.write(("******************\r\n").encode('UTF-8'))
            # Follow the "next chapter" link until it points back to the
            # index ('./'), appending each chapter to the file.
            while True:
                try:
                    # BUGFIX: headers=, not params= (same issue as above).
                    r = requests.get(req_url + str(txt_section), headers=req_header)
                    soup = BeautifulSoup(r.text, "html.parser")
                    # Chapter title element.
                    section_name = soup.select('.g-doc .g-bdyd .yuedu .wd880 .yuedu_cot .yuede_index h3')[0]
                    section_text = soup.select('.g-doc .g-bdyd .yuedu .wd880 .yuedu_cot .yuede_index .yuedu_index_nr .content')[0]
                    for ss in section_text.select("script"):  # strip embedded scripts
                        ss.decompose()
                    # Collapse whitespace runs into CRLF + tab paragraph breaks.
                    section_text = re.sub(r'\s+', '\r\n\t', section_text.text).strip('\r\n')
                    # BUGFIX: '.yuedu_index_nr_a' — the original was missing
                    # the leading dot, making it a never-matching tag selector.
                    txt_section = soup.select(
                        '.g-doc .g-bdyd .yuedu .wd880 .yuedu_cot .yuede_index .yuedu_index_nr .yuedu_index_nr_a ul li a')[0][
                        'href']
                    # BUGFIX: write the chapter BEFORE testing for the end,
                    # otherwise the final chapter is fetched but never saved.
                    fo.write(('\r' + section_name.text + '\r\n').encode('UTF-8'))
                    fo.write(section_text.encode('UTF-8'))
                    print(txt['title'] + ' 章节：' + section_name.text + '     已下载')
                    # On the last chapter the "next" link points back to the
                    # book index ('./'); stop there.
                    if txt_section == './':
                        print("编号：" + '{0:0>8}   '.format(txt['id']) + "小说名：《" + txt['title'] + "》 下载完成")
                        break
                except Exception:
                    # Best-effort retry of the same chapter; note this loops
                    # forever if the page is permanently broken.
                    print("编号：" + '{0:0>8}   '.format(txt['id']) + "小说名：《" + txt['title'] + "》 章节下载失败，正在重新下载。")
        os.rename(download_name, '{0:0>8}-{1}.txt'.format(txt['id'], txt['title']))
    except Exception:
        # Log the failure and mark any partial download as '.txt.error'.
        with open('dowload.log', "ab+") as fo_err:
            try:
                fo_err.write(('[' + time.strftime('%Y-%m-%d %X', time.localtime()) + "]：编号：" + '{0:0>8}   '.format(
                    txt['id']) + "小说名：《" + txt['title'] + "》 下载失败。\r\n").encode('UTF-8'))
                print('[' + time.strftime('%Y-%m-%d %X', time.localtime()) + "]：编号：" + '{0:0>8}   '.format(
                    txt['id']) + "小说名：《" + txt['title'] + "》 下载失败。")
                os.rename('{0:0>8}'.format(txt['id']) + '-' + txt['title'] + '.txt.download',
                          '{0:0>8}'.format(txt['id']) + '-' + txt['title'] + '.txt.error')
            except Exception:
                # We failed before the title was scraped / file was created;
                # log with the id only.
                fo_err.write(('[' + time.strftime('%Y-%m-%d %X', time.localtime()) + "]：编号：" + '{0:0>8}   '.format(
                    txt['id']) + "下载失败。\r\n").encode('UTF-8'))
                print('[' + time.strftime('%Y-%m-%d %X', time.localtime()) + "]：编号：" + '{0:0>8}   '.format(
                    txt['id']) + "下载失败。")


# Book id of the novel to download; guard the call so importing this module
# (e.g. to reuse get_txt) does not immediately start a download.
if __name__ == "__main__":
    get_txt('longzu1')
