#!/usr/bin/env python
# -*- coding: utf-8 -*- 

# @File : TestReptile.py 
# @Author : t_fengyun
# @Time : 2019/9/11 16:55 
# @desc : there is description 

#小说下载函数
#id：小说编号
#txt字典项介绍
# title：小说题目
# first_page：第一章页面
# txt_section：章节地址
# section_name：章节名称
# section_text：章节正文
# section_ct：章节页数
import  requests,threading,re,os,time
from bs4 import BeautifulSoup

# Prompt for the novel id (the site's catalogue is keyed by a numeric book id).
txt_id = input('please input txt_id:')
# `txt` accumulates everything known about the novel: id, title, author,
# last-update time, latest chapter name, intro.
txt = {}
txt['title'] = ''
# BUG FIX: input() returns a str, so the original `txt_id == 0` comparison
# was always False and this warning could never fire; validate that the id
# is a non-empty digit string instead.
if not txt_id.strip().isdigit():
    print('请输入正确的编号')
txt['id'] = str(txt_id)
# Catalogue / chapter URLs all hang off this base as <base><book-id>/.
req_url_base = 'http://www.qu.la/book/'
# Request headers captured verbatim from a browser dev-tools session.
# NOTE(review): the ":authority"/":method"/":path"/":scheme" entries are
# HTTP/2 pseudo-headers, not real header names — http.client rejects header
# names containing ':', so these would raise if ever passed via `headers=`.
# Presumably pasted straight from Chrome; confirm before reusing as headers.
# NOTE(review): the cookie pins a specific bookid/chapterid from the capture
# session and is likely stale.
req_header = {
    ":authority": "www.qu.la",
    ":method": "GET",
    ":path": "/book/214076/1399351.html",
    ":scheme": "https",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
    "accept-encoding": "gzip, deflate, br",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "no-cache",
    "cookie": "__cfduid=d8c646ce2574509e3326d0e51217a9b911568168071; PPad_id_PP=1; bookid=214076; chapterid=1399351; chaptername=%25u7B2C271%25u7AE0%2520%25u600E%25u4E48%25u641E%253F; bcolor=; font=; size=; fontcolor=; width=",
    "pragma": "no-cache",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "none",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
}
try:
    # ---- resolve the novel's table-of-contents page -------------------------
    print("请输入需要下载的小说编号：")
    txt['id'] = input()
    req_url = req_url_base + txt['id'] + '/'  # catalogue URL for this book id
    print('====>url:', req_url)
    print("小说编号：" + txt['id'])
    # BUG FIX: the header dict was passed as `params=` (i.e. appended to the
    # query string) instead of `headers=`.  HTTP/2 pseudo-headers such as
    # ":authority" are illegal header names for http.client, so strip them
    # before using the dict as real headers.
    headers = {k: v for k, v in req_header.items() if not k.startswith(':')}
    res = requests.get(req_url, headers=headers)
    soups = BeautifulSoup(res.text, "html.parser")
    # Title, author block, last-update time, latest chapter, intro.
    txt['title'] = soups.select('#wrapper .box_con #maininfo #info h1')[0].text
    info = soups.select('#wrapper .box_con #maininfo #info p')
    txt['update'] = info[2].text   # last-update timestamp
    txt['lately'] = info[3].text   # latest chapter name
    txt['author'] = info[0].text   # author line
    txt['intro'] = soups.select('#wrapper .box_con #maininfo #intro')[0].text.strip()
    print("编号：" + '{0:0>8}   '.format(txt['id']) + "小说名：《" + txt['title'] + "》  开始下载。")
    print("正在寻找第一章页面。。。")
    # Every <a> in the chapter list; the first one yields the first chapter id.
    chapter_links = soups.select('#wrapper .box_con #list dl dd a')
    section_ct = len(chapter_links)
    first_page = chapter_links[0]['href'].split('.')[0]
    print("小说章节页数：" + str(section_ct))
    print("第一章地址寻找成功：" + first_page)
    txt_section = first_page  # id of the chapter currently being fetched
    download_name = '{0:0>8}-{1}.txt.download'.format(txt['id'], txt['title'])
    # Context manager guarantees the handle is closed even on an exception
    # (the original leaked `fo` on any error inside the loop).
    with open(download_name, "ab+") as fo:
        # File header: title / author / update time / latest chapter / intro.
        for line in (txt['title'], txt['author'], txt['update'], txt['lately']):
            fo.write((line + "\r\n").encode('UTF-8'))
        fo.write(("*******简介*******\r\n").encode('UTF-8'))
        fo.write(("\t" + txt['intro'] + "\r\n").encode('UTF-8'))
        fo.write(("******************\r\n").encode('UTF-8'))
        retries = 0
        while True:
            try:
                # Fetch and parse the current chapter page.
                r = requests.get(req_url + str(txt_section), headers=headers)
                soup = BeautifulSoup(r.text, "html.parser")
                section_name = soup.select('#wrapper .content_read .box_con .bookname h1')[0].text
                # BUG FIX: scripts must be decomposed on the Tag, THEN the
                # text extracted.  The original extracted `.text` first and
                # called .select()/.text on a plain str, which raised
                # AttributeError on every iteration; the bare `except:`
                # swallowed it, so the loop retried forever and never wrote
                # a single chapter.
                content_tag = soup.select('#wrapper .content_read .box_con #content')[0]
                for ss in content_tag.select("script"):  # drop inline ad scripts
                    ss.decompose()
                # Collapse whitespace runs into CRLF+tab paragraph breaks.
                section_text = re.sub(r'\s+', '\r\n\t', content_tag.text).strip('\r\n')
                # BUG FIX: write the chapter BEFORE testing the next-link, so
                # the final chapter is not silently dropped.
                fo.write(('\r' + section_name + '\r\n').encode('UTF-8'))
                fo.write(section_text.encode('UTF-8'))
                print(txt['title'] + ' 章节：' + section_name + '     已下载')
                retries = 0  # a successful chapter resets the retry budget
                # The "next chapter" link; on the last chapter it points back
                # to the catalogue ('./'), which ends the loop.
                txt_section = soup.select('#wrapper .content_read .box_con .bottem2 #A3')[0]['href']
                if txt_section == './':
                    print("编号：" + '{0:0>8}   '.format(txt['id']) + "小说名：《" + txt['title'] + "》 下载完成")
                    break
            except Exception:
                # Bounded retry with backoff instead of the original
                # unbounded hot loop; after 5 straight failures give up and
                # let the outer handler log the error.
                retries += 1
                if retries > 5:
                    raise
                print("编号：" + '{0:0>8}   '.format(txt['id']) + "小说名：《" + txt['title'] + "》 章节下载失败，正在重新下载。")
                time.sleep(1)
    # Completed: drop the .download suffix.
    os.rename(download_name, '{0:0>8}-{1}.txt'.format(txt['id'], txt['title']))
except Exception:
    # On any failure, append a timestamped line to the log and rename the
    # partial download to .error.  (Log filename "dowload.log" kept as-is
    # for compatibility with existing logs.)
    stamp = '[' + time.strftime('%Y-%m-%d %X', time.localtime()) + ']'
    with open('dowload.log', "ab+") as fo_err:
        try:
            msg = stamp + "：编号：" + '{0:0>8}   '.format(txt['id']) + "小说名：《" + txt['title'] + "》 下载失败。"
            fo_err.write((msg + "\r\n").encode('UTF-8'))
            print(msg)
            os.rename('{0:0>8}'.format(txt['id']) + '-' + txt['title'] + '.txt.download',
                      '{0:0>8}'.format(txt['id']) + '-' + txt['title'] + '.txt.error')
        except Exception:
            # Title may be unknown (failure before the catalogue parsed) or
            # the .download file may not exist yet — log a shorter line.
            msg = stamp + "：编号：" + '{0:0>8}   '.format(txt['id']) + "下载失败。"
            fo_err.write((msg + "\r\n").encode('UTF-8'))
            print(msg)

