#!/usr/bin/env python
# -*- coding: utf-8 -*- 

# @File : Reptile.py 
# @Author : t_fengyun
# @Time : 2019/9/11 10:27 
# @desc : there is reptile novel

import  requests,threading,re,os,time
from bs4 import BeautifulSoup
# Request headers impersonating desktop Chrome, copied from browser devtools.
# FIX: the HTTP/2 pseudo-headers (":authority", ":method", ":path", ":scheme")
# captured from devtools must NOT be sent as regular headers -- requests speaks
# HTTP/1.1 and urllib3 rejects header names containing ":".  requests derives
# the equivalent information from the URL itself, so they are dropped.
# FIX: "br" (brotli) is removed from accept-encoding -- requests cannot decode
# a brotli-compressed body unless the optional brotli package is installed,
# which would leave r.text as garbled binary.
req_header = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
    "accept-encoding": "gzip, deflate",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "no-cache",
    # Session cookie captured at scrape time; will expire -- refresh if the
    # site starts returning an error/verification page.
    "cookie": "__cfduid=d8c646ce2574509e3326d0e51217a9b911568168071; PPad_id_PP=1; bookid=214076; chapterid=1399351; chaptername=%25u7B2C271%25u7AE0%2520%25u600E%25u4E48%25u641E%253F; bcolor=; font=; size=; fontcolor=; width=",
    "pragma": "no-cache",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "none",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
}
# Fetch one chapter page, extract title + body, append to a local text file.
req_url_base = 'http://www.qu.la/book/'
req_url = req_url_base + '214076/'      # book id
txt_section = '1399351.html'            # chapter page

# BUG FIX: the headers were previously passed as ``params=``, which serialized
# the whole dict into the URL query string and sent NO custom headers at all.
# ``headers=`` is what actually transmits the User-Agent / cookie to the site.
# A timeout is added so a stalled connection cannot hang the script forever.
r = requests.get(req_url + txt_section, headers=req_header, timeout=30)
# Fail loudly on HTTP errors instead of silently parsing an error page.
r.raise_for_status()

soup = BeautifulSoup(r.text, 'html.parser')

# Chapter title and body text; an IndexError here means the page layout
# (or an anti-bot interstitial) changed -- verify the selectors against
# the live page in that case.
section_name = soup.select('#wrapper .content_read .box_con .bookname h1')[0].text
section_text = soup.select('#wrapper .content_read .box_con #content')[0].text

# Collapse every run of whitespace into a CRLF + tab paragraph separator.
# Raw string avoids the invalid-escape warning the bare '\s+' produced.
section_text = re.sub(r'\s+', '\r\n\t', section_text).strip('\r\n')

txt_name = '1.txt'
# ``with`` guarantees the file is closed even if a write raises; the original
# binary-append mode and manual UTF-8 encoding are preserved so repeated runs
# keep appending chapters to the same file.
with open(txt_name, 'ab+') as fo:
    fo.write(('\r' + section_name + '\r\n').encode('UTF-8'))
    fo.write(section_text.encode('utf-8'))

print("章节名:" + section_name)
print("章节内容：\n" + section_text)