# -*- coding: utf-8 -*-
"""
Author: cacho_37967865
Blog:   https://blog.csdn.net/sinat_37967865
File:   getWebTxt.py
Date:   2019-07-07
Note:   Python web crawler that scrapes novel chapter titles.
"""

import requests
import random
import time
import math
from bs4 import BeautifulSoup
from pacong.txtdeal.base import deal_replace,deal_title
from pycacho.cachobase.file_deal import an_save_txt,a_save_txt,ann_save_txt
from pycacho.cachobase.deal_replace import replace_one_list
from pycacho.cachobase.logger import Logger
import re

# Pool of browser User-Agent strings; one is chosen at random per run so the
# requests look less uniform to the target site.
# Fix: the original entries had "'Accept-Language':'zh-CN,zh;q=0.9'" pasted
# INSIDE the User-Agent value; Accept-Language is its own header field.
agent = [
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
]

# Default request headers shared by every request in this script.
headers = {
    'User-Agent': random.choice(agent),
    'Accept-Language': 'zh-CN,zh;q=0.9',
}

# Passing verify=False to requests.get(url, ...) skips SSL-certificate
# verification, but it floods the log with InsecureRequestWarning messages.
# The four lines below tune connection handling and silence those warnings.
# NOTE(review): `session` is created here but get_book_title() below calls
# requests.get() directly, so the session settings are currently unused — confirm.
session = requests.Session()
session.keep_alive = False                      # send "Connection: close"; do not reuse connections
requests.adapters.DEFAULT_RETRIES = 5          # retry failed connections up to 5 times
requests.packages.urllib3.disable_warnings()   # suppress the SSL-verification warnings

# Index page of the novel site to scrape (intentionally blank; fill in before running).
book_url = ''
# Output file collecting the chapter titles.  A raw string makes the Windows
# path robust: the old literal mixed '\\' with '\P', relying on '\P' not
# being an escape sequence.  The resulting value is unchanged.
out_title = r'F:\PythonProject\Python\title.txt'

def get_book_title(story_url, keyword):
    """Scrape chapter titles from *story_url* and append them to ``out_title``.

    Every ``<a>`` tag whose ``href`` contains ``'.html'`` is treated as a
    chapter link.  If *keyword* is non-empty, only titles containing it are
    saved; the existing call site passes ``''``, which matches every title,
    so this filter is backward-compatible.

    :param story_url: URL of the book's chapter-index page.
    :param keyword:   optional substring filter applied to each title.
    """
    resp = requests.get(story_url, headers=headers, timeout=20, verify=False)
    resp.encoding = 'utf-8'
    # "lxml" was observed to drop data on this site; html5lib is more lenient.
    soup = BeautifulSoup(resp.text, 'html5lib')
    for chapter in soup.find_all('a'):
        href = chapter.get('href')          # None when the <a> tag has no href
        if not href or '.html' not in href:
            continue
        title = chapter.text
        if keyword not in title:            # '' is a substring of everything
            continue
        try:
            an_save_txt(out_title, deal_title(title))
        except Exception:
            # Best-effort: one failed save should not abort the whole scrape.
            # (Replaces a bare ``except: pass`` that also hid the TypeError
            # raised when href was None — now handled explicitly above.)
            pass

if __name__ == '__main__':
    # Script entry point: an empty keyword keeps every chapter title.
    get_book_title(book_url, keyword='')
