import requests
import random
from bs4 import BeautifulSoup
from selenium import webdriver

from pycacho.cachobase.file_deal import an_save_txt,a_save_txt,ann_save_txt

# Candidate desktop User-Agent strings; one is chosen at random per run to
# vary the request fingerprint.
# BUG FIX: the original strings had ",'Accept-Language':'zh-CN,zh;q=0.9'"
# concatenated onto the end of the User-Agent value itself, which sent a
# malformed UA header. Accept-Language belongs in its own header field below.
agent = [
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
]

headers = {
    'User-Agent': random.choice(agent),
    'Accept-Language': 'zh-CN,zh;q=0.9',
}

# When requests is called with verify=False (to skip SSL certificate
# verification), the log fills with InsecureRequestWarning messages; the
# four lines below configure retries and silence those warnings.
session = requests.Session()
session.keep_alive = False
requests.adapters.DEFAULT_RETRIES = 5
requests.packages.urllib3.disable_warnings()

story_id = str(118335)
# NOTE: the original wrote 'F:\PythonProject\Python\\' — '\P' is not a
# recognized escape, so it happened to produce the intended path; the
# backslashes are doubled here so the escaping is explicit and identical.
out_txt = 'F:\\PythonProject\\Python\\' + story_id + '.txt'

def get_book_detail(out_txt, url, id_value):
    """Fetch one chapter page over HTTP and append its title and body to a file.

    Parameters
    ----------
    out_txt : str
        Path of the text file the chapter is appended to (via a_save_txt).
    url : str
        URL of the chapter page to download.
    id_value : str
        HTML id of the element that contains the chapter body text.

    Raises
    ------
    ValueError
        If no element with id=id_value exists on the page.
    """
    # verify=False skips SSL verification; warnings are suppressed at module level.
    resp = requests.get(url, headers=headers, timeout=60, verify=False)
    resp.encoding = 'utf-8'
    # Strip paragraph/line-break tags so get_text() yields clean prose.
    html = resp.text.replace('</p>', '').replace('<br/>', '')
    # html5lib is more tolerant here; the lxml parser dropped data on these pages.
    soup = BeautifulSoup(html, 'html5lib')
    # BUG FIX: the original saved/printed the <h1> Tag object itself;
    # extract its text, guarding against a page without an <h1>.
    title = soup.h1.get_text(strip=True) if soup.h1 is not None else ''
    a_save_txt(out_txt, title)
    # BUG FIX: soup.find() returns None for a missing id and the original
    # crashed with an opaque AttributeError; fail with a clear message.
    node = soup.find(id=id_value)
    if node is None:
        raise ValueError('element with id=%r not found at %s' % (id_value, url))
    content = node.get_text()
    print(title)
    a_save_txt(out_txt, content)

def get_chapter(out_txt):
    """Render one chapter page in a Selenium-driven Chrome browser and append
    the chapter body (element id='content') to out_txt via an_save_txt.

    Parameters
    ----------
    out_txt : str
        Path of the text file the chapter content is appended to.
    """
    driver = webdriver.Chrome()
    try:
        driver.get('https://www.luoqiuzw.com/book/118335/77617051.html')
        driver.maximize_window()
        # Give dynamically loaded content up to 2 seconds to appear.
        driver.implicitly_wait(2)
        soup = BeautifulSoup(driver.page_source, 'lxml')
        content = soup.find(id='content').get_text()
    finally:
        # BUG FIX: the original never closed the browser, leaking a Chrome
        # process on every call; always release it, even on error.
        driver.quit()
    an_save_txt(out_txt, content)



if __name__ == '__main__':
    # Script entry point: scrape the single hard-coded chapter with Selenium
    # and append it to the configured output file.
    get_chapter(out_txt)