# -*- coding: utf-8 -*-
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#作者：cacho_37967865
#博客：https://blog.csdn.net/sinat_37967865
#文件：get_Web_page.py
#日期：2023-04-22
#备注：Python爬虫爬取章节详情页分页的文章
章节列表，章节详情通过 BeautifulSoup处理
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import requests
import random
import os
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
from pycacho.cachobase.file_deal import an_save_txt,ann_save_txt
from pycacho.cachobase.logger import Logger


# Module-level logger (project-local Logger wrapper).
logger = Logger("get_Web_page").get_log()

# Pool of User-Agent strings to pick from for outgoing requests.
# NOTE(review): both active entries are identical, so random.choice always
# yields the same UA — presumably more variants were intended here.
agent = ["Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
         "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36"
         #'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
         ]

headers = {
    'User-Agent': random.choice(agent)  # chosen once at import time, not per request
}

# When calling requests with verify=False (to skip SSL certificate
# verification), urllib3 floods the log with InsecureRequestWarning
# messages; the four lines below suppress those warnings and tune
# connection reuse / retry behaviour.
session = requests.Session()
session.keep_alive = False  # do not keep connections alive between requests
requests.adapters.DEFAULT_RETRIES = 5
requests.packages.urllib3.disable_warnings()

# 获取某个小说所有章节 (site-->网站地址，article_no-->文章名,total_no-->总章节数量，start_chapter-->开始章节)
def get_article(site,story_url,keyword):
    resp = requests.get(story_url, headers=headers,timeout=20,verify=False)
    resp.encoding = 'gbk'
    html = resp.text
    soup = BeautifulSoup(html,'html5lib')       # "lxml"解析器丢失数据
    chapter_link = []
    for chapter in soup.find_all('a'):
        try:
            if keyword in chapter.get('href') and '.html' in chapter.get('href'):
                url = chapter.get('href')
                chapter_link.append(site+url)
        except:
            pass
    #print(chapter_link)
    return(chapter_link)

# Scrape one chapter's two content pages and append them to a text file.
def get_chapter(out_txt, chapter_url, n):
    """Open *chapter_url* in Chrome and append both pages of the chapter
    body (element id='chapterinfo') to *out_txt* via an_save_txt.

    Args:
        out_txt: Path of the output .txt file (appended to).
        chapter_url: URL of the chapter page to scrape. Bug fix: the
            original ignored this parameter and always loaded a
            hard-coded listing URL.
        n: Unused; kept for interface compatibility with callers.
    """
    driver = webdriver.Chrome()
    try:
        driver.get(chapter_url)
        driver.maximize_window()
        driver.implicitly_wait(10)  # poll up to 10 s for elements to appear
        time.sleep(5)  # extra settle time for JS-rendered content
        # Page 1 of the chapter body.
        soup = BeautifulSoup(driver.page_source, 'lxml')
        content = soup.find(id='chapterinfo').get_text()
        an_save_txt(out_txt, content)
        # Follow the in-page "【2】" pagination link to the second page.
        # (find_element_by_link_text() was removed after Selenium 4.0.0,
        # hence the By.LINK_TEXT form.)
        driver.find_element(By.LINK_TEXT, "【2】").click()
        soup1 = BeautifulSoup(driver.page_source, 'lxml')
        content1 = soup1.find(id='chapterinfo').get_text()
        an_save_txt(out_txt, content1)
    finally:
        # Bug fix: the browser was never closed, leaking a Chrome process
        # (and its driver) on every call and on any exception.
        driver.quit()


if __name__ == '__main__':
    # Site root; chapter pages look like /1/1823_1/ (1/1823: chapter id, _1: page number).
    url = 'https://www.7yydstxt226.com'
    story_id = '1-1823'                  # novel id (actual URL path: 1/1823)
    start_chapter = 67543596             # first chapter id (int); currently unused

    # Bug fix: the original built the path as 'F:\PythonProject\Python\\' + ...,
    # which only worked because '\P' happens not to be an escape sequence.
    # A raw string plus os.path.join is safe regardless of the folder name.
    out_txt = os.path.join(r'F:\PythonProject\Python', story_id + '.txt')
    if os.path.exists(out_txt):
        os.remove(out_txt)  # start each run with a fresh output file
    get_chapter(out_txt, 'https://www.7yydstxt226.com/1/1540/19979.html', 1)
