from pathlib import Path
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

# Session cookies captured from a real browser visit to the target site
# (analytics and ad-personalisation cookies). These are static snapshots
# and will eventually expire -- refresh them from the browser's dev tools
# if the site starts rejecting requests.
cookies = {
    'Hm_lvt_a5905f76170777c16e555e60095745df': '1714397333',
    'Hm_lpvt_a5905f76170777c16e555e60095745df': '1714397333',
    '__gads': 'ID=39d1866493dab5ed:T=1714397333:RT=1714397333:S=ALNI_Mbm4uLm6tH7w-_fmNxPtkfmi9onNA',
    '__gpi': 'UID=00000dff762968ab:T=1714397333:RT=1714397333:S=ALNI_MYB2s5HGvr7PZL36umkpznq9mAVOQ',
    '__eoi': 'ID=c0454ef4445f8432:T=1714397333:RT=1714397333:S=AA-AfjaqLTiNu4FisU1DiE9XgzZo',
    'FCNEC': '%5B%5B%22AKsRol_jKkOZoU0OUIH4xFt3QOYSmaZ5r1D8ARsbVXx25RBRy1bK2ceRiw93lfTqeK1pCqD8QmgcYwZm4cdxnUx2qyjhz0UiZ8HZQSuCVqD9SXQhT9drvU81RFDdI3P-vKrPX4n_MRkTI7QYBQ13wTzRhRDzi9hatg%3D%3D%22%5D%5D',
}

# Request headers copied from a real Chrome session so the requests look
# like a normal browser visit. The raw 'cookie' header stays commented out
# because the cookies are passed separately via the `cookies=` argument.
headers = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'accept-language': 'zh-CN,zh;q=0.9',
    'cache-control': 'no-cache',
    # 'cookie': 'Hm_lvt_a5905f76170777c16e555e60095745df=1714397333; Hm_lpvt_a5905f76170777c16e555e60095745df=1714397333; __gads=ID=39d1866493dab5ed:T=1714397333:RT=1714397333:S=ALNI_Mbm4uLm6tH7w-_fmNxPtkfmi9onNA; __gpi=UID=00000dff762968ab:T=1714397333:RT=1714397333:S=ALNI_MYB2s5HGvr7PZL36umkpznq9mAVOQ; __eoi=ID=c0454ef4445f8432:T=1714397333:RT=1714397333:S=AA-AfjaqLTiNu4FisU1DiE9XgzZo; FCNEC=%5B%5B%22AKsRol_jKkOZoU0OUIH4xFt3QOYSmaZ5r1D8ARsbVXx25RBRy1bK2ceRiw93lfTqeK1pCqD8QmgcYwZm4cdxnUx2qyjhz0UiZ8HZQSuCVqD9SXQhT9drvU81RFDdI3P-vKrPX4n_MRkTI7QYBQ13wTzRhRDzi9hatg%3D%3D%22%5D%5D',
    'pragma': 'no-cache',
    'priority': 'u=0, i',
    'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'none',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
}

# Fetch the chapter index page. A timeout keeps the script from hanging
# forever on a dead connection; raise_for_status stops us from parsing
# an HTTP error page as if it were the index.
response = requests.get(
    'https://bixuejian.5000yan.com/',
    cookies=cookies,
    headers=headers,
    timeout=30,
)
response.raise_for_status()
response.encoding = 'utf-8'
page_index = response.text

# Parse the index page and collect the per-chapter links.
soup = BeautifulSoup(page_index, "lxml")
a_list = soup.select(".paiban>li>a")

# Make sure the output directory exists before writing any chapter file
# (the original script crashed on a fresh checkout without it).
out_dir = Path("xiaoshuo")
out_dir.mkdir(parents=True, exist_ok=True)

for i in a_list:
    # Chapter detail-page URL; urljoin leaves absolute hrefs unchanged
    # and correctly resolves site-relative ones.
    detail_url = urljoin(response.url, i["href"])
    # The link text is the chapter title and doubles as the file name.
    detail_name = i.text
    file_name = detail_name + ".txt"

    # Fetch the chapter page itself.
    detail_page = requests.get(detail_url, headers=headers, timeout=30)
    detail_page.encoding = "utf-8"

    # Extract the chapter body from the ".grap" container.
    soup_detail = BeautifulSoup(detail_page.text, "lxml")
    grap = soup_detail.select(".grap")
    if not grap:
        # Layout changed or the request was blocked -- skip this chapter
        # instead of crashing the whole crawl with an IndexError.
        print(file_name + ":未找到正文")
        continue
    line = grap[0].text

    # One text file per chapter, named after the chapter title.
    with open(out_dir / file_name, "w", encoding="utf-8") as fp:
        fp.write(line)
        print(file_name + ":爬取成功")
