# _*_ coding utf-8 _*_
# @Time :2025/05/28 23:01
# @Author:lan ling

import os
from typing import Any
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

# Raw Cookie header captured from a browser session for www.shaonianshuwu.com.
# NOTE(review): contains a PHPSESSID — presumably expires; refresh before running.
cookie_temp = "__itrace_wid=b7e1666d-31df-47bd-1ad9-28ba6f259f68; PHPSESSID=4r92q1jolpe5a5r6p0tsorouom; __itrace_wid=bef8592a-ab65-4819-bdaf-47a6edb10b11"

# Raw Cookie header for qidian.com (URL-encoded tracking/session values).
# Currently unused: the qidian_get_catalogue call site has the cookie line commented out.
qidian_temp_cookie = "e1=%7B%22l6%22%3A%221%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A3%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_P_xiangqing%22%2C%22eid%22%3A%22qd_H_mulublock%22%7D; e2=%7B%22l6%22%3A%221%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A3%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_P_xiangqing%22%2C%22eid%22%3A%22qd_H_mulublock%22%7D; _csrfToken=jZjS4CLB1DDHgwHakoX5WXIplfYPiE9ed1Yp01Bw; newstatisticUUID=1748444145_1777122989; fu=2082835264; traffic_utm_referer=https%3A//cn.bing.com/; _ga=GA1.1.847951136.1748444149; supportwebp=true; x-waf-captcha-referer=; _ga_FZMMH98S83=GS2.1.s1748444149$o1$g1$t1748446306$j31$l0$h0; _ga_PFYW0QLV3P=GS2.1.s1748444149$o1$g1$t1748446306$j31$l0$h0; w_tsfp=ltvuV0MF2utBvS0Q7aLtkE2tFzwufTs4h0wpEaR0f5thQLErU5mB2YJ+uMj2OHbc4cxnvd7DsZoyJTLYCJI3dwMcE8WRdopC3V/EldNwiocTB0ZiRZrYWwVNdu9y6TNHe3hCNxS00jA8eIUd379yilkMsyN1zap3TO14fstJ019E6KDQmI5uDW3HlFWQRzaLbjcMcuqPr6g18L5a5Wza5FuuKF4mA+lG2RSU1C8aDSslsxe4c+oONE78dsqmSqA="

# Raw Cookie header for the biquge site (display-preference cookies only).
# Currently unused: the biquge_get_catalogue call site has the cookie line commented out.
biquge_temp_cookie = "fontFamily=null; fontColor=null; fontSize=null; bg=null; Hm_lvt_b1f1cdea3b4cf4604cb8940d5d2ce1a7=1724853099; zh_choose=s"

# Module-level cookie jar, populated by get_cookie() inside the catalogue functions.
cookies = {}

def get_cookie(cookie_temp_str):
    """Parse a raw ``Cookie`` header string into a name -> value dict.

    :param cookie_temp_str: a browser-copied Cookie header,
        e.g. ``"a=1; b=2"``.
    :return: dict mapping cookie names to cookie values.
    """
    temp_cookies = {}
    for pair in cookie_temp_str.split(';'):
        pair = pair.strip()  # drop the space after each ';' so names are clean
        if not pair:
            continue  # tolerate trailing/duplicated separators
        # Split on the FIRST '=' only: cookie values may themselves contain
        # '=' (e.g. base64 padding in the qidian w_tsfp cookie). The old
        # split('=')[1] silently truncated such values.
        name, _, value = pair.partition('=')
        temp_cookies[name] = value
    print(temp_cookies)
    return temp_cookies


# 1. Fetch the novel's table of contents: chapter titles and their URLs.
def get_catalogue(url, headers):
    """Download the catalogue page at *url* and extract all chapter links.

    :param url: catalogue page URL on www.shaonianshuwu.com.
    :param headers: HTTP headers (typically a User-Agent) for the request.
    :return: list of ``{'title': ..., 'url': ...}`` dicts; empty on failure.
    """
    # Refresh the shared cookie jar from the hard-coded session cookie.
    cookies.update(get_cookie(cookie_temp))
    response = requests.get(url=url, headers=headers, cookies=cookies)
    response.encoding = "utf-8"  # force UTF-8 decoding of the page
    chapter_info_list = []  # collected chapter title/url pairs
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, 'html.parser')
        # The chapter list lives in <div id="list">.
        list_div = soup.find('div', id="list")
        if list_div:
            for a in list_div.find_all('a'):
                href = a.get('href')
                title = a.get_text(strip=True)  # strip surrounding whitespace
                if href and title:
                    # urljoin handles relative AND absolute hrefs; the old
                    # plain concatenation produced "...com//path" for hrefs
                    # starting with '/'.
                    full_url = urljoin("https://www.shaonianshuwu.com/", href)
                    chapter_info_list.append({
                        'title': title,
                        'url': full_url
                    })
        else:
            # Fixed message: the old text referred to a nonexistent "list_dev".
            print('未找到 id="list" 的 div 标签')
    else:
        print(f"请求失败，状态码：{response.status_code}")

    return chapter_info_list


def qidian_get_catalogue(url, headers):
    """Download a catalogue page and extract chapter links from <div id="allCatalog">.

    :param url: catalogue page URL.
    :param headers: HTTP headers (typically a User-Agent) for the request.
    :return: list of ``{'title': ..., 'url': ...}`` dicts; empty on failure.
    """
    # cookies.update(get_cookie(qidian_temp_cookie))  # enable if the site requires login
    response = requests.get(url=url, headers=headers)
    response.encoding = "utf-8"  # force UTF-8 decoding of the page
    chapter_info_list = []  # collected chapter title/url pairs
    if response.status_code == 200:
        # Removed the debug print of the ENTIRE page HTML — it flooded stdout
        # on every call.
        soup = BeautifulSoup(response.text, 'html.parser')
        list_div = soup.find('div', id="allCatalog")
        if list_div:
            for a in list_div.find_all('a'):
                href = a.get('href')
                title = a.get_text(strip=True)  # strip surrounding whitespace
                if href and title:
                    # NOTE(review): base URL is shaonianshuwu.com although the
                    # function name says qidian — confirm the intended site.
                    full_url = urljoin("https://www.shaonianshuwu.com/", href)
                    chapter_info_list.append({
                        'title': title,
                        'url': full_url
                    })
        else:
            # Fixed message: the old text referred to a nonexistent "list_dev".
            print('未找到 id="allCatalog" 的 div 标签')
    else:
        print(f"请求失败，状态码：{response.status_code}")

    print(chapter_info_list)
    return chapter_info_list


def biquge_get_catalogue(url, headers):
    """Download a 22biqu.com catalogue page and extract chapter links.

    The chapter list is the SECOND ``div.section-box`` on the page (the
    first holds the latest-chapters teaser).

    :param url: catalogue page URL on www.22biqu.com.
    :param headers: HTTP headers (typically a User-Agent) for the request.
    :return: list of ``{'title': ..., 'url': ...}`` dicts; empty on failure.
    """
    response = requests.get(url=url, headers=headers, cookies=cookies)
    response.encoding = "utf-8"  # force UTF-8 decoding of the page
    chapter_info_list = []  # collected chapter title/url pairs
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, 'html.parser')
        section_boxes = soup.find_all('div', class_="section-box")
        # BUG FIX: the old `if list_div[1]` raised IndexError when fewer than
        # two section-box divs matched; guard the length instead so the
        # fallback message actually runs.
        if len(section_boxes) > 1:
            for a in section_boxes[1].find_all('a'):
                href = a.get('href')
                title = a.get_text(strip=True)  # strip surrounding whitespace
                if href and title:
                    # urljoin avoids "...com//path" when href starts with '/'.
                    full_url = urljoin("https://www.22biqu.com/", href)
                    chapter_info_list.append({
                        'title': title,
                        'url': full_url
                    })
        else:
            print('未找到第二个 class="section-box" 的 div 标签')
    else:
        print(f"请求失败，状态码：{response.status_code}")

    return chapter_info_list


# 2. For each catalogue entry, fetch every page of the chapter body and
#    append the text to one output file.
def get_content(chapter_info_list, headers,):
    """Download chapter bodies for all entries in *chapter_info_list*.

    Each chapter may span several pages; page ``i`` (i >= 2) is derived from
    the chapter URL by inserting ``/<i>`` before ``.html``. Paging stops when
    the ``<a id="A3">`` button no longer reads "下一页" (next page).

    :param chapter_info_list: list of ``{'title', 'url'}`` dicts.
    :param headers: HTTP headers for the requests.
    """
    # exist_ok replaces the racy exists()/makedirs() pair.
    os.makedirs("novel_起点小说/没钱修什么仙", exist_ok=True)

    # BUG FIX: the old path lacked the '/' after the directory, so the file
    # landed OUTSIDE the folder created above (as "没钱修什么仙没钱修什么仙.txt").
    with open("./novel_起点小说/没钱修什么仙/" + "没钱修什么仙" + ".txt", "w", encoding="utf8") as file:
        for chapter_info in chapter_info_list:
            for i in range(1, 20):  # safety cap: at most 19 pages per chapter
                if i == 1:
                    page_url = chapter_info["url"]
                else:
                    # Page i URL: insert "/<i>" before ".html".
                    part1, part2 = chapter_info["url"].split(".html", 1)
                    page_url = f"{part1}/{i}.html{part2}"

                response = requests.get(url=page_url, headers=headers)
                if response.status_code != 200:
                    continue  # skip a failed page, as the original did

                soup = BeautifulSoup(response.text, 'html.parser')
                if i == 1:
                    # Write the chapter title once, before its first page.
                    file.write(chapter_info["title"] + "\n")
                    print(chapter_info["title"] + "\n")

                content_div = soup.find('div', id="content")
                if content_div:
                    for p in content_div.find_all('p'):
                        p_content = p.get_text(strip=True)
                        # Drop in-page ads/notices.
                        if "请点击下一页" in p_content or "请大家收藏" in p_content:
                            continue
                        file.write(p_content + "\n")

                if i > 1:
                    # Stop paging when the "next" button is gone or no longer
                    # says "下一页". BUG FIX: the old code dereferenced
                    # .text on a possibly-None find() result.
                    next_btn = soup.find('a', id="A3")
                    if next_btn is None or next_btn.text != "下一页":
                        break

def biquge_get_content(chapter_info_list, headers,):
    """Download chapter bodies from 22biqu-style pages into one text file.

    Each chapter may span several pages; page ``i`` (i >= 2) is derived from
    the chapter URL by inserting ``_<i>`` before ``.html``. Paging stops when
    the ``<a id="next_url">`` link is missing, reads "下一章" (next chapter),
    or no longer contains "下一页" (next page).

    :param chapter_info_list: list of ``{'title', 'url'}`` dicts.
    :param headers: HTTP headers for the requests.
    """
    # exist_ok replaces the racy exists()/makedirs() pair.
    os.makedirs("novel_笔趣阁/高手寂寞", exist_ok=True)

    with open("./novel_笔趣阁/高手寂寞/" + "高手寂寞3" + ".txt", "w", encoding="utf8") as file:
        for chapter_info in chapter_info_list:
            for i in range(1, 20):  # safety cap: at most 19 pages per chapter
                if i == 1:
                    page_url = chapter_info["url"]
                else:
                    # Page i URL: insert "_<i>" before ".html".
                    part1, part2 = chapter_info["url"].split(".html", 1)
                    page_url = f"{part1}_{i}.html{part2}"
                    print("next url:" + page_url)

                response = requests.get(url=page_url, headers=headers)
                if response.status_code != 200:
                    continue  # skip a failed page, as the original did

                soup = BeautifulSoup(response.text, 'html.parser')
                if i == 1:
                    # Write the chapter title once, before its first page.
                    file.write(chapter_info["title"] + "\n")
                    print(chapter_info["title"] + "\n")

                content_div = soup.find('div', id="content")
                if content_div:
                    for p in content_div.find_all('p'):
                        file.write(p.get_text(strip=True) + "\n")

                # BUG FIX: when the next link was missing on page 1 (or the
                # content div was absent), the old code kept requesting pages
                # 2..19 anyway; break instead of issuing junk requests.
                next_link = soup.find('a', id="next_url")
                if next_link is None:
                    break
                label = next_link.get_text(strip=True)
                # "下一章" means the next CHAPTER — this chapter is done.
                # Anything without "下一页" (next page) also ends the chapter.
                if "下一章" in label or "下一页" not in label:
                    break


# 1. Fetch a novel's table of contents from the m.yfthg.com mobile layout.
def biquge2_get_catalogue(url, headers):
    """Download the catalogue page at *url* and extract chapter links
    from ``<ul class="chapter">``.

    :param url: catalogue page URL on m.yfthg.com.
    :param headers: HTTP headers (typically a User-Agent) for the request.
    :return: list of ``{'title': ..., 'url': ...}`` dicts; empty on failure.
    """
    # NOTE(review): this reuses cookie_temp, the shaonianshuwu session cookie,
    # against m.yfthg.com — confirm this is intentional.
    cookies.update(get_cookie(cookie_temp))
    response = requests.get(url=url, headers=headers, cookies=cookies)
    response.encoding = "utf-8"  # force UTF-8 decoding of the page
    chapter_info_list = []  # collected chapter title/url pairs
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, 'html.parser')
        chapter_ul = soup.find('ul', class_="chapter")
        if chapter_ul:
            for a in chapter_ul.find_all('a'):
                href = a.get('href')
                title = a.get_text(strip=True)  # strip surrounding whitespace
                if href and title:
                    # urljoin handles relative and absolute hrefs uniformly.
                    full_url = urljoin("https://m.yfthg.com", href)
                    chapter_info_list.append({
                        'title': title,
                        'url': full_url
                    })
        else:
            # Fixed message: the old text referred to a nonexistent "list_dev".
            print('未找到 class="chapter" 的 ul 标签')
    else:
        print(f"请求失败，状态码：{response.status_code}")

    return chapter_info_list

def biquge2_get_content(chapter_info_list, headers):
    """Download single-page chapter bodies from ``<div id="text">`` into one file.

    :param chapter_info_list: list of ``{'title', 'url'}`` dicts.
    :param headers: HTTP headers for the requests.
    """
    # exist_ok replaces the racy exists()/makedirs() pair.
    os.makedirs("novel_笔趣阁/退下让朕来", exist_ok=True)

    with open("./novel_笔趣阁/退下让朕来/" + "退下让朕来" + ".txt", "w", encoding="utf8") as file:
        for chapter_info in chapter_info_list:
            response = requests.get(url=chapter_info["url"], headers=headers)
            if response.status_code != 200:
                continue  # skip a failed chapter, as the original did
            soup = BeautifulSoup(response.text, 'html.parser')
            file.write(chapter_info["title"] + "\n")
            print(chapter_info["title"] + "\n")
            text_div = soup.find('div', id="text")
            if text_div:
                for p in text_div.find_all('p'):
                    # strip=False ON PURPOSE: the 4-space runs inside the raw
                    # text mark paragraph starts (the old comment wrongly
                    # claimed whitespace was being stripped here).
                    raw = p.get_text(strip=False)
                    # Turn each 4-space indent into a line break.
                    file.write(raw.replace("    ", "\n") + "\n")