#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @file    : main.py
import time
import urllib.parse

import requests
import copy
import os
from bs4 import BeautifulSoup

# Request headers impersonating mobile Safari on iOS; "x-requested-with"
# marks the calls as AJAX so the site's JSON endpoints respond.
# NOTE(review): "authority" is an HTTP/2 pseudo-header name — requests sends
# it as a plain header; presumably the site ignores or tolerates it.
HEADERS = {
    "authority": "www.bi02.cc",
    "accept": "application/json",
    "accept-language": "zh,en;q=0.9,zh-CN;q=0.8",
    "cache-control": "no-cache",
    "pragma": "no-cache",
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 16_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1",
    "x-requested-with": "XMLHttpRequest",
}

# All downloads land in a "result" directory next to this script.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DOWNLOAD_PATH = os.path.join(BASE_DIR, "result")


def get_hm_cookie(url):
    """Visit *url* once and return a Session primed with the site's cookies.

    :param url: the hm.html priming URL to hit before any API call
    :return: a requests.Session carrying whatever cookies the visit set
    """
    sess = requests.Session()
    sess.get(url=url, headers=HEADERS, timeout=10)
    return sess


def search(key_word):
    """Search the site for a novel by name.

    :param key_word: title (or fragment) to search for
    :return: (data, session) — ``data`` is the endpoint's parsed JSON
             (a list of result dicts, or the bare int 1 when throttled),
             or ``[]`` on any network/HTTP/parse failure. ``session``
             carries the anti-bot cookies and must be reused for the
             subsequent chapter downloads.
    """
    new_header = copy.deepcopy(HEADERS)
    # A plausible search-page referer; without it the site may reject the call.
    new_header["referer"] = urllib.parse.quote(
        f"https://www.bi02.cc/s?q={key_word}", safe="/&=:?"
    )

    # Hitting hm.html first sets the cookies the search endpoint requires.
    hm_url = urllib.parse.quote(
        f"https://www.bi02.cc/user/hm.html?q={key_word}", safe="/&=:?"
    )
    session = get_hm_cookie(hm_url)

    params = {
        "q": f"{key_word}",
    }
    try:
        response = session.get(
            "https://www.bi02.cc/user/search.html",
            params=params,
            headers=new_header,
            timeout=10,
        )
        response.raise_for_status()
        # Parse inside the try: an HTML error page here used to raise an
        # unhandled ValueError and crash the caller.
        data = response.json()
    except Exception as e:
        print(f"搜索{key_word}时失败，错误信息:{e}")
        return [], session
    return data, session


def download_by_tag(tag, href, result_path, session):
    """Fetch one chapter page and save its text as ``<title>.txt``.

    :param tag: the chapter's <a> element; its text is the chapter title
    :param href: the chapter's site-relative URL
    :param result_path: directory the .txt file is written into
    :param session: primed requests session to fetch with
    :return: None (failures are printed, not raised)
    """
    title = tag.text.strip()  # strip stray whitespace/newlines from the title
    url = f"https://www.bi02.cc{href}"
    print(f"开始下载第 {title} 章: url: {url}")
    # NOTE(review): title is used verbatim as a filename — a title containing
    # "/" or other reserved characters would break the write; confirm titles
    # are always safe or sanitize here.
    result_file_name = os.path.join(result_path, f"{title}.txt")
    try:
        # timeout added: every other request in this file bounds its wait,
        # and without one a stalled connection hangs the whole download.
        content_response = session.get(url, headers=HEADERS, timeout=10)
        if content_response.status_code != 200:
            print(f"请求失败，状态码: {content_response.status_code}, URL: {url}")
            return
        content_soup = BeautifulSoup(content_response.content, "html.parser")
        text = content_soup.find(id="chaptercontent")
        if text is None:
            print(f"未找到章节内容，可能是页面结构发生变化: {url}")
            return
        # "w" (not "w+"): we only write, never read the file back.
        with open(result_file_name, "w", encoding="utf-8") as f:
            # Split on full-width-space paragraph markers; drop the first
            # fragment and the last two (site boilerplate around the body).
            for i in text.get_text().split("　　")[1:-2]:
                f.write(f"{i}\n")
        print(f"成功下载第 {title} 章: {result_file_name}")
    except AttributeError as e:
        print(f"处理 {url} 时发生属性错误: {e}")
    except Exception as e:
        print(f"处理 {url} 时发生未知错误: {e}")
    # Brief pause between chapters to avoid hammering the site.
    time.sleep(0.2)


def download_txt(download_url, path_name, session):
    """Download every chapter of a novel from its chapter-list page.

    :param download_url: URL of the novel's chapter-list page
    :param path_name: directory name (under DOWNLOAD_PATH) for the .txt files
    :param session: primed requests session to fetch with
    :return: None (failures are printed, not raised)
    """
    target_dir = os.path.join(DOWNLOAD_PATH, path_name)
    os.makedirs(target_dir, exist_ok=True)
    try:
        page = session.get(download_url, headers=HEADERS, timeout=10)
        listing = BeautifulSoup(page.content, "html.parser")
        for anchor in listing.select("div[class='listmain'] dl dd a"):
            link = anchor["href"]
            if link != "javascript:dd_show()":
                download_by_tag(anchor, link, target_dir, session)
                continue
            # The dd_show() placeholder link stands in for extra chapters
            # hidden inside a dd_hide span — download those instead.
            for hidden in listing.select("span[class='dd_hide'] dd a"):
                download_by_tag(hidden, hidden["href"], target_dir, session)
    except Exception as e:
        import traceback

        print(traceback.format_exc())
        print(f"下载{download_url}失败，错误信息:{e}")


def run():
    """Interactive entry point: search for a novel, pick one, download it."""
    while True:
        keyword = input("请输入搜索的小说名or输入q退出:")
        if keyword.replace(" ", "").lower() == "q":
            break
        if not keyword:
            continue
        data_list, session = search(keyword)
        # The endpoint returns the bare number 1 instead of a result list
        # when it throttles/rejects the search — treat that as a failure.
        if not data_list or data_list == 1:
            print("请重试.......")
            continue
        for i, item in enumerate(data_list):
            articlename = item.get("articlename")
            author = item.get("author")
            print(f"编号：{i} 书名：{articlename}----->{author}")
        while True:
            try:
                num_book = int(input("请输入需要下载的编号:"))
            except ValueError:
                print("请输入正确的编号")
                continue
            # Explicit range check: the old data_list[num_book] silently
            # accepted negative numbers (e.g. -1 picked the LAST book).
            if 0 <= num_book < len(data_list):
                item = data_list[num_book]
                break
            print("编号超出了预期，请重新输入")

        url_list = f"https://www.bi02.cc{item.get('url_list')}"
        print(f"开始下载{url_list}")
        path_name = f"{item.get('articlename', '')}___{item.get('author', '')}"

        download_txt(url_list, path_name, session)


if __name__ == "__main__":
    run()