import requests
from bs4 import BeautifulSoup
import re
from urllib.parse import urlparse

# URL of the target listing page; replace this link to crawl a different page.
# (AO3 search sorted by kudos, language zh, tag "Sephiroth/Cloud Strife".)
url = "https://archiveofourown.org/works?work_search%5Bsort_column%5D=kudos_count&work_search%5Bother_tag_names%5D=&work_search%5Bexcluded_tag_names%5D=&work_search%5Bcrossover%5D=&work_search%5Bcomplete%5D=&work_search%5Bwords_from%5D=&work_search%5Bwords_to%5D=&work_search%5Bdate_from%5D=&work_search%5Bdate_to%5D=&work_search%5Bquery%5D=&work_search%5Blanguage_id%5D=zh&commit=Sort+and+Filter&tag_id=Sephiroth*s*Cloud+Strife"


def returnlist(url):
    """Return absolute URLs of the first few pagination links on an AO3 listing page.

    Parameters:
        url: absolute URL of an AO3 search/listing page.

    Returns:
        List of absolute pagination URLs. Empty when the page has no
        pagination bar (e.g. the results fit on a single page) — the
        original code raised AttributeError in that case.

    Side effects: prints each extracted link and a final count.
    """
    headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
    }
    response = requests.get(url, headers=headers, timeout=30)
    soup = BeautifulSoup(response.content, 'html.parser')

    # The pagination bar is an <ol> carrying these exact attributes.
    ol_tag = soup.find('ol', {'role': 'navigation', 'aria-label': 'Pagination', 'class': 'pagination actions', 'title': 'pagination'})
    if ol_tag is None:
        # Single page of results: nothing to paginate over.
        print('Total extracted hrefs: 0')
        return []

    baselink = 'https://archiveofourown.org'
    n = 2  # number of pagination links to extract
    li_links = []
    # limit is n+1 because the first <li> is the leading control element,
    # not a page link.
    for li in ol_tag.find_all('li', limit=n + 1):
        a_tag = li.find('a', href=True)
        if a_tag:
            link = baselink + a_tag['href']
            print(link)
            li_links.append(link)

    print(f'Total extracted hrefs: {len(li_links)}')
    return li_links


def _epub_filename(epub_link):
    """Derive a safe local filename from an EPUB download URL.

    e.g. 'https://archiveofourown.org/downloads/56210797/Title.epub?updated_at=1728886504'
    yields 'Title.epub'. Characters illegal in filenames are stripped
    and the '.epub' suffix is guaranteed.
    """
    # urlparse already separates the query string, so the path's last
    # segment is the raw filename (the original also re-split on '?',
    # which was redundant).
    raw_filename = urlparse(epub_link).path.split('/')[-1]
    # Remove characters that are illegal in filenames.
    base_filename = re.sub(r'[<>:"/\\|?*]', '', raw_filename)
    if not base_filename.endswith('.epub'):
        base_filename += '.epub'  # ensure the extension
    return base_filename


def prase(url):
    """Fetch an AO3 listing page, visit each work linked from a heading,
    and download every EPUB offered on the work's page into the current
    directory.

    Parameters:
        url: absolute URL of an AO3 search/listing page.

    Side effects: writes ``*.epub`` files to the working directory and
    prints a progress line per download plus a final summary.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
    }
    baselink = 'https://archiveofourown.org'

    response = requests.get(url, headers=headers, timeout=30)
    soup = BeautifulSoup(response.content, 'html.parser')

    # Work links live inside elements with class "heading"; collect the
    # (href, title) pairs so each work page can be visited in turn.
    works_links = [
        (a['href'], a.text)
        for a in soup.find_all('a', href=True)
        if a['href'].startswith('/works/') and a.find_parent(class_="heading")
    ]

    for href, title in works_links:
        full_url = baselink + href
        work_response = requests.get(full_url, headers=headers, timeout=30)
        work_soup = BeautifulSoup(work_response.content, 'html.parser')

        # The download menu labels the EPUB link with the text "EPUB".
        for epub_a in work_soup.find_all('a', href=True):
            if 'EPUB' not in epub_a.text:
                continue
            epub_link = baselink + epub_a['href']
            epub_response = requests.get(epub_link, headers=headers, timeout=30)
            base_filename = _epub_filename(epub_link)
            # Write the downloaded bytes to disk.
            with open(base_filename, 'wb') as epub_file:
                epub_file.write(epub_response.content)
                print(f'Downloaded {base_filename} from {epub_link}')

    print("All available EPUB files have been downloaded.")
# Driver: collect pagination links from the first results page, then
# scrape the first page itself and every discovered pagination page.
# (The original dead `li_links = []` assignment, immediately overwritten
# by the call below, has been removed.)
li_links = returnlist(url)
prase(url)
for link in li_links:
    prase(link)