from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from bs4 import BeautifulSoup
from time import sleep
import os
import requests

# --- Scrape .webp image links from a page and each of its iframes ---

def _collect_webp_links(html):
    """Parse *html* and return every <img> 'src' that ends with '.webp'."""
    parsed = BeautifulSoup(html, 'html.parser')
    return [img.get('src')
            for img in parsed.find_all('img')
            if img.get('src') and img.get('src').endswith('.webp')]


def _save_links(links, path):
    """Write *links* to *path*, one per line, UTF-8 encoded."""
    with open(path, 'w', encoding='utf-8') as f:
        f.writelines(link + '\n' for link in links)


# Launch Chrome and open the user-supplied URL.
browser = webdriver.Chrome()
try:
    target_url = input("请输入url:")
    browser.get(target_url)

    # Give the page time to finish its initial dynamic rendering.
    sleep(10)

    # Snapshot of the top-level document, taken before entering any iframe.
    main_html = browser.page_source
    soup = BeautifulSoup(main_html, 'html.parser')

    # Visit every iframe, click its "卡池详情" tab, and harvest .webp links.
    iframes = browser.find_elements(By.TAG_NAME, "iframe")
    wait = WebDriverWait(browser, 3)  # max wait for the clickable span

    for i, iframe in enumerate(iframes):
        browser.switch_to.frame(iframe)
        try:
            span_element = wait.until(
                EC.element_to_be_clickable((By.XPATH, ".//span[text()='卡池详情']")))
            span_element.click()
            print(f"已点击第{i}个iframe中的'卡池详情'")

            # Let the clicked tab load its content before re-reading the DOM.
            sleep(3)
            webp_links = _collect_webp_links(browser.page_source)
            print(webp_links)
            _save_links(webp_links, f'webp_links_{i}.txt')
        except Exception as e:
            # Best-effort per iframe: report and continue with the next one.
            print(f"处理第{i}个iframe时出错: {e}")
        finally:
            # Always return to the top-level document, even if the click
            # or the extraction raised, so the next iteration starts clean.
            browser.switch_to.default_content()

    # Add a <base> tag so relative paths in the parsed soup resolve.
    # NOTE(review): href is hard-coded to one activity page rather than the
    # URL the user entered — confirm this is intentional.
    base_tag = soup.new_tag('base', href="https://www.bilibili.com/blackboard/activity-Mz9T5bO5Q3.html?id=269&type=dlc&f_source=social&from=official")
    if soup.head is not None:  # guard: a fragment/odd page may lack <head>
        soup.head.insert(0, base_tag)

    # Extract and persist the .webp links of the top-level document.
    _save_links(_collect_webp_links(main_html), 'webp_links.txt')
finally:
    # Release the browser even if scraping failed part-way through.
    browser.quit()
def download_image(url, folder):
    """Download *url* into *folder*.

    The local filename is the last path segment of the URL with any
    '@...'-style size suffix stripped (e.g. 'a.webp@104w.webp' -> 'a.webp').

    Raises:
        requests.exceptions.HTTPError: on a non-2xx HTTP response.
        requests.exceptions.RequestException: on connection/timeout errors.
    """
    local_filename = url.split('/')[-1].split('@')[0]
    local_filepath = os.path.join(folder, local_filename)

    # stream=True writes the payload in chunks instead of buffering the
    # whole image in memory; timeout= prevents an unresponsive server from
    # hanging the script forever (requests has no default timeout).
    with requests.get(url, stream=True, timeout=30) as response:
        response.raise_for_status()
        with open(local_filepath, 'wb') as f:
            for chunk in response.iter_content(chunk_size=65536):
                f.write(chunk)

def process_links(input_file, output_folder):
    """Download every sufficiently large image listed in *input_file*.

    Each line of *input_file* is a link, optionally carrying an '@WxH'-style
    size suffix (e.g. '...a.webp@104w_104h.webp'). Links whose declared
    width is >= 101 are downloaded (suffix stripped) into *output_folder*;
    everything else is skipped with a console message. HTTP failures are
    reported but do not abort the run.

    Args:
        input_file: path to the UTF-8 link list, one link per line.
        output_folder: target directory, created if missing.
    """
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(output_folder, exist_ok=True)

    # Explicit encoding: the link files are written as UTF-8 elsewhere, so
    # relying on the platform default codec would break on e.g. Windows.
    with open(input_file, 'r', encoding='utf-8') as file:
        for line in file:  # stream line by line; no need to readlines()
            link = line.strip()

            if '@' not in link:
                print(f'Skipped (no @ found): {link}')
                continue

            base_url, size_info = link.rsplit('@', 1)
            # size_info looks like '104w_104h.webp'; keep only the digits of
            # the first '_'-separated token to recover the width.
            width_str = ''.join(filter(str.isdigit, size_info.split('_')[0]))

            try:
                width = int(width_str)
            except ValueError:
                print(f'Skipped (invalid width format): {link}')
                continue

            if width < 101:
                print(f'Skipped (width < 101): {link}')
                continue

            png_link = base_url  # download the un-suffixed original
            try:
                download_image(png_link, output_folder)
                print(f'Downloaded: {png_link}')
            except requests.exceptions.HTTPError as err:
                print(f'Failed to download {png_link}: {err}')

if __name__ == "__main__":
    # Entry point: download the images collected from the first iframe.
    process_links(input_file='webp_links_0.txt', output_folder='犬夜叉')





