import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
import time

def download_large_images(url, save_path, index: str):
    """Download every sufficiently large <img> linked from *url* into *save_path*.

    Parameters
    ----------
    url : str
        Page whose ``<img>`` tags are scanned.
    save_path : str
        Directory to create and fill. If it already exists, the page is
        treated as already fetched and nothing is downloaded.
    index : str
        Currently unused (the URL filter that used it is commented out);
        kept so existing callers keep working.

    Returns
    -------
    int
        Number of images written (0 when skipping an existing directory).
    """
    # An existing directory marks this page as already processed.
    if os.path.exists(save_path):
        print(f'{save_path} already exists, we will not download {url}.')
        return 0  # keep the return type consistent with the count below

    os.makedirs(save_path)

    # Fetch the page; a timeout prevents a dead host from hanging the crawl.
    response = requests.get(url, timeout=100)
    response.raise_for_status()  # raise on HTTP errors

    # Parse the HTML document and collect all image tags.
    soup = BeautifulSoup(response.text, 'html.parser')

    count = 0
    for img in soup.find_all('img'):
        src = img.get('src')
        if not src:
            continue

        # Resolve a possibly relative src against the page URL.
        img_url = urljoin(url, src)
        # if index not in img_url:
        #     continue

        try:
            img_response = requests.get(img_url, stream=True, timeout=100)
            img_response.raise_for_status()
            # time.sleep(0.1)

            # Skip small images. NOTE: the threshold is 1 KiB (1 * 1024
            # bytes), and images without a Content-Length header default
            # to 0 and are therefore skipped as well.
            if int(img_response.headers.get('content-length', 0)) > 1 * 1024:
                img_name = os.path.basename(urlparse(img_url).path)
                if not img_name:
                    # URL path ends in '/': no usable file name to save under.
                    continue
                img_path = os.path.join(save_path, img_name)
                print(f'Downloading {img_name} to {save_path}')
                # Stream the body to disk in chunks to bound memory use.
                with open(img_path, 'wb') as f:
                    for chunk in img_response.iter_content(chunk_size=8192):
                        f.write(chunk)

                count += 1
        except requests.RequestException as e:
            print(f"Failed to download {img_url}: {e}")

    return count



import requests
from bs4 import BeautifulSoup

def search_target(url, target):
    """Fetch *url* and return the links whose anchor text contains *target*.

    Parameters
    ----------
    url : str
        Page to scan for ``<a href=...>`` links.
    target : str
        Substring that must appear in the link's text.

    Returns
    -------
    list[dict]
        ``{"title": "[<link text>]", "download_url": <absolute URL>}`` per
        match. On any failure the error is printed and ``[]`` is returned
        (best-effort boundary — callers just see no results).
    """
    try:
        # Browser-like User-Agent so the site does not reject the scraper.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36'
        }
        # Timeout added so an unresponsive host cannot hang the crawl.
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()  # raise on HTTP errors
        html_content = response.text

        # Parse the HTML content.
        soup = BeautifulSoup(html_content, 'html.parser')

        # Collect every anchor whose visible text contains the keyword.
        results = []
        for link in soup.find_all('a', href=True):
            href = link['href']
            text = link.get_text(strip=True)

            if target in text:
                # Resolve relative hrefs against the page URL (uses the
                # urljoin imported at the top of the file).
                full_url = urljoin(url, href)
                results.append({
                    "title": f"[{text}]",
                    "download_url": full_url
                })

        return results

    except Exception as e:
        print(f"Error occurred: {e}")
        return []

import re

def convert_file_name(title):
    """Turn a link title into a filesystem-safe name.

    Surrounding square brackets are dropped, spaces and full-width CJK
    brackets become underscores, characters illegal in file names are
    removed, and runs of underscores are collapsed and trimmed.
    """
    # Drop the outer [] wrapper, if present.
    name = title.strip("[]")

    # Spaces and full-width brackets all map to underscores.
    name = name.replace(" ", "_").replace("【", "_").replace("】", "_")

    # Remove characters that are not allowed in Windows file names.
    name = re.sub(r'[\\/:*?"<>|]', "", name)

    # Collapse consecutive underscores, then trim them from both ends.
    name = re.sub(r"_+", "_", name)
    return name.strip("_")

# 示例调用
# Example driver: crawl forum index pages and mirror matching threads.
if __name__ == "__main__":

    # The keyword does not change per page, so bind it once.
    target = "唐安琪"
    page_range = range(64, 506)
    for page in page_range:
        url = f"https://ty.c04vuk.cc/pw/thread1022.php?fid=21&page={page}"
        # Renamed from the original `result` so the loop variable below
        # no longer shadows the list it iterates.
        results = search_target(url, target)
        print(f'searching {url} for {target}')
        print(results)
        for result in results:
            file_name = convert_file_name(result['title'])
            print(f'fetching {file_name}')
            download_large_images(result['download_url'], f'./images_1024/{target}/{file_name}', f'{page}')


    # url = "https://ty.c04vuk.cc/pw/thread1022.php?fid=21&page=22"
    # target = "唐安琪"
    # result = search_target(url, target)
    # print(result)

# # https://bestgirlsexy.com/page/3/?s=tang+an+qi
# # https://bestgirlsexy.com/xiuren%e7%a7%80%e4%ba%ba%e7%bd%91-no-7585-tang-an-qi/
# if __name__ == '__main__':

#     download_large_images("https://ty.c04vuk.cc/pw/read.php?tid=7745181&fpage=22", "images_1024", "1")
    
#     exit(0)

#     base_url = 'https://bestgirlsexy.com/xiuren秀人网-no'
#     post_url = 'tang-an-qi'
#     # index_to_download = [8862, 9004, 8972, 8935, 8824, 8786, 8748]
#     # index_to_download = [8683, 8714, 8644, 8607, 8571, 8531, 8942, 8485]
#     # index_to_download = [8492, 8485, 8433, 8396, 8357, 8326]
#     # index_to_download = [8287, 8250, 8212, 8174, 8142, 8110, 8065, 8034]
#     # index_to_download = [8001, 7964, 7928, 7899, 7864, 7825, 7794, 7760]
#     # index_to_download = [7726, 7689, 7655]
#     index_to_download = [7621, 7585, 7578]
#     #015

#     # total_url = 'https://bestgirlsexy.com/tang-an-qi-%e5%94%90%e5%ae%89%e7%90%aa-vol-0016-%e5%86%85%e8%b4%ad%e6%97%a0%e6%b0%b4%e5%8d%b0-%e7%bb%b4%e4%bf%ae%e5%86%b0%e7%ae%b1/'
#     # download_large_images(total_url, f'./images/tang-an-qi', '0016')

#     for index in index_to_download:
#         url = f'{base_url}-{index}-{post_url}'
#         save_path = f'./images/{post_url}/{index}'
#         download_large_images(url, save_path, f'{index}')
