import os
import re
import time
import random
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse

def extract_image_links_from_url(url, referer=None):
    """Fetch a web page over HTTP and extract all image links found in it.

    Args:
        url: The page URL to fetch.
        referer: Optional Referer header value. When omitted, the target
            site's own origin is used so the request looks like in-site
            navigation rather than a cross-site hotlink.

    Returns:
        A list of absolute image URLs, or [] on any fetch/parse error.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'none',
        'Sec-Fetch-User': '?1',
        'Cache-Control': 'max-age=0'
    }

    if referer:
        headers['Referer'] = referer
    else:
        # No referer supplied: fall back to the target site's own origin.
        parsed_url = urlparse(url)
        headers['Referer'] = f"{parsed_url.scheme}://{parsed_url.netloc}/"

    try:
        # Random delay to mimic human browsing and soften rate limiting.
        time.sleep(random.uniform(1, 3))

        # Use the session as a context manager so its connection pool is
        # closed deterministically (the original leaked the Session).
        with requests.Session() as session:
            response = session.get(url, headers=headers, timeout=15)
            response.raise_for_status()
            return parse_html_for_images(response.text, url)
    except Exception as e:
        # Broad catch is the established contract here: any failure is
        # logged and reported to callers as an empty result.
        print(f"Error fetching URL: {e}")
        return []

def extract_image_links_from_file(file_path):
    """Extract image links from a local HTML file.

    Args:
        file_path: Path to an HTML file on disk (decoded as UTF-8).

    Returns:
        A list of image URLs found in the file, or [] on any error.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as handle:
            markup = handle.read()

        # Relative links in the file resolve against its own directory.
        directory = os.path.dirname(os.path.abspath(file_path))
        return parse_html_for_images(markup, f"file://{directory}")
    except Exception as e:
        print(f"Error reading file: {e}")
        return []

def parse_html_for_images(html_content, base_url):
    """Parse HTML content and collect image URLs.

    Sources scanned: <img src=...> tags, url(...) references inside
    <style> blocks, and url(...) references in inline style attributes.

    Args:
        html_content: Raw HTML markup.
        base_url: Base URL used to resolve relative links.

    Returns:
        A de-duplicated list of absolute image URLs (unordered).
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    image_urls = set()

    # Excluding ')' from the character class keeps each match confined to a
    # single url(...) token. The previous pattern ([^\'"]+) could swallow
    # text across two unquoted url(...) occurrences on the same line,
    # producing one merged bogus URL instead of two valid ones.
    css_url_pattern = re.compile(r'url\([\'"]?([^\'")]+)[\'"]?\)')

    def _collect_css_urls(css_text):
        # Add image-looking url(...) references from one CSS snippet.
        for candidate in css_url_pattern.findall(css_text):
            if is_image_url(candidate):
                image_urls.add(urljoin(base_url, candidate))

    # <img src="..."> tags.
    for img in soup.find_all('img'):
        src = img.get('src')
        if src:
            # Resolve relative URLs against the page's base URL.
            image_urls.add(urljoin(base_url, src))

    # Background images declared inside <style> blocks.
    for style in soup.find_all('style'):
        if style.string:
            _collect_css_urls(style.string)

    # Background images declared in inline style="..." attributes.
    for tag in soup.find_all(lambda t: t.has_attr('style')):
        _collect_css_urls(tag['style'])

    return list(image_urls)

def is_image_url(url):
    """Heuristically decide whether *url* points at an image.

    Checks, in order: a known image file extension, image-related words in
    the path, a known image-host URL shape, and image-related words in the
    query string.

    Args:
        url: Absolute or relative URL string.

    Returns:
        True if the URL looks like an image link, False otherwise.
    """
    parsed = urlparse(url)
    path = parsed.path.lower()

    # Known image file extensions; endswith accepts a tuple of options.
    if path.endswith(('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.svg')):
        return True

    # Image-related keywords anywhere in the (already lower-cased) path.
    if any(word in path for word in ('image', 'img', 'photo', 'picture', 'pic')):
        return True

    # Shape used by some image hosts, e.g.
    # pic.dmjnb.com/pic/6d399fba5e5e4a4977436ffa4b1bd5b6
    if re.search(r'pic\.[^/]+/pic/[a-zA-Z0-9]{8,}', url):
        return True

    # Image-related keywords in the query string.
    query = parsed.query.lower()
    return any(word in query for word in ('image', 'img', 'photo', 'pic'))

def get_filename_from_url(url):
    """Derive a file name for an image URL.

    Uses the last path component when it looks like a file name (contains a
    dot); otherwise falls back to an id-like segment extracted from the URL,
    and finally to a timestamp-plus-random name.

    Args:
        url: The image URL.

    Returns:
        A non-empty file name string (synthesized names use a .jpg suffix).
    """
    parsed_url = urlparse(url)
    filename = os.path.basename(parsed_url.path)

    # No usable file name in the path: synthesize one.
    if not filename or '.' not in filename:
        # Try to reuse an id-like final path segment, e.g.
        # https://pic.dmjnb.com/pic/6d399fba5e5e4a4977436ffa4b1bd5b6
        match = re.search(r'/([a-zA-Z0-9]{8,})(?:\?|$)', url)
        if match:
            filename = f"image_{match.group(1)}.jpg"
        else:
            # Last resort: unique-ish name from clock time plus randomness.
            # Uses the module-level `time` import; the original re-imported
            # time locally here, redundantly shadowing it.
            filename = f"image_{int(time.time())}_{random.randint(1000, 9999)}.jpg"

    return filename

def get_extension_from_content_type(content_type):
    """Map an HTTP Content-Type value to an image file extension.

    Args:
        content_type: The Content-Type header value (matched
            case-insensitively by substring).

    Returns:
        A dotted extension string; '.jpg' when the type is unrecognized.
    """
    lowered = content_type.lower()
    # First matching token wins, mirroring the original if/elif ordering.
    for token, extension in (
        ('jpeg', '.jpg'),
        ('jpg', '.jpg'),
        ('png', '.png'),
        ('gif', '.gif'),
        ('webp', '.webp'),
        ('bmp', '.bmp'),
        ('svg', '.svg'),
    ):
        if token in lowered:
            return extension
    # Unknown content type: default to jpg.
    return '.jpg'