import os
import re
import requests
import urllib.parse
from pathlib import Path
from urllib.parse import urlparse
import time
import random

def download_image(url, save_path, max_retries=3):
    """Download an image from *url* to *save_path* with a retry loop.

    The response is streamed to disk in 8 KiB chunks so large images are
    never held fully in memory.

    Args:
        url: Remote image URL (query string / fragment are stripped first).
        save_path: Local destination path; its directory is created if needed.
        max_retries: Number of attempts before giving up.

    Returns:
        True when a file was written and accepted, False after all retries fail.
    """
    # Loop-invariant: build the browser-like headers once, not per attempt.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }

    # Strip the query string and fragment before requesting.
    # NOTE(review): this breaks signed/tokenized CDN URLs that require their
    # query parameters — confirm this stripping is intentional.
    clean_url = url.split('?')[0].split('#')[0]

    for attempt in range(max_retries):
        try:
            response = requests.get(clean_url, headers=headers, timeout=30, stream=True)
            response.raise_for_status()

            # Warn (but do not abort) when the server reports a non-image type.
            content_type = response.headers.get('content-type', '').lower()
            if not any(img_type in content_type for img_type in ('image', 'jpeg', 'png', 'gif', 'webp', 'svg')):
                print(f"⚠️ 警告: {url} 可能不是图片文件 (Content-Type: {content_type})")

            # os.path.dirname() returns '' for a bare filename, and
            # os.makedirs('') raises FileNotFoundError — guard against it.
            target_dir = os.path.dirname(save_path)
            if target_dir:
                os.makedirs(target_dir, exist_ok=True)

            # Stream the body to disk chunk by chunk.
            with open(save_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)

            # Heuristic sanity check: a tiny file is usually an error page,
            # not a real image — retry if attempts remain.
            file_size = os.path.getsize(save_path)
            if file_size < 100:
                print(f"⚠️ 警告: 下载的文件太小 ({file_size} bytes)，可能不是有效图片")
                if attempt < max_retries - 1:
                    continue

            print(f"✓ 成功下载: {url} -> {save_path} ({file_size} bytes)")
            return True

        except requests.exceptions.RequestException as e:
            print(f"✗ 下载失败 (尝试 {attempt + 1}/{max_retries}): {url} - {str(e)}")
        except Exception as e:
            # Last-resort boundary (e.g. filesystem errors); logged, not silenced.
            print(f"✗ 未知错误 (尝试 {attempt + 1}/{max_retries}): {url} - {str(e)}")

        # Shared back-off for both failure paths; randomized to avoid
        # hammering the server in lockstep.
        if attempt < max_retries - 1:
            time.sleep(random.uniform(1, 3))

    return False

def get_file_extension(url, content_type=None):
    """Pick an image file extension for *url*.

    Preference order: a recognized extension in the URL path, then a
    mapping from the Content-Type header, then 'jpg' as the default.
    """
    recognized = ('jpg', 'jpeg', 'png', 'gif', 'webp', 'svg')

    # First choice: an extension embedded in the URL path itself.
    url_path = urlparse(url).path.split('?')[0].split('#')[0]
    if '.' in url_path:
        candidate = url_path.rsplit('.', 1)[-1].lower()
        if candidate in recognized:
            return candidate

    # Second choice: derive the extension from the Content-Type header.
    # Ordered pairs preserve the original jpeg/jpg-first priority.
    if content_type:
        lowered = content_type.lower()
        for marker, extension in (
            ('jpeg', 'jpg'),
            ('jpg', 'jpg'),
            ('png', 'png'),
            ('gif', 'gif'),
            ('webp', 'webp'),
            ('svg', 'svg'),
        ):
            if marker in lowered:
                return extension

    # Nothing matched — fall back to jpg.
    return 'jpg'

def sanitize_filename(filename):
    """Make *filename* safe: replace illegal characters and cap the length.

    Characters forbidden on common filesystems become underscores, and the
    result is truncated to at most 100 characters while keeping the extension.
    """
    # Substitute every forbidden character with an underscore.
    cleaned = re.sub(r'[<>:"/\\|?*]', '_', filename)

    # Enforce the 100-character cap, trimming the stem and preserving
    # the extension.
    if len(cleaned) > 100:
        stem, suffix = os.path.splitext(cleaned)
        cleaned = stem[:100 - len(suffix)] + suffix

    return cleaned

def process_markdown_file(file_path):
    """Download remote images referenced by a markdown file and rewrite the
    links to point at local copies under an ``images/`` sibling directory.

    The file is rewritten in place (always saved as UTF-8). Links that are
    already relative, and URLs seen earlier in the same file, are skipped.
    """
    print(f"\n开始处理文件: {file_path}")

    # Read the file: UTF-8 first, then a GBK fallback for legacy files.
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()
    except UnicodeDecodeError:
        try:
            with open(file_path, 'r', encoding='gbk') as f:
                content = f.read()
        except (OSError, UnicodeDecodeError):
            # Narrowed from a bare except: read/decode failures are the
            # realistic cases here.
            print(f"✗ 无法读取文件: {file_path}")
            return

    # Downloaded images live next to the markdown file in "images/".
    file_dir = os.path.dirname(file_path)
    images_dir = os.path.join(file_dir, 'images')
    os.makedirs(images_dir, exist_ok=True)

    # Find every ![alt](url) style image link.
    image_pattern = r'!\[([^\]]*)\]\(([^)]+)\)'
    matches = re.findall(image_pattern, content)

    if not matches:
        print("未找到图片链接")
        return

    print(f"找到 {len(matches)} 个图片链接")

    # Track processed URLs so each remote image is fetched only once.
    processed_links = set()

    for i, (alt_text, image_url) in enumerate(matches):
        print(f"\n处理第 {i+1} 个图片: {image_url}")

        # Relative links are assumed to already be local.
        if not image_url.startswith(('http://', 'https://')):
            print(f"跳过相对路径链接: {image_url}")
            continue

        if image_url in processed_links:
            print(f"跳过重复链接: {image_url}")
            continue

        processed_links.add(image_url)

        # Derive a local filename from the URL path.
        parsed_url = urlparse(image_url)
        original_filename = os.path.basename(parsed_url.path)

        # Without a recognizable image extension, probe the server (HEAD)
        # for the content type and synthesize an indexed name.
        if '.' not in original_filename or not any(ext in original_filename.lower() for ext in ['.jpg', '.jpeg', '.png', '.gif', '.webp', '.svg']):
            try:
                headers = {
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
                }
                response = requests.head(image_url.split('?')[0], headers=headers, timeout=10)
                ext = get_file_extension(image_url, response.headers.get('content-type'))
                filename = f"image_{i+1}.{ext}"
            except Exception:
                # Narrowed from a bare except: the probe is best-effort;
                # fall back to a jpg name on any network/header failure.
                filename = f"image_{i+1}.jpg"
        else:
            # Keep the original name, suffixed with the index for uniqueness.
            name, ext = os.path.splitext(original_filename)
            filename = f"{name}_{i+1}{ext}"

        filename = sanitize_filename(filename)
        save_path = os.path.join(images_dir, filename)

        if download_image(image_url, save_path):
            # BUG FIX: the rewritten link must use the actual downloaded
            # filename (previously a literal "images/(unknown)" placeholder
            # was written into every link).
            relative_path = f"images/{filename}"

            # Replace the markdown link with its local equivalent.
            old_link = f"![{alt_text}]({image_url})"
            new_link = f"![{alt_text}]({relative_path})"
            content = content.replace(old_link, new_link)

            print(f"✓ 更新链接: {image_url} -> {relative_path}")

        # Random delay so requests are not fired in a tight burst.
        time.sleep(random.uniform(0.5, 1.5))

    # Write the rewritten markdown back, always as UTF-8.
    try:
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(content)
        print(f"\n✓ 文件处理完成: {file_path}")
    except Exception as e:
        print(f"✗ 保存文件失败: {file_path} - {str(e)}")

def process_directory(directory_path):
    """Run process_markdown_file on every ``*.md`` file directly inside
    *directory_path* (non-recursive)."""
    base = Path(directory_path)

    # Collect the top-level markdown files only.
    markdown_files = list(base.glob("*.md"))

    if not markdown_files:
        print(f"在目录 {directory_path} 中未找到markdown文件")
        return

    print(f"找到 {len(markdown_files)} 个markdown文件")

    # Hand each file off for link rewriting.
    for md_path in markdown_files:
        process_markdown_file(str(md_path))

def main():
    """Interactive entry point: choose a directory, then localize the images
    referenced by its markdown files."""
    print("Markdown图片下载工具")
    print("=" * 50)

    cwd = os.getcwd()
    print(f"当前目录: {cwd}")

    # Offer the working directory first; otherwise ask for a path and
    # bail out early (guard clause) when it does not exist.
    answer = input("是否处理当前目录下的所有markdown文件? (y/n): ").lower()

    if answer == 'y':
        target = cwd
    else:
        target = input("请输入要处理的目录路径: ").strip()
        if not os.path.exists(target):
            print("目录不存在!")
            print("\n处理完成!")
            return

    process_directory(target)
    print("\n处理完成!")

# Run the interactive CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()
