import os
import re
import time
import urllib.error
import urllib.parse
import urllib.request

# Name of the target PDF file to download.
target_pdf_name = "人工智能对就业市场的影响数据集.pdf"
# Raw string: "\D" is an invalid escape sequence in a normal string literal
# (SyntaxWarning on Python 3.12+, slated to become an error).
save_path = os.path.join(r"d:\D", target_pdf_name)

# Scan previously saved HTML viewer pages and extract the PDF URLs they embed.
def analyze_existing_html_files():
    """Extract moor-smile PDF URLs embedded in known local HTML files.

    The viewer pages embed the PDF location as a percent-encoded ``file=``
    query parameter; this collects and fully decodes each one.

    Returns:
        list[str]: decoded PDF URLs found across the HTML files; empty when
        no file exists or no link matches.
    """
    # Raw strings: "\D" and "\大" are invalid escape sequences in normal
    # string literals (SyntaxWarning on Python 3.12+).
    html_files = [
        r"d:\D\大学生就业去向影响因素数据集.pdf.html",
        r"d:\D\中国汽车不同车型每月销售量数据集.pdf.html",
        r"d:\D\学生社交媒体与人际关系数据集.pdf.html",
    ]

    # Percent-encoded "file=" parameter pointing at the rest/pdf endpoint.
    pdf_url_pattern = r'file=https%3A%2F%2Fmoor-smile\.shec\.edu\.cn%2Frest%2Fpdf%2F[^&]+'

    urls_found = []
    for html_file in html_files:
        if not os.path.exists(html_file):
            continue
        try:
            with open(html_file, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read()
        except OSError as e:
            print(f"分析文件 {html_file} 时出错: {e}")
            continue

        for match in re.findall(pdf_url_pattern, content):
            # Strip the leading "file=" (prefix slice, not replace(), so an
            # interior "file=" in the URL can never be clobbered), then fully
            # decode.  The previous version only undid %3A/%2F and left every
            # other escape — e.g. the encoded filename — in place.
            encoded_url = match[len('file='):]
            urls_found.append(urllib.parse.unquote(encoded_url))

    return urls_found

# Pull the date and UUID path segments out of previously discovered PDF URLs.
def extract_patterns_from_urls(urls):
    """Collect the unique date and UUID path segments from PDF URLs.

    The expected path layout is ``.../rest/pdf/<date>/<uuid>/<filename>``.

    Args:
        urls: iterable of moor-smile PDF URL strings.

    Returns:
        tuple[set[str], set[str]]: (unique date strings, unique UUID strings).
    """
    segment_re = re.compile(
        r'https://moor-smile\.shec\.edu\.cn/rest/pdf/(\d{6,8})/([a-f0-9-]+)/'
    )

    date_patterns = set()
    uuid_patterns = set()
    for candidate in urls:
        hit = segment_re.search(candidate)
        if hit is None:
            continue
        date_patterns.add(hit.group(1))
        uuid_patterns.add(hit.group(2))

    return date_patterns, uuid_patterns

# Try to download the target PDF by combining URL fragments recovered from
# previously saved HTML viewer pages.
def try_download_pdf():
    """Attempt to download the target PDF by brute-forcing candidate URLs.

    Combines every date/UUID pair recovered from the local HTML files into a
    candidate URL and downloads the first one that responds with HTTP 200,
    saving it to the module-level ``save_path``.

    Returns:
        bool: True when the PDF was downloaded and saved, False otherwise.
    """
    print(f"尝试下载文件: {target_pdf_name}")
    print(f"保存路径: {save_path}")

    # Mine date/UUID path segments from the HTML files we already have.
    existing_urls = analyze_existing_html_files()
    date_patterns, uuid_patterns = extract_patterns_from_urls(existing_urls)

    print(f"\n从现有文件中提取的信息:")
    print(f"找到的URL数量: {len(existing_urls)}")
    print(f"唯一日期模式: {date_patterns}")
    print(f"唯一UUID模式: {uuid_patterns}")

    # Only attempt downloads when both URL fragments were recovered.
    if date_patterns and uuid_patterns:
        # The server expects the filename percent-encoded in the URL path.
        encoded_pdf_name = urllib.parse.quote(target_pdf_name)

        # Browser-like User-Agent; hoisted out of the loop (invariant).
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'}

        # Try every (date, uuid) combination until one succeeds.
        for date in date_patterns:
            for uuid in uuid_patterns:
                pdf_url = f"https://moor-smile.shec.edu.cn/rest/pdf/{date}/{uuid}/{encoded_pdf_name}"
                print(f"\n尝试URL: {pdf_url}")

                try:
                    req = urllib.request.Request(pdf_url, headers=headers)
                    with urllib.request.urlopen(req, timeout=10) as response:
                        if response.status == 200:
                            print(f"找到文件！状态码: {response.status}")

                            # Stream in chunks so a large PDF is never held
                            # entirely in memory (the old code used read()).
                            with open(save_path, 'wb') as out_file:
                                while True:
                                    chunk = response.read(64 * 1024)
                                    if not chunk:
                                        break
                                    out_file.write(chunk)

                            # Confirm the file actually landed on disk.
                            if os.path.exists(save_path):
                                file_size = os.path.getsize(save_path) / 1024 / 1024  # bytes -> MB
                                print(f"PDF文件下载成功！")
                                print(f"保存路径: {save_path}")
                                print(f"文件大小: {file_size:.2f} MB")
                                return True
                            print("文件保存失败。")
                            return False
                        # urlopen raises HTTPError for 4xx/5xx, so this
                        # branch only covers unusual non-200 successes.
                        print(f"URL不存在，状态码: {response.status}")
                except urllib.error.HTTPError as e:
                    print(f"HTTP错误: {e.code}")
                except urllib.error.URLError as e:
                    print(f"URL错误: {e.reason}")
                except Exception as e:
                    print(f"下载时发生错误: {e}")

                # Throttle so we do not hammer the server.
                time.sleep(1)

    # All candidate URLs failed (or no fragments were found).
    print("\n所有URL尝试都失败了。")
    print("可能的原因:")
    print("1. 文件可能不存在或URL格式不同")
    print("2. 服务器可能需要特定的访问权限")
    print("3. UUID或日期部分不正确")

    # "\\D" replaces the invalid "\D" escape; runtime output is unchanged.
    print("\nD:\\D目录中未找到与'人工智能对就业市场的影响数据集'相关的HTML文件。")
    print("要成功下载此文件，您需要:")
    print("1. 确认此PDF文件确实存在")
    print("2. 找到包含此PDF链接的HTML文件，并将其放在D:\\D目录中")
    print("3. 或者获取正确的PDF文件URL")

    return False

if __name__ == "__main__":
    # Script entry point: run the download and print follow-up instructions
    # on failure.  "\\D" replaces the invalid "\D" escape sequence; the
    # printed text is unchanged.
    success = try_download_pdf()
    if not success:
        print("\n如果您有对应的HTML文件，请将其放在D:\\D目录下，然后重新运行此脚本。")
        print("或者，如果您知道正确的URL，请直接修改脚本中的URL格式。")