import json
import re
import os
import urllib.request
from urllib.parse import urlparse
import datetime
import time

# Make urllib send a browser-like User-Agent on every request so the
# remote server does not reject downloads coming from a plain script.
_ua_header = ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36')
opener = urllib.request.build_opener()
opener.addheaders = [_ua_header]
urllib.request.install_opener(opener)

# Load the exported post dump; the downstream code expects the structure
# data['data']['list'][i]['content'] (HTML snippets embedding video URLs).
with open('classPhotoByUserId.json', 'r', encoding='utf-8') as fp:
    data = json.load(fp)

# Collect every video URL referenced by the posts.
video_urls = []

# Each post's HTML embeds its videos as data-url="...mp4" attributes.
# Compile the pattern once instead of re-parsing it for every post.
_mp4_pattern = re.compile(r'data-url="([^"]+\.mp4)"')
for item in data['data']['list']:
    video_urls.extend(_mp4_pattern.findall(item['content']))

# De-duplicate while preserving first-seen order: list(set(...)) would make
# the download order (and the 1/N progress numbering) vary between runs.
video_urls = list(dict.fromkeys(video_urls))

print(f"找到 {len(video_urls)} 个唯一的视频URL")

downloaded_count = 0    # videos present on disk after this run (newly fetched or pre-existing)
failed_downloads = []   # (url, error message) pairs, reported in the summary

if video_urls:
    # Download every video, skipping files already present on disk so
    # the script can be re-run to resume an interrupted session.
    for i, url in enumerate(video_urls):
        try:
            # Derive the local filename from the URL path; fall back to an
            # index-based name when the path has no basename (e.g. ends in '/')
            # — urlretrieve would otherwise fail on an empty filename.
            parsed_url = urlparse(url)
            filename = os.path.basename(parsed_url.path) or f"video_{i}.mp4"

            # Skip files downloaded on a previous run.
            if os.path.exists(filename):
                print(f"文件已存在，跳过下载: {filename}")
                downloaded_count += 1
                continue

            # Download the video. urlretrieve goes through the globally
            # installed opener, so the User-Agent header set above applies.
            print(f"正在下载视频 {i+1}/{len(video_urls)}: {filename}")
            urllib.request.urlretrieve(url, filename)
            print(f"✓ 已下载 {i+1}/{len(video_urls)}: {filename}")
            downloaded_count += 1
            # Brief pause to avoid hammering the server with rapid requests.
            time.sleep(0.5)
        except Exception as e:
            # Keep going on individual failures; report them all at the end.
            print(f"✗ 下载失败 {url}: {e}")
            failed_downloads.append((url, str(e)))

    print(f"\n下载完成统计:")
    print(f"总共找到: {len(video_urls)} 个视频")
    print(f"成功下载: {downloaded_count} 个视频")
    print(f"下载失败: {len(failed_downloads)} 个视频")

    if failed_downloads:
        print("\n失败的下载:")
        for url, error in failed_downloads:
            print(f"  {url}: {error}")
else:
    print("未找到视频文件")