import os
import hashlib
from com.lanson.db.VideoDAO import VideoDAO
import subprocess
import json
from math import ceil
import codecs

def bytes_to_mb(size_bytes):
    """Convert a size in bytes to megabytes, rounded to 2 decimal places.

    Bug fix: the original applied ceil() before round(..., 2), which forced
    the result to a whole number and made the 2-decimal rounding a no-op.
    """
    return round(size_bytes / (1024 * 1024), 2)

def seconds_to_minutes(duration_seconds):
    """Convert a duration in seconds to minutes, rounded to 2 decimal places."""
    minutes = duration_seconds / 60
    return round(minutes, 2)

def get_video_metadata(file_path):
    """Probe a video file with ffprobe and return its size and duration.

    Args:
        file_path: path to the video file to probe.

    Returns:
        A ``(size, duration)`` tuple of strings such as ``"12.5Mb"`` and
        ``"3.25Min"``, or ``(None, None)`` if ffprobe fails, is not
        installed, or its output cannot be parsed.

    Bug fix: the original implicitly returned ``None`` on error, which made
    the caller's two-value unpack raise TypeError. It also only caught
    CalledProcessError, so a missing ffprobe binary or malformed JSON
    crashed the scan.
    """
    cmd = [
        'ffprobe',
        '-v', 'error',
        '-select_streams', 'v:0',  # only the first video stream
        # NOTE(review): ffprobe section entries are usually ':'-separated
        # (e.g. 'format=size,duration:stream=width,height') — confirm this
        # string is accepted by the ffprobe version in use.
        '-show_entries', 'format=size,duration,streams=width,height',
        '-of', 'json',  # emit JSON for easy parsing
        file_path
    ]

    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, text=True)
        metadata = json.loads(output)
        # File size in megabytes.
        size = str(bytes_to_mb(int(metadata['format'].get('size', 0)))) + 'Mb'
        # Duration in minutes.
        duration = str(seconds_to_minutes(float(metadata['format']['duration']))) + 'Min'
        return size, duration
    except subprocess.CalledProcessError as e:
        print(f"Error running ffprobe on file {file_path}: {e}")
    except (OSError, json.JSONDecodeError, KeyError, ValueError) as e:
        # ffprobe missing, unparseable JSON, or unexpected/absent fields.
        print(f"Error parsing ffprobe output for file {file_path}: {e}")
    return None, None

def calc_hash_value(file_path):
    """Compute the BLAKE2b hash (32-byte digest) of a file.

    Reads the file in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them into memory.

    Returns:
        The hex digest string, or None if the file cannot be read.
    """
    digest = hashlib.blake2b(digest_size=32)
    try:
        with open(file_path, "rb") as stream:
            while chunk := stream.read(4096):
                digest.update(chunk)
    except IOError as e:
        print(f"Error opening or reading file {file_path}: {e}")
        return None
    return digest.hexdigest()

def generate_video_info_tinydb(folder_path, dao=None, file_extensions=None):
    """Recursively scan folder_path and batch-insert video info into TinyDB.

    For every file matching one of ``file_extensions``, records its name,
    absolute path, BLAKE2b hash, duration and size, flushing to the DAO in
    batches of 1000.

    Args:
        folder_path: root directory to walk.
        dao: data-access object exposing ``batch_insert(list)`` and
            ``close()``; a fresh ``VideoDAO`` is created when None.
        file_extensions: tuple of lowercase extensions to accept; defaults
            to common video formats.

    Bug fix: ``get_video_metadata`` can fail and return None, which the
    original unpacked unconditionally (TypeError). Also reordered so the
    (cheap-to-fail) hash is checked before spawning an ffprobe subprocess.
    """
    if dao is None:
        dao = VideoDAO()
    if file_extensions is None:
        file_extensions = ('.mp4', '.avi', '.mkv', '.mov', '.wmv', '.ts')

    batch_size = 1000
    try:
        video_info_list = []
        for root, _dirs, files in os.walk(folder_path):
            for file in files:
                if not file.lower().endswith(file_extensions):
                    continue
                file_path = os.path.join(root, file)
                hash_value = calc_hash_value(file_path)
                if hash_value is None:
                    # Unreadable file: skip without probing metadata.
                    continue
                metadata = get_video_metadata(file_path)
                # Guard against a failed probe (returns None / falsy).
                size, duration = metadata if metadata else (None, None)
                video_info_list.append({
                    "filename": str(file),
                    "url": os.path.abspath(file_path),
                    "hash": hash_value,
                    "duration": duration,
                    "size": size
                })
                # Flush a full batch, then start collecting the next one.
                if len(video_info_list) >= batch_size:
                    dao.batch_insert(video_info_list)
                    video_info_list = []
        # Flush the final, possibly partial, batch.
        if video_info_list:
            dao.batch_insert(video_info_list)
    except Exception as e:
        print(f"Error processing folder {folder_path}: {e}")
    finally:
        dao.close()  # ensure the database connection is closed

# Example usage — guarded so importing this module does not trigger a full
# disk scan (the original ran unconditionally at import time).
if __name__ == "__main__":
    folder_to_scan = "D:\\test"
    generate_video_info_tinydb(folder_to_scan)
