import cv2
import imagehash
from PIL import Image
import os
import traceback
import numpy as np
import cv2
import hashlib
from concurrent.futures import ThreadPoolExecutor

def get_video_info(video_path):
    """Return (width, height, fps) for the video at *video_path*.

    Args:
        video_path: Path to a video file readable by OpenCV.

    Returns:
        Tuple of (width: int, height: int, fps: float).

    Raises:
        IOError: If the file cannot be opened as a video. (The original
            silently returned (0, 0, 0.0) in that case.)
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        cap.release()
        raise IOError(f"Cannot open video: {video_path}")
    try:
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS)
    finally:
        # Always release the capture handle, even if a property read fails.
        cap.release()
    return (width, height, fps)

# def compute_video_hash(video_path, sample_frames=5):
#     cap = cv2.VideoCapture(video_path)
#     frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
#     hashes = []
    
#     for i in range(sample_frames):
#         pos = int(frame_count/(sample_frames+1)*(i+1))
#         cap.set(cv2.CAP_PROP_POS_FRAMES, pos)
#         ret, frame = cap.read()
#         if ret:
#             pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
#             hashes.append(imagehash.average_hash(pil_img))
    
#     cap.release()
#     return np.sum(hashes)/len(hashes) if hashes else None

def compute_video_hash(video_path, sample_frames=5):
    """Compute a coarse perceptual hash for a video by sampling frames.

    Samples *sample_frames* frames evenly spaced through the video,
    average-hashes each with imagehash, and returns the integer mean of
    the hash values.

    NOTE(review): averaging perceptual hashes as plain integers is a crude
    similarity signal (bit positions are not magnitudes) — confirm this is
    the intended comparison scheme.

    Args:
        video_path: Path to a video file readable by OpenCV.
        sample_frames: Number of frames to sample (default 5).

    Returns:
        Integer average of the sampled frame hashes, or None if the video
        cannot be opened or no frame could be read.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        cap.release()
        return None
    hash_values = []
    try:
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if frame_count <= 0:
            return None
        for i in range(sample_frames):
            # Evenly spaced positions, skipping the very start and end.
            pos = int(frame_count / (sample_frames + 1) * (i + 1))
            cap.set(cv2.CAP_PROP_POS_FRAMES, pos)
            ret, frame = cap.read()
            if ret:
                pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                hash_obj = imagehash.average_hash(pil_img)
                # Convert the hash's hex representation to an integer.
                hash_values.append(int(str(hash_obj), 16))
    finally:
        # Release the capture even if decoding raises mid-loop.
        cap.release()
    return sum(hash_values) // len(hash_values) if hash_values else None

def find_duplicates(video_dir):
    """Find groups of near-duplicate videos in *video_dir*.

    Scans the directory (non-recursively) for .mp4/.avi/.mov files,
    hashes each with compute_video_hash, and groups files whose hashes
    are within the tolerance of is_within_5_percent.

    Args:
        video_dir: Directory path to scan.

    Returns:
        List of lists; each inner list is a group of filenames (len > 1)
        considered duplicates of each other.
    """
    video_info = {}
    for filename in os.listdir(video_dir):
        if filename.lower().endswith(('.mp4', '.avi', '.mov')):
            path = os.path.join(video_dir, filename)
            try:
                resolution = get_video_info(path)[:2]
                vhash = compute_video_hash(path)
                video_info[filename] = {
                    'resolution': resolution,
                    'hash': vhash
                }
            except Exception as e:
                traceback.print_exc()
                # Fix: report the actual failing file, not a placeholder.
                print(f"Error processing {filename}: {str(e)}")

    # Group videos whose hashes are within the similarity threshold.
    duplicates = []
    processed = set()
    files = list(video_info.items())

    for i, (f1, data1) in enumerate(files):
        # Skip files already assigned to a group, and files whose hash
        # could not be computed (None would crash the comparison below).
        if f1 in processed or data1['hash'] is None:
            continue
        group = [f1]
        for f2, data2 in files[i + 1:]:
            if f2 in processed or data2['hash'] is None:
                continue
            if is_within_5_percent(data1['hash'], data2['hash']):
                group.append(f2)
                processed.add(f2)
        if len(group) > 1:
            duplicates.append(group)

    return duplicates

def is_within_5_percent(a, b):
    """Return True when a and b are within the relative tolerance.

    NOTE(review): despite the function name, the threshold is 0.0005
    (i.e. 0.05%), not 5%. Kept as-is to preserve behavior — confirm
    which tolerance was actually intended.

    Args:
        a: First value (numeric).
        b: Second value (numeric).

    Returns:
        True if |a - b| / max(|a|, |b|) < 0.0005, or both values are 0.
    """
    if a == 0 and b == 0:
        return True
    # max_val > 0 here: the only way max(|a|, |b|) could be 0 is the
    # both-zero case handled above, so no division-by-zero guard is needed.
    max_val = max(abs(a), abs(b))
    return abs(a - b) / max_val < 0.0005

if __name__ == "__main__":
    # Scan the test directory and report any duplicate groups found.
    groups = find_duplicates(r"E:\test")
    print(groups)