import os
import subprocess
import cv2
import torch
import numpy as np
import hashlib
import sqlite3
import threading
import queue
from datetime import datetime
from tempfile import TemporaryDirectory
from tqdm import tqdm

# Global parameters
THUMBNAIL_MAX_WIDTH = 200
THUMBNAIL_MAX_HEIGHT = 200
OK_JPEG_QUALITY = 80   # JPEG quality for normal-sized collections
LOW_JPEG_QUALITY = 60  # JPEG quality used when the collection exceeds 1000 videos
GO_BACKWARDS_PERCENT = 10  # step the usable duration back by this percent when a capture fails
VIDEO_STILL_USABLE = 20    # give up capturing once the usable duration drops below this percent
SSIM_SIZE = 32   # side length of the grayscale thumbnail used for SSIM comparison
PHASH_SIZE = 32  # side length of the image fed into the pHash DCT
ALMOST_BLACK_BITMAP = 100  # below this total gray deviation a frame hashes to 0 (treated as blank)
MIN_VIDEO_DURATION = 1000  # minimum video duration (milliseconds)
MAX_VIDEO_DURATION = 7200000  # maximum video duration (milliseconds, 2 hours)
MIN_VIDEO_SIZE = 1024 * 1024  # minimum video size (1 MB)
MAX_VIDEO_SIZE = 1024 * 1024 * 1024 * 10  # maximum video size (10 GB)


class Prefs:
    """User-tunable settings for video scanning and comparison."""

    def __init__(self):
        # scan state
        self._numberOfVideos = 0
        # thumbnail layout: 1 selects the dual-thumbnail mode (default)
        self._thumbnails = 1
        self._comparisonMode = 0
        # SSIM settings (threshold lowered to catch more potential duplicates)
        self._ssimBlockSize = 16
        self._thresholdSSIM = 0.85
        # weighting applied depending on whether durations match
        self._differentDurationModifier = 4
        self._sameDurationModifier = 1
        # pHash / similarity thresholds
        self._thresholdPhash = 57
        self._minSimilarity = 0.80
        # gating limits for duplicate candidates
        self._maxDurationDiff = 1000  # max duration difference, milliseconds
        self._maxSizeDiff = 0.1       # max file-size difference ratio (10%)


class Video:
    """One video file: holds ffmpeg/ffprobe metadata, perceptual hashes and
    thumbnails, plus scene-change data used for duplicate detection."""

    def __init__(self, prefs, filename):
        self.prefs = prefs
        self.filename = filename
        self.duration = 0
        self.bitrate = 0
        self.codec = ""
        self.width = 0
        self.height = 0
        self.framerate = 0
        self.audio = ""
        self.size = 0
        self.modified = None
        self.hash = [0] * 2  # two pHash slots (dual-thumbnail mode uses both)
        self.grayThumb = [None] * 2  # two grayscale thumbnails for SSIM
        self.thumbnail = None
        self._jpegQuality = LOW_JPEG_QUALITY if prefs._numberOfVideos > 1000 else OK_JPEG_QUALITY
        self._ssimSize = SSIM_SIZE
        self._pHashSize = PHASH_SIZE
        self._almostBlackBitmap = ALMOST_BLACK_BITMAP
        self._isValid = True
        self._errorMessage = ""
        self.frame_features = []  # feature dict per detected key frame
        self.key_frames = []      # frames captured at scene changes
        self.scene_changes = []   # timestamps (seconds) of scene changes
        self._frame_interval = 0.5  # sampling interval in seconds (lower = more precise)
        self.scene_segments = []  # detected scene segments
        self._scene_threshold = 20  # mean-frame-diff threshold marking a scene change

    def process(self, save_path=None):
        """Validate the file, gather metadata (cached in SQLite), detect
        scene changes and build thumbnails/hashes.

        Returns True on success; on failure sets _errorMessage (see
        get_error_message()) and returns False.
        """
        if not os.path.exists(self.filename):
            self._isValid = False
            self._errorMessage = "文件不存在"
            return False

        # reject files outside the configured size range
        self.size = os.path.getsize(self.filename)
        if self.size < MIN_VIDEO_SIZE or self.size > MAX_VIDEO_SIZE:
            self._isValid = False
            self._errorMessage = f"文件大小超出范围 ({self.size/1024/1024:.1f}MB)"
            return False

        cache = Db(self.filename)
        if not cache.readMetadata(self):
            self.getMetadata()
            cache.writeMetadata(self)

        # reject videos outside the configured duration range
        if self.duration < MIN_VIDEO_DURATION or self.duration > MAX_VIDEO_DURATION:
            self._isValid = False
            self._errorMessage = f"视频时长超出范围 ({self.duration/1000:.1f}秒)"
            return False

        if self.width == 0 or self.height == 0 or self.duration == 0:
            self._isValid = False
            self._errorMessage = "无法获取视频元数据"
            return False

        if not self._detect_scene_changes(save_path):
            self._errorMessage = "无法检测场景变化"
            return False

        ret = self.takeScreenCaptures(cache)
        if ret == -1:
            self._isValid = False
            self._errorMessage = "无法获取视频截图"
            return False
        elif (self.prefs._thumbnails != 1 and self.hash[0] == 0) or \
                (self.prefs._thumbnails == 1 and self.hash[0] == 0 and self.hash[1] == 0):
            self._isValid = False
            self._errorMessage = "无法生成视频指纹"
            return False

        return True

    def get_error_message(self):
        """Return the human-readable reason the last process() call failed."""
        return self._errorMessage

    def getMetadata(self):
        """Populate duration/bitrate/codec/resolution/framerate/audio via
        ffprobe, falling back to parsing `ffmpeg -i` text output."""
        # ffprobe (part of FFmpeg) gives more precise, structured metadata
        command = f'ffprobe -v error -show_streams -show_format -of json "{self.filename}"'
        result = subprocess.run(command, shell=True, capture_output=True, text=True)

        if result.returncode != 0:
            print(f"FFprobe错误: {result.stderr}")
            # fall back to the original ffmpeg-based method
            # NOTE(review): `ffmpeg -i` with no output file exits non-zero,
            # so this fallback may always return early — verify
            command = f'ffmpeg -hide_banner -i "{self.filename}"'
            result = subprocess.run(command, shell=True, capture_output=True, text=True)
            if result.returncode != 0:
                print(f"FFmpeg错误: {result.stderr}")
                return
            output = result.stderr  # `ffmpeg -i` writes its info to stderr
            self._parseMetadataFromText(output)
        else:
            self._parseMetadataFromJSON(result.stdout)

        # file size and modification time
        self.size = os.path.getsize(self.filename)
        self.modified = os.path.getmtime(self.filename)

    def _parseMetadataFromJSON(self, json_output):
        """Parse ffprobe JSON output into this object's metadata fields."""
        try:
            import json
            data = json.loads(json_output)

            # format section carries duration and overall bitrate
            if 'format' in data:
                format_info = data['format']
                if 'duration' in format_info:
                    self.duration = int(float(format_info['duration']) * 1000)  # seconds -> milliseconds
                if 'bit_rate' in format_info:
                    self.bitrate = int(format_info['bit_rate']) // 1000  # bps -> kbps

            # first video stream
            video_stream = next((s for s in data.get('streams', []) if s.get('codec_type') == 'video'), None)
            if video_stream:
                self.codec = video_stream.get('codec_name', '')
                self.width = video_stream.get('width', 0)
                self.height = video_stream.get('height', 0)

                # frame rate arrives as a "num/denom" fraction
                if 'avg_frame_rate' in video_stream:
                    num, denom = video_stream['avg_frame_rate'].split('/')
                    self.framerate = round(float(num) / float(denom), 1)

            # first audio stream
            audio_stream = next((s for s in data.get('streams', []) if s.get('codec_type') == 'audio'), None)
            if audio_stream:
                self.audio = audio_stream.get('codec_name', '')
                self.channels = audio_stream.get('channels', 0)

        except Exception as e:
            print(f"解析JSON元数据出错: {e}")

    def _parseMetadataFromText(self, output):
        """Parse the text that `ffmpeg -i` prints to stderr (fallback path
        when ffprobe fails)."""
        lines = output.split('\n')
        rotated_once = False

        for line in lines:
            # duration and bitrate line
            if " Duration:" in line:
                time = line.split(" ")[3]
                if time != "N/A,":
                    h = int(time[0:2])
                    m = int(time[3:5])
                    s = int(time[6:8])
                    ms = int(time[9:11])  # two centisecond digits, hence *10 below
                    self.duration = h * 60 * 60 * 1000 + m * 60 * 1000 + s * 1000 + ms * 10

                # bitrate on the same line
                if "bitrate:" in line:
                    bitrate_str = line.split("bitrate: ")[1].split(" ")[0]
                    if bitrate_str.isdigit():
                        self.bitrate = int(bitrate_str)

            # video stream line
            if " Video:" in line and ("kb/s" in line or " fps" in line or output.count(" Video:") == 1):
                parts = line.split()
                codec_index = parts.index("Video:") + 1
                self.codec = parts[codec_index].split('(')[0]

                # resolution token ("WxH")
                for part in parts:
                    if 'x' in part and part.replace('x', '').isdigit():
                        res = part.split('x')
                        self.width = int(res[0])
                        self.height = int(res[1])
                        break

                # frame rate token
                for part in parts:
                    if 'fps' in part:
                        fps_str = part.replace('fps', '')
                        try:
                            self.framerate = round(float(fps_str), 1)
                        except ValueError:
                            pass

            # audio stream line
            if " Audio:" in line:
                parts = line.split()
                audio_codec = parts[parts.index("Audio:") + 1]
                self.audio = audio_codec

                # channel count ("N channels")
                for i, part in enumerate(parts):
                    if part == "channels":
                        try:
                            self.channels = int(parts[i - 1])
                        except ValueError:
                            pass
                        break

            # rotation metadata: swap width/height once for 90/270 degrees
            if "rotate" in line and not rotated_once:
                try:
                    rotate = int(line.split(":")[1])
                    if rotate in [90, 270]:
                        self.width, self.height = self.height, self.width
                    rotated_once = True
                except:
                    pass

    def takeScreenCaptures(self, cache):
        """Capture frames at fixed percentages (reading/writing the capture
        cache), assemble them into a thumbnail grid and hash it.

        Returns 1 on success, -1 when the video yields no usable frames.
        """
        thumb = Thumbnail(self.prefs._thumbnails)
        ofDuration = 100
        for percent in thumb.percentages():
            frame = None
            cachedImage = cache.readCapture(percent)
            if cachedImage:
                frame = self.load_image_from_bytes(cachedImage)
                frame = self.resize_image(frame, self.width, self.height)
            else:
                frame = self.captureAt(percent, ofDuration)
                if frame is None:
                    # shrink the usable duration and retry until the video
                    # falls below the still-usable threshold
                    ofDuration = ofDuration - GO_BACKWARDS_PERCENT
                    if ofDuration >= VIDEO_STILL_USABLE:
                        continue
                    return -1
                cache.writeCapture(percent, self.image_to_bytes(frame))

            if frame is not None:
                thumb.add_image(frame)

        hashes = 2 if self.prefs._thumbnails == 1 else 1
        self.processThumbnail(thumb, hashes)
        return 1

    def processThumbnail(self, thumbnail, hashes):
        """Tile the captured frames into a grid, compute pHash and grayscale
        SSIM thumbnails (whole grid, or each half in dual mode), and store a
        JPEG preview in self.thumbnail."""
        if not thumbnail.images:
            return

        # grid geometry from the first captured frame
        rows = thumbnail.rows()
        cols = thumbnail.cols()
        cell_height = thumbnail.images[0].shape[0]
        cell_width = thumbnail.images[0].shape[1]

        # blank canvas
        grid = np.zeros((cell_height * rows, cell_width * cols, 3), dtype=np.uint8)

        # paste each frame into its grid cell
        for idx, img in enumerate(thumbnail.images):
            if idx >= rows * cols:
                break
            i, j = divmod(idx, cols)
            grid[i*cell_height:(i+1)*cell_height, j*cell_width:(j+1)*cell_width] = img

        # hash the whole grid, or each half in dual-thumbnail mode
        for hashIndex in range(hashes):
            if self.prefs._thumbnails == 1:
                width = grid.shape[1]
                image = grid[:, hashIndex * width // 2:(hashIndex + 1) * width // 2]
            else:
                image = grid

            # perceptual hash
            self.hash[hashIndex] = self.computePhash(image)

            # small grayscale copy for later SSIM comparison
            # NOTE(review): captured frames are converted to RGB (see
            # captureAt) but BGR2GRAY is used here — confirm channel order
            resized_image = cv2.resize(image, (self._ssimSize, self._ssimSize), interpolation=cv2.INTER_AREA)
            gray_image = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY)
            self.grayThumb[hashIndex] = gray_image.astype(np.float32)

        # shrink the grid for display
        thumbnail = self.minimizeImage(grid)

        # store the thumbnail as JPEG-encoded bytes
        _, buffer = cv2.imencode('.jpg', thumbnail, [cv2.IMWRITE_JPEG_QUALITY, self._jpegQuality])
        self.thumbnail = buffer.tobytes()

    def computePhash(self, input_image):
        """Compute a 64-bit DCT perceptual hash of *input_image*.

        Returns 0 for nearly-uniform (almost black) images.
        """
        resize_img = cv2.resize(input_image, (self._pHashSize, self._pHashSize), interpolation=cv2.INTER_AREA)
        gray_img = cv2.cvtColor(resize_img, cv2.COLOR_BGR2GRAY)

        # crude flatness measure: total deviation from the first pixel
        # NOTE(review): both operands are uint8, so the subtraction wraps
        # modulo 256 before abs() — confirm this is intended
        shades_of_gray = 0
        first_pixel = gray_img[0, 0]
        for i in range(1, self._pHashSize):
            for j in range(self._pHashSize):
                shades_of_gray += abs(first_pixel - gray_img[i, j])

        if shades_of_gray < self._almostBlackBitmap:
            return 0

        # DCT of the grayscale image; keep the 8x8 low-frequency corner
        gray_f_img = gray_img.astype(np.float32)
        dct_img = cv2.dct(gray_f_img)
        top_left_dct = dct_img[:8, :8]

        # mean of the low-frequency coefficients excluding the DC term
        first_element = top_left_dct[0, 0]
        average = (np.sum(top_left_dct) - first_element) / 63

        # one bit per coefficient: set when above the mean
        hash_value = 0
        for i in range(8):
            for j in range(8):
                if top_left_dct[i, j] > average:
                    index = i * 8 + j
                    hash_value |= 1 << index

        return hash_value

    def minimizeImage(self, image):
        """Downscale *image* to fit within 200x200, preserving aspect ratio;
        images already within bounds are returned unchanged."""
        max_width = 200
        max_height = 200
        height, width = image.shape[:2]
        if width > height:
            if width > max_width:
                new_height = int(height * (max_width / width))
                return cv2.resize(image, (max_width, new_height), interpolation=cv2.INTER_AREA)
        elif height > max_height:
            new_width = int(width * (max_height / height))
            return cv2.resize(image, (new_width, max_height), interpolation=cv2.INTER_AREA)
        return image

    def captureAt(self, percent, ofDuration):
        """Grab one frame at *percent* of the usable duration via ffmpeg.

        Returns an RGB ndarray, or None when ffmpeg produced no image.
        """
        with TemporaryDirectory() as temp_dir:
            screenshot = os.path.join(temp_dir, f"vidupe{percent}.bmp")
            command = f'ffmpeg -ss {self.msToHHMMSS(self.duration * (percent * ofDuration) / (100 * 100))} -i "{self.filename}" -an -frames:v 1 -pix_fmt rgb24 "{screenshot}"'
            subprocess.run(command, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            if os.path.exists(screenshot):
                img = cv2.imread(screenshot)
                return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            return None

    def msToHHMMSS(self, time):
        """Format a millisecond timestamp as HH:MM:SS.ms for ffmpeg -ss.

        NOTE(review): msecs is not zero-padded, so e.g. 5 ms renders as
        ".5", which ffmpeg reads as 500 ms — confirm whether :03d padding
        was intended.
        """
        hours = int(time / (1000 * 60 * 60)) % 24
        minutes = int(time / (1000 * 60)) % 60
        seconds = int(time / 1000) % 60
        msecs = int(time % 1000)
        paddedHours = f"{hours:02d}"
        paddedMinutes = f"{minutes:02d}"
        paddedSeconds = f"{seconds:02d}"
        return f"{paddedHours}:{paddedMinutes}:{paddedSeconds}.{msecs}"

    # helper methods emulating the PyQt5 image handling this code replaced
    def load_image_from_bytes(self, data):
        """Decode encoded image bytes into an RGB ndarray."""
        nparr = np.frombuffer(data, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    def resize_image(self, image, width, height):
        """Resize *image* to exactly width x height."""
        return cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)

    def image_to_bytes(self, image):
        """Encode an RGB ndarray as JPEG bytes."""
        _, buffer = cv2.imencode('.jpg', cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
        return buffer.tobytes()

    def _detect_scene_changes(self, save_path=None):
        """Detect scene changes by sampled frame differencing; optionally
        save each detected segment under *save_path*.

        Populates scene_segments, scene_changes, key_frames and
        frame_features. Returns False on any error.
        """
        try:
            cap = cv2.VideoCapture(self.filename)
            if not cap.isOpened():
                return False

            fps = cap.get(cv2.CAP_PROP_FPS)
            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            # NOTE(review): frame_interval is 0 when fps < 2, which would
            # raise ZeroDivisionError in the modulo below — confirm inputs
            frame_interval = int(fps * self._frame_interval)

            prev_frame = None
            frame_idx = 0
            scene_start = 0
            current_scene = []

            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                if frame_idx % frame_interval == 0:
                    if prev_frame is not None:
                        # mean absolute difference between sampled frames
                        diff = cv2.absdiff(frame, prev_frame)
                        diff_mean = np.mean(diff)

                        # a large difference marks a scene boundary
                        if diff_mean > self._scene_threshold:
                            scene_end = frame_idx / fps

                            # close out the current segment
                            if len(current_scene) > 0:
                                self.scene_segments.append({
                                    'start': scene_start,
                                    'end': scene_end,
                                    'frames': current_scene
                                })

                                # optionally write the segment to disk
                                if save_path:
                                    self._save_scene_segment(
                                        save_path,
                                        scene_start,
                                        scene_end,
                                        current_scene
                                    )

                            scene_start = scene_end
                            current_scene = []
                            self.scene_changes.append(scene_end)
                            self.key_frames.append(frame)

                            # feature vector for the boundary frame
                            frame_feature = self._extract_frame_feature(frame)
                            self.frame_features.append(frame_feature)

                    current_scene.append(frame)
                    prev_frame = frame.copy()

                frame_idx += 1

            # flush the trailing segment
            if len(current_scene) > 0:
                scene_end = frame_idx / fps
                self.scene_segments.append({
                    'start': scene_start,
                    'end': scene_end,
                    'frames': current_scene
                })
                if save_path:
                    self._save_scene_segment(
                        save_path,
                        scene_start,
                        scene_end,
                        current_scene
                    )

            cap.release()
            return True

        except Exception as e:
            print(f"场景检测错误: {str(e)}")
            return False

    def _save_scene_segment(self, save_path, start_time, end_time, frames):
        """Write one scene segment as an MP4 plus a JPEG preview frame."""
        try:
            # ensure the output directory exists
            os.makedirs(save_path, exist_ok=True)

            # name segments by source file plus time range
            base_name = os.path.splitext(os.path.basename(self.filename))[0]
            segment_name = f"{base_name}_{start_time:.1f}_{end_time:.1f}"

            # write the frames as a video file
            if len(frames) > 0:
                height, width = frames[0].shape[:2]
                fourcc = cv2.VideoWriter_fourcc(*'mp4v')
                out = cv2.VideoWriter(
                    os.path.join(save_path, f"{segment_name}.mp4"),
                    fourcc,
                    30.0,  # fps is hard-coded; NOTE(review): source fps may differ
                    (width, height)
                )

                for frame in frames:
                    out.write(frame)
                out.release()

                # first frame doubles as a preview image
                cv2.imwrite(
                    os.path.join(save_path, f"{segment_name}.jpg"),
                    frames[0]
                )
        except Exception as e:
            print(f"保存场景片段错误: {str(e)}")

    def _extract_frame_feature(self, frame):
        """Extract a compact feature dict (color histogram, edge density,
        low-frequency DCT) from one frame."""
        # 1. downscale to a fixed size
        resized = cv2.resize(frame, (64, 64))

        # 2. convert to HSV color space
        hsv = cv2.cvtColor(resized, cv2.COLOR_BGR2HSV)

        # 3. normalized 2-D hue/saturation histogram
        hist = cv2.calcHist([hsv], [0, 1], None, [8, 8], [0, 180, 0, 256])
        hist = cv2.normalize(hist, hist).flatten()

        # 4. edge density from a Canny edge map
        gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 100, 200)
        edge_density = np.sum(edges) / (edges.shape[0] * edges.shape[1])

        # 5. low-frequency DCT coefficients
        dct = cv2.dct(gray.astype(np.float32))
        dct_feature = dct[:8, :8].flatten()

        return {
            'hist': hist,
            'edge_density': edge_density,
            'dct': dct_feature
        }


class Db:
    """SQLite-backed cache for per-video metadata and screen captures.

    Each video is keyed by an MD5 of its basename plus modification time,
    so the cache entry is invalidated automatically when the file changes.
    """

    def __init__(self, filename):
        self.filename = filename
        self.modified = os.path.getmtime(filename)
        self.id = self.uniqueId(os.path.basename(filename))
        self.dbfilename = os.path.join(os.path.dirname(os.path.abspath(__file__)), "cache.db")
        self.conn = sqlite3.connect(self.dbfilename)
        self.createTables()

    def uniqueId(self, filename):
        """Return the cache key for *filename*; with an empty name, return
        the already-computed id.

        Bug fix: the key now actually incorporates the file name — it
        previously hashed a constant placeholder, so every file with the
        same mtime collided on the same cache row.
        """
        if not filename:
            return self.id
        name_modified = f"{filename}_{self.modified}"
        return hashlib.md5(name_modified.encode()).hexdigest()

    def createTables(self):
        """Create the cache schema (idempotent)."""
        cursor = self.conn.cursor()
        # speed over durability: this is a rebuildable cache
        cursor.execute("PRAGMA synchronous = OFF;")
        cursor.execute("PRAGMA journal_mode = WAL;")
        cursor.execute("CREATE TABLE IF NOT EXISTS metadata (id TEXT PRIMARY KEY, "
                       "size INTEGER, duration INTEGER, bitrate INTEGER, framerate REAL, "
                       "codec TEXT, audio TEXT, width INTEGER, height INTEGER);")
        cursor.execute("CREATE TABLE IF NOT EXISTS capture (id TEXT PRIMARY KEY, "
                       " at8 BLOB, at16 BLOB, at24 BLOB, at32 BLOB, at40 BLOB, at48 BLOB, "
                       "at56 BLOB, at64 BLOB, at72 BLOB, at80 BLOB, at88 BLOB, at96 BLOB);")
        cursor.execute("CREATE TABLE IF NOT EXISTS version (version TEXT PRIMARY KEY);")
        cursor.execute("INSERT OR REPLACE INTO version VALUES('1.0');")
        self.conn.commit()

    def readMetadata(self, video):
        """Load cached metadata into *video*; return True on a cache hit."""
        cursor = self.conn.cursor()
        # parameterized query: never interpolate values into SQL text
        cursor.execute("SELECT * FROM metadata WHERE id = ?;", (self.id,))
        row = cursor.fetchone()
        if row is None:
            return False
        video.modified = self.modified
        (video.size, video.duration, video.bitrate, video.framerate,
         video.codec, video.audio, video.width, video.height) = row[1:9]
        return True

    def writeMetadata(self, video):
        """Insert or replace the cached metadata row for this video."""
        cursor = self.conn.cursor()
        cursor.execute(
            "INSERT OR REPLACE INTO metadata VALUES(?,?,?,?,?,?,?,?,?);",
            (self.id, video.size, video.duration, video.bitrate, video.framerate,
             video.codec, video.audio, video.width, video.height))
        self.conn.commit()

    def readCapture(self, percent):
        """Return the cached capture blob taken at *percent*, or None."""
        # a column name cannot be a bind parameter; percent comes from the
        # fixed Thumbnail.percentages() list, and int() rejects anything else
        cursor = self.conn.cursor()
        cursor.execute(f"SELECT at{int(percent)} FROM capture WHERE id = ?;", (self.id,))
        row = cursor.fetchone()
        return row[0] if row else None

    def writeCapture(self, percent, image):
        """Store the capture blob taken at *percent* for this video."""
        cursor = self.conn.cursor()
        cursor.execute("INSERT OR IGNORE INTO capture (id) VALUES(?);", (self.id,))
        cursor.execute(f"UPDATE capture SET at{int(percent)} = ? WHERE id = ?;", (image, self.id))
        self.conn.commit()

    def removeVideo(self, id):
        """Delete the cached rows for *id*; return True if a row existed
        and was removed. (Bug fix: deletes are now committed — previously
        they were rolled back when the connection closed.)"""
        cursor = self.conn.cursor()
        cursor.execute("SELECT id FROM metadata WHERE id = ?;", (id,))
        if cursor.fetchone() is None:
            return False
        cursor.execute("DELETE FROM metadata WHERE id = ?;", (id,))
        cursor.execute("DELETE FROM capture WHERE id = ?;", (id,))
        self.conn.commit()
        cursor.execute("SELECT id FROM metadata WHERE id = ?;", (id,))
        return cursor.fetchone() is None


class Thumbnail:
    """Fixed 4x3 grid of frames captured at twelve fixed percentages."""

    def __init__(self, mode):
        self.mode = mode
        self.shape = None   # shape of the first captured frame
        self.images = []    # captured frames, in capture order

    def cols(self):
        """Number of grid columns."""
        return 4

    def rows(self):
        """Number of grid rows."""
        return 3

    def percentages(self):
        """Capture positions, as percent of the video duration."""
        return list(range(8, 97, 8))

    def countModes(self):
        """Number of available thumbnail modes."""
        return 8

    def modeName(self, index):
        """Display name for mode *index*."""
        return f"Mode {index}"

    def add_image(self, image):
        """Append a captured frame, remembering the first frame's shape."""
        if image is None:
            return
        self.images.append(image)
        if self.shape is None:
            self.shape = image.shape

    def get_image(self, index):
        """Return the frame at *index*, or None when out of range."""
        return self.images[index] if 0 <= index < len(self.images) else None

    def get_all_images(self):
        """Return the list of all captured frames."""
        return self.images


class VideoProcessor:
    """Walks folders for video files, fingerprints them on a small thread
    pool and hands the surviving list to Comparison."""

    def __init__(self, prefs):
        self.prefs = prefs
        self.video_list = []        # successfully processed Video objects
        self.every_video = []       # all candidate file paths found
        self.rejected_videos = []   # paths that failed processing
        self.extension_list = [
            '.mp4', '.avi', '.mkv', '.mov', '.wmv', '.flv',
            '.webm', '.m4v', '.mpeg', '.mpg', '.3gp'
        ]
        self.user_pressed_stop = False
        self.previous_run_folders = ""
        self.previous_run_thumbnails = -1
        self.load_extensions()
        self.save_scenes = False
        self.scene_save_path = None

    def load_extensions(self):
        """Append extra extensions from extensions.ini (dot-less entries,
        `;`-prefixed comment lines skipped)."""
        try:
            with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "extensions.ini"), "r") as file:
                for line in file:
                    if line.startswith(";") or line.strip() == "":
                        continue
                    self.extension_list.extend(line.replace("*.", "").strip().split(" "))
        except FileNotFoundError:
            print("Error: extensions.ini not found. No video file will be searched.")

    def detect_ffmpeg(self):
        """Return True if the ffmpeg executable can be launched.

        Bug fix: invoke the binary directly (no shell). With shell=True a
        missing ffmpeg only produced a non-zero exit code, so
        FileNotFoundError could never fire and this always returned True.
        """
        try:
            subprocess.run(["ffmpeg", "-version"], capture_output=True)
            return True
        except FileNotFoundError:
            print("Error: FFmpeg not found. Download it from https://ffmpeg.org/")
            return False

    def find_duplicates(self, folders_to_search, save_scenes=False, scene_save_path=None):
        """Scan the `;`-separated *folders_to_search*, process any newly
        discovered videos and report matching ones."""
        self.save_scenes = save_scenes
        self.scene_save_path = scene_save_path

        if not self.extension_list:
            print("Error: No extensions found in extensions.ini. No video file will be searched.")
            return
        if not self.detect_ffmpeg():
            return
        # rescan only when the folder set or thumbnail mode changed
        if folders_to_search != self.previous_run_folders or self.prefs._thumbnails != self.previous_run_thumbnails:
            print("\nSearching for videos...")
            self.video_list = []
            self.every_video = []
            directories = folders_to_search.split(";")
            not_found = []
            for directory in directories:
                if not directory:
                    continue
                if os.path.exists(directory):
                    self.find_videos(directory)
                else:
                    print(f"Cannot find folder: {os.path.normpath(directory)}")
                    not_found.append(os.path.normpath(directory))
            if not_found:
                print(f"Cannot find folder: {' '.join(not_found)}")
            self.process_videos()
        if len(self.video_list) > 1:
            comparison = Comparison(self.video_list, self.prefs)
            comparison.report_matching_videos()
            self.previous_run_folders = folders_to_search
            self.previous_run_thumbnails = self.prefs._thumbnails

    def find_videos(self, directory):
        """Recursively collect video files under *directory* into
        self.every_video (case-insensitive dedupe by full path)."""
        # normalize once: accept list entries with or without a leading dot
        # (built-ins carry a dot, extensions.ini entries do not)
        allowed = {e.lstrip('.').lower() for e in self.extension_list}
        seen = {v.lower() for v in self.every_video}  # O(1) dedupe (was O(n) per file)
        for root, _, files in os.walk(directory):
            if self.user_pressed_stop:
                return
            for file in files:
                # bug fix: the dot-less extension was compared against
                # entries like '.mp4', so built-in extensions never matched
                ext = os.path.splitext(file)[1][1:].lower()
                if ext in allowed:
                    full_path = os.path.join(root, file)
                    if full_path.lower() not in seen:
                        seen.add(full_path.lower())
                        self.every_video.append(full_path)
                    print(os.path.normpath(full_path))

    def process_videos(self):
        """Fingerprint every found video on up to 8 worker threads."""
        self.prefs._numberOfVideos = len(self.every_video)
        print(f"Found {self.prefs._numberOfVideos} video file(s):")
        if self.prefs._numberOfVideos == 0:
            return

        processed_count = 0
        count_lock = threading.Lock()  # += on a shared counter is not atomic
        queue_in = queue.Queue()
        queue_out = queue.Queue()

        # enqueue every video for processing
        for filename in self.every_video:
            queue_in.put(filename)

        def worker():
            nonlocal processed_count
            while True:
                try:
                    filename = queue_in.get(block=False)
                except queue.Empty:
                    break
                video = Video(self.prefs, filename)
                save_path = self.scene_save_path if (self.save_scenes and self.scene_save_path) else None
                success = video.process(save_path)
                if success:
                    queue_out.put(video)
                else:
                    self.rejected_videos.append(filename)
                with count_lock:
                    processed_count += 1
                    done = processed_count
                print(f"Processed {done}/{self.prefs._numberOfVideos}: {os.path.basename(filename)}")
                queue_in.task_done()

        # start the worker pool and wait for it to drain the queue
        num_workers = min(8, self.prefs._numberOfVideos)
        threads = [threading.Thread(target=worker) for _ in range(num_workers)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        # collect the successfully processed videos
        while not queue_out.empty():
            self.video_list.append(queue_out.get())

        self.video_summary()

    def video_summary(self):
        """Print the intact/rejected counts and clear the rejection list."""
        if not self.rejected_videos:
            print(f"{len(self.video_list)} intact video(s) found")
        else:
            print(f"{len(self.video_list)} intact video(s) out of {len(self.every_video)} total")
            print(f"\nThe following {len(self.rejected_videos)} video(s) could not be added due to errors:")
            for filename in self.rejected_videos:
                print(os.path.normpath(filename))
        self.rejected_videos = []


class Comparison:
    """Pairwise comparison engine over a list of processed Video objects.

    Detects three kinds of duplication:
      * full-video matches (collected in ``self.matches``),
      * shared clip segments (``self.clip_matches``),
      * mashup videos assembled from scenes of other videos
        (``self.mashup_matches``).

    Results are printed to stdout and written to a timestamped text file.
    NOTE(review): relies on Video attributes ``scene_segments``,
    ``frame_features``, ``key_frames`` and ``scene_changes`` being populated
    by Video.process (defined elsewhere in this file) — confirm.
    """

    def __init__(self, videos, prefs):
        # videos: list of Video instances to compare pairwise.
        # prefs:  Prefs instance carrying similarity thresholds.
        self.videos = videos
        self.prefs = prefs
        self.matches = []
        self.clip_matches = []
        self.mashup_matches = []
        self.feature_extractor = FeatureExtractor()  # per-frame feature extractor

    def _detect_mashup(self, target_video):
        """Detect whether target_video is a mashup cut from other videos.

        For every scene segment of the target, finds the best-matching
        scene (similarity > 0.8) among all other videos; if at least three
        target segments find a match, the target is recorded as a mashup.
        """
        mashup_segments = []
        
        # For each scene of the target video...
        for segment in tqdm(target_video.scene_segments):
            best_match = None
            best_similarity = 0
            
            # ...compare against every other (source) video.
            for source_video in self.videos:
                if source_video == target_video:
                    continue
                    
                # Compare the current scene with all scenes of the source.
                for source_segment in source_video.scene_segments:
                    similarity = self._compare_segments(
                        segment['frames'],
                        source_segment['frames']
                    )
                    
                    if similarity > best_similarity and similarity > 0.8:
                        best_similarity = similarity
                        best_match = {
                            'source_video': source_video,
                            'source_segment': source_segment,
                            'similarity': similarity
                        }
            
            if best_match:
                mashup_segments.append({
                    'target_segment': segment,
                    'source_match': best_match
                })
        
        # Enough matching segments => treat the target as a mashup.
        if len(mashup_segments) >= 3:  # at least 3 matching segments
            self.mashup_matches.append({
                'target_video': target_video,
                'segments': mashup_segments
            })

    def _compare_segments(self, segment1, segment2):
        """Return the similarity (0..1) of two lists of frames.

        Only the middle frame of each segment is compared, as a cheap
        proxy for whole-segment similarity.
        """
        if not segment1 or not segment2:
            return 0.0
            
        # Use the middle frame of each segment as its representative.
        mid_frame1 = segment1[len(segment1)//2]
        mid_frame2 = segment2[len(segment2)//2]

        # Extract per-frame features.
        feature1 = self.feature_extractor.extract_frame_feature(mid_frame1)
        feature2 = self.feature_extractor.extract_frame_feature(mid_frame2)
        
        # Combine into a single similarity score.
        return self._compare_frame_features(feature1, feature2)

    def report_matching_videos(self):
        """Run all detections (mashup, full-video, clips) and print/save results."""
        print("\n正在比较视频...")
        
        # Mashup detection first: each video checked against all others.
        for video in self.videos:
            self._detect_mashup(video)
        
        # Pairwise full-video / clip comparison (upper triangle only).
        for i in range(len(self.videos)):
            for j in range(i + 1, len(self.videos)):
                similarity = self.compare_videos(self.videos[i], self.videos[j])
                if similarity > self.prefs._thresholdSSIM:
                    self.matches.append({
                        'video1': self.videos[i],
                        'video2': self.videos[j],
                        'similarity': similarity
                    })

        self._print_results()

    def compare_videos(self, video1, video2):
        """Return the similarity (0..1) of two videos.

        Side effect: clip matches found along the way are appended to
        ``self.clip_matches`` by _detect_clips.
        """
        # 1. Cheap metadata gate (duration, resolution).
        if not self._basic_comparison(video1, video2):
            return 0.0

        # 2. Whole-video similarity (SSIM + pHash average).
        overall_similarity = self._calculate_overall_similarity(video1, video2)
        
        # 3. Clip-segment detection.
        clip_similarity = self._detect_clips(video1, video2)
        
        # 4. Final score: the stronger of the two signals.
        return max(overall_similarity, clip_similarity)

    def _basic_comparison(self, video1, video2):
        """Fast metadata pre-filter; False means 'definitely not duplicates'."""
        # Duration check (up to 10% difference allowed).
        duration_diff = abs(video1.duration - video2.duration)
        if duration_diff > max(video1.duration, video2.duration) * 0.1:
            return False
            
        # Resolution must match exactly.
        if video1.width != video2.width or video1.height != video2.height:
            return False
            
        return True

    def _calculate_overall_similarity(self, video1, video2):
        """Average of SSIM and pHash similarities over the stored thumbnails."""
        ssim_similarity = self._calculate_ssim(video1, video2)
        phash_similarity = self._calculate_phash_similarity(video1, video2)
        return (ssim_similarity + phash_similarity) / 2

    def _calculate_ssim(self, video1, video2):
        """Mean SSIM over the paired grayscale thumbnails of both videos."""
        total_ssim = 0
        count = 0
        
        for i in range(len(video1.grayThumb)):
            if i >= len(video2.grayThumb):
                break
                
            ssim_value = self._compute_ssim(
                video1.grayThumb[i],
                video2.grayThumb[i]
            )
            total_ssim += ssim_value
            count += 1

        return total_ssim / count if count > 0 else 0

    def _compute_ssim(self, img1, img2):
        """Structural similarity of two same-size grayscale images.

        Standard SSIM with an 11x11 Gaussian window (sigma 1.5); the 5-px
        border is cropped to avoid filter edge effects.
        NOTE(review): assumes both images have identical dimensions >= 11px.
        """
        C1 = (0.01 * 255) ** 2
        C2 = (0.03 * 255) ** 2

        img1 = img1.astype(np.float64)
        img2 = img2.astype(np.float64)
        kernel = cv2.getGaussianKernel(11, 1.5)
        window = np.outer(kernel, kernel.transpose())

        mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]
        mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
        mu1_sq = mu1 ** 2
        mu2_sq = mu2 ** 2
        mu1_mu2 = mu1 * mu2
        sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq
        sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq
        sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

        ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
        return ssim_map.mean()

    def _calculate_phash_similarity(self, video1, video2):
        """Mean perceptual-hash similarity over paired hashes.

        Unset hashes (value 0) are skipped.
        """
        total_similarity = 0
        count = 0
        
        for i in range(len(video1.hash)):
            if i >= len(video2.hash):
                break
                
            if video1.hash[i] == 0 or video2.hash[i] == 0:
                continue
                
            # Hamming distance between the two hashes.
            hamming_distance = bin(video1.hash[i] ^ video2.hash[i]).count('1')
            similarity = 1 - (hamming_distance / 64)  # 64 = assumed pHash bit width; NOTE(review): PHASH_SIZE is 32 — confirm hashes are 64-bit
            total_similarity += similarity
            count += 1

        return total_similarity / count if count > 0 else 0

    def _detect_clips(self, video1, video2):
        """Detect shared clip segments between two videos.

        Compares every pair of key frames; pairs whose feature similarity
        exceeds 0.8 and whose following scene-change timing lines up are
        recorded. Returns the best similarity found (0.0 if none).
        Side effect: appends to self.clip_matches when any clip is found.
        """
        max_similarity = 0.0
        clip_matches = []

        # Compare every key frame of video1 with every key frame of video2.
        for i, frame1 in enumerate(video1.key_frames):
            if i >= len(video1.frame_features):
                continue
            for j, frame2 in enumerate(video2.key_frames):
                if j >= len(video2.frame_features):
                    continue
                similarity = self._compare_frame_features(
                    video1.frame_features[i],
                    video2.frame_features[j]
                )
                
                if similarity > 0.8:  # threshold is tunable
                    # Require the subsequent scene timing to line up too.
                    if self._check_temporal_continuity(
                        video1.scene_changes[i:],
                        video2.scene_changes[j:],
                        similarity
                    ):
                        clip_matches.append({
                            'start1': video1.scene_changes[i],
                            'start2': video2.scene_changes[j],
                            'similarity': similarity
                        })
                        max_similarity = max(max_similarity, similarity)

        if clip_matches:
            self.clip_matches.append({
                'video1': video1,
                'video2': video2,
                'clips': clip_matches
            })

        return max_similarity

    def _compare_frame_features(self, feature1, feature2):
        """Combine histogram, edge-density and DCT similarity into one score.

        NOTE(review): assumes 'edge_density' is a fraction in [0, 1] —
        verify against FeatureExtractor's normalization.
        """
        # 1. Colour-histogram correlation.
        hist_similarity = cv2.compareHist(
            feature1['hist'],
            feature2['hist'],
            cv2.HISTCMP_CORREL
        )
        
        # 2. Edge-density similarity.
        edge_similarity = 1 - abs(feature1['edge_density'] - feature2['edge_density'])
        
        # 3. DCT coefficient similarity.
        dct_diff = np.mean(np.abs(feature1['dct'] - feature2['dct']))
        dct_similarity = 1 - min(dct_diff, 1.0)
        
        # Unweighted mean of the three components.
        return (hist_similarity + edge_similarity + dct_similarity) / 3

    def _check_temporal_continuity(self, scenes1, scenes2, initial_similarity):
        """True when the scene-change intervals of both videos stay aligned.

        NOTE(review): initial_similarity is accepted but never used here.
        """
        if len(scenes1) < 2 or len(scenes2) < 2:
            return False
            
        # Compare successive scene-change gaps.
        for i in range(1, min(len(scenes1), len(scenes2))):
            time_diff1 = scenes1[i] - scenes1[i-1]
            time_diff2 = scenes2[i] - scenes2[i-1]
            
            # Gaps differing by more than the threshold => not continuous.
            if abs(time_diff1 - time_diff2) > 1.0:  # 1-second threshold
                return False
                
        return True

    def _print_results(self):
        """Print all match categories to stdout and save them to a file."""
        if not self.matches and not self.clip_matches and not self.mashup_matches:
            print("\n未找到重复视频。")
            return

        print("\n=== 完整视频匹配 ===")
        for match in self.matches:
            self._print_video_match(match)

        print("\n=== 剪辑片段匹配 ===")
        for clip_match in self.clip_matches:
            self._print_clip_match(clip_match)

        print("\n=== 混剪视频匹配 ===")
        for mashup_match in self.mashup_matches:
            self._print_mashup_match(mashup_match)

        self._save_results_to_file()

    def _print_video_match(self, match):
        """Print one full-video match (paths, duration, resolution, size)."""
        video1 = match['video1']
        video2 = match['video2']
        similarity = match['similarity']
        
        print("\n" + "="*80)
        print(f"相似度: {similarity:.2%}")
        print(f"视频1: {os.path.basename(video1.filename)}")
        print(f"  路径: {video1.filename}")
        print(f"  时长: {video1.duration/1000:.1f}秒")
        print(f"  分辨率: {video1.width}x{video1.height}")
        print(f"  大小: {video1.size/1024/1024:.1f}MB")
        
        print(f"\n视频2: {os.path.basename(video2.filename)}")
        print(f"  路径: {video2.filename}")
        print(f"  时长: {video2.duration/1000:.1f}秒")
        print(f"  分辨率: {video2.width}x{video2.height}")
        print(f"  大小: {video2.size/1024/1024:.1f}MB")
        print("="*80)

    def _print_clip_match(self, clip_match):
        """Print one clip-segment match with per-clip timestamps."""
        video1 = clip_match['video1']
        video2 = clip_match['video2']
        
        print(f"\n发现剪辑片段匹配:")
        print(f"源视频: {os.path.basename(video1.filename)}")
        print(f"目标视频: {os.path.basename(video2.filename)}")
        
        for clip in clip_match['clips']:
            print(f"\n片段匹配:")
            print(f"  源视频时间: {clip['start1']:.1f}秒")
            print(f"  目标视频时间: {clip['start2']:.1f}秒")
            print(f"  相似度: {clip['similarity']:.2%}")

    def _print_mashup_match(self, mashup_match):
        """Print one mashup detection with its matched source segments."""
        target_video = mashup_match['target_video']
        print(f"\n发现混剪视频: {os.path.basename(target_video.filename)}")
        print("包含以下源视频片段:")
        
        for segment in mashup_match['segments']:
            source_video = segment['source_match']['source_video']
            source_segment = segment['source_match']['source_segment']
            target_segment = segment['target_segment']
            
            print(f"\n片段匹配:")
            print(f"  源视频: {os.path.basename(source_video.filename)}")
            print(f"  源视频时间: {source_segment['start']:.1f}秒 - {source_segment['end']:.1f}秒")
            print(f"  目标视频时间: {target_segment['start']:.1f}秒 - {target_segment['end']:.1f}秒")
            print(f"  相似度: {segment['source_match']['similarity']:.2%}")

    def _save_results_to_file(self):
        """Write all match categories to a timestamped UTF-8 text file."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        result_file = f"duplicate_videos_{timestamp}.txt"
        
        with open(result_file, "w", encoding="utf-8") as f:
            f.write(f"视频去重结果 - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            
            if self.matches:
                f.write(f"\n=== 完整视频匹配 ===\n")
                f.write(f"共发现 {len(self.matches)} 组重复视频\n\n")
                for match in self.matches:
                    self._write_video_match(f, match)
            
            if self.clip_matches:
                f.write(f"\n=== 剪辑片段匹配 ===\n")
                f.write(f"共发现 {len(self.clip_matches)} 组剪辑片段\n\n")
                for clip_match in self.clip_matches:
                    self._write_clip_match(f, clip_match)
                    
            if self.mashup_matches:
                f.write(f"\n=== 混剪视频匹配 ===\n")
                f.write(f"共发现 {len(self.mashup_matches)} 个混剪视频\n\n")
                for mashup_match in self.mashup_matches:
                    self._write_mashup_match(f, mashup_match)
        
        print(f"\n结果已保存到文件: {result_file}")

    def _write_video_match(self, f, match):
        """Write one full-video match to the open result file."""
        video1 = match['video1']
        video2 = match['video2']
        similarity = match['similarity']
        
        f.write("="*80 + "\n")
        f.write(f"相似度: {similarity:.2%}\n")
        f.write(f"视频1: {video1.filename}\n")
        f.write(f"  时长: {video1.duration/1000:.1f}秒\n")
        f.write(f"  分辨率: {video1.width}x{video1.height}\n")
        f.write(f"  大小: {video1.size/1024/1024:.1f}MB\n")
        
        f.write(f"\n视频2: {video2.filename}\n")
        f.write(f"  时长: {video2.duration/1000:.1f}秒\n")
        f.write(f"  分辨率: {video2.width}x{video2.height}\n")
        f.write(f"  大小: {video2.size/1024/1024:.1f}MB\n")
        f.write("="*80 + "\n\n")

    def _write_clip_match(self, f, clip_match):
        """Write one clip-segment match to the open result file."""
        video1 = clip_match['video1']
        video2 = clip_match['video2']
        
        f.write("="*80 + "\n")
        f.write(f"剪辑片段匹配: {os.path.basename(video1.filename)} 和 {os.path.basename(video2.filename)}\n")
        f.write("="*80 + "\n")
        
        for clip in clip_match['clips']:
            f.write(f"  源视频时间: {clip['start1']:.1f}秒\n")
            f.write(f"  目标视频时间: {clip['start2']:.1f}秒\n")
            f.write(f"  相似度: {clip['similarity']:.2%}\n")
            f.write("="*80 + "\n")

    def _write_mashup_match(self, f, mashup_match):
        """Write one mashup detection to the open result file."""
        target_video = mashup_match['target_video']
        f.write("="*80 + "\n")
        f.write(f"混剪视频: {os.path.basename(target_video.filename)}\n")
        f.write("包含以下源视频片段:\n")
        
        for segment in mashup_match['segments']:
            source_video = segment['source_match']['source_video']
            source_segment = segment['source_match']['source_segment']
            target_segment = segment['target_segment']
            
            f.write(f"\n片段匹配:\n")
            f.write(f"  源视频: {os.path.basename(source_video.filename)}\n")
            f.write(f"  源视频时间: {source_segment['start']:.1f}秒 - {source_segment['end']:.1f}秒\n")
            f.write(f"  目标视频时间: {target_segment['start']:.1f}秒 - {target_segment['end']:.1f}秒\n")
            f.write(f"  相似度: {segment['source_match']['similarity']:.2%}\n")
        
        f.write("="*80 + "\n")


class FeatureExtractor:
    """Extracts compact per-frame visual features used for similarity scoring.

    Features returned per frame (BGR image, any size):
      * 'hist'         — normalized 2-D Hue/Saturation histogram (8x8 bins, flattened)
      * 'edge_density' — fraction of Canny edge pixels, in [0, 1]
      * 'dct'          — top-left 8x8 block of the grayscale DCT, flattened
    """

    def __init__(self):
        self._ssim_size = SSIM_SIZE
        self._pHashSize = PHASH_SIZE
        self._almostBlackBitmap = ALMOST_BLACK_BITMAP

    def extract_frame_feature(self, frame):
        """Return the multi-modal feature dict for a single BGR frame."""
        features = {}

        features.update(self._extract_basic_visual_features(frame))

        return features

    def _extract_basic_visual_features(self, frame):
        """Extract basic visual features (histogram, edges, DCT) from a frame."""
        # 1. Resize to a fixed small size so features are scale-invariant.
        resized = cv2.resize(frame, (64, 64))

        # 2. Convert to HSV colour space.
        hsv = cv2.cvtColor(resized, cv2.COLOR_BGR2HSV)

        # 3. Hue/Saturation colour histogram, normalized and flattened.
        hist = cv2.calcHist([hsv], [0, 1], None, [8, 8], [0, 180, 0, 256])
        hist = cv2.normalize(hist, hist).flatten()

        # 4. Edge density as the FRACTION of edge pixels.
        #    BUGFIX: cv2.Canny outputs a binary map of 0/255, so the previous
        #    np.sum(edges) / pixel_count produced a value in [0, 255], while
        #    the consumer (_compare_frame_features: 1 - abs(d1 - d2)) assumes
        #    a [0, 1] fraction. Count non-zero pixels instead.
        gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 100, 200)
        edge_density = np.count_nonzero(edges) / edges.size

        # 5. Low-frequency DCT coefficients (8x8 top-left block).
        dct = cv2.dct(gray.astype(np.float32))
        dct_feature = dct[:8, :8].flatten()

        return {
            'hist': hist,
            'edge_density': edge_density,
            'dct': dct_feature
        }


if __name__ == "__main__":
    import sys

    # Exactly one positional argument is required: the directory to scan.
    if len(sys.argv) < 2:
        print("Usage: python video_processor.py <directory>")
        sys.exit(1)

    preferences = Prefs()
    video_processor = VideoProcessor(preferences)
    video_processor.find_duplicates(
        folders_to_search=sys.argv[1],
        save_scenes=False,        # do not export detected scene clips
        scene_save_path="clips"   # directory used when save_scenes is True
    )