import os
import subprocess
import cv2
import numpy as np
from database import Db
from config import *
from utils import p_hash, hamming_distance
from thumbnail import Thumbnail
from tempfile import TemporaryDirectory


class Prefs:
    """User-tunable settings for the video comparison pipeline.

    Attributes keep their leading underscore because the rest of the
    code base reads them directly (e.g. ``prefs._thumbnails``).
    """

    def __init__(self):
        # Bookkeeping: how many videos are in the current scan.
        self._numberOfVideos = 0

        # Thumbnail / comparison configuration.
        self._thumbnails = 1       # 1 selects the dual-thumbnail mode
        self._comparisonMode = 0

        # SSIM settings.
        self._ssimBlockSize = 16
        self._thresholdSSIM = 0.85  # lowered to catch more potential duplicates

        # Duration-based score modifiers and pHash threshold.
        self._differentDurationModifier = 4
        self._sameDurationModifier = 1
        self._thresholdPhash = 57

        # Match-acceptance limits.
        self._minSimilarity = 0.80   # minimum similarity score
        self._maxDurationDiff = 1000  # maximum duration difference (ms)
        self._maxSizeDiff = 0.1       # maximum file-size difference ratio (10%)


class Video:
    """A single video file: metadata, perceptual fingerprints and scene data.

    Metadata probing and frame capture are delegated to ffprobe/ffmpeg via
    subprocess; frame analysis uses OpenCV. Results are cached through ``Db``.
    """

    def __init__(self, prefs, filename):
        self.prefs = prefs
        self.filename = filename
        self.duration = 0        # milliseconds
        self.bitrate = 0         # kbps
        self.codec = ""
        self.width = 0
        self.height = 0
        self.framerate = 0
        self.audio = ""          # audio codec name
        self.channels = 0        # audio channel count (filled by metadata parsers)
        self.size = 0            # bytes
        self.modified = None     # file mtime timestamp
        self.hash = [0] * 2      # up to two perceptual hashes
        self.grayThumb = [None] * 2  # grayscale thumbnails used for SSIM
        self.thumbnail = None    # JPEG bytes of the capture grid
        # Use lower JPEG quality for very large collections to save space.
        self._jpegQuality = LOW_JPEG_QUALITY if prefs._numberOfVideos > 1000 else OK_JPEG_QUALITY
        self._ssimSize = SSIM_SIZE
        self._pHashSize = PHASH_SIZE
        self._almostBlackBitmap = ALMOST_BLACK_BITMAP
        self._isValid = True
        self._errorMessage = ""
        self.frame_features = []   # per-key-frame feature dicts
        self.key_frames = []       # frames captured at detected scene changes
        self.scene_changes = []    # scene-change timestamps (seconds)
        self._frame_interval = 0.5  # sampling interval in seconds
        self.scene_segments = []   # list of {'start', 'end', 'frames'}
        self._scene_threshold = 20  # mean-abs-diff threshold for a scene cut

    def process(self, save_path=None):
        """Validate the file, then gather metadata, scenes and fingerprints.

        Returns True on success. On any failure, sets ``_isValid`` to False,
        records a human-readable ``_errorMessage`` and returns False.
        """
        if not os.path.exists(self.filename):
            self._isValid = False
            self._errorMessage = "文件不存在"
            return False

        # Reject files outside the configured size window.
        self.size = os.path.getsize(self.filename)
        if self.size < MIN_VIDEO_SIZE or self.size > MAX_VIDEO_SIZE:
            self._isValid = False
            self._errorMessage = f"文件大小超出范围 ({self.size / 1024 / 1024:.1f}MB)"
            return False

        cache = Db(self.filename)
        if not cache.readMetadata(self):
            self.getMetadata()
            cache.writeMetadata(self)

        # Reject videos outside the configured duration window.
        if self.duration < MIN_VIDEO_DURATION or self.duration > MAX_VIDEO_DURATION:
            self._isValid = False
            self._errorMessage = f"视频时长超出范围 ({self.duration / 1000:.1f}秒)"
            return False

        if self.width == 0 or self.height == 0 or self.duration == 0:
            self._isValid = False
            self._errorMessage = "无法获取视频元数据"
            return False

        if not self._detect_scene_changes(save_path):
            # Bug fix: mark the video invalid like every other failure path.
            self._isValid = False
            self._errorMessage = "无法检测场景变化"
            return False

        ret = self.takeScreenCaptures(cache)
        if ret == -1:
            self._isValid = False
            self._errorMessage = "无法获取视频截图"
            return False
        elif (self.prefs._thumbnails != 1 and self.hash[0] == 0) or \
                (self.prefs._thumbnails == 1 and self.hash[0] == 0 and self.hash[1] == 0):
            self._isValid = False
            self._errorMessage = "无法生成视频指纹"
            return False

        return True

    def get_error_message(self):
        """Return the last recorded error message (empty if none)."""
        return self._errorMessage

    def getMetadata(self):
        """Probe the file with ffprobe (JSON); fall back to ffmpeg's banner.

        Argument lists (shell=False) are used so filenames containing quotes
        or shell metacharacters cannot break or inject into the command.
        """
        command = ['ffprobe', '-v', 'error', '-show_streams', '-show_format',
                   '-of', 'json', self.filename]
        result = subprocess.run(command, capture_output=True, text=True)

        if result.returncode != 0:
            print(f"FFprobe错误: {result.stderr}")
            # Fallback: `ffmpeg -i` with no output file ALWAYS exits non-zero
            # ("At least one output file must be specified"), so do not gate
            # parsing on the return code — the metadata dump goes to stderr.
            command = ['ffmpeg', '-hide_banner', '-i', self.filename]
            result = subprocess.run(command, capture_output=True, text=True)
            self._parseMetadataFromText(result.stderr)
        else:
            self._parseMetadataFromJSON(result.stdout)

        # File size and modification time come from the filesystem.
        self.size = os.path.getsize(self.filename)
        self.modified = os.path.getmtime(self.filename)

    def _parseMetadataFromJSON(self, json_output):
        """Populate metadata fields from ffprobe's JSON output."""
        try:
            import json
            data = json.loads(json_output)

            # Container-level info: duration (s -> ms) and bitrate (bps -> kbps).
            format_info = data.get('format', {})
            if 'duration' in format_info:
                self.duration = int(float(format_info['duration']) * 1000)
            if 'bit_rate' in format_info:
                self.bitrate = int(format_info['bit_rate']) // 1000

            # First video stream, if any.
            video_stream = next((s for s in data.get('streams', []) if s.get('codec_type') == 'video'), None)
            if video_stream:
                self.codec = video_stream.get('codec_name', '')
                self.width = video_stream.get('width', 0)
                self.height = video_stream.get('height', 0)

                # Parse "num/denom"; guard the "0/0" ffprobe emits for streams
                # with an unknown rate (would raise ZeroDivisionError).
                if 'avg_frame_rate' in video_stream:
                    num, denom = video_stream['avg_frame_rate'].split('/')
                    if float(denom) != 0:
                        self.framerate = round(float(num) / float(denom), 1)

            # First audio stream, if any.
            audio_stream = next((s for s in data.get('streams', []) if s.get('codec_type') == 'audio'), None)
            if audio_stream:
                self.audio = audio_stream.get('codec_name', '')
                self.channels = audio_stream.get('channels', 0)

        except Exception as e:
            print(f"解析JSON元数据出错: {e}")

    def _parseMetadataFromText(self, output):
        """Populate metadata fields from ffmpeg's human-readable banner output."""
        lines = output.split('\n')
        rotated_once = False

        for line in lines:
            # Duration line: "  Duration: HH:MM:SS.cc, start: ..., bitrate: N kb/s"
            if " Duration:" in line:
                time = line.split(" ")[3]
                if time != "N/A,":
                    h = int(time[0:2])
                    m = int(time[3:5])
                    s = int(time[6:8])
                    ms = int(time[9:11])  # ffmpeg prints centiseconds
                    self.duration = h * 60 * 60 * 1000 + m * 60 * 1000 + s * 1000 + ms * 10

                # Overall bitrate on the same line.
                if "bitrate:" in line:
                    bitrate_str = line.split("bitrate: ")[1].split(" ")[0]
                    if bitrate_str.isdigit():
                        self.bitrate = int(bitrate_str)

            # Video stream line.
            if " Video:" in line and ("kb/s" in line or " fps" in line or output.count(" Video:") == 1):
                parts = line.split()
                codec_index = parts.index("Video:") + 1
                self.codec = parts[codec_index].split('(')[0]

                # Resolution token, e.g. "1920x1080," — strip the trailing
                # comma ffmpeg adds, otherwise isdigit() never matches.
                for part in parts:
                    token = part.rstrip(',')
                    if 'x' in token and token.replace('x', '').isdigit():
                        res = token.split('x')
                        self.width = int(res[0])
                        self.height = int(res[1])
                        break

                # Frame rate: ffmpeg prints two tokens ("29.97 fps,"); also
                # accept a fused "30fps" token for robustness.
                for i, part in enumerate(parts):
                    token = part.rstrip(',')
                    if token == 'fps' and i > 0:
                        fps_str = parts[i - 1].rstrip(',')
                    elif token.endswith('fps') and token != 'fps':
                        fps_str = token[:-3]
                    else:
                        continue
                    try:
                        self.framerate = round(float(fps_str), 1)
                    except ValueError:
                        pass

            # Audio stream line.
            if " Audio:" in line:
                parts = line.split()
                audio_codec = parts[parts.index("Audio:") + 1]
                self.audio = audio_codec

                # Channel count, e.g. "2 channels," (token may carry a comma).
                for i, part in enumerate(parts):
                    if part.rstrip(',') == "channels":
                        try:
                            self.channels = int(parts[i - 1])
                        except (ValueError, IndexError):
                            pass
                        break

            # Rotation metadata: swap dimensions once for 90/270 degrees.
            if "rotate" in line and not rotated_once:
                try:
                    rotate = int(line.split(":")[1])
                    if rotate in [90, 270]:
                        self.width, self.height = self.height, self.width
                    rotated_once = True
                except (ValueError, IndexError):
                    pass

    def takeScreenCaptures(self, cache):
        """Collect captures (cache first, ffmpeg otherwise) and fingerprint them.

        Returns 1 on success and -1 when too little of the video is usable.
        """
        thumb = Thumbnail(self.prefs._thumbnails)
        ofDuration = 100
        for percent in thumb.percentages():
            frame = None
            cachedImage = cache.readCapture(percent)
            if cachedImage:
                frame = self.load_image_from_bytes(cachedImage)
                # Guard against a corrupt cache entry (decode returned None).
                if frame is not None:
                    frame = self.resize_image(frame, self.width, self.height)
            else:
                frame = self.captureAt(percent, ofDuration)
                if frame is None:
                    # Capture failed: shrink the usable duration and try the
                    # next position; give up once too little remains usable.
                    ofDuration = ofDuration - GO_BACKWARDS_PERCENT
                    if ofDuration >= VIDEO_STILL_USABLE:
                        continue
                    return -1
                cache.writeCapture(percent, self.image_to_bytes(frame))

            if frame is not None:
                thumb.add_image(frame)

        # Dual-thumbnail mode produces two hashes (left/right half of the grid).
        hashes = 2 if self.prefs._thumbnails == 1 else 1
        self.processThumbnail(thumb, hashes)
        return 1

    def processThumbnail(self, thumbnail, hashes):
        """Assemble captures into a grid, compute hashes and gray thumbnails,
        and store the minimized grid as JPEG bytes in ``self.thumbnail``."""
        if not thumbnail.images:
            return

        # Grid geometry comes from the Thumbnail layout and the first capture.
        rows = thumbnail.rows()
        cols = thumbnail.cols()
        cell_height = thumbnail.images[0].shape[0]
        cell_width = thumbnail.images[0].shape[1]

        # Blank canvas for the capture grid.
        grid = np.zeros((cell_height * rows, cell_width * cols, 3), dtype=np.uint8)

        # Paste each capture into its cell (row-major order).
        for idx, img in enumerate(thumbnail.images):
            if idx >= rows * cols:
                break
            i, j = divmod(idx, cols)
            grid[i * cell_height:(i + 1) * cell_height, j * cell_width:(j + 1) * cell_width] = img

        for hashIndex in range(hashes):
            if self.prefs._thumbnails == 1:
                # Dual mode: hash the left and right halves separately.
                width = grid.shape[1]
                image = grid[:, hashIndex * width // 2:(hashIndex + 1) * width // 2]
            else:
                image = grid

            self.hash[hashIndex] = self.computePhash(image)

            # Downscale + grayscale copy kept for later SSIM comparison.
            resized_image = cv2.resize(image, (self._ssimSize, self._ssimSize), interpolation=cv2.INTER_AREA)
            gray_image = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY)
            self.grayThumb[hashIndex] = gray_image.astype(np.float32)

        # Shrink the grid and store it as JPEG bytes.
        # (Use a fresh name instead of shadowing the `thumbnail` parameter.)
        minimized = self.minimizeImage(grid)
        _, buffer = cv2.imencode('.jpg', minimized, [cv2.IMWRITE_JPEG_QUALITY, self._jpegQuality])
        self.thumbnail = buffer.tobytes()

    def computePhash(self, input_image):
        """Compute a 64-bit DCT perceptual hash; returns 0 for near-black frames."""
        resize_img = cv2.resize(input_image, (self._pHashSize, self._pHashSize), interpolation=cv2.INTER_AREA)
        gray_img = cv2.cvtColor(resize_img, cv2.COLOR_BGR2GRAY)

        # Near-black detection: total absolute deviation from the first pixel.
        # Bug fix: do the arithmetic in a signed dtype — subtracting uint8
        # values wraps around modulo 256 and corrupts the sum. The original
        # skipped row 0, so keep that slice to preserve the threshold scale.
        first_pixel = int(gray_img[0, 0])
        deviation = np.abs(gray_img.astype(np.int32) - first_pixel)
        shades_of_gray = int(deviation[1:, :].sum())

        if shades_of_gray < self._almostBlackBitmap:
            return 0

        # 2D DCT; keep the 8x8 low-frequency block.
        dct_img = cv2.dct(gray_img.astype(np.float32))
        top_left_dct = dct_img[:8, :8]

        # Average of the block excluding the DC term (63 remaining coefficients).
        average = (np.sum(top_left_dct) - top_left_dct[0, 0]) / 63

        # One bit per coefficient: set when above the average.
        hash_value = 0
        for i in range(8):
            for j in range(8):
                if top_left_dct[i, j] > average:
                    hash_value |= 1 << (i * 8 + j)

        return hash_value

    def minimizeImage(self, image):
        """Downscale `image` to fit within 200x200, preserving aspect ratio."""
        max_width = 200
        max_height = 200
        height, width = image.shape[:2]
        if width > height:
            if width > max_width:
                new_height = int(height * (max_width / width))
                return cv2.resize(image, (max_width, new_height), interpolation=cv2.INTER_AREA)
        elif height > max_height:
            new_width = int(width * (max_height / height))
            return cv2.resize(image, (new_width, max_height), interpolation=cv2.INTER_AREA)
        return image

    def captureAt(self, percent, ofDuration):
        """Grab one frame at `percent`% of (`ofDuration`% of) the video.

        Returns an RGB ndarray, or None when ffmpeg produced no usable frame.
        """
        with TemporaryDirectory() as temp_dir:
            screenshot = os.path.join(temp_dir, f"vidupe{percent}.bmp")
            timestamp = self.msToHHMMSS(self.duration * (percent * ofDuration) / (100 * 100))
            # Argument list (shell=False) so odd filenames cannot break quoting.
            command = ['ffmpeg', '-ss', timestamp, '-i', self.filename,
                       '-an', '-frames:v', '1', '-pix_fmt', 'rgb24', screenshot]
            subprocess.run(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            if os.path.exists(screenshot):
                img = cv2.imread(screenshot)
                # imread returns None for truncated/corrupt files — guard it.
                if img is not None:
                    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            return None

    def msToHHMMSS(self, time):
        """Format a millisecond count as HH:MM:SS.mmm for ffmpeg's -ss option."""
        hours = int(time / (1000 * 60 * 60)) % 24
        minutes = int(time / (1000 * 60)) % 60
        seconds = int(time / 1000) % 60
        msecs = int(time % 1000)
        # Bug fix: pad milliseconds to three digits — without padding,
        # "00:00:01.5" is read by ffmpeg as 1.5 s instead of 1.005 s.
        return f"{hours:02d}:{minutes:02d}:{seconds:02d}.{msecs:03d}"

    # Helpers replacing the PyQt5 image plumbing with OpenCV equivalents.
    def load_image_from_bytes(self, data):
        """Decode encoded image bytes into an RGB ndarray (None if undecodable)."""
        nparr = np.frombuffer(data, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        if img is None:  # corrupt cache entry
            return None
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    def resize_image(self, image, width, height):
        """Resize `image` to exactly (width, height)."""
        return cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)

    def image_to_bytes(self, image):
        """Encode an RGB ndarray as JPEG bytes."""
        _, buffer = cv2.imencode('.jpg', cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
        return buffer.tobytes()

    def _detect_scene_changes(self, save_path=None):
        """Detect scene changes by differencing sampled frames.

        Fills ``scene_segments`` / ``scene_changes`` / ``key_frames`` /
        ``frame_features``; optionally writes each segment under `save_path`.
        Returns True on success, False on any error.
        """
        cap = None
        try:
            cap = cv2.VideoCapture(self.filename)
            if not cap.isOpened():
                return False

            fps = cap.get(cv2.CAP_PROP_FPS)
            if fps <= 0:
                # Some containers report 0 fps; fall back to a sane default
                # so the timestamps below stay finite.
                fps = 25.0
            # Bug fix: clamp to >= 1, otherwise `frame_idx % frame_interval`
            # divides by zero whenever fps * interval < 1.
            frame_interval = max(1, int(fps * self._frame_interval))

            prev_frame = None
            frame_idx = 0
            scene_start = 0
            current_scene = []

            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                if frame_idx % frame_interval == 0:
                    if prev_frame is not None:
                        # Mean absolute difference between consecutive samples.
                        diff = cv2.absdiff(frame, prev_frame)
                        diff_mean = np.mean(diff)

                        # A difference above the threshold marks a scene cut.
                        if diff_mean > self._scene_threshold:
                            scene_end = frame_idx / fps

                            # Close the current scene segment.
                            if len(current_scene) > 0:
                                self.scene_segments.append({
                                    'start': scene_start,
                                    'end': scene_end,
                                    'frames': current_scene
                                })

                                if save_path:
                                    self._save_scene_segment(
                                        save_path,
                                        scene_start,
                                        scene_end,
                                        current_scene
                                    )

                            scene_start = scene_end
                            current_scene = []
                            self.scene_changes.append(scene_end)
                            self.key_frames.append(frame)

                            # Record the cut frame's feature vector.
                            frame_feature = self._extract_frame_feature(frame)
                            self.frame_features.append(frame_feature)

                    current_scene.append(frame)
                    prev_frame = frame.copy()

                frame_idx += 1

            # Flush the final (unterminated) scene.
            if len(current_scene) > 0:
                scene_end = frame_idx / fps
                self.scene_segments.append({
                    'start': scene_start,
                    'end': scene_end,
                    'frames': current_scene
                })
                if save_path:
                    self._save_scene_segment(
                        save_path,
                        scene_start,
                        scene_end,
                        current_scene
                    )

            return True

        except Exception as e:
            print(f"场景检测错误: {str(e)}")
            return False
        finally:
            # Release the capture even when an exception escaped the loop.
            if cap is not None:
                cap.release()

    def _save_scene_segment(self, save_path, start_time, end_time, frames):
        """Write one scene segment as an .mp4 plus a .jpg preview of its first frame."""
        try:
            os.makedirs(save_path, exist_ok=True)

            # Name pattern: <video-basename>_<start>_<end>
            base_name = os.path.splitext(os.path.basename(self.filename))[0]
            segment_name = f"{base_name}_{start_time:.1f}_{end_time:.1f}"

            if len(frames) > 0:
                height, width = frames[0].shape[:2]
                fourcc = cv2.VideoWriter_fourcc(*'mp4v')
                # NOTE(review): fps is hard-coded to 30 here, but only sampled
                # frames are written — playback speed of the segment will not
                # match the source; confirm whether that is intended.
                out = cv2.VideoWriter(
                    os.path.join(save_path, f"{segment_name}.mp4"),
                    fourcc,
                    30.0,  # fps
                    (width, height)
                )

                for frame in frames:
                    out.write(frame)
                out.release()

                # First frame doubles as the preview image.
                cv2.imwrite(
                    os.path.join(save_path, f"{segment_name}.jpg"),
                    frames[0]
                )
        except Exception as e:
            print(f"保存场景片段错误: {str(e)}")

    def _extract_frame_feature(self, frame):
        """Extract a compact feature dict (color hist, edge density, DCT) for a frame."""
        # 1. Normalize size so features are comparable across videos.
        resized = cv2.resize(frame, (64, 64))

        # 2. HSV is more robust to lighting changes than BGR for histograms.
        hsv = cv2.cvtColor(resized, cv2.COLOR_BGR2HSV)

        # 3. 8x8 hue/saturation histogram, normalized and flattened.
        hist = cv2.calcHist([hsv], [0, 1], None, [8, 8], [0, 180, 0, 256])
        hist = cv2.normalize(hist, hist).flatten()

        # 4. Edge density (Canny edges are 0/255, so this is 255 * fraction).
        gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 100, 200)
        edge_density = np.sum(edges) / (edges.shape[0] * edges.shape[1])

        # 5. Low-frequency DCT coefficients as a texture signature.
        dct = cv2.dct(gray.astype(np.float32))
        dct_feature = dct[:8, :8].flatten()

        return {
            'hist': hist,
            'edge_density': edge_density,
            'dct': dct_feature
        }
