import os
import sys

import cv2
import face_recognition  # face-recognition library (dlib-based)
import numpy as np
import tensorflow as tf
import torch
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from PyQt5.QtCore import Qt, QThread, pyqtSignal, QRectF, QPointF, QTimer, QUrl, QSize, QTime
from PyQt5.QtGui import QIcon, QPixmap, QImage, QPainter, QPen, QColor, QPainterPath
from PyQt5.QtMultimedia import QMediaPlayer, QMediaContent
# QVideoWidget lives in QtMultimediaWidgets; importing it from QtWidgets
# (as the original code did) raises ImportError at startup.
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtWidgets import (QApplication, QMainWindow, QTabWidget, QWidget, QVBoxLayout,
                             QHBoxLayout, QPushButton, QLabel, QFileDialog, QListWidget,
                             QComboBox, QSplitter, QMessageBox, QToolBar, QAction, QStatusBar,
                             QSlider, QSpinBox, QDoubleSpinBox, QGroupBox, QGridLayout,
                             QDialog, QDialogButtonBox, QGraphicsScene, QGraphicsView,
                             QGraphicsPixmapItem, QInputDialog, QScrollArea,
                             QStyle, QSizePolicy)


class VideoThread(QThread):
    """Worker thread that reads frames from a camera or a video file.

    Each frame is optionally passed through ``process_image`` and then
    emitted as a BGR numpy array on ``change_pixmap_signal``.  Video
    files loop forever; a camera source stops when reads start failing.
    """

    change_pixmap_signal = pyqtSignal(np.ndarray)

    def __init__(self, source=0, processing_method=None):
        """
        Args:
            source: camera index (int) or video-file path (str).
            processing_method: display name of the processing method to
                apply per frame (see ``process_image``), or None for
                raw frames.
        """
        super().__init__()
        self._run_flag = True
        self.source = source
        self.processing_method = processing_method
        self.processing_params = {}

    def run(self):
        # Open the camera or video file.
        cap = cv2.VideoCapture(self.source)
        if not cap.isOpened():
            print(f"Error: Could not open video source {self.source}")
            return

        while self._run_flag:
            ret, cv_img = cap.read()
            if ret:
                # Apply the selected processing method, if any.
                if self.processing_method:
                    cv_img = self.process_image(cv_img)
                self.change_pixmap_signal.emit(cv_img)
            else:
                # End of stream: rewind video files, stop for cameras.
                if isinstance(self.source, str):
                    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                else:
                    break

        cap.release()

    def stop(self):
        """Ask the capture loop to exit and wait for the thread to finish."""
        self._run_flag = False
        self.wait()

    def set_processing_method(self, method, params=None):
        """Select the per-frame processing method and optional parameters."""
        self.processing_method = method
        if params:
            self.processing_params = params

    def process_image(self, image):
        """Apply the configured processing method to one BGR frame.

        Returns the processed frame; unrecognized methods and no-op
        parameter values return the frame unchanged.
        """
        # Start from the unmodified frame so every code path has a result.
        # The original code left ``processed`` unbound (UnboundLocalError)
        # when e.g. brightness and contrast were both 0.
        processed = image

        if self.processing_method == "灰度化":
            processed = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            processed = cv2.cvtColor(processed, cv2.COLOR_GRAY2BGR)  # back to BGR for display

        elif self.processing_method == "边缘检测":
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            processed = cv2.Canny(gray, 100, 200)
            processed = cv2.cvtColor(processed, cv2.COLOR_GRAY2BGR)

        elif self.processing_method == "图像增强":
            # Simple linear contrast (alpha) / brightness (beta) boost.
            processed = cv2.convertScaleAbs(image, alpha=1.5, beta=10)

        elif self.processing_method == "对象检测":
            # Simplified example using OpenCV's Haar cascade face detector.
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            processed = image.copy()
            for (x, y, w, h) in faces:
                cv2.rectangle(processed, (x, y), (x + w, y + h), (255, 0, 0), 2)

        elif self.processing_method == "人脸识别":
            # Face database and tolerance are supplied via processing_params.
            db = self.processing_params.get('face_database', {})
            tolerance = self.processing_params.get('face_tolerance', 0.6)
            processed = self.perform_face_recognition(image, db, tolerance)

        elif self.processing_method == "旋转":
            angle = self.processing_params.get('angle', 0)
            height, width = image.shape[:2]
            center = (width // 2, height // 2)
            rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
            processed = cv2.warpAffine(image, rotation_matrix, (width, height),
                                       flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)

        elif self.processing_method == "缩放":
            scale = self.processing_params.get('scale', 1.0)
            processed = cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)

        elif self.processing_method == "亮度/对比度":
            brightness = self.processing_params.get('brightness', 0)
            contrast = self.processing_params.get('contrast', 0)

            # Brightness: constant offset.  addWeighted handles both signs,
            # so the original duplicated >0 / <0 branches were merged.
            if brightness != 0:
                processed = cv2.addWeighted(image, 1, np.zeros(image.shape, image.dtype), 0, brightness)

            # Contrast: linear stretch around mid-grey (128), applied to the
            # brightness-adjusted frame.  The original applied it to the raw
            # frame, silently discarding the brightness step — inconsistent
            # with ProcessingTab.process_image.
            if contrast != 0:
                factor = (259 * (contrast + 255)) / (255 * (259 - contrast))
                processed = np.clip(128 + factor * (processed.astype(np.float32) - 128), 0, 255).astype(np.uint8)

        elif self.processing_method == "饱和度":
            saturation = self.processing_params.get('saturation', 0) / 100.0

            # Work in HSV so only the saturation channel is touched.
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            h, s, v = cv2.split(hsv)

            # One formula scales saturation up or down (the original had two
            # byte-identical branches for the two signs).
            if saturation != 0:
                s = np.clip(s.astype(np.float32) * (1 + saturation), 0, 255).astype(np.uint8)

            hsv = cv2.merge([h, s, v])
            processed = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        elif self.processing_method == "平滑处理":
            method_idx = self.processing_params.get('smooth_method', 0)
            kernel_size = self.processing_params.get('kernel_size', 3)
            # Gaussian/median kernels must be odd or OpenCV raises; round
            # even parameter values up instead of crashing.
            odd_kernel = kernel_size if kernel_size % 2 == 1 else kernel_size + 1

            if method_idx == 0:  # Gaussian blur
                processed = cv2.GaussianBlur(image, (odd_kernel, odd_kernel), 0)
            elif method_idx == 1:  # median blur
                processed = cv2.medianBlur(image, odd_kernel)
            elif method_idx == 2:  # bilateral filter (diameter may be even)
                processed = cv2.bilateralFilter(image, kernel_size, 75, 75)

        elif self.processing_method == "形状识别":
            shape_type = self.processing_params.get('shape_type', "所有形状")
            min_area = self.processing_params.get('min_area', 500)

            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            # Otsu binarization (inverted: shapes darker than background).
            _, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

            contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

            processed = image.copy()

            for contour in contours:
                area = cv2.contourArea(contour)
                if area < min_area:
                    continue

                # Polygonal approximation: vertex count identifies the shape.
                epsilon = 0.02 * cv2.arcLength(contour, True)
                approx = cv2.approxPolyDP(contour, epsilon, True)

                # Contour centroid, used to place the label.
                M = cv2.moments(contour)
                if M["m00"] != 0:
                    cX = int(M["m10"] / M["m00"])
                    cY = int(M["m01"] / M["m00"])
                else:
                    cX, cY = 0, 0

                shape_name = "未知"

                if shape_type == "所有形状" or shape_type == "多边形":
                    if len(approx) == 3:
                        shape_name = "三角形"
                    elif len(approx) == 4:
                        shape_name = "矩形"
                    elif 5 <= len(approx) <= 10:
                        shape_name = "多边形"
                    else:
                        # Many vertices: test circularity against the minimum
                        # enclosing circle (area ratio near 1 means a circle).
                        (x, y), radius = cv2.minEnclosingCircle(contour)
                        radius = int(radius)
                        area_ratio = area / (np.pi * radius * radius)
                        if area_ratio > 0.8:
                            shape_name = "圆形"

                elif shape_type == "圆形":
                    (x, y), radius = cv2.minEnclosingCircle(contour)
                    radius = int(radius)
                    area_ratio = area / (np.pi * radius * radius)
                    if area_ratio > 0.8:
                        shape_name = "圆形"

                elif shape_type == "矩形":
                    if len(approx) == 4:
                        shape_name = "矩形"

                elif shape_type == "三角形":
                    if len(approx) == 3:
                        shape_name = "三角形"

                # Draw the contour and its label.
                if shape_name != "未知":
                    cv2.drawContours(processed, [contour], -1, (0, 255, 0), 2)
                    cv2.putText(processed, shape_name, (cX, cY),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)

        return processed

    def perform_face_recognition(self, image, face_database, face_tolerance):
        """Detect faces in ``image`` and label them against ``face_database``.

        Args:
            image: BGR frame.
            face_database: mapping of name -> face encoding.
            face_tolerance: maximum face distance counted as a match.

        Returns:
            A copy of the frame with boxes and name/similarity labels drawn.
        """
        # The face_recognition library expects RGB input.
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        face_locations = face_recognition.face_locations(rgb_image)
        face_encodings = face_recognition.face_encodings(rgb_image, face_locations)

        # Draw on a copy so the caller's frame is left untouched.
        result_image = image.copy()

        for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
            cv2.rectangle(result_image, (left, top), (right, bottom), (0, 255, 0), 2)

            if len(face_database) > 0:
                # Compare this face against every known encoding.
                matches = []
                names = []
                distances = []

                for name, db_encoding in face_database.items():
                    # Euclidean distance between face encodings.
                    distance = face_recognition.face_distance([db_encoding], face_encoding)[0]
                    matches.append(distance <= face_tolerance)
                    names.append(name)
                    distances.append(distance)

                if True in matches:
                    # Best match = smallest distance.
                    best_match_index = np.argmin(distances)
                    best_match_name = names[best_match_index]
                    best_match_distance = distances[best_match_index]

                    # Report the distance as a similarity percentage.
                    similarity = (1 - best_match_distance) * 100

                    text = f"{best_match_name} ({similarity:.1f}%)"
                    cv2.putText(result_image, text, (left, top - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                else:
                    # Face detected but no database entry within tolerance.
                    cv2.putText(result_image, "Unknown", (left, top - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            else:
                # Empty database: only the bounding box is shown.
                cv2.putText(result_image, "No database", (left, top - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

        return result_image


class ImageViewer(QWidget):
    """Minimal image display widget: a centered QLabel that shows either a
    BGR numpy image (scaled to fit 640x480) or a plain text message."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.layout = QVBoxLayout()
        self.image_label = QLabel("No image loaded")
        self.image_label.setAlignment(Qt.AlignCenter)
        self.layout.addWidget(self.image_label)
        self.setLayout(self.layout)

    def setImage(self, image):
        """Show ``image``: an OpenCV BGR array is rendered as a scaled
        pixmap, anything else is displayed as label text."""
        if not isinstance(image, np.ndarray):
            self.image_label.setText(image)
            return
        # OpenCV stores channels as BGR; Qt expects RGB.
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        height, width, channels = rgb.shape
        qt_image = QImage(rgb.data, width, height, channels * width, QImage.Format_RGB888)
        pixmap = QPixmap.fromImage(qt_image)
        self.image_label.setPixmap(pixmap.scaled(640, 480, Qt.KeepAspectRatio))


class AdvancedImageViewer(QGraphicsView):
    """Zoomable, pannable image view built on QGraphicsView.

    Supports mouse-wheel zoom and hand-drag panning; ``setImage`` accepts
    either a BGR numpy array or a text message.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        # NOTE(review): ``self.scene`` shadows QGraphicsView.scene();
        # kept as-is for compatibility with existing attribute access.
        self.scene = QGraphicsScene(self)
        self.setScene(self.scene)
        self.pixmap_item = None
        for hint in (QPainter.Antialiasing, QPainter.SmoothPixmapTransform):
            self.setRenderHint(hint)
        self.setDragMode(QGraphicsView.ScrollHandDrag)
        self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
        self.setResizeAnchor(QGraphicsView.AnchorUnderMouse)

    def setImage(self, image):
        """Replace the scene contents with ``image`` (BGR array or text)."""
        self.scene.clear()
        if not isinstance(image, np.ndarray):
            text_item = self.scene.addText(image)
            text_item.setPos(0, 0)
            return
        # Convert BGR -> RGB for Qt and wrap the buffer in a pixmap item.
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        height, width, channels = rgb.shape
        q_image = QImage(rgb.data, width, height, channels * width, QImage.Format_RGB888)
        self.pixmap_item = QGraphicsPixmapItem(QPixmap.fromImage(q_image))
        self.scene.addItem(self.pixmap_item)
        self.fitInView(self.scene.itemsBoundingRect(), Qt.KeepAspectRatio)

    def wheelEvent(self, event):
        """Zoom in on wheel-up, out on wheel-down."""
        zoom = 1.1 if event.angleDelta().y() >= 0 else 1.0 / 1.1
        self.scale(zoom, zoom)


class ProcessingTab(QWidget):
    def __init__(self, parent=None):
        """Tab for single-image processing: method selector, dynamic
        parameter panel, and side-by-side input/output viewers."""
        super().__init__(parent)
        self.layout = QVBoxLayout()

        # Image state and face-recognition state must exist BEFORE
        # update_parameter_panel() can run: the original initialized
        # face_database/face_tolerance only after calling it, which works
        # solely because the default combo item is not "人脸识别".
        self.current_image = None    # image currently being processed (BGR)
        self.original_image = None   # untouched copy of the loaded image
        self.face_database = {}      # name -> face encoding
        self.face_tolerance = 0.6    # max face distance counted as a match

        # Processing-method selector.
        self.method_combo = QComboBox()
        self.method_combo.addItems([
            "灰度化", "边缘检测", "图像增强", "对象检测", "人脸识别",
            "旋转", "缩放", "裁剪", "亮度/对比度", "饱和度",
            "平滑处理", "形状识别", "图像拼接"
        ])
        self.method_combo.currentIndexChanged.connect(self.update_parameter_panel)

        # Runs the selected method on the loaded image.
        self.process_btn = QPushButton("处理图像")
        self.process_btn.clicked.connect(self.process_image)

        # Parameter panel, rebuilt whenever the method changes.
        self.param_group = QGroupBox("处理参数")
        self.param_layout = QVBoxLayout()
        self.param_group.setLayout(self.param_layout)

        # Input/output display areas.
        self.input_viewer = AdvancedImageViewer()
        self.output_viewer = AdvancedImageViewer()

        top_layout = QHBoxLayout()
        top_layout.addWidget(QLabel("处理方法:"))
        top_layout.addWidget(self.method_combo)
        top_layout.addWidget(self.process_btn)

        mid_layout = QHBoxLayout()
        mid_layout.addWidget(self.input_viewer)
        mid_layout.addWidget(self.output_viewer)

        self.layout.addLayout(top_layout)
        self.layout.addWidget(self.param_group)
        self.layout.addLayout(mid_layout)
        self.setLayout(self.layout)

        # Build the panel for the initially selected method.
        self.update_parameter_panel()

    def load_image(self, image_path):
        """Load an image from ``image_path`` and show it in both viewers.

        cv2.imread returns None (it does not raise) when the file is
        missing or undecodable; the original then crashed on ``.copy()``.
        Warn the user instead.
        """
        image = cv2.imread(image_path)
        if image is None:
            QMessageBox.warning(self, "警告", f"无法加载图像: {image_path}")
            return
        self.original_image = image
        self.current_image = image.copy()
        self.input_viewer.setImage(self.current_image)
        self.output_viewer.setImage(self.current_image)

    def update_parameter_panel(self):
        """Rebuild the parameter panel for the currently selected method.

        Per-method input widgets are stored on ``self`` so that
        process_image() can read their values later.
        """
        # Remove the previous method's controls, including widgets nested
        # inside sub-layouts (see _clear_layout).
        self._clear_layout(self.param_layout)

        method = self.method_combo.currentText()

        if method in ["旋转", "缩放", "亮度/对比度", "饱和度", "平滑处理"]:
            params_grid = QGridLayout()

            if method == "旋转":
                # Rotation angle in degrees.
                self.angle_spin = QDoubleSpinBox()
                self.angle_spin.setRange(-360, 360)
                self.angle_spin.setValue(0)
                self.angle_spin.setSingleStep(1)
                params_grid.addWidget(QLabel("旋转角度:"), 0, 0)
                params_grid.addWidget(self.angle_spin, 0, 1)

            elif method == "缩放":
                # Uniform scale factor.
                self.scale_spin = QDoubleSpinBox()
                self.scale_spin.setRange(0.1, 10.0)
                self.scale_spin.setValue(1.0)
                self.scale_spin.setSingleStep(0.1)
                params_grid.addWidget(QLabel("缩放比例:"), 0, 0)
                params_grid.addWidget(self.scale_spin, 0, 1)

            elif method == "亮度/对比度":
                self.brightness_slider = QSlider(Qt.Horizontal)
                self.brightness_slider.setRange(-100, 100)
                self.brightness_slider.setValue(0)
                self.brightness_slider.setTickInterval(10)
                self.brightness_slider.setTickPosition(QSlider.TicksBelow)

                self.contrast_slider = QSlider(Qt.Horizontal)
                self.contrast_slider.setRange(-100, 100)
                self.contrast_slider.setValue(0)
                self.contrast_slider.setTickInterval(10)
                self.contrast_slider.setTickPosition(QSlider.TicksBelow)

                params_grid.addWidget(QLabel("亮度:"), 0, 0)
                params_grid.addWidget(self.brightness_slider, 0, 1)
                params_grid.addWidget(QLabel("对比度:"), 1, 0)
                params_grid.addWidget(self.contrast_slider, 1, 1)

            elif method == "饱和度":
                self.saturation_slider = QSlider(Qt.Horizontal)
                self.saturation_slider.setRange(-100, 100)
                self.saturation_slider.setValue(0)
                self.saturation_slider.setTickInterval(10)
                self.saturation_slider.setTickPosition(QSlider.TicksBelow)

                params_grid.addWidget(QLabel("饱和度:"), 0, 0)
                params_grid.addWidget(self.saturation_slider, 0, 1)

            elif method == "平滑处理":
                self.smooth_method = QComboBox()
                self.smooth_method.addItems(["高斯模糊", "中值模糊", "双边滤波"])

                # Kernel size; stepped by 2 to keep it odd for blur filters.
                self.kernel_size = QSpinBox()
                self.kernel_size.setRange(1, 31)
                self.kernel_size.setValue(3)
                self.kernel_size.setSingleStep(2)

                params_grid.addWidget(QLabel("平滑方法:"), 0, 0)
                params_grid.addWidget(self.smooth_method, 0, 1)
                params_grid.addWidget(QLabel("核大小:"), 1, 0)
                params_grid.addWidget(self.kernel_size, 1, 1)

            self.param_layout.addLayout(params_grid)

        elif method == "裁剪":
            self.crop_btn = QPushButton("选择裁剪区域")
            self.crop_btn.clicked.connect(self.select_crop_region)
            self.param_layout.addWidget(self.crop_btn)

        elif method == "形状识别":
            self.shape_type = QComboBox()
            self.shape_type.addItems(["所有形状", "圆形", "矩形", "三角形", "多边形"])

            self.min_area = QSpinBox()
            self.min_area.setRange(100, 100000)
            self.min_area.setValue(500)

            self.param_layout.addWidget(QLabel("形状类型:"))
            self.param_layout.addWidget(self.shape_type)
            self.param_layout.addWidget(QLabel("最小面积:"))
            self.param_layout.addWidget(self.min_area)

        elif method == "图像拼接":
            self.add_image_btn = QPushButton("添加更多图像")
            self.add_image_btn.clicked.connect(self.add_more_images)
            self.param_layout.addWidget(self.add_image_btn)

            self.images_list = QListWidget()
            self.param_layout.addWidget(self.images_list)

            # Re-selecting this method resets the stitching image list.
            self.images = []

        elif method == "人脸识别":
            face_layout = QVBoxLayout()

            # Face-database management controls.
            db_group = QGroupBox("人脸数据库")
            db_layout = QGridLayout()

            self.add_face_btn = QPushButton("添加人脸到数据库")
            self.add_face_btn.clicked.connect(self.add_face_to_database)

            self.clear_db_btn = QPushButton("清空数据库")
            self.clear_db_btn.clicked.connect(self.clear_face_database)

            self.face_tolerance_label = QLabel("相似度阈值:")
            self.face_tolerance_spin = QDoubleSpinBox()
            self.face_tolerance_spin.setRange(0.1, 1.0)
            self.face_tolerance_spin.setValue(self.face_tolerance)
            self.face_tolerance_spin.setSingleStep(0.05)
            self.face_tolerance_spin.valueChanged.connect(self.update_face_tolerance)

            db_layout.addWidget(self.add_face_btn, 0, 0)
            db_layout.addWidget(self.clear_db_btn, 0, 1)
            db_layout.addWidget(self.face_tolerance_label, 1, 0)
            db_layout.addWidget(self.face_tolerance_spin, 1, 1)

            db_group.setLayout(db_layout)

            # List of faces currently stored in the database.
            self.db_faces_list = QListWidget()
            self.update_face_database_list()

            face_layout.addWidget(db_group)
            face_layout.addWidget(QLabel("数据库中的人脸:"))
            face_layout.addWidget(self.db_faces_list)

            self.param_layout.addLayout(face_layout)

    def _clear_layout(self, layout):
        """Recursively delete all widgets and sub-layouts from ``layout``.

        QLayoutItem.widget() returns None when the item is a nested layout,
        so the original non-recursive loop never deleted widgets added
        through a nested QGridLayout/QVBoxLayout — they leaked every time
        the processing method changed.
        """
        while layout.count():
            item = layout.takeAt(0)
            widget = item.widget()
            if widget is not None:
                widget.deleteLater()
            else:
                child = item.layout()
                if child is not None:
                    self._clear_layout(child)

    def select_crop_region(self):
        """Open a CropDialog; if accepted, remember the chosen rectangle
        and immediately re-run processing."""
        if self.current_image is None:
            QMessageBox.warning(self, "警告", "请先加载图像")
            return

        dialog = CropDialog(self.current_image, self)
        if dialog.exec_() != QDialog.Accepted:
            return
        # Stash the selected rectangle for the "裁剪" branch of process_image.
        self.crop_x = dialog.crop_x
        self.crop_y = dialog.crop_y
        self.crop_width = dialog.crop_width
        self.crop_height = dialog.crop_height
        self.process_image()

    def add_more_images(self):
        """Let the user pick extra images for stitching; every readable
        file is appended to self.images and listed by basename."""
        file_names, _ = QFileDialog.getOpenFileNames(
            self, "选择要拼接的图像", "",
            "图像文件 (*.png *.jpg *.jpeg *.bmp);;所有文件 (*)",
            options=QFileDialog.Options()
        )

        for file_name in file_names or []:
            img = cv2.imread(file_name)
            if img is None:
                # Skip files OpenCV cannot decode.
                continue
            self.images.append(img)
            self.images_list.addItem(os.path.basename(file_name))

    def process_image(self):
        """Apply the selected method to the current image and display it.

        Note: the result also becomes ``self.current_image``, so repeated
        clicks compound the edits (pre-existing behaviour, kept as-is).
        """
        if self.current_image is None:
            QMessageBox.warning(self, "警告", "请先加载图像")
            return

        method = self.method_combo.currentText()
        processed = self.current_image.copy()

        if method == "灰度化":
            processed = cv2.cvtColor(processed, cv2.COLOR_BGR2GRAY)
            processed = cv2.cvtColor(processed, cv2.COLOR_GRAY2BGR)  # back to BGR for display

        elif method == "边缘检测":
            gray = cv2.cvtColor(processed, cv2.COLOR_BGR2GRAY)
            processed = cv2.Canny(gray, 100, 200)
            processed = cv2.cvtColor(processed, cv2.COLOR_GRAY2BGR)

        elif method == "图像增强":
            # Simple linear contrast (alpha) / brightness (beta) boost.
            processed = cv2.convertScaleAbs(processed, alpha=1.5, beta=10)

        elif method == "对象检测":
            # Simplified example using OpenCV's Haar cascade face detector.
            # ``processed`` is already a copy, so we can draw on it directly.
            gray = cv2.cvtColor(processed, cv2.COLOR_BGR2GRAY)
            face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(processed, (x, y), (x + w, y + h), (255, 0, 0), 2)

        elif method == "人脸识别":
            processed = self.perform_face_recognition(processed)

        elif method == "旋转":
            angle = self.angle_spin.value()
            height, width = processed.shape[:2]
            center = (width // 2, height // 2)
            rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
            processed = cv2.warpAffine(processed, rotation_matrix, (width, height),
                                       flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)

        elif method == "缩放":
            scale = self.scale_spin.value()
            processed = cv2.resize(processed, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)

        elif method == "裁剪":
            # Only crop once a region has been chosen via select_crop_region().
            if hasattr(self, 'crop_x'):
                x, y, w, h = self.crop_x, self.crop_y, self.crop_width, self.crop_height
                processed = processed[y:y + h, x:x + w]

        elif method == "亮度/对比度":
            brightness = self.brightness_slider.value()
            contrast = self.contrast_slider.value()

            # Brightness: constant offset (addWeighted handles both signs,
            # so the duplicated >0 / <0 branches were merged).
            if brightness != 0:
                processed = cv2.addWeighted(processed, 1, np.zeros(processed.shape, processed.dtype), 0, brightness)

            # Contrast: linear stretch around mid-grey (128).
            if contrast != 0:
                factor = (259 * (contrast + 255)) / (255 * (259 - contrast))
                processed = np.clip(128 + factor * (processed.astype(np.float32) - 128), 0, 255).astype(np.uint8)

        elif method == "饱和度":
            saturation = self.saturation_slider.value() / 100.0

            # Work in HSV so only the saturation channel is touched.
            hsv = cv2.cvtColor(processed, cv2.COLOR_BGR2HSV)
            h, s, v = cv2.split(hsv)

            # One formula scales saturation up or down (the original had two
            # byte-identical branches for the two signs).
            if saturation != 0:
                s = np.clip(s.astype(np.float32) * (1 + saturation), 0, 255).astype(np.uint8)

            hsv = cv2.merge([h, s, v])
            processed = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        elif method == "平滑处理":
            method_idx = self.smooth_method.currentIndex()
            kernel_size = self.kernel_size.value()
            # Gaussian/median kernels must be odd; the spin box steps by 2
            # but an even value can still be typed in, which would make
            # OpenCV raise — round up instead of crashing.
            odd_kernel = kernel_size if kernel_size % 2 == 1 else kernel_size + 1

            if method_idx == 0:  # Gaussian blur
                processed = cv2.GaussianBlur(processed, (odd_kernel, odd_kernel), 0)
            elif method_idx == 1:  # median blur
                processed = cv2.medianBlur(processed, odd_kernel)
            elif method_idx == 2:  # bilateral filter (diameter may be even)
                processed = cv2.bilateralFilter(processed, kernel_size, 75, 75)

        elif method == "形状识别":
            processed = self._recognize_shapes(processed)

        elif method == "图像拼接":
            if len(self.images) < 2:
                QMessageBox.warning(self, "警告", "请至少添加两张图像进行拼接")
                return

            # cv2.Stitcher performs its own feature detection, matching and
            # homography estimation internally.  The original hand-rolled
            # SIFT/BFMatcher/findHomography pass computed homographies that
            # were never used (and crashed when knnMatch returned fewer
            # than 2 matches per pair), so it has been removed.
            stitcher = cv2.Stitcher_create()
            status, result = stitcher.stitch([self.current_image] + self.images)

            if status == cv2.Stitcher_OK:
                processed = result
            else:
                QMessageBox.warning(self, "警告", f"图像拼接失败，状态码: {status}")

        self.output_viewer.setImage(processed)
        self.current_image = processed

    def _recognize_shapes(self, image):
        """Detect and label shapes (triangle/rectangle/polygon/circle) on a
        copy of ``image``, driven by the shape-type and min-area widgets."""
        shape_type = self.shape_type.currentText()
        min_area = self.min_area.value()

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Otsu binarization (inverted: shapes darker than background).
        _, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

        contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        result = image.copy()

        for contour in contours:
            area = cv2.contourArea(contour)
            if area < min_area:
                continue

            # Polygonal approximation: vertex count identifies the shape.
            epsilon = 0.02 * cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, epsilon, True)

            # Contour centroid, used to place the label.
            M = cv2.moments(contour)
            if M["m00"] != 0:
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
            else:
                cX, cY = 0, 0

            shape_name = "未知"

            if shape_type == "所有形状" or shape_type == "多边形":
                if len(approx) == 3:
                    shape_name = "三角形"
                elif len(approx) == 4:
                    shape_name = "矩形"
                elif 5 <= len(approx) <= 10:
                    shape_name = "多边形"
                else:
                    # Many vertices: test circularity against the minimum
                    # enclosing circle (area ratio near 1 means a circle).
                    (x, y), radius = cv2.minEnclosingCircle(contour)
                    radius = int(radius)
                    area_ratio = area / (np.pi * radius * radius)
                    if area_ratio > 0.8:
                        shape_name = "圆形"

            elif shape_type == "圆形":
                (x, y), radius = cv2.minEnclosingCircle(contour)
                radius = int(radius)
                area_ratio = area / (np.pi * radius * radius)
                if area_ratio > 0.8:
                    shape_name = "圆形"

            elif shape_type == "矩形":
                if len(approx) == 4:
                    shape_name = "矩形"

            elif shape_type == "三角形":
                if len(approx) == 3:
                    shape_name = "三角形"

            # Draw the contour and its label.
            if shape_name != "未知":
                cv2.drawContours(result, [contour], -1, (0, 255, 0), 2)
                cv2.putText(result, shape_name, (cX, cY),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)

        return result

    def perform_face_recognition(self, image):
        """Detect faces in a BGR image and annotate them with recognition results.

        Every detected face gets a green bounding box. If the face database
        holds a match within ``self.face_tolerance`` the matched name and a
        similarity percentage are drawn above the box; otherwise the face is
        labelled "Unknown" (or "No database" when no faces are enrolled).

        Args:
            image: BGR image (numpy array) as produced by OpenCV.

        Returns:
            A copy of ``image`` with the annotations drawn on it.
        """
        # face_recognition operates on RGB; OpenCV delivers BGR.
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Locate faces, then compute their 128-d encodings.
        face_locations = face_recognition.face_locations(rgb_image)
        face_encodings = face_recognition.face_encodings(rgb_image, face_locations)

        # Draw on a copy so the caller's image stays untouched.
        result_image = image.copy()

        # Snapshot the database once. face_distance compares one probe encoding
        # against a whole list of known encodings in a single vectorized call,
        # replacing the original one-call-per-entry loop.
        db_names = list(self.face_database.keys())
        db_encodings = list(self.face_database.values())

        for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
            # Bounding box around the detected face.
            cv2.rectangle(result_image, (left, top), (right, bottom), (0, 255, 0), 2)

            if db_encodings:
                distances = face_recognition.face_distance(db_encodings, face_encoding)
                best_match_index = int(np.argmin(distances))
                best_match_distance = distances[best_match_index]

                # The closest entry matches iff its distance is within tolerance
                # (equivalent to the original "any match, then argmin" logic).
                if best_match_distance <= self.face_tolerance:
                    # Convert distance (0 = identical) to a similarity percentage.
                    similarity = (1 - best_match_distance) * 100
                    text = f"{db_names[best_match_index]} ({similarity:.1f}%)"
                    cv2.putText(result_image, text, (left, top - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                else:
                    # No database entry is close enough.
                    cv2.putText(result_image, "Unknown", (left, top - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            else:
                # Empty database: box the face but flag that no comparison ran.
                cv2.putText(result_image, "No database", (left, top - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

        return result_image

    def add_face_to_database(self):
        """Enroll the single face in the current image under a user-supplied name."""
        if self.current_image is None:
            QMessageBox.warning(self, "警告", "请先加载图像")
            return

        # face_recognition expects RGB input; OpenCV images are BGR.
        rgb_image = cv2.cvtColor(self.current_image, cv2.COLOR_BGR2RGB)
        face_locations = face_recognition.face_locations(rgb_image)

        # Exactly one face is required for an unambiguous enrollment.
        if not face_locations:
            QMessageBox.warning(self, "警告", "未检测到人脸")
            return
        if len(face_locations) > 1:
            QMessageBox.warning(self, "警告", "检测到多个人脸，请选择只包含一个人脸的图像")
            return

        # Ask the user for the person's name; bail out on cancel/empty input.
        name, ok = QInputDialog.getText(self, "添加人脸", "请输入此人的名称:")
        if not (ok and name):
            return

        # Store the 128-d encoding keyed by the entered name.
        self.face_database[name] = face_recognition.face_encodings(rgb_image, face_locations)[0]
        self.update_face_database_list()
        QMessageBox.information(self, "成功", f"已将 {name} 添加到人脸数据库")

    def clear_face_database(self):
        """Ask for confirmation, then wipe all stored face encodings."""
        answer = QMessageBox.question(self, "确认", "确定要清空人脸数据库吗?",
                                      QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if answer != QMessageBox.Yes:
            return
        self.face_database = {}
        self.update_face_database_list()

    def update_face_database_list(self):
        """Refresh the list widget so it mirrors the names in the face database."""
        self.db_faces_list.clear()
        # Iterating the dict directly yields its keys (the enrolled names).
        for entry_name in self.face_database:
            self.db_faces_list.addItem(entry_name)

    def update_face_tolerance(self, value):
        """Store the distance threshold used when matching faces against the database."""
        self.face_tolerance = value


class CropDialog(QDialog):
    def __init__(self, image, parent=None):
        """Dialog letting the user drag a rectangle over *image* to pick a crop region."""
        super().__init__(parent)
        self.setWindowTitle("选择裁剪区域")
        self.setMinimumSize(800, 600)

        # Selection state: corners of the drag rectangle and whether a drag is active.
        self.image = image
        self.start_point = None
        self.end_point = None
        self.drawing = False

        # Graphics scene/view that displays the image.
        self.scene = QGraphicsScene()
        self.view = QGraphicsView(self.scene)
        self.view.setRenderHint(QPainter.Antialiasing)
        self.view.setRenderHint(QPainter.SmoothPixmapTransform)

        # OpenCV delivers BGR; Qt's Format_RGB888 expects RGB.
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        height, width, channels = rgb.shape
        stride = channels * width
        qt_image = QImage(rgb.data, width, height, stride, QImage.Format_RGB888)
        self.pixmap_item = QGraphicsPixmapItem(QPixmap.fromImage(qt_image))
        self.scene.addItem(self.pixmap_item)

        # Rubber-band rectangle, hidden until the user starts dragging.
        self.rect_item = self.scene.addRect(0, 0, 0, 0, QPen(Qt.red, 2))
        self.rect_item.setVisible(False)

        # Route the view's mouse events to this dialog's handlers.
        self.view.mousePressEvent = self.mouse_press_event
        self.view.mouseMoveEvent = self.mouse_move_event
        self.view.mouseReleaseEvent = self.mouse_release_event

        # OK / Cancel buttons.
        buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        buttons.accepted.connect(self.accept)
        buttons.rejected.connect(self.reject)

        layout = QVBoxLayout()
        layout.addWidget(self.view)
        layout.addWidget(buttons)
        self.setLayout(layout)

    def mouse_press_event(self, event):
        """Begin the rubber-band selection on a left-button press."""
        if event.button() != Qt.LeftButton:
            return
        # Anchor the selection at the press position (in scene coordinates).
        self.start_point = self.view.mapToScene(event.pos())
        self.drawing = True
        self.rect_item.setVisible(True)

    def mouse_move_event(self, event):
        """While dragging, resize the rubber-band rectangle to span anchor→cursor."""
        if not self.drawing:
            return
        self.end_point = self.view.mapToScene(event.pos())
        # Normalize so the rectangle is valid regardless of drag direction.
        left = min(self.start_point.x(), self.end_point.x())
        top = min(self.start_point.y(), self.end_point.y())
        width = abs(self.end_point.x() - self.start_point.x())
        height = abs(self.end_point.y() - self.start_point.y())
        self.rect_item.setRect(left, top, width, height)

    def mouse_release_event(self, event):
        if event.button() == Qt.LeftButton and self.drawing:
            self.end_point = self.view.mapToScene(event.pos())
            self.drawing = False

            # 计算裁剪区域
            x = min(self.start_point.x(), self.end_point.x