from facenet_pytorch import MTCNN, InceptionResnetV1
import torch
import numpy as np
import mmcv, cv2
from PIL import Image, ImageDraw
import os
from torchvision import transforms
import time
from datetime import datetime
import logging
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
from queue import PriorityQueue
import threading

# --- Tunable parameters ---
LOCK_FRAME_IDX: int = 180  # frame index whose detections define the locked identities
SIMILARITY_THRESHOLD: float = 0.6  # similarity threshold for merging the same face
CONFIDENCE_THRESHOLD: float = 0.985  # minimum MTCNN detection confidence
STRONG_SIMILARITY_THRESHOLD: float = 0.65  # strong-similarity threshold used when matching identities
RECORD_SECONDS: int = 75  # recording duration in seconds; -1 records to the end of the video
PARALLEL_FRAMES: int = 4  # number of frames processed per parallel batch
MAX_WORKERS: int = 4  # maximum worker threads in the thread pool

# --- Logging setup ---
# Write logs both to a timestamped file under ./logs and to the console.
log_dir = 'logs'
os.makedirs(log_dir, exist_ok=True)  # exist_ok avoids the check-then-create race

log_file = os.path.join(log_dir, f'face_detection_{datetime.now().strftime("%Y%m%d_%H%M%S")}.log')
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(log_file, encoding='utf-8'),
        logging.StreamHandler()
    ]
)

print(torch.__version__)
logging.info(f'使用torch版本: {torch.__version__}')
# --- GPU configuration ---
if torch.cuda.is_available():
    # Pin to the first GPU
    torch.cuda.set_device(0)  # use GPU 0
    # Let cuDNN auto-tune kernels for the (fixed-size) inputs
    torch.backends.cudnn.benchmark = True
    # Release any cached allocations before we start
    torch.cuda.empty_cache()
    device = torch.device('cuda:0')
    logging.info(f'使用GPU: {torch.cuda.get_device_name(0)}')
    logging.info(f'CUDA版本: {torch.version.cuda}')
    logging.info(f'cuDNN版本: {torch.backends.cudnn.version()}')
else:
    device = torch.device('cpu')
    logging.warning('未检测到可用的GPU，将使用CPU运行')

start_time = time.time()
logging.info('脚本开始时间: {}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))

# Output directory for the per-face videos
output_dir = 'face_videos'
os.makedirs(output_dir, exist_ok=True)  # exist_ok avoids the check-then-create race

# --- Model initialization ---
logging.info('正在初始化模型...')
# MTCNN face detector, running on the selected device
mtcnn = MTCNN(
    keep_all=True,
    device=device,
    selection_method='largest',  # prefer the largest face
    post_process=True,  # post-process for better accuracy
    min_face_size=20  # minimum detectable face size in pixels
)

# FaceNet embedding network with VGGFace2 weights.
# .eval() disables dropout / batch-norm updates for inference.
# (The original code called .eval() a second time afterwards — redundant, removed.)
resnet = InceptionResnetV1(pretrained='vggface2').eval().to(device)
# Use half precision on GPU to speed up inference
if device.type == 'cuda':
    resnet = resnet.half()  # FP16 acceleration
logging.info('模型初始化完成')

# Image preprocessing for the embedding network.
# NOTE(review): facenet_pytorch's InceptionResnetV1 is normally fed
# fixed_image_standardization ((x - 127.5) / 128.0) rather than ImageNet
# mean/std — confirm this normalization choice is intentional.
preprocess = transforms.Compose([
    transforms.Resize((160, 160)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Output resolution (width, height) of the cropped face clips
TARGET_SIZE = (640, 640)

def get_square_box(center_x, center_y, box_width, box_height, frame_width, frame_height, expand=2.0):
    """Return a square crop box [x1, y1, x2, y2] centred on (center_x, center_y).

    The square's side starts at ``max(box_width, box_height) * expand`` and is
    clamped to the frame; if clamping makes the region non-square it is shrunk
    back to a square anchored at its top-left corner.

    Args:
        center_x, center_y: centre of the detected face box.
        box_width, box_height: size of the detected face box.
        frame_width, frame_height: full frame dimensions used for clamping.
        expand: enlargement factor applied to the larger box side
            (default 2.0, matching the previous hard-coded behaviour).

    Returns:
        list[int]: [x1, y1, x2, y2] of a square region inside the frame.
    """
    half_side = max(box_width, box_height) * expand / 2
    new_x1 = int(round(center_x - half_side))
    new_y1 = int(round(center_y - half_side))
    new_x2 = int(round(center_x + half_side))
    new_y2 = int(round(center_y + half_side))
    # Shift the box back inside the frame rather than truncating it, so the
    # crop keeps its full size whenever possible.
    if new_x1 < 0:
        new_x2 += -new_x1
        new_x1 = 0
    if new_y1 < 0:
        new_y2 += -new_y1
        new_y1 = 0
    if new_x2 > frame_width:
        diff = new_x2 - frame_width
        new_x1 = max(0, new_x1 - diff)
        new_x2 = frame_width
    if new_y2 > frame_height:
        diff = new_y2 - frame_height
        new_y1 = max(0, new_y1 - diff)
        new_y2 = frame_height
    # Clamping may have produced a rectangle; shrink to a square anchored
    # at the top-left corner.
    final_w = new_x2 - new_x1
    final_h = new_y2 - new_y1
    if final_w != final_h:
        side = min(final_w, final_h)
        new_x2 = new_x1 + side
        new_y2 = new_y1 + side
    return [new_x1, new_y1, new_x2, new_y2]

# --- Open the input video ---
logging.info('正在打开视频文件...')
# NOTE(review): video.isOpened() is never checked; if 'video6.mp4' is missing
# or unreadable, total_frames/fps come back as 0 — confirm the file is
# guaranteed to exist at runtime.
video = cv2.VideoCapture('video6.mp4')
total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
fps = video.get(cv2.CAP_PROP_FPS)
logging.info(f'视频总帧数: {total_frames}, 帧率: {fps}')

# Number of frames to collect per face; -1 means "until the video ends"
required_frames = int(RECORD_SECONDS * fps) if RECORD_SECONDS > 0 else -1
if RECORD_SECONDS > 0:
    logging.info(f'设置录制时长为 {RECORD_SECONDS} 秒，需要 {required_frames} 帧')
else:
    logging.info('录制时长设置为无限制，将处理到视频结束')

# Per-face state captured at the lock frame
locked_embeddings = []  # FaceNet embedding locked at frame LOCK_FRAME_IDX
locked_faces = []  # one sequence of high-res face crops per identity
locked_boxes = []  # raw detection boxes from the lock frame
locked_probs = []  # detection confidences from the lock frame
locked_square_boxes = []  # expanded square crop boxes
mean_embeddings = []  # running-mean embedding per face (initially the locked one)
frame_width, frame_height = None, None
# --- Process the lock frame (frame LOCK_FRAME_IDX) ---
logging.info(f'正在处理锁定帧（第{LOCK_FRAME_IDX}帧）...')
# Skip the first LOCK_FRAME_IDX frames so the next read() returns the lock frame.
for i in range(LOCK_FRAME_IDX):
    ret, frame = video.read()
    if not ret:
        logging.error(f'无法读取第{LOCK_FRAME_IDX}帧！')
        exit(1)

ret, frame = video.read()
if not ret:
    logging.error(f'无法读取第{LOCK_FRAME_IDX}帧！')
    exit(1)

# OpenCV delivers BGR; PIL and the models expect RGB.
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_pil = Image.fromarray(frame_rgb)
frame_width, frame_height = frame_pil.size

boxes, probs = mtcnn.detect(frame_pil)
if boxes is None or probs is None:
    logging.error(f'第{LOCK_FRAME_IDX}帧未检测到人脸，无法锁定特征！')
    exit(1)

# Keep only high-confidence detections.
valid = probs >= CONFIDENCE_THRESHOLD
boxes = boxes[valid]
probs = probs[valid]
if len(boxes) == 0:
    logging.error(f'第{LOCK_FRAME_IDX}帧未检测到高置信度人脸，无法锁定特征！')
    exit(1)

# For every detected face: crop an expanded square region, compute its
# FaceNet embedding, and record it as that identity's reference ("locked") state.
for box, prob in zip(boxes, probs):
    box_width = box[2] - box[0]
    box_height = box[3] - box[1]
    center_x = (box[0] + box[2]) / 2
    center_y = (box[1] + box[3]) / 2
    square_box = get_square_box(center_x, center_y, box_width, box_height, frame_width, frame_height)
    locked_square_boxes.append(square_box)
    x1, y1, x2, y2 = square_box
    face_region = frame_pil.crop((x1, y1, x2, y2))
    face_region_hires = face_region.resize(TARGET_SIZE, Image.BILINEAR)
    face_region = face_region.resize((160, 160), Image.BILINEAR)
    face_tensor = preprocess(face_region).unsqueeze(0).to(device)
    if device.type == 'cuda':
        face_tensor = face_tensor.half()  # FP16 to match the half-precision model
    with torch.no_grad():  # inference only — no gradients needed
        embedding = resnet(face_tensor).detach().cpu()
    locked_embeddings.append(embedding)
    mean_embeddings.append(embedding.clone())  # mean embedding starts as the locked one
    locked_faces.append([face_region_hires])  # start this identity's frame sequence
    locked_boxes.append([box])
    locked_probs.append(prob)

logging.info(f'第{LOCK_FRAME_IDX}帧锁定{len(locked_embeddings)}个人脸特征')

# NOTE(review): unreachable in practice — the len(boxes) == 0 check above
# already exits before locked_embeddings could be empty here.
if not locked_embeddings:
    logging.error(f'第{LOCK_FRAME_IDX}帧未检测到高置信度人脸，无法锁定特征！')
    exit(1)

# Resume per-frame processing from the lock frame onward.
current_frame = LOCK_FRAME_IDX
logging.info('开始逐帧处理视频...')

# Number of successfully collected frames per locked identity.
valid_frames_count = [0] * len(locked_faces)

class ProcessingComplete(Exception):
    """Raised when every locked face has collected its required frame count."""
    pass

def check_all_faces_complete():
    """Return True when every locked face has collected ``required_frames`` frames.

    Always returns False when RECORD_SECONDS <= 0 (unlimited recording).
    Logs each face's current progress as a side effect.
    """
    if RECORD_SECONDS <= 0:
        return False

    # Log each face's current frame count.
    logging.info('检查所有人脸帧数:')
    for idx, count in enumerate(valid_frames_count):
        logging.info(f'人脸 {idx + 1}: {count}/{required_frames} 帧')

    # Idiomatic all() instead of the manual flag-and-break loop.
    all_complete = all(count >= required_frames for count in valid_frames_count)

    if all_complete:
        logging.info('所有人脸都已达到所需帧数！')
    else:
        logging.info('还有人脸未达到所需帧数，继续处理...')

    return all_complete

# Data structure holding the result of processing one frame
class FrameResult:
    """Result of processing one video frame.

    Attributes:
        frame_idx: index of the frame in the source video.
        faces: one entry per locked face — the matched high-res crop,
            or None if that face was not found in this frame.

    Instances order by frame index so they can be stored in a PriorityQueue.
    """

    def __init__(self, frame_idx, faces):
        self.frame_idx = frame_idx
        self.faces = faces

    def __lt__(self, other):
        # Earlier frames sort first.
        return self.frame_idx < other.frame_idx

    def __repr__(self):
        # Added for debuggability; shows index and number of face slots.
        return f'FrameResult(frame_idx={self.frame_idx}, faces={len(self.faces)})'

def process_single_frame(frame_data):
    """Detect faces in a single frame and match them to the locked identities.

    Designed to run on ThreadPoolExecutor workers (parallel per-frame work).

    Args:
        frame_data: tuple (frame_idx, frame_pil) — the video frame index and
            the frame as an RGB PIL image.

    Returns:
        FrameResult: its ``faces`` list holds, per locked face, either the
        matched high-res square crop or None when no match was found.

    NOTE(review): reads the shared valid_frames_count list without a lock
    while the main thread updates it — confirm this benign-race assumption
    (main thread is the only writer).
    """
    frame_idx, frame_pil = frame_data
    boxes, probs = mtcnn.detect(frame_pil)
    if boxes is None or probs is None:
        # Nothing detected: all locked faces are absent in this frame.
        return FrameResult(frame_idx, [None] * len(locked_faces))
        
    # Keep only high-confidence detections.
    valid = probs >= CONFIDENCE_THRESHOLD
    boxes = boxes[valid]
    probs = probs[valid]
    
    if len(boxes) == 0:
        return FrameResult(frame_idx, [None] * len(locked_faces))
        
    used = [False] * len(locked_embeddings)  # each identity matches at most once per frame
    frame_faces = [None] * len(locked_faces)
    
    for box, prob in zip(boxes, probs):
        # Build the expanded square crop around the detection.
        box_width = box[2] - box[0]
        box_height = box[3] - box[1]
        center_x = (box[0] + box[2]) / 2
        center_y = (box[1] + box[3]) / 2
        square_box = get_square_box(center_x, center_y, box_width, box_height, frame_width, frame_height)
        x1, y1, x2, y2 = square_box
        face_region = frame_pil.crop((x1, y1, x2, y2))
        face_region_hires = face_region.resize(TARGET_SIZE, Image.BILINEAR)
        face_region = face_region.resize((160, 160), Image.BILINEAR)
        face_tensor = preprocess(face_region).unsqueeze(0).to(device)
        if device.type == 'cuda':
            face_tensor = face_tensor.half()  # FP16 to match the half-precision model
            
        with torch.no_grad():
            embedding = resnet(face_tensor).detach().cpu()
        
        # Greedy match: pick the unused identity with the highest similarity
        # to its locked embedding, requiring similarity to both the locked
        # and the mean embedding to clear the strong threshold.
        best_idx = -1
        best_sim = -1
        for idx, (locked_emb, mean_emb) in enumerate(zip(locked_embeddings, mean_embeddings)):
            if RECORD_SECONDS > 0 and valid_frames_count[idx] >= required_frames:
                continue  # this identity already has enough frames
                
            sim_locked = float(torch.nn.functional.cosine_similarity(embedding, locked_emb))
            sim_mean = float(torch.nn.functional.cosine_similarity(embedding, mean_emb))
            if sim_locked > STRONG_SIMILARITY_THRESHOLD and sim_mean > STRONG_SIMILARITY_THRESHOLD and sim_locked > best_sim and not used[idx]:
                best_sim = sim_locked
                best_idx = idx
                
        if best_idx != -1:
            frame_faces[best_idx] = face_region_hires
            used[best_idx] = True
            
    return FrameResult(frame_idx, frame_faces)

# --- Main processing loop: read frames in batches, match faces in parallel ---
try:
    # Thread pool for per-frame detection/matching
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        frame_buffer = []
        result_queue = PriorityQueue()
        next_frame_idx = LOCK_FRAME_IDX + 1
        
        while True:
            # Fill the buffer with up to PARALLEL_FRAMES decoded frames
            while len(frame_buffer) < PARALLEL_FRAMES:
                ret, frame = video.read()
                if not ret:
                    break
                    
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frame_pil = Image.fromarray(frame_rgb)
                frame_buffer.append((next_frame_idx, frame_pil))
                next_frame_idx += 1
            
            if not frame_buffer:
                break  # end of video
                
            # Process the buffered frames in parallel
            futures = [executor.submit(process_single_frame, frame_data) for frame_data in frame_buffer]
            frame_buffer.clear()
            
            # Collect results (futures are iterated in submission order)
            for future in futures:
                result = future.result()
                result_queue.put(result)
                
                # Update the per-face valid-frame counters
                for idx, face in enumerate(result.faces):
                    if face is not None:
                        valid_frames_count[idx] += 1
                        if RECORD_SECONDS > 0:
                            logging.info(f'人脸 {idx + 1} 已收集 {valid_frames_count[idx]}/{required_frames} 帧')
                
                # Stop as soon as every face has enough frames.
                # NOTE(review): raising here leaves this batch's results in
                # result_queue, so their crops never reach locked_faces —
                # confirm this frame loss is acceptable.
                if check_all_faces_complete():
                    logging.info('所有人脸都已达到所需帧数，停止处理并开始生成视频')
                    raise ProcessingComplete()
            
            # Drain the queue in frame order into each face's sequence
            while not result_queue.empty():
                result = result_queue.get()
                for idx, face in enumerate(result.faces):
                    locked_faces[idx].append(face)
                
            # Progress based on how many frames have been read so far
            progress = (next_frame_idx / total_frames) * 100
            logging.info(f'正在处理第{next_frame_idx}/{total_frames}帧 ({progress:.2f}%)')
            
except ProcessingComplete:
    logging.info('检测到所有人脸都达到所需帧数，正在完成处理...')
    # Pad unfinished sequences with None so all faces keep equal length
    remaining_frames = total_frames - next_frame_idx
    if remaining_frames > 0:
        for faces in locked_faces:
            faces.extend([None] * remaining_frames)
finally:
    video.release()
    logging.info('视频处理完成，开始合成人脸视频...')

# --- Compose one output video per locked face ---
fourcc = cv2.VideoWriter_fourcc(*'XVID')
for idx, faces in enumerate(locked_faces):
    valid_frames = [f for f in faces if f is not None]
    min_frames = 10  # minimum usable frames for a face to be rendered
    min_sequence_ratio = 0.5  # minimum continuity score to keep a face
    if len(valid_frames) < min_frames:
        logging.info(f'人脸 {idx + 1} 的有效帧数不足，跳过合成')
        continue
        
    # Count gaps larger than 2 frames between consecutive valid frames.
    sequence_gaps = 0
    last_valid = -1
    for i, f in enumerate(faces):
        if f is not None:
            if last_valid != -1 and i - last_valid > 2:
                sequence_gaps += 1
            last_valid = i
            
    sequence_continuity = 1 - (sequence_gaps / max(1, len(valid_frames)))
    if sequence_continuity < min_sequence_ratio:
        logging.info(f'人脸 {idx + 1} 的序列连续性不足，跳过合成')
        continue
        
    video_path = os.path.join(output_dir, f'face_{idx + 1}.avi')
    # NOTE(review): only non-None frames are written but the writer keeps the
    # source fps, so the output runs shorter than real time whenever frames
    # were missed — confirm that is intended.
    video_writer = cv2.VideoWriter(video_path, fourcc, fps, TARGET_SIZE)
    total = len(faces)
    
    logging.info(f'正在合成人脸 {idx + 1} 的视频，共 {total} 帧')
    for j, frame in enumerate(faces):
        if frame is not None:
            progress = (j + 1) / total * 100
            logging.info(f'正在合成视频{idx + 1}: {j + 1}/{total}帧 ({progress:.2f}%)')
            # PIL image is RGB; the OpenCV writer expects BGR
            frame_cv = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)
            video_writer.write(frame_cv)
            
    video_writer.release()
    logging.info(f'已保存人脸 {idx + 1} 的视频到: {video_path}，包含 {len(valid_frames)} 帧')

end_time = time.time()
logging.info('脚本结束时间: {}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
logging.info('脚本总运行时间: {:.2f} 秒'.format(end_time - start_time))