import numpy as np
import cv2
import argparse

from tensorflow.python.keras.models import load_model


def fake_label(res_indices, video_path=None, output_path="output_video.mp4"):
    """Re-encode a video, stamping the text 'fake' onto flagged frames.

    Parameters
    ----------
    res_indices : iterable of int
        1-based frame numbers to annotate (matching the value of
        ``CAP_PROP_POS_FRAMES`` right after a successful read).
    video_path : str, optional
        Input video path. Defaults to the global ``args.video_path`` so
        existing call sites keep working unchanged.
    output_path : str, optional
        Destination of the annotated MP4.
    """
    if video_path is None:
        video_path = args.video_path

    # Set gives O(1) membership per frame instead of O(n) list scans;
    # int counter avoids comparing OpenCV's float POS_FRAMES property.
    flagged = set(res_indices)

    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 1
    font_color = (0, 0, 255)  # red in BGR
    thickness = 2

    video_capture = cv2.VideoCapture(video_path)
    fps = video_capture.get(cv2.CAP_PROP_FPS)
    width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # use the MP4 encoder
    output_video = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    try:
        # 1-based after increment — same value POS_FRAMES reports post-read.
        frame_number = 0
        while video_capture.isOpened():
            ret, frame = video_capture.read()
            if not ret:
                break
            frame_number += 1
            if frame_number in flagged:
                cv2.putText(frame, 'fake', (50, 50), font, font_scale,
                            font_color, thickness, cv2.LINE_AA)
            output_video.write(frame)
    finally:
        # Release even if annotation/writing raises, so the output file
        # is properly finalized and the capture handle is not leaked.
        video_capture.release()
        output_video.release()


def run(video_path, length=None, width=None, model_path='best_model.h5'):
    """Detect 'fake' frames in *video_path* and write an annotated copy.

    Consecutive-frame differences are thresholded into binary masks,
    resized, stacked into sliding triples, and scored by a Keras model;
    frames scoring > 0.5 are labelled via :func:`fake_label`.

    Parameters
    ----------
    video_path : str
        Path of the video to analyze.
    length, width : int, optional
        Resize target passed to ``cv2.resize``. Default to the globals
        ``args.length`` / ``args.width`` for backward compatibility.
        NOTE(review): ``cv2.resize`` takes ``(width, height)``, so
        ``length`` is the horizontal dimension here — confirm intent.
    model_path : str, optional
        Saved Keras model used for scoring.

    Raises
    ------
    IOError
        If the video cannot be opened or yields no frames.
    """
    if length is None:
        length = args.length
    if width is None:
        width = args.width

    video = cv2.VideoCapture(video_path)
    if not video.isOpened():
        raise IOError('Cannot open video: %s' % video_path)

    # Read the first frame; without it no differences can be computed.
    ret, prev_frame = video.read()
    if not ret:
        video.release()
        raise IOError('Video has no readable frames: %s' % video_path)

    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

    # Binary difference masks, one per consecutive frame pair.
    frames = []

    for _ in range(1, total_frames):
        ret, curr_frame = video.read()
        # CAP_PROP_FRAME_COUNT is only an estimate for some containers;
        # stop cleanly instead of crashing on a None frame.
        if not ret:
            break

        # Saturated difference between consecutive frames...
        frame_diff = cv2.subtract(curr_frame, prev_frame)
        frame_diff = cv2.absdiff(frame_diff, 0)
        # ...thresholded into a binary change mask.
        _, binary_diff = cv2.threshold(frame_diff, 30, 255, cv2.THRESH_BINARY)

        frames.append(cv2.resize(binary_diff, (length, width)))
        prev_frame = curr_frame

    video.release()

    if not frames:
        # Single-frame video: nothing to score or label.
        print('No frame differences could be computed; nothing to label.')
        return

    frames = np.array(frames)

    # Stack each mask with its two predecessors (clamped at the start)
    # so the model sees short-range temporal context.
    processed_frames = []
    for i in range(len(frames)):
        if i == 0:
            frame_pair = np.stack((frames[0], frames[0], frames[0]))
        elif i == 1:
            frame_pair = np.stack((frames[0], frames[1], frames[1]))
        else:
            frame_pair = np.stack((frames[i - 2], frames[i - 1], frames[i]))
        processed_frames.append(frame_pair)
    processed_frames = np.array(processed_frames)
    print("Processed frames shape:", processed_frames.shape)

    model = load_model(model_path)
    res = model.predict(processed_frames)
    res_indices = [i for i, element in enumerate(res) if element > 0.5]
    print(res_indices)
    # Mask i is the diff between original frames i and i+1; shift to the
    # 1-based frame numbers that fake_label compares against.
    res_indices = [x + 1 for x in res_indices]
    print('map frames', res_indices)
    fake_label(res_indices)


if __name__ == '__main__':
    # Command-line interface: resize target and the input video location.
    cli = argparse.ArgumentParser(description='This is an example of command-line arguments')
    cli.add_argument('--length', type=int, default=320)
    cli.add_argument('--width', type=int, default=240)
    cli.add_argument('--video_path', type=str, default='')

    # NOTE: must stay named `args` — fake_label()/run() read it globally.
    args = cli.parse_args()

    run(args.video_path)
