import torch
import numpy as np
import cv2
from model import SimilarityModel
from torchvision import transforms

# Select GPU 0 when CUDA is available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Input shape (C, H, W) the network was trained with -- adjust to match training.
input_shape = (3, 1440, 3200)

# Build the model and restore its trained weights.
model = SimilarityModel(input_shape).to(device)
# map_location=device: without it, a checkpoint saved on a CUDA device fails to
# load on a CPU-only machine, defeating the CPU fallback chosen above.
model.load_state_dict(torch.load('similarity_model.pth', map_location=device))
model.eval()  # inference mode (disables dropout / batch-norm updates)

# ---- Video input / output setup ----
video_path = '/home/zry/experiments/game/az_recorder_20240712_131826.mp4'  # change to your video path
output_path = 'output_video.mp4'

cap = cv2.VideoCapture(video_path)
fps = cap.get(cv2.CAP_PROP_FPS)
# Geometry and length of the source, read in one pass over the properties.
frame_count, width, height = (
    int(cap.get(prop))
    for prop in (cv2.CAP_PROP_FRAME_COUNT,
                 cv2.CAP_PROP_FRAME_WIDTH,
                 cv2.CAP_PROP_FRAME_HEIGHT)
)

# Writer mirroring the source's frame rate and geometry.
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

# Per-frame preprocessing: OpenCV BGR uint8 frame -> normalized CHW float tensor.
# NOTE(review): the ToPILImage -> Resize -> ToTensor tail re-quantizes the float
# tensor through uint8 and resizes to the video's own (height, width), which looks
# redundant -- kept exactly as-is to match whatever the model was trained with.
_pipeline = [
    transforms.Lambda(lambda bgr: cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)),    # BGR -> RGB
    transforms.Lambda(lambda rgb: rgb.astype(np.float32) / 255.0),          # scale to [0, 1]
    transforms.Lambda(lambda arr: torch.from_numpy(arr).permute(2, 0, 1)),  # HWC -> CHW tensor
    transforms.ToPILImage(),
    transforms.Resize((height, width)),
    transforms.ToTensor(),
]
preprocess = transforms.Compose(_pipeline)

# Grab the first frame; abort with a message if the video yields nothing.
ok, prev_frame = cap.read()
if not ok:
    print("Error: Could not read the video.")
    cap.release()
    out.release()
    exit()

# Push the first frame through preprocessing (tensor kept for the model)
# and write the raw frame to the output unchanged.
prev_frame_tensor = preprocess(prev_frame).unsqueeze(0).to(device)
out.write(prev_frame)

max_frames = 10000  # hard cap on how many frames are processed
frame_num = 1       # 1-based index of the frame just read

# Load precomputed per-frame similarity scores.
# The commented-out loop below is the one-off pass that produced 'ss.pth';
# it is kept for reference on how the scores were generated.
# similaritys = []

# while frame_num < min(frame_count, max_frames):
#     ret, curr_frame = cap.read()
#     frame_num += 1
#     if not ret:
#         break
    
#     curr_frame_tensor = preprocess(curr_frame).unsqueeze(0).to(device)
#     with torch.no_grad():
#         similarity = model(curr_frame_tensor).item()

#     similaritys.append(similarity)
#     # print(similarity)
    
#     # if similarity >= 0.9:
#     #     out.write(curr_frame)
#     #     out.write(curr_frame)
#     #     next_write = True
#     #     prev_frame_tensor = curr_frame_tensor

# torch.save(similaritys, 'ss.pth')
similaritys = torch.load('ss.pth')
# Fraction of frames to drop: 20% of the total. In practice at most half of the
# frames can be affected -- e.g. if frame 1 is duplicated, frame 2 may be
# skipped, but frame 3 is still rendered as usual.
# NOTE(review): for very short score lists k can be 0, making values[-1]
# (used as the threshold downstream) raise IndexError -- confirm inputs.
k = int(len(similaritys) * (1 - 0.8)) # fraction removed: 20%; in reality at most half the frames are affected (frame 1 handled, frame 2 possibly not rendered, frame 3 rendered as usual)
values, indices = torch.topk(torch.tensor(similaritys), k)

# ---- Effective-FPS bookkeeping ----
window_seconds = 0.5                         # measurement window length, in seconds
window_frames = int(window_seconds * fps)    # source frames per measurement window
emitted_count = 0                            # frames written within the current window
shown_fps = 120                              # overlaid value until the first window completes

idx = 1            # 1-based index of the frame just read
skip_next = False  # set when the previous frame was duplicated, so this one is dropped
threshold = values[-1]                  # k-th largest score: the duplicate/drop cutoff
last_frame = min(frame_count, max_frames)

while idx < last_frame:
    ok, frame = cap.read()
    idx += 1
    # print(idx)
    if not ok:
        break

    # Recompute the displayed FPS once per measurement window.
    if idx % window_frames == 0:
        shown_fps = emitted_count / window_seconds
        emitted_count = 0
        print('frame_num:', idx, 'fps:', shown_fps)

    # The previous frame was written twice, so this frame is dropped entirely.
    if skip_next:
        skip_next = False
        continue

    # Overlay the measured FPS at the centre of the frame (drawn in place).
    cv2.putText(frame, f"FPS: {shown_fps:.2f}", (width // 2, height // 2),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)

    if similaritys[idx - 2] <= threshold:
        # Score at or below the cutoff: write this frame twice and skip the
        # next one, keeping the overall frame count unchanged.
        out.write(frame.copy())
        out.write(frame.copy())
        emitted_count += 1
        skip_next = True
    else:
        out.write(frame.copy())
        emitted_count += 1
        skip_next = False

cap.release()
out.release()
print("Finished processing the video.")
