import cv2
import os

def extract_frames(video_path, output_dir):
    """Decode a video file and save every frame as a JPEG image.

    Frames are written to ``output_dir`` as ``frame_000000.jpg``,
    ``frame_000001.jpg``, ... in decode order.

    Args:
        video_path: Path to the input video file.
        output_dir: Directory to write frames into (created if missing).

    Returns:
        The number of frames written.

    Raises:
        IOError: If the video cannot be opened.
    """
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(output_dir, exist_ok=True)

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        # Fail loudly instead of silently writing zero frames.
        raise IOError(f'Cannot open video: {video_path}')

    frame_id = 0
    try:
        while True:
            ret, frame = cap.read()
            if not ret:  # end of stream or decode failure
                break
            # Save the frame image.
            cv2.imwrite(os.path.join(output_dir, f'frame_{frame_id:06d}.jpg'), frame)
            frame_id += 1
    finally:
        # Release the capture handle even if imwrite raises.
        cap.release()
    return frame_id

# Example usage: extract all frames of the demo clip to disk.
video_path = 'path_to_your_video.mp4'
output_dir = 'extracted_frames'
extract_frames(video_path, output_dir)


import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms, models
from torch.utils.data import DataLoader

# Assumes a preprocessed dataset with annotations is already available.
class VideoDataset(torch.utils.data.Dataset):
    """Minimal frame/label dataset for video behavior recognition.

    Wraps parallel sequences of frames and labels; an optional transform
    is applied to each frame on access.
    """

    def __init__(self, frames, labels, transform=None):
        self.frames = frames
        self.labels = labels
        self.transform = transform

    def __len__(self):
        """Number of (frame, label) pairs."""
        return len(self.frames)

    def __getitem__(self, idx):
        """Return the (possibly transformed) frame and its label at *idx*."""
        frame, label = self.frames[idx], self.labels[idx]
        if self.transform:
            return self.transform(frame), label
        return frame, label

# Model definition: per-frame CNN features fed through an LSTM for
# sequence-level behavior classification.
class VideoBehaviorRecognitionModel(nn.Module):
    """CNN+LSTM video behavior classifier.

    Each frame is encoded with a ResNet-18 backbone into a 512-d feature
    vector; per-frame features are run through an LSTM and the last time
    step is classified into ``num_classes`` behaviors.
    """

    def __init__(self, num_classes):
        super(VideoBehaviorRecognitionModel, self).__init__()
        # ImageNet-pretrained backbone; its final FC layer is repurposed
        # to emit the 512-d per-frame feature the LSTM expects.
        self.cnn = models.resnet18(pretrained=True)
        self.cnn.fc = nn.Linear(self.cnn.fc.in_features, 512)
        self.rnn = nn.LSTM(input_size=512, hidden_size=256, num_layers=1, batch_first=True)
        self.fc = nn.Linear(256, num_classes)

    def forward(self, x):
        """Classify a batch of frame sequences.

        Args:
            x: Tensor of shape (batch, seq_len, channels, height, width).

        Returns:
            Logits of shape (batch, num_classes).
        """
        batch_size, seq_len = x.size(0), x.size(1)
        # Fold the time axis into the batch so the CNN sees plain images.
        # Using x.shape[2:] instead of the original hard-coded (3, 224, 224)
        # generalizes to any resolution the backbone accepts.
        x = x.reshape(batch_size * seq_len, *x.shape[2:])
        x = self.cnn(x)
        x = x.view(batch_size, seq_len, -1)
        x, _ = self.rnn(x)
        # Classify using only the final time step's hidden output.
        x = self.fc(x[:, -1, :])
        return x

# Instantiate the model, define the loss function and optimizer.
# NOTE(review): `labels` (and `frames` below) are never defined in this file
# before this point — running this script as-is raises NameError. They are
# presumably produced by an earlier annotation/loading step; confirm.
model = VideoBehaviorRecognitionModel(num_classes=len(set(labels)))
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Load the dataset; normalization uses ImageNet statistics to match the
# pretrained ResNet backbone.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
dataset = VideoDataset(frames, labels, transform=transform)
dataloader = DataLoader(dataset, batch_size=32, shuffle=True)

# Train the model.
num_epochs = 10
for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    # Distinct names for the batch tensors: the original bound each batch to
    # `labels`, clobbering the module-level label list that the inference
    # code further down still indexes into.
    for batch_inputs, batch_labels in dataloader:
        optimizer.zero_grad()
        outputs = model(batch_inputs)
        loss = criterion(outputs, batch_labels)
        loss.backward()
        optimizer.step()
        # Weight by batch size so the epoch loss is a true per-sample mean.
        running_loss += loss.item() * batch_inputs.size(0)
    epoch_loss = running_loss / len(dataset)
    print(f'Epoch {epoch+1}, Loss: {epoch_loss:.4f}')


import cv2
from model import VideoBehaviorRecognitionModel  # assumes the model is defined in model.py

# Load the trained model for inference.
# NOTE(review): `labels` is undefined at this point in the file (see the
# training section); also num_classes must match the checkpoint being
# loaded — verify against how trained_model.pth was produced.
model = VideoBehaviorRecognitionModel(num_classes=len(set(labels)))
model.load_state_dict(torch.load('trained_model.pth'))
model.eval()

# Video decoding and preprocessing helpers.
# Build the transform once at module load instead of on every frame; it is
# the same pipeline used at training time.
_preprocess_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

def preprocess_frame(frame):
    """Convert a BGR OpenCV frame into a normalized model input tensor.

    Args:
        frame: HxWx3 BGR image as returned by ``cv2.VideoCapture.read``.

    Returns:
        A float tensor of shape (1, 3, 224, 224).
    """
    # Local import: PIL is only imported at the bottom of this file, after
    # this function's first use — without this the original raised NameError.
    from PIL import Image
    img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    return _preprocess_transform(img).unsqueeze(0)  # add the batch dimension

# Open the video stream and classify each frame.
cap = cv2.VideoCapture('path_to_your_video.mp4')

while True:
    ret, frame = cap.read()
    if not ret:  # end of video or read error
        break

    # Preprocess the frame and run it through the model.
    with torch.no_grad():
        # The model's forward expects (batch, seq_len, C, H, W), but
        # preprocess_frame returns (1, C, H, W) — the original code fed that
        # 4-D tensor straight in and crashed on the 5-way unpack. Add a
        # length-1 sequence dimension.
        img_tensor = preprocess_frame(frame).unsqueeze(0)
        output = model(img_tensor)
        _, predicted = torch.max(output, 1)
        # NOTE(review): indexing the per-sample label list by a class index
        # is only correct if `labels` maps class id -> class name — verify.
        predicted_label = labels[predicted.item()]

    # Draw the prediction on the frame (optional). str() because labels may
    # not be strings and putText requires text.
    cv2.putText(frame, str(predicted_label), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    # Show the annotated frame.
    cv2.imshow('Behavior Recognition', frame)

    # Press 'q' to exit the loop.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()


from flask import Flask, request, jsonify
import torch
from PIL import Image
import io
from torchvision import transforms
from model import VideoBehaviorRecognitionModel  # assumes the model is defined in model.py

app = Flask(__name__)
# NOTE(review): `labels` is undefined in this file before use, and
# num_classes must match the checkpoint being served — confirm where the
# label list actually comes from.
model = VideoBehaviorRecognitionModel(num_classes=len(set(labels)))
model.load_state_dict(torch.load('trained_model.pth'))
model.eval()

# Same preprocessing pipeline as at training time.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

@app.route('/predict', methods=['POST'])
def predict():
    """Classify an uploaded image and return its behavior label as JSON.

    Expects a multipart upload under the form key ``file``. Responds with
    ``{"label": ...}`` on success, or ``{"error": ...}`` with HTTP 400 on
    missing or unreadable input.
    """
    if 'file' not in request.files:
        return jsonify({'error': 'No file part'}), 400
    file = request.files['file']
    try:
        img = Image.open(io.BytesIO(file.read()))
    except OSError:
        # Non-image payloads must return 400, not crash with a 500.
        return jsonify({'error': 'Invalid image'}), 400
    # Normalize color mode: Normalize() expects 3 channels, but uploads may
    # be grayscale or RGBA.
    img = img.convert('RGB')
    # The model's forward expects (batch, seq_len, C, H, W); the original
    # added only a batch dimension, so the 5-way unpack crashed. Feed a
    # 1-frame sequence instead.
    img = transform(img).unsqueeze(0).unsqueeze(0)
    with torch.no_grad():
        output = model(img)
        _, predicted = torch.max(output, 1)
    return jsonify({'label': labels[predicted.item()]})

if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger and reloader,
    # which allows arbitrary code execution — must not ship to production;
    # confirm this is intended for local development only.
    app.run(debug=True)

