import os
import cv2
import numpy as np
import time
from moviepy.editor import VideoFileClip, concatenate_videoclips
import kagglehub
import tensorflow as tf
from tensorflow.keras.layers import TFSMLayer
from PIL import Image

class AutoVideoEditor:
    """Automatically cut a video into a shorter highlight reel.

    Pipeline: detect scene changes via mean frame differencing, classify the
    first frame of each new scene with a MobileNetV2 ImageNet classifier,
    optionally filter scenes with a template, then concatenate fixed-length
    subclips into an output file.
    """

    def __init__(self, video_path):
        """Load the source video and the scene-classification model.

        Args:
            video_path: Path to the input video file.

        Raises:
            RuntimeError: If the classification model cannot be
                downloaded or loaded.
        """
        self.clip = VideoFileClip(video_path)
        self.fps = self.clip.fps

        try:
            # Download the SavedModel from Kaggle Hub and wrap it with
            # TFSMLayer (Keras 3 no longer loads SavedModels directly).
            path = kagglehub.model_download("google/mobilenet-v2/tensorFlow2/100-224-classification")
            print("Path to model files:", path)

            self.scene_model = tf.keras.Sequential([
                TFSMLayer(path, call_endpoint='serving_default')
            ])
            print("模型加载成功!")
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise RuntimeError(f"模型加载失败: {str(e)}") from e

        # Human-readable ImageNet class names, indexed by prediction argmax.
        self.classes = self._get_imagenet_labels()

    def _get_imagenet_labels(self):
        """Download (cached by Keras) and return the ImageNet labels, one per line."""
        labels_url = "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt"
        labels_path = tf.keras.utils.get_file("ImageNetLabels.txt", labels_url)
        with open(labels_path) as f:
            return f.read().splitlines()

    def _classify_scene_type(self, frame):
        """Classify a single BGR frame and return its ImageNet label string."""
        # OpenCV delivers BGR; the model expects 224x224 RGB scaled to [0, 1].
        img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        img = img.resize((224, 224))
        img_array = np.expand_dims(np.array(img) / 255.0, axis=0)

        predictions = self.scene_model.predict(img_array)

        # TFSMLayer endpoints return a {output_name: tensor} dict; tolerate a
        # bare array too in case the serving signature differs.
        if isinstance(predictions, dict):
            predictions = next(iter(predictions.values()))

        # Top-1 class index for the single image in the batch.
        top_index = int(np.argmax(predictions[0]))
        return self.classes[top_index] if top_index < len(self.classes) else "未知类别"

    def detect_scenes(self, threshold=30.0):
        """Detect scene changes by mean absolute grayscale frame difference.

        Args:
            threshold: Mean pixel difference above which two consecutive
                frames are treated as a scene boundary.

        Returns:
            List of dicts with keys 'time' (seconds), 'frames' (list holding
            the first frame of the new scene) and 'type' (ImageNet label).
        """
        cap = cv2.VideoCapture(self.clip.filename)
        prev_frame = None
        scene_changes = []
        frame_count = 0

        try:
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break

                current_time = frame_count / self.fps
                frame_count += 1

                if prev_frame is not None:
                    gray_prev = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
                    gray_curr = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    frame_diff = cv2.absdiff(gray_prev, gray_curr).mean()

                    if frame_diff > threshold:
                        scene_changes.append({
                            'time': current_time,
                            'frames': [frame],
                            'type': None
                        })

                prev_frame = frame
        finally:
            # BUG FIX: the capture was never released, leaking the file handle.
            cap.release()

        # Attach a semantic label to each detected scene.
        for scene in scene_changes:
            scene['type'] = self._classify_scene_type(scene['frames'][0])

        return scene_changes

    def auto_cut(self, template=None, max_duration=300):
        """Run the full pipeline: detect, filter by template, cut, render.

        Args:
            template: "vlog" or "tutorial" to filter scenes; any other value
                keeps all detected scenes.
            max_duration: Upper bound on total output duration in seconds.

        Returns:
            Filesystem path of the rendered output video.
        """
        scenes = self.detect_scenes()

        if template == "vlog":
            selected = self._apply_vlog_template(scenes)
        elif template == "tutorial":
            selected = self._apply_tutorial_template(scenes)
        else:
            selected = scenes

        timeline = self._generate_timeline(selected, max_duration)
        return self._render_video(timeline)

    def _apply_vlog_template(self, segments):
        """Keep only scene types suited to a vlog-style cut."""
        return [s for s in segments if s['type'] in ['landscape', 'action', 'interview']]

    def _apply_tutorial_template(self, segments):
        """Keep only scene types suited to a tutorial-style cut."""
        return [s for s in segments if s['type'] in ['interview', 'indoor']]

    def _generate_timeline(self, segments, max_duration):
        """Build cut ranges, capped at max_duration total output length.

        Each segment contributes a fixed 5-second clip starting at its
        detected time, clamped so it never extends past the end of the video.
        """
        timeline = []
        current_duration = 0
        segment_length = 5  # seconds taken from each selected scene

        for seg in segments:
            if current_duration + segment_length <= max_duration:
                # BUG FIX: clamp to the clip duration so subclip() is never
                # asked for a range beyond the end of the source video.
                end = min(seg['time'] + segment_length, self.clip.duration)
                timeline.append({
                    'start': seg['time'],
                    'end': end,
                    'type': seg['type']
                })
                current_duration += segment_length

        return timeline

    def _render_video(self, timeline):
        """Cut the timeline ranges and write them out as one video file.

        Raises:
            ValueError: If the timeline is empty (nothing to render).
        """
        if not timeline:
            # BUG FIX: concatenate_videoclips([]) fails with an opaque error;
            # fail early with a clear message instead.
            raise ValueError("时间线为空, 没有可渲染的片段")

        clips = [self.clip.subclip(item['start'], item['end']) for item in timeline]
        final_clip = concatenate_videoclips(clips)

        output_path = os.path.join(os.getcwd(), "single", "output", f"output_{int(time.time())}.mp4")
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        final_clip.write_videofile(output_path)
        return output_path


def main():
    """Demo entry point: auto-cut a test video with default settings."""
    # Test parameters.
    input_video = os.path.join(os.getcwd(), "tests", "data", "test_movie.mp4")  # test video path
    output_template = None  # "vlog" or "tutorial"; None keeps all scenes
    max_duration = 60  # maximum output length in seconds

    print(f"正在处理视频: {input_video}")

    try:
        # BUG FIX: construct the editor inside the try block so a model-load
        # failure (RuntimeError raised by __init__) is reported through the
        # same friendly message as other errors instead of a raw traceback.
        editor = AutoVideoEditor(input_video)
        output_path = editor.auto_cut(template=output_template, max_duration=max_duration)
        print(f"视频剪辑完成! 输出路径: {output_path}")
    except Exception as e:
        print(f"处理失败: {str(e)}")

if __name__ == "__main__":
    main()