import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))  # add the project root to the Python path

import os
import cv2
import numpy as np
import json
from moviepy.editor import VideoFileClip
import kagglehub
import tensorflow as tf
from tensorflow.keras.layers import TFSMLayer
from PIL import Image
from transformers import pipeline
from modules.audio_processor import audio_processor

class AutoVideoEditor:
    """Automatically segments a video into scenes, classifies and tags each
    scene, and exports per-scene clips with transcripts and metadata.

    Pipeline: detect_scenes() -> template filtering -> _generate_timeline()
    -> _save_materials() (video / audio / transcript / tags / metadata.json).
    """

    def __init__(self, video_path):
        """Load the video and the classification models.

        Args:
            video_path: Path to the source video file.

        Raises:
            RuntimeError: If either model fails to download or load.
        """
        self.clip = VideoFileClip(video_path)
        self.fps = self.clip.fps
        self.base_dir = os.path.join(os.getcwd(), "single", "output")
        # Ensure the output root exists up front: _audio_to_text writes a
        # temp file here and would otherwise fail on a fresh checkout.
        os.makedirs(self.base_dir, exist_ok=True)

        # Initialize the models.
        try:
            # Scene classification model (MobileNetV2, ImageNet classes).
            path = kagglehub.model_download("google/mobilenet-v2/tensorFlow2/100-224-classification")
            self.scene_model = tf.keras.Sequential([
                TFSMLayer(path, call_endpoint='serving_default')
            ])

            # Multi-label tagging model.
            # NOTE(review): bart-large-mnli is a TEXT zero-shot classifier,
            # but _generate_tags feeds it a PIL image. This likely needs an
            # image-capable model (e.g. CLIP zero-shot-image-classification)
            # instead -- confirm against the transformers pipeline docs.
            self.tag_model = pipeline("zero-shot-classification", 
                                   model="facebook/bart-large-mnli")

            print("模型加载成功!")
        except Exception as e:
            raise RuntimeError(f"模型加载失败: {str(e)}") from e

        # Candidate labels for scene classification / tagging.
        self.classes = self._get_imagenet_labels()
        self.tag_candidates = ["风景", "人物", "建筑", "动物", "美食", 
                             "运动", "室内", "室外", "白天", "夜晚",
                             "城市", "乡村", "特写", "全景", "动态"]

    def _get_imagenet_labels(self):
        """Download (cached by Keras) and return the ImageNet label list."""
        labels_url = "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt"
        labels_path = tf.keras.utils.get_file("ImageNetLabels.txt", labels_url)
        with open(labels_path, encoding="utf-8") as f:
            labels = f.read().splitlines()
        return labels

    def _classify_scene_type(self, frame):
        """Classify a single BGR frame; returns an ImageNet label string."""
        # OpenCV frames are BGR; the model expects RGB in [0, 1] at 224x224.
        img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        img = img.resize((224, 224))
        img_array = np.array(img) / 255.0
        img_array = np.expand_dims(img_array, axis=0)

        # TFSMLayer returns a dict of outputs; grab the (single) endpoint.
        predictions = self.scene_model.predict(img_array)
        output_key = list(predictions.keys())[0]
        output = predictions[output_key]
        top_index = np.argmax(output[0])
        return self.classes[top_index] if top_index < len(self.classes) else "未知类别"

    def _generate_tags(self, frame):
        """Score the candidate tags for a frame; returns {label: score}."""
        img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        result = self.tag_model(img, self.tag_candidates, multi_label=True)
        return {label: score for label, score in zip(result['labels'], result['scores'])}

    def _audio_to_text(self, audio_clip):
        """Transcribe an audio clip; returns "" on failure or missing audio."""
        if audio_clip is None:
            # The source video has no audio track.
            return ""
        temp_audio = os.path.join(self.base_dir, "temp.wav")
        try:
            # Write a temporary WAV and run speech recognition on it.
            audio_clip.write_audiofile(temp_audio)
            return audio_processor.speech_to_text(temp_audio)
        except Exception as e:
            print(f"语音识别失败: {str(e)}")
            return ""
        finally:
            # Always clean up the temp file, even when recognition fails.
            if os.path.exists(temp_audio):
                os.remove(temp_audio)

    def _save_metadata(self, metadata, output_dir):
        """Write metadata.json (UTF-8, non-ASCII preserved) into output_dir."""
        with open(os.path.join(output_dir, "metadata.json"), "w", encoding="utf-8") as f:
            json.dump(metadata, f, ensure_ascii=False, indent=2)

    def detect_scenes(self, threshold=30.0):
        """Detect scene changes via mean absolute grayscale frame difference.

        Args:
            threshold: Mean per-pixel difference above which a new scene is
                declared.

        Returns:
            List of dicts with 'time', 'frames', 'type' and 'tags' keys.
        """
        cap = cv2.VideoCapture(self.clip.filename)
        prev_frame = None
        scene_changes = []
        frame_count = 0

        try:
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break

                current_time = frame_count / self.fps
                frame_count += 1

                if prev_frame is not None:
                    gray_prev = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
                    gray_curr = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    frame_diff = cv2.absdiff(gray_prev, gray_curr).mean()

                    if frame_diff > threshold:
                        scene_changes.append({
                            'time': current_time,
                            'frames': [frame],
                            'type': None,
                            'tags': None
                        })

                prev_frame = frame
        finally:
            # Release the capture handle (previously leaked).
            cap.release()

        # Classify and tag the representative frame of each scene.
        for scene in scene_changes:
            scene['type'] = self._classify_scene_type(scene['frames'][0])
            scene['tags'] = self._generate_tags(scene['frames'][0])

        return scene_changes

    def auto_cut(self, template=None, max_duration=300):
        """Cut the video into scenes and save the materials.

        Args:
            template: Optional filter: "vlog", "tutorial", or None (keep all).
            max_duration: Maximum total output duration in seconds.

        Returns:
            List of output directories, one per saved scene.
        """
        scenes = self.detect_scenes()

        # Filter segments according to the chosen template.
        if template == "vlog":
            selected = self._apply_vlog_template(scenes)
        elif template == "tutorial":
            selected = self._apply_tutorial_template(scenes)
        else:
            selected = scenes

        # Build the cut timeline.
        timeline = self._generate_timeline(selected, max_duration)

        # Export the materials.
        return self._save_materials(timeline)

    def _save_materials(self, timeline):
        """Export each timeline item to a per-scene directory.

        Writes video.mp4, audio.wav (when an audio track exists),
        transcript.txt, tags.txt and metadata.json.
        """
        output_paths = []

        for i, item in enumerate(timeline):
            # Create the per-type output directory.
            scene_type = item['type'].replace(" ", "_")
            output_dir = os.path.join(self.base_dir, scene_type, f"scene_{i}")
            os.makedirs(output_dir, exist_ok=True)

            # Cut the clip.
            clip = self.clip.subclip(item['start'], item['end'])
            video_path = os.path.join(output_dir, "video.mp4")
            clip.write_videofile(video_path)

            # Save the audio track (previously only the path was recorded,
            # never the file) and transcribe it.
            audio_path = os.path.join(output_dir, "audio.wav")
            if clip.audio is not None:
                clip.audio.write_audiofile(audio_path)
            audio_text = self._audio_to_text(clip.audio)

            # Save the transcript (UTF-8: content may contain Chinese).
            with open(os.path.join(output_dir, "transcript.txt"), "w", encoding="utf-8") as f:
                f.write(audio_text)

            # Save the tags (UTF-8: labels are Chinese).
            with open(os.path.join(output_dir, "tags.txt"), "w", encoding="utf-8") as f:
                for tag, score in item['tags'].items():
                    f.write(f"{tag}: {score:.2f}\n")

            # Save the metadata.
            metadata = {
                "start_time": item['start'],
                "end_time": item['end'],
                "duration": item['end'] - item['start'],
                "type": item['type'],
                "tags": item['tags'],
                "transcript": audio_text,
                "video_path": video_path,
                "audio_path": audio_path
            }
            self._save_metadata(metadata, output_dir)

            output_paths.append(output_dir)

        return output_paths

    def _apply_vlog_template(self, segments):
        """Keep only segment types suitable for a vlog cut."""
        return [s for s in segments if s['type'] in ['landscape', 'action', 'interview']]

    def _apply_tutorial_template(self, segments):
        """Keep only segment types suitable for a tutorial cut."""
        return [s for s in segments if s['type'] in ['interview', 'indoor']]

    def _generate_timeline(self, segments, max_duration):
        """Build a timeline of fixed-length cuts capped at max_duration seconds.

        Each entry carries the segment's 'type' AND 'tags' so that
        _save_materials can read them.
        """
        timeline = []
        current_duration = 0

        for seg in segments:
            seg_duration = 5  # default length of each cut, in seconds
            if current_duration + seg_duration <= max_duration:
                timeline.append({
                    'start': seg['time'],
                    'end': seg['time'] + seg_duration,
                    'type': seg['type'],
                    # Bug fix: 'tags' was dropped here, making
                    # _save_materials crash with KeyError on item['tags'].
                    'tags': seg['tags']
                })
                current_duration += seg_duration

        return timeline

def main():
    """CLI entry point: cut the bundled test video into classified scenes."""
    input_video = os.path.join(os.getcwd(), "tests", "data", "test_movie.mp4")
    output_template = None
    max_duration = 60

    print(f"正在处理视频: {input_video}")

    try:
        # Construct inside the try: AutoVideoEditor raises RuntimeError when
        # model loading fails, which previously escaped as a raw traceback
        # instead of the friendly failure message below.
        editor = AutoVideoEditor(input_video)
        output_paths = editor.auto_cut(template=output_template, max_duration=max_duration)
        print(f"视频剪辑完成! 输出路径: {output_paths}")
    except Exception as e:
        print(f"处理失败: {str(e)}")

if __name__ == "__main__":
    main()