import os
import json
import asyncio
from pathlib import Path
from typing import List, Dict, Optional, Tuple
import uuid
from datetime import datetime, timedelta
import subprocess
import cv2
from app.config import settings
from app.utils.logger import logger

class VideoSplitter:
    """Video splitting service: cuts uploaded videos into segments by fixed
    duration or by detected scene changes, and manages segment metadata."""

    def __init__(self):
        # Root upload directory from app settings; segments live in a
        # "segments" subdirectory beneath it.
        self.upload_dir = Path(settings.UPLOAD_DIR)
        self.segments_dir = self.upload_dir / "segments"
        # parents=True: the original mkdir(exist_ok=True) raised
        # FileNotFoundError when UPLOAD_DIR itself did not exist yet.
        self.segments_dir.mkdir(parents=True, exist_ok=True)
        
    async def split_video_by_duration(
        self,
        video_path: Path,
        segment_duration: int = 60,     # target length of each segment, seconds
        max_segments: int = 50,         # hard cap on the number of segments
        min_segment_duration: int = 10  # shortest allowed segment, seconds
    ) -> Dict:
        """Split a video into roughly equal-duration segments with ffmpeg.

        Probes the video, computes balanced split points (no segment shorter
        than ``min_segment_duration``), extracts each segment via stream copy,
        and writes a ``metadata.json`` next to the segment files.

        Args:
            video_path: path to the source video file.
            segment_duration: target length for each segment.
            max_segments: upper bound on how many segments are produced.
            min_segment_duration: minimum acceptable segment length.

        Returns:
            Dict with a ``need_split`` flag, segment descriptors (path, URL,
            timing, size), and totals.

        Raises:
            Exception: wraps any underlying failure.
        """
        try:
            video_info = await self._get_video_info(video_path)
            total_duration = video_info['duration']

            # A video no longer than one segment needs no splitting; bail out
            # before creating directories or invoking ffmpeg.
            if total_duration <= segment_duration:
                return {
                    "need_split": False,
                    "original_video": str(video_path),
                    "segments": [],
                    "total_duration": total_duration
                }

            # Per-video directory for the segment files.
            video_id = video_path.stem
            segment_dir = self.segments_dir / video_id
            segment_dir.mkdir(parents=True, exist_ok=True)

            # Balanced split points honoring the min/max constraints.
            # (The old pre-computed `segment_count` here was dead code.)
            segment_times = self._calculate_balanced_segments(
                total_duration, segment_duration, min_segment_duration, max_segments
            )

            logger.info(f"计算得到 {len(segment_times)} 个分段时间点: {segment_times}")

            segments = []
            for i, (start_time, end_time) in enumerate(segment_times):
                segment_filename = f"segment_{i:03d}_{start_time:.1f}s-{end_time:.1f}s.mp4"
                segment_path = segment_dir / segment_filename

                # Cut this slice out of the source with ffmpeg (stream copy).
                success = await self._extract_segment(
                    video_path, segment_path, start_time, end_time - start_time
                )

                # Brief pause between extractions to ease resource contention.
                if i < len(segment_times) - 1:
                    await asyncio.sleep(0.1)

                if success:
                    segment_url = f"{settings.VIDEO_BASE_URL}/videos/segments/{video_id}/{segment_filename}"
                    segments.append({
                        "index": i,
                        "filename": segment_filename,
                        "path": str(segment_path),
                        "url": segment_url,
                        "start_time": start_time,
                        "end_time": end_time,
                        "duration": end_time - start_time,
                        "file_size": segment_path.stat().st_size if segment_path.exists() else 0
                    })

            # Persist segment metadata alongside the files.
            metadata = {
                "video_id": video_id,
                "original_video": str(video_path),
                "original_duration": total_duration,
                "segment_duration": segment_duration,
                "total_segments": len(segments),
                "segments": segments,
                "created_at": datetime.now().isoformat(),
                "split_method": "duration"
            }

            metadata_file = segment_dir / "metadata.json"
            with open(metadata_file, 'w', encoding='utf-8') as f:
                json.dump(metadata, f, ensure_ascii=False, indent=2)

            logger.info(f"视频拆分完成: {video_path} -> {len(segments)} 个片段")

            return {
                "need_split": True,
                "video_id": video_id,
                "original_video": str(video_path),
                "segments": segments,
                "metadata_file": str(metadata_file),
                "total_duration": total_duration,
                "segment_count": len(segments)
            }

        except Exception as e:
            logger.error(f"视频拆分失败: {e}")
            # Chain the cause so the original traceback is preserved.
            raise Exception(f"视频拆分失败: {str(e)}") from e
    
    async def split_video_by_scenes(
        self,
        video_path: Path,
        scene_threshold: float = 0.3,    # histogram-change sensitivity
        min_segment_duration: int = 10,  # shortest allowed segment, seconds
        max_segments: int = 50           # hard cap on the number of segments
    ) -> Dict:
        """Split a video at automatically detected scene changes.

        Falls back to duration-based splitting when no scene changes are
        found or when detection fails.

        Args:
            video_path: path to the source video file.
            scene_threshold: a new scene starts when histogram correlation
                drops below ``1 - scene_threshold``.
            min_segment_duration: minimum acceptable segment length.
            max_segments: upper bound on how many segments are produced.

        Returns:
            Dict with segment descriptors and totals (same shape as
            :meth:`split_video_by_duration`, plus ``split_method``).
        """
        try:
            scene_changes = await self._detect_scene_changes(
                video_path, scene_threshold, min_segment_duration
            )

            # No usable scene boundaries: fall back to fixed-duration split.
            if len(scene_changes) <= 1:
                return await self.split_video_by_duration(video_path)

            # Cap the segment count WITHOUT losing the tail of the video:
            # stretch the last kept segment to the original end time.
            # (Plain truncation, as before, dropped coverage of everything
            # after the max_segments-th scene.)
            if len(scene_changes) > max_segments:
                final_end = scene_changes[-1][1]
                scene_changes = scene_changes[:max_segments]
                scene_changes[-1] = (scene_changes[-1][0], final_end)

            video_info = await self._get_video_info(video_path)
            total_duration = video_info['duration']

            # Per-video directory for the segment files.
            video_id = video_path.stem
            segment_dir = self.segments_dir / video_id
            segment_dir.mkdir(parents=True, exist_ok=True)

            segments = []
            for i, (start_time, end_time) in enumerate(scene_changes):
                # One decimal place, matching split_video_by_duration's naming
                # (bare float repr produced names like "scene_000_0s-43.3333...s").
                segment_filename = f"scene_{i:03d}_{start_time:.1f}s-{end_time:.1f}s.mp4"
                segment_path = segment_dir / segment_filename

                success = await self._extract_segment(
                    video_path, segment_path, start_time, end_time - start_time
                )

                if success:
                    segment_url = f"{settings.VIDEO_BASE_URL}/videos/segments/{video_id}/{segment_filename}"
                    segments.append({
                        "index": i,
                        "filename": segment_filename,
                        "path": str(segment_path),
                        "url": segment_url,
                        "start_time": start_time,
                        "end_time": end_time,
                        "duration": end_time - start_time,
                        "file_size": segment_path.stat().st_size if segment_path.exists() else 0,
                        "scene_type": "auto_detected"
                    })

            # Persist segment metadata alongside the files.
            metadata = {
                "video_id": video_id,
                "original_video": str(video_path),
                "original_duration": total_duration,
                "total_segments": len(segments),
                "segments": segments,
                "created_at": datetime.now().isoformat(),
                "split_method": "scene_detection",
                "scene_threshold": scene_threshold
            }

            metadata_file = segment_dir / "metadata.json"
            with open(metadata_file, 'w', encoding='utf-8') as f:
                json.dump(metadata, f, ensure_ascii=False, indent=2)

            logger.info(f"基于场景的视频拆分完成: {len(segments)} 个片段")

            return {
                "need_split": True,
                "video_id": video_id,
                "original_video": str(video_path),
                "segments": segments,
                "metadata_file": str(metadata_file),
                "total_duration": total_duration,
                "segment_count": len(segments),
                "split_method": "scene_detection"
            }

        except Exception as e:
            # Best-effort service: degrade to the simpler strategy.
            logger.error(f"场景拆分失败，回退到时长拆分: {e}")
            return await self.split_video_by_duration(video_path)
    
    async def _get_video_info(self, video_path: Path) -> Dict:
        """Probe duration, fps, frame count, resolution and file size via OpenCV.

        Never raises: returns a dict of zeroes on failure so callers can
        index the result safely.
        """
        cap = None
        try:
            cap = cv2.VideoCapture(str(video_path))
            fps = cap.get(cv2.CAP_PROP_FPS)
            frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
            # Guard against fps == 0 (unreadable or corrupt stream).
            duration = frame_count / fps if fps > 0 else 0
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            return {
                "duration": duration,
                "fps": fps,
                "frame_count": frame_count,
                "width": width,
                "height": height,
                "file_size": video_path.stat().st_size
            }
        except Exception as e:
            logger.error(f"获取视频信息失败: {e}")
            # Same key set as the success path (the old fallback omitted
            # "file_size", so callers could hit KeyError on the error path).
            return {"duration": 0, "fps": 0, "frame_count": 0,
                    "width": 0, "height": 0, "file_size": 0}
        finally:
            # Release the capture handle on every path; the original leaked
            # it when stat()/probing raised.
            if cap is not None:
                cap.release()
    
    async def _extract_segment(
        self,
        input_path: Path,
        output_path: Path,
        start_time: float,
        duration: float
    ) -> bool:
        """Extract ``duration`` seconds starting at ``start_time`` with ffmpeg.

        Uses stream copy (no re-encode) for speed. Returns True on success,
        False on any failure (missing input, ffmpeg error, timeout).
        """
        try:
            if not input_path.exists():
                logger.error(f"输入视频文件不存在: {input_path}")
                return False

            # Make sure the destination directory exists.
            output_path.parent.mkdir(parents=True, exist_ok=True)

            cmd = [
                "ffmpeg",
                "-i", str(input_path),
                "-ss", str(start_time),
                "-t", str(duration),
                "-c", "copy",  # stream copy: fast, no re-encoding
                "-avoid_negative_ts", "make_zero",
                "-y",  # overwrite existing output
                str(output_path)
            ]

            logger.info(f"执行ffmpeg命令: {' '.join(cmd)}")

            # Keep the synchronous subprocess.run (Windows-friendly) but run
            # it in a worker thread so a long ffmpeg call does not block the
            # event loop for up to the full 5-minute timeout.
            loop = asyncio.get_running_loop()
            process = await loop.run_in_executor(
                None,
                lambda: subprocess.run(
                    cmd,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    timeout=300  # 5-minute cap per segment
                )
            )

            if process.returncode == 0 and output_path.exists():
                file_size = output_path.stat().st_size
                logger.info(f"视频片段提取成功: {output_path} (大小: {file_size} bytes)")
                return True

            # Log everything ffmpeg said so failures are diagnosable.
            stderr_text = process.stderr.decode('utf-8', errors='ignore')
            stdout_text = process.stdout.decode('utf-8', errors='ignore')
            logger.error(f"ffmpeg失败 (返回码: {process.returncode})")
            logger.error(f"ffmpeg stderr: {stderr_text}")
            logger.error(f"ffmpeg stdout: {stdout_text}")
            logger.error(f"输出文件存在: {output_path.exists()}")
            return False

        except subprocess.TimeoutExpired:
            # A hung ffmpeg deserves a distinct, searchable log line.
            logger.error(f"ffmpeg超时(300s): {output_path}")
            return False
        except Exception as e:
            logger.error(f"视频片段提取失败: {e}")
            import traceback
            logger.error(f"详细错误信息: {traceback.format_exc()}")
            return False
    
    async def _detect_scene_changes(
        self,
        video_path: Path,
        threshold: float = 0.3,
        min_duration: int = 30
    ) -> List[Tuple[float, float]]:
        """Detect scene boundaries via sampled color-histogram comparison.

        Samples one frame roughly every 5 seconds, compares successive 8x8x8
        BGR histograms with correlation, and starts a new segment when the
        correlation drops below ``1 - threshold``. Returns (start, end)
        spans; falls back to a single default span on failure.
        """
        cap = None
        try:
            cap = cv2.VideoCapture(str(video_path))
            fps = cap.get(cv2.CAP_PROP_FPS)
            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

            if fps <= 0:
                # Unknown frame rate: estimate duration assuming 25 fps.
                return [(0, frame_count / 25)]

            # Sample roughly every 5 seconds; clamp to at least 1 so a very
            # low fps (< 0.2) cannot produce range(..., 0) and crash.
            sample_interval = max(1, int(fps * 5))
            scene_changes = [0]  # boundary times, starting at t=0

            prev_hist = None
            for frame_idx in range(0, frame_count, sample_interval):
                cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
                ret, frame = cap.read()

                if not ret:
                    break

                # 8x8x8 BGR histogram of the sampled frame.
                hist = cv2.calcHist([frame], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])

                if prev_hist is not None:
                    correlation = cv2.compareHist(hist, prev_hist, cv2.HISTCMP_CORREL)

                    # Low similarity => scene change; keep boundaries at
                    # least min_duration seconds apart.
                    if correlation < (1 - threshold):
                        scene_time = frame_idx / fps
                        if scene_time - scene_changes[-1] >= min_duration:
                            scene_changes.append(scene_time)

                prev_hist = hist

            # Close the final span at the end of the video.
            total_duration = frame_count / fps
            scene_changes.append(total_duration)

            # Boundary times -> [start, end] spans.
            raw_segments = []
            for i in range(len(scene_changes) - 1):
                raw_segments.append([scene_changes[i], scene_changes[i + 1]])

            # Rebalance undersized spans against a 60-second nominal target.
            optimized_segments = self._optimize_small_segments(
                raw_segments, min_duration, 60
            )

            # Drop anything still below the minimum and freeze to tuples.
            return [(seg[0], seg[1]) for seg in optimized_segments if seg[1] - seg[0] >= min_duration]

        except Exception as e:
            logger.error(f"场景检测失败: {e}")
            return [(0, 60)]  # default single span
        finally:
            # Release on every path; the original leaked the handle on
            # exceptions and on the fps <= 0 early return.
            if cap is not None:
                cap.release()
    
    def get_segments_metadata(self, video_id: str) -> Optional[Dict]:
        """获取视频分段元数据"""
        try:
            metadata_file = self.segments_dir / video_id / "metadata.json"
            if metadata_file.exists():
                with open(metadata_file, 'r', encoding='utf-8') as f:
                    return json.load(f)
            return None
        except Exception as e:
            logger.error(f"读取分段元数据失败: {e}")
            return None
    
    def _calculate_balanced_segments(
        self,
        total_duration: float,
        target_duration: int,
        min_duration: int,
        max_segments: int
    ) -> List[Tuple[float, float]]:
        """Compute evenly balanced (start, end) split points.

        Guarantees (via the small-segment optimizer) that no segment ends up
        shorter than ``min_duration``, while keeping the count at or below
        ``max_segments``.
        """
        # A video that fits in one target-length segment is left whole.
        if total_duration <= target_duration:
            return [(0, total_duration)]

        # How many target-length segments cover the video (round up), capped.
        count = int(total_duration / target_duration)
        if total_duration % target_duration > 0:
            count += 1
        count = min(count, max_segments)

        if count <= 1:
            return [(0, total_duration)]

        # Shrink the count until the even share meets the minimum length.
        share = total_duration / count
        while share < min_duration and count > 1:
            count -= 1
            share = total_duration / count

        # Evenly spaced provisional [start, end] spans.
        spans = [
            [k * share, min((k + 1) * share, total_duration)]
            for k in range(count)
        ]

        # Merge or rebalance any spans that fell below the minimum.
        spans = self._optimize_small_segments(spans, min_duration, target_duration)

        return [(s, e) for s, e in spans]
    
    def _optimize_small_segments(
        self,
        segments: List[List[float]],
        min_duration: int,
        target_duration: int
    ) -> List[List[float]]:
        """Rebalance [start, end] spans so that, as far as possible, none is
        shorter than ``min_duration`` seconds.

        Strategy, repeated until stable: merge an undersized span into its
        next (then previous) neighbour when the merged length stays within
        150% of ``target_duration``; otherwise "borrow" time from a
        neighbour that can spare it. A final pass force-merges any remaining
        undersized span into its successor.

        NOTE(review): ``segments.copy()`` is a shallow copy — the inner
        [start, end] lists are mutated in place, so the caller's lists are
        modified too. Current callers always pass freshly built lists.

        Args:
            segments: ordered list of [start, end] spans, in seconds.
            min_duration: minimum acceptable span length.
            target_duration: nominal span length; merges may grow a span up
                to 1.5x this value.

        Returns:
            The optimized list of [start, end] spans.
        """
        
        optimized = segments.copy()
        changed = True
        
        while changed:
            changed = False
            i = 0
            
            while i < len(optimized):
                current_duration = optimized[i][1] - optimized[i][0]
                
                # Only undersized spans need attention.
                if current_duration < min_duration:
                    merged = False
                    
                    # 1) Try merging with the following span.
                    if i + 1 < len(optimized):
                        next_duration = optimized[i + 1][1] - optimized[i + 1][0]
                        combined_duration = current_duration + next_duration
                        
                        # Merge only if the result stays within 150% of target.
                        if combined_duration <= target_duration * 1.5:
                            optimized[i][1] = optimized[i + 1][1]
                            optimized.pop(i + 1)
                            merged = True
                            changed = True
                    
                    # 2) Otherwise try merging with the preceding span.
                    if not merged and i > 0:
                        prev_duration = optimized[i - 1][1] - optimized[i - 1][0]
                        combined_duration = prev_duration + current_duration
                        
                        # Same 150%-of-target growth limit.
                        if combined_duration <= target_duration * 1.5:
                            optimized[i - 1][1] = optimized[i][1]
                            optimized.pop(i)
                            merged = True
                            changed = True
                            i -= 1  # step back: two entries collapsed into one
                    
                    # 3) No merge possible: borrow time from a neighbour that
                    #    would still stay above the minimum after donating.
                    if not merged:
                        if i + 1 < len(optimized):
                            # Borrow from the next span.
                            next_duration = optimized[i + 1][1] - optimized[i + 1][0]
                            if next_duration > min_duration + 5:  # donor keeps > min_duration
                                borrow_time = min(min_duration - current_duration + 2, next_duration - min_duration)
                                optimized[i][1] += borrow_time
                                optimized[i + 1][0] += borrow_time
                                changed = True
                        elif i > 0:
                            # Current span is the last one: borrow from the previous.
                            prev_duration = optimized[i - 1][1] - optimized[i - 1][0]
                            if prev_duration > min_duration + 5:
                                borrow_time = min(min_duration - current_duration + 2, prev_duration - min_duration)
                                optimized[i - 1][1] -= borrow_time
                                optimized[i][0] -= borrow_time
                                changed = True
                
                i += 1
        
        # Final safety pass: force-merge any span still below the minimum
        # into its successor. The merged result is not re-checked, so in
        # principle the very last span can remain shorter than min_duration.
        final_segments = []
        i = 0
        while i < len(optimized):
            current = optimized[i]
            current_duration = current[1] - current[0]
            
            # Absorb the next span and skip over it.
            if current_duration < min_duration and i + 1 < len(optimized):
                current[1] = optimized[i + 1][1]
                i += 1  # skip the span we just absorbed
            
            final_segments.append(current)
            i += 1
        
        return final_segments

    def cleanup_segments(self, video_id: str) -> bool:
        """清理视频分段文件"""
        try:
            segment_dir = self.segments_dir / video_id
            if segment_dir.exists():
                import shutil
                shutil.rmtree(segment_dir)
                logger.info(f"清理视频分段: {video_id}")
                return True
            return False
        except Exception as e:
            logger.error(f"清理分段失败: {e}")
            return False