<template>
  <div class="min-h-screen py-8 px-4 sm:px-6 lg:px-8">
    <div class="max-w-6xl mx-auto">
      <!-- 页面标题 -->
      <div class="text-center mb-12">
        <h1 class="text-4xl font-bold mb-4 tech-text">Insertable Streams API</h1>
        <p class="text-gray-300 text-lg">
          对 MediaStreamTrack 进行实时处理和转换，实现视频特效和音频处理
        </p>
      </div>

      <!-- 浏览器支持提示 -->
      <div v-if="!isSupported" class="tech-card mb-8 border-yellow-600">
        <div class="flex items-center mb-4">
          <i class="i-carbon-warning text-yellow-500 text-xl mr-3"></i>
          <h3 class="text-xl font-semibold text-yellow-500">浏览器支持提示</h3>
        </div>
        <p class="text-gray-300 mb-4">
          您的浏览器不支持 Insertable Streams API。此功能需要 Chrome 94+ 或其他支持该 API 的浏览器。
        </p>
        <p class="text-gray-400 text-sm">
          注意：这是一个实验性 API，可能需要启用实验性功能标志。
        </p>
      </div>

      <!-- 摄像头控制 -->
      <div class="tech-card mb-8">
        <h2 class="text-2xl font-semibold mb-6 text-white">摄像头控制</h2>
        
        <div class="grid grid-cols-1 lg:grid-cols-2 gap-6">
          <!-- 原始视频 -->
          <div>
            <h3 class="text-lg font-semibold text-white mb-3">原始视频</h3>
            <div class="bg-gray-800 rounded-lg overflow-hidden">
              <video
                ref="originalVideoRef"
                autoplay
                playsinline
                muted
                class="w-full h-auto"
              ></video>
            </div>
          </div>

          <!-- 处理后的视频 -->
          <div>
            <h3 class="text-lg font-semibold text-white mb-3">处理后的视频</h3>
            <div class="bg-gray-800 rounded-lg overflow-hidden">
              <video
                ref="processedVideoRef"
                autoplay
                playsinline
                muted
                class="w-full h-auto"
              ></video>
            </div>
          </div>
        </div>

        <div class="mt-6 flex gap-3">
          <button
            v-if="!isStreaming"
            @click="startCamera"
            class="tech-button"
          >
            <i class="i-carbon-video mr-2"></i>
            启动摄像头
          </button>
          <button
            v-else
            @click="stopCamera"
            class="bg-red-600 hover:bg-red-700 text-white px-6 py-3 rounded-lg transition-colors"
          >
            <i class="i-carbon-video-off mr-2"></i>
            停止摄像头
          </button>
        </div>
      </div>

      <!-- 视频特效 -->
      <div v-if="isStreaming" class="tech-card mb-8">
        <h2 class="text-2xl font-semibold mb-6 text-white">视频特效</h2>
        
        <div class="grid grid-cols-2 md:grid-cols-4 gap-4">
          <button
            @click="applyEffect('none')"
            :class="currentEffect === 'none' ? 'bg-tech-accent' : 'bg-gray-700 hover:bg-gray-600'"
            class="text-white px-4 py-3 rounded-lg transition-colors"
          >
            <i class="i-carbon-close-outline text-xl mb-2 block"></i>
            无特效
          </button>

          <button
            @click="applyEffect('grayscale')"
            :class="currentEffect === 'grayscale' ? 'bg-tech-accent' : 'bg-gray-700 hover:bg-gray-600'"
            class="text-white px-4 py-3 rounded-lg transition-colors"
          >
            <i class="i-carbon-contrast text-xl mb-2 block"></i>
            灰度
          </button>

          <button
            @click="applyEffect('sepia')"
            :class="currentEffect === 'sepia' ? 'bg-tech-accent' : 'bg-gray-700 hover:bg-gray-600'"
            class="text-white px-4 py-3 rounded-lg transition-colors"
          >
            <i class="i-carbon-image-reference text-xl mb-2 block"></i>
            复古
          </button>

          <button
            @click="applyEffect('invert')"
            :class="currentEffect === 'invert' ? 'bg-tech-accent' : 'bg-gray-700 hover:bg-gray-600'"
            class="text-white px-4 py-3 rounded-lg transition-colors"
          >
            <i class="i-carbon-arrows-horizontal text-xl mb-2 block"></i>
            反色
          </button>

          <button
            @click="applyEffect('blur')"
            :class="currentEffect === 'blur' ? 'bg-tech-accent' : 'bg-gray-700 hover:bg-gray-600'"
            class="text-white px-4 py-3 rounded-lg transition-colors"
          >
            <i class="i-carbon-blur text-xl mb-2 block"></i>
            模糊
          </button>

          <button
            @click="applyEffect('edge')"
            :class="currentEffect === 'edge' ? 'bg-tech-accent' : 'bg-gray-700 hover:bg-gray-600'"
            class="text-white px-4 py-3 rounded-lg transition-colors"
          >
            <i class="i-carbon-filter-edit text-xl mb-2 block"></i>
            边缘检测
          </button>

          <button
            @click="applyEffect('pixelate')"
            :class="currentEffect === 'pixelate' ? 'bg-tech-accent' : 'bg-gray-700 hover:bg-gray-600'"
            class="text-white px-4 py-3 rounded-lg transition-colors"
          >
            <i class="i-carbon-grid text-xl mb-2 block"></i>
            像素化
          </button>

          <button
            @click="applyEffect('brightness')"
            :class="currentEffect === 'brightness' ? 'bg-tech-accent' : 'bg-gray-700 hover:bg-gray-600'"
            class="text-white px-4 py-3 rounded-lg transition-colors"
          >
            <i class="i-carbon-brightness-contrast text-xl mb-2 block"></i>
            高亮
          </button>
        </div>
      </div>

      <!-- 特效参数 -->
      <div v-if="isStreaming && currentEffect !== 'none'" class="tech-card mb-8">
        <h2 class="text-2xl font-semibold mb-6 text-white">特效参数</h2>
        
        <div class="grid grid-cols-1 md:grid-cols-2 gap-6">
          <!-- 强度控制 -->
          <div v-if="['blur', 'brightness', 'pixelate'].includes(currentEffect)" class="bg-gray-800 rounded-lg p-4">
            <label class="text-white font-medium mb-3 block">
              {{ getEffectLabel() }} ({{ effectIntensity }})
            </label>
            <input
              v-model.number="effectIntensity"
              type="range"
              :min="getEffectMin()"
              :max="getEffectMax()"
              :step="getEffectStep()"
              class="w-full"
            >
            <div class="flex justify-between text-xs text-gray-400 mt-1">
              <span>{{ getEffectMin() }}</span>
              <span>{{ getEffectMax() }}</span>
            </div>
          </div>

          <!-- FPS 显示 -->
          <div class="bg-gray-800 rounded-lg p-4">
            <div class="text-gray-400 text-sm mb-2">处理性能</div>
            <div class="text-white text-2xl font-semibold">{{ fps }} FPS</div>
            <div class="text-gray-400 text-sm mt-2">帧处理速度</div>
          </div>
        </div>
      </div>

      <!-- 实时统计 -->
      <div v-if="isStreaming" class="tech-card mb-8">
        <h2 class="text-2xl font-semibold mb-6 text-white">实时统计</h2>
        
        <div class="grid grid-cols-2 md:grid-cols-4 gap-4">
          <div class="bg-gray-800 rounded-lg p-4">
            <div class="text-gray-400 text-sm mb-2">已处理帧数</div>
            <div class="text-white text-2xl font-semibold">{{ processedFrames }}</div>
          </div>

          <div class="bg-gray-800 rounded-lg p-4">
            <div class="text-gray-400 text-sm mb-2">丢帧数</div>
            <div class="text-white text-2xl font-semibold">{{ droppedFrames }}</div>
          </div>

          <div class="bg-gray-800 rounded-lg p-4">
            <div class="text-gray-400 text-sm mb-2">平均处理时间</div>
            <div class="text-white text-2xl font-semibold">{{ avgProcessTime }}ms</div>
          </div>

          <div class="bg-gray-800 rounded-lg p-4">
            <div class="text-gray-400 text-sm mb-2">当前特效</div>
            <div class="text-white text-xl font-semibold">{{ getEffectName() }}</div>
          </div>
        </div>
      </div>

      <!-- Canvas 处理器 -->
      <canvas ref="canvasRef" class="hidden"></canvas>

      <!-- API 信息 -->
      <div class="tech-card">
        <h3 class="text-xl font-semibold mb-4 text-white">API 信息</h3>
        <div class="space-y-4 text-gray-300">
          <div>
            <h4 class="font-semibold text-tech-accent mb-2">浏览器支持</h4>
            <p>Chrome 94+ (实验性功能)</p>
          </div>
          <div>
            <h4 class="font-semibold text-tech-accent mb-2">主要接口</h4>
            <ul class="list-disc list-inside space-y-1">
              <li><code class="bg-gray-800 px-2 py-1 rounded">MediaStreamTrackProcessor</code> - 从轨道读取帧</li>
              <li><code class="bg-gray-800 px-2 py-1 rounded">MediaStreamTrackGenerator</code> - 生成新的轨道</li>
              <li><code class="bg-gray-800 px-2 py-1 rounded">VideoFrame</code> - 表示视频帧</li>
              <li><code class="bg-gray-800 px-2 py-1 rounded">TransformStream</code> - 转换流数据</li>
            </ul>
          </div>
          <div>
            <h4 class="font-semibold text-tech-accent mb-2">工作原理</h4>
            <p class="text-sm leading-relaxed">
              Insertable Streams API 使用 Streams API 来处理 MediaStreamTrack 中的媒体帧。
              通过 MediaStreamTrackProcessor 读取原始帧，经过自定义的 TransformStream 处理，
              最后通过 MediaStreamTrackGenerator 生成新的媒体轨道。这种方式允许开发者在不阻塞主线程的情况下，
              对音视频数据进行实时处理和转换。
            </p>
          </div>
          <div>
            <h4 class="font-semibold text-tech-accent mb-2">使用场景</h4>
            <ul class="list-disc list-inside space-y-1">
              <li>实时视频特效和滤镜</li>
              <li>背景虚化和替换</li>
              <li>人脸识别和增强</li>
              <li>音频降噪和增强</li>
              <li>自定义编解码处理</li>
              <li>视频会议特效</li>
            </ul>
          </div>
          <div>
            <h4 class="font-semibold text-tech-accent mb-2">性能考虑</h4>
            <p class="text-sm leading-relaxed">
              由于需要逐帧处理视频，性能优化非常重要。建议使用 Web Workers 进行复杂处理，
              合理使用硬件加速（WebGL/WebGPU），并根据设备性能动态调整处理质量。
              本演示使用 Canvas 2D 进行简单的图像处理，实际应用中可以考虑使用更高效的方法。
            </p>
          </div>
          <div>
            <h4 class="font-semibold text-tech-accent mb-2">注意事项</h4>
            <p class="text-sm leading-relaxed text-yellow-400">
              ⚠️ 这是一个实验性 API，规范可能会变化。在生产环境中使用前请充分测试。
              某些浏览器可能需要启用实验性功能标志才能使用此 API。
            </p>
          </div>
        </div>
      </div>
    </div>
  </div>
</template>

<script setup lang="ts">
import { ref, onUnmounted } from 'vue'

// --- Reactive UI state ---
// Feature detection for the Insertable Streams API (Chrome 94+).
// NOTE(review): reads `window` at module-evaluation time — fine for a
// client-only page, would throw under SSR; confirm this page never renders
// server-side.
const isSupported = ref(
  'MediaStreamTrackProcessor' in window && 'MediaStreamTrackGenerator' in window
)
const isStreaming = ref(false)
// Name of the active effect; 'none' passes frames through untouched.
const currentEffect = ref<string>('none')
// Slider value; meaning depends on the effect (px for blur, block size for
// pixelate, factor*10 for brightness).
const effectIntensity = ref(10)

// --- Template element refs ---
const originalVideoRef = ref<HTMLVideoElement | null>(null)
const processedVideoRef = ref<HTMLVideoElement | null>(null)
const canvasRef = ref<HTMLCanvasElement | null>(null)

// --- Live statistics shown in the UI ---
const fps = ref(0)
const processedFrames = ref(0)
const droppedFrames = ref(0)
const avgProcessTime = ref(0)

// --- Media pipeline handles (not reactive; plain module state) ---
let originalStream: MediaStream | null = null
let processedStream: MediaStream | null = null
let reader: ReadableStreamDefaultReader | null = null
let writer: WritableStreamDefaultWriter | null = null
// Cleared by stopCamera() to make the frame pump exit.
let processingLoop = true
let lastFrameTime = 0
let frameCount = 0
// Sliding window (last 30 frames) of per-frame processing cost in ms.
let processTimes: number[] = []

// Start the camera, display the raw stream, and (when supported) wire up the
// Insertable Streams processing pipeline. Fix: if anything fails AFTER
// getUserMedia succeeded (e.g. setupInsertableStreams throws), the acquired
// tracks were previously leaked — the camera light stayed on with no UI to
// stop it, because isStreaming never became true.
const startCamera = async () => {
  try {
    // Ask for a 720p-ish camera stream.
    originalStream = await navigator.mediaDevices.getUserMedia({
      video: {
        width: { ideal: 1280 },
        height: { ideal: 720 }
      }
    })

    // Show the untouched stream on the left video element.
    if (originalVideoRef.value) {
      originalVideoRef.value.srcObject = originalStream
    }

    if (isSupported.value) {
      // Route frames through the processor/generator pipeline.
      await setupInsertableStreams()
    } else {
      // No Insertable Streams support: mirror the raw stream instead.
      if (processedVideoRef.value) {
        processedVideoRef.value.srcObject = originalStream
      }
    }

    isStreaming.value = true
    startFPSCounter()

  } catch (err) {
    console.error('启动摄像头失败:', err)
    // Release any tracks acquired before the failure so the camera turns off.
    if (originalStream) {
      originalStream.getTracks().forEach(track => track.stop())
      originalStream = null
    }
    alert('无法访问摄像头，请确保已授予权限')
  }
}

// Build the processing pipeline: camera track → MediaStreamTrackProcessor
// (readable of VideoFrames) → our frame pump → MediaStreamTrackGenerator
// (a synthetic track fed via its writable). Fixes: guard against a missing
// video track instead of indexing blindly, and mark the frame pump as an
// intentionally un-awaited background task.
const setupInsertableStreams = async () => {
  if (!originalStream) return

  const videoTrack = originalStream.getVideoTracks()[0]
  // getUserMedia was asked for video, but never dereference index 0 blindly.
  if (!videoTrack) return

  // Both constructors are experimental and absent from lib.dom.d.ts,
  // hence the `any` casts.
  const processor = new (window as any).MediaStreamTrackProcessor({ track: videoTrack })
  const generator = new (window as any).MediaStreamTrackGenerator({ kind: 'video' })

  reader = processor.readable.getReader()
  writer = generator.writable.getWriter()

  // The generator behaves like a regular MediaStreamTrack.
  processedStream = new MediaStream([generator])

  if (processedVideoRef.value) {
    processedVideoRef.value.srcObject = processedStream
  }

  // Kick off the pump; it runs until stopCamera() clears processingLoop.
  processingLoop = true
  void processFrames()
}

// Frame pump: read a VideoFrame, apply the current effect, and hand the
// result to the generator. Fixes: (1) replace unbounded async recursion with
// a plain loop; (2) only close the source frame when a DISTINCT processed
// frame was written — a frame written to MediaStreamTrackGenerator is
// consumed (and closed) by its sink, so the old unconditional close relied on
// close() being a no-op on consumed frames.
const processFrames = async () => {
  while (processingLoop && reader && writer) {
    try {
      const { value: frame, done } = await reader.read()

      if (done) {
        return
      }
      if (!frame) {
        continue
      }

      const startTime = performance.now()

      // Returns a new VideoFrame, or the original frame for 'none' / on
      // effect failure.
      const processedFrame = await applyEffectToFrame(frame)

      if (processedFrame) {
        await writer.write(processedFrame)
        processedFrames.value++
      } else {
        // Defensive fallback: pass the raw frame through and count the drop.
        await writer.write(frame)
        droppedFrames.value++
      }

      // Maintain a 30-sample sliding window of per-frame processing cost.
      const processTime = performance.now() - startTime
      processTimes.push(processTime)
      if (processTimes.length > 30) {
        processTimes.shift()
      }
      avgProcessTime.value = Math.round(
        processTimes.reduce((a, b) => a + b, 0) / processTimes.length
      )

      // The sink owns whatever we wrote; close the source only if it was not
      // the frame we just transferred.
      if (processedFrame !== frame) {
        frame.close()
      }

    } catch (err) {
      // A read/write failure usually means the track ended; stop pumping.
      console.error('帧处理失败:', err)
      return
    }
  }
}

// Render one VideoFrame through the active effect and return a new frame
// (or the original frame for 'none' / on any failure). Fix: the expensive
// full-frame getImageData/putImageData round-trip was performed for EVERY
// effect, including 'blur' and 'pixelate' which never touch that buffer —
// that is a wasted GPU→CPU readback on every frame. Pixel-level effects now
// do the readback; canvas-level effects skip it entirely.
const applyEffectToFrame = async (frame: any): Promise<any> => {
  if (currentEffect.value === 'none') {
    return frame
  }

  try {
    const canvas = canvasRef.value
    if (!canvas) return frame

    canvas.width = frame.displayWidth
    canvas.height = frame.displayHeight

    const ctx = canvas.getContext('2d')
    if (!ctx) return frame

    // Paint the source frame onto the scratch canvas.
    ctx.drawImage(frame, 0, 0)

    const effect = currentEffect.value

    if (effect === 'blur') {
      // Canvas-level effect: let the 2D context's filter do the work.
      ctx.filter = `blur(${effectIntensity.value}px)`
      ctx.drawImage(canvas, 0, 0)
      ctx.filter = 'none'
    } else if (effect === 'pixelate') {
      // Canvas-level effect: mutates the canvas via fillRect directly.
      applyPixelate(ctx, canvas.width, canvas.height, effectIntensity.value)
    } else {
      // Pixel-level effects: these are the only ones that need the buffer.
      const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height)
      const data = imageData.data

      switch (effect) {
        case 'grayscale':
          applyGrayscale(data)
          break
        case 'sepia':
          applySepia(data)
          break
        case 'invert':
          applyInvert(data)
          break
        case 'edge':
          applyEdgeDetection(data, canvas.width, canvas.height)
          break
        case 'brightness':
          applyBrightness(data, effectIntensity.value / 10)
          break
      }

      ctx.putImageData(imageData, 0, 0)
    }

    // Wrap the processed canvas in a new VideoFrame, keeping the original
    // presentation timestamp. VideoFrame is experimental, hence the cast.
    return new (window as any).VideoFrame(canvas, {
      timestamp: frame.timestamp
    })

  } catch (err) {
    console.error('特效应用失败:', err)
    return frame
  }
}

// Convert each pixel to grayscale in place using the Rec. 601 luma weights.
// Alpha is left untouched.
const applyGrayscale = (data: Uint8ClampedArray) => {
  const len = data.length
  for (let px = 0; px < len; px += 4) {
    const luma = data[px] * 0.299 + data[px + 1] * 0.587 + data[px + 2] * 0.114
    data[px] = luma
    data[px + 1] = luma
    data[px + 2] = luma
  }
}

// Apply the classic sepia tone matrix in place: each output channel is a
// weighted mix of the original RGB, clamped at pure white. Alpha unchanged.
const applySepia = (data: Uint8ClampedArray) => {
  const len = data.length
  for (let i = 0; i < len; i += 4) {
    const red = data[i]
    const green = data[i + 1]
    const blue = data[i + 2]

    data[i] = Math.min(255, red * 0.393 + green * 0.769 + blue * 0.189)
    data[i + 1] = Math.min(255, red * 0.349 + green * 0.686 + blue * 0.168)
    data[i + 2] = Math.min(255, red * 0.272 + green * 0.534 + blue * 0.131)
  }
}

// Invert every color channel in place (255 - value); alpha (every 4th byte)
// is skipped.
const applyInvert = (data: Uint8ClampedArray) => {
  for (let i = 0; i < data.length; i++) {
    if (i % 4 !== 3) {
      data[i] = 255 - data[i]
    }
  }
}

// In-place Sobel edge detection. Gradients are sampled from the RED channel
// only of a snapshot of the input (a cheap approximation that matches the
// original implementation); the resulting magnitude is written to R, G and B.
// The one-pixel border and alpha are left untouched.
const applyEdgeDetection = (data: Uint8ClampedArray, width: number, height: number) => {
  // Snapshot so already-written magnitudes never feed back into the kernel.
  const src = new Uint8ClampedArray(data)
  const row = width * 4

  for (let y = 1; y < height - 1; y++) {
    for (let x = 1; x < width - 1; x++) {
      const i = (y * width + x) * 4

      // Horizontal Sobel kernel [-1 0 1; -2 0 2; -1 0 1]
      const gx =
        -src[i - row - 4] + src[i - row + 4] +
        -2 * src[i - 4] + 2 * src[i + 4] +
        -src[i + row - 4] + src[i + row + 4]

      // Vertical Sobel kernel [-1 -2 -1; 0 0 0; 1 2 1]
      const gy =
        -src[i - row - 4] - 2 * src[i - row] - src[i - row + 4] +
        src[i + row - 4] + 2 * src[i + row] + src[i + row + 4]

      const magnitude = Math.sqrt(gx * gx + gy * gy)
      data[i] = magnitude
      data[i + 1] = magnitude
      data[i + 2] = magnitude
    }
  }
}

// Pixelate: paint each `size`×`size` block with the color of its top-left
// pixel. Fix: the original called ctx.getImageData(x, y, 1, 1) for EVERY
// block — each call is a full pipeline flush / CPU readback, making the
// effect O(blocks) sync points per frame. Read the whole frame back ONCE and
// index into the buffer instead; blocks are disjoint and each sample point is
// read before its own block is filled, so the output is identical.
const applyPixelate = (ctx: CanvasRenderingContext2D, width: number, height: number, size: number) => {
  const pixelSize = Math.max(1, size)
  const src = ctx.getImageData(0, 0, width, height).data

  for (let y = 0; y < height; y += pixelSize) {
    for (let x = 0; x < width; x += pixelSize) {
      const i = (y * width + x) * 4
      ctx.fillStyle = `rgb(${src[i]}, ${src[i + 1]}, ${src[i + 2]})`
      // fillRect clips at the canvas edge, so partial blocks are fine.
      ctx.fillRect(x, y, pixelSize, pixelSize)
    }
  }
}

// Scale each RGB channel by `factor` in place, clamping at pure white.
// Alpha is untouched.
const applyBrightness = (data: Uint8ClampedArray, factor: number) => {
  for (let offset = 0; offset < data.length; offset += 4) {
    for (let channel = 0; channel < 3; channel++) {
      data[offset + channel] = Math.min(255, data[offset + channel] * factor)
    }
  }
}

// Select an effect and reset its intensity slider to a sensible default.
const applyEffect = (effect: string) => {
  currentEffect.value = effect

  // Only the adjustable effects carry a default intensity.
  const defaults: Record<string, number> = {
    blur: 5,
    brightness: 15,
    pixelate: 10
  }
  if (effect in defaults) {
    effectIntensity.value = defaults[effect]
  }
}

// Human-readable label for the intensity slider of the active effect.
const getEffectLabel = (): string => {
  switch (currentEffect.value) {
    case 'blur':
      return '模糊强度'
    case 'brightness':
      return '亮度'
    case 'pixelate':
      return '像素大小'
    default:
      return '强度'
  }
}

// Lower bound of the intensity slider for the active effect.
const getEffectMin = (): number => {
  const minimums: Record<string, number> = { brightness: 1, pixelate: 2 }
  return minimums[currentEffect.value] ?? 0
}

// Upper bound of the intensity slider for the active effect.
const getEffectMax = (): number => {
  const maximums: Record<string, number> = { brightness: 30, pixelate: 50 }
  return maximums[currentEffect.value] ?? 20
}

// Step size of the intensity slider. Fix: the original conditional was
// redundant — both branches returned 1. Every adjustable effect currently
// uses whole-number steps.
const getEffectStep = (): number => {
  return 1
}

// Display name of the active effect for the statistics panel.
const getEffectName = (): string => {
  const names = new Map<string, string>([
    ['none', '无'],
    ['grayscale', '灰度'],
    ['sepia', '复古'],
    ['invert', '反色'],
    ['blur', '模糊'],
    ['edge', '边缘'],
    ['pixelate', '像素'],
    ['brightness', '高亮']
  ])
  return names.get(currentEffect.value) ?? '未知'
}

// FPS counter driven by requestAnimationFrame.
// NOTE(review): this measures the rAF tick rate (i.e. the display refresh
// rate while the tab is visible), NOT how many video frames were actually
// processed per second — yet the UI labels it "帧处理速度". Confirm whether
// processedFrames should drive this value instead.
const startFPSCounter = () => {
  const updateFPS = () => {
    // Stop scheduling ticks once the camera has been stopped.
    if (!isStreaming.value) return

    const now = performance.now()
    frameCount++

    // Recompute once per second to smooth out per-tick jitter.
    if (now - lastFrameTime >= 1000) {
      fps.value = Math.round(frameCount * 1000 / (now - lastFrameTime))
      frameCount = 0
      lastFrameTime = now
    }

    requestAnimationFrame(updateFPS)
  }

  lastFrameTime = performance.now()
  updateFPS()
}

// Stop the camera and release every resource acquired by startCamera().
// Fix: reader.cancel() and writer.close() both return promises that can
// reject (writer.close() rejects when a write is still in flight or the
// stream already errored), which previously surfaced as unhandled promise
// rejections during teardown. We are shutting down regardless, so those
// rejections are deliberately swallowed.
const stopCamera = () => {
  // Signal the frame pump to exit before tearing the streams down.
  processingLoop = false

  if (reader) {
    reader.cancel().catch(() => {})
    reader = null
  }

  if (writer) {
    writer.close().catch(() => {})
    writer = null
  }

  if (originalStream) {
    originalStream.getTracks().forEach(track => track.stop())
    originalStream = null
  }

  if (processedStream) {
    processedStream.getTracks().forEach(track => track.stop())
    processedStream = null
  }

  if (originalVideoRef.value) {
    originalVideoRef.value.srcObject = null
  }

  if (processedVideoRef.value) {
    processedVideoRef.value.srcObject = null
  }

  // Reset the UI statistics.
  isStreaming.value = false
  processedFrames.value = 0
  droppedFrames.value = 0
  avgProcessTime.value = 0
  fps.value = 0
}

// Release the camera and processing pipeline when the component is destroyed.
onUnmounted(() => {
  stopCamera()
})
</script>

