import { uniqueId } from 'lodash-es';
import type { BaseTractItem, TrackType } from './Base';
import { UnitFrame2μs } from '@/data/trackConfig';
import { customAudioDecoder, splitClip } from '@/utils/webcodecs';
import { OffscreenSprite, type IClip } from '@webav/av-cliper';

/** Metadata describing an imported audio asset. */
export interface AudioSource {
  /** Unique asset id, used to look the asset up in the decoder. */
  id: string;
  /** URL the raw audio can be loaded from. */
  url: string;
  /** Display name (typically the file name). */
  name: string;
  /** File format/extension of the asset. */
  format: string;
  /** Length of the audio — presumably in seconds (converted to frames at 30fps downstream); confirm against the decoder. */
  duration: number;
}

/**
 * A single audio clip placed on the editor timeline.
 *
 * All positions are expressed in frames: `start`/`end` are timeline frames,
 * while `offsetL`/`offsetR` trim frames from the head/tail of the source clip.
 */
export class AudioTrack implements BaseTractItem {
  /**
   * Frames per second used to convert between seconds and frames.
   * NOTE(review): assumed to match the project frame rate implied by
   * UnitFrame2μs — confirm against trackConfig.
   */
  private static readonly FPS = 30;

  id: string;
  type: TrackType = 'audio';
  source: AudioSource;
  name: string;
  format: string;
  /** Total length of the clip, in frames. */
  frameCount: number;
  /** Timeline frame at which the clip starts. */
  start: number;
  /** Timeline frame at which the clip ends. */
  end: number;
  /** Frames trimmed from the left (head) of the clip. */
  offsetL: number;
  /** Frames trimmed from the right (tail) of the clip. */
  offsetR: number;
  /** Lazily created element used for realtime preview playback. */
  audio: HTMLAudioElement | null = null;

  /**
   * @param source   decoded audio asset metadata
   * @param curFrame timeline frame at which the clip is dropped
   */
  constructor(source: AudioSource, curFrame: number) {
    // Assign a unique track id.
    this.id = uniqueId();
    // Keep a reference to the source asset and mirror its display fields.
    this.source = source;
    this.name = source.name;
    this.format = source.format;

    // Convert the source duration (seconds) into a frame count.
    this.frameCount = source.duration * AudioTrack.FPS;
    this.start = curFrame;
    this.end = this.start + this.frameCount;

    // No trimming initially.
    this.offsetL = 0;
    this.offsetR = 0;
  }

  /** Start (or resume) preview playback, seeking to the given timeline frame. */
  play(currentFrame: number) {
    if (!this.audio) {
      this.audio = new Audio(this.source.url);
    }
    if (this.audio.paused) {
      // Map the timeline frame to a position (in seconds) inside the source file.
      this.audio.currentTime = (currentFrame - this.start - this.offsetL) / AudioTrack.FPS;
      // play() returns a promise; browser autoplay policies may reject it,
      // which would otherwise surface as an unhandled rejection.
      void this.audio.play().catch((err: unknown) => {
        console.error('AudioTrack.play failed:', err);
      });
    }
  }

  /** Pause preview playback if it is currently running. */
  pause() {
    if (this.audio && !this.audio.paused) {
      this.audio.pause();
    }
  }

  /**
   * Build the export-time sprite for this clip: decode the source, apply the
   * head/tail trim, and position the result on the composition timeline.
   *
   * Falls back to the untrimmed audio if trimming fails, and throws a
   * user-friendly error only when even the fallback decode fails.
   */
  async combine() {
    try {
      console.log(`AudioTrack.combine - id: ${this.source.id}, offsetL: ${this.offsetL}, offsetR: ${this.offsetR}, frameCount: ${this.frameCount}`);

      const audio = await customAudioDecoder.decode({ id: this.source.id });

      // Clamp trim offsets so they can never go negative.
      const safeOffsetL = Math.max(0, this.offsetL);
      const safeOffsetR = Math.max(0, this.offsetR);

      const clip = await splitClip(audio as IClip, {
        offsetL: safeOffsetL,
        offsetR: safeOffsetR,
        frameCount: this.frameCount
      });

      // Compute a non-negative timeline placement in microseconds.
      const safeOffset = Math.max(0, this.start * UnitFrame2μs);
      const safeDuration = Math.max(0, (this.end - this.start) * UnitFrame2μs);

      if (!clip) {
        console.warn('Audio clip is not ready, using original audio');
        // Trimming failed: fall back to the untrimmed audio, but still place
        // it at the clip's timeline position rather than at offset 0.
        const fallback = new OffscreenSprite(audio as IClip);
        fallback.time = { offset: safeOffset, duration: safeDuration };
        return fallback;
      }

      const spr = new OffscreenSprite(clip);
      spr.time = {
        offset: safeOffset,
        duration: safeDuration
      };

      console.log(`AudioTrack.combine - 时间设置: offset=${safeOffset}μs, duration=${safeDuration}μs`);

      return spr;
    } catch (error) {
      console.error('Error in AudioTrack.combine:', error);
      // On error, try to build a plain sprite from the original audio so the
      // overall composition does not fail because of a single clip.
      try {
        const audio = await customAudioDecoder.decode({ id: this.source.id });
        const fallback = new OffscreenSprite(audio as IClip);
        // Preserve the clip's timeline placement on the fallback sprite too.
        fallback.time = {
          offset: Math.max(0, this.start * UnitFrame2μs),
          duration: Math.max(0, (this.end - this.start) * UnitFrame2μs)
        };
        return fallback;
      } catch (innerError) {
        console.error('Failed to create fallback audio sprite:', innerError);
        // Everything failed — surface a user-friendly error.
        throw new Error('音频处理失败，请检查音频文件是否完整');
      }
    }
  }
}