import { getNewWebsocket } from "@/utils/utils";
import { Base64 } from "js-base64";
import TransformPCMWorker from "./asr-pcm.worker.js?worker";
import { getPreParameter, submitAudioTranscriptionRecord } from "@/service/web";
import { t } from "@/config/lang.ts";
import { blobToFile } from "@/utils/file";
import dayjs from "dayjs";
import { nextTick } from "vue";
import ttsPlayer from '@/utils/speech/TtsPlayer.js';
// Module-level Web Worker that converts raw Float32 PCM frames to the
// 16 kHz / 16-bit byte stream the backend expects; created once at module
// load and shared by all Iat instances.
const recorderWorker = new TransformPCMWorker();
class Iat {
  /**
   * Microphone capture + simultaneous transcription/translation client.
   *
   * Audio path: getUserMedia -> AudioWorklet ("recorder-processor") ->
   * PCM worker (16 kHz / 16-bit) -> 1280-byte binary frames over a WebSocket.
   * Server results are surfaced through `callback`.
   *
   * @param {Function} callback - receives { text } (transcription so far)
   *   and { translation } (translated text so far) updates.
   * @param {Function} close - invoked with `false` when recording stops.
   */
  constructor(callback, close) {
    this.callback = callback;
    this.close = close;
    this.monitoringConnected = false; // whether monitoring ("ear return") is connected
  }
  stream;             // MediaStream from getUserMedia
  workletNode;        // AudioWorkletNode running "recorder-processor"
  source;             // MediaStreamAudioSourceNode wrapping `stream`
  appId;
  socket;             // WebSocket to the transcription backend
  timeoutId;          // connect-timeout timer handle
  once = true;
  textList = [];      // accumulated transcription segments
  transText = [];     // accumulated translation segments
  parmes;             // options passed to startRecording
  audioFile = [];     // raw Float32Array frames kept so a full WAV can be built later
  buffer = [];        // pending 16-bit PCM bytes awaiting TARGET_SIZE framing
  audioBuffer = [];   // temporary cache of Int8Array chunks
  bufferSize = 0;
  TARGET_SIZE = 1280; // bytes per frame (~40 ms of 16 kHz / 16-bit mono audio)
  timeout = 3000;     // WebSocket connect timeout, ms
  errorFunc = null;
  ctx;                // 2D canvas context used by draw()
  canvas;
  bufferLength;
  analyser;           // AnalyserNode feeding the waveform visualization
  dataArray;
  lock = false;       // true while draw() should keep scheduling frames
  deviceChangeHandler = null; // active 'devicechange' listener, so it can be removed
  barHeights = null;  // per-bar smoothed heights, persisted across draw() frames

  /**
   * Start capturing microphone audio, stream it to the ASR websocket and
   * report results through `this.callback`.
   * @param {Object} parmes - session options (type, origin, src, code, ent, ...).
   * @param {Function} [func] - called once the audio graph is set up.
   * @param {Function} [errorFunc] - called on connect timeout / device errors.
   */
  async startRecording(parmes, func, errorFunc) {
    this.reset();
    this.errorFunc = errorFunc;
    this.parmes = parmes;

    await this.getWebsocket(parmes);
    // Ask for microphone access; resolves to a MediaStream of the audio input.
    this.stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    // React to audio device changes (headset plugged in / unplugged).
    this.initDeviceChangeListener();

    this.audioContext = new AudioContext(); // graph all audio nodes live in
    const processorUrl = new URL("./processor.js", import.meta.url).href;

    // Load and register the AudioWorklet processor ("recorder-processor").
    await this.audioContext.audioWorklet.addModule(processorUrl);
    this.analyser = this.audioContext.createAnalyser(); // feeds the waveform visualization
    // Wrap the microphone MediaStream as a source node for the graph.
    this.source = this.audioContext.createMediaStreamSource(this.stream);

    // Node that receives raw audio frames from the worklet processor; each
    // processed frame arrives on workletNode.port as a Float32Array.
    this.workletNode = new AudioWorkletNode(
      this.audioContext,
      "recorder-processor"
    );
    func?.();

    this.workletNode.port.onmessage = (event) => {
      // Drop frames until the WebSocket is OPEN (readyState 1).
      if (this.socket.readyState !== 1) {
        return;
      }

      // Hand the Float32Array PCM frame to the worker for conversion to
      // 16 kHz / 16-bit PCM bytes.
      recorderWorker.postMessage({
        command: "transform",
        buffer: event.data,
      });
      // Keep every raw frame so a complete WAV can be exported after stop.
      this.audioFile.push(event.data);
    };
    this.analyser.fftSize = 256; // controls waveform resolution
    this.source.connect(this.workletNode);
    this.source.connect(this.analyser);

    // Frequency-bin count and the byte array the analyser writes into.
    this.bufferLength = this.analyser.frequencyBinCount;
    this.dataArray = new Uint8Array(this.bufferLength);

    // The worker returns the converted PCM as a plain byte array; frame it
    // into TARGET_SIZE-byte chunks and send each as a binary message.
    recorderWorker.onmessage = (e) => {
      this.buffer = this.buffer.concat(e.data.buffer);

      while (this.buffer.length >= this.TARGET_SIZE) {
        const chunk = this.buffer.slice(0, this.TARGET_SIZE);
        this.buffer = this.buffer.slice(this.TARGET_SIZE);

        const uint8Array = new Uint8Array(chunk);
        this.send(uint8Array.buffer);
      }
    };
  }

  /**
   * Re-bind microphone capture when the system's audio devices change
   * (e.g. headphones plugged in / unplugged), so the worklet and analyser
   * stay connected to a live input.
   */
  initDeviceChangeListener() {
    // Replace any handler left from a previous session so repeated
    // start/stop cycles do not stack duplicate listeners.
    if (this.deviceChangeHandler) {
      navigator.mediaDevices.removeEventListener('devicechange', this.deviceChangeHandler);
    }
    this.deviceChangeHandler = async () => {
      // Enumerate the currently available audio inputs.
      const devices = await navigator.mediaDevices.enumerateDevices();
      const audioInputs = devices.filter(d => d.kind === 'audioinput');

      // No microphone available — nothing to switch to.
      if (audioInputs.length === 0) return;

      // Device currently feeding our stream.
      const currentDeviceId = this.stream?.getAudioTracks()[0]?.getSettings()?.deviceId;

      // Candidate replacement device.
      // NOTE(review): this picks the first entry whose id is NOT 'default' —
      // confirm that is really the intended "system default" selection.
      const defaultDevice = audioInputs.find(d => d.deviceId !== 'default') || audioInputs[0];

      // Already using that device — no switch needed.
      if (currentDeviceId === defaultDevice.deviceId) {
        return;
      }

      // Stop every track of the old stream before re-acquiring.
      this.stream?.getTracks().forEach(track => track.stop());

      try {
        // Acquire a fresh stream from the new device.
        const newStream = await navigator.mediaDevices.getUserMedia({ audio: true });

        const newSource = this.audioContext.createMediaStreamSource(newStream);

        // Swap the source node feeding the graph.
        this.source.disconnect();
        this.source = newSource;
        this.stream = newStream;

        this.source.connect(this.workletNode);
        this.source.connect(this.analyser);

        if (this.audioContext.state === 'suspended') {
          await this.audioContext.resume();
        }

        console.log('✅ 已切换到新麦克风');
      } catch (err) {
        this.errorFunc?.(err);
      }
    };
    navigator.mediaDevices.addEventListener('devicechange', this.deviceChangeHandler);
  }

  /**
   * Build a waveshaper distortion curve (not referenced elsewhere in this
   * file; kept for audio-effect use).
   * @param {number} [amount=50] - distortion intensity.
   * @returns {Float32Array} 44100-point transfer curve.
   */
  makeDistortionCurve(amount = 50) {
    const nSamples = 44100;
    const curve = new Float32Array(nSamples);
    const deg = Math.PI / 180;
    for (let i = 0; i < nSamples; ++i) {
      const x = (i * 2) / nSamples - 1;
      curve[i] = (Math.sin(x * Math.PI * 0.5) + (amount * Math.sin(x * Math.PI * 2)) * deg) / (1 + amount * 0.01);
    }
    return curve;
  }

  /**
   * Fetch the session parameters and open the transcription WebSocket.
   * Resolves once the socket is open; rejects on error or unsupported browser.
   * @param {Object} a - session options (origin, code, src, ent, parames, isAddUrl).
   */
  async getWebsocket(a) {
    const { origin: lang, parames = {}, isAddUrl } = a;
    if (isAddUrl) {
      parames.language = a.code;
    } else {
      delete parames.language;
    }
    let obj = {
      ...parames,
      multiFuncData: false,
      pd: 'culture',
      role_type: '2',
      tts_speaker_map: a?.ent,
      audio_encode: "pcm_s16le",
      samplerate: "16000",
      lang: a?.code, // transcription language
      target_lang: a?.src, // translation language
      trans_type: "trans_llm",
      // TTS output language(s); 'cnen' is split into 'cn,en'
      tts_target_lang: a?.src == 'cnen' ? a?.src.replace(/(..)(..)/, '$1,$2') : a?.src,
      use_tts: true,
    };
    const res = await getPreParameter(obj); // resolves the websocket endpoint for this session
    const { data } = res;

    return new Promise((resolve, reject) => {
      const websocketUrl = getNewWebsocket(data); // normalize the returned URL to ws/wss
      if ("WebSocket" in window) {
        this.socket = new WebSocket(websocketUrl);
      } else if ("MozWebSocket" in window) {
        // Legacy Firefox exposes the constructor on window; the bare
        // identifier would be a ReferenceError inside an ES module.
        this.socket = new window.MozWebSocket(websocketUrl);
      } else {
        alert("浏览器不支持WebSocket");
        reject();
        return;
      }

      // Abort the attempt if the socket is still CONNECTING after `timeout` ms.
      clearTimeout(this.timeoutId);
      this.timeoutId = setTimeout(() => {
        if (this.socket.readyState === WebSocket.CONNECTING) {
          this.socket.close();            // give up on the connection
          this.errorFunc && this.errorFunc(); // report the failure
        }
      }, this.timeout);

      // Connected successfully.
      this.socket.onopen = () => {
        clearTimeout(this.timeoutId); // cancel the connect-timeout guard
        resolve();
        nextTick(() => {
          this.canvas = document.getElementById("waveform");
          this.ctx = this.canvas.getContext("2d");
          this.canvas.width = 200;
          this.canvas.height = 90;
          this.draw(); // start the waveform visualization loop
        });
      };

      // Route server messages by session type.
      this.socket.onmessage = (e) => {
        const { type } = this.parmes;

        const funcs = {
          1: () => {
            const message = e.data;
            const data = JSON.parse(message);
            const code = data.header.code;
            const status = data.header.status;
            if (code !== 0) {
              console.log(`请求错误：${code}`);
              this.stopRecord(); // server reported an error — stop recording
            } else {
              const payload = data.payload;
              if (payload) {
                // result.text is Base64-encoded JSON
                const decodedText = JSON.parse(Base64.decode(payload.result.text));
                this.renderResult({
                  data: {
                    result: decodedText,
                  },
                });
              }
              if (status === 2) {
                this.stopRecord(); // status 2 marks the final frame
              }
            }
          },
          2: () => {
            this.render(JSON.parse(e.data));
          },
          3: () => {
            this.render(JSON.parse(e.data));
          },
        };

        funcs[type]?.(); // ignore messages for unknown session types
      };
      this.socket.onerror = (e) => {
        // NOTE(review): showError is not imported here — presumably an
        // auto-imported global; verify.
        showError(t('连接失败'));
        reject(e);
      };
      this.socket.onclose = () => {};
    });
  }

  /** Tear down the audio graph, stop all tracks and release listeners. */
  stopRecording() {
    this.lock = false; // stops the draw() loop
    if (this.workletNode) {
      this.workletNode.port.postMessage("stop"); // tell the worklet to stop producing frames
      this.workletNode.disconnect();
    }
    if (this.source) {
      this.source.disconnect();
    }
    if (this.audioContext) {
      this.audioContext.close();
    }
    if (this.stream) {
      this.stream.getTracks().forEach((track) => track.stop());
    }
    // Detach the devicechange listener so a stopped session no longer
    // reacts to device events.
    if (this.deviceChangeHandler) {
      navigator.mediaDevices.removeEventListener('devicechange', this.deviceChangeHandler);
      this.deviceChangeHandler = null;
    }
    // Mark monitoring ("ear return") as disconnected.
    this.monitoringConnected = false;
  }

  /**
   * Parse a type-1 recognition payload: concatenate the first candidate of
   * each word slot, commit finished segments (rst === "rlt") and emit the
   * combined text via callback.
   */
  renderResult(resultData) {
    const { src } = this.parmes;
    let { data } = resultData;
    let str = "";
    const { ws = [], rst } = data.result;
    for (const word of ws) {
      str += word.cw[0].w; // first candidate of each word slot
    }
    if (rst === "rlt") {
      // Segment is final: commit it and, when a target language is set,
      // request its translation.
      this.textList.push(str);
      src && this.getTranslationUrl(str);
      str = "";
    }
    this.callback({
      text: this.textList.join("") + str,
    });
  }

  /**
   * Parse streaming results for session types 2/3. Messages carry a
   * res_type of 'asr' (transcription), 'trans' (translation) or 'tts'
   * (synthesized audio).
   */
  render(resultData) {
    let { data, res_type } = resultData;

    // Transcription: only segments with type '0' carry final text.
    if (res_type == 'asr') {
      let str = '';
      if (data?.cn?.st?.type == '0') {
        str += (data?.cn?.st?.rt || [])
          .flatMap(item => item.ws || [])
          .flatMap(ws => ws.cw || [])
          .map(cw => cw.w)
          .join('');
      }
      this.textList.push(str);
      this.callback({
        text: this.textList.join(""),
      });
    }

    // Translation: take only one kind of result to avoid duplicates.
    // NOTE(review): the guard is `!data.end` — confirm `end` really marks
    // the results that should be skipped here.
    if (res_type == 'trans') {
      if (!data.end && data?.type == 0) {
        // Pass dst directly; the old `'' + data?.dst` concatenation turned
        // a missing dst into the literal string "undefined".
        this.getTranslationUrl(data?.dst);
      }
    }

    // Synthesized audio: play it back.
    if (res_type == 'tts') {
      this.playVoice(data?.audio);
    }
  }

  /**
   * Record a translated segment and surface the full translation so far.
   * (The src/language parameters are unused but kept for interface
   * compatibility with existing callers.)
   */
  async getTranslationUrl(
    text,
    src = this.parmes.src,
    language = this.parmes.language
  ) {
    if (!text) {
      return;
    }

    this.transText.push(text);
    this.callback({
      translation: this.transText.join(''),
    });
  }

  /** Play one chunk of synthesized (TTS) audio, if present. */
  playVoice(audio) {
    audio && ttsPlayer.play(audio);
  }

  /** Reset all per-session state before a new recording starts. */
  reset() {
    this.stream = null;
    this.workletNode = null;
    this.source = null;
    this.appId = null;
    this.socket = null;
    this.once = true;
    this.textList = [];
    this.parmes = null;
    this.audioFile = [];
    this.transText = [];
    this.buffer = [];
    this.lock = true; // allow the draw() loop to run for the new session
    this.analyser = null;
    this.ctx = null;
    this.canvas = null;
    this.barHeights = null; // restart waveform smoothing from the baseline
  }

  /** Send one binary frame over the websocket. */
  send(data) {
    this.socket.send(data);
  }

  /** Encode an ArrayBuffer (or byte array) as a Base64 string. */
  ArrayBufferToBase64(buffer) {
    let binary = "";
    const bytes = new Uint8Array(buffer);
    const len = bytes.byteLength;
    for (let i = 0; i < len; i++) {
      binary += String.fromCharCode(bytes[i]);
    }
    return window.btoa(binary);
  }

  /**
   * End the session: stop TTS playback, notify the owner, close the socket
   * and tear down capture.
   */
  stopRecord() {
    ttsPlayer.stop(); // stop any in-flight TTS playback
    this.close(false);
    const { type } = this.parmes;
    const audio = this.ArrayBufferToBase64(this.buffer);
    // Per-type end-of-stream frames. Sending is currently disabled (see the
    // commented `send` below); kept for when the protocol requires an
    // explicit final frame again.
    const stopType = {
      1: {
        header: {
          status: 2,
          app_id: this.appId,
        },
        payload: {
          audio: {
            audio,
            sample_rate: 16000,
            encoding: "raw",
          },
        },
      },
      2: {
        data: {
          audio,
          status: 2,
          format: "audio/L16;rate=16000",
          encoding: "raw",
        },
      },
      3: {
        data: {
            audio,
            status: 2,
            format: "audio/L16;rate=16000",
            encoding: "raw",
          },
      },
    };
    // this.send(stopType[type]);
    this.socket.close();
    this.stopRecording();
    // this.saveFile(this.textList.join(''), this.transText)
  }

  /**
   * Build a mono 16-bit PCM WAV file from Float32 sample frames.
   * @param {Float32Array[]} buffers - sample frames with values in [-1, 1].
   * @param {number} sampleRate - sample rate declared in the WAV header.
   * @returns {ArrayBuffer} complete RIFF/WAVE file contents.
   */
  encodeWAV(buffers, sampleRate) {
    let dataLength = buffers.reduce((len, frame) => len + frame.length, 0);
    let buffer = new ArrayBuffer(44 + dataLength * 2);
    let view = new DataView(buffer);

    function writeString(offset, str) {
      for (let i = 0; i < str.length; i++) {
        view.setUint8(offset + i, str.charCodeAt(i));
      }
    }

    writeString(0, "RIFF");
    view.setUint32(4, 36 + dataLength * 2, true); // RIFF chunk size
    writeString(8, "WAVE");
    writeString(12, "fmt ");
    view.setUint32(16, 16, true);              // fmt chunk size
    view.setUint16(20, 1, true);               // audio format: PCM
    view.setUint16(22, 1, true);               // channels: mono
    view.setUint32(24, sampleRate, true);
    view.setUint32(28, sampleRate * 2, true);  // byte rate = rate * channels * 2
    view.setUint16(32, 2, true);               // block align
    view.setUint16(34, 16, true);              // bits per sample
    writeString(36, "data");
    view.setUint32(40, dataLength * 2, true);  // data chunk size

    // Clamp each float sample to [-1, 1] and write it as little-endian int16.
    let offset = 44;
    for (const frame of buffers) {
      for (let i = 0; i < frame.length; i++, offset += 2) {
        const sample = Math.max(-1, Math.min(1, frame[i]));
        view.setInt16(offset, sample * 0x7fff, true);
      }
    }

    return buffer;
  }

  /**
   * Package the recorded audio plus text results and upload them.
   * @param {string} originText - joined transcription text.
   * @param {string|string[]} transText - translation result(s).
   */
  saveFile(originText, transText) {
    const { origin, src } = this.parmes;
    // NOTE(review): header claims 44.1 kHz, but an AudioContext often
    // captures at 48 kHz — verify, otherwise playback speed will be off.
    const sampleRate = 44100;
    const wavBuffer = this.encodeWAV(this.audioFile, sampleRate);
    const voiceBlob = new Blob([wavBuffer], { type: "audio/wav" });
    const fileName = dayjs().format("YYYY-MM-DD HH:mm:ss") + ".wav";

    const voiceFile = blobToFile(voiceBlob, fileName);

    const textBlob = new Blob([transText], { type: "text/plain" });
    const textFile = blobToFile(textBlob, t("结果文件") + ".txt");

    const srcBlob = new Blob([originText], { type: "text/plain" });
    const srcFile = blobToFile(srcBlob, t("源文件") + ".txt");

    const formData = new FormData();

    formData.append("abilityType", 1);

    formData.append("audioFile", voiceFile);
    formData.append("fileName", fileName);
    formData.append("originalLanguage", origin);
    src && formData.append("targetLanguage", src);

    if (src) {
      formData.append("originalFile", srcFile);
      formData.append("targetFile", textFile);
    } else {
      formData.append("originalFile", srcFile);
    }
    submitAudioTranscriptionRecord(formData).then(() => {
      // NOTE(review): showMessage is not imported here — presumably an
      // auto-imported global; verify.
      showMessage(t("本次结果可在文件中心查询"));
    });
  }

  /**
   * Render the live waveform bars onto the canvas; self-schedules with
   * requestAnimationFrame while `lock` is true.
   */
  draw() {
    try {
      this.lock && requestAnimationFrame(() => this.draw());
      this.analyser.getByteFrequencyData(this.dataArray);
      const minHeight = 2; // baseline bar height when silent
      this.ctx.clearRect(0, 0, this.canvas.width, this.canvas.height);
      const barCount = 20; // number of bars
      const barWidth = 4;  // bar thickness
      const gap = 6;       // spacing between bars
      const centerY = this.canvas.height / 2; // bars grow symmetrically from here
      // Persist bar heights across frames so the 0.3 smoothing factor below
      // actually eases values over time (re-creating the array every frame
      // reset the animation each tick).
      if (!this.barHeights || this.barHeights.length !== barCount) {
        this.barHeights = new Array(barCount).fill(minHeight);
      }
      const barHeights = this.barHeights;
      for (let i = 0; i < barCount; i++) {
        let volume = Math.pow(this.dataArray[i] / 255, 2.2); // non-linear volume mapping
        let targetHeight = Math.max(volume * this.canvas.height, minHeight);

        barHeights[i] += (targetHeight - barHeights[i]) * 0.3; // ease toward target

        const x =
          (this.canvas.width - (barCount * (barWidth + gap) - gap)) / 2 +
          i * (barWidth + gap);
        const barHeight = barHeights[i];

        // Blue vertical gradient for each bar.
        const gradient = this.ctx.createLinearGradient(
          0,
          centerY - barHeight,
          0,
          centerY + barHeight
        );
        gradient.addColorStop(0, "rgb(79, 151, 255)");
        gradient.addColorStop(1, "rgb(39, 94, 255)");

        this.ctx.fillStyle = gradient;
        this.ctx.fillRect(x, centerY - barHeight, barWidth, barHeight * 2);
      }
    } catch (err) {
      // Canvas/analyser may be torn down mid-frame; ignore and let the
      // loop die with lock = false.
    }
  }
}

// Default export: the speech-capture / transcription controller class.
export default Iat;
