<template>
  <div class="input-wrapper">
    <!-- Text-input mode: textarea with a mic or send icon on the right -->
    <van-field
      v-show="!ishowLongPress"
      v-model="input1"
      type="textarea"
      placeholder="请在此输入您的提问～"
      @focus="scrollToBottom"
    >
      <template #button>
        <div class="btn-wrapper">
          <!-- Mic icon: switches to press-to-talk mode -->
          <img
            @click="handleAudioClick"
            v-show="isShowAudioIcon"
            class="btn"
            :src="audioImage"
            alt="svg"
          />
          <!-- Send icon: shown while the input has text -->
          <img v-show="isShowSendIcon" class="btn" :src="sendImage" alt="svg" />
        </div>
      </template>
    </van-field>
    <!-- Press-to-talk mode: hold the button to record -->
    <div class="longpress-wrapper" v-show="ishowLongPress">
      <div ref="longPressRef" class="van-btn-wrapper">
        <van-button
          class="longpress-btn"
          @mousedown="preventDefault"
          round
          type="success"
          >按住 说话</van-button
        >
      </div>
      <div class="btn-wrapper">
        <!-- Keyboard icon: switches back to text-input mode -->
        <img
          @click="handleMessageClick"
          class="btn"
          :src="messageImage"
          alt="svg"
        />
      </div>
    </div>
    <!-- Recording popup: live transcription on top, cancel/edit drop targets below -->
    <van-popup
      v-model:show="showBottom"
      position="bottom"
      round
      :style="{ height: '40%' }"
    >
      <!-- Live speech-to-text output (one <p> per sentence, managed in script) -->
      <div ref="usrTransDom" class="cover-top"></div>
      <div class="cover-down">
        <!-- Drag here to cancel the recording -->
        <div ref="cancelBtnRef" class="cancel-btn">
          <div v-show="isInsideRangeByCancel" class="cancel-text">取消</div>
          <img
            :class="
              clsx([isInsideRangeByCancel ? 'cancel-img-active' : 'cancel-img'])
            "
            src="../assets/cancel.svg"
            alt="cancel"
          />
        </div>
        <!-- Drag here to edit the transcription -->
        <div ref="editBtnRef" class="edit-btn">
          <div v-show="isInsideRangeByEdit" class="edit-text">编辑</div>
          <img
            :class="
              clsx([isInsideRangeByEdit ? 'edit-img-active' : 'edit-img'])
            "
            src="../assets/edit.svg"
            alt="edit"
          />
        </div>
      </div>
    </van-popup>
  </div>
</template>

<script setup lang="ts">
import { ref, watch, onMounted, nextTick } from "vue";
import Recorder from 'recorder-core';
import 'recorder-core/src/engine/mp3';
import 'recorder-core/src/engine/mp3-engine';
import clsx from "clsx";
import Hammer from "hammerjs";
import audioImage from "../assets/speak.svg";
import sendImage from "../assets/send.svg";
import messageImage from "../assets/message.svg";
import { useSocket } from "../utils/useSocket";
import { aliAppKey, aiSocketUrl, aliSocketUrl } from "../utils/const";
import { baseEncode, getRandomStrNum } from "../utils/methods";
import SessionManager from "../utils/SessionManager";
import { getAliToken } from "../utils/api";

// Component props: the AI avatar id, used to pick the reply voice type.
const { aiAvatarId } = defineProps<{ aiAvatarId: number }>();

// Query parameters from the page URL.
const url = new URL(window.location.href);
const queryParams = {
  sessionId: url.searchParams.get("sessionId") || "",
  userGuid: url.searchParams.get("userGuid") || "",
};

// Page state.
const { send, on } = useSocket(aiSocketUrl);
// const { send: aliSend, on: aliOn } = useSocket(aliSocketUrl);
const sessionManager = new SessionManager();
const input1 = ref(""); // v-model of the text input
const usrTransDom = ref<any>(null); // container holding the live-transcription <p> nodes
const isShowSendIcon = ref(false); // whether the send icon is shown
const isShowAudioIcon = ref(false); // whether the mic icon is shown
const ishowLongPress = ref(true); // whether the press-to-talk bar is shown ("ishow" typo kept: the template binds this name)
const showBottom = ref(true); // whether the bottom popup is shown. NOTE(review): true opens the popup on mount; should this start as false? confirm
const startPoint = ref({ x: 0, y: 0 }); // coordinates where the long-press started
const editBtnRef = ref(null);
const cancelBtnRef = ref(null);
let aliSocket: any = null; // Ali ASR WebSocket instance (created in initAliSocket)
let token: string = ""; // Ali API token (filled by getAliTokenAndNoiseRate)
let noiseRate: number = 0.5; // speech noise threshold sent to the transcriber
let lastTouchEnd = 0; // timestamp of the previous touchend (double-tap-zoom guard)
const isInsideRangeByEdit = ref(false); // pointer currently inside the "edit" drop target
const isInsideRangeByCancel = ref(false); // pointer currently inside the "cancel" drop target

const testSampleRate = 16000; // target PCM sample rate (Hz) for the transcriber
const testBitRate = 16; // PCM bit depth; only 16-bit is supported downstream
const SendFrameSize = 3200; // fixed frame size (bytes) for chunked PCM upload
let send_pcmBuffer: Int16Array; // buffers PCM so it can be cut into fixed-size frames before sending
let send_pcmSampleRate: number; // sample rate of the buffer; equals testSampleRate unless the source rate is lower
let send_chunk: any; // previous Recorder.SampleData result, needed for continuous resampling
let send_lastFrame: Int16Array | null; // the last frame that was sent
let send_logNumber: number;
const hasMicrophonePermission = ref(false); // currently unused (the permission check below is commented out)

let websocket_task_id: string = ""; // 32-char unique task id; must stay constant for the whole real-time ASR session
let websocket_audio2txt_result_msg: string = ""; // accumulated speech-to-text result

// The press-to-talk button element (Hammer.js target).
const longPressRef = ref(null);
// Show the mic icon in the input bar (and hide the send icon).
const showAudioBtn = () => {
  isShowAudioIcon.value = true;
  isShowSendIcon.value = false;
};

// Show the send icon in the input bar (and hide the mic icon).
const showSendBtn = () => {
  isShowSendIcon.value = true;
  isShowAudioIcon.value = false;
};

// Mic icon tapped: switch the bar to press-to-talk mode.
const handleAudioClick = () => {
  ishowLongPress.value = true;
};

// Keyboard icon tapped: switch the bar back to text-input mode.
const handleMessageClick = () => {
  ishowLongPress.value = false;
};

// Long-press started: open the recording popup and connect to the
// Ali speech-recognition websocket.
const handleLongpressClick = () => {
  console.log("handle longpress click");
  showBottom.value = true;
  initAliSocket();
};

// Long-press released: close the popup and stop recording.
const handleLongpressUp = () => {
  console.log("handle longpress up");
  showBottom.value = false;
  recStop();
};

// Scroll the page to the bottom after the DOM settles, so the focused
// input stays visible above the soft keyboard.
const scrollToBottom = () => {
  nextTick(() => {
    window.scrollTo({
      top: document.body.scrollHeight,
      behavior: "smooth",
    });
  });
};

// Suppress an event's default action (used on mousedown of the
// press-to-talk button so it does not steal focus). The parameter was
// previously an implicit `any`.
const preventDefault = (event: Event) => {
  event.preventDefault();
};

// Pointer entered the "edit" drop target while panning.
const handleEnterRangeOnEdit = () => {
  console.log("进入编辑范围");
  // Hook for enter-range side effects.
};

// Pointer left the "edit" drop target.
const handleLeaveRangeOnEdit = () => {
  console.log("离开编辑范围");
  // Hook for leave-range side effects.
};

// Pointer entered the "cancel" drop target while panning.
const handleEnterRangeOnCancel = () => {
  console.log("进入取消范围");
  // Hook for enter-range side effects.
};

// Pointer left the "cancel" drop target.
const handleLeaveRangeOnCancel = () => {
  console.log("离开取消范围");
  // Hook for leave-range side effects.
};

// Wire up Hammer.js gestures on the press-to-talk button:
// press opens the popup and starts ASR, pressup stops it, and pan
// tracks whether the finger is over the cancel/edit drop targets.
// NOTE(review): assumes longPressRef/editBtnRef/cancelBtnRef are mounted
// before this runs (called from onMounted) — confirm.
const initHammer = () => {
  const mc = new Hammer(longPressRef.value, {
    recognizers: [[Hammer.Pan, { threshold: 50 }]],
  });
  mc.add(new Hammer.Press({ time: 300 })); // long-press fires after a 300 ms hold
  mc.on("press", (e) => {
    handleLongpressClick();
    // Remember where the press started.
    startPoint.value = { x: e.center.x, y: e.center.y };
  });
  mc.on("pressup", (e) => {
    handleLongpressUp()
  });
  mc.on("pan", (e) => {
    const currentPoint = { x: e.center.x, y: e.center.y };
    // Is the pointer inside the edit button's bounding box?
    const editBtnRect = editBtnRef.value.getBoundingClientRect();
    const isTouchingEditBtn =
      currentPoint.x >= editBtnRect.left &&
      currentPoint.x <= editBtnRect.right &&
      currentPoint.y >= editBtnRect.top &&
      currentPoint.y <= editBtnRect.bottom;
    // Is the pointer inside the cancel button's bounding box?
    const cancelBtnRect = cancelBtnRef.value.getBoundingClientRect();
    const isTouchingCancelBtn =
      currentPoint.x >= cancelBtnRect.left &&
      currentPoint.x <= cancelBtnRect.right &&
      currentPoint.y >= cancelBtnRect.top &&
      currentPoint.y <= cancelBtnRect.bottom;
    // Fire enter/leave callbacks only on state transitions.
    if (isTouchingEditBtn) {
      if (!isInsideRangeByEdit.value) {
        isInsideRangeByEdit.value = true;
        handleEnterRangeOnEdit();
      }
    } else {
      if (isInsideRangeByEdit.value) {
        isInsideRangeByEdit.value = false;
        handleLeaveRangeOnEdit();
      }
    }
    if (isTouchingCancelBtn) {
      if (!isInsideRangeByCancel.value) {
        isInsideRangeByCancel.value = true;
        handleEnterRangeOnCancel();
      }
    } else {
      if (isInsideRangeByCancel.value) {
        isInsideRangeByCancel.value = false;
        handleLeaveRangeOnCancel();
      }
    }
  });

  // The remaining pan events are only logged for debugging.
  mc.on("panend", (e) => {
    console.log("panend");
  });

  mc.on("pancancel", (e) => {
    console.log("pancancel");
  });
  mc.on("panstart", (e) => {
    console.log("panstart");
  });
  mc.on("panleft", (e) => {
    console.log("panleft");
  });
  mc.on("panright", (e) => {
    console.log("panright");
  });
  mc.on("panup", (e) => {
    console.log("panup");
  });
  mc.on("pandown", (e) => {
    console.log("pandown");
  });
};

// Globally suppress the context menu (a long press on mobile would
// otherwise open it while the user is holding the talk button).
const preventContextMenu = () => {
  document.addEventListener("contextmenu", (e) => {
    e.preventDefault();
    e.stopPropagation();
    return false; // NOTE(review): return values of addEventListener callbacks are ignored; harmless but has no effect
  });
};
// Block pinch-zoom: cancel any touchstart that involves two or more fingers.
const preventZoom = () => {
  document.documentElement.addEventListener(
    "touchstart",
    function (event) {
      if (event.touches.length > 1) {
        event.preventDefault();
      }
    },
    {
      // passive must be false so preventDefault() is allowed.
      passive: false,
    }
  );
};
// Block double-tap zoom: cancel a touchend that follows the previous
// one within 300 ms.
const preventZoomByDoubleClick = () => {
  document.documentElement.addEventListener(
    "touchend",
    function (event) {
      var now = Date.now();
      if (now - lastTouchEnd <= 300) {
        event.preventDefault();
      }
      lastTouchEnd = now;
    },
    {
      // passive must be false so preventDefault() is allowed.
      passive: false,
    }
  );
};

// Initialize the AI chat socket: log lifecycle events and handle
// incoming reply messages for the current conversation.
const initAiSocket = async () => {
  on("open", (event: any) => {
    console.log("Connected to server", event);
  });
  on("message", (data: any) => {
    // Parse once and reuse (previously the payload was parsed twice).
    const parsed = JSON.parse(data);
    const { payload, conversationKey, voice_url } = parsed.Data;
    console.log("Received data:", parsed);
    console.log("payload--", payload);
    // Ignore messages that belong to a stale conversation.
    if (!sessionManager.isCurrentSession(conversationKey)) {
      console.log("flowKey was empty", conversationKey);
      return;
    }
    // A non-empty voice_url means the full reply audio is ready.
    // Guarded: voice_url may be absent on text-only messages.
    if (voice_url && voice_url.length !== 0) {
      console.log("完整音频:", "https://saberdance.cc" + voice_url);
      // emitter.emit("userStop", {
      //   key: sessionManager.sessionKey,
      // });
      // audioStart(conversationKey, "https://saberdance.cc" + voice_url);
    }
  });
  on("error", (error: any) => {
    console.error("WebSocket Error:", error);
  });
  on("close", (event: any) => {
    console.log("Connection closed", event);
  });
};

// Open (or reopen) the Ali ASR websocket and attach its handlers.
const initAliSocket = async () => {
  // Drop any previous connection before creating a new one.
  if (aliSocket !== null) {
    aliSocket.close();
    aliSocket = null;
  }
  // Local name avoids shadowing the module-level `url` constant.
  const wsUrl = `${aliSocketUrl}?token=${token}`;
  aliSocket = new WebSocket(wsUrl);
  aliSocket.onopen = websocketOnOpen;
  aliSocket.onmessage = websocketOnMessage;
  aliSocket.onclose = websocketClose;
  aliSocket.onerror = websocketOnError;
};

// Connection established: start a new transcription task on the Ali service.
function websocketOnOpen() {
  // console.log("向 websocket 发送 链接请求");
  // Clear the accumulated recognition text.
  websocket_audio2txt_result_msg = "";
  // Generate a new task id (kept constant for the whole ASR session).
  websocket_task_id = getRandomStrNum();
  // Generate the message_id required by the Ali request.
  let message_id = getRandomStrNum();
  let actions = {
    header: {
      namespace: "SpeechTranscriber", // fixed value
      name: "StartTranscription", // request name, fixed value
      appkey: aliAppKey, // appkey
      message_id: message_id, // message id
      task_id: websocket_task_id, // task id
    },
    payload: {
      format: "PCM", // audio encoding; default PCM (raw PCM or WAV), 16-bit mono
      sample_rate: 16000, // must match the recording sample rate; default 16000 Hz
      enable_intermediate_result: true, // return intermediate results; default false
      enable_punctuation_prediction: true, // add punctuation in post-processing; default false
      enable_inverse_text_normalization: true, // convert spoken numbers to digits; default false
      max_sentence_silence: 500, // sentence-break silence threshold; range 200–2000 ms, service default 800 ms
      speech_noise_threshold: noiseRate,
    },
  };

  // Send the StartTranscription request.
  websocketSend(JSON.stringify(actions));
}

/**************** ws request handling: start *********************/
// Send data over the Ali websocket; drops the frame when the socket
// is missing or not yet open.
function websocketSend(data: any) {
  // Guard against a null socket (e.g. recStop firing before
  // initAliSocket has run) in addition to the readiness check.
  // WebSocket.OPEN === 1.
  if (aliSocket && aliSocket.readyState === WebSocket.OPEN) {
    aliSocket.send(data);
  } else {
    console.log("websock未连接-------------------");
  }
}

// Handle messages from the Ali transcriber; dispatch on the event
// name in the message header to drive the recorder and the UI.
function websocketOnMessage(e: any) {
  // Parse the payload returned by the Ali speech service.
  const ret = JSON.parse(e.data);
  // Dispatch on the event type.
  if (ret.header.name === "TranscriptionResultChanged") {
    // Intermediate result for the sentence currently being spoken.
    console.log("数据在收集中");
    // Show the in-progress speech-to-text result.
    ingText(ret.payload.result);
    console.log("ret.payload.result", ret.payload.result);
  } else if (ret.header.name === "SentenceBegin") {
    // A new sentence has started.
    console.log("检测到了一句话的开始");
    // Append a fresh <p> that will display the intermediate text.
    // (Variable is named "span" but a <p> element is created.)
    var span = document.createElement("p");
    span.innerText = "";
    usrTransDom.value.appendChild(span);
  } else if (ret.header.name === "TranscriptionStarted") {
    console.log("服务端已经准备好了进行识别，客户端可以发送音频数据了");
    // The server is ready to receive audio: start recording/streaming.
    // timer = setInterval(() => {
    //   getPCMAndSend();
    // }, 100);
    recStart();
  } else if (ret.header.name === "SentenceEnd") {
    // Final result for this sentence.
    console.log("数据接收结束", ret);
    endText(ret.payload.result);
    console.log("ret.payload.result", ret.payload.result);
  } else if (ret.header.name === "TranscriptionCompleted") {
    // Transcription finished: clear the board and forward the
    // recognized text to the AI chat socket.
    console.log("服务端已停止了语音转写", ret);
    // let textTimer = setTimeout(() => {
    //   //关闭文字展板 清空展板内的dom
    //   // recStop();
    //   clearTimeout(textTimer);
    // }, 2000);
    clearAllChildren(usrTransDom.value);
    // sceneManager.mui.addBlock("琉璃正在思考呀,请耐心等待哟~");
    send(
      JSON.stringify({
        chat_type: 3,
        conversationKey: sessionManager.sessionKey,
        payload: baseEncode(websocket_audio2txt_result_msg),
        session_id: Number(queryParams.sessionId),
        user_id: queryParams.userGuid,
        voiceType: aiAvatarId === 0 ? 3 : aiAvatarId,
      })
    );
  }
}

// Error handler for the Ali websocket. Parameters were previously
// implicit `any`; they are the standard DOM event types.
function websocketOnError(e: Event) {
  console.log("连接建立失败重连");
  //initWebSocket();
}

// Close handler for the Ali websocket; only logs the close event.
function websocketClose(e: CloseEvent) {
  console.log("websocketClose断开连接", e);
}

//检测是否开启麦克风权限
// const checkMicrophonePermission = () => {
//   navigator.mediaDevices
//     .getUserMedia({ audio: true })
//     .then((stream) => {
//       hasMicrophonePermission.value = true;
//       vibratePhone();
//     })
//     .catch((error) => {
//       hasMicrophonePermission.value = false;
//     });
// };

// Reset the real-time streaming state. Must be called once before
// every new recording session.
const realTimeSendReset = () => {
  send_chunk = null;
  send_lastFrame = null;
  send_logNumber = 0;
  send_pcmBuffer = new Int16Array(0);
  send_pcmSampleRate = testSampleRate;
};

// Upload one PCM frame over the Ali websocket.
// pcmFrame: the 16-bit PCM frame to send; isClose: true when this is
// the final call of the recording session.
const TransferUpload = (pcmFrame: Int16Array, isClose: boolean) => {
  if (isClose && pcmFrame.length === 0) {
    // Final call without data: because it is not driven by onProcess,
    // pcmFrame is empty. Instead of a more elaborate "delay one frame"
    // scheme, synthesize a silent frame (all zeros) using the previous
    // frame's length, or 50 ms worth of samples as a fallback.
    // (Returning here instead would simply skip the final frame.)
    const len = send_lastFrame
      ? send_lastFrame.length
      : Math.round((send_pcmSampleRate / 1000) * 50);
    pcmFrame = new Int16Array(len);
  }
  send_lastFrame = pcmFrame;

  // Send the raw ArrayBuffer. (A trailing dead `if (isClose) return;`
  // was removed — it was the last statement and had no effect.)
  websocketSend(pcmFrame.buffer);
};

// Recording entry point. `rec` holds the active Recorder instance.
var rec;
function recStart() {
  if (rec) {
    rec.close();
  }
  // Validate the configuration: only 16-bit PCM is supported and the
  // frame size must be an even number of bytes.
  if (testBitRate != 16 || SendFrameSize % 2 == 1) {
    console.log("本例子只支持16位pcm SendFrameSize 必须为2的整数倍", 1);
    return;
  }

  var clearBufferIdx = 0,
    processTime = 0;
  var rec2 = (rec = Recorder({
    type: "unknown", // "unknown" on purpose so buffers can be freed (supports long recordings)
    onProcess: function (
      buffers,
      powerLevel,
      bufferDuration,
      bufferSampleRate,
      newBufferIdx,
      asyncEnd
    ) {
      processTime = Date.now();

      // Free already-processed buffers in real time to support long recordings.
      // With a real encoding type the encoder keeps internal buffers and
      // takeoffEncodeChunk would also be needed; type "unknown" avoids that.
      // Deliberately delayed: only clear buffers before newBufferIdx — the
      // newest ones have not been pushed into the pipeline yet and must be kept.
      //if(this.clearBufferIdx>newBufferIdx){ this.clearBufferIdx=0 } // reset when state lives on `this` and recording restarts
      for (var i = clearBufferIdx; i < newBufferIdx; i++) {
        buffers[i] = null;
      }
      clearBufferIdx = newBufferIdx;

      // [Key] push the new audio into the real-time send pipeline.
      realTimeSendTry(buffers, bufferSampleRate, false);
    },
  }));

  rec2.open(
    function () {
      // Microphone permission granted; resources acquired.
      if (rec2 != rec) return; //sync
      rec2.start(); // begin recording
      console.log("已开始录音");

      // Watchdog: if onProcess stops firing for too long, recording is broken.
      var wdt = (rec.watchDogTimer = setInterval(function () {
        if (!rec || wdt != rec.watchDogTimer) {
          clearInterval(wdt);
          return;
        } //sync
        if (Date.now() < rec.wdtPauseT) return; // skip while paused (unused here); pause sets rec.wdtPauseT=Date.now()*2 (never check), resume sets Date.now()+1000 (check after 1 s)
        if (Date.now() - (processTime || startTime) > 1500) {
          clearInterval(wdt);
          console.log(processTime ? "录音被中断" : "录音未能正常开始", 1);
          // ... error handling: close the recorder and notify the user
        }
      }, 1000));
      var startTime = Date.now();
      rec.wdtPauseT = 0;
    },
    function (msg, isUserNotAllow) {
      // Open failed (no device, or the user denied permission).
      if (rec2 != rec) return; //sync
      console.log(
        (isUserNotAllow ? "UserNotAllow，" : "") + "无法录音:" + msg,
        1
      );
    }
  );
  realTimeSendReset(); // reset streaming state; required once per recording
}
// Stop recording, flush the last frame, and ask the Ali service to
// finalize the transcription.
function recStop() {
  var rec2 = rec;
  rec = null;
  if (!rec2) return console.log("未开始录音", 1);
  rec2.watchDogTimer = 0; // stop the onProcess watchdog
  rec2.close(); // close is enough — the final audio file is not needed; "unknown"/wav formats cannot call stop() because onProcess freed the buffers

  realTimeSendTry([], 0, true); // final send (flush the last frame)

  // Tell the Ali service the task is done.
  let message_id = getRandomStrNum();
  let actions = {
    header: {
      namespace: "SpeechTranscriber", // fixed value
      name: "StopTranscription", // request name, fixed value
      appkey: aliAppKey, // appkey
      message_id: message_id, // message id
      task_id: websocket_task_id, // task id
    },
  };

  // Send the StopTranscription request.
  websocketSend(JSON.stringify(actions));
}

function recCancelStop() {
  var rec2 = rec;
  rec = null;
  if (!rec2) return console.log("未开始录音", 1);
  rec2.watchDogTimer = 0; //停止监控onProcess超时
  rec2.close();
}

// Core real-time pipeline: resample new buffers to the target rate,
// cut the resulting PCM stream into fixed-size frames, and upload each.
const realTimeSendTry = (buffers, bufferSampleRate, isClose) => {
  // Extract the new PCM data.
  var pcm = new Int16Array(0);
  if (buffers.length > 0) {
    // [Key] SampleData performs continuous conversion (resampling
    // included) by threading the previous chunk state through.
    var chunk = Recorder.SampleData(
      buffers,
      bufferSampleRate,
      testSampleRate,
      send_chunk
    );
    send_chunk = chunk;

    pcm = chunk.data; // raw 16-bit little-endian PCM: save as-is for .pcm, add a WAV header for .wav, or feed an mp3 encoder
    send_pcmSampleRate = chunk.sampleRate; // actual rate after conversion; capped at the recording's own sample rate when testSampleRate is higher
  }

  // No fixed frame size configured: send the PCM straight through.
  if (!SendFrameSize) {
    TransferUpload(pcm, isClose);
    return;
  }

  // Append the new PCM to the pending buffer, then cut fixed-size frames.
  var pcmBuffer = send_pcmBuffer;
  var tmp = new Int16Array(pcmBuffer.length + pcm.length);
  tmp.set(pcmBuffer, 0);
  tmp.set(pcm, pcmBuffer.length);
  pcmBuffer = tmp;

  // Cut off as many full frames as the buffer holds.
  var chunkSize = SendFrameSize / (testBitRate / 8);
  while (true) {
    // A full frame is available.
    if (pcmBuffer.length >= chunkSize) {
      var frame = new Int16Array(pcmBuffer.subarray(0, chunkSize));
      pcmBuffer = new Int16Array(pcmBuffer.subarray(chunkSize));

      var closeVal = false;
      if (isClose && pcmBuffer.length == 0) {
        closeVal = true; // recording closed and nothing left to send
      }
      TransferUpload(frame, closeVal);
      if (!closeVal) continue; // keep cutting the remaining data
    } else if (isClose) {
      // Recording closed but the tail is shorter than one frame:
      // zero-pad it up to a full frame (dropping it would also work).
      var frame = new Int16Array(chunkSize);
      frame.set(pcmBuffer);
      pcmBuffer = new Int16Array(0);
      TransferUpload(frame, true);
    }
    break;
  }
  // Keep the remainder for the next call.
  send_pcmBuffer = pcmBuffer;
};

// Show the in-progress recognition result on the newest transcription line.
const ingText = (text) => {
  const paragraphs = usrTransDom.value.querySelectorAll("p");
  const current = paragraphs[paragraphs.length - 1];
  current.innerText = text;
};

// Write the final recognition result for the sentence and append it
// to the running transcript.
const endText = (text) => {
  const paragraphs = usrTransDom.value.querySelectorAll("p");
  const current = paragraphs[paragraphs.length - 1];
  current.innerText = text;

  // Accumulate the full transcript.
  websocket_audio2txt_result_msg += text;
  console.log(
    "websocket_audio2txt_result_msg:",
    websocket_audio2txt_result_msg
  );

  // TODO: voice matching could be performed here.
};

// Fetch the Ali API token and the noise threshold for this user.
// Errors are logged instead of surfacing as an unhandled rejection
// (the onMounted caller fires this without awaiting it).
const getAliTokenAndNoiseRate = async () => {
  try {
    const res: any = await getAliToken(queryParams.userGuid);
    token = res.token;
    noiseRate = res.noiseRate;
  } catch (err) {
    console.error("Failed to fetch Ali token:", err);
  }
};

// Remove every child node of the given element. The parameter was
// previously an implicit `any`; Node covers firstChild/removeChild.
const clearAllChildren = (element: Node) => {
  while (element.firstChild) {
    element.removeChild(element.firstChild);
  }
};

// Toggle the action icon whenever the input text changes:
// non-empty text shows "send", empty text shows the mic.
watch(input1, (newValue, oldValue) => {
  console.log(newValue, oldValue);
  if (!newValue) {
    showAudioBtn();
  } else {
    showSendBtn();
  }
});

// One-time page setup.
onMounted(async () => {
  showAudioBtn(); // input starts empty, so default to the mic icon
  initHammer(); // gesture handling on the press-to-talk button
  preventContextMenu();
  preventZoom();
  preventZoomByDoubleClick();
  initAiSocket();
  getAliTokenAndNoiseRate();
});
</script>
<style scoped lang="scss">
.input-wrapper {
  width: 100vw;
  // Circular container for the mic/send/keyboard action icons.
  .btn-wrapper {
    display: flex;
    justify-content: center;
    align-items: center;
    width: 40px;
    height: 40px;
    background-color: orange;
    border-radius: 50%;
    .btn {
      border-radius: 50%;
      width: 30px;
      height: 30px;
      /* background-color: #fffcf4; */
    }
  }
  // Press-to-talk bar: hold-to-record button plus the keyboard icon.
  .longpress-wrapper {
    // width: 100vw;
    padding: 10px 16px;
    display: flex;
    gap: 20px;
    justify-content: space-between;
    align-items: center;
    height: 60px;
    background-color: #fff;
    .van-btn-wrapper {
      display: flex;
      align-items: center;
      flex: 1;

      .longpress-btn {
        width: 100%;
      }
    }
  }
  // Live-transcription area inside the recording popup.
  .cover-top {
    background-color: red;
    max-height: 50px;
  }
  // Row with the cancel/edit drop targets.
  .cover-down {
    background-color: burlywood;
    display: flex;
    justify-content: space-between;
    align-items: center;
    margin-top: 50px;
    padding: 5px 0;
    .cancel-btn {
      // margin-left: 50px;
      height: 60px;
      background-color: blue;
      width: 40%;
      display: flex;
      flex-direction: column;
      align-items: center;
      justify-content: center;
      position: relative;
      .cancel-text {
        font-size: 16px;
        position: absolute;
        top: 0;
        left: 50%;
        transform: translateX(-50%);
      }
      .cancel-img {
        width: 30px;
        height: 30px;
        margin-top: 20px;
        scale: 1;
        transition: scale 0.5s;
      }
      // Enlarged variant while the pointer hovers over the target.
      .cancel-img-active {
        @extend .cancel-img;
        scale: 1.2;
        transition: scale 0.3s;
      }
    }
    .edit-btn {
      width: 40%;
      height: 60px;
      // margin-right: 50px;
      background-color: yellow;
      display: flex;
      flex-direction: column;
      align-items: center;
      justify-content: center;
      position: relative;
      .edit-text {
        font-size: 16px;
        position: absolute;
        top: 0;
        left: 50%;
        transform: translateX(-50%);
      }
      .edit-img {
        width: 30px;
        height: 30px;
        scale: 1;
        transition: scale 0.5s;
        margin-top: 20px;
      }
      // Enlarged variant while the pointer hovers over the target.
      .edit-img-active {
        @extend .edit-img;
        scale: 1.2;
        transition: scale 0.3s;
      }
    }
  }
}
</style>
