<!-- Order review page: records both the screen and the camera while the
     order is being reviewed, streaming each recording to Aliyun OSS via
     append upload. -->
<template>
  <div class="order_contract">订单审核</div>
  <div class="group_bt">
    <!-- Start / stop both screen and camera recording. -->
    <div class="screen_button" @click="startRecord">开始录屏</div>
    <div class="screen_button" @click="stopRecord">停止录屏</div>
  </div>
  <!-- Preview/playback element (template ref: MyVideo). -->
  <video ref="MyVideo" class="media" height="320" width="480" controls></video>
</template>

<script setup>
//引入阿里云oss (Aliyun OSS client)
import OSS from "ali-oss";
import RecordRTC from "recordrtc";
import { onMounted, reactive, ref } from "vue";
import { getOssInfo } from "@/config/upload/config";

// Aliyun OSS connection settings (fixed region + bucket for this app).
const OSSInfo = reactive({
  region: "oss-cn-qingdao",
  bucket: "ycsj2",
});

// Next byte offset for the OSS append-upload of the screen recording.
let nextScreenAppendPosition = 0;

// OSS object key for the screen recording.
// TODO: derive from the user's id/name instead of a random number.
let objecScreentName =
  "screen/screen_" + Math.floor(Math.random() * 10000000) + ".webm";

// Next byte offset for the OSS append-upload of the camera recording.
let nextCameraAppendPosition = 0;

// OSS object key for the camera recording.
let objCameraName =
  "camera/camera_" + Math.floor(Math.random() * 10000000) + ".webm";

// RecordRTC instance for screen capture (created when recording starts).
// NOTE: these were reactive(null), which is invalid — reactive() requires an
// object — and the handles are plainly reassigned anyway, so reactivity was
// never in effect. Hold them as ordinary variables.
let sreenRecorder = null;

// RecordRTC instance for camera capture.
let cameraRecorder = null;

// Per-slice recording data (currently unused; kept for future use/debugging).
let sliceArr = reactive([]);

// OSS client, created once STS credentials arrive (see getTemporaryInfo).
let client = null;

// Screen recording active flag.
let isScreenRecording = ref(false);

// Camera recording active flag.
let isCameraRecording = ref(false);

// Template ref for the <video> preview element.
const MyVideo = ref();

onMounted(() => {
  // Fetch temporary STS credentials and build the OSS client up front.
  getTemporaryInfo();
});

// Resolve a MediaStream for the given constraints, installing a polyfill for
// navigator.mediaDevices.getUserMedia on legacy browsers that only expose the
// vendor-prefixed callback APIs. Rejects with { code: 404 } when no
// implementation exists at all.
const getAudioVideo = (constraintsData) => {
  if (navigator.mediaDevices === undefined) {
    navigator.mediaDevices = {};
  }
  if (navigator.mediaDevices.getUserMedia === undefined) {
    navigator.mediaDevices.getUserMedia = function (constraints) {
      // Pick whichever legacy implementation this browser ships.
      const legacyGetUserMedia =
        navigator.getUserMedia ||
        navigator.webkitGetUserMedia ||
        navigator.mozGetUserMedia ||
        navigator.msGetUserMedia;

      // No implementation at all — keep the promise-based contract by
      // rejecting instead of throwing.
      if (!legacyGetUserMedia) {
        return Promise.reject({ code: 404 });
      }

      // Adapt the callback-style legacy API to a Promise.
      return new Promise((resolve, reject) => {
        legacyGetUserMedia.call(navigator, constraints, resolve, reject);
      });
    };
  }
  return navigator.mediaDevices.getUserMedia(constraintsData);
};

// Ask the browser for a screen-capture stream and start recording it.
// A rejection (e.g. the user cancels the share dialog, or capture is
// unsupported) is caught so it doesn't become an unhandled rejection,
// and the recording flag is reset so the UI can retry.
const initScreenRecord = () => {
  navigator.mediaDevices
    .getDisplayMedia({
      video: true,
      audio: true,
    })
    .then((stream) => {
      // Hand the stream to the shared RecordRTC setup (type 1 = screen).
      createRecord(stream, 1);
    })
    .catch((err) => {
      isScreenRecording.value = false;
      console.log("failed to start screen recording", err);
    });
};

// Ask the browser for a camera + microphone stream and start recording it.
// (The original comment said "screen recording" — copy-paste leftover.)
// A rejection is caught so it doesn't become an unhandled rejection, and
// the recording flag is reset so the UI can retry.
const initCameraRecord = () => {
  navigator.mediaDevices
    .getUserMedia({
      video: true,
      audio: true,
    })
    .then((stream) => {
      // Hand the stream to the shared RecordRTC setup (type 2 = camera).
      createRecord(stream, 2);
    })
    .catch((err) => {
      isCameraRecording.value = false;
      console.log("failed to start camera recording", err);
    });
};

// Build a RecordRTC recorder for the given stream and start it.
// type 1 = screen capture, type 2 = camera capture.
// The two branches previously duplicated the whole options object, differing
// only in timeSlice — the shared part is extracted into a local builder.
const createRecord = async (stream, type) => {
  // Shared RecordRTC options; each time slice is pushed straight to OSS
  // through appenFile (ondataavailable requires timeSlice to be set).
  const buildOptions = (timeSlice) => ({
    type: "video",

    //视频类型
    mimeType: "video/webm",

    //分片时间 (ms between ondataavailable callbacks)
    timeSlice,

    //该回调函数必须和上面的的timeSlice分片时间配合使用
    ondataavailable: async (blob) => {
      //追加上传文件 — append this slice to the matching OSS object
      appenFile(blob, type);
    },

    //获取时间片段的时间戳
    onTimeStamp: function (timestamp) {},

    //录制的码率
    bitsPerSecond: 3840000,
  });

  if (type == 1) {
    // When the user stops sharing via the browser's own UI, finish the
    // recording through the normal stop path.
    stream.getVideoTracks()[0].onended = () => {
      if (sreenRecorder.state != "destroyed") {
        stopRecord();
      }
    };

    sreenRecorder = RecordRTC(stream, buildOptions(20000));
    sreenRecorder.startRecording();
  } else if (type == 2) {
    cameraRecorder = RecordRTC(stream, buildOptions(30000));
    cameraRecorder.startRecording();
  }
};

// Begin both screen and camera recording, after probing for permission.
// Fix: the original set isScreenRecording.value = true *before* the
// permission check, so a denial left the flag stuck at true and stopRecord
// would then run against null recorders. Flags are now only set on success.
const startRecord = async () => {
  //判断时候有摄像头或者麦克风设备
  getAudioVideo({ audio: true, video: true })
    .then((res) => {
      console.log("已点击允许,开启成功");

      // Start screen capture.
      isScreenRecording.value = true;
      initScreenRecord();

      //开启摄像头录制
      isCameraRecording.value = true;
      initCameraRecord();
    })
    .catch((err) => {
      if (err.code && err.code == 404) {
        console.log("浏览器不支持，请更换浏览器");
      } else {
        console.log("请检查是否存在摄像头或麦克风");
      }
    });
};

// Stop both recorders and clear the recording flags.
// Fix: the camera flag was tested without .value — a ref object is always
// truthy, so `!isCameraRecording` could never be true and that half of the
// guard was dead.
const stopRecord = async () => {
  if (!isScreenRecording.value || !isCameraRecording.value) {
    return;
  }

  //关闭屏幕录制对象
  if (sreenRecorder) {
    sreenRecorder.stopRecording();
  }

  //改变录制状态
  isScreenRecording.value = false;

  //关闭摄像头录制对象
  if (cameraRecorder) {
    cameraRecorder.stopRecording();
  }

  //改变录制状态
  isCameraRecording.value = false;
};

// Convert a recorded Blob into a Uint8Array so it can be wrapped in an OSS
// Buffer for append upload.
// Uses Blob.prototype.arrayBuffer() instead of the verbose FileReader +
// manual-Promise construction (same result, no explicit-Promise anti-pattern).
// @param {Blob} blob - one recorded media slice
// @returns {Promise<Uint8Array>} the blob's raw bytes
const blobToUint8Array = async (blob) => {
  const arrayBuffer = await blob.arrayBuffer();
  return new Uint8Array(arrayBuffer);
};

// Fetch temporary STS credentials from the backend and build the module-level
// OSS client with them. Called once from onMounted; the caller does not await,
// so failures are caught here instead of surfacing as unhandled rejections.
// (The previous 17-line commented-out localhost variant has been removed.)
const getTemporaryInfo = async () => {
  try {
    const res = await getOssInfo();
    // NOTE(review): assumes the backend wraps the STS payload as
    // res.data.data — confirm against the API contract.
    const { AccessKeyId, AccessKeySecret, SecurityToken } = res.data.data;
    client = new OSS({
      region: OSSInfo.region,
      accessKeyId: AccessKeyId,
      accessKeySecret: AccessKeySecret,
      stsToken: SecurityToken,
      bucket: OSSInfo.bucket,
    });
  } catch (err) {
    // Without credentials no upload can succeed; surface the failure.
    console.log("failed to fetch OSS temporary credentials", err);
  }
};

// Append one recorded slice to the corresponding OSS object (append upload).
// type 1 = screen, type 2 = camera; each stream tracks its own byte offset.
// Fix: the original decided whether to send `position` by OR-ing BOTH
// streams' offsets, so one stream's first slice could carry a position even
// though its own object didn't exist yet. The check is now per-type.
// NOTE(review): concurrent ondataavailable callbacks could race on the
// offset update; the 20s/30s slice intervals make overlap unlikely — confirm.
const appenFile = async (blob, type) => {
  if (!blob || blob.size === 0) {
    return;
  }

  const buffer = await blobToUint8Array(blob);
  const objectName = type == 1 ? objecScreentName : objCameraName;
  const position =
    type == 1 ? nextScreenAppendPosition : nextCameraAppendPosition;

  const args = [objectName, OSS.Buffer.from(buffer)];

  // position === 0 means this slice creates the object; otherwise OSS
  // requires the current object length as the append position.
  if (position > 0) {
    args.push({ position });
  }

  const result = await client.append(...args);
  console.log(`${type == 1 ? "屏幕录制" : "摄像头录制"}:`, result);

  // Advance this stream's offset by the bytes just uploaded.
  if (type == 1) {
    nextScreenAppendPosition += buffer.byteLength;
  } else {
    nextCameraAppendPosition += buffer.byteLength;
  }
};
</script>
<style lang="scss" scoped>
/* Row of start/stop buttons under the page title. */
.group_bt {
  display: flex;
  align-items: center;
  /* Each action button: fixed-size orange pill with centered white label. */
  .screen_button {
    cursor: pointer;
    display: flex;
    margin: 10px 20px;
    align-items: center;
    justify-content: center;
    width: 300px;
    height: 48px;
    background: orangered;
    color: white;
  }
}
</style>
