<template>
  <div class="multimodal-interface">
    <h1>多模态对话界面</h1>

    <!-- Text input -->
    <div class="input-section">
      <textarea v-model="textInput" placeholder="请输入文本..." aria-label="文本输入"></textarea>
      <button type="button" @click="sendText">发送文本</button>
    </div>

    <!-- Voice input (simulated in the script — see startVoiceInput) -->
    <div class="input-section">
      <button type="button" @click="startVoiceInput" v-if="!isRecording">开始语音输入</button>
      <button type="button" @click="stopVoiceInput" v-else>停止语音输入</button>
      <p>{{ voiceInputText }}</p>
    </div>

    <!-- Image upload: aria-label names the unlabeled file input -->
    <div class="input-section">
      <input type="file" accept="image/*" aria-label="上传图片" @change="onImageUpload">
      <img v-if="imagePreview" class="preview-image" :src="imagePreview" alt="预览图片">
      <button type="button" @click="analyzeImage" v-if="imagePreview">分析图片</button>
    </div>

    <!-- Video upload -->
    <div class="input-section">
      <input type="file" accept="video/*" aria-label="上传视频" @change="onVideoUpload">
      <video v-if="videoPreview" class="preview-video" :src="videoPreview" controls></video>
      <button type="button" @click="analyzeVideo" v-if="videoPreview">分析视频</button>
    </div>

    <!-- Handwriting recognition: canvas is drawn on with the mouse -->
    <div class="input-section">
      <canvas
        ref="drawingCanvas"
        width="400"
        height="200"
        aria-label="手写输入画布"
        @mousedown="startDrawing"
        @mousemove="draw"
        @mouseup="stopDrawing"
        @mouseleave="stopDrawing"
      ></canvas>
      <button type="button" @click="recognizeHandwriting">识别手写文字</button>
      <button type="button" @click="clearCanvas">清除画布</button>
    </div>

    <!-- Model response; aria-live announces async updates to assistive tech -->
    <div class="response-section" aria-live="polite">
      <h2>模型响应</h2>
      <p>{{ modelResponse }}</p>
      <button type="button" @click="speakResponse">朗读响应</button>
    </div>

    <!-- Emotion analysis result -->
    <div class="emotion-section">
      <h3>情感分析</h3>
      <p>检测到的情感: {{ detectedEmotion }}</p>
    </div>

    <!-- 3D model display (rendering not implemented yet — see on3DModelUpload) -->
    <div class="model-section">
      <h3>3D模型展示</h3>
      <div ref="modelContainer"></div>
      <input type="file" accept=".glb,.gltf" aria-label="上传3D模型" @change="on3DModelUpload">
    </div>
  </div>
</template>

<script>
export default {
  data() {
    return {
      // Current contents of the free-text box.
      textInput: '',
      // True while the (simulated) voice capture is running.
      isRecording: false,
      // Transcript produced by the voice input.
      voiceInputText: '',
      // Object URLs for the uploaded image/video previews (null = none yet).
      imagePreview: null,
      videoPreview: null,
      // Latest answer text from the backend model.
      modelResponse: '',
      // Emotion label returned alongside the model response.
      detectedEmotion: '',
      // 2D context of the handwriting canvas; assigned in mounted().
      drawingContext: null,
      // True while the mouse button is held down over the canvas.
      isDrawing: false
    }
  },
  mounted() {
    // Prepare the handwriting canvas: thin black strokes.
    this.drawingContext = this.$refs.drawingCanvas.getContext('2d');
    this.drawingContext.lineWidth = 2;
    this.drawingContext.strokeStyle = '#000000';
  },
  methods: {
    /**
     * POST the text box contents to the backend and display the model's
     * answer plus the detected emotion. Does nothing for blank input and
     * reports request failures instead of silently swallowing them.
     */
    sendText() {
      const text = this.textInput.trim();
      if (!text) return; // nothing to send
      fetch('/api/process-text', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ text })
      })
        .then(response => {
          // fetch() only rejects on network errors; surface HTTP errors too.
          if (!response.ok) {
            throw new Error(`HTTP ${response.status}`);
          }
          return response.json();
        })
        .then(data => {
          this.modelResponse = data.response;
          this.detectedEmotion = data.emotion;
        })
        .catch(error => {
          console.error('process-text request failed:', error);
        });
    },
    /**
     * Begin voice capture. Real speech recognition (e.g. the Web Speech
     * API) is not wired up yet; this simulates a 3-second recording that
     * yields fixed text.
     */
    startVoiceInput() {
      this.isRecording = true;
      setTimeout(() => {
        this.voiceInputText = "这是模拟的语音输入文本";
        this.stopVoiceInput();
      }, 3000);
    },
    // Stop voice capture; result handling would go here once recognition
    // is actually implemented.
    stopVoiceInput() {
      this.isRecording = false;
    },
    // Show a local preview of the chosen image. Guards against a cancelled
    // file picker and revokes the previous object URL to avoid leaking blobs.
    onImageUpload(event) {
      const file = event.target.files[0];
      if (!file) return; // user cancelled the picker
      if (this.imagePreview) {
        URL.revokeObjectURL(this.imagePreview);
      }
      this.imagePreview = URL.createObjectURL(file);
    },
    // Placeholder: real upload/analysis of the image is not implemented yet.
    analyzeImage() {
      this.modelResponse = "正在分析上传的图片...";
    },
    // Show a local preview of the chosen video; same guards as onImageUpload.
    onVideoUpload(event) {
      const file = event.target.files[0];
      if (!file) return; // user cancelled the picker
      if (this.videoPreview) {
        URL.revokeObjectURL(this.videoPreview);
      }
      this.videoPreview = URL.createObjectURL(file);
    },
    // Placeholder: real upload/analysis of the video is not implemented yet.
    analyzeVideo() {
      this.modelResponse = "正在分析上传的视频...";
    },
    // Mouse pressed on the canvas: start a stroke at the cursor position.
    startDrawing(event) {
      this.isDrawing = true;
      this.draw(event);
    },
    // Extend the current stroke to the cursor position (no-op unless the
    // button is down). Coordinates are relative to the canvas box.
    draw(event) {
      if (!this.isDrawing) return;
      const canvas = this.$refs.drawingCanvas;
      const rect = canvas.getBoundingClientRect();
      const x = event.clientX - rect.left;
      const y = event.clientY - rect.top;
      this.drawingContext.lineTo(x, y);
      this.drawingContext.stroke();
      // Restart the path from the current point so each segment is drawn
      // once, keeping line joins clean.
      this.drawingContext.beginPath();
      this.drawingContext.moveTo(x, y);
    },
    // Mouse released or left the canvas: end the stroke.
    stopDrawing() {
      this.isDrawing = false;
      this.drawingContext.beginPath();
    },
    // Placeholder: real handwriting recognition is not implemented yet.
    recognizeHandwriting() {
      this.modelResponse = "正在识别手写内容...";
    },
    // Wipe the entire handwriting canvas.
    clearCanvas() {
      this.drawingContext.clearRect(0, 0, this.$refs.drawingCanvas.width, this.$refs.drawingCanvas.height);
    },
    /**
     * Read the model response aloud via the Web Speech API. Skips empty
     * responses and cancels any utterance already playing so repeated
     * clicks do not queue overlapping speech.
     */
    speakResponse() {
      if (!this.modelResponse) return;
      window.speechSynthesis.cancel();
      const speech = new SpeechSynthesisUtterance(this.modelResponse);
      window.speechSynthesis.speak(speech);
    },
    // Placeholder: loading/rendering the model (e.g. with three.js into
    // this.$refs.modelContainer) is not implemented yet.
    on3DModelUpload(event) {
      const file = event.target.files[0];
      if (!file) return; // user cancelled the picker
      this.modelResponse = "3D模型已上传，正在处理...";
    }
  }
}
</script>

<style scoped>
/* Centered, single-column layout for the whole widget. */
.multimodal-interface {
  margin: 0 auto;
  padding: 20px;
  max-width: 800px;
}

/* Uniform vertical rhythm between the interface sections. */
.input-section,
.response-section,
.emotion-section,
.model-section {
  margin-bottom: 20px;
}

/* Full-width text and file controls. */
textarea,
input[type="file"] {
  margin-bottom: 10px;
  width: 100%;
}

/* Horizontal gap between adjacent buttons. */
button {
  margin-right: 10px;
}

/* Previews scale down to fit but never exceed 300px tall. */
.preview-image,
.preview-video {
  margin-top: 10px;
  max-height: 300px;
  max-width: 100%;
}

/* Visible outline around the handwriting area. */
canvas {
  margin-bottom: 10px;
  border: 1px solid #000;
}
</style>
