<template>
  <div class="voice-test-container">
    <!-- Page Header -->
    <div class="page-header">
      <h1 class="text-h4 font-weight-bold mb-2">Voice Test</h1>
      <p class="text-body-1 grey--text">Test voice recognition and real-time speech-to-text functionality</p>
    </div>

    <!-- Main Content Area -->
    <v-row>
      <!-- Full Width: Real-time Display Area -->
      <v-col cols="12">
        <!-- Real-time Transcript Display: shows ws-push messages only;
             voice-recognition results appear in the assistant panel instead -->
        <v-card class="display-panel" elevation="2">
          <v-card-title class="primary white--text">
            <v-icon left>mdi-text</v-icon>
            Real-time Transcript
            <v-spacer></v-spacer>
            <!-- Connection badge driven by the push socket's readyState;
                 webSocketReadyState mirrors the WebSocket constants, which
                 the template cannot reference directly -->
            <v-chip
              :color="pushWs && pushWs.readyState === webSocketReadyState.OPEN ? 'green' : 'red'"
              small
              dark
            >
              <v-icon left small>{{ pushWs && pushWs.readyState === webSocketReadyState.OPEN ? 'mdi-wifi' : 'mdi-wifi-off' }}</v-icon>
              {{ pushWs && pushWs.readyState === webSocketReadyState.OPEN ? 'Connected' : 'Disconnected' }}
            </v-chip>
          </v-card-title>

          <v-card-text>
            <!-- Current Transcript -->
            <div class="current-transcript">
              <div class="text-subtitle-2 mb-2">Messages from ws-push WebSocket:</div>
              <v-card outlined class="transcript-box pa-3" style="min-height: 120px; max-height: 400px; overflow-y: auto;">
                <div v-if="transcriptHistory.length > 0">
                  <!-- History is newest-first (items are unshifted); key combines
                       the item timestamp with the index for stabler node reuse
                       than a bare index key -->
                  <div
                    v-for="(item, index) in transcriptHistory"
                    :key="item.timestamp + '-' + index"
                    class="transcript-item mb-2"
                  >
                    <div class="text-caption grey--text mb-1">
                      {{ new Date(item.timestamp).toLocaleString() }}
                    </div>
                    <div class="text-body-2">{{ item.text }}</div>
                  </div>
                </div>
                <div v-else class="text-body-1 grey--text text-center mt-8">
                  <v-icon large class="mb-2" color="grey">mdi-webhook</v-icon>
                  <br>
                  Waiting for messages from ws-push WebSocket...
                  <br>
                  <small>Only messages from ws-push will be displayed here</small>
                  <br>
                  <small class="text-caption">Voice recognition results are shown in the assistant panel</small>
                </div>
              </v-card>
            </div>
          </v-card-text>
        </v-card>
      </v-col>
    </v-row>

    <!-- Floating Voice Assistant (bottom-right) -->
    <div class="voice-assistant">
      <!-- Docked (collapsed) bar -->
      <div v-if="!assistantExpanded" class="ai-dock elevation-6">
        <div class="dock-left">Voice Assistant</div>
        <div class="dock-actions">
          <v-btn small icon @click="expandAssistant">
            <v-icon>mdi-plus</v-icon>
          </v-btn>
        </div>
      </div>

      <!-- Expanded panel -->
      <v-card v-else class="ai-panel elevation-10">
        <div class="ai-header d-flex align-center justify-space-between">
          <div class="font-weight-bold">Voice Assistant</div>
          <div>
            <v-btn small depressed class="mr-2" @click="assistantMode='text'">Text</v-btn>
            <v-btn small icon :color="isRecording ? 'red' : 'success'" class="mr-2" @click="toggleRecording">
              <v-icon>{{ isRecording ? 'mdi-microphone' : 'mdi-microphone-outline' }}</v-icon>
            </v-btn>
            <v-btn small icon @click="collapseAssistant"><v-icon>mdi-minus</v-icon></v-btn>
          </div>
        </div>
        <v-divider></v-divider>
        <div class="ai-body">
          <!-- Greeting line doubles as the live transcript readout in voice mode -->
          <div class="ai-greeting">
            {{ assistantMode==='voice' && lastTranscript ? 'Voice: ' + lastTranscript : 'Hello! I can help you test voice recognition. Choose voice mode to start recording or text mode to send test messages.' }}
          </div>

          <div v-if="assistantMode==='voice'" class="ai-voice text-center">
            <v-btn :color="isRecording ? 'red' : 'success'" class="mic-btn" fab large @click="toggleRecording">
              <v-icon large>mdi-microphone</v-icon>
            </v-btn>
            <div class="mt-2 grey--text text--darken-1">{{ isRecording ? 'Recording...' : 'Press to start recording' }}</div>
          </div>

          <div v-else class="ai-text">
            <v-text-field
              v-model="assistantText"
              outlined
              hide-details
              placeholder="Type test message..."
              @keyup.enter="sendAssistantText"
            />
            <div class="mt-2 d-flex justify-end">
              <v-btn color="primary" @click="sendAssistantText">Send</v-btn>
            </div>
          </div>
        </div>
      </v-card>
    </div>

    <!-- Toast Messages -->
    <v-snackbar
      v-model="showMessage"
      :timeout="3000"
      color="info"
      bottom
    >
      {{ messageText }}
      <template v-slot:action="{ attrs }">
        <v-btn text v-bind="attrs" @click="showMessage = false">
          Close
        </v-btn>
      </template>
    </v-snackbar>
  </div>
</template>

<script>
import SpeechRecognizer from '@/api/simpson-stt.js'

export default {
  name: 'VoiceTest',

  data() {
    return {
      // --- Voice recognition state ---
      recognizer: null,      // SpeechRecognizer instance; null until initRecognizer() succeeds
      isRecording: false,    // true while a microphone session is active
      isConnected: false,    // true while the STT socket is believed connected

      // --- Transcript display state ---
      currentTranscript: '',
      transcriptHistory: [], // newest-first list of { text, timestamp } from ws-push only
      lastTranscript: '',    // latest voice-recognition result (assistant panel only)

      // --- Endpoint settings (overridable at build time via env vars) ---
      wsUrl: process.env.VUE_APP_STT_WS || 'ws://localhost:8080/ws',
      wsPushUrl: process.env.VUE_APP_PUSH_WS || 'ws://localhost:8080/ws-push',

      // --- Push WebSocket state (following PowerCanvas pattern) ---
      pushWs: null,
      pushReconnectTimer: null,
      pushShouldReconnect: true, // cleared in beforeDestroy to stop the reconnect loop
      pushHeartbeatTimer: null,
      pushHeartbeatIntervalMs: 15000,

      // --- Floating assistant state ---
      assistantExpanded: false,
      assistantMode: 'text', // 'text' | 'voice'
      assistantText: '',

      // --- Toast state ---
      showMessage: false,
      messageText: '',

      // Mirror of the global WebSocket ready-state constants so the template
      // (which cannot reference window.WebSocket) can compare readyState.
      webSocketReadyState: {
        CONNECTING: 0,
        OPEN: 1,
        CLOSING: 2,
        CLOSED: 3
      }
    }
  },

  mounted() {
    this.initRecognizer()
    // Connect to the backend push channel as soon as the view is mounted
    this.connectPushSocket()
  },

  beforeDestroy() {
    this.stopRecording()
    // Tear down the push channel: disable reconnection first so the
    // onclose handler does not schedule a new attempt.
    this.pushShouldReconnect = false
    if (this.pushReconnectTimer) clearTimeout(this.pushReconnectTimer)
    if (this.pushHeartbeatTimer) clearInterval(this.pushHeartbeatTimer)
    if (this.pushWs) {
      try { this.pushWs.close() } catch (e) { console.warn('[PushWS] close failed', e) }
    }
  },

  methods: {
    /**
     * Create the speech recognizer instance. Failures are reported via toast
     * and leave `recognizer` null, which startRecording() checks for.
     */
    initRecognizer() {
      try {
        this.recognizer = new SpeechRecognizer()
        console.log('Voice recognizer initialized successfully')
      } catch (error) {
        console.error('Voice recognizer initialization failed:', error)
        this.showToastMessage('Voice recognizer initialization failed')
      }
    },

    /**
     * Toggle the microphone on/off. Also switches the assistant into voice
     * mode so the last transcript becomes visible in the panel.
     */
    async toggleRecording() {
      this.assistantMode = 'voice'
      if (this.isRecording) {
        this.stopRecording()
      } else {
        await this.startRecording()
      }
    },

    /**
     * Open the STT WebSocket and start streaming microphone audio.
     * Recognition results arrive via onRecognitionResult().
     */
    async startRecording() {
      if (!this.recognizer) {
        this.showToastMessage('Voice recognizer not initialized')
        return
      }

      try {
        // Use the auth token as clientId so the backend can correlate this
        // STT session with the ws-push channel (which uses the same id).
        // NOTE(review): getItem returns null when no token is stored —
        // confirm the recognizer/backend tolerates a null clientId.
        const clientId = window.localStorage.getItem('token')
        await this.recognizer.recognize(this.wsUrl, clientId, this.onRecognitionResult)
        this.isRecording = true
        this.isConnected = true
        this.showToastMessage('Recording started')
      } catch (error) {
        console.error('Start recording failed:', error)
        this.showToastMessage('Connection failed: ' + error.message)
        this.isConnected = false
      }
    },

    /**
     * Stop the microphone session and reset connection flags.
     * The "Recording stopped" toast is shown only when a session was actually
     * active, so the unconditional call from beforeDestroy() (and any call
     * while idle) no longer pops a spurious toast.
     */
    stopRecording() {
      const wasRecording = this.isRecording
      try {
        if (this.recognizer) {
          this.recognizer.stop()
        }
      } catch (error) {
        console.error('Stop recording failed:', error)
      } finally {
        this.isRecording = false
        this.isConnected = false
        if (wasRecording) {
          this.showToastMessage('Recording stopped')
        }
      }
    },

    /**
     * Callback for STT results. Accepts either a plain string or an object
     * carrying the text in one of several well-known fields. Updates only
     * `lastTranscript` (assistant panel); ws-push messages own the history.
     * @param {string|Object} data - raw recognition payload
     */
    onRecognitionResult(data) {
      let raw = ''
      if (typeof data === 'string') {
        raw = data
      } else if (data && typeof data === 'object') {
        raw = data.text || data.message || data.content || ''
      }

      const transcript = this.sanitizeMessage(raw)
      if (transcript) {
        this.lastTranscript = transcript
      }
    },

    /**
     * Strip CJK ideographs and CJK/fullwidth punctuation, then trim.
     * @param {*} msg - value to clean; null/undefined yield ''
     * @returns {string} cleaned text (may be empty)
     */
    sanitizeMessage(msg) {
      if (msg == null) return ''
      return String(msg).replace(/[\u4E00-\u9FFF\u3000-\u303F\uFF00-\uFFEF]/g, '').trim()
    },

    /**
     * Show a toast with the given text (3s timeout set in the template).
     * @param {string} message
     */
    showToastMessage(message) {
      this.messageText = message
      this.showMessage = true
    },

    /** Send the typed test message over the push channel, if connected. */
    sendAssistantText() {
      const text = (this.assistantText || '').trim()
      if (!text) return

      const ws = this.pushWs
      if (!ws || ws.readyState !== this.webSocketReadyState.OPEN) {
        this.showToastMessage('Push channel not connected')
        return
      }

      try {
        ws.send(text)
        this.showToastMessage('Sent to push channel: ' + text)
        this.assistantText = ''
      } catch (e) {
        console.error('Send assistant text failed:', e)
        this.showToastMessage('Send failed')
      }
    },

    /**
     * Connect to the backend push WebSocket (same clientId as the STT
     * connection). Reconnects automatically with a 2s delay until
     * `pushShouldReconnect` is cleared in beforeDestroy().
     */
    connectPushSocket() {
      // Cancel any pending reconnect so parallel attempts cannot stack up.
      if (this.pushReconnectTimer) {
        clearTimeout(this.pushReconnectTimer)
        this.pushReconnectTimer = null
      }

      try {
        // localStorage.getItem returns null when no token is stored; fall
        // back to '' so the query string does not contain the text "null".
        const clientId = window.localStorage.getItem('token') || ''
        const url = `${this.wsPushUrl}?clientId=${encodeURIComponent(clientId)}`
        const ws = new WebSocket(url)
        this.pushWs = ws

        ws.onopen = () => {
          console.log('[PushWS] connected')
          this.startPushHeartbeat()
          this.showToastMessage('Push WebSocket connected')
        }

        ws.onmessage = (evt) => {
          this.handlePushMessage(evt.data)
        }

        ws.onerror = (err) => {
          console.error('[PushWS] error', err)
          this.showToastMessage('Push WebSocket error')
        }

        ws.onclose = () => {
          console.log('[PushWS] closed')
          this.stopPushHeartbeat()
          if (this.pushShouldReconnect) {
            this.pushReconnectTimer = setTimeout(() => this.connectPushSocket(), 2000)
          }
        }
      } catch (e) {
        console.error('[PushWS] failed to connect', e)
        this.showToastMessage('Push WebSocket connection failed')
      }
    },

    /**
     * Normalize one raw ws-push frame (JSON or plain text), append it to the
     * newest-first history (capped at 50 entries) and toast longer messages.
     * Heartbeat 'ping' echoes are ignored.
     * @param {string} rawData - evt.data from the push socket
     */
    handlePushMessage(rawData) {
      let payload = rawData
      try {
        payload = JSON.parse(rawData)
      } catch (e) {
        // Not JSON — treat as plain text.
      }

      let messageText = ''
      if (typeof payload === 'string') {
        messageText = payload
      } else if (payload && typeof payload === 'object') {
        // Extract text from the first populated well-known field.
        messageText = payload.text || payload.message || payload.content || payload.data || payload.transcript || JSON.stringify(payload)
      }

      if (!messageText || messageText === 'ping') return

      // Newest first; only ws-push messages go into the history
      // (voice recognition results update lastTranscript instead).
      this.transcriptHistory.unshift({
        text: messageText,
        timestamp: Date.now()
      })

      // Keep only the most recent 50 entries.
      if (this.transcriptHistory.length > 50) {
        this.transcriptHistory.pop()
      }

      // Toast longer messages so they are noticed even when scrolled away.
      if (messageText.length > 10) {
        this.showToastMessage('Received: ' + messageText.substring(0, 50) + (messageText.length > 50 ? '...' : ''))
      }
    },

    /** Start (or restart) the periodic 'ping' heartbeat on the push socket. */
    startPushHeartbeat() {
      if (this.pushHeartbeatTimer) clearInterval(this.pushHeartbeatTimer)
      this.pushHeartbeatTimer = setInterval(() => {
        try {
          const ws = this.pushWs
          if (ws && ws.readyState === this.webSocketReadyState.OPEN) {
            ws.send('ping')
          }
        } catch (e) {
          console.warn('[PushWS] heartbeat send failed', e)
        }
      }, this.pushHeartbeatIntervalMs)
    },

    /** Stop the push-channel heartbeat, if running. */
    stopPushHeartbeat() {
      if (this.pushHeartbeatTimer) {
        clearInterval(this.pushHeartbeatTimer)
        this.pushHeartbeatTimer = null
      }
    },

    /** Expand the floating voice assistant panel. */
    expandAssistant() {
      this.assistantExpanded = true
    },

    /** Collapse the floating voice assistant to the docked bar. */
    collapseAssistant() {
      this.assistantExpanded = false
    }
  }
}
</script>

<style scoped>
/* ---- Page layout ---- */

.voice-test-container {
  background: #f6f7fb;
  min-height: calc(100vh - 64px);
  padding: 24px;
}

.page-header {
  margin-bottom: 32px;
}

/* ---- Transcript display ---- */

.status-panel, .display-panel {
  height: 100%;
}

.transcript-box {
  background: #fafafa;
}

.transcript-item {
  background: #f8f9fa;
  border-left: 3px solid #1976d2;
  border-radius: 4px;
  margin-bottom: 8px;
  padding: 8px;
}

/* Highlight the newest (first) entry in the history list */
.transcript-item:first-child {
  background: #e3f2fd;
  border-left-color: #2196f3;
}

/* ---- Floating voice assistant ---- */

.voice-assistant {
  bottom: 24px;
  position: fixed;
  right: 24px;
  z-index: 1000;
}

/* Collapsed (docked) bar */
.ai-dock {
  align-items: center;
  background: #fff;
  border-radius: 14px;
  box-shadow: 0 6px 20px rgba(0,0,0,0.12);
  display: flex;
  padding: 10px 12px;
}

.dock-left {
  font-weight: 600;
  margin-right: 12px;
}

.dock-actions {
  align-items: center;
  display: flex;
}

/* Expanded panel */
.ai-panel {
  background: #fff;
  border-radius: 12px;
  overflow: hidden;
  width: 360px;
}

.ai-header {
  padding: 10px 12px;
}

.ai-body {
  padding: 14px;
}

.ai-greeting {
  color: #5f6368;
  margin-bottom: 12px;
}

/* Large circular microphone button */
.mic-btn {
  height: 84px;
  width: 84px;
}
</style>
