<script setup lang="ts">
  // Type import (can be combined if desired)
  import type { OllamaRequest } from '@/types/ollama'

  import { computed, nextTick, onMounted, ref } from 'vue'

  // Element Plus components & icons
  import { ChatDotRound, Loading, Promotion, Warning } from '@element-plus/icons-vue'
  // Third-party utility and component-library imports (VueUse, Element Plus)
  import { useDebounceFn } from '@vueuse/core'
  import { ElMessage, ElOption, ElSelect } from 'element-plus'

  // Import Services and Interceptors (adjust paths if necessary)
  import { DatabaseService } from '#/services/database' // Reverted to # alias
  import { dbErrorInterceptor, dbRequestInterceptor, dbResponseInterceptor, ollamaErrorInterceptor, ollamaRequestInterceptor, ollamaResponseInterceptor } from '#/services/interceptors' // Reverted to # alias
  import { OllamaService } from '#/services/ollama' // Reverted to # alias

  // --- Service Setup ---
  const ollamaService = new OllamaService('/ollama')

  // Stand-in MCP client so DatabaseService can be wired up before a real
  // client exists; every query resolves with one canned row after a delay.
  const mockMcpClient = {
    query: async (_sql: string, _params: any[]) => {
      console.warn('Mock DB Query:', _sql, _params) // Log mock query
      await new Promise(resolve => setTimeout(resolve, 500)) // Simulate network/DB latency
      return { rows: [{ mockData: 'Sample result from mock DB' }], rowCount: 1, fields: [] }
    },
  }
  const dbService = new DatabaseService(mockMcpClient)

  // Register request/response/error interceptors on both services;
  // the loop preserves the request -> response -> error registration order.
  for (const interceptor of [ollamaRequestInterceptor, ollamaResponseInterceptor, ollamaErrorInterceptor]) {
    ollamaService.addInterceptor(interceptor)
  }
  for (const interceptor of [dbRequestInterceptor, dbResponseInterceptor, dbErrorInterceptor]) {
    dbService.addInterceptor(interceptor)
  }

  // Probe the Ollama instance's /api/version endpoint to see whether it is
  // reachable. Returns true when the service responds OK; otherwise logs the
  // failure, notifies the user, and returns false.
  async function checkOllamaStatus() {
    const address = ollamaService.getBaseUrl()
    try {
      const response = await fetch(`${address}/api/version`)
      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`)
      }
      const data = await response.json()
      console.warn('Ollama 实例状态:', {
        address,
        version: data.version,
        status: 'running',
      })
      return true
    } catch (error) {
      console.warn('Ollama 实例未运行或无法访问:', {
        address,
        status: 'error',
        error: error instanceof Error ? error.message : '未知错误',
      })
      ElMessage.error(`无法连接到 Ollama 服务: ${address}`)
      console.warn('[Debug] checkOllamaStatus finished - isOllamaRunning: false') // Log status
      return false
    }
  }

  // --- Fetch Available Models ---
  // Query Ollama's /api/tags endpoint, populate `availableModels`, and pick a
  // sensible `selectedModel`: the configured default when installed, otherwise
  // the first model returned. No-op when Ollama is not reachable.
  async function fetchAvailableModels() {
    if (!isOllamaRunning.value) {
      return
    }
    try {
      console.warn('[Debug] Fetching available Ollama models...')
      const response = await fetch(`${ollamaService.getBaseUrl()}/api/tags`)
      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`)
      }
      const data = await response.json()
      // The Ollama tags endpoint responds with { models: [...] }
      availableModels.value = data.models || []
      console.warn('[Debug] Available models fetched:', availableModels.value)

      const models = availableModels.value
      if (models.length === 0) {
        console.warn('[Debug] No Ollama models found.')
        ElMessage.warning('未找到可用的 Ollama 模型。')
        return
      }
      if (models.some(m => m.name === DEFAULT_MODEL)) {
        selectedModel.value = DEFAULT_MODEL
        console.warn(`[Debug] Default model '${DEFAULT_MODEL}' selected.`)
      } else if (models[0]) {
        // Guard against a sparse/empty first entry before reading .name
        selectedModel.value = models[0].name
        console.warn(`[Debug] Default model '${DEFAULT_MODEL}' not found. Using first available: '${selectedModel.value}'`)
      }
    } catch (error) {
      console.error('Error fetching Ollama models:', error)
      ElMessage.error('获取 Ollama 模型列表失败')
      availableModels.value = []
    }
  }

  // --- End Service Setup ---

  // Shape of a single entry in the chat transcript.
  interface ChatMessage {
    id: string // Unique id from generateMessageId(); used as v-for key and for later lookups
    type: 'assistant' | 'user'
    content: string // Rendered via v-html in the template
    time: string // Formatted timestamp (stored even though the UI does not display it)
    status?: 'error' | 'sent' | 'thinking' // Simplified status
  }

  // Constants
  const USER_AVATAR = 'https://cube.elemecdn.com/0/88/03b0d39583f48206768a7534e55bcpng.png' // Replace with actual user avatar if available
  const ASSISTANT_AVATAR = 'https://cube.elemecdn.com/3/7c/3ea6beec64369c2642b92c6726f1epng.png' // Replace with DeepSeek logo/avatar
  const MAX_MESSAGE_LENGTH = 2000 // Hard cap on characters per outgoing message
  const SCROLL_DEBOUNCE_TIME = 100 // Debounce window (ms) for auto-scrolling the history
  const DEFAULT_MODEL = 'llama2' // Define default model (preferred if installed; see fetchAvailableModels)

  // State
  const messages = ref<ChatMessage[]>([]) // Chat transcript, oldest first
  const inputMessage = ref('') // Clear initial value
  const isLoading = ref(false) // Controls send button state and potentially global loading
  const chatHistory = ref<HTMLElement | null>(null) // Template ref for the scrollable history container
  const isOllamaRunning = ref(false) // Track Ollama status
  // --- NEW State for Models ---
  const availableModels = ref<{ name: string }[]>([]) // Models reported by Ollama's /api/tags
  const selectedModel = ref<string>(DEFAULT_MODEL) // Initialize with default
  // --- END NEW State ---

  // Computed
  // Sending is allowed only when there is non-blank input within the length
  // limit, no request is in flight, Ollama is reachable, and a model has been
  // chosen from a non-empty list.
  const canSendMessage = computed(() => {
    const text = inputMessage.value
    if (text.trim().length === 0 || text.length > MAX_MESSAGE_LENGTH) {
      return false
    }
    if (isLoading.value || !isOllamaRunning.value) {
      return false
    }
    return availableModels.value.length > 0 && Boolean(selectedModel.value)
  })

  // Methods
  /** Build a reasonably unique message id: base36 timestamp + base36 random suffix. */
  const generateMessageId = () => {
    const timestampPart = Date.now().toString(36)
    const randomPart = Math.random().toString(36).slice(2)
    return `${timestampPart}${randomPart}`
  }

  /**
   * Format a Date as a zh-CN 2-digit hour:minute string.
   * (The DeepSeek-style UI doesn't show per-message times, but the value is
   * stored on each ChatMessage for potential later use.)
   */
  const formatTime = (date: Date) => {
    const timeFormatter = new Intl.DateTimeFormat('zh-CN', {
      hour: '2-digit',
      minute: '2-digit',
    })
    return timeFormatter.format(date)
  }

  // Debounced smooth-scroll to the newest message; waits for the DOM to
  // settle via nextTick before measuring scrollHeight.
  const scrollToBottom = useDebounceFn(async () => {
    await nextTick()
    const container = chatHistory.value
    if (!container) {
      return
    }
    container.scrollTo({ top: container.scrollHeight, behavior: 'smooth' })
  }, SCROLL_DEBOUNCE_TIME)

  const addMessage = (type: 'assistant' | 'user', content: string, status: 'error' | 'sent' | 'thinking' = 'sent') => {
    const message: ChatMessage = {
      id: generateMessageId(),
      type,
      content,
      time: formatTime(new Date()), // Store time even if not displayed
      status,
    }
    messages.value.push(message)
    // Ensure scroll happens after message is added and DOM updates
    nextTick(() => {
      scrollToBottom()
    })
    return message.id // Return ID for updates
  }

  /**
   * Replace the content of a 'thinking' placeholder message and finalize its
   * status ('sent' or 'error'). No-op when the id is unknown or the message
   * was already finalized.
   */
  const updateThinkingMessage = (id: string, newContent: string, finalStatus: 'error' | 'sent' = 'sent') => {
    const target = messages.value.find(m => m.id === id)
    if (!target || target.status !== 'thinking') {
      return
    }
    target.content = newContent
    target.status = finalStatus
    target.time = formatTime(new Date()) // Refresh the timestamp on completion
    void nextTick().then(() => scrollToBottom()) // Scroll again after content update
  }

  /**
   * Validate input, send the prompt to the selected Ollama model, and drive
   * the transcript lifecycle: user message -> 'thinking' placeholder -> final
   * assistant reply (or error). When the model's reply looks like SQL it is
   * additionally executed against the (mock) database and the result is
   * appended to the reply.
   */
  const sendMessage = async () => {
    console.warn('[Debug] sendMessage attempt - isLoading:', isLoading.value, 'isOllamaRunning:', isOllamaRunning.value, 'canSend:', canSendMessage.value, 'selectedModel:', selectedModel.value) // Log states
    if (!canSendMessage.value) {
      // Surface the most relevant blocker to the user before bailing out.
      if (!isOllamaRunning.value) {
        ElMessage.warning('Ollama 服务未连接，无法发送消息。')
        return
      }
      if (availableModels.value.length === 0 || !selectedModel.value) {
        ElMessage.warning('请先选择一个模型。')
        return
      }
      if (inputMessage.value.length > MAX_MESSAGE_LENGTH) {
        ElMessage.warning(`消息长度不能超过 ${MAX_MESSAGE_LENGTH} 字符`)
      }
      return
    }

    const userInput = inputMessage.value.trim()
    inputMessage.value = ''

    addMessage('user', userInput, 'sent')
    const thinkingMessageId = addMessage('assistant', '', 'thinking')
    isLoading.value = true
    console.warn('[Debug] isLoading set to true') // Log state change

    try {
      // --- Call Ollama Service ---
      const requestPayload: OllamaRequest = {
        model: selectedModel.value, // Use the selected model
        prompt: userInput,
        options: {
          temperature: 0.7,
          top_p: 0.9,
        },
      }
      console.warn('[Debug] Sending request to Ollama:', requestPayload)
      const ollamaResponse = await ollamaService.generate(requestPayload)
      // --- End Ollama Call ---

      if (ollamaResponse.code === 200 && ollamaResponse.data) {
        // Keep the raw model reply separate from the annotated display text:
        // only the raw reply may be executed as SQL below.
        const modelReply = ollamaResponse.data.response
        let finalContent = modelReply

        // --- Optional: Database Query Integration ---
        // Basic heuristic: does the reply start with a SQL keyword?
        // Needs refinement for real use.
        const isSqlQuery = /^\s*(?:SELECT|INSERT|UPDATE|DELETE|CREATE|ALTER|DROP|BEGIN|COMMIT|ROLLBACK)\b/i.test(modelReply)
        if (isSqlQuery) {
          finalContent += `\n\n*(检测到可能的 SQL 查询，尝试在模拟数据库中执行...)*`
          // Show the annotation immediately but keep status 'thinking' while querying the DB
          const messageIndex = messages.value.findIndex(m => m.id === thinkingMessageId)
          if (messageIndex !== -1 && messages.value[messageIndex]?.status === 'thinking') {
            messages.value[messageIndex].content = finalContent
            await nextTick() // Wait for DOM update if needed
            scrollToBottom()
          }

          try {
            // BUGFIX: execute the raw model reply, not `finalContent` — by this
            // point `finalContent` already has the Markdown annotation appended,
            // which would corrupt the SQL sent to the database.
            const dbResponse = await dbService.query({ query: modelReply, params: [] })
            finalContent += dbResponse.code === 200 && dbResponse.data ? `\n\n**数据库查询结果:**\n\`\`\`json\n${JSON.stringify(dbResponse.data.rows, null, 2)}\n\`\`\`` : `\n\n**数据库查询失败:** ${dbResponse.message}`
          } catch (dbError) {
            finalContent += `\n\n**数据库查询出错:** ${dbError instanceof Error ? dbError.message : '未知错误'}`
            console.error('Database query error:', dbError)
          }
          updateThinkingMessage(thinkingMessageId, finalContent, 'sent') // Final update with DB result
        } else {
          // Plain reply: just swap it into the placeholder
          updateThinkingMessage(thinkingMessageId, finalContent, 'sent')
        }
        // --- End Database Query Integration ---
      } else {
        // Ollama responded but with an error code or an empty payload
        const errorMessage = ollamaResponse.message || '从 Ollama 获取回复时出错'
        ElMessage.error(errorMessage)
        updateThinkingMessage(thinkingMessageId, `抱歉，处理请求时出错: ${errorMessage}`, 'error')
      }
    } catch (error) {
      // Network failures or errors thrown by interceptors/service logic
      console.error('Error sending message:', error)
      const errorMessage = error instanceof Error ? error.message : '未知错误'
      ElMessage.error(`发送消息失败: ${errorMessage}`)
      updateThinkingMessage(thinkingMessageId, `抱歉，处理请求时出错: ${errorMessage}`, 'error')
    } finally {
      isLoading.value = false
      console.warn('[Debug] isLoading set to false') // Log state change
    }
  }

  // Lifecycle: on mount, probe Ollama and — only if it is reachable — load
  // the list of installed models.
  onMounted(async () => {
    console.warn('[Debug] onMounted start - checking Ollama status...')
    const running = await checkOllamaStatus()
    isOllamaRunning.value = running
    console.warn('[Debug] onMounted finished - isOllamaRunning:', isOllamaRunning.value) // Log status after check
    if (running) {
      await fetchAvailableModels() // Fetch models if Ollama is running
    }
  })
</script>

<template>
  <!-- Layout: scrollable message history on top; model picker + input pinned below -->
  <div class="deepseek-chat-container" style="height: 100vh; max-height: 800px">
    <div class="chat-history" ref="chatHistory">
      <!-- Empty State: greets the user until the first message is added -->
      <div v-if="messages.length === 0" class="empty-state">
        <el-button type="primary" plain @click="() => addMessage('assistant', '你好！我是 DeepSeek AI，有什么可以帮您？')">
          <el-icon><ChatDotRound /></el-icon> 开启新对话
        </el-button>
      </div>

      <!-- Messages Loop: one wrapper per ChatMessage, styled by sender type -->
      <div v-for="message in messages" :key="message.id" class="message-wrapper" :class="[`message-${message.type}`]">
        <el-avatar class="message-avatar" :size="32" :src="message.type === 'user' ? USER_AVATAR : ASSISTANT_AVATAR" />
        <div class="message-bubble" :class="[message.status === 'error' ? 'message-bubble-error' : '']">
          <!-- Spinner placeholder while the assistant reply is pending -->
          <div v-if="message.status === 'thinking'" class="thinking-indicator">
            <el-icon class="is-loading"><Loading /></el-icon> 思考中...
          </div>
          <!-- NOTE(review): v-html renders model output as raw HTML — confirm it is sanitized upstream -->
          <div v-else class="message-text" v-html="message.content"></div>
          <!-- Error indicator shown inside the bubble or below -->
          <div v-if="message.status === 'error'" class="error-indicator-text">
            <el-icon size="12"><Warning /></el-icon> 获取回复失败
          </div>
        </div>
      </div>
    </div>

    <!-- Input Area Simplified -->
    <div class="input-area-container">
      <!-- Model Selection Dropdown: disabled until Ollama is reachable and models are loaded -->
      <ElSelect v-model="selectedModel" placeholder="选择模型" style="width: 220px; margin-bottom: 10px" :disabled="availableModels.length === 0 || !isOllamaRunning || isLoading">
        <ElOption v-for="model in availableModels" :key="model.name" :label="model.name" :value="model.name" />
        <template #empty>
          <span style="padding: 0 10px; font-size: 12px; color: #999">{{ isOllamaRunning ? '加载模型中或无可用模型' : 'Ollama未连接' }}</span>
        </template>
      </ElSelect>

      <!-- Flex row for input and send button; Enter sends (default newline prevented) -->
      <div class="input-row">
        <textarea v-model="inputMessage" :rows="1" placeholder="给 DeepSeek 发送消息 (HTML Textarea)" @keydown.enter.prevent="sendMessage" style="flex: 1; padding: 8px; resize: none; border: 1px solid grey" :disabled="isLoading"> </textarea>
        <el-button circle type="primary" @click="sendMessage" :loading="isLoading" :disabled="!canSendMessage" class="send-button" :icon="Promotion" />
      </div>
      <div class="input-footer-text">内容由 AI 生成，请仔细甄别</div>
    </div>
  </div>
</template>

<style scoped>
  /* Root container: vertical flex column filling the parent-defined height */
  .deepseek-chat-container {
    display: flex;
    flex-direction: column;
    width: 100%;
    height: 100%; /* Expect parent to define height */
    overflow: hidden;
    background-color: #fff;
  }

  /* Scrollable transcript area */
  .chat-history {
    display: flex;
    flex: 1 1 auto; /* Grow and shrink, take available space */
    flex-direction: column;
    gap: 28px; /* Spacing between messages */
    padding: 24px 15%; /* Adjust padding for centering */
    overflow-y: auto;
    background-color: #fff;
    scroll-behavior: smooth;
  }

  .empty-state {
    display: flex;
    flex-grow: 1;
    flex-direction: column;
    align-items: center;
    justify-content: center;
    color: #b0b0b0;
    text-align: center;
  }

  /* One row per message: avatar + bubble */
  .message-wrapper {
    display: flex;
    gap: 12px;
    align-items: flex-start;
    max-width: 80%; /* Max width of the entire message row */
  }

  .message-user {
    flex-direction: row-reverse; /* Avatar on the right */
    align-self: flex-end;
  }

  .message-assistant {
    align-self: flex-start;
  }

  .message-avatar {
    flex-shrink: 0;
    margin-top: 2px; /* Fine-tune avatar alignment */
  }

  .message-bubble {
    position: relative;
    padding: 10px 14px;
    font-size: 14px;
    line-height: 1.6;
    word-wrap: break-word;
    overflow-wrap: break-word;
    border-radius: 12px; /* Slightly more rounded */
    box-shadow: 0 1px 3px rgb(0 0 0 / 6%); /* Softer shadow */
  }

  .message-assistant .message-bubble {
    color: #333;
    background-color: #f7f7f8; /* DeepSeek assistant bubble color */
    border: 1px solid #ebebeb;
  }

  .message-user .message-bubble {
    color: #333;
    background-color: #e1f0ff; /* DeepSeek user bubble color */
    border: 1px solid #d1e7ff;
  }

  /* Tinted bubble variant for failed replies */
  .message-bubble-error {
    background-color: #fff5f5;
    border-color: #fdd;
  }

  .message-text {
    white-space: pre-wrap; /* Preserve whitespace and newlines */
  }

  .thinking-indicator {
    display: flex;
    gap: 6px;
    align-items: center;
    min-height: 22px; /* Consistent height */
    font-size: 14px;
    color: #999;
  }

  .error-indicator-text {
    display: flex;
    gap: 4px;
    align-items: center;
    margin-top: 6px;
    font-size: 11px;
    color: #f56c6c;
  }

  /* --- Input Area Refactored Styles --- */
  .input-area-container {
    display: flex;
    flex-shrink: 0;
    flex-direction: column;

    /* gap: 10px; Is handled by margins now */
    padding: 12px 15%;
    background-color: #fff;
    border-top: 1px solid #f0f0f0;
  }

  /* Style for the direct input row */
  .input-row {
    display: flex;
    gap: 10px;
    align-items: flex-end; /* Corrected typo */
    margin-bottom: 10px; /* Add space before footer */
  }

  /* Send button styles */
  .send-button {
    flex-shrink: 0;
    transition:
      background-color 0.2s ease,
      transform 0.1s ease;
  }

  .send-button:not(.is-disabled):hover {
    background-color: #3a8ee6;
    transform: scale(1.05);
  }

  .send-button:not(.is-disabled):active {
    transform: scale(0.98);
  }

  .input-footer-text {
    padding-top: 4px;
    font-size: 12px;
    color: #ccc;
    text-align: center;
    user-select: none;
  }

  /* Optional: Custom Scrollbar (WebKit-only pseudo-elements) */
  .chat-history::-webkit-scrollbar {
    width: 6px;
  }

  .chat-history::-webkit-scrollbar-thumb {
    background-color: #e0e0e0;
    border: 1px solid #f7f7f8; /* Match track */
    border-radius: 3px;
  }

  .chat-history::-webkit-scrollbar-track {
    background-color: #f7f7f8;
    border-radius: 3px;
  }

  /* Icon styling — :deep() pierces scoped styles into Element Plus internals */
  :deep(.el-icon) {
    font-size: 16px;
    vertical-align: middle;
  }

  .send-button :deep(.el-icon) {
    font-size: 18px;
  }

  .thinking-indicator :deep(.el-icon) {
    color: #409eff;
  }

  .error-indicator-text :deep(.el-icon) {
    margin-right: 2px;
  }

  .empty-state :deep(.el-button) .el-icon {
    margin-right: 4px;
    font-size: 16px;
  }
</style>
