<template>
  <div class="chat-root">
    <!-- Branding header -->
    <header class="chat-header">
      <div class="brand">
        <img src="/src/assets/logo.svg" alt="logo" class="logo" />
        <div class="title">MateChat Demo</div>
      </div>
      <div class="subtitle">Lightweight demo for your article — local mock or proxy-enabled</div>
    </header>

    <!-- Scrollable transcript; msgsRef is used by scrollBottom() in the script -->
    <div class="messages" ref="msgsRef">
      <div v-if="messages.length === 0" class="empty">No messages yet — say hello 👋</div>
      <!-- NOTE: while m.loading is true the bubble shows "Typing…" and hides m.content -->
      <div v-for="(m, idx) in messages" :key="idx" :class="['msg', m.from]">
        <div class="avatar">{{ m.avatarConfig?.name === 'user' ? 'U' : 'M' }}</div>
        <div class="bubble">
          <div v-if="m.loading" class="loading">Typing…</div>
          <div v-else class="content">{{ m.content }}</div>
        </div>
      </div>
    </div>

    <!-- Composer: message input plus optional API credentials -->
    <div class="composer">
      <form class="composer-form" @submit.prevent="onSubmit(inputValue)">
        <input class="composer-input" v-model="inputValue" placeholder="Type a message and press Enter" />
        <button class="composer-btn" type="submit">Send</button>
      </form>
      <div class="composer-meta">
        <input class="apikey" v-model="apiKey" placeholder="API Key (optional)" />
        <input class="apikey" v-model="baseUrl" placeholder="Base URL (e.g. https://api.example.com)" />
        <input class="apikey" v-model="modelName" placeholder="Model (e.g. gpt-4o-mini)" />
      </div>
    </div>
  </div>
</template>

<script setup>
import { ref, onMounted, nextTick } from 'vue'
// Use OpenAI in browser if user supplies API key + base URL

// Default credentials (will be used if user does not enter values in UI)
// IMPORTANT: do NOT hardcode real API keys here. Leave blank for safety.
const DEFAULT_API_KEY = ""
const DEFAULT_BASE_URL = "https://api.chatanywhere.tech"

// Reactive state bound to the template.
const inputValue = ref('')           // composer text field
const apiKey = ref('')               // empty → local mock mode (see fetchData)
const baseUrl = ref(DEFAULT_BASE_URL) // OpenAI-compatible endpoint root
const modelName = ref('gpt-4o-mini')  // model name sent in the request body
// no SDK client in browser; we'll call the HTTP API directly
const messages = ref([])             // transcript: { from, content, avatarConfig, id?, loading? }
const msgsRef = ref(null)            // DOM ref to the scrollable message list

// helper to scroll to bottom
// Scroll the transcript to its newest message. Waits one tick so the DOM
// reflects the latest messages state before measuring scrollHeight.
const scrollBottom = async () => {
  await nextTick()
  try {
    const container = msgsRef.value
    if (container) {
      container.scrollTop = container.scrollHeight
    }
  } catch (err) {
    // ignore — scrolling is purely cosmetic
  }
}

// Handle composer submission: push the user's message, reserve a reply slot,
// and kick off fetchData to fill it in asynchronously.
// Accepts either an explicit string (as passed from the template) or falls
// back to the bound input value.
const onSubmit = (evt) => {
  const raw = typeof evt === 'string' ? evt : inputValue.value
  const text = raw.trim()
  if (!text) return

  // Clear the composer immediately for snappy feedback.
  inputValue.value = ''

  // Echo the user's message into the transcript.
  messages.value.push({ from: 'user', content: text, avatarConfig: { name: 'user' } })

  // Reserve a placeholder bubble for the model; remember its index so the
  // async fetch can stream into it.
  messages.value.push({ from: 'model', content: '', avatarConfig: { name: 'model' }, id: '', loading: true })
  const modelIndex = messages.value.length - 1

  // Fire-and-forget: fetchData manages the placeholder's loading flag itself.
  fetchData(text, modelIndex)
  scrollBottom()
}

// fetchData: call model HTTP API via fetch (or local mock if no API key)
// fetchData: stream a model reply into messages[modelIndex].
//
// With no API key it simulates a streamed reply locally. Otherwise it POSTs
// to `${baseUrl}/chat/completions` (OpenAI-compatible) with `stream: true`
// and parses SSE-style `data: {...}` lines as they arrive, appending each
// delta to the placeholder message. Always clears `loading` on exit.
const fetchData = async (ques, modelIndex) => {
  if (modelIndex == null || modelIndex < 0 || modelIndex >= messages.value.length) return

  // Alias the reactive message object; mutations remain reactive.
  const msg = messages.value[modelIndex]

  // Append streamed text and reveal it. BUGFIX: the template hides
  // `content` behind the "Typing…" indicator while `loading` is true, so
  // loading must drop as soon as the first chunk lands — otherwise streamed
  // text stays invisible until the whole reply finishes.
  const appendContent = (text) => {
    if (!text) return
    msg.content += text
    msg.loading = false
  }

  // Extract the text payload from an OpenAI-style chunk or completion object
  // (streaming delta, non-streaming message, or legacy `text` field).
  const extractContent = (obj) =>
    obj.choices?.[0]?.delta?.content ?? obj.choices?.[0]?.message?.content ?? obj.choices?.[0]?.text ?? ''

  // --- Local mock: no API key provided, simulate streaming for the demo ---
  if (!apiKey.value) {
    const reply = `Received: ${ques}. This is a local demo response from MateChat mock.`
    msg.content = ''
    msg.loading = true
    // simulate streaming by 20-char chunks
    const chunks = reply.match(/.{1,20}/g) || [reply]
    for (const chunk of chunks) {
      await new Promise((r) => setTimeout(r, 120))
      appendContent(chunk)
      await scrollBottom()
    }
    msg.loading = false
    return
  }

  // --- Remote path: call the service directly via fetch (browser mode) ---
  // NOTE: the DEFAULT_API_KEY fallback is only reachable for a
  // whitespace-only key (truthy but trims to empty); a fully empty key
  // takes the mock path above.
  const used_api_key = apiKey.value && apiKey.value.trim() ? apiKey.value.trim() : DEFAULT_API_KEY
  const used_base_url = baseUrl.value && baseUrl.value.trim() ? baseUrl.value.trim() : DEFAULT_BASE_URL

  try {
    const url = used_base_url.replace(/\/+$/, '') + '/chat/completions'
    const resp = await fetch(url, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${used_api_key}`,
      },
      body: JSON.stringify({ model: modelName.value, messages: [{ role: 'user', content: ques }], stream: true }),
    })

    if (!resp.ok) {
      const t = await resp.text()
      msg.content = '[Error] ' + t
      msg.loading = false
      return
    }

    // Try to read as stream; if not streaming, fall back to json.
    const ct = resp.headers.get('content-type') || ''
    if (ct.includes('application/json')) {
      const data = await resp.json()
      msg.content = data.choices?.[0]?.message?.content || data.text || JSON.stringify(data)
    } else if (resp.body) {
      const reader = resp.body.getReader()
      const decoder = new TextDecoder()
      let buffer = ''

      // Parse one complete line: strip the SSE `data:` prefix, skip the
      // `[DONE]` sentinel, JSON-parse the payload, and append any delta.
      // Non-JSON lines are surfaced raw rather than dropped.
      const handleLine = async (rawLine) => {
        const line = rawLine.trim()
        if (!line) return
        const dataLine = line.startsWith('data:') ? line.replace(/^data:\s*/, '') : line
        if (dataLine === '[DONE]') return
        try {
          const obj = JSON.parse(dataLine)
          // capture the chat id from the first chunk that carries one
          if (obj.id && !msg.id) msg.id = obj.id
          const content = extractContent(obj)
          if (content) {
            appendContent(content)
            await scrollBottom()
          }
        } catch (e) {
          // not JSON — append raw
          appendContent(dataLine)
          await scrollBottom()
        }
      }

      while (true) {
        const { value, done } = await reader.read()
        if (done) break
        buffer += decoder.decode(value, { stream: true })

        // Split on newlines; keep the trailing partial line in the buffer.
        const lines = buffer.split(/\r?\n/)
        buffer = lines.pop() || ''
        for (const rawLine of lines) await handleLine(rawLine)
      }

      // BUGFIX: flush any bytes still held by the streaming decoder — a
      // multi-byte UTF-8 sequence split across the final read would
      // otherwise be silently dropped.
      buffer += decoder.decode()
      if (buffer.trim()) await handleLine(buffer)
    } else {
      // No readable body and not JSON — fall back to plain text.
      msg.content = await resp.text()
    }
  } catch (err) {
    console.error(err)
    msg.content = '[Request failed] ' + String(err)
  } finally {
    msg.loading = false
    await scrollBottom()
  }
}

// NOTE: we avoid using the Node/official SDK in the browser (incompatible runtime).
// Instead the demo uses direct `fetch` calls (see `fetchData`) to emulate the
// same streaming behavior the SDK provides.

onMounted(() => {
  // Seed the transcript with a welcome message so the empty state is brief.
  // loading: false so the template renders the content immediately.
  messages.value.push({ from: 'model', content: 'Welcome to MateChat demo. Enter a message below.', avatarConfig: { name: 'model' }, loading: false })
})
</script>

<style scoped>
/* Layout root: centered column, capped width */
.chat-root { max-width:820px; margin:20px auto; font-family: Inter, ui-sans-serif, system-ui, -apple-system, 'Segoe UI', Roboto, 'Helvetica Neue', Arial; color:#1f2937 }
/* Gradient branding header */
.chat-header { padding:16px 20px; background: linear-gradient(90deg,#4f46e5,#06b6d4); color:#fff; border-radius:10px }
.brand { display:flex; align-items:center; gap:12px }
.logo { width:42px; height:42px }
.title { font-weight:700; font-size:18px }
.subtitle { margin-top:6px; opacity:0.9; font-size:13px }
/* Scrollable transcript pane */
.messages { border-radius:8px; height:56vh; min-height:320px; overflow:auto; padding:16px; background:linear-gradient(180deg,#fbfdff,#f7fafc); box-shadow:0 6px 18px rgba(16,24,40,0.06); margin-top:12px }
.empty { text-align:center; color:#6b7280; padding:40px 0 }
/* Message rows: user messages are mirrored to the right via row-reverse */
.msg { display:flex; gap:12px; margin-bottom:12px; align-items:flex-end }
.msg.user { flex-direction:row-reverse }
.msg .avatar { width:40px; height:40px; border-radius:20px; background:#111827; color:#fff; display:flex; align-items:center; justify-content:center; font-weight:600 }
.bubble { max-width:72%; padding:10px 14px; border-radius:12px; background:#fff; box-shadow:0 1px 2px rgba(2,6,23,0.06); line-height:1.45 }
.msg.user .bubble { background:linear-gradient(90deg,#e0f2fe,#dbeafe); color:#0f172a }
.loading { color:#6b7280; font-style:italic }
/* Composer: input row plus credential fields */
.composer { margin-top:12px; display:flex; flex-direction:column; gap:8px }
.composer-form { display:flex; gap:8px }
.composer-input { flex:1; padding:12px 14px; border-radius:12px; border:1px solid #e6eef6; background:#fff }
.composer-btn { padding:10px 14px; border-radius:10px; background:#4f46e5; color:#fff; border:0 }
.composer-meta { display:flex; justify-content:flex-end }
.apikey { padding:8px 10px; border-radius:8px; border:1px dashed #c7d2fe; width:260px }

/* Narrow screens: tighter margins, full-width credential inputs */
@media (max-width:520px) {
  .chat-root { margin:8px }
  .messages { height:56vh }
  .apikey { width:100% }
}
</style>
