import { useStorage } from '@vueuse/core'
import { useModelStore } from './model'
import type { AbortableAsyncIterator, GenerateRequest, GenerateResponse } from 'ollama'

export interface ChatList {
  /** Session id */
  id: number
  /** Model name used for this session */
  model: string
  /** Session creation time (epoch ms, from Date.now()) */
  createTime: number
  /** Session last-update time (epoch ms, from Date.now()) */
  updateTime: number
  /** Messages exchanged in this session */
  content: ChatListContent[]
}

export interface ChatListContent {
  /** The user's question */
  question: string
  /** User display name */
  userName?: string
  /** System answer */
  sysRes?: string
  /** System display name */
  sysName?: string
  /** AI answer (response text after the think block) */
  aiRes: string
  /** AI display name */
  aiName?: string
  /** Creation time of this message */
  created_at: Date
  /** Whether the response stream has finished */
  done: boolean
  /** Time the stream finished */
  done_at?: Date
  /** Chain-of-thought content (text inside <think>…</think>, tags stripped) */
  thinking: string
  /** Raw accumulated response text (thinking + answer, tags included) */
  response: string
  /** Handle used to abort this message's in-flight streaming generation */
  aborter?: AbortableAsyncIterator<GenerateResponse>
}

export const useLocalChatStore = defineStore('chat-local', () => {
  // Currently selected model name (persisted via localStorage)
  const currentModel = useStorage('currentModel', '')
  // Id of the currently selected session (persisted)
  const currentChatId = useStorage<number>('currentChatId', 0)
  // All chat sessions (persisted)
  const chatList = useStorage<ChatList[]>('chatList', [])
  // Session matching currentChatId, or undefined if none exists
  const currentChat = computed(() => {
    return chatList.value.find((item) => item.id === currentChatId.value)
  })
  // Active streaming iterator for the current session.
  // shallowRef: the iterator object must not be made deeply reactive.
  const chatInstance = shallowRef<AbortableAsyncIterator<GenerateResponse>>()
  const isLoading = ref(false)

  /**
   * Create a new chat session and open a streaming generate call on it.
   * @param model - model name to use for the session
   * @param msg - optional first question to seed the session with
   */
  async function newChat(model: string, msg?: string) {
    isLoading.value = true
    // BUGFIX: the original `currentChatId.value++` returned the OLD value and
    // the following assignment wrote that old value back, undoing the
    // increment — every chat after the first reused the same id. Compute the
    // next id without mutating the ref instead.
    const id = currentChatId.value ? currentChatId.value + 1 : 1
    currentChatId.value = id
    const chat: ChatList = {
      id,
      model,
      createTime: Date.now(),
      updateTime: Date.now(),
      content: [],
    }
    if (msg) {
      chat.content.push({
        question: msg,
        created_at: new Date(),
        done: false,
        thinking: '',
        aiRes: '',
        response: '',
      })
    }
    chatList.value.push(chat)
    try {
      // Warm the model with an empty streaming request.
      // NOTE(review): `msg` is stored above but never sent here — confirm the
      // first question is intentionally delivered by a later send().
      chatInstance.value = await useModelStore().ollama.generate({
        model,
        prompt: '',
        stream: true,
      })
    } catch (error) {
      console.error('🚀 ~ newChat ~ error:', error)
    } finally {
      isLoading.value = false
    }
  }

  /**
   * Send a prompt on the current session and stream the response into its
   * newest message, splitting `<think>…</think>` output into `thinking`
   * versus the visible answer (`aiRes`).
   */
  async function send(content: GenerateRequest) {
    if (!chatInstance.value) {
      await newChat(currentModel.value)
    }
    const chat = currentChat.value
    // Guard instead of non-null assertions: if no session could be
    // created/selected, bail out rather than throw on `currentChat.value!`.
    if (!chat) return
    chat.content.push({
      question: content.prompt,
      created_at: new Date(),
      done: false,
      thinking: '',
      aiRes: '',
      response: '',
    })
    chat.updateTime = Date.now()
    const target = chat.content[chat.content.length - 1]

    const res = await useModelStore().ollama.generate({
      ...content,
      stream: true,
    })
    // Keep a per-message handle so this generation can be aborted.
    target.aborter = res
    // Everything before the `</think>` token counts as chain-of-thought.
    // NOTE(review): assumes the model opens with a think block and that
    // `</think>` arrives at the start of a chunk — confirm per model.
    let isThinking = true
    try {
      for await (const el of res) {
        const { created_at, done, response } = el
        target.response += response
        if (isThinking) {
          // Strip the <think>/</think> markers from the displayed reasoning.
          target.thinking += response.replace(/<[\/]?think>/g, '')
        } else {
          target.aiRes += response
        }
        if (response.startsWith('</think>')) {
          isThinking = false
        }
        if (done) {
          target.done = done
          target.done_at = created_at
        }
      }
    } catch (error) {
      console.error('🚀 ~ send ~ error:', error)
    }
  }

  /** Abort the in-flight generation (if any) and drop the iterator. */
  const stop = () => {
    chatInstance.value?.abort()
    chatInstance.value = undefined
  }

  /**
   * Delete a session by id. If it is the current session, abort any in-flight
   * stream and ask the server to unload the model (`keep_alive: 0`).
   */
  const deleteChat = (id: number) => {
    const index = chatList.value.findIndex((item) => item.id === id)
    if (index !== -1) {
      chatList.value.splice(index, 1)
    }
    if (id === currentChatId.value) {
      chatInstance.value?.abort()
      // NOTE: the session was just removed, so currentChat is normally
      // undefined here and the fallback `currentModel` is used.
      useModelStore()
        .ollama.chat({
          model: currentChat.value?.model || currentModel.value,
          messages: [],
          stream: true,
          keep_alive: 0,
        })
        .then((res) => {
          console.log('🚀 ~ .then ~ res:', res)
          chatInstance.value = undefined
        })
        // BUGFIX: the promise chain had no rejection handler, producing an
        // unhandled rejection if the unload request fails.
        .catch((error) => {
          console.error('🚀 ~ deleteChat ~ error:', error)
        })
    }
  }

  return {
    currentModel,
    currentChatId,
    currentChat,
    chatList,
    newChat,
    send,
    deleteChat,
    stop,
  }
})
