import { ref, computed, h, watch } from 'vue'
import { nanoid } from 'nanoid'
import { message } from 'ant-design-vue'
import { renderExportForm } from '../../render/ReviewRender'
import { renderHSTable } from '../../render/HSRender'
import { getReviewData, getHscode } from '@/api/api'
import { getBusinessKnowledge, getBusinessKnowledgeStream } from '@/api/api2'
import { XStream } from 'ant-design-x-vue'
import MarkdownIt from 'markdown-it'
// Module-level markdown renderer singleton.
// NOTE(review): `md` is not referenced anywhere in this file — confirm it is
// used by a consumer (or by code outside this chunk) before removing.
const md = new MarkdownIt()

/**
 * Resolve the AI reply for a user query, dispatching on the feature scene.
 * Supports streamed output for the business-knowledge scene.
 *
 * @param {string} nextContent - the user's query text
 * @param {Array} files - attached files (not consumed by any current scene)
 * @param {string} scene - feature tab the query came from
 * @param {object} [config] - extra request options (e.g. { signal } for abort)
 * @returns {Promise<*>} a stream descriptor ({ isStream, streamData, sources }),
 *   a rendered VNode, or a plain fallback string
 */
const getRenderContent = async (nextContent, files, scene, config = {}) => {
  switch (scene) {
    case '业务知识':
      // Streamed answer: the caller detects `isStream` and consumes `streamData`.
      return {
        isStream: true,
        streamData: await getBusinessKnowledgeStreamLocal(nextContent, config),
        // Placeholder source entries until the backend supplies real ones.
        sources: [
          {
            content: '暂时无法显示',
            metadata: {
              filename: 'QR台账.csv',
            },
          },
          {
            content: '暂时无法显示',
            metadata: {
              filename: '供应商质量问题台账.csv',
            },
          },
        ],
      }
    case '制单审核': {
      // Braces scope the `const` to this case (avoids no-case-declarations).
      const res_review = await getReviewData(
        {
          query: nextContent,
        },
        config,
      )
      console.log('🐯 ~ getRenderContent ~ res:', res_review)
      return renderExportForm(res_review.data, res_review.pdf_url, res_review.pdf_size)
    }
    case '申报要素': {
      const res_hscode = await getHscode(
        {
          query: nextContent,
        },
        config,
      )
      console.log('🐯 ~ getRenderContent ~ res:', res_hscode)
      return renderHSTable(res_hscode.data)
    }
    default:
      // Unimplemented scenes fall back to a placeholder string.
      return '该功能模块正在开发中...'
  }
}

// Build a mock SSE ReadableStream for local testing of the streaming UI.
// Each chunk mirrors the backend's staged protocol (start → rag → kg → csv →
// llm_streaming → llm_complete) and is emitted with an artificial delay so
// the UI's incremental rendering can be observed without a server.
const createMockSSEStream = () => {
  const mockChunks = [
    {
      event: 'message',
      data: JSON.stringify({
        id: 0,
        content: {
          code: 200,
          stage: 'start',
          message:
            '开始处理查询: 你好，我们这边有一批小体重的濒危猪种，每头都在10公斤以下，是人工繁殖的非改良品种，想安排出口，麻烦帮我看看出口流程',
        },
      }),
    },
    {
      event: 'message',
      data: JSON.stringify({
        id: 1,
        content: {
          code: 200,
          stage: 'rag_start',
          message: '正在检索文档库...',
        },
      }),
    },
    {
      event: 'message',
      data: JSON.stringify({
        id: 2,
        content: {
          code: 200,
          stage: 'rag_complete',
          message: '文档库检索完成',
          ans: [
            {
              metadata: {
                filename:
                  'pdf_9_汽车零部件行业供应链质量管理与探讨——基于A公司在供应链质量管理的实践.docx',
              },
              content:
                '以质量方法与质量保证作为全员的生产制造指导思想，将质量管理细化到各个生产制造环节当中。4．2．1．4物流管理体系物流管理体系集中体现了其生产制造系统中及时生产的理念，A公司要求其供应商共同使用相同的工具以达到能够有效持续地增加对材料地控制。',
            },
            {
              metadata: {
                filename: '2_供应链.docx',
              },
              content:
                '用量控制、出入库可用量检查、库存展望，多种盘点模式，以及与外部设备相关联条码设备，盘点机等等强化库存控制，适应灵活需要：委托代销、组装拆卸、形态转换；调拨盘点、保质期报警、最高最低库存控制，限额领料配比领料齐套领料等等',
            },
          ],
        },
      }),
    },
    {
      event: 'message',
      data: JSON.stringify({
        id: 3,
        content: {
          code: 200,
          stage: 'kg_start',
          message: '正在检索知识图谱...',
        },
      }),
    },
    {
      event: 'message',
      data: JSON.stringify({
        id: 4,
        content: {
          code: 200,
          stage: 'kg_complete',
          message: '知识图谱检索完成',
          kg_result: '当前知识图谱无法提供与濒危猪种出口流程相关的信息。',
        },
      }),
    },
    {
      event: 'message',
      data: JSON.stringify({
        id: 5,
        content: {
          code: 200,
          stage: 'csv_start',
          message: '正在检索CSV数据...',
        },
      }),
    },
    {
      event: 'message',
      data: JSON.stringify({
        id: 6,
        content: {
          code: 200,
          stage: 'csv_complete',
          message: 'CSV数据检索完成',
        },
      }),
    },
    {
      event: 'message',
      data: JSON.stringify({
        id: 7,
        content: {
          code: 200,
          stage: 'llm_start',
          message: '正在生成回答...',
        },
      }),
    },
    {
      event: 'message',
      data: JSON.stringify({
        id: 8,
        content: {
          code: 200,
          stage: 'llm_streaming',
          message: '根据您的查询，关于濒危猪种出口流程，我需要为您提供以下信息：\n\n',
        },
      }),
    },
    {
      event: 'message',
      data: JSON.stringify({
        id: 9,
        content: {
          code: 200,
          stage: 'llm_streaming',
          message: '## 1. 濒危物种出口许可\n',
        },
      }),
    },
    {
      event: 'message',
      data: JSON.stringify({
        id: 10,
        content: {
          code: 200,
          stage: 'llm_streaming',
          message:
            '- 需要向国家林业和草原局申请CITES出口许可证\n- 提供物种鉴定报告和人工繁殖证明\n\n',
        },
      }),
    },
    {
      event: 'message',
      data: JSON.stringify({
        id: 11,
        content: {
          code: 200,
          stage: 'llm_streaming',
          message: '## 2. 检疫要求\n- 动物检疫合格证明\n- 疫苗接种记录\n- 健康状况评估报告\n\n',
        },
      }),
    },
    {
      event: 'message',
      data: JSON.stringify({
        id: 12,
        content: {
          code: 200,
          stage: 'llm_streaming',
          message: '## 3. 运输安排\n- 专业动物运输车辆\n- 温度和湿度控制\n- 应急预案准备\n\n',
        },
      }),
    },
    {
      event: 'message',
      data: JSON.stringify({
        id: 13,
        content: {
          code: 200,
          stage: 'llm_complete',
          message: '回答生成完成',
        },
      }),
    },
  ]

  return new ReadableStream({
    async start(controller) {
      for (const chunk of mockChunks) {
        await new Promise((resolve) => setTimeout(resolve, 800)) // simulate network latency
        // Encode each chunk in the SSE wire format: "event: …\ndata: …\n\n".
        const sseData = `event: ${chunk.event}\ndata: ${chunk.data}\n\n`
        controller.enqueue(new TextEncoder().encode(sseData))
      }
      controller.close()
    },
  })
}

// Obtain the business-knowledge SSE stream for a query.
// Failures are logged here and re-thrown so the caller decides how to recover.
// (During local development this can be swapped for createMockSSEStream().)
const getBusinessKnowledgeStreamLocal = async (query, config = {}) => {
  let stream
  try {
    // Streaming endpoint provided by api2.js.
    stream = await getBusinessKnowledgeStream(query, config)
  } catch (error) {
    console.error('获取业务知识流失败:', error)
    throw error
  }
  return stream
}

/**
 * Consume an SSE stream of business-knowledge events and incrementally update
 * one chat message via `updateCallback`.
 *
 * Events arrive as staged progress markers (`rag_start`, `kg_complete`,
 * `llm_streaming`, ...). Text from stages before `llm_start` accumulates into
 * a "thinking" transcript; from `llm_start` onward it accumulates into the
 * answer text. Document sources captured at `rag_complete` are withheld until
 * the stream finishes, then attached to the final update.
 *
 * @param {ReadableStream} readableStream - raw SSE byte stream (parsed by XStream)
 * @param {string} messageId - id of the chat message being filled in
 * @param {(id: string, patch: object) => void} updateCallback - applies a
 *   partial update to the message identified by `messageId`
 * @returns {Promise<{content: string, sources: Array}>} final answer + sources
 * @throws re-throws stream iteration errors after reporting failure to the UI
 */
const processStreamMessage = async (readableStream, messageId, updateCallback) => {
  const lines = [] // raw chunks, collected but not otherwise read
  let thinkingContent = '' // accumulated "thinking" phase text
  let answerContent = '' // accumulated answer-phase text
  let sources = []
  let isCompleted = false
  let isAnswering = false // true once the answer phase has started

  // Small pause used to pace thinking-phase updates.
  const delay = (ms) => new Promise((resolve) => setTimeout(resolve, ms))

  try {
    for await (const chunk of XStream({ readableStream })) {
      console.log('收到流式数据:', chunk)
      lines.push(chunk)

      // Parse each SSE payload and fold it into the running transcripts.
      try {
        const data = JSON.parse(chunk.data)

        // Payloads nest the useful fields under `content`.
        if (data.content) {
          const content = data.content

          // Dispatch on the pipeline stage reported by the backend.
          switch (content.stage) {
            case 'start':
              thinkingContent += `🔍 ${content.message}\n\n`
              break
            case 'rag_start':
              thinkingContent += `📚 ${content.message}\n`
              break
            case 'rag_complete':
              thinkingContent += `✅ ${content.message}\n\n`
              // Capture document sources now; they are rendered only at the end.
              if (content.ans && Array.isArray(content.ans)) {
                sources = content.ans.map((item) => ({
                  content: item.content,
                  metadata: item.metadata,
                }))
                console.log('提取到sources:', sources)
              }
              break
            case 'kg_start':
              thinkingContent += `🧠 ${content.message}\n`
              break
            case 'kg_complete':
              thinkingContent += `✅ ${content.message}\n\n`
              // `content.kg_result` is intentionally not rendered for now.
              break
            case 'networking_start':
              thinkingContent += `🌐 ${content.message}\n`
              break
            case 'networking_complete':
              thinkingContent += `✅ ${content.message}\n\n`
              break
            case 'csv_start':
              thinkingContent += `📊 ${content.message}\n`
              break
            case 'csv_complete':
              thinkingContent += `✅ ${content.message}\n\n`
              break
            case 'llm_start':
              isAnswering = true // switch accumulation to the answer phase
              answerContent += `🤖 正在生成回答...\n\n` // trailing blank line before tokens

              break
            case 'llm_streaming':
              // Actual AI answer tokens.
              if (content.message) {
                answerContent += content.message
              }
              break
            case 'complete':
            case 'llm_complete':
              // Finalize: ensure a trailing newline, then append the done marker.
              if (answerContent && !answerContent.endsWith('\n')) {
                answerContent += '\n'
              }
              answerContent += `\n\n✅ 回答完成`
              isCompleted = true
              // Final update includes the collected sources.
              updateCallback(messageId, {
                message: {
                  thinking: thinkingContent,
                  answer: answerContent,
                  sources: sources,
                },
                loading: false,
                isStreaming: false,
                streamCompleted: true,
              })
              return // stream is done; stop processing immediately
            default:
              // Unknown stages: append their text to whichever phase is active.
              if (content.message) {
                if (isAnswering) {
                  answerContent += content.message
                } else {
                  thinkingContent += content.message
                }
              }
          }

          // Pace thinking-phase output so progress is visible to the user.
          if (!isAnswering && !isCompleted) {
            await delay(500) // 500ms between thinking updates
          }

          // Live update; sources stay hidden until the stream completes.
          if (!isCompleted) {
            updateCallback(messageId, {
              message: {
                thinking: thinkingContent,
                answer: answerContent,
                sources: [], // no sources while still streaming
              },
              loading: false,
              isStreaming: true,
            })
          }
        }
      } catch (parseError) {
        // Malformed chunks are logged and skipped; the stream keeps going.
        console.warn('解析流式数据失败:', parseError, chunk)
      }
    }

    // Stream ended without an explicit complete event — still finalize with sources.
    if (!isCompleted) {
      updateCallback(messageId, {
        message: {
          thinking: thinkingContent,
          answer: answerContent,
          sources: sources,
        },
        loading: false,
        isStreaming: false,
        streamCompleted: true,
      })
    }

    return { content: answerContent, sources }
  } catch (error) {
    console.error('处理流式消息失败:', error)
    updateCallback(messageId, {
      message: '获取回复失败',
      loading: false,
      error: true,
    })
    throw error
  }
}

/**
 * Chat-message composable: owns per-conversation message storage, the
 * new-conversation buffer, and the send/cancel lifecycle of AI requests.
 *
 * @param {import('vue').Ref<string|null>} activeKey - currently active
 *   conversation key (null while creating a new conversation)
 */
export function useMessages(activeKey) {
  // Per-conversation message lists, keyed by conversation id.
  const messagesMap = ref(new Map())
  // Messages buffered before a conversation exists (new-chat state).
  const tempMessages = ref([])
  const content = ref('')
  const requestLoading = ref(false)
  // Abort handle for the in-flight AI request, if any.
  let aiAbortController = null

  // Look up (and lazily create) the message list for a conversation key.
  const getCurrentMessages = (key) => {
    if (!messagesMap.value.has(key)) {
      messagesMap.value.set(key, [])
    }
    return messagesMap.value.get(key)
  }

  // Items fed to the chat bubbles: the temp buffer while creating a new
  // conversation, otherwise the active conversation's list.
  const BubbleItems = computed(() => {
    if (activeKey.value === null) {
      return tempMessages.value.map(formatMessage)
    } else {
      const currentMessages = getCurrentMessages(activeKey.value)
      return currentMessages.map(formatMessage)
    }
  })

  // Debug trace of conversation switches. `watch` is imported from 'vue';
  // the previous empty `if (!newVal) {}` branch here was dead code and removed.
  watch(activeKey, (newVal, oldVal) => {
    console.log('🐯 ~ watch ~ oldVal,newVal:', oldVal, newVal)
  })

  // Normalize a stored message record into the bubble-item shape.
  // (The former role === 'ai' branch was identical to the else branch, so the
  // conditional and the stale commented-out variant were removed.)
  const formatMessage = ({ id, message, role, loading }) => ({
    key: id,
    role,
    loading,
    content: message,
  })

  /**
   * Send a user message (and optional files) and stream/await the AI reply.
   *
   * @param {{content: string, files: Array, scene: string}} payload
   * @param {import('vue').Ref<boolean>} isCreatingNewConversation - true while
   *   in the "new chat" state (no conversation key yet)
   * @param {import('vue').Ref<Array>} conversationsItems - sidebar conversations
   */
  const sendMessage = async (
    { content: nextContent, files, scene },
    isCreatingNewConversation,
    conversationsItems,
  ) => {
    if (!nextContent && (!files || !files.length)) return
    requestLoading.value = true
    console.log('🐯 ~ sendMessage ~ requestLoading.value:', requestLoading.value)
    console.log(nextContent, files, scene)
    // New-chat state: buffer everything in tempMessages until the AI reply
    // lands, then promote the buffer into a real conversation.
    if (isCreatingNewConversation.value) {
      // Attached files get their own message entry.
      if (files && files.length > 0) {
        tempMessages.value.push({
          id: nanoid(),
          role: 'file',
          message: files,
        })
      }

      // The user's own message.
      tempMessages.value.push({
        id: nanoid(),
        message: nextContent,
        role: 'local',
        loading: false,
      })

      // Placeholder AI message shown in loading state until the reply lands.
      const aiId = nanoid()
      tempMessages.value.push({
        id: aiId,
        message: '',
        role: 'ai',
        loading: true,
        scene: scene,
      })

      // Fresh controller so cancelMessage() can abort this request.
      aiAbortController = new AbortController()

      try {
        const aiReply = await getRenderContent(nextContent, files, scene, {
          signal: aiAbortController.signal,
        })
        console.log('🐯 ~ sendMessage ~ aiReply:', aiReply)

        if (aiReply.isStream && aiReply.streamData) {
          // Streaming reply: patch the placeholder on every stream update.
          await processStreamMessage(aiReply.streamData, aiId, (messageId, updateData) => {
            const idx = tempMessages.value.findIndex((m) => m.id === messageId)
            if (idx !== -1) {
              tempMessages.value[idx] = {
                ...tempMessages.value[idx],
                ...updateData,
                sources: aiReply.sources, // keep source documents attached
                scene: scene,
              }
            }
          })
        } else {
          // Non-streaming reply: replace the placeholder once.
          const idx = tempMessages.value.findIndex((m) => m.id === aiId)
          if (idx !== -1) {
            tempMessages.value[idx] = {
              ...tempMessages.value[idx],
              message: aiReply,
              loading: false,
              scene: scene,
            }
          }
        }

        // Reply received — promote the buffer into a new conversation.
        const newKey = nanoid()
        // Use the user's first sentence (truncated) as the conversation title.
        const newLabel = nextContent.trim().slice(0, 10) || '新聊天'
        conversationsItems.value.push({
          key: newKey,
          label: newLabel,
        })
        messagesMap.value.set(newKey, [...tempMessages.value])
        console.log('🐯 ~ sendMessage ~ messagesMap.value:', messagesMap.value)

        // Activate the new conversation.
        activeKey.value = newKey

        // Clear the buffer and the input box.
        tempMessages.value = []
        content.value = ''
      } catch (e) {
        console.log('🐯 ~ useMessages ~ e:', e)
        const idx = tempMessages.value.findIndex((m) => m.id === aiId)
        if (idx !== -1) {
          // 'CanceledError' is the axios abort error name — NOTE(review): if the
          // api layer uses fetch, aborts are named 'AbortError' instead; confirm.
          // (This local `message` shadows antd's `message` only inside this block.)
          const message = e.name === 'CanceledError' ? '您已取消请求。' : 'AI请求失败，请重试'
          tempMessages.value[idx] = {
            ...tempMessages.value[idx],
            message,
            loading: false,
            scene: scene,
          }
        }
        if (e.name !== 'CanceledError') {
          message.error('AI请求失败')
        }
      } finally {
        requestLoading.value = false
        aiAbortController = null
      }
    } else {
      // Existing conversation: append directly to its message list.
      const currentMessages = getCurrentMessages(activeKey.value)

      // Attached files get their own message entry.
      if (files && files.length > 0) {
        currentMessages.push({
          id: nanoid(),
          role: 'file',
          message: files,
        })
      }

      // The user's own message.
      const userMessage = {
        id: nanoid(),
        message: nextContent,
        role: 'local',
        loading: false,
      }
      currentMessages.push(userMessage)

      // Placeholder AI message in loading state.
      const aiId = nanoid()
      const aiMessage = {
        id: aiId,
        message: '',
        role: 'ai',
        loading: true,
        scene: scene,
      }
      currentMessages.push(aiMessage)

      // Fresh controller so cancelMessage() can abort this request.
      aiAbortController = new AbortController()

      try {
        const aiReply = await getRenderContent(nextContent, files, scene, {
          signal: aiAbortController.signal,
        })

        if (aiReply.isStream && aiReply.streamData) {
          // Streaming reply: patch the placeholder on every stream update.
          await processStreamMessage(aiReply.streamData, aiId, (messageId, updateData) => {
            const idx = currentMessages.findIndex((m) => m.id === messageId)
            if (idx !== -1) {
              currentMessages[idx] = {
                ...currentMessages[idx],
                ...updateData,
                sources: aiReply.sources, // keep source documents attached
                scene: scene,
              }
              // Re-set with a fresh array so the map entry reflects the update.
              messagesMap.value.set(activeKey.value, [...currentMessages])
            }
          })
        } else {
          // Non-streaming reply: replace the placeholder once.
          const idx = currentMessages.findIndex((m) => m.id === aiId)
          if (idx !== -1) {
            currentMessages[idx] = {
              ...currentMessages[idx],
              message: aiReply,
              loading: false,
              scene: scene,
            }
          }
          messagesMap.value.set(activeKey.value, currentMessages)
        }

        // Clear the input box.
        content.value = ''
      } catch (e) {
        const idx = currentMessages.findIndex((m) => m.id === aiId)
        if (idx !== -1) {
          // Same 'CanceledError' caveat as in the new-conversation branch.
          const message = e.name === 'CanceledError' ? '您已取消请求。' : 'AI请求失败，请重试'
          currentMessages[idx] = {
            ...currentMessages[idx],
            message,
            loading: false,
            scene: scene,
          }
        }
        if (e.name !== 'CanceledError') {
          message.error('AI请求失败')
        }
      } finally {
        requestLoading.value = false
        aiAbortController = null
      }
    }
  }

  // Remove all stored messages for a deleted conversation.
  const deleteConversationMessages = (key) => {
    messagesMap.value.delete(key)
  }

  // Abort the in-flight AI request (if any) and mark the pending AI bubble
  // as cancelled in whichever list currently owns it.
  const cancelMessage = (isCreatingNewConversation, scene) => {
    console.log('🐯 ~ cancelMessage ~ scene:', scene)
    requestLoading.value = false

    if (aiAbortController) {
      aiAbortController.abort()
      aiAbortController = null
    }

    if (isCreatingNewConversation.value) {
      const idx = tempMessages.value.findIndex((m) => m.role === 'ai' && m.loading)
      if (idx !== -1) {
        tempMessages.value[idx] = {
          ...tempMessages.value[idx],
          message: '您已取消请求。',
          loading: false,
          scene: scene,
        }
      }
    } else {
      const currentMessages = getCurrentMessages(activeKey.value)
      const idx = currentMessages.findIndex((m) => m.role === 'ai' && m.loading)
      if (idx !== -1) {
        currentMessages[idx] = {
          ...currentMessages[idx],
          message: '您已取消请求。',
          loading: false,
          scene: scene,
        }

        messagesMap.value.set(activeKey.value, currentMessages)
      }
    }
  }

  return {
    messagesMap,
    tempMessages,
    content,
    requestLoading,
    BubbleItems,
    sendMessage,
    cancelMessage,
    deleteConversationMessages,
  }
}

