import { v4 as uuidv4 } from 'uuid'
import fs from 'fs/promises'
import path from 'path'
import { vectorStore } from '../services/vectorStore.js'
import { modelService } from '../services/model.js'
import logger from '../services/logger.js'
import { createReadStream } from 'fs'
import mammoth from 'mammoth'
import { marked } from 'marked'
import { processFile, checkFileType, saveUploadedFile } from '../utils/fileProcessors/index.js'
import { TextChunker } from '../utils/textChunker.js'

// Lazy, memoized loader for pdf-parse (imported from the internal lib path,
// since importing the package root triggers its debug-mode file access).
let pdfParse = null
let pdfParseLoading = null

/**
 * Initialize and return the pdf-parse function, loading the module at most
 * once. Concurrent callers share the same in-flight import (the original
 * version could kick off duplicate imports under concurrency); a failed load
 * is forgotten so a later call can retry.
 * @returns {Promise<Function>} the pdf-parse entry point
 * @throws {Error} when the module cannot be loaded
 */
const initPdfParse = async () => {
  if (pdfParse) return pdfParse
  if (!pdfParseLoading) {
    pdfParseLoading = import('pdf-parse/lib/pdf-parse.js')
      .then((mod) => {
        pdfParse = mod.default
        return pdfParse
      })
      .catch((error) => {
        // Clear the cached promise so the next call retries instead of
        // reusing a rejected promise forever.
        pdfParseLoading = null
        logger.error('Failed to initialize pdf-parse:', error)
        throw new Error('PDF processing module initialization failed')
      })
  }
  return pdfParseLoading
}

// Maps a lowercase file extension to a human-readable document type label.
// Frozen so the shared lookup table cannot be mutated at runtime.
const FILE_TYPE_MAP = Object.freeze({
  md: 'Markdown',
  txt: 'Text',
  pdf: 'PDF',
  doc: 'Word',
  docx: 'Word',
  unknown: 'Unknown'
})

/**
 * List all stored documents with their metadata.
 * Responds with `{ list: Document[], total: number }`; files whose stat or
 * metadata processing fails are dropped from the listing rather than failing
 * the whole request.
 */
export const getDocuments = async (req, res, next) => {
  try {
    // Ensure the documents directory exists; on first run create it and
    // answer with an empty list.
    const documentsDir = path.join(process.cwd(), 'data/documents')
    try {
      await fs.access(documentsDir)
    } catch (err) {
      await fs.mkdir(documentsDir, { recursive: true })
      logger.info('Created documents directory:', documentsDir)
      return res.json({
        list: [],
        total: 0
      })
    }

    const files = await fs.readdir(documentsDir)
    logger.info('Found files in documents directory:', files)

    // Skip the `.meta.json` sidecars; only actual document files are listed.
    const documentFiles = files.filter(file => !file.endsWith('.meta.json'))

    // Read every document's stats and sidecar metadata in parallel.
    const documents = (await Promise.all(documentFiles.map(async file => {
      const filePath = path.join(documentsDir, file)
      const metaPath = `${filePath}.meta.json`

      try {
        const stats = await fs.stat(filePath)

        // Read the metadata sidecar; fall back to the on-disk filename when
        // none exists. (The previous UTF-8 Buffer round-trips were no-ops
        // and have been removed — JSON.parse already yields UTF-8 strings.)
        let meta = {}
        try {
          const metaContent = await fs.readFile(metaPath, 'utf8')
          meta = JSON.parse(metaContent)
        } catch (err) {
          logger.warn(`Metadata not found for file: ${file}`, err)
          meta = {
            id: file,
            title: file,
            originalName: file,
            status: 'pending'
          }
        }

        // Guard with '' so a metadata record missing both names cannot make
        // path.extname throw; extname('') is '' which maps to 'Unknown'.
        const displayName = meta.originalName || meta.title || ''
        const doc = {
          id: meta.id,
          title: displayName,
          size: stats.size,
          uploadTime: meta.uploadTime || stats.mtime,
          status: meta.status || 'pending',
          type: FILE_TYPE_MAP[path.extname(displayName).slice(1).toLowerCase()] || 'Unknown'
        }

        logger.debug('Processed document:', {
          id: doc.id,
          title: doc.title,
          originalName: meta.originalName
        })

        return doc
      } catch (err) {
        logger.error(`Error processing file: ${file}`, err)
        return null
      }
    }))).filter(doc => doc !== null)

    logger.info('Processed documents:', {
      count: documents.length,
      documents: documents.map(d => ({
        id: d.id,
        title: d.title,
        status: d.status
      }))
    })

    res.json({
      list: documents,
      total: documents.length
    })
  } catch (error) {
    logger.error('Failed to get documents:', error)
    next(error)
  }
}

/**
 * Upload a document: persist the uploaded file under a fresh UUID plus a
 * JSON metadata sidecar (`<id>.meta.json`), then delete the multer temp file.
 * Responds 400 when no file was provided.
 */
export const uploadDocument = async (req, res, next) => {
  try {
    if (!req.file) {
      return res.status(400).json({
        message: 'No file uploaded',
        details: 'Please select a file to upload'
      })
    }

    // Multer exposes the original filename as latin1; re-decode as UTF-8 so
    // non-ASCII filenames survive.
    let originalName = req.file.originalname
    try {
      originalName = Buffer.from(originalName, 'latin1').toString('utf8')
    } catch (err) {
      logger.warn('Failed to decode filename:', err)
    }

    const id = uuidv4()
    const fileType = path.extname(originalName).slice(1).toLowerCase()

    const meta = {
      id,
      title: originalName,
      originalName,
      uploadTime: new Date().toISOString(),
      status: 'pending',
      type: fileType
    }

    const docPath = path.join(process.cwd(), 'data/documents', id)

    // Copy the uploaded bytes verbatim for every file type. (The previous
    // read-as-UTF-8-and-rewrite path for "text" files was a no-op for valid
    // UTF-8 and silently corrupted any other encoding.)
    await fs.copyFile(req.file.path, docPath)

    // Remove the multer temp file now that the document is persisted.
    await fs.unlink(req.file.path)

    // Write the metadata sidecar next to the document.
    const metaPath = `${docPath}.meta.json`
    await fs.writeFile(metaPath, JSON.stringify(meta, null, 2), 'utf8')

    logger.info('Document saved:', {
      id,
      title: originalName,
      type: fileType,
      size: req.file.size
    })

    res.json({
      message: 'Document uploaded successfully',
      id,
      title: originalName
    })
  } catch (error) {
    // Best-effort temp-file cleanup on any failure path.
    if (req.file) {
      try {
        await fs.unlink(req.file.path)
      } catch (unlinkError) {
        logger.error('Failed to delete temp file:', unlinkError)
      }
    }

    logger.error('Failed to upload document:', error)
    next(error)
  }
}

/**
 * Vectorize a document, streaming progress to the client over SSE.
 * Reads the stored file, splits it into chunks, embeds each chunk in small
 * batches via modelService, inserts the vectors into the vector store, and
 * finally updates the metadata sidecar. The response is `text/event-stream`;
 * errors are reported as an SSE `error` event, not an HTTP error status.
 */
export const vectorizeDocument = async (req, res, next) => {
  const { id } = req.params
  // NOTE(review): these reads happen before the SSE handshake below; if
  // modelService.config.embedding were ever missing this would throw before
  // any response handling is in place — confirm config shape is guaranteed
  // at startup.
  const defaultParams = modelService.config.embedding.parameters
  const chunkSize = Number(req.body.chunkSize) || defaultParams.chunkSize
  const overlap = Number(req.body.overlap) || defaultParams.overlap
  
  let heartbeat = null

  // Write one SSE `data:` event. Write failures are logged and swallowed so
  // a dropped client does not abort the vectorization loop.
  const sendProgress = (data) => {
    try {
      res.write(`data: ${JSON.stringify(data)}\n\n`)
      res.flush && res.flush()
    } catch (error) {
      logger.error('Failed to send progress:', error)
    }
  }

  // Embed one batch of chunks and insert the vectors, retrying transient
  // embedding failures and invalid results.
  const processBatch = async (batch, currentIndex, totalParagraphs) => {
    try {
      // Report progress for this batch (maps onto 30–90% of the overall bar).
      sendProgress({ 
        status: 'processing', 
        progress: 30 + Math.floor((currentIndex / totalParagraphs) * 60), 
        message: `正在向量化段落 ${currentIndex}/${totalParagraphs}...` 
      })
      
      // Generate embeddings with up to 3 attempts; invalid results also
      // trigger a retry (with a shorter back-off than thrown errors).
      let embeddings = []
      let retries = 3
      
      while (retries > 0) {
        try {
          embeddings = await modelService.generateEmbeddingBatch(
            batch.map(chunk => chunk.content)
          )
          
          // Validate: every embedding must be a 768-dim all-numeric vector.
          const isValid = embeddings.every(emb => 
            Array.isArray(emb) && 
            emb.length === 768 && 
            emb.every(val => typeof val === 'number' && !isNaN(val))
          )
          
          if (isValid) break
          
          logger.warn('Invalid embeddings, retrying...', {
            batchSize: batch.length,
            currentIndex
          })
          retries--
          await new Promise(resolve => setTimeout(resolve, 1000))
        } catch (err) {
          logger.error('Failed to generate embeddings, retrying...', {
            error: err.message,
            batchSize: batch.length,
            currentIndex,
            retriesLeft: retries
          })
          retries--
          if (retries === 0) throw err
          await new Promise(resolve => setTimeout(resolve, 2000))
        }
      }
      
      // Persist the embedded batch to the vector store.
      await vectorStore.insertVectors(
        batch.map((chunk, i) => ({
          ...chunk,
          embedding: embeddings[i]
        }))
      )
      
      logger.info(`Processed batch ${currentIndex}/${totalParagraphs}`)
    } catch (error) {
      logger.error('Failed to process batch:', {
        batchSize: batch.length,
        currentIndex,
        error: error.message
      })
      throw error
    }
  }

  try {
    // Start the SSE stream.
    res.writeHead(200, {
      'Content-Type': 'text/event-stream',
      'Cache-Control': 'no-cache',
      'Connection': 'keep-alive'
    })

    // SSE comment-line heartbeat every 3s keeps proxies from closing the
    // connection during long-running batches.
    heartbeat = setInterval(() => {
      try {
        res.write(':\n\n')
        res.flush && res.flush()
      } catch (error) {
        logger.error('Heartbeat failed:', error)
        clearInterval(heartbeat)
      }
    }, 3000)

    // Load the document's metadata sidecar.
    const docPath = path.join(process.cwd(), 'data/documents', id)
    const metaPath = `${docPath}.meta.json`
    const meta = JSON.parse(await fs.readFile(metaPath, 'utf8'))
    
    // Remove vectors from any previous run so re-vectorizing never leaves
    // duplicate chunks; failure here is non-fatal.
    try {
      await vectorStore.deleteVectors(`parent_id == "${id}"`)
      logger.info('Cleaned up existing vectors:', { id })
    } catch (error) {
      logger.warn('Failed to cleanup vectors:', { id, error: error.message })
    }
    
    sendProgress({ 
      status: 'processing', 
      progress: 10, 
      message: '正在读取文档...' 
    })

    // Extract plain text via the shared file processor (handles pdf/docx/…).
    const content = await processFile(docPath)
    
    sendProgress({ 
      status: 'processing', 
      progress: 30, 
      message: '正在分割文档...' 
    })
    
    const chunks = TextChunker.splitText(content, { chunkSize, overlap })
    
    // Small batches keep the embedding service stable.
    const batchSize = 3
    let chunkIndex = 0
    let currentBatch = []

    for (const chunk of chunks) {
      const chunkData = {
        id: `${id}_${chunkIndex}`,
        parent_id: id,
        content: chunk.content,
        title: meta.title || meta.originalName,
        chunk_index: chunkIndex
      }
      currentBatch.push(chunkData)
      chunkIndex++
      
      if (currentBatch.length >= batchSize) {
        await processBatch(currentBatch, chunkIndex, chunks.length)
        currentBatch = []
        
        // Brief pause between batches to avoid hammering the embedding API.
        await new Promise(resolve => setTimeout(resolve, 100))
      }
    }
    
    // Flush the final, possibly-partial batch.
    if (currentBatch.length > 0) {
      await processBatch(currentBatch, chunkIndex, chunks.length)
    }

    // Mark the document vectorized in its metadata sidecar.
    meta.status = 'vectorized'
    meta.vectorizedAt = new Date().toISOString()
    meta.chunksCount = chunkIndex
    await fs.writeFile(metaPath, JSON.stringify(meta, null, 2), 'utf8')

    // Sanity-check that the store actually holds chunks for this document.
    const verifyChunks = await vectorStore.getDocumentChunks(id)
    if (!verifyChunks.length) {
      throw new Error('Vectorization verification failed')
    }

    sendProgress({
      status: 'completed',
      progress: 100,
      message: '处理完成',
      chunks: chunkIndex
    })

    if (heartbeat) {
      clearInterval(heartbeat)
    }
    res.end()

  } catch (error) {
    logger.error('Vectorization failed:', {
      id,
      error: error.message,
      stack: error.stack
    })
    
    // Headers are already sent, so failure is reported as an SSE event.
    sendProgress({
      status: 'error',
      message: `处理失败: ${error.message}`
    })
    
    if (heartbeat) {
      clearInterval(heartbeat)
    }
    res.end()
  }
}

/**
 * Return a document's content for preview.
 * - `?raw=true` on a PDF streams the original bytes with PDF headers.
 * - PDF is parsed to plain text; DOCX and Markdown are converted to HTML
 *   (flagged via `isHtml`); legacy .doc gets a "not supported" message;
 *   anything else is read as UTF-8 text.
 * Responds with `{ content, type, title, isHtml }`, or 404 when the stored
 * file is missing.
 */
export const getDocumentContent = async (req, res, next) => {
  try {
    const { id } = req.params
    const { raw } = req.query
    const docPath = path.join(process.cwd(), 'data/documents', id)

    // 404 when the stored file does not exist.
    try {
      await fs.access(docPath)
    } catch (err) {
      logger.error('Document file not found:', id)
      return res.status(404).json({
        message: 'Document not found',
        details: 'The requested document does not exist'
      })
    }

    // Metadata determines the file type; its absence is tolerated.
    const metaPath = `${docPath}.meta.json`
    let meta = {}
    try {
      const metaContent = await fs.readFile(metaPath, 'utf8')
      meta = JSON.parse(metaContent)
    } catch (err) {
      logger.warn('Metadata not found for document:', id)
    }

    const fileType = meta.type || path.extname(meta.originalName || '').slice(1).toLowerCase()

    // Raw PDF requested: stream the file bytes directly.
    if (raw === 'true' && fileType === 'pdf') {
      try {
        const stat = await fs.stat(docPath)
        res.setHeader('Content-Type', 'application/pdf')
        res.setHeader('Content-Length', stat.size)
        res.setHeader('Accept-Ranges', 'bytes')
        res.setHeader('Cache-Control', 'public, max-age=3600')

        const stream = createReadStream(docPath)

        // After headers are sent, stream errors can only be logged.
        stream.on('error', (error) => {
          logger.error('Error streaming PDF:', error)
          if (!res.headersSent) {
            res.status(500).json({
              message: 'Failed to stream PDF file',
              error: error.message
            })
          }
        })

        stream.pipe(res)
        return
      } catch (error) {
        logger.error('Failed to stream PDF:', error)
        return next(error)
      }
    }

    // Extract text/HTML content by type. Each case body is braced so its
    // `const` declarations are scoped to that case (the unbraced originals
    // shared one lexical scope across all cases).
    let content = ''
    let isHtml = false
    try {
      switch (fileType) {
        case 'pdf': {
          const dataBuffer = await fs.readFile(docPath)
          try {
            const parser = await initPdfParse()
            const pdfData = await parser(dataBuffer)
            content = pdfData.text || '无法提取 PDF 文本内容'
            // Normalize line endings and collapse runs of blank lines.
            content = content
              .replace(/\r\n/g, '\n')
              .replace(/\n{3,}/g, '\n\n')
              .trim()
          } catch (pdfError) {
            logger.error('Failed to parse PDF:', {
              error: pdfError,
              id,
              fileSize: dataBuffer.length
            })
            content = 'PDF 文件解析失败'
          }
          break
        }

        case 'docx': {
          try {
            // Convert to HTML so formatting survives.
            const result = await mammoth.convertToHtml({ path: docPath })
            content = result.value || '无法提取 Word 文档内容'
            isHtml = true

            if (result.messages.length > 0) {
              logger.warn('Word document conversion warnings:', {
                id,
                messages: result.messages
              })
            }
          } catch (docxError) {
            logger.error('Failed to parse DOCX:', {
              error: docxError,
              id
            })
            content = 'Word 文档解析失败'
          }
          break
        }

        case 'doc':
          // Legacy binary .doc is not previewable.
          content = '不支持预览旧版 Word 文档格式 (.doc)'
          break

        case 'md':
        case 'markdown': {
          try {
            const mdContent = await fs.readFile(docPath, 'utf8')
            // NOTE(review): `headerIds`, `mangle` and `sanitize` were removed
            // from marked in v5+ — confirm the installed version accepts them.
            content = marked(mdContent, {
              gfm: true,       // GitHub-flavored Markdown
              breaks: true,    // render single newlines as <br>
              headerIds: true,
              mangle: false,
              sanitize: false
            })
            isHtml = true
          } catch (mdError) {
            logger.error('Failed to parse Markdown:', {
              error: mdError,
              id
            })
            content = 'Markdown 文件解析失败'
          }
          break
        }

        default:
          // Plain-text fallback for every other extension.
          content = await fs.readFile(docPath, 'utf8')
      }

      res.json({
        content,
        type: fileType,
        title: meta.title || meta.originalName,
        isHtml
      })
    } catch (error) {
      logger.error('Failed to read document content:', {
        error,
        type: fileType,
        id
      })
      throw error
    }
  } catch (error) {
    logger.error('Failed to get document content:', error)
    next(error)
  }
}

/**
 * Delete a document: its vectors (when vectorized), the stored file, and the
 * metadata sidecar. File-deletion failures are logged but do not fail the
 * request.
 */
export const deleteDocument = async (req, res, next) => {
  try {
    const { id } = req.params
    const docPath = path.join(process.cwd(), 'data/documents', id)
    const metaPath = `${docPath}.meta.json`

    // Read metadata to learn the document's status; a missing sidecar is not
    // fatal — deletion proceeds regardless.
    let meta = {}
    try {
      const metaContent = await fs.readFile(metaPath, 'utf8')
      meta = JSON.parse(metaContent)
    } catch (err) {
      logger.warn('Document metadata not found:', id)
    }

    // Remove stored vectors first so a later file-deletion failure cannot
    // leave orphaned vectors behind.
    if (meta.status === 'vectorized') {
      await vectorStore.deleteVectors(`parent_id == "${id}"`)
      logger.info('Deleted document vectors:', id)
    }

    // Delete the file and its sidecar independently: previously a failure on
    // the document file also skipped the metadata unlink, orphaning the
    // sidecar.
    const results = await Promise.allSettled([
      fs.unlink(docPath),
      fs.unlink(metaPath)
    ])
    const failures = results.filter(result => result.status === 'rejected')
    if (failures.length > 0) {
      logger.warn('Failed to delete some document files:', failures.map(f => f.reason))
    } else {
      logger.info('Deleted document files:', id)
    }

    res.json({
      message: 'Document deleted successfully',
      id
    })
  } catch (error) {
    logger.error('Failed to delete document:', {
      id: req.params.id,
      error: error.message,
      stack: error.stack
    })
    next(error)
  }
}

/**
 * Return a document's vectorized chunks, paginated, plus corpus-level
 * statistics. 404s when the document metadata or its vectors do not exist.
 * Query params: `page` (1-based, default 1), `pageSize` (default 10).
 */
export const getVectorizedContent = async (req, res, next) => {
  const { id } = req.params
  const page = Number(req.query.page || 1)
  const pageSize = Number(req.query.pageSize || 10)

  try {
    // Load the metadata sidecar; without it the document does not exist.
    const metaPath = path.join(process.cwd(), 'data/documents', `${id}.meta.json`)
    let meta
    try {
      const metaContent = await fs.readFile(metaPath, 'utf-8')
      meta = JSON.parse(metaContent)
    } catch (err) {
      logger.error('Failed to read document metadata:', { id, error: err.message })
      return res.status(404).json({
        message: 'Document not found',
        details: 'The document metadata does not exist'
      })
    }

    // Fetch every chunk stored for this document.
    const chunks = await vectorStore.getDocumentChunks(id)

    if (!chunks || chunks.length === 0) {
      logger.warn('No vectorized content found for document:', { id })
      return res.status(404).json({
        message: 'No vectorized content found',
        details: 'The document has not been vectorized yet'
      })
    }

    // Sort a copy by chunk_index (Array.prototype.sort mutates in place; the
    // original sorted the store's array directly).
    const sortedChunks = [...chunks].sort((a, b) => a.chunk_index - b.chunk_index)

    // Paginate the sorted chunks.
    const start = (page - 1) * pageSize
    const paginatedChunks = sortedChunks.slice(start, start + pageSize)

    // Compute corpus totals in a single pass (previously three separate
    // reductions over the same array).
    let totalCharacters = 0
    let totalWords = 0
    for (const chunk of chunks) {
      totalCharacters += chunk.content.length
      totalWords += chunk.content.trim().split(/\s+/).length
    }

    const response = {
      documentInfo: {
        id: meta.id,
        title: meta.title || meta.originalName,
        type: meta.type,
        vectorizedAt: meta.vectorizedAt,
        totalChunks: meta.chunksCount
      },
      chunks: paginatedChunks.map(chunk => ({
        id: chunk.id,
        index: +chunk.chunk_index,
        content: chunk.content,
        preview: chunk.content.length > 100
          ? chunk.content.substring(0, 100) + '...'
          : chunk.content,
        stats: {
          characters: chunk.content.length,
          words: chunk.content.trim().split(/\s+/).length
        }
      })),
      pagination: {
        total: chunks.length,
        page,
        pageSize,
        totalPages: Math.ceil(chunks.length / pageSize)
      },
      summary: {
        totalChunks: chunks.length,
        totalCharacters,
        totalWords,
        averageChunkSize: Math.round(totalCharacters / chunks.length)
      }
    }

    res.json(response)
  } catch (error) {
    logger.error('Failed to get vectorized content:', {
      id: req.params.id,
      error: error.message,
      stack: error.stack
    })
    next(error)
  }
}

/**
 * Remove a document's vectors from the vector store and reset its metadata
 * so it can be vectorized again. 404s when the metadata sidecar is missing.
 */
export const deleteVectorizedContent = async (req, res, next) => {
  const { id } = req.params

  try {
    // Load the metadata sidecar; without it the document does not exist.
    const metaPath = path.join(process.cwd(), 'data/documents', `${id}.meta.json`)
    let meta
    try {
      meta = JSON.parse(await fs.readFile(metaPath, 'utf-8'))
    } catch (err) {
      return res.status(404).json({
        message: 'Document not found',
        details: 'The document metadata does not exist'
      })
    }

    // Drop every vector belonging to this document.
    await vectorStore.deleteVectors(`parent_id == "${id}"`)
    logger.info('Deleted document vectors:', id)

    // Reset the document to the 'processed' state so re-vectorization is
    // allowed, and persist the updated sidecar.
    Object.assign(meta, {
      status: 'processed',
      vectorizedAt: null,
      chunksCount: 0
    })
    await fs.writeFile(metaPath, JSON.stringify(meta, null, 2))

    res.json({
      message: 'Vectorized content deleted successfully',
      documentId: id,
      status: meta.status
    })
  } catch (error) {
    logger.error('Failed to delete vectorized content:', {
      id: req.params.id,
      error: error.message,
      stack: error.stack
    })
    next(error)
  }
}

/**
 * Preview how a document would be split into chunks with the given
 * `chunkSize`/`overlap` (falling back to the embedding defaults), without
 * vectorizing anything. Only the first 10,000 characters are chunked;
 * `hasMore` signals that the document was truncated.
 */
export const previewDocumentChunks = async (req, res, next) => {
  const { id } = req.params
  const defaultParams = modelService.config.embedding.parameters
  const chunkSize = Number(req.body.chunkSize) || defaultParams.chunkSize
  const overlap = Number(req.body.overlap) || defaultParams.overlap
  const previewLimit = 10000 // cap on how much text is chunked for the preview

  try {
    // Read the metadata sidecar and the extracted text in parallel.
    const metaPath = path.join(process.cwd(), 'data/documents', `${id}.meta.json`)
    const filePath = path.join(process.cwd(), 'data/documents', id)

    const [metaContent, fullContent] = await Promise.all([
      fs.readFile(metaPath, 'utf-8'),
      processFile(filePath)
    ])

    const meta = JSON.parse(metaContent)

    // Truncate to the preview window before chunking.
    const content = fullContent.slice(0, previewLimit)
    const hasMore = fullContent.length > previewLimit

    const chunks = TextChunker.splitText(content, { chunkSize, overlap })
    const lengths = chunks.map(chunk => chunk.length)
    const hasChunks = chunks.length > 0

    const previewChunks = chunks.map((chunk, index) => ({
      index,
      content: chunk.content,
      length: chunk.length
    }))

    res.json({
      documentId: id,
      documentName: meta.title,
      documentType: meta.type,
      totalLength: fullContent.length,
      previewLength: content.length,
      hasMore,
      chunks: previewChunks,
      stats: {
        totalChunks: chunks.length,
        // Rough estimate: ignores overlap, so it undercounts slightly.
        estimatedTotalChunks: Math.ceil(fullContent.length / chunkSize),
        // Guard the empty-document case: min/max of no args are ±Infinity
        // and the average is NaN, all of which JSON-serialize to null.
        averageChunkSize: hasChunks
          ? Math.round(lengths.reduce((sum, len) => sum + len, 0) / chunks.length)
          : 0,
        chunkSize,
        overlap,
        minChunkSize: hasChunks ? Math.min(...lengths) : 0,
        maxChunkSize: hasChunks ? Math.max(...lengths) : 0
      }
    })
  } catch (error) {
    logger.error('Failed to preview document chunks:', error)
    next(error)
  }
}