import express from 'express';
import openai from 'openai';
import { query } from '../db.js';
import { authenticateToken } from '../middleware/authMiddleware.js';
import { SERVER_CONFIG } from '../config.js';

// Express 子路由：本文件导出的 router 承载文档分析相关接口（由上层 server 挂载）
const router = express.Router();

// OpenAI client factory (supports per-user overrides in request body if needed)
// OpenAI client factory (supports per-user overrides in request body if needed)
// baseURL 优先用显式传入值，其次回退到服务器配置；两者都为空时交由 SDK 使用默认地址。
const createOpenAIClient = (apiKey, baseURL) => {
  const resolvedBaseURL = baseURL || SERVER_CONFIG.AI_BASE_URL;
  const options = resolvedBaseURL ? { apiKey, baseURL: resolvedBaseURL } : { apiKey };
  return new openai.OpenAI(options);
};

// Helper: upsert tags and link to document
// Helper: upsert tags and link to document
// 将关键词规范为标签名（截断为最多4个字、过滤过短项），先清空旧关联再重建。
// 非数组输入直接忽略；依赖 tags.name 上的唯一索引使 INSERT IGNORE 生效。
const upsertTagsForDocument = async (documentId, keywords) => {
  if (!Array.isArray(keywords)) return;

  // 先清除该文档的所有旧标签关联
  await query('DELETE FROM document_tags WHERE document_id = ?', [documentId]);

  for (const raw of keywords) {
    const name = String(raw).trim().slice(0, 4); // 限制标签长度为4个字
    if (!name || name.length < 2) continue;
    // Insert tag if not exists（结果无需读取，修复：移除未使用的返回值变量）
    await query('INSERT IGNORE INTO tags (name) VALUES (?)', [name]);
    // Fetch tag id
    const [tag] = await query('SELECT id FROM tags WHERE name = ?', [name]);
    if (tag) {
      await query('INSERT IGNORE INTO document_tags (document_id, tag_id) VALUES (?, ?)', [documentId, tag.id]);
    }
  }
};

// Helper: save/update analysis
// Helper: save/update analysis
// summary 为 null 时仅更新关键词（保留数据库中已有摘要），否则同时更新摘要与关键词。
// 使用 INSERT ... ON DUPLICATE KEY UPDATE 实现 upsert（document_id 需有唯一约束）。
const saveDocumentAnalysis = async (documentId, summary, keywordsArray) => {
  if (summary === null) {
    // 只更新关键词，不清空摘要
    console.log(`只更新关键词，保持现有摘要，文档ID: ${documentId}`);
    await query(
      'INSERT INTO document_analysis (document_id, keywords, created_at, updated_at) VALUES (?, ?, NOW(), NOW()) ON DUPLICATE KEY UPDATE keywords = VALUES(keywords), updated_at = NOW()',
      [documentId, JSON.stringify(keywordsArray || [])]
    );
    return;
  }

  // 更新摘要和关键词
  const safeSummary = String(summary || '');
  const keywordsJson = JSON.stringify(Array.isArray(keywordsArray) ? keywordsArray : []);
  console.log(`更新摘要和关键词，文档ID: ${documentId}`);
  await query(
    'INSERT INTO document_analysis (document_id, summary, keywords, created_at, updated_at) VALUES (?, ?, ?, NOW(), NOW()) ON DUPLICATE KEY UPDATE summary = VALUES(summary), keywords = VALUES(keywords), updated_at = NOW()',
    [documentId, safeSummary, keywordsJson]
  );
};

// -------- Helpers: sanitize and fallback keyword extraction --------
// -------- Helpers: sanitize and fallback keyword extraction --------
// 去除模型输出外层的 Markdown 代码围栏（```json ... ``` 或 ``` ... ```）
// 以及意外残留的首尾反引号；空输入返回空字符串。
const stripCodeFences = (text) => {
  if (!text) return '';
  let cleaned = String(text).trim();
  if (cleaned.startsWith('```')) {
    // 去掉开头的围栏行（可带语言标记）与结尾围栏
    cleaned = cleaned
      .replace(/^```[a-zA-Z]*\s*/, '')
      .replace(/\s*```\s*$/, '');
  }
  // 再剥离可能残留的首尾反引号
  cleaned = cleaned.replace(/^`+/, '').replace(/`+$/, '');
  return cleaned.trim();
};

// 兜底关键词提取：当AI未返回可用关键词时，基于词频从正文抽取最多 max 个关键词。
// 流程：清洗特殊字符/链接/JSON字段名 → 按标点空白分词 → 过滤停用词、纯数字、超长词
// → 词频统计 → 过滤长度(2-4字) → 按频次取前 max 个。
const fallbackExtractKeywords = (content, max = 6) => {
  if (!content) return [];
  const text = String(content)
    .replace(/[#*_>`\-{}"\[\]:]/g, ' ') // 移除更多特殊字符
    .replace(/https?:[^\s)]+/g, ' ')
    .replace(/summary|keywords/gi, ' ') // 移除JSON字段名
    .slice(0, 4000);
  // split by punctuation/space
  const tokens = text.split(/[\s，。、"""',.;:!?()\[\]<>]+/).filter(Boolean);
  // enhanced stopwords
  const stop = new Set(['的','了','和','与','及','是','在','对','为','或','与','一个','进行','通过','以及','并','等','通过','使用','使用了','可以','我们','你','我','他','她','它','他们','这些','那些','本','该','有','没有','会','能','要','从','到','把','被','给','让','使','由','按','根据','关于','对于','因为','所以','但是','然而','虽然','尽管','如果','假如','当','while','when','if','the','and','or','but','in','on','at','to','for','of','with','by']);
  const freq = new Map();
  for (const tok of tokens) {
    const w = tok.trim();
    if (w.length < 2 || w.length > 10) continue; // 限制长度
    if (stop.has(w.toLowerCase())) continue;
    if (/^\d+$/.test(w)) continue; // 跳过纯数字
    freq.set(w, (freq.get(w) || 0) + 1);
  }
  const arr = Array.from(freq.entries())
    .sort((a, b) => b[1] - a[1])
    .map(([w]) => w)
    // 修复：先过滤长度（2-4个字）再截取前 max 个；
    // 原实现先 slice 再 filter，高频长词会挤占名额导致结果偏少甚至为空
    .filter(w => w.length >= 2 && w.length <= 4)
    .slice(0, max);
  console.log('fallbackExtractKeywords结果:', arr);
  return arr;
};

// 清洗摘要为纯文本，去除围栏/反引号/花括号等特殊符号，并限制长度
// 清洗摘要为纯文本：去除代码围栏、反引号/波浪号、各类括号与尖括号，
// 折叠连续空白，并截断到 maxLen 个字符。
const sanitizeSummary = (text, maxLen = 350) => {
  const cleaned = stripCodeFences(text)
    .replace(/[`~]/g, '')
    .replace(/[{}\[\]]/g, '')
    .replace(/[<>]/g, '')
    .replace(/\s+/g, ' ')
    .trim();
  return cleaned.length > maxLen ? cleaned.slice(0, maxLen) : cleaned;
};

// 兜底摘要：AI 摘要不可用时，从正文剔除代码块与 Markdown 标记，
// 折叠空白后直接截取前 maxLen 个字符。
const generateFallbackSummary = (content, maxLen = 350) => {
  const plain = String(content || '')
    .replace(/```[\s\S]*?```/g, ' ')
    .replace(/[#*_>`\-]/g, ' ')
    .replace(/\s+/g, ' ')
    .trim();
  return plain.slice(0, maxLen);
};

// ========== Streaming process state (similar pattern to aiRoutes) ==========
// 进程内任务状态表：processId -> 状态对象（status/progress/message/error/summary/keywords/result）。
// 存在模块级 Map 中，仅对当前 Node 进程有效 —— NOTE(review): 多实例部署时轮询可能落到
// 不同进程，需外部存储，待确认部署方式。
const processStatusMap = new Map();
// 流式分析任务的生命周期状态枚举
const ProcessStatus = {
  PENDING: 'pending',       // 已创建，等待开始
  PROCESSING: 'processing', // AI 生成中
  SAVING: 'saving',         // 正在写入数据库
  COMPLETED: 'completed',   // 成功结束
  FAILED: 'failed'          // 失败（error 字段携带原因）
};

// 生成任务ID：毫秒时间戳的36进制 + 最多6位随机后缀（非加密用途，仅作轮询句柄）
const generateProcessId = () => {
  const timePart = Date.now().toString(36);
  const randomPart = Math.random().toString(36).slice(2, 8);
  return `${timePart}${randomPart}`;
};

// 更新（或首次创建）指定任务的状态对象，并刷新其15分钟的清理TTL。
// 修复：原实现每次调用都新建一个删除定时器且从不清除，最早的定时器会在任务
// 仍被轮询时提前删除状态，且定时器持续累积；现改为每次更新重置同一个定时器。
const updateProcess = (processId, data) => {
  if (!processStatusMap.has(processId)) {
    processStatusMap.set(processId, {
      status: ProcessStatus.PENDING,
      progress: 0,
      message: '初始化中...',
      error: null,
      summary: '',
      keywords: [],
      result: null,
      updateTime: Date.now()
    });
  }
  const s = processStatusMap.get(processId);
  Object.assign(s, data);
  s.updateTime = Date.now();
  // 重置TTL：最后一次更新15分钟后清理，避免Map无限增长
  if (s.ttlTimer) clearTimeout(s.ttlTimer);
  s.ttlTimer = setTimeout(() => processStatusMap.delete(processId), 15 * 60 * 1000);
  // Node 环境下不阻止进程正常退出
  if (typeof s.ttlTimer.unref === 'function') s.ttlTimer.unref();
};

// 启动流式分析：校验文档归属后立即返回 processId（202），
// AI 调用在后台异步执行，前端通过轮询 /stream/process-status/:processId 获取进度与结果。
router.post('/stream/:docId/start', authenticateToken, async (req, res) => {
  try {
    const userId = req.user.userId;
    const docId = Number(req.params.docId);
    const [doc] = await query('SELECT id, content, title FROM documents WHERE id = ? AND user_id = ?', [docId, userId]);
    if (!doc) return res.status(404).json({ message: '文档不存在或无权访问' });

    const processId = generateProcessId();
    updateProcess(processId, { status: ProcessStatus.PENDING, progress: 5, message: '准备分析...' });
    res.status(202).json({ processId });

    // 异步执行（fire-and-forget，进度与结果写入 processStatusMap）
    (async () => {
      try {
        const { apiKey, baseUrl, modelSummary } = req.body || {};
        // 配置优先级：请求体 > 用户自定义设置 > 服务器配置（与同步分析一致）
        let effectiveApiKey = apiKey;
        let effectiveBaseUrl = baseUrl;
        let userDefaultModel = null;
        try {
          const [userSettings] = await query('SELECT api_key, base_url, model FROM user_ai_settings WHERE user_id = ? LIMIT 1', [userId]);
          if (!effectiveApiKey) effectiveApiKey = userSettings?.api_key;
          if (!effectiveBaseUrl) effectiveBaseUrl = userSettings?.base_url;
          userDefaultModel = userSettings?.model || null;
        } catch {
          // 读取用户设置失败时继续使用服务器默认配置
        }
        effectiveApiKey = effectiveApiKey || SERVER_CONFIG.AI_API_KEY;
        effectiveBaseUrl = effectiveBaseUrl || SERVER_CONFIG.AI_BASE_URL;
        console.log('AI配置 - API Key存在:', !!effectiveApiKey);
        console.log('AI配置 - Base URL:', effectiveBaseUrl);
        if (!effectiveApiKey) {
          updateProcess(processId, { status: ProcessStatus.FAILED, error: new Error('缺少API Key') });
          return;
        }
        const client = createOpenAIClient(effectiveApiKey, effectiveBaseUrl);
        console.log('OpenAI客户端创建完成');

        // 截断内容，为提示词留出token空间
        const content = (doc.content || '').slice(0, 4000);
        if (!content.trim()) {
          updateProcess(processId, { status: ProcessStatus.FAILED, error: new Error('文档内容为空') });
          return;
        }
        console.log('文档内容长度:', content.length);

        const summaryModel = modelSummary || userDefaultModel || SERVER_CONFIG.AI_MODEL || 'gpt-4o-mini';

        // deepseek-r1 系列对流式输出支持不稳定，后续直接走非流式请求
        const forceNonStreaming = summaryModel.includes('deepseek-r1');
        if (forceNonStreaming) {
          console.log('检测到deepseek-r1模型，可能有输出限制，建议更换模型');
        }

        const fullPrompt = `你是中文文档分析助手。严格按以下JSON格式输出：
{"summary":"纯文字摘要约300字","keywords":["词1","词2","词3","词4"]}
要求：
1. summary：纯文字摘要，不分点不分段，连续叙述，不超过300字，包含句号逗号等标点符号
2. keywords：3-6个中文名词或动词名词组合，每个不超过4字，如"剧本杀""远程控制""数据分析"，禁止英文字符
3. 不要输出解释说明或代码围栏
文档内容：\n${content}`;
        updateProcess(processId, { status: ProcessStatus.PROCESSING, progress: 30, message: '生成摘要与关键词中...' });

        // 非流式请求（原实现在两处复制了相同代码，这里提取复用）
        const requestNonStreaming = async () => {
          const resp = await client.chat.completions.create({
            model: summaryModel,
            messages: [{ role: 'user', content: fullPrompt }],
            temperature: 0.2,
            max_tokens: 800
          });
          const out = resp.choices?.[0]?.message?.content || '';
          console.log('非流式请求完成，内容长度:', out.length);
          return out;
        };

        let collected = '';

        if (forceNonStreaming) {
          console.log('强制使用非流式请求，模型:', summaryModel);
          collected = await requestNonStreaming();
        } else {
          try {
            console.log('开始流式请求，模型:', summaryModel);
            console.log('提示词:', fullPrompt.slice(0, 500) + '...');
            const stream = await client.chat.completions.create({
              model: summaryModel,
              messages: [{ role: 'user', content: fullPrompt }],
              temperature: 0.2,
              max_tokens: 800,
              stream: true
            });
            let chunkCount = 0;
            for await (const chunk of stream) {
              chunkCount++;
              const delta = chunk.choices?.[0]?.delta?.content || '';
              const finishReason = chunk.choices?.[0]?.finish_reason;

              if (delta) {
                collected += delta;
                // 把最近200字回显到进度消息，供前端展示生成过程
                const last = collected.slice(-200);
                updateProcess(processId, { status: ProcessStatus.PROCESSING, progress: Math.min(80, (30 + Math.floor(collected.length / 20))), message: `生成中...\n${last}` });
              }

              if (finishReason) {
                console.log(`流式结束，原因: ${finishReason}, 总chunk数: ${chunkCount}, 收集长度: ${collected.length}`);
                break;
              }
            }
            console.log('流式请求完成，收集到内容长度:', collected.length);
            // 响应过短视为被截断，抛错进入非流式降级
            if (collected.length < 100) {
              console.log('流式响应太短，强制重试非流式');
              throw new Error('流式响应太短，重试非流式');
            }
          } catch (e) {
            console.warn('流式请求失败，降级为非流式:', e?.message);
            collected = await requestNonStreaming();
          }
        }

        console.log('流式AI原始返回:', collected);
        let parsed;
        try {
          const cleanText = stripCodeFences(collected);
          console.log('流式清理后文本:', cleanText);
          parsed = JSON.parse(cleanText);
        } catch {
          console.warn('流式JSON解析失败，尝试正则提取');
          // 强化正则提取
          const summaryMatch = collected.match(/"summary"\s*:\s*"([^"]*(?:\\.[^"]*)*)"/s);
          const keywordsMatch = collected.match(/"keywords"\s*:\s*\[(.*?)\]/s);

          console.log('summaryMatch:', summaryMatch);
          console.log('keywordsMatch:', keywordsMatch);

          let summary = '';
          let keywords = [];

          if (summaryMatch) {
            summary = summaryMatch[1].replace(/\\"/g, '"').slice(0, 250);
          } else {
            // 尝试提取第一段连续文字作为摘要兜底
            const textMatch = collected.match(/[\u4e00-\u9fa5][^{}"]*[\u4e00-\u9fa5]/);
            summary = textMatch ? textMatch[0].trim().slice(0, 200) : collected.replace(/[{}":\[\]]/g, '').slice(0, 150);
          }

          if (keywordsMatch) {
            const keywordStr = keywordsMatch[1];
            keywords = keywordStr.split(/[,，]/).map(k => k.replace(/["\[\]]/g, '').trim()).filter(Boolean).slice(0, 6);
          } else {
            // 如果没有找到keywords数组，尝试从文本中提取关键词
            keywords = fallbackExtractKeywords(collected);
          }

          if (!keywords.length) {
            keywords = fallbackExtractKeywords(content);
          }

          console.log('流式解析结果 - summary:', summary);
          console.log('流式解析结果 - keywords:', keywords);

          parsed = { summary, keywords };
        }

        let summary = sanitizeSummary(String(parsed.summary || ''), 350);
        if (!summary || summary.length < 10) {
          console.log('摘要为空或过短，使用fallback生成');
          summary = generateFallbackSummary(content, 350);
        }

        let keywords = Array.isArray(parsed.keywords) ? parsed.keywords : [];
        if (keywords.length === 0) {
          console.log('关键词为空，使用fallback提取');
          keywords = fallbackExtractKeywords(content);
        }

        // 清理关键词：去除标点符号，限制长度不超过4个字，只保留中文
        const cleanKeywords = keywords
          .map(k => String(k).replace(/[，。、；：""''？！【】（）]/g, '').trim())
          .filter(k => {
            // 只保留中文字符，过滤掉包含英文、数字、特殊符号的关键词
            const chineseOnly = /^[\u4e00-\u9fa5]+$/.test(k);
            return k.length > 0 && k.length <= 4 && chineseOnly;
          })
          .slice(0, 6); // 最多6个

        console.log('最终摘要:', summary.slice(0, 100) + '...');
        console.log('最终关键词:', cleanKeywords);

        // 保存DB
        updateProcess(processId, { status: ProcessStatus.SAVING, progress: 90, message: '保存分析结果中...' });
        await saveDocumentAnalysis(docId, summary, cleanKeywords);
        await upsertTagsForDocument(docId, cleanKeywords);

        // 修复：完成状态返回与落库一致的 cleanKeywords（原实现返回未清洗的 keywords）
        updateProcess(processId, { status: ProcessStatus.COMPLETED, progress: 100, message: '分析完成', summary, keywords: cleanKeywords, result: { docId } });
      } catch (err) {
        updateProcess(processId, { status: ProcessStatus.FAILED, error: err, message: err?.message || '分析失败' });
      }
    })();
  } catch (error) {
    res.status(500).json({ message: '启动分析失败', error: error.message });
  }
});

// 轮询进度：返回指定任务当前状态的快照；任务不存在（或已过TTL被清理）时返回404
router.get('/stream/process-status/:processId', authenticateToken, (req, res) => {
  const { processId } = req.params;
  const state = processStatusMap.get(processId);
  if (!state) {
    return res.status(404).json({ message: '任务不存在' });
  }
  const { status, progress, message, error, summary, keywords, result } = state;
  res.json({ status, progress, message, error: error ? error.message : null, summary, keywords, result });
});

// Helper: with timeout
// 将任意 promise 与一个超时竞速；超时则以 onTimeoutMessage 为消息 reject。
// 修复：原实现从不清除定时器，promise 先完成后定时器仍会把事件循环拖住整个 ms 时长。
const withTimeout = async (promise, ms, onTimeoutMessage = '请求超时') => {
  let timer;
  try {
    return await Promise.race([
      promise,
      new Promise((_, reject) => {
        timer = setTimeout(() => reject(new Error(onTimeoutMessage)), ms);
      })
    ]);
  } finally {
    clearTimeout(timer);
  }
};

// POST /api/analysis/:docId/analyze - summarize + keywords + tags
// 同步分析：调用AI生成摘要与关键词，写入 document_analysis 并重建标签关联后返回结果。
router.post('/:docId/analyze', authenticateToken, async (req, res) => {
  try {
    const userId = req.user.userId;
    const docId = Number(req.params.docId);
    if (!Number.isFinite(docId)) return res.status(400).json({ message: '无效的文档ID' });

    // Ownership check
    const [doc] = await query('SELECT id, content, title FROM documents WHERE id = ? AND user_id = ?', [docId, userId]);
    if (!doc) return res.status(404).json({ message: '文档不存在或无权访问' });

    const { apiKey, baseUrl, modelSummary } = req.body || {};

    // 优先级：请求体 > 用户自定义设置 > 服务器配置
    let effectiveApiKey = apiKey;
    let effectiveBaseUrl = baseUrl;

    let userDefaultModel = null;
    try {
      if (!effectiveApiKey || !effectiveBaseUrl || !modelSummary) {
        const [userSettings] = await query(
          'SELECT api_key, base_url, model FROM user_ai_settings WHERE user_id = ? LIMIT 1',
          [userId]
        );
        if (userSettings) {
          effectiveApiKey = effectiveApiKey || userSettings.api_key;
          effectiveBaseUrl = effectiveBaseUrl || userSettings.base_url;
          userDefaultModel = userSettings.model || null;
        }
      }
    } catch (e) {
      console.warn('读取用户AI设置失败，继续使用默认配置:', e?.message);
    }

    effectiveApiKey = effectiveApiKey || SERVER_CONFIG.AI_API_KEY;
    effectiveBaseUrl = effectiveBaseUrl || SERVER_CONFIG.AI_BASE_URL; // 可为空，SDK将使用默认

    // 仅强制要求 apiKey；baseUrl 可为空（OpenAI SDK 默认 https://api.openai.com/v1）
    if (!effectiveApiKey) {
      return res.status(400).json({
        message: 'AI服务未配置，请在系统或用户设置中配置 API Key 后重试'
      });
    }

    const client = createOpenAIClient(effectiveApiKey, effectiveBaseUrl);

    // 校验内容
    if (!doc.content || String(doc.content).trim().length === 0) {
      return res.status(400).json({ message: '文档内容为空，无法分析' });
    }

    // Trim overly long content to speed up generation and avoid provider limits
    const maxInputChars = 4000;
    const contentForAi = (doc.content || '').slice(0, maxInputChars);
    console.log('同步分析 - 文档内容长度:', contentForAi.length);

    // Generate summary and keywords via chat
    const prompt = `阅读以下文本，输出JSON格式：
{"summary":"纯文字摘要约300字","keywords":["词1","词2","词3","词4"]}
要求：
1) summary：纯文字摘要，不分点不分段，连续叙述，不超过350字，包含句号逗号等标点符号
2) keywords：3-6个中文名词或动词名词组合，每个不超过4字，如"剧本杀""远程控制""数据分析"，禁止英文字符
3) 不要输出解释说明或代码围栏
文本：\n${contentForAi}`;

    const summaryModel = modelSummary || userDefaultModel || SERVER_CONFIG.AI_MODEL || 'gpt-4o-mini';
    let chat;
    try {
      chat = await withTimeout(
        client.chat.completions.create({
          model: summaryModel,
          messages: [
            { role: 'system', content: '你是一个只输出严格JSON结果的助手，绝不输出Markdown代码围栏或解释说明。' },
            { role: 'user', content: prompt }
          ],
          temperature: 0.2,
          max_tokens: 800
        }),
        25000,
        '生成摘要超时'
      );
    } catch (primaryErr) {
      console.warn('摘要生成失败，尝试降级参数:', primaryErr?.message);
      // 降级重试：更短的输入、更小的max_tokens
      const shorter = contentForAi.slice(0, 3000);
      const fallbackPrompt = `任务：输出严格JSON格式。字段：summary(约300字纯文字)、keywords(3-6个中文名词数组，每词≤4字)。格式：{"summary":"纯文字摘要，不分点不分段，连续叙述，不超过350字！","keywords":["词1","词2","词3"]}。内容：\n${shorter}`;
      chat = await withTimeout(
        client.chat.completions.create({
          model: summaryModel,
          messages: [
            { role: 'system', content: '你是一个只输出严格JSON结果的助手，绝不输出Markdown代码围栏或解释说明。' },
            { role: 'user', content: fallbackPrompt }
          ],
          temperature: 0.2,
          max_tokens: 600
        }),
        20000,
        '生成摘要超时'
      );
    }
    const text = chat.choices?.[0]?.message?.content?.trim() || '{}';
    console.log('AI原始返回:', text);
    let parsed = {};
    try {
      const cleanText = stripCodeFences(text);
      console.log('清理后文本:', cleanText);
      parsed = JSON.parse(cleanText);
    } catch (e) {
      console.warn('JSON解析失败，尝试正则提取:', e?.message);
      // 正则提取 summary 和 keywords
      const summaryMatch = text.match(/"summary"\s*:\s*"([^"]*(?:\\.[^"]*)*)"/);
      const keywordsMatch = text.match(/"keywords"\s*:\s*\[(.*?)\]/s);

      let summary = '';
      let keywords = [];

      if (summaryMatch) {
        summary = summaryMatch[1].replace(/\\"/g, '"').slice(0, 250);
      } else {
        summary = text.replace(/[{}"\[\]]/g, '').slice(0, 200);
      }

      if (keywordsMatch) {
        const keywordStr = keywordsMatch[1];
        keywords = keywordStr.split(/[,，]/).map(k => k.replace(/["\[\]]/g, '').trim()).filter(Boolean).slice(0, 6);
      } else {
        // 如果没有找到keywords数组，尝试从文本中提取关键词
        keywords = fallbackExtractKeywords(text);
      }

      if (!keywords.length) {
        keywords = fallbackExtractKeywords(contentForAi);
      }

      parsed = { summary, keywords };
    }
    console.log('解析结果:', JSON.stringify(parsed));
    let summary = sanitizeSummary(parsed.summary || '', 350);
    if (!summary) {
      summary = generateFallbackSummary(contentForAi, 350);
    }
    const keywords = Array.isArray(parsed.keywords) ? parsed.keywords : String(parsed.keywords || '').split(/[，,\s]+/).filter(Boolean);
    console.log('关键词处理前:', keywords);
    // 清理关键词：去除标点符号，限制长度不超过4个字，只保留中文
    const cleanKeywords = keywords
      .map(k => String(k).replace(/[，。、；：""''？！【】（）]/g, '').trim())
      .filter(k => {
        // 只保留中文字符，过滤掉包含英文、数字、特殊符号的关键词
        const chineseOnly = /^[\u4e00-\u9fa5]+$/.test(k);
        return k.length > 0 && k.length <= 4 && chineseOnly;
      })
      .slice(0, 6); // 最多6个
    console.log('关键词处理后:', cleanKeywords);

    await saveDocumentAnalysis(docId, summary, cleanKeywords);
    await upsertTagsForDocument(docId, cleanKeywords);

    // 修复：返回与落库一致的 cleanKeywords（原实现返回未清洗的 keywords，
    // 与随后 GET /:docId 读取到的数据不一致）
    res.json({ message: '分析完成', summary, keywords: cleanKeywords });
  } catch (error) {
    const providerMsg = error?.response?.data?.error?.message || error?.error?.message;
    console.error('文档分析失败:', error?.message, providerMsg || '');
    res.status(500).json({
      message: '文档分析失败',
      error: providerMsg || error.message
    });
  }
});

// GET /api/analysis/:docId - get analysis with tags
// 返回文档的摘要、关键词（keywords 列可能是JSON字符串或已反序列化对象）与标签列表。
router.get('/:docId', authenticateToken, async (req, res) => {
  try {
    const userId = req.user.userId;
    const docId = Number(req.params.docId);
    // 权限校验：文档必须属于当前用户
    const [doc] = await query('SELECT id FROM documents WHERE id = ? AND user_id = ?', [docId, userId]);
    if (!doc) return res.status(404).json({ message: '文档不存在或无权访问' });

    const [analysis] = await query('SELECT summary, keywords, updated_at FROM document_analysis WHERE document_id = ?', [docId]);
    const tags = await query('SELECT t.id, t.name FROM document_tags dt JOIN tags t ON dt.tag_id = t.id WHERE dt.document_id = ?', [docId]);

    console.log('数据库中的摘要长度:', analysis?.summary?.length);
    console.log('数据库中的摘要内容:', analysis?.summary?.slice(0, 100) + '...');

    let keywords = [];
    const rawKeywords = analysis?.keywords;
    if (rawKeywords) {
      try {
        if (typeof rawKeywords === 'string' && rawKeywords.trim().length > 0) {
          keywords = JSON.parse(rawKeywords);
        } else if (typeof rawKeywords === 'object') {
          keywords = rawKeywords;
        }
      } catch {
        console.warn('解析keywords失败，已使用空数组替代');
        keywords = [];
      }
    }

    console.log('返回分析数据 - summary:', analysis?.summary?.slice(0, 100) + '...');
    console.log('返回分析数据 - keywords:', keywords);
    console.log('返回分析数据 - tags:', tags.map(t => t.name));
    res.json({
      summary: analysis?.summary || null,
      keywords,
      tags
    });
  } catch (error) {
    console.error('获取分析失败:', error);
    res.status(500).json({ message: '获取分析失败' });
  }
});

// 解析 document_analysis.keywords 列（可能是JSON字符串或已反序列化的数组）为字符串数组。
// JSON解析失败时退化为去括号/引号后按逗号切分；无法解析时返回空数组。
const parseKeywordsColumn = (value) => {
  if (Array.isArray(value)) return value;
  if (typeof value === 'string' && value.trim().length > 0) {
    try {
      const parsed = JSON.parse(value);
      return Array.isArray(parsed) ? parsed : [];
    } catch {
      // 备用解析：可能是被格式化过的JSON字符串
      if (value.includes('[') && value.includes(']')) {
        return value.replace(/[\[\]"]/g, '').split(',').map(k => k.trim()).filter(Boolean);
      }
    }
  }
  return [];
};

// 逐个文档降级查询分析数据（批量查询失败或返回空时使用）。
const fetchAnalysisRowsOneByOne = async (docIds) => {
  const rows = [];
  for (const docId of docIds) {
    try {
      const [row] = await query('SELECT document_id, keywords FROM document_analysis WHERE document_id = ?', [docId]);
      if (row) rows.push(row);
    } catch (e) {
      console.warn(`逐个查询文档 ${docId} 失败:`, e?.message);
    }
  }
  console.log('逐个查询后的分析数据行数:', rows.length);
  return rows;
};

// GET /api/analysis/recommendations/list?limit=3 - personalized recommendations
// 基于关键词加权相似度（Jaccard 40% + 覆盖率 35% + 密度 25%）推荐相关文档。
// 种子文档优先取 currentDocId，否则取最近交互过的文档。
router.get('/recommendations/list', authenticateToken, async (req, res) => {
  try {
    const userId = req.user.userId;
    const limit = Math.min(parseInt(req.query.limit || '3', 10), 20);
    const currentDocId = req.query.currentDocId ? parseInt(req.query.currentDocId, 10) : null;

    // Get user's documents and their analysis data
    const docs = await query('SELECT d.id, d.title FROM documents d WHERE d.user_id = ? ORDER BY d.updated_at DESC LIMIT 200', [userId]);
    if (docs.length === 0) return res.json([]);

    // Choose a seed: prioritize current document if provided, otherwise use most recent interacted document
    let seedId;
    if (currentDocId && docs.some(d => d.id === currentDocId)) {
      seedId = currentDocId;
      console.log('使用当前文档作为种子文档:', seedId);
    } else {
      const [lastInteraction] = await query('SELECT document_id FROM user_interactions WHERE user_id = ? ORDER BY created_at DESC LIMIT 1', [userId]);
      seedId = lastInteraction?.document_id || docs[0].id;
      console.log('使用最近交互文档作为种子文档:', seedId);
    }

    const docIds = docs.map(d => d.id);

    // 批量获取所有文档的关键词；失败或为空时逐个降级查询
    let analysisRows = [];
    try {
      const placeholders = docIds.map(() => '?').join(',');
      // 修复：原实现把SQL字符串命名为 query，遮蔽了导入的数据库查询函数，
      // 导致 await query(query, ...) 调用一个字符串、批量查询必然抛错并总是走降级路径
      const sql = `SELECT document_id, keywords FROM document_analysis WHERE document_id IN (${placeholders})`;
      analysisRows = await query(sql, docIds);
      console.log('获取到的分析数据行数:', analysisRows.length);
      if (analysisRows.length === 0) {
        console.warn('批量查询返回空结果，尝试逐个查询...');
        analysisRows = await fetchAnalysisRowsOneByOne(docIds);
      }
    } catch (e) {
      console.error('批量查询失败:', e?.message);
      analysisRows = await fetchAnalysisRowsOneByOne(docIds);
    }

    // 构建文档ID -> 关键词集合的映射
    const idToKeywords = new Map();
    for (const r of analysisRows) {
      const validKeywords = parseKeywordsColumn(r.keywords)
        .map(k => String(k).trim())
        .filter(k => k.length > 0 && k.length <= 10);
      idToKeywords.set(r.document_id, new Set(validKeywords));
    }

    const seedKeywords = idToKeywords.get(seedId) || new Set();
    console.log('种子文档ID:', seedId, '关键词:', Array.from(seedKeywords));

    // 种子文档无关键词时的降级：先直接查库，再从正文临时提取（仅内存使用，不落库）
    if (seedKeywords.size === 0) {
      try {
        const [seedAnalysis] = await query('SELECT keywords FROM document_analysis WHERE document_id = ?', [seedId]);
        const directKeywords = seedAnalysis ? parseKeywordsColumn(seedAnalysis.keywords) : [];
        if (directKeywords.length > 0) {
          for (const kw of directKeywords) {
            seedKeywords.add(String(kw).trim());
          }
        } else {
          // 没有分析数据时退化为从正文提取关键词
          const [seedDoc] = await query('SELECT content FROM documents WHERE id = ? LIMIT 1', [seedId]);
          if (seedDoc?.content) {
            for (const kw of fallbackExtractKeywords(seedDoc.content, 6)) {
              seedKeywords.add(kw.toLowerCase().trim());
            }
          }
        }
      } catch (e) {
        console.error('查询种子文档关键词失败:', e?.message);
      }
    }

    // 计算每个候选文档与种子文档的相似度
    const candidates = [];
    for (const d of docs) {
      if (d.id === seedId) continue;

      const docKeywords = idToKeywords.get(d.id) || new Set();
      let similarityScore = 0;
      let intersectionCount = 0;

      if (seedKeywords.size > 0 && docKeywords.size > 0) {
        const intersection = [...seedKeywords].filter(x => docKeywords.has(x));
        intersectionCount = intersection.length;
        const unionSize = new Set([...seedKeywords, ...docKeywords]).size;
        const jaccardSimilarity = intersectionCount / unionSize;      // 交集/并集
        const coverageScore = intersectionCount / seedKeywords.size;  // 覆盖种子关键词的比例
        const densityScore = intersectionCount / docKeywords.size;    // 占候选文档关键词的比例
        // 综合相似度：Jaccard 40% + 覆盖率 35% + 密度 25%
        similarityScore = jaccardSimilarity * 0.4 + coverageScore * 0.35 + densityScore * 0.25;
      } else if (seedKeywords.size > 0 || docKeywords.size > 0) {
        // 仅一方有关键词：给予最低相似度而非0，保证仍可参与排序
        similarityScore = 0.01;
      }

      candidates.push({
        id: d.id,
        title: d.title,
        raw: similarityScore,
        mode: 'enhanced_keywords',
        intersectionCount,
        totalKeywords: seedKeywords.size + docKeywords.size
      });
    }

    // 原实现中 raw 与默认模式输出完全一致，这里统一按相似度降序返回前 limit 条
    candidates.sort((a, b) => b.raw - a.raw);
    res.json(candidates.slice(0, limit).map(({ id, title, raw, intersectionCount }) => ({
      id,
      title,
      score: raw,
      raw, // 保持前端兼容性
      intersectionCount
    })));
  } catch (error) {
    console.error('获取推荐失败:', error);
    res.status(500).json({ message: '获取推荐失败' });
  }
});

// 导出路由器，由上层 server 按 /api/analysis 前缀挂载 —— NOTE(review): 挂载路径以入口文件为准
export default router;


