const { documentService } = require('../services/docs.service');
const { embeddingService } = require('../services/embedding.service');
const { logger } = require('../utils/logger');

/**
 * Adapts an async Express handler so that any rejection (or synchronous
 * throw) inside it is forwarded to the error-handling middleware chain
 * via next(), instead of producing an unhandled promise rejection.
 *
 * @param {(req, res, next) => Promise<void>} handler - async route handler
 * @returns {(req, res, next) => Promise<void>} Express-compatible middleware
 */
function wrap(handler) {
  return async (req, res, next) => {
    try {
      await handler(req, res, next);
    } catch (err) {
      next(err);
    }
  };
}

/**
 * RAG endpoint: retrieves relevant docs and returns context for AI to use.
 * Client can optionally pass the context to an LLM for answer generation.
 *
 * Request body: { query: string, limit?: number }. `limit` defaults to 5
 * and is clamped to an integer in [1, 50].
 *
 * Responses:
 *   200 { query, context, documents: [{ id, title, score }], count }
 *   400 { error: 'invalid_request', message } when query is missing,
 *       not a string, or blank.
 */
const rag = wrap(async (req, res) => {
  // req.body is undefined when no body-parsing middleware ran; treat
  // that the same as an empty body instead of throwing a TypeError.
  const { query, limit = 5 } = req.body ?? {};

  // Reject missing, non-string, or whitespace-only queries up front so
  // arbitrary JSON values never reach the embedding service.
  if (typeof query !== 'string' || query.trim() === '') {
    return res.status(400).json({
      error: 'invalid_request',
      message: 'Query is required'
    });
  }

  // Clamp limit to a sane positive integer so a malformed or hostile
  // value (NaN, negative, huge) cannot drive an unbounded search.
  const parsedLimit = Number(limit);
  const maxResults =
    Number.isFinite(parsedLimit) && parsedLimit >= 1
      ? Math.min(Math.floor(parsedLimit), 50)
      : 5;

  // Embed the query, then retrieve the nearest documents. These steps
  // are inherently sequential: the search needs the vector.
  const vector = await embeddingService.embedText(query);
  const results = await documentService.searchByVector(vector, maxResults);

  // Build a numbered context string from the retrieved documents so the
  // client (or an LLM prompt) can cite sources as [1], [2], ...
  const context = results
    .map((doc, idx) => `[${idx + 1}] ${doc.title}\n${doc.content}`)
    .join('\n\n');

  res.json({
    query,
    context,
    documents: results.map((doc) => ({
      id: doc.id,
      title: doc.title,
      score: doc.score
    })),
    count: results.length
  });
});

// Public controller surface for the AI routes.
const aiController = { rag };

module.exports = { aiController };
