// Minimal Express proxy that forwards chat messages to the Hugging Face
// Inference API (CodeLlama-34b-Instruct) and appends the generated reply
// to the conversation.
const express = require('express');
const axios = require('axios'); // NOTE(review): unused — kept in case another part of the deployment relies on it; safe to delete once confirmed.
const app = express();
const PORT = process.env.PORT || 7860;

// Hugging Face API token (set via environment; never hard-code secrets).
const apiToken = process.env.API_KEY;

const HF_MODEL_URL =
  'https://api-inference.huggingface.co/models/codellama/CodeLlama-34b-Instruct-hf';

app.use(express.json());

/**
 * POST /chat
 * Body: { messages: [{role, content}, ...], temperature?, max_tokens? }
 * Responds with { messages } where the assistant's reply has been appended.
 */
app.post('/chat', async (req, res) => {
  const { messages, temperature, max_tokens } = req.body;

  // Reject malformed input up front instead of failing with a 500 later.
  if (!Array.isArray(messages)) {
    res.status(400).json({ error: 'messages must be an array' });
    return;
  }

  try {
    const { default: fetch } = await import('node-fetch');

    // The HF text-generation endpoint expects a string prompt, not a chat
    // array — flatten the conversation into "role: content" lines.
    // (Assumes each message has string `role`/`content` — confirm with callers.)
    const prompt = messages
      .map((m) => `${m.role}: ${m.content}`)
      .join('\n');

    const response = await fetch(HF_MODEL_URL, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${apiToken}`,
      },
      body: JSON.stringify({
        inputs: prompt,
        parameters: {
          // ?? (not ||) so an explicit temperature of 0 is honored.
          temperature: temperature ?? 0.7,
          max_new_tokens: max_tokens ?? 100,
        },
      }),
    });

    // Surface upstream HTTP failures (rate limits, model loading, bad token)
    // instead of silently parsing an error payload as a result.
    if (!response.ok) {
      const detail = await response.text();
      throw new Error(`Hugging Face API error ${response.status}: ${detail}`);
    }

    const data = await response.json();

    // The Inference API returns an array: [{ generated_text: "..." }].
    // Handle the bare-object shape too, for robustness across API versions.
    const generatedText = Array.isArray(data)
      ? data[0]?.generated_text
      : data.generated_text;

    // Append the generated assistant message to the conversation.
    messages.push({ role: 'assistant', content: generatedText });

    res.json({ messages });
  } catch (error) {
    console.error(error);
    res.status(500).json({ error: 'Произошла ошибка при генерации текста.' });
  }
});

app.listen(PORT, () => {
  console.log(`Server is running on port ${PORT}`);
});