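// Chain-of-thought prompting demo: decomposes a question into six sequential
// LLM calls, feeding each step's output into the next prompt. The backing
// model is the Groq API, with a local Ollama model wired up as well.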
const { default: Groq } = require("groq-sdk");
const { Ollama } = require("ollama");
// Read the API key from the environment (GROQ_API_KEY) instead of
// hardcoding a secret in source control.
const groq = new Groq({ apiKey: process.env.GROQ_API_KEY });
// Send a single user message to Groq and return the model's reply text.
async function getGroqChatCompletion(q) {
  const data = await groq.chat.completions.create({
    messages: [
      {
        role: "user",
        content: q,
      },
    ],
    model: "llama-3.1-70b-versatile",
  });
  return data.choices[0]?.message?.content || "";
}
const ollama = new Ollama({ host: "http://localhost:11434" });

// Send a prompt to the LLM. Groq is tried first; if that call fails (for
// example, no API key or no network), fall back to the local Ollama model.
async function callLLM(query) {
  try {
    return await getGroqChatCompletion(query);
  } catch (groqError) {
    console.error("Groq call failed, falling back to Ollama:", groqError);
  }
  try {
    console.log(`Prompt: ${query}`); // Log the prompt being sent
    const response = await ollama.generate({
      model: "qwen2.5-coder",
      prompt: query,
    });
    const output = response.response.trim();
    console.log(`Response: ${output}`); // Log the response received
    return output;
  } catch (error) {
    console.error("Error:", error);
    throw error;
  }
}
// Build a chain of thought for the input question: six sequential LLM calls,
// each consuming the previous step's output.
async function chainOfThought(inputQuery) {
  const thoughtChain = [];
  console.log(`\nInput Question: ${inputQuery}\n`);

  // Step 1: decompose the question into key points.
  const step1 = `Break down the following question into key points: "${inputQuery}"`;
  const understanding = await callLLM(step1);
  thoughtChain.push(understanding);

  // Step 2: gather background information for those key points.
  const step2 = `Given the key points: "${understanding}", provide any relevant background information.`;
  const context = await callLLM(step2);
  thoughtChain.push(context);

  // Step 3: analyze the question in light of that background.
  const step3 = `Analyze the following question based on its background information: "${inputQuery}". What are the different aspects to consider?`;
  const analysis = await callLLM(step3);
  thoughtChain.push(analysis);

  // Step 4: generate candidate solutions or insights.
  const step4 = `Based on the analysis: "${analysis}", generate possible solutions or insights.`;
  const solutions = await callLLM(step4);
  thoughtChain.push(solutions);

  // Step 5: weigh the candidates and refine the best approach.
  const step5 = `Given the possible solutions: "${solutions}", evaluate the pros and cons, or refine the best approach.`;
  const evaluation = await callLLM(step5);
  thoughtChain.push(evaluation);

  // Step 6: distill everything into a final answer.
  const step6 = `Based on the evaluation: "${evaluation}", provide a concise and well-reasoned answer to the original question.`;
  const conclusion = await callLLM(step6);
  thoughtChain.push(conclusion);

  return {
    thoughtProcess: thoughtChain,
    finalAnswer: conclusion,
  };
}
// Test the chain with an example question.
const inputQuestion =
  "How can we improve network security in a large organization?";

chainOfThought(inputQuestion)
  .then((response) => {
    console.log("\nFinal Thought Process:", response.thoughtProcess);
    console.log("Final Answer:", response.finalAnswer);
  })
  .catch((error) => {
    console.error("Error:", error);
  });
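// To run this sketch: `node index.js` with GROQ_API_KEY exported in the
// environment, or with an Ollama server listening on localhost:11434 so the
// fallback path in callLLM can take over. (GROQ_API_KEY is the variable name
// assumed in the Groq client setup above.)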