import ollama from "ollama";

/**
 * Send a single-turn prompt to the local Ollama chat API and return the
 * model's reply text.
 *
 * @param {string} query - The prompt to send as a single user message.
 * @returns {Promise<string>} The assistant's reply, trimmed of whitespace.
 * @throws Re-throws any error from the Ollama client after logging it.
 */
async function callLLM(query) {
  try {
    const response = await ollama.chat({
      // Bug fix: the Ollama registry tag is "qwen2.5-coder" (no hyphen
      // before the version); "qwen-2.5-coder" is not a known model.
      model: "qwen2.5-coder", // Replace with the specific model you want
      messages: [{ role: "user", content: query }],
    });
    // Bug fix: ollama.chat() resolves to { message: { content } };
    // `response.response` is the shape of ollama.generate(), and was
    // always undefined here, so .trim() threw a TypeError.
    return response.message.content.trim();
  } catch (error) {
    console.error("Error:", error);
    throw error;
  }
}

/**
 * Walk a question through a six-step chain-of-thought pipeline. Each
 * step's prompt embeds the previous step's output, so the LLM calls are
 * intentionally sequential and cannot be parallelized.
 *
 * Steps: decompose -> background -> analysis -> solutions -> evaluation
 * -> conclusion.
 *
 * @param {string} inputQuery - The original question to reason about.
 * @returns {Promise<{thoughtProcess: string[], finalAnswer: string}>}
 *   All intermediate step outputs plus the final conclusion (which is
 *   also the last element of thoughtProcess).
 */
async function chainOfThought(inputQuery) {
  const thoughtChain = [];

  // Step 1: Break the question into its key points.
  const step1 = `Break down the following question into key points: "${inputQuery}"`;
  const understanding = await callLLM(step1);
  thoughtChain.push(understanding);

  // Step 2: Gather background information for those key points.
  const step2 = `Given the key points: "${understanding}", provide any relevant background information.`;
  const context = await callLLM(step2);
  thoughtChain.push(context);

  // Step 3: Analyze the question in light of the background.
  const step3 = `Analyze the following question based on its background information: "${inputQuery}". What are the different aspects to consider?`;
  const analysis = await callLLM(step3);
  thoughtChain.push(analysis);

  // Step 4: Generate candidate solutions from the analysis.
  const step4 = `Based on the analysis: "${analysis}", generate possible solutions or insights.`;
  const solutions = await callLLM(step4);
  thoughtChain.push(solutions);

  // Step 5: Weigh the candidates and refine the best approach.
  const step5 = `Given the possible solutions: "${solutions}", evaluate the pros and cons, or refine the best approach.`;
  const evaluation = await callLLM(step5);
  thoughtChain.push(evaluation);

  // Step 6: Produce the final, concise answer.
  const step6 = `Based on the evaluation: "${evaluation}", provide a concise and well-reasoned answer to the original question.`;
  const conclusion = await callLLM(step6);
  thoughtChain.push(conclusion);

  return {
    thoughtProcess: thoughtChain,
    finalAnswer: conclusion,
  };
}

// Demo driver: run the pipeline on an example question.
const inputQuestion =
  "How can we improve network security in a large organization?";
chainOfThought(inputQuestion)
  .then((response) => {
    console.log("Thought Process:", response.thoughtProcess);
    console.log("Final Answer:", response.finalAnswer);
  })
  .catch((error) => {
    console.error("Error:", error);
  });