File size: 4,145 Bytes
8e074b3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
const express = require('express');
const http = require('http');
const { Server } = require('socket.io');
const { default: Groq } = require('groq-sdk');
require('dotenv').config()

// HTTP + WebSocket plumbing: an Express app wrapped in a plain Node HTTP
// server, with Socket.IO attached for bidirectional streaming to clients.
const app = express();
const server = http.createServer(app);

// Open CORS so browser clients served from any origin can connect.
const io = new Server(server, {
  cors: {
    origin: '*',
    methods: ['GET', 'POST'],
  },
});

// Groq API client; the key is read from the GROK_KI_API_KEY entry in .env.
const groq = new Groq({ apiKey: process.env.GROK_KI_API_KEY });

/**
 * Sends the accumulated conversation to the Groq chat-completion API and
 * returns the assistant's reply text.
 *
 * @param {Array<{role: string, content: string}>} messages - Full chat
 *   history to send (system + user + assistant turns).
 * @returns {Promise<string>} Content of the first choice's message, or an
 *   empty string when the API returns no usable content.
 */
async function getGroqChatCompletion(messages) {
  const completion = await groq.chat.completions.create({
    messages,
    model: "llama-3.2-90b-text-preview",
    temperature: 0.7,
    max_tokens: 7000,
  });
  const [firstChoice] = completion.choices;
  return firstChoice?.message?.content || "";
}

/**
 * Runs a multi-stage "chain of thought" over the Groq API for the given
 * question, streaming each intermediate model response to the client.
 *
 * Events emitted on `socket`:
 *  - 'thoughtStep' (string): each intermediate response, including the
 *    final synthesis stage.
 *  - 'finalAnswer' (string): the last model response (the synthesis).
 *  - 'chainError'  (string): human-readable message if any API call fails;
 *    in that case 'finalAnswer' is never emitted.
 *
 * @param {string} inputQuery - The user's original question.
 * @param {import('socket.io').Socket} socket - Connected client socket.
 * @returns {Promise<void>}
 */
async function chainOfThought(inputQuery, socket) {
  console.log(`\nInput Question: ${inputQuery}\n`);

  // Prompt pipeline: one system message followed by one user prompt per
  // reasoning stage. Every stage sees all previous assistant replies.
  const steps = [
    {
      role: "system",
      content: "You are an AI assistant designed to break down complex questions and provide step-by-step analysis. Each of your responses should build upon the previous steps.talk to improve upon current answer talk like a human dont excpect somebody is giving you something after first prompt assume the best answer yourself"
    },
    {
      role: "user",
      content: `Break down the following question into key components and identify the main goal: "${inputQuery}"`
    },
    {
      role: "user",
      content: "Based on the key components identified, what relevant background information,edge cases etc or context is necessary to address this question?"
    },
    {
      role: "user",
      content: "Considering the background information, what are the main aspects or sub-problems that need to be addressed to answer the original question?"
    },
    {
      role: "user",
      content: "For each aspect or sub-problem identified, what are potential approaches or solutions?"
    },
    {
      role: "user",
      content: "Evaluate the pros and cons of each approach or solution. Which seem most promising and why?"
    },
    {
      role: "user",
      content: "Synthesize the analysis from previous steps to formulate a comprehensive answer to the original question implement the final approach fully and dont leave anything if code give complete code of each and every file. Make sure to address all key components identified in the all steps and give the final answer to this question."
    },
  ];

  // Seed the conversation with the system message and the first user prompt.
  const messages = steps.slice(0, 2);

  try {
    for (let i = 1; i < steps.length; i++) {
      // Ask the model to respond to everything accumulated so far.
      const response = await getGroqChatCompletion(messages);
      socket.emit('thoughtStep', response);

      messages.push({ role: "assistant", content: response });
      // Queue the next user prompt, unless this was the last stage.
      if (i < steps.length - 1) {
        messages.push(steps[i + 1]);
      }
    }
  } catch (err) {
    // Surface API failures to the client instead of leaving the connection
    // hanging with no final answer (previously the rejection propagated
    // unhandled out of the socket handler).
    console.error('chainOfThought failed:', err);
    socket.emit('chainError', `Chain of thought failed: ${err.message}`);
    return;
  }

  // The last message pushed by the loop is the final assistant synthesis.
  const finalAnswer = messages[messages.length - 1].content;
  socket.emit('finalAnswer', finalAnswer);
}

// Socket.IO connection lifecycle: each connected client may kick off a
// chain-of-thought run; results stream back over the same socket.
io.on('connection', (socket) => {
  console.log('A user connected');

  socket.on('startChainOfThought', (inputQuestion) => {
    // chainOfThought is async; without this .catch a rejected promise was
    // left floating, producing an unhandled promise rejection (which can
    // terminate the process on modern Node versions).
    chainOfThought(inputQuestion, socket).catch((err) => {
      console.error('startChainOfThought failed:', err);
      socket.emit('chainError', `Chain of thought failed: ${err.message}`);
    });
  });

  socket.on('disconnect', () => {
    console.log('User disconnected');
  });
});

// Start listening; honour a platform-assigned port when present,
// otherwise fall back to 3001 for local development.
const PORT = process.env.PORT || 3001;

server.listen(PORT, () =>
  console.log(`Server running on port ${PORT}`),
);