File size: 1,922 Bytes
9f21ec5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58

import os

import openai

# SECURITY FIX: the API key was previously hard-coded in this file. A key
# committed to source control is compromised and must be revoked/rotated.
# Read it from the environment instead (empty string if unset, so importing
# this module never crashes; the API call itself will fail with a clear
# authentication error if no key is configured).
openai.api_key = os.environ.get("OPENAI_API_KEY", "")

class GPTCompletion:
    """Thin wrapper around the OpenAI chat-completion API.

    Holds a fixed system prompt plus sampling parameters, and builds the
    message list (system prompt, optional opener, alternating user/assistant
    history, new user message) for each completion request.
    """

    def __init__(
            self,
            system="You are a helpful AI assistant",
            model="gpt-3.5-turbo",
            temperature=1.0,
            top_p=1.0,
            n=1,
            stream=False,
            stop=None,
            max_tokens=256,
            presence_penalty=0.0,
            frequency_penalty=0.0,
            logit_bias=None
        ):
        """Store the system prompt and the sampling parameters.

        All parameters mirror the OpenAI chat-completion API fields of the
        same name; ``system`` becomes the first (``role: system``) message.
        """
        self.system = system
        self.model = model
        # Seed the conversation with the system prompt.
        self.messages = [{"role": "system", "content": f"{self.system}"}]
        self.temperature = temperature
        self.top_p = top_p
        self.n = n
        self.stream = stream
        self.stop = stop
        self.max_tokens = max_tokens
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        # BUG FIX: the original used a mutable default (``logit_bias={}``),
        # which is shared across every instance created with the default.
        self.logit_bias = {} if logit_bias is None else logit_bias

    def chatComplete(self, chatHistory, newMessage, firstMessage=""):
        """Send one chat-completion request and return the reply text.

        Args:
            chatHistory: iterable of ``(user_text, assistant_text)`` pairs
                representing prior turns, oldest first.
            newMessage: the new user message to complete.
            firstMessage: optional opening user message placed before the
                history; skipped when empty.

        Returns:
            The stripped ``content`` string of the first choice.
        """
        # BUG FIX: the original appended onto ``self.messages`` every call,
        # so repeated calls re-sent the whole history duplicated. Rebuild
        # the list from the system prompt each time instead.
        messages = [{"role": "system", "content": f"{self.system}"}]

        # BUG FIX: only include the opener when one was actually given; the
        # original always appended it, sending an empty user message by default.
        if firstMessage:
            messages.append({"role": "user", "content": f"{firstMessage}"})

        for user_text, assistant_text in chatHistory:
            messages.append({"role": "user", "content": f"{user_text}"})
            messages.append({"role": "assistant", "content": f"{assistant_text}"})

        messages.append({"role": "user", "content": f"{newMessage}"})
        # Keep the attribute in sync for callers that inspect it afterwards.
        self.messages = messages

        response = openai.ChatCompletion.create(
            model=self.model,
            messages=messages,
            temperature=self.temperature,
            top_p=self.top_p,
            n=self.n,
            stream=self.stream,
            stop=self.stop,
            max_tokens=self.max_tokens,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            logit_bias=self.logit_bias
        )

        return response["choices"][0].message["content"].strip()