# Thin wrapper classes around the legacy Anthropic (pre-Messages) completion
# API and the pre-1.0 OpenAI chat completion API.
import anthropic
import openai

class ClaudeCompletion:
    """Holds the sampling parameters for a single Anthropic completion request.

    chatComplete appends to self.prompt, so use a fresh instance per request.
    """

    def __init__(
            self,
            prompt,
            model="claude-v1.3",
            max_tokens_to_sample=256,
            stop_sequences=[anthropic.HUMAN_PROMPT],
            stream=False,
            temperature=1.0,
            top_k=-1,
            top_p=-1
            ):
        self.model = model
        self.prompt = prompt
        self.max_tokens_to_sample = max_tokens_to_sample
        self.stop_sequences = stop_sequences
        self.stream = stream
        self.temperature = temperature
        self.top_k = top_k
        self.top_p = top_p

    def execute(self, claudeClient):
        """Send a single-turn prompt and return the stripped completion text."""
        response = claudeClient.completion(
            prompt=f"{anthropic.HUMAN_PROMPT} {self.prompt} {anthropic.AI_PROMPT}",
            model=self.model,
            max_tokens_to_sample=self.max_tokens_to_sample,
            stop_sequences=self.stop_sequences,
            stream=self.stream,
            temperature=self.temperature,
            top_k=self.top_k,
            top_p=self.top_p,
        )
        return response["completion"].strip()

    def chatComplete(self, claudeClient, chatHistory):
        """Complete the next turn of a conversation.

        chatHistory is a list of (human_message, assistant_message) pairs;
        the assistant slot of the last pair is the turn being generated.
        """
        # Replay every finished exchange, then leave the final human turn
        # open so the model generates the next assistant reply.
        for i in range(len(chatHistory) - 1):
            self.prompt += f"{anthropic.HUMAN_PROMPT} {chatHistory[i][0]}"
            self.prompt += f"{anthropic.AI_PROMPT} {chatHistory[i][1]}"
        self.prompt += f"{anthropic.HUMAN_PROMPT} {chatHistory[-1][0]} {anthropic.AI_PROMPT}"

        response = claudeClient.completion(
            prompt=self.prompt,
            model=self.model,
            max_tokens_to_sample=self.max_tokens_to_sample,
            stop_sequences=self.stop_sequences,
            stream=self.stream,
            temperature=self.temperature,
            top_k=self.top_k,
            top_p=self.top_p,
        )
        return response["completion"].strip()
    
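# Usage sketch for ClaudeCompletion (an illustrative addition, not part of the
# original module). It assumes the legacy anthropic SDK (~v0.2), where
# anthropic.Client(api_key) exposes the blocking .completion() call, and that
# ANTHROPIC_API_KEY is set in the environment:
#
#   import os
#   client = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
#   completion = ClaudeCompletion(prompt="")
#   history = [("What is the capital of France?", "Paris."),
#              ("And of Japan?", None)]  # final assistant slot is generated
#   print(completion.chatComplete(client, history))
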
class GPTCompletion:
    """Holds the sampling parameters and message list for an OpenAI chat request.

    chatComplete appends to self.messages, so use a fresh instance per call.
    """

    def __init__(
            self,
            system="You are a helpful AI assistant",
            model="gpt-3.5-turbo",
            temperature=1.0,
            top_p=1.0,
            n=1,
            stream=False,
            stop=None,
            max_tokens=256,
            presence_penalty=0.0,
            frequency_penalty=0.0,
            logit_bias={}
        ):
        self.system = system
        self.model = model
        self.messages = [{"role": "system", "content": self.system}]
        self.temperature = temperature
        self.top_p = top_p
        self.n = n
        self.stream = stream
        self.stop = stop
        self.max_tokens = max_tokens
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.logit_bias = logit_bias

    def chatComplete(self, chatHistory, firstMessage=""):
        """Complete the next turn of a chat.

        firstMessage is the opening user turn; each chatHistory pair holds
        the assistant reply to the previous turn followed by the next user
        message.
        """
        self.messages.append({"role": "user", "content": firstMessage})
        for assistantMsg, userMsg in chatHistory:
            self.messages.append({"role": "assistant", "content": assistantMsg})
            self.messages.append({"role": "user", "content": userMsg})

        response = openai.ChatCompletion.create(
            model=self.model,
            messages=self.messages,
            temperature=self.temperature,
            top_p=self.top_p,
            n=self.n,
            stream=self.stream,
            stop=self.stop,
            max_tokens=self.max_tokens,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            logit_bias=self.logit_bias,
        )

        return response["choices"][0]["message"]["content"].strip()
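
# Usage sketch for GPTCompletion (an illustrative addition, not part of the
# original module). It assumes the pre-1.0 openai SDK, which reads
# OPENAI_API_KEY from the environment.
if __name__ == "__main__":
    gpt = GPTCompletion(system="You are a concise assistant.")
    # firstMessage opens the chat; each history pair is
    # (assistant reply, next user message).
    reply = gpt.chatComplete(
        chatHistory=[("Paris.", "And the capital of Japan?")],
        firstMessage="What is the capital of France?",
    )
    print(reply)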