import openai
import tiktoken


class Conversation:
    def __init__(self, prompt, model="gpt-3.5-turbo", temperature=0.8, max_tokens=250):
        self.prompt = prompt
        self.model = model
        self.temperature = temperature
        self.max_tokens = max_tokens
        self._init_messages()

    def _init_messages(self):
        self.messages = [{"role": "system", "content": self.prompt}]

    def reset(self):
        self._init_messages()

    def ask(self, question, pprint=True):
        self.messages.append({"role": "user", "content": question})
        # Trim the oldest user/assistant exchange until the history fits the
        # token budget, always keeping the system prompt at index 0.
        while self.num_tokens(self.messages, self.model) >= self.max_tokens:
            if len(self.messages) > 3:
                self.messages = self.messages[:1] + self.messages[3:]  # drop the oldest user/assistant pair
            else:
                return "Error: max tokens exceeded."
        try:
            response = openai.ChatCompletion.create(  # legacy (pre-1.0) openai SDK call
                model=self.model,
                messages=self.messages,
                temperature=self.temperature,
            )
        except Exception as e:
            return str(e)
        if pprint:
            print(f"tiktoken: {self.num_tokens(self.messages, self.model)}\ntokens: {response['usage']}")
        assistant_message = response["choices"][0]["message"]["content"]
        self.messages.append({"role": "assistant", "content": assistant_message})
        return assistant_message
    def num_tokens(self, messages, model):
        """Returns the number of tokens used by a list of messages."""
        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            print("Warning: model not found. Using cl100k_base encoding.")
            encoding = tiktoken.get_encoding("cl100k_base")
        if model == "gpt-3.5-turbo":
            print("Warning: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301.")
            return self.num_tokens(messages, model="gpt-3.5-turbo-0301")
        elif model == "gpt-4":
            print("Warning: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.")
            return self.num_tokens(messages, model="gpt-4-0314")
        elif model == "gpt-3.5-turbo-0301":
            tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
            tokens_per_name = -1  # if there's a name, the role is omitted
        elif model == "gpt-4-0314":
            tokens_per_message = 3
            tokens_per_name = 1
        else:
            raise NotImplementedError(
                f"num_tokens() is not implemented for model {model}. "
                "See https://github.com/openai/openai-python/blob/main/chatml.md "
                "for information on how messages are converted to tokens."
            )
        num_tokens = 0
        for message in messages:
            num_tokens += tokens_per_message
            for key, value in message.items():
                num_tokens += len(encoding.encode(value))
                if key == "name":
                    num_tokens += tokens_per_name
        num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
        return num_tokens
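
A minimal usage sketch of the class above. It assumes the pre-1.0 openai package and a valid API key exported as the OPENAI_API_KEY environment variable; the system prompt and questions are illustrative placeholders only.

# Usage sketch: assumes openai<1.0 and an OPENAI_API_KEY environment variable;
# the prompt and questions below are placeholders.
import os

openai.api_key = os.environ["OPENAI_API_KEY"]

chat = Conversation(prompt="You are a helpful assistant.", max_tokens=500)
print(chat.ask("What does tiktoken do?"))
print(chat.ask("Why count tokens before each request?"))  # history is carried over
chat.reset()  # clear the history, keeping only the system prompt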