# ChatBot: chat client for an OpenAI-compatible model server (Hugging Face Spaces app).
import openai
import time
import json
import yaml
from typing import Union
import os
from langchain_openai import OpenAI
from transformers import AutoTokenizer
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
class ChatBot(object):
    """Chat client for an OpenAI-compatible completion server.

    Maintains two parallel conversation records:
      * ``history``      -- flat list: ``[system_prompt, user, assistant, user, ...]``
      * ``chat_history`` -- list of ``(user_text, assistant_text)`` pairs
        (the format the UI layer passes in and expects back)
    """

    def __init__(self, model_name, url):
        """Create a client for ``model_name`` served at ``url``.

        The tokenizer is loaded only to render the model's chat template
        into a single prompt string (the server exposes a plain
        completion endpoint, not a chat endpoint).
        """
        self.model_name = model_name
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.url = url
        self.model = self._set_model()
        self.chat_history = []
        self.history = self._set_initial_history()

    def _set_model(self):
        """Build the LangChain completion client pointed at ``self.url``."""
        return OpenAI(
            model_name=self.model_name,
            openai_api_base=f"{self.url}",
            openai_api_key="EMPTY",  # local OpenAI-compatible servers ignore the key
            temperature=0.1,
            top_p=0.95,
            max_tokens=1024,
            max_retries=3,
        )

    def _set_initial_history(self):
        """Return a fresh history list containing only the system prompt."""
        return ["You are an exceptionally intelligent coding assistant developed by DLI lab that consistently delivers accurate and reliable responses to user instructions. If somebody asks you who are you, answer as 'AI programming assistant based on DLI Lab'.\n\n"]

    def set_model_input(self, input_text=None):
        """Convert ``self.history`` into a list of role/content message dicts.

        If ``input_text`` is not ``None`` it is first appended to the history
        as the newest user turn. ``history[0]`` is the system prompt; after
        that, even offsets are user turns and odd offsets are assistant turns.
        """
        if input_text is not None:
            self.history.append(input_text)
        model_input = [{"role": "system", "content": self.history[0]}]
        for i, text in enumerate(self.history[1:]):
            role = "user" if i % 2 == 0 else "assistant"
            model_input.append({"role": role, "content": text})
        return model_input

    def chat(self, chat_history, input_text):
        """Send ``input_text`` to the model; return the updated UI history."""
        self.chat_history = chat_history
        model_input_list = self.set_model_input(input_text)
        model_input = self.tokenizer.apply_chat_template(
            model_input_list, tokenize=False, add_generation_prompt=True)
        response = self.model.invoke(model_input)
        if response is not None:
            self.history.append(response)
            self.chat_history = self.chat_history + [(input_text, response)]
        return self.chat_history

    def regenerate(self, chat_history, input_text):
        """Re-generate the assistant's last reply to ``input_text``.

        BUGFIX: trim only the last *assistant* turn (``history[:-1]``).
        The original trimmed ``history[:-2]``, which also removed the last
        user turn, so the model regenerated against a conversation that no
        longer contained the question being answered.
        """
        self.chat_history = chat_history[:-1]
        self.history = self.history[:-1]
        model_input_list = self.set_model_input(None)
        model_input = self.tokenizer.apply_chat_template(
            model_input_list, tokenize=False, add_generation_prompt=True)
        response = self.model.invoke(model_input)
        if response is not None:
            self.history.append(response)
            self.chat_history = self.chat_history + [(input_text, response)]
        return self.chat_history

    def clear_chat(self):
        """Reset both histories; return the (now empty) UI history."""
        self.chat_history = []
        self.history = self._set_initial_history()
        return self.chat_history