# custimator / HugChatLLM.py
# (Hugging Face page chrome removed from source: uploaded by K00B404,
#  commit efc3fee "Update HugChatLLM.py", verified.)
import os,re,sys,yaml
from rich import print as rp
from dotenv import load_dotenv, find_dotenv
# Prompt templates for the improvement workflow; presumably consumed elsewhere
# in this file or by callers — not referenced in the visible code. TODO confirm.
from improvement_prompts import first_prompt, second_prompt,new_first_prompt, new_second_prompt
# Load EMAIL / PASSWD (read in HugChatLLM.__init__) from the nearest .env file.
load_dotenv(find_dotenv())
import logging
from hugchat import hugchat
from hugchat.login import Login
from typing import List, Dict, Generator, Optional,Tuple
# All module activity is appended to a local log file.
logging.basicConfig(filename='improvement_log.log', level=logging.INFO)
# NOTE(review): UberK, re, Tuple and the four prompt imports appear unused in
# the visible portion of this file — verify before pruning.
from UberToolkit import UberToolkit as UberK
class HugChatLLM:
    """Thin wrapper around :class:`hugchat.ChatBot` with trigger dispatch.

    Credentials are read from the ``EMAIL`` and ``PASSWD`` environment
    variables (loaded via dotenv at module import time).  Responses can be
    post-processed by :meth:`trigger_mon`, which scans for the keys of
    ``self.triggers`` and dispatches callable handlers.
    """

    def __init__(self, cookie_path_dir: str = "./cookies/"):
        self.email = os.getenv("EMAIL")
        self.password = os.getenv("PASSWD")
        if not self.email or not self.password:
            print("EMAIL and PASSWD environment variables must be set.")
            sys.exit(1)
        self.cookie_path_dir = cookie_path_dir
        self.chatbot = self._login_and_create_chatbot()
        self.current_conversation = self.chatbot.new_conversation(
            modelIndex=1, system_prompt='', switch_to=True)
        self.img_gen_servers = ["Yntec/HuggingfaceDiffusion", "Yntec/WinningBlunder"]
        #self.igllm=ImageGeneratorLLM()
        # Values are either a prompt prefix (str) or a callable handler that
        # trigger_mon invokes with the text following the trigger key.
        self.triggers = {
            '/improve:': 'Improve the following text: ',
            '/fix:': 'Fix the following code: ',
            "/save_yaml:": self.save_yaml,
            "/add_tab:": "",
            "/edit_tab:": "",
        }

    def _login_and_create_chatbot(self) -> "hugchat.ChatBot":
        """Log in with the stored credentials and return a ChatBot session."""
        sign = Login(self.email, self.password)
        cookies = sign.login(cookie_dir_path=self.cookie_path_dir, save_cookies=True)
        return hugchat.ChatBot(cookies=cookies.get_dict())

    def __call__(self, text: str, stream: bool = True, web_search: bool = False) -> str:
        """Query the model and run trigger post-processing on the reply.

        Returns the processed response string in both branches.  (The old
        non-stream branch unpacked trigger_mon's single string into two
        names, which raised ValueError for any normal-length response.)
        Note: the stream branch is still a blocking call and ignores
        ``web_search`` — preserved from the original; use stream_query for
        true streaming.
        """
        if stream:
            return self.trigger_mon(input_string=self.query(text))
        return self.trigger_mon(self.query(text, web_search))

    def query(self, text: str, web_search: bool = False) -> str:
        """Send a blocking chat message and return the finished response.

        Returns '' for empty input (was an implicit None, which crashed
        trigger_mon's ``in`` test downstream).
        """
        if not text:
            return ''
        message_result = self.chatbot.chat(text, web_search=web_search)
        return message_result.wait_until_done()

    def stream_query(self, text: str) -> Generator[str, None, None]:
        """Yield response chunks as they arrive; yields nothing for empty input."""
        if text:
            for resp in self.chatbot.chat(text, stream=True):
                yield resp

    def new_conversation(self, model_index: int = 1, system_prompt: str = '', switch_to: bool = True):
        """Create (and optionally switch to) a new conversation.

        Returns the conversation object from hugchat (the old ``-> str``
        annotation was wrong).
        """
        return self.chatbot.new_conversation(modelIndex=model_index, system_prompt=system_prompt, switch_to=switch_to)

    def get_remote_conversations(self, replace_conversation_list: bool = True) -> List[Dict]:
        """Fetch the conversation list stored on the HuggingChat server."""
        return self.chatbot.get_remote_conversations(replace_conversation_list=replace_conversation_list)

    def get_conversation_list(self) -> List[Dict]:
        """Return the locally cached conversation list."""
        return self.chatbot.get_conversation_list()

    def switch_model(self, index: int) -> None:
        """Switch the active LLM by index (see get_available_models)."""
        self.chatbot.switch_llm(index)

    def get_conversation_info(self) -> Dict:
        """Return the active conversation's metadata as a plain dict."""
        info = self.chatbot.get_conversation_info()
        return {
            "id": info.id,
            "title": info.title,
            "model": info.model,
            "system_prompt": info.system_prompt,
            "history": info.history
        }

    def search_assistant_by_name(self, assistant_name: str) -> Dict:
        """Look up a HuggingChat assistant by its display name."""
        return self.chatbot.search_assistant(assistant_name=assistant_name)

    def search_assistant_by_id(self, assistant_id: str) -> Dict:
        """Look up a HuggingChat assistant by its id."""
        return self.chatbot.search_assistant(assistant_id=assistant_id)

    def get_assistant_list(self, page: int = 0) -> List[Dict]:
        """Return one page of the public assistant directory."""
        return self.chatbot.get_assistant_list_by_page(page=page)

    def new_conversation_with_assistant(self, assistant: Dict, switch_to: bool = True):
        """Start a conversation bound to *assistant* and return its info."""
        self.chatbot.new_conversation(assistant=assistant, switch_to=switch_to)
        return self.chatbot.get_conversation_info()

    def delete_all_conversations(self) -> None:
        """Delete every conversation on the account."""
        self.chatbot.delete_all_conversations()

    # The original file defined get_available_models twice; the earlier
    # plain-list version was dead code (shadowed by this one) and was removed.
    def get_available_models(self) -> List[Dict[str, str]]:
        """Return available models as [{'id': ..., 'name': ...}], logging each."""
        models = self.chatbot.get_available_llm_models()
        for model in models:
            logging.info(model.id)
            logging.info(model.name)
        return [{"id": model.id, "name": model.name} for model in models]

    def save_yaml(self, input_string: str, file_path: str = '.', file_name: str = 'output.yaml') -> str:
        """Dump *input_string* as YAML to file_path/file_name.

        file_path/file_name now have defaults so trigger_mon can dispatch
        this handler with a single payload argument (the old required
        parameters made that call a TypeError).
        """
        yaml_file_path = os.path.join(file_path, file_name)
        # rp *is* rich.print; the old ``rp.print(...)`` raised AttributeError.
        rp(f'Saving YAML data: {input_string}\n\nTo: {yaml_file_path}')
        # here we write the YAML data to a file
        with open(yaml_file_path, 'w') as file:
            yaml.dump(input_string, file)
        return "YAML data saved successfully."

    def trigger_mon(self, input_string: str) -> str:
        """Detect trigger keys in an AI response and dispatch their handlers.

        For the first configured trigger found in *input_string*, the text
        between the (last occurrence of the) trigger key and the next
        newline becomes the payload.  Callable handlers are invoked with the
        payload; string handlers are prompt prefixes and are not called (the
        old code called them unconditionally, a TypeError).  The processed
        span is marked with '[trigger_processed]' in the returned text.
        """
        if not input_string:
            return input_string
        for trigger, action in self.triggers.items():
            if trigger not in input_string:
                continue
            # Payload = text after the last occurrence of the trigger, up to EOL
            # (preserves the original split(...).pop() semantics).
            payload = input_string.split(trigger).pop().split('\n')[0]
            print(f'Detected trigger: {trigger}, fetching input: {payload}')
            if callable(action):
                action(payload)
            # Mark the processed span.  The old code called .replace() on the
            # list returned by str.split(), which raised AttributeError.
            return input_string.replace(f'{trigger}{payload}',
                                        f'[trigger_processed]{payload}', 1)
        # if no trigger is found, we return the input string as is
        return input_string
class ImageGeneratorLLM(HugChatLLM):
    """HugChatLLM specialisation whose sole job is text-to-image generation."""

    def __init__(self):
        # Nothing extra to configure; reuse the parent's login bootstrap.
        super().__init__()

    def __call__(self, prompt: str) -> str:
        """Calling the instance directly is shorthand for generate_image()."""
        return self.generate_image(prompt)

    def generate_image(self, prompt: str) -> str:
        """Ask the underlying hugchat session to render *prompt* as an image."""
        # NOTE(review): assumes the installed hugchat ChatBot exposes a
        # `generate_image` method — confirm against the library version in use.
        bot = self.chatbot
        return bot.generate_image(prompt)