"""Refer to https://github.com/abacaj/mpt-30B-inference/blob/main/download_model.py.""" # pylint: disable=invalid-name, missing-function-docstring, missing-class-docstring, redefined-outer-name, broad-except import os import time import gradio as gr # from mcli import predict from huggingface_hub import hf_hub_download from loguru import logger URL = os.environ.get("URL") _ = """ if URL is None: raise ValueError("URL environment variable must be set") if os.environ.get("MOSAICML_API_KEY") is None: raise ValueError("git environment variable must be set") # """ def predict(x, y, timeout): logger.debug(f"{x=}, {y=}, {timeout=}") def download_mpt_quant(destination_folder: str, repo_id: str, model_filename: str): local_path = os.path.abspath(destination_folder) return hf_hub_download( repo_id=repo_id, filename=model_filename, local_dir=local_path, local_dir_use_symlinks=True, ) class Chat: default_system_prompt = "A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers." system_format = "<|im_start|>system\n{}<|im_end|>\n" def __init__( self, system: str = None, user: str = None, assistant: str = None ) -> None: if system is not None: self.set_system_prompt(system) else: self.reset_system_prompt() self.user = user if user else "<|im_start|>user\n{}<|im_end|>\n" self.assistant = ( assistant if assistant else "<|im_start|>assistant\n{}<|im_end|>\n" ) self.response_prefix = self.assistant.split("{}", maxsplit=1)[0] def set_system_prompt(self, system_prompt): # self.system = self.system_format.format(system_prompt) return system_prompt def reset_system_prompt(self): return self.set_system_prompt(self.default_system_prompt) def history_as_formatted_str(self, system, history) -> str: system = self.system_format.format(system) text = system + "".join( [ "\n".join( [ self.user.format(item[0]), self.assistant.format(item[1]), ] ) for item in history[:-1] ] ) text += self.user.format(history[-1][0]) text += self.response_prefix # stopgap solution to too long sequences if len(text) > 4500: # delete from the middle between <|im_start|> and <|im_end|> # find the middle ones, then expand out start = text.find("<|im_start|>", 139) end = text.find("<|im_end|>", 139) while end < len(text) and len(text) > 4500: end = text.find("<|im_end|>", end + 1) text = text[:start] + text[end + 1 :] if len(text) > 4500: # the nice way didn't work, just truncate # deleting the beginning text = text[-4500:] return text def clear_history(self, history): return [] def turn(self, user_input: str): self.user_turn(user_input) return self.bot_turn() def user_turn(self, user_input: str, history): history.append([user_input, ""]) return user_input, history def bot_turn(self, system, history): conversation = self.history_as_formatted_str(system, history) assistant_response = call_inf_server(conversation) history[-1][-1] = assistant_response print(system) print(history) return "", history def call_inf_server(prompt): try: response = predict( URL, {"inputs": [prompt], "temperature": 0.2, "top_p": 0.9, "output_len": 512}, timeout=70, ) # print(f'prompt: {prompt}') # print(f'len(prompt): {len(prompt)}') response = response["outputs"][0] # print(f'len(response): {len(response)}') # remove spl tokens from prompt spl_tokens = ["<|im_start|>", "<|im_end|>"] clean_prompt = prompt.replace(spl_tokens[0], "").replace(spl_tokens[1], "") return response[len(clean_prompt) :] # remove the prompt except Exception as e: # assume it is our error # just wait and try one more time print(e) time.sleep(1) 
        response = predict(
            URL,
            {"inputs": [prompt], "temperature": 0.2, "top_p": 0.9, "output_len": 512},
            timeout=70,
        )
        response = response["outputs"][0]
        # strip the special tokens before slicing, as in the first attempt
        clean_prompt = prompt.replace("<|im_start|>", "").replace("<|im_end|>", "")
        return response[len(clean_prompt) :]  # remove the prompt


logger.info("start dl")

_ = """full url: https://huggingface.co/TheBloke/mpt-30B-chat-GGML/blob/main/mpt-30b-chat.ggmlv0.q4_1.bin"""
repo_id = "TheBloke/mpt-30B-chat-GGML"
model_filename = "mpt-30b-chat.ggmlv0.q4_1.bin"
destination_folder = "models"

download_mpt_quant(destination_folder, repo_id, model_filename)
logger.info("done dl")

with gr.Blocks(
    theme=gr.themes.Soft(),
    css=".disclaimer {font-variant-caps: all-small-caps;}",
) as demo:
    gr.Markdown(
        """