"""Run codes""" # pylint: disable=line-too-long, broad-exception-caught, invalid-name, missing-function-docstring, too-many-instance-attributes, missing-class-docstring # r uff: noqa: E501 # import gradio # gradio.load("models/WizardLM/WizardCoder-15B-V1.0").launch() import os import time from dataclasses import asdict, dataclass from pathlib import Path from types import SimpleNamespace import gradio as gr from about_time import about_time # from ctransformers import AutoConfig, AutoModelForCausalLM from ctransformers import AutoModelForCausalLM from huggingface_hub import hf_hub_download from loguru import logger os.environ["TZ"] = "Asia/Shanghai" try: time.tzset() # type: ignore # pylint: disable=no-member except Exception: # Windows logger.warning("Windows, cant run time.tzset()") ns = SimpleNamespace( response="", generator=[], ) default_system_prompt = "A conversation between a user and an LLM-based AI assistant named Local Assistant. Local Assistant gives helpful and honest answers." user_prefix = "[user]: " assistant_prefix = "[assistant]: " def predict(prompt, bot): # logger.debug(f"{prompt=}, {bot=}, {timeout=}") logger.debug(f"{prompt=}, {bot=}") ns.response = "" with about_time() as atime: # type: ignore try: # user_prompt = prompt generator = generate( LLM, GENERATION_CONFIG, system_prompt=default_system_prompt, user_prompt=prompt.strip(), ) ns.generator = generator # for .then print(assistant_prefix, end=" ", flush=True) response = "" buff.update(value="diggin...") for word in generator: # print(word, end="", flush=True) print(word, flush=True) # vertical stream response += word ns.response = response buff.update(value=response) print("") logger.debug(f"{response=}") except Exception as exc: logger.error(exc) response = f"{exc=}" # bot = {"inputs": [response]} _ = ( f"(time elapsed: {atime.duration_human}, " # type: ignore f"{atime.duration/(len(prompt) + len(response)):.1f}s/char)" # type: ignore ) bot.append([prompt, f"{response} {_}"]) return prompt, bot def predict_api(prompt): logger.debug(f"{prompt=}") ns.response = "" try: # user_prompt = prompt _ = GenerationConfig( temperature=0.2, top_k=0, top_p=0.9, repetition_penalty=1.0, max_new_tokens=512, # adjust as needed seed=42, reset=False, # reset history (cache) stream=True, # TODO stream=False and generator threads=os.cpu_count() // 2, # type: ignore # adjust for your CPU stop=["<|im_end|>", "|<"], ) # TODO stream does not make sense in api? 
        generator = generate(
            LLM, _, system_prompt=default_system_prompt, user_prompt=prompt.strip()
        )

        print(assistant_prefix, end=" ", flush=True)

        response = ""
        buff.update(value="diggin...")
        for word in generator:
            print(word, end="", flush=True)
            response += word
            ns.response = response
            buff.update(value=response)
        print("")
        logger.debug(f"{response=}")
    except Exception as exc:
        logger.error(exc)
        response = f"{exc=}"
    # bot = {"inputs": [response]}
    # bot = [(prompt, response)]

    return response


def download_quant(destination_folder: str, repo_id: str, model_filename: str):
    local_path = os.path.abspath(destination_folder)
    return hf_hub_download(
        repo_id=repo_id,
        filename=model_filename,
        local_dir=local_path,
        local_dir_use_symlinks=True,
    )


@dataclass
class GenerationConfig:
    temperature: float
    top_k: int
    top_p: float
    repetition_penalty: float
    max_new_tokens: int
    seed: int
    reset: bool
    stream: bool
    threads: int
    stop: list[str]


def format_prompt(system_prompt: str, user_prompt: str):
    """Format prompt based on: https://huggingface.co/spaces/mosaicml/mpt-30b-chat/blob/main/app.py."""
    # TODO im_start/im_end possible fix for WizardCoder
    system_prompt = f"<|im_start|>system\n{system_prompt}<|im_end|>\n"
    user_prompt = f"<|im_start|>user\n{user_prompt}<|im_end|>\n"
    assistant_prompt = "<|im_start|>assistant\n"

    return f"{system_prompt}{user_prompt}{assistant_prompt}"


def generate(
    llm: AutoModelForCausalLM,
    generation_config: GenerationConfig,
    system_prompt: str = default_system_prompt,
    user_prompt: str = "",
):
    """Run model inference, will return a Generator if streaming is true"""
    # if not user_prompt.strip():
    return llm(
        format_prompt(
            system_prompt,
            user_prompt,
        ),
        **asdict(generation_config),
    )


_ = """full url: https://huggingface.co/TheBloke/mpt-30B-chat-GGML/blob/main/mpt-30b-chat.ggmlv0.q4_1.bin"""

# https://huggingface.co/TheBloke/mpt-30B-chat-GGML
_ = """
mpt-30b-chat.ggmlv0.q4_0.bin  q4_0  4  16.85 GB  19.35 GB  4-bit.
mpt-30b-chat.ggmlv0.q4_1.bin  q4_1  4  18.73 GB  21.23 GB  4-bit. Higher accuracy than q4_0 but not as high as q5_0. However has quicker inference than q5 models.
mpt-30b-chat.ggmlv0.q5_0.bin  q5_0  5  20.60 GB  23.10 GB
mpt-30b-chat.ggmlv0.q5_1.bin  q5_1  5  22.47 GB  24.97 GB
mpt-30b-chat.ggmlv0.q8_0.bin  q8_0  8  31.83 GB  34.33 GB
"""

MODEL_FILENAME = "mpt-30b-chat.ggmlv0.q4_1.bin"
MODEL_FILENAME = "WizardCoder-15B-1.0.ggmlv3.q4_0.bin"  # 10.7G
MODEL_FILENAME = "WizardCoder-15B-1.0.ggmlv3.q4_1.bin"  # 11.9G

# https://huggingface.co/TheBloke/WizardLM-13B-V1.0-Uncensored-GGML
MODEL_FILENAME = "wizardlm-13b-v1.0-uncensored.ggmlv3.q4_1.bin"  # 8.4G

DESTINATION_FOLDER = "models"

REPO_ID = "TheBloke/mpt-30B-chat-GGML"
if "WizardCoder" in MODEL_FILENAME:
    REPO_ID = "TheBloke/WizardCoder-15B-1.0-GGML"

if "uncensored" in MODEL_FILENAME.lower():
    REPO_ID = "TheBloke/WizardLM-13B-V1.0-Uncensored-GGML"

logger.info(f"start dl, {REPO_ID=}, {MODEL_FILENAME=}, {DESTINATION_FOLDER=}")
download_quant(DESTINATION_FOLDER, REPO_ID, MODEL_FILENAME)
logger.info("done dl")

# if "mpt" in model_filename:
#     config = AutoConfig.from_pretrained("mosaicml/mpt-30b-chat", context_length=8192)
#     llm = AutoModelForCausalLM.from_pretrained(
#         os.path.abspath(f"models/{model_filename}"),
#         model_type="mpt",
#         config=config,
#     )

# https://huggingface.co/spaces/matthoffner/wizardcoder-ggml/blob/main/main.py
_ = """
llm = AutoModelForCausalLM.from_pretrained(
    "TheBloke/WizardCoder-15B-1.0-GGML",
    model_file="",
    model_type="starcoder",
    threads=8
)
# """

logger.debug(f"{os.cpu_count()=}")

logger.info("load llm")

_ = Path("models", MODEL_FILENAME).absolute().as_posix()
logger.debug(f"model_file: {_}, exists: {Path(_).exists()}")
LLM = AutoModelForCausalLM.from_pretrained(
    # "TheBloke/WizardCoder-15B-1.0-GGML",
    REPO_ID,
    model_file=_,
    # "starcoder" fits the WizardCoder files; a llama-family model such as
    # wizardlm-13b likely needs model_type="llama" instead
    model_type="starcoder",
    threads=os.cpu_count() // 2,  # type: ignore
)
logger.info("done load llm")

cpu_count = os.cpu_count() // 2  # type: ignore
logger.debug(f"{cpu_count=}")

GENERATION_CONFIG = GenerationConfig(
    temperature=0.2,
    top_k=0,
    top_p=0.9,
    repetition_penalty=1.0,
    max_new_tokens=512,  # adjust as needed
    seed=42,
    reset=False,  # reset history (cache)
    stream=True,  # streaming per word/token
    threads=cpu_count,
    stop=["<|im_end|>", "|<"],  # TODO possible fix of stop
)

css = """
    .importantButton {
        background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
        border: none !important;
    }
    .importantButton:hover {
        background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
        border: none !important;
    }
    .disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;}
    .xsmall {font-size: x-small;}
"""

with gr.Blocks(
    # title="mpt-30b-chat-ggml",
    title=f"{MODEL_FILENAME}",
    theme=gr.themes.Soft(text_size="sm", spacing_size="sm"),
    css=css,
) as block:
    with gr.Accordion("🎈 Info", open=False):
        # gr.HTML(
        #     """