import os

os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"  # enable faster downloads via hf_transfer

import gradio as gr
from huggingface_hub import hf_hub_download
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import LlamaCpp

# Echo generated tokens to stdout as they are produced (server-side logging).
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])

# Fetch the quantized GGUF weights into the working directory.
repo_id = "TheBloke/Mistral-7B-OpenOrca-GGUF"
model_name = "mistral-7b-openorca.Q5_K_M.gguf"
hf_hub_download(repo_id=repo_id, filename=model_name, local_dir=".")

llm = LlamaCpp(
    model_path=model_name,
    n_ctx=4096,
    callback_manager=callback_manager,
    verbose=True,  # verbose is required to pass to the callback manager
)


def format_prompt(message, history):
    """Build a ChatML prompt from the chat history and the new user message."""
    prompt = ""
    for user_prompt, bot_response in history:
        prompt += f"<|im_start|>user\n{user_prompt}<|im_end|>\n"
        prompt += f"<|im_start|>assistant\n{bot_response}<|im_end|>\n"
    prompt += f"<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
    return prompt


def generate(
    prompt, history, temperature=0.9, top_p=0.95, max_new_tokens=256, repetition_penalty=1.0
):
    """Generate one assistant reply for the ChatInterface."""
    temperature = max(float(temperature), 1e-2)  # clamp to avoid a zero temperature
    top_p = float(top_p)

    formatted_prompt = format_prompt(prompt, history)
    output = llm(
        formatted_prompt,
        temperature=temperature,
        max_tokens=max_new_tokens,
        repeat_penalty=repetition_penalty,
        top_p=top_p,
        stop=["<|im_end|>", "<|im_start|>user"],
    )
    return output


additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Max new tokens",
        value=400,
        minimum=0,
        maximum=1048,
        step=64,
        interactive=True,
        info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]

css = """
#mkd {
    height: 500px;
    overflow: auto;
    border: 1px solid #ccc;
}
"""
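
# Optional: a streaming variant of generate(). This is a sketch, not part of
# the original app. It assumes the LangChain LlamaCpp wrapper exposes the
# underlying llama-cpp-python model as `llm.client`, and relies on
# gr.ChatInterface streaming partial output when its callback is a generator.
def generate_stream(
    prompt, history, temperature=0.9, top_p=0.95, max_new_tokens=256, repetition_penalty=1.0
):
    formatted_prompt = format_prompt(prompt, history)
    output = ""
    # llama-cpp-python yields OpenAI-style completion chunks when stream=True.
    for chunk in llm.client(
        formatted_prompt,
        temperature=max(float(temperature), 1e-2),
        max_tokens=max_new_tokens,
        repeat_penalty=repetition_penalty,
        top_p=float(top_p),
        stop=["<|im_end|>", "<|im_start|>user"],
        stream=True,
    ):
        output += chunk["choices"][0]["text"]
        yield output  # yield the accumulated text so the UI updates in place
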
with gr.Blocks(css=css) as demo:
    gr.HTML("<center><h1>Mistral 7B OpenOrca</h1></center>")
") gr.HTML("

In this demo, you can chat with Mistral-7B-Instruct model. 💬

") gr.HTML("

Learn more about the model here. 📚

") gr.HTML(f"

it's lamacpp running {model_name} from {repo_id}

") gr.ChatInterface( generate, additional_inputs=additional_inputs, examples=[["What is the secret to life?"], ["Write me a recipe for pancakes."]] ) demo.queue(max_size=None).launch(debug=True)