import copy
import os

import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

import ingest
import run_localGPT_API

llm = Llama(
    model_path=hf_hub_download(
        repo_id=os.environ.get("REPO_ID", "TheBloke/Llama-2-7b-Chat-GGUF"),
        filename=os.environ.get("MODEL_FILE", "llama-2-7b-chat.Q4_K_M.gguf"),
    ),
    n_ctx=2048,
    n_gpu_layers=50,  # adjust n_gpu_layers up or down to match the VRAM you have available
)

system_message = """
You are a helpful, respectful and honest assistant. You should only respond to the following topics: water, climate, global warming, NASA data and geography. Always answer in the most helpful and safe way possible. Your answers should not include harmful, unethical, racist, sexist, toxic, dangerous or illegal content. Make sure that your answers are socially unbiased and positive in nature, and that they stay on the topics of water, climate, global warming, NASA data and geography. If a question doesn't make sense or isn't factually coherent, explain that only questions on the topics of water, climate, global warming, NASA data and geography are accepted. If you don't know the answer to a question, don't share false information.
"""


def generate_text(message, history):
    temp = ""
    # Build a Llama-2 chat prompt: the system message sits inside <<SYS>> tags in the
    # first [INST] block, followed by alternating user/assistant turns from the history.
    input_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n"
    for user_turn, assistant_turn in history:
        input_prompt += str(user_turn) + " [/INST] " + str(assistant_turn) + " [INST] "
    input_prompt += str(message) + " [/INST] "

    output = llm(
        input_prompt,
        temperature=0.15,
        top_p=0.1,
        top_k=40,
        repeat_penalty=1.1,
        max_tokens=1024,
        stop=[
            "<|prompter|>",
            "<|endoftext|>",
            "<|endoftext|> \n",
            "ASSISTANT:",
            "USER:",
            "SYSTEM:",
        ],
        stream=True,
    )
    # Stream the completion back to the UI, yielding the accumulated text so far.
    for out in output:
        stream = copy.deepcopy(out)
        temp += stream["choices"][0]["text"]
        yield temp


demo = gr.ChatInterface(
    generate_text,
    title="Katara LLM",
    description="LLM of project https://katara.earth/",
    examples=["Show me all about water"],
    cache_examples=True,
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear",
)

demo.queue(concurrency_count=1, max_size=5)
demo.launch()

# launch() blocks until the Gradio server shuts down, so ingestion and the
# localGPT API only start once the UI exits.
ingest.main()
run_localGPT_API.main()
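
# --- Illustrative only, not part of the app logic above: the prompt string that
# generate_text assembles for a one-turn history, assuming the Llama-2 chat format.
# The question/answer text below is a hypothetical placeholder.
#
#   [INST] <<SYS>>
#   {system_message}
#   <</SYS>>
#
#   How much of Earth is covered by water? [/INST] About 71 percent. [INST] What about ice? [/INST]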