import os

import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

import ingest

llm = Llama(
    model_path=hf_hub_download(
        repo_id=os.environ.get("REPO_ID", "TheBloke/Llama-2-7b-Chat-GGUF"),
        filename=os.environ.get("MODEL_FILE", "llama-2-7b-chat.Q4_K_M.gguf"),
    ),
    n_ctx=2048,        # context window, in tokens
    n_gpu_layers=50,   # lower this if you have less VRAM; 0 runs fully on CPU
)
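
# The REPO_ID / MODEL_FILE env vars above let you swap in a different GGUF
# build without code changes. Hypothetical shell usage (repo and file names
# are examples only):
#   REPO_ID="TheBloke/Llama-2-13B-chat-GGUF" \
#   MODEL_FILE="llama-2-13b-chat.Q4_K_M.gguf" \
#   python app.py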


system_message = """
You are a helpful, respectful and honest assistant. Your answers should only cover the following topics: water, climate, global warming, NASA data and geography. Always answer as helpfully as possible while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make sense or is not factually coherent, explain that only the topics listed above are accepted. If you don't know the answer to a question, please don't share false information.
"""


def generate_text(message, history):
    """Stream a reply to `message`, replaying `history` in Llama-2 chat format."""
    temp = ""
    input_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n "
    for user_msg, assistant_msg in history:
        input_prompt += f"{user_msg} [/INST] {assistant_msg} </s><s> [INST] "

    input_prompt += f"{message} [/INST] "
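
    # Resulting prompt shape for one prior turn (sketch, whitespace simplified):
    #   [INST] <<SYS>> {system_message} <</SYS>>
    #   prev_user [/INST] prev_reply </s><s> [INST] message [/INST]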

    output = llm(
        input_prompt,
        temperature=0.15,
        top_p=0.1,
        top_k=40,
        repeat_penalty=1.1,
        max_tokens=1024,
        stop=[
            # Conservative stop strings; most belong to other chat templates
            # and are kept only as a safety net against template bleed-through.
            "<|prompter|>",
            "<|endoftext|>",
            "<|endoftext|> \n",
            "ASSISTANT:",
            "USER:",
            "SYSTEM:",
        ],
        stream=True,  # yield partial completions chunk by chunk
    )
    # Accumulate streamed chunks so Gradio re-renders the growing answer.
    for out in output:
        temp += out["choices"][0]["text"]
        yield temp


# gr.ChatInterface treats a generator handler as a streaming response: each
# yielded string replaces the in-progress assistant message in the UI.
demo = gr.ChatInterface(
    generate_text,
    title="Katara LLM",
    description="LLM for the https://katara.earth/ project",
    examples=["Show me all about water"],
    cache_examples=True,
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear",
)
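
# Serve one generation at a time (a single llama_cpp.Llama instance is not
# safely shared across concurrent requests here) and queue at most 5 waiting
# requests.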
demo.queue(concurrency_count=1, max_size=5)

# Build the document index before serving: demo.launch() blocks until the
# server exits, so anything placed after it would not run while the app is live.
ingest.main()

demo.launch()