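"""Gradio chat demo that serves TheBloke/CodeLlama-13B-Instruct-GGUF locally via ctransformers."""
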
import gradio as gr
from ctransformers import AutoModelForCausalLM

def load_llm():
    # Download (on first run) and load the GGUF-quantized CodeLlama 13B Instruct
    # model through ctransformers, running entirely on the local machine.
    llm = AutoModelForCausalLM.from_pretrained(
        "TheBloke/CodeLlama-13B-Instruct-GGUF",
        model_type='llama',
        max_new_tokens=1096,
        repetition_penalty=1.13,
        temperature=0.1
    )
    return llm

# Load the model once at startup so every chat turn reuses it instead of
# reloading the 13B weights for each message.
llm = load_llm()


def llm_function(message, chat_history):
    # gr.ChatInterface passes the running conversation history, but this simple
    # demo only sends the latest user message to the model.
    response = llm(message)
    return response

title = "CodeLlama 13B GGUF Demo"

examples = [
    'Write Python code to connect to a SQL database and list all of its tables.',
    'Write Python code to train a linear regression model using scikit-learn.',
    'Explain the concepts of functional programming.',
    'Can you explain the benefits of the Python programming language?'
]

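# Build the Gradio chat UI around llm_function and start the local web server.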
gr.ChatInterface(
    fn=llm_function,
    title=title,
    examples=examples
).launch()