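"""Gradio chat demo: load a CodeLlama GGML model with CTransformers, wrap it
in a LangChain LLMChain, and return code answers to user queries."""
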
from langchain_community.llms.ctransformers import CTransformers
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
import gradio as gr
import time

custom_prompt_template = """
You are an AI coding assistant. Your task is to solve coding problems and
return code snippets based on the user's query. Below is the user's query.
Query: {query}
Return only the helpful code and any related details.
Helpful code and related details:
"""

def set_custom_prompt():
    """Build the PromptTemplate that feeds the user's query into the model."""
    prompt = PromptTemplate(
        template=custom_prompt_template,
        input_variables=['query']
    )
    return prompt

def load_model():
    """Load the CodeLlama GGML model through the CTransformers wrapper."""
    llm = CTransformers(
        model='TheBloke/CodeLlama-7B-Instruct-GGML',
        model_type='llama',
        # Generation settings go in the `config` dict used by this wrapper.
        config={
            'max_new_tokens': 1096,
            'temperature': 0.2,
            'repetition_penalty': 1.13
        }
    )
    return llm

def chain_pipeline():
    """Wire the model and the prompt together into an LLMChain."""
    llm = load_model()
    qa_prompt = set_custom_prompt()
    qa_chain = LLMChain(
        prompt=qa_prompt,
        llm=llm
    )
    return qa_chain

llmchain = chain_pipeline()

def bot(query):
    """Run the chain on the user's query and return the reply as a code block."""
    llm_response = llmchain.run({'query': query})
    # Wrap the response in triple backticks for code formatting
    formatted_response = f"```\n{llm_response}\n```"
    return formatted_response

with gr.Blocks(title="Can AI code?") as demo:
    gr.Markdown('# Code Llama demo')
    chatbot = gr.Chatbot([], elem_id='chatbot', height=700)
    msg = gr.Textbox()
    clear = gr.ClearButton([msg, chatbot])

    def respond(message, chat_history):
        bot_message = bot(message)
        chat_history.append((message, bot_message))
        time.sleep(2)  # short artificial delay before the chat history updates
        return "", chat_history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])

demo.launch()
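
# --- Usage sketch (assumptions, not part of the original app) ---
# The package and file names below are assumptions based on the imports above:
#
#   pip install langchain langchain-community ctransformers gradio
#   python app.py            # assuming this script is saved as app.py
#
# Gradio then serves the chat UI locally, by default at http://127.0.0.1:7860.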