import gradio as gr
from transformers import pipeline
import torch
import csv
import warnings

# Silence library warnings so the console output stays readable.
warnings.filterwarnings("ignore")

# Prompt template for Magicoder with a placeholder for the user instruction; the model generates the text that follows "@@ Response".
MAGICODER_PROMPT = """You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.
@@ Instruction
{instruction}
@@ Response
"""

# Build a text-generation pipeline for the Magicoder model, loading the weights in bfloat16 and letting device_map="auto" place them on the available devices.
generator = pipeline(
    model="ise-uiuc/Magicoder-S-DS-6.7B",
    task="text-generation",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
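
# Note (rough estimate, not from the original code): a 6.7B-parameter model in bfloat16
# needs on the order of 14 GB of accelerator memory. On a CPU-only machine, a float32
# fallback along these lines may be more practical, at the cost of much slower generation:
#
#   generator = pipeline(
#       model="ise-uiuc/Magicoder-S-DS-6.7B",
#       task="text-generation",
#       torch_dtype=torch.float32,
#       device="cpu",
#   )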

# Generate a completion for a single instruction and return only the model's response.
def generate_response(instruction):
    prompt = MAGICODER_PROMPT.format(instruction=instruction)
    # Greedy decoding (do_sample=False) gives deterministic output, equivalent to temperature 0.
    result = generator(prompt, max_length=2048, num_return_sequences=1, do_sample=False)
    response = result[0]["generated_text"]
    # The pipeline returns the prompt plus the completion; keep only the text after "@@ Response".
    response_start_index = response.find("@@ Response") + len("@@ Response")
    return response[response_start_index:].strip()
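
# Quick sanity check (illustrative; commented out so the Gradio app below stays the sole
# entry point):
#
#   print(generate_response("Write a Python function that checks whether a string is a palindrome."))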

# Append a single row of feedback data to a CSV file.
def save_to_csv(data, filename):
    with open(filename, 'a', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(data)
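
# Optional sketch (an assumption, not part of the original flow): write a header row the
# first time the file is created so the CSV is self-describing. Column names are illustrative.
#
#   import os
#   if not os.path.exists('output_ratings.csv'):
#       save_to_csv(["rating", "feedback", "correct_code"], 'output_ratings.csv')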

# Record whether the generated output was correct, together with any feedback
# and, for incorrect outputs, the user-supplied correct code.
def process_output(correct_output, feedback=None, correct_code=None):
    if correct_output.lower() == 'yes':
        save_to_csv(["Correct", feedback, ""], 'output_ratings.csv')
    else:
        save_to_csv(["Incorrect", feedback, correct_code], 'output_ratings.csv')

# Gradio interface components
input_text = gr.Textbox(lines=5, label="Enter your instruction here:")
correct_radio = gr.Radio(["yes", "no"], value="yes", label="Was the generated output correct?")
correct_code_input = gr.Textbox(lines=5, label="If not, please enter the correct code:")
output_text = gr.Textbox(label="Generated response:")

# Generate a response for the instruction and log the user's correctness rating.
def generate_and_process(instruction, correct_output, correct_code):
    generated_response = generate_response(instruction)
    if correct_output.lower() == "no":
        # The user-supplied correction also serves as the feedback column in the CSV.
        process_output(correct_output, feedback=correct_code, correct_code=correct_code)
    else:
        process_output(correct_output, feedback="")
    return generated_response

title = "Magicoder Assistant"
description = "An intelligent coding assistant that generates responses based on user instructions."
examples = [["Implement a high-level API for a TODO list application. The API takes as input an operation request and updates the TODO list in place. If the request is invalid, raise an exception.", "yes", ""]]

gr.Interface(
    fn=generate_and_process,
    inputs=[input_text, correct_radio, correct_code_input],
    outputs=output_text,
    title=title,
    description=description,
    examples=examples,
).launch()
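
# To try the app locally (assuming this script is saved as app.py and gradio, transformers,
# and torch are installed):
#
#   python app.py
#
# Gradio then serves the interface on its default local URL, http://127.0.0.1:7860.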