from transformers import AutoTokenizer
import transformers
import os
import sys
import fire
import torch
import gradio as gr


def main(base_model="ise-uiuc/Magicoder-S-DS-6.7B"):
    """Launch a Gradio playground UI for the Magicoder code-generation model.

    Args:
        base_model: Hugging Face model id (or local path) loaded into the
            text-generation pipeline. Defaults to Magicoder-S-DS-6.7B.
    """
    pipeline = transformers.pipeline(
        "text-generation",
        model=base_model,
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )

    def evaluate_magicoder(instruction, temperature=1, max_length=2048):
        """Generate a model response for *instruction*, minus the prompt prefix.

        Args:
            instruction: The user's request text, substituted into the
                Magicoder instruction template.
            temperature: Sampling temperature; 0 selects greedy decoding
                (``do_sample`` is only enabled for temperature > 0, since
                sampling with T=0 is invalid).
            max_length: Maximum total sequence length (prompt + generation).

        Returns:
            The generated text with the leading prompt removed, or an empty
            string if the pipeline produced no sequences.
        """
        # Canonical Magicoder instruction format: the @@ markers must sit on
        # their own lines for the model to follow the template.
        MAGICODER_PROMPT = """You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.

@@ Instruction
{instruction}

@@ Response
"""
        prompt = MAGICODER_PROMPT.format(instruction=instruction)

        if temperature > 0:
            sequences = pipeline(
                prompt,
                do_sample=True,
                temperature=temperature,
                max_length=max_length,
            )
        else:
            # Greedy decoding: passing temperature=0 with do_sample would error.
            sequences = pipeline(
                prompt,
                max_length=max_length,
            )

        # Fix: the original returned from inside a loop over `sequences`, so an
        # empty result list silently returned None to the UI. Only the first
        # sequence was ever used, so index it directly.
        if not sequences:
            return ""
        # Fix: use removeprefix instead of .replace(prompt, "") so only the
        # leading prompt is stripped, not every occurrence of the prompt text
        # that might appear inside the generated output.
        return sequences[0]["generated_text"].removeprefix(prompt)

    gr.Interface(
        fn=evaluate_magicoder,
        inputs=[
            gr.components.Textbox(
                lines=3,
                label="Instruction",
                placeholder="Anything you want to ask Magicoder ?",
                value="Write a snake game in Python using the turtle library (the game is created by Magicoder).",
            ),
            gr.components.Slider(minimum=0, maximum=1, value=0, label="Temperature"),
            gr.components.Slider(
                minimum=1, maximum=2048, step=1, value=2048, label="Max tokens"
            ),
        ],
        outputs=[
            gr.components.Textbox(
                lines=30,
                label="Output",
            )
        ],
        title="🎩 Magicoder",
        description="This is a playground for Magicoder-S-DS-6.7B! Follow us on Github: https://github.com/ise-uiuc/magicoder and Huggingface: https://huggingface.co/ise-uiuc.",
    ).queue().launch()


if __name__ == "__main__":
    fire.Fire(main)