import gradio as gr
from transformers import pipeline

# Load GPT-Neo 2.7B, a general-purpose language model that can also generate code
generator = pipeline('text-generation', model='EleutherAI/gpt-neo-2.7B')

def generate_code(prompt):
    # Generate code from the model; do_sample=True is needed for temperature to take effect
    responses = generator(prompt, max_length=500, num_return_sequences=1, do_sample=True, temperature=0.1)
    # responses = generator(prompt, max_length=50, num_return_sequences=1, temperature=0.5)
    return responses[0]['generated_text']

input_textbox = gr.Textbox(label="We shall get back to you!", lines=3)
outputs = gr.Markdown()

# Earlier draft of the interface, kept for reference (greet, examples_list and title
# are not defined in this file):
# ex = gr.Examples(examples_list, input_textbox, examples_per_page=50)
# demo = gr.Interface(
#     fn=greet,
#     inputs=[input_textbox],
#     examples=examples_list,
#     outputs=outputs,
#     title=title,
#     description="Your Python Projects Coding Companion!",
#     allow_flagging="never",
#     examples_per_page=50,
#     theme=gr.themes.Default(),
# )

# Create a Gradio Interface
iface = gr.Interface(
    fn=generate_code,
    inputs=input_textbox,
    outputs=outputs,
    examples=[["Create a Python function to add two numbers"]],
)

# Run the interface
if __name__ == "__main__":
    iface.launch()

# Alternative: a smaller model fine-tuned on Python code, if GPT-Neo 2.7B is too heavy:
# generator = pipeline('text-generation', model='microsoft/CodeGPT-small-py')
# e.g. with examples=[["Define a Python function to calculate factorial."]]
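
# Optional: a quick smoke test of generate_code() without starting the Gradio UI.
# This is only a sketch; it assumes the GPT-Neo 2.7B weights (several GB) are already
# downloaded and cached, and the prompt below is purely illustrative.
# if __name__ == "__main__":
#     print(generate_code("Create a Python function to add two numbers"))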