import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the MarianCG-CoNaLa-Large checkpoint (tokenizer + seq2seq model).
tokenizer = AutoTokenizer.from_pretrained("AhmedSSoliman/MarianCG-CoNaLa-Large")
model = AutoModelForSeq2SeqLM.from_pretrained("AhmedSSoliman/MarianCG-CoNaLa-Large")


def generate_code(NL):
    """Generate a Python code snippet from a natural-language description."""
    # Tokenize the description, padding/truncating to the model's 512-token limit.
    inputs = tokenizer(NL, padding="max_length", truncation=True, max_length=512, return_tensors="pt")
    input_ids = inputs.input_ids
    attention_mask = inputs.attention_mask
    # Run seq2seq generation and decode the top-ranked sequence back to text.
    outputs = model.generate(input_ids, attention_mask=attention_mask)
    output_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return output_code


# Gradio demo: one text box in, generated code out, with CoNaLa-style example prompts.
iface = gr.Interface(
    fn=generate_code,
    inputs="text",
    outputs="text",
    examples=[
        ["create array containing the maximum value of respective elements of array `[2, 3, 4]` and array `[1, 5, 2]`"],
        ["check if all elements in list `mylist` are identical"],
        ["enable debug mode on flask application `app`"],
        ["getting the length of `my_tuple`"],
        ['find all files in directory "/mydir" with extension ".txt"'],
    ],
    title="MarianCG: A Code Generation Transformer Model Inspired by Machine Translation",
    description="This is a code generation model which can generate code from a natural-language description.",
)

iface.launch()
# iface.launch(share=True)
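
# Minimal sketch of calling the model directly, without the Gradio UI
# (the prompt is taken from the examples above; uncomment to try it locally):
#
# print(generate_code("check if all elements in list `mylist` are identical"))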