import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned Gemma-2B Python code-generation model and its tokenizer
tokenizer = AutoTokenizer.from_pretrained("Mr-Vicky-01/Gemma-2B-Finetuined-pythonCode")
model = AutoModelForCausalLM.from_pretrained("Mr-Vicky-01/Gemma-2B-Finetuined-pythonCode")
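# Optional (an assumption, not part of the original app): on a CUDA machine the
# model could be loaded in half precision to roughly halve memory use, e.g.
#   model = AutoModelForCausalLM.from_pretrained(
#       "Mr-Vicky-01/Gemma-2B-Finetuined-pythonCode", torch_dtype=torch.float16
#   )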

# Pick the device once at startup and move the model there, rather than
# re-moving it on every request inside generate_code
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

def generate_code(text):
    # Gemma chat format; the instruction wording is kept as-is since it matches
    # the prompt the model was fine-tuned on
    prompt = f"""
<start_of_turn>user based on given instruction create a solution\n\nhere are the instruction {text}
<end_of_turn>\n<start_of_turn>model
"""

    # Tokenize the prompt and move the input ids to the model's device
    inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=True).input_ids.to(device)

    # Greedy decoding, capped at 500 new tokens
    generated_ids = model.generate(
        inputs,
        max_new_tokens=500,
        do_sample=False,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode only the newly generated tokens, so the prompt is not echoed back
    # and the answer survives even if "user" or "model" appears in the text
    answer = tokenizer.decode(generated_ids[0][inputs.shape[-1]:], skip_special_tokens=True)
    return answer.strip()
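
# Quick local sanity check (hypothetical example input); uncomment to try the
# generator without launching the UI:
# print(generate_code("write a python function to check if a number is prime"))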

# Wire the generator into a simple text-in/text-out Gradio UI;
# share=True also exposes a temporary public link
demo = gr.Interface(fn=generate_code, inputs='text', outputs='text', title='Python Code Generator')
demo.launch(debug=True, share=True)