# -*- coding: utf-8 -*-
"""orca_mini_3b_T4_GPU.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/#fileId=https%3A//huggingface.co/psmathur/orca_mini_3b/blob/main/orca_mini_3b_T4_GPU.ipynb
"""
import torch
from transformers import LlamaForCausalLM, LlamaTokenizer
# Hugging Face model_path
model_path = 'psmathur/orca_mini_3b'
tokenizer = LlamaTokenizer.from_pretrained(model_path)
model = LlamaForCausalLM.from_pretrained(
    model_path, torch_dtype=torch.float16, device_map='auto',
)
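# Note (assumption, not part of the original notebook): device_map='auto' relies on
# the `accelerate` package to place the float16 weights on the available GPU; on
# CPU-only hardware this load call would need a different configuration.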
# Generate a response for a given system prompt, instruction, and optional context
def predict(system, instruction, context=None):
    # Build the orca_mini prompt format, with an optional "### Input:" section
    if context:
        prompt = f"### System:\n{system}\n\n### User:\n{instruction}\n\n### Input:\n{context}\n\n### Response:\n"
    else:
        prompt = f"### System:\n{system}\n\n### User:\n{instruction}\n\n### Response:\n"

    # Tokenize the prompt and move it to the GPU
    tokens = tokenizer.encode(prompt)
    tokens = torch.LongTensor(tokens).unsqueeze(0)
    tokens = tokens.to('cuda')

    # Sampling settings
    instance = {'input_ids': tokens, 'top_p': 1.0, 'temperature': 0.7, 'generate_len': 1024, 'top_k': 50}

    length = len(tokens[0])
    with torch.no_grad():
        rest = model.generate(
            input_ids=tokens,
            max_length=length + instance['generate_len'],
            use_cache=True,
            do_sample=True,
            top_p=instance['top_p'],
            temperature=instance['temperature'],
            top_k=instance['top_k'],
        )

    # Drop the prompt tokens and decode only the newly generated text
    output = rest[0][length:]
    string = tokenizer.decode(output, skip_special_tokens=True)
    return f'[!] Response: {string}'
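# Example usage outside the UI (hypothetical values, kept as a comment so the
# Space only runs the Gradio app below):
# predict("You are a helpful AI assistant.",
#         "Explain what an orca is in two sentences.")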
import gradio as gr

# Define input components
prompt_input = gr.Textbox(label="System")
instruction_input = gr.Textbox(label="Instruction")
context_input = gr.Textbox(label="Context")

# Define output component
output_text = gr.Textbox(label="Output")

# Create the interface and launch it with request queuing enabled
gr.Interface(fn=predict,
             inputs=[prompt_input, instruction_input, context_input],
             outputs=output_text).queue().launch(debug=True)