|
import os  # kept for backward compatibility with the rest of the file
import subprocess
import sys

# Install runtime dependencies. The original code ran two separate
# `os.system("pip install ...")` calls, installing `transformers` twice;
# a single invocation through the current interpreter's pip is both
# sufficient and safer (no shell string, correct interpreter).
# NOTE(review): installing packages at import time is fragile — prefer a
# requirements.txt in the deployment environment.
result = subprocess.run(
    [sys.executable, "-m", "pip", "install", "transformers", "torch", "psutil"],
    check=False,  # best-effort, matching the original os.system behavior
).returncode
|
|
|
|
|
from transformers import pipeline |
|
import gradio as gr |
|
import psutil |
|
|
|
def get_memory_usage():
    """Return a human-readable string with this process's resident memory (MB)."""
    rss_bytes = psutil.Process().memory_info().rss
    rss_mb = rss_bytes / (1024 * 1024)
    return f"Memory Usage: {rss_mb:.2f} MB"
|
|
|
|
|
# Load the 2.7B-parameter Japanese GPT-NeoX model; this downloads the
# weights on first run and is a heavy import-time side effect.
generator = pipeline(task="text-generation", model="abeja/gpt-neox-japanese-2.7b")
|
|
|
def generate_text(prompt):
    """Sample three continuations of *prompt* with the loaded pipeline.

    Returns the raw pipeline output (a list of dicts, each containing a
    ``generated_text`` key).
    """
    sampling_options = {
        "max_length": 300,
        "do_sample": True,
        "num_return_sequences": 3,
        "top_p": 0.95,
        "top_k": 50,
    }
    return generator(prompt, **sampling_options)
|
|
|
# Bug fix: "list" is not a valid Gradio output component shortcut; use
# "json" so the pipeline's list-of-dicts return value renders correctly.
# NOTE(review): description is evaluated once at startup, so the memory
# figure shown is the launch-time value, not a live reading.
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="json",
    description=get_memory_usage(),
)

iface.launch()
|
|