"""
import gradio as gr
from transformers import pipeline

sentiment_pipeline = pipeline("sentiment-analysis", model="cardiffnlp/twitter-roberta-base-sentiment")
# texts = ["Hugging Face? Weird, but memorable.", "I am desperate."]
# results = sentiment_pipeline(texts)
# for text, result in zip(texts, results):
#     print(f"Text: {text}")
#     print(f"Sentiment: {result['label']}, Score: {result['score']:.4f}\n")
def predict_sentiment(text):
    # Return the top predicted label and its confidence score.
    result = sentiment_pipeline(text)
    return result[0]['label'], result[0]['score']

iface = gr.Interface(fn=predict_sentiment, inputs="text", outputs=["label", "number"])

if __name__ == "__main__":
    iface.launch()
"""
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
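# Run on the GPU when available; otherwise fall back to the CPU.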
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained("gpt2")
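# GPT-2 defines no pad token, so reuse the end-of-sequence token for padding.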
model = AutoModelForCausalLM.from_pretrained("gpt2", pad_token_id=tokenizer.eos_token_id).to(torch_device)
model_inputs = tokenizer('An explanation of Linear Regression: ', return_tensors='pt').to(torch_device)
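# Nucleus (top-p) sampling: top_k=0 disables top-k filtering, top_p=0.92 keeps
# the smallest token set whose cumulative probability exceeds 0.92, and
# temperature=0.6 sharpens the distribution before sampling.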
output = model.generate(**model_inputs, max_new_tokens=50, do_sample=True, top_p=0.92, top_k=0, temperature=0.6)
print(tokenizer.decode(output[0], skip_special_tokens=True))
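
# A deterministic alternative (a sketch, not part of the original script):
# greedy decoding always picks the most likely next token.
# output = model.generate(**model_inputs, max_new_tokens=50, do_sample=False)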