""" A Utility calss which contains most commonly used functions """ | |
import huggingface_hub | |
import huggingface_hub.hf_api | |
import psutil | |
import torch | |
import functools | |
import socket | |
import cryptography | |
import cryptography.fernet | |
import os | |
class Utility(object): | |
def __init__(self, name="Utility") -> None: | |
self.name = name | |
self.author = "Duc Haba, Girish" | |
self._pp("Hello from class", str(self.__class__) + " Class: " + str(self.__class__.__name__)) | |
self._pp("Code name", self.name) | |
#Define encrypted keys | |
self._huggingface_key="gAAAAABkgtmOIjpnjwXFWmgh1j2et2kMjHUze-ym6h3BieAp34Sqkqv3EVYvRinETvpw-kXu7RSRl5_9FqrYe-7unfakMvMkU8nHrfB3hBSC76ZTXwkVSzlN0RfBNs9NL8BGjaSJ8mz8" | |
#Key for crypto | |
self._fkey=os.getenv("hf_encrypt_decrypt_key") | |
return | |
    # Print : Pretty print output name-value line
    def _pp(self, a, b, is_print=True):
        # print("%34s : %s" % (str(a), str(b)))
        x = f'{"%34s" % str(a)} : {str(b)}'
        y = None
        if (is_print):
            print(x)
        else:
            y = x
        return y

    # Print : Pretty print the header or footer lines
    def _ph(self, is_print=True):
        x = f'{"-"*34} : {"-"*34}'
        y = None
        if (is_print):
            print(x)
        else:
            y = x
        return y
    # Hugging face : Login to Hugging face
    def _login_hface(self):
        huggingface_hub.login(self._decrypt_it(self._huggingface_key),
                              add_to_git_credential=True)  # non-blocking login
        self._ph()
        return
    # Hugging face : Push files to Hugging face
    def push_hface_files(self,
                         hf_names,
                         hf_space="GirishKiran/yml",
                         local_dir="/content/"):
        f = str(hf_names) + " is not iterable, type: " + str(type(hf_names))
        try:
            for f in hf_names:
                lo = local_dir + f
                huggingface_hub.upload_file(
                    path_or_fileobj=lo,
                    path_in_repo=f,
                    repo_id=hf_space,
                    repo_type="space")
        except Exception as e:
            self._pp("*Error", e)
        return
    # Hugging face : Push folders to Hugging face
    def push_hface_folder(self, hf_folder, hf_space_id, hf_dest_folder=None):
        api = huggingface_hub.HfApi()
        api.upload_folder(folder_path=hf_folder,
                          repo_id=hf_space_id,
                          path_in_repo=hf_dest_folder,
                          repo_type="space")
        return
    # System Info : Fetch available CPU and RAM of the system
    def fetch_system_info(self):
        s = ''
        # Get CPU usage as a percentage
        cpu_usage = psutil.cpu_percent()
        # Get available memory in bytes
        mem = psutil.virtual_memory()
        # Convert bytes to gigabytes
        mem_total_gb = mem.total / (1024 ** 3)
        mem_available_gb = mem.available / (1024 ** 3)
        mem_used_gb = mem.used / (1024 ** 3)
        # Collect the results
        s += f"CPU usage: {cpu_usage}%\n"
        s += f"Total memory: {mem_total_gb:.2f} GB\n"
        s += f"Available memory: {mem_available_gb:.2f} GB\n"
        # print(f"Used memory: {mem_used_gb:.2f} GB")
        s += f"Memory usage: {mem_used_gb / mem_total_gb * 100:.2f}%\n"
        return s
    # System Info : Fetch GPU information of the system
    def fetch_gpu_info(self):
        s = ''
        try:
            s += f'Your GPU is the {torch.cuda.get_device_name(0)}\n'
            s += f'GPU ready status: {torch.cuda.is_available()}\n'
            s += f'GPU allocated RAM: {round(torch.cuda.memory_allocated(0)/1024**3, 1)} GB\n'
            s += f'GPU reserved RAM: {round(torch.cuda.memory_reserved(0)/1024**3, 1)} GB\n'
        except Exception as e:
            s += f'**Warning, no GPU: {e}'
        return s
    # System Info : Fetch host ip address
    def fetch_host_ip(self):
        s = ''
        hostname = socket.gethostname()
        ip_address = socket.gethostbyname(hostname)
        s += f"Hostname: {hostname}\n"
        s += f"IP Address: {ip_address}\n"
        return s
    # Create and write data to a file
    def write_file(self, fname, txt):
        with open(fname, "w") as f:
            f.writelines("\n".join(txt))
        return
    # Crypto : Fetch crypto key
    def _fetch_crypt(self, is_generate=False):
        # The environment key is stored reversed; un-reverse it before use
        s = self._fkey[::-1]
        if (is_generate):
            # Note: self._xkeyfile is expected to be set elsewhere before calling with is_generate=True
            s = open(self._xkeyfile, "rb").read()
        return s
    # Crypto : Decrypt value
    def _decrypt_it(self, x):
        y = self._fetch_crypt()
        f = cryptography.fernet.Fernet(y)
        m = f.decrypt(x)
        return m.decode()

    # Crypto : Encrypt value
    def _encrypt_it(self, x):
        key = self._fetch_crypt()
        p = x.encode()
        f = cryptography.fernet.Fernet(key)
        y = f.encrypt(p)
        return y
    # Capitalize : Capitalizes the first letter of each word in a list.
    def capitalize_first_letter(self, list_of_words):
        capitalized_words = []
        for word in list_of_words:
            capitalized_word = word[0].upper() + word[1:]
            capitalized_words.append(capitalized_word)
        return capitalized_words

# Add method to class : decorator that attaches a function to an existing class
def add_method(cls):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        setattr(cls, func.__name__, wrapper)
        return func  # returning func means func can still be used normally
    return decorator
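
# Usage sketch (illustration only, not called by this Space): a minimal, hypothetical
# example of how the Utility helpers above could be exercised. It assumes the
# "hf_encrypt_decrypt_key" environment variable holds a valid (reversed) Fernet key,
# as _fetch_crypt() expects.
def _utility_usage_sketch():
    util = Utility(name="Sketch")
    util._ph()
    util._pp("System info", util.fetch_system_info())
    util._pp("Capitalized", util.capitalize_first_letter(["joy", "anger"]))
    token = util._encrypt_it("secret-value")         # Fernet-encrypt a plain string
    util._pp("Round trip", util._decrypt_it(token))  # decrypt back to the original text
    util._ph()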
""" This file contains multiple Python classes and responssible to provide Emotions based on the given user input | |
Currently it supports emotions like Anger, Joy, Optimism and Sadness""" | |
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline | |
from matplotlib.colors import LinearSegmentedColormap | |
import scipy | |
import scipy.special | |
import pandas | |

class SentimentAnalyser(object):
    global utility

    # Initialize the object
    def __init__(self, name="Sentiment", *args, **kwargs):
        super(SentimentAnalyser, self).__init__(*args, **kwargs)
        self.author = "Duc Haba, Girish"
        self.name = name
        utility = Utility(name="Calling From SentimentAnalyser")
        self.utility = utility
        utility._ph()
        utility._pp("Hello from class", str(self.__class__) + " Class: " + str(self.__class__.__name__))
        utility._pp("Code name", self.name)
        utility._pp("Author is", self.author)
        utility._ph()
        print(utility.fetch_system_info())
        utility._ph()
        print(utility.fetch_gpu_info())
        utility._ph()
        print(utility.fetch_host_ip())
        utility._ph()
        self._init_model()
        utility._login_hface()
        return

    # Initialize the model
    def _init_model(self):
        modelLink = "bhadresh-savani/distilbert-base-uncased-emotion"
        self.tokenizer = AutoTokenizer.from_pretrained(modelLink)
        self.model = AutoModelForSequenceClassification.from_pretrained(modelLink)
        return
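
# Alternative sketch (not used by this Space): the same model could be wrapped in a
# transformers pipeline, which handles tokenization and softmax internally. This is
# an illustration under that assumption, not part of the app flow.
def _pipeline_sketch(text):
    classifier = pipeline("text-classification",
                          model="bhadresh-savani/distilbert-base-uncased-emotion",
                          top_k=None)  # top_k=None returns scores for every emotion label
    return classifier(text)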

sentiment = SentimentAnalyser(name="EmotionAnalyser")

def _predict_sentiment(p):
    # Tokenize input
    inputs = sentiment.tokenizer(p, return_tensors="pt")
    # Pass inputs through the model
    outputs = sentiment.model(**inputs)
    print(outputs)
    # Raw logits for the single input sentence
    out_data = outputs[0][0]
    scores = out_data.detach().numpy()
    print(out_data)
    # Convert logits to probabilities
    scores = scipy.special.softmax(scores)
    sentiment_map = sentiment.utility.capitalize_first_letter(sentiment.model.config.label2id.keys())
    df_out = pandas.DataFrame([scores], columns=sentiment_map)
    df_out = df_out[['Love', 'Joy', 'Surprise', 'Fear', 'Sadness', 'Anger']]
    return df_out

def draw_bar_plot(df_data, title='Sentiment Analysis', xlabel='p string', ylabel='Emotion Score'):
    graphCmap = LinearSegmentedColormap.from_list('gr', ["g", "w", "r"])
    pic = df_data.plot.bar(cmap=graphCmap,
                           title=title,
                           ylabel=ylabel,
                           xlabel=xlabel,
                           grid=True)
    return pic

def predict_sentiment(p):
    df_out = _predict_sentiment(p)
    max_column = df_out.loc[0].idxmax()
    max_value = df_out.loc[0].max()
    title = f'Sentiment Analysis: {max_column}: {round(max_value*100, 1)}%'
    xlabel = f'Input: {p}'
    pic = draw_bar_plot(df_out, title=title, xlabel=xlabel)
    return pic.get_figure(), df_out.to_json()
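
# Usage sketch (assumption: run locally, outside the Gradio UI defined below):
#   fig, raw_json = predict_sentiment("I love sunny mornings.")
#   fig.savefig("sentiment.png")  # fig is a matplotlib Figure; raw_json is a JSON string of the six scores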

import gradio

in_box = [gradio.Textbox(lines=1, label="Input", placeholder="type text here")]
out_box = [gradio.Plot(label="Sentiment Score:"),
           gradio.Textbox(lines=4, label="Raw JSON Response:")]
title = "Sentiment Analysis: Understanding the Emotional Tone of Text"
desc = "Sentiment analysis is a powerful tool that can be used to gain insights into how people feel about the world around them."
exp = [
    ['I am feeling very bad today.'],
    ['I hate to swim early morning.']
]
arti= "<b>DistilBERT is 27 times faster than OpenAI, making it the clear winner for speed-sensitive applications.</b>\n\nWe did a comparision of OpenAI vs DestilBert model (which we are currently using in this space) by running 31 sentences in a loop and found DestilBert is 27 times faster than OpenAI." | |

gradio.Interface(fn=predict_sentiment,
                 inputs=in_box,
                 outputs=out_box,
                 title=title,
                 description=desc,
                 examples=exp,
                 article=arti).launch(debug=True)