njain1978 committed on
Commit
62a27fc
1 Parent(s): 1d6d335

Upload 4 files

Browse files
Files changed (4) hide show
  1. app.py +142 -0
  2. requirements.txt +1 -0
  3. src/constants.py +11 -0
  4. src/utils.py +38 -0
app.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Streamlit front-end for exploring models on the Huggingface Inference API:
# pick a task and a model, submit an input, and render the model's output.
from src.utils import (get_output, show_output)
from src.constants import TASKS
import streamlit as st
import json

with st.sidebar:
    st.markdown(
        "## How to use\n"
        "1. Enter your [Huggingface API token](https://huggingface.co/settings/tokens) (Recommended, as some models won't work)\n"
        "2. Choose a task to perform \n"
        "3. Choose a LLM model corresponding to task \n"
        "4. Enter a different LLM model id from Huggingface hub (Optional) \n"
    )

    st.write("##")

    # Huggingface API token; type="password" keeps it masked in the UI.
    huggingface_api_token = st.text_input(
        "Huggingface API Token (Optional)",
        key="hf_api_token",
        type="password",
        help="You can get your API token from https://huggingface.co/settings/tokens.",
    )

    # The chosen task selects which curated model list is offered.
    task = st.selectbox("Choose a task", list(TASKS.keys()))
    select_model = st.selectbox("Choose a model", TASKS[task])

    # A free-form hub model id, when provided, overrides the curated choice.
    optional_model = st.text_input("Enter Huggingface model (Optional)")

model = optional_model if optional_model else select_model

st.header("Huggingface Hub Model Explorer")
st.subheader(f"{task} with {model}")

# Per-task input/output widgets. Each branch builds a JSON payload (or raw
# bytes for images), calls the Inference API, and renders the result.
if task == "Text Generation":
    text_input = st.text_input("Enter some text to generate from")
    generate_button = st.button("Generate")

    if generate_button and text_input:
        data = json.dumps({"inputs": text_input})
        output = get_output(task, model, data, api_token=huggingface_api_token)
        show_output(output)

elif task == "Text Summarization":
    text_area = st.text_area("Enter some text to summarize")
    summarize_button = st.button("Summarize")

    if summarize_button and text_area:
        data = json.dumps({"inputs": text_area})
        output = get_output(task, model, data, api_token=huggingface_api_token)
        show_output(output)

elif task == "Text Classification":
    text_input = st.text_input("Enter some text to classify")
    classify_button = st.button("Classify")

    if classify_button and text_input:
        data = json.dumps({"inputs": text_input})
        output = get_output(task, model, data, api_token=huggingface_api_token)
        # Success payload is a list of [{label, score}, ...]; a dict means
        # get_output returned an error descriptor -- show it instead of
        # crashing on output[0].
        if isinstance(output, list):
            for out in output[0]:
                st.text(f"{out['label']}: {round(out['score'] * 100, 1)}%")
        else:
            show_output(output)

elif task == "Text-to-Speech":
    text_input = st.text_input("Enter some text to synthesize")
    synthesize_button = st.button("Synthesize")

    if synthesize_button and text_input:
        data = json.dumps({"inputs": text_input})
        # model_type="audio" makes get_output return the raw HTTP response.
        output = get_output(task, model, data, api_token=huggingface_api_token, model_type="audio")
        # st.audio accepts raw bytes directly -- no temp file needed.
        st.audio(output.content)

elif task == "Image Classification":
    image_file = st.file_uploader("Upload an image to classify")
    classify_button = st.button("Classify")
    output_widget = st.empty()

    if classify_button and image_file:
        # Raw image bytes are sent as the request body (not JSON).
        image_bytes = image_file.read()
        output = get_output(task, model, data=image_bytes, api_token=huggingface_api_token)
        # As above, guard against an error dict before iterating labels.
        if isinstance(output, list):
            for items in output:
                st.text(f"{items['label']}: {round(items['score'] * 100, 1)}%")
        else:
            show_output(output)
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ streamlit==1.27.0
+ requests
src/constants.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
# Base URL of the Huggingface Inference API; model endpoints are built as
# f"{HF_API_URL}/models/<model-id>" (see src/utils.get_output).
HF_API_URL = "https://api-inference.huggingface.co"

# Curated Huggingface hub model ids offered in the UI, keyed by task name.
# The keys double as the task labels shown in the app's task selectbox.
# NOTE(review): the "Text-to-Speech" list includes wav2vec2 checkpoints,
# which are speech-recognition (ASR) models, not TTS -- verify they work
# with the audio-generation flow before relying on them.
TASKS = {
    "Text Generation": ["gpt2", "gpt2-medium", "gpt2-large", "gpt2-xl"],
    "Text Summarization": ["facebook/bart-large-cnn", "philschmid/bart-large-cnn-samsum", "t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"],
    "Text Classification": ["distilbert-base-uncased-finetuned-sst-2-english", "ProsusAI/finbert", "cardiffnlp/twitter-roberta-base-sentiment-latest", "bert-base-uncased", "bert-large-uncased", "distilbert-base-uncased"],
    "Text-to-Speech": ["microsoft/speecht5_tts", "facebook/wav2vec2-base-960h", "facebook/wav2vec2-large-960h-lv60", "facebook/wav2vec2-large-xlsr-53"],
    "Image Classification": ["google/vit-base-patch16-224", "google/vit-large-patch16-224", "google/vit-large-patch32-224"]
}
src/utils.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from src.constants import HF_API_URL
2
+ import streamlit as st
3
+ import requests
4
+
5
+
# Call the Huggingface Inference API and normalize its result.
def get_output(task, model, data, api_token, model_type=None):
    """Send *data* to the Inference API endpoint for *model* and return the result.

    Parameters
    ----------
    task : str
        Human-readable task name; forwarded as a query parameter.
    model : str
        Model id on the Huggingface hub (e.g. "gpt2").
    data : str | bytes
        Request body: JSON-encoded text payload, or raw image bytes.
    api_token : str
        Huggingface API token; "" sends an unauthenticated request.
    model_type : str | None
        When not None (e.g. "audio"), the raw ``requests.Response`` is
        returned so the caller can read ``response.content``.

    Returns
    -------
    dict | list | requests.Response
        Decoded JSON on success, an ``{"error", "description"}`` dict on
        HTTP failure, or the raw response when *model_type* is set.
    """
    url = f"{HF_API_URL}/models/{model}?task={task}"

    # Security: the token is a secret -- never print/log it.
    # Only attach the Authorization header when a token was supplied.
    headers = {"Authorization": f"Bearer {api_token}"} if api_token else {}
    # A timeout prevents the app from hanging forever on a stalled API.
    response = requests.post(url, headers=headers, data=data, timeout=60)

    if model_type is not None:
        # Binary output (e.g. synthesized audio): hand back the raw response.
        return response

    if response.status_code == 200:
        return response.json()
    if response.status_code == 404:
        return {"error": f"Request failed with status code {response.status_code}",
                "description": "Please check whether Model id is correct or Inference API is available for this model."}
    return {"error": f"Request failed with status code {response.status_code}",
            "description": None}
30
+
31
+
32
## Render an API result for the chosen task: text output on success, errors otherwise.
def show_output(output):
    """Display *output*: a success list shows its first field, a dict shows errors."""
    if isinstance(output, list):
        first_result = output[0]
        # The model returns a single field (e.g. "generated_text" or
        # "summary_text"); display whichever field that is.
        value = next(iter(first_result.values()))
        st.text_area(label="Generated Text Output", value=value)
    else:
        # Error descriptor produced by get_output.
        st.error(output['error'])
        st.error(output['description'])