File size: 7,157 Bytes
ec2757f
 
 
 
 
 
 
 
 
 
52ecdad
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ec2757f
 
 
 
 
 
52ecdad
 
 
ec2757f
 
 
 
 
a7c6e6a
ec2757f
 
 
52ecdad
 
 
 
 
 
 
 
 
ec2757f
 
52ecdad
 
 
 
 
c8780fe
52ecdad
 
 
 
 
ec2757f
52ecdad
 
 
ec2757f
 
52ecdad
 
ec2757f
 
52ecdad
 
ec2757f
52ecdad
ec2757f
 
 
52ecdad
 
 
 
 
 
 
 
ec2757f
52ecdad
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ec2757f
 
52ecdad
 
 
ec2757f
 
 
 
 
da8c66e
 
4a513e9
e5b39a3
b52f045
4a513e9
b52f045
 
 
 
 
 
3814707
 
a9b576d
d52b8f4
 
a9b576d
 
 
 
 
 
 
 
 
 
499b8f6
4de0aea
a9b576d
4de0aea
 
f0bac1b
ec2757f
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
import requests
import time
import gradio as gr
import os

########################
## Loading the model: ##
#######################
# Hugging Face Inference API token, read from the environment.
# NOTE(review): if HF_API_KEY_INFERENCE is unset this is None and the header
# below becomes "Bearer None" — confirm the secret is configured in deployment.
api_key = os.environ.get("HF_API_KEY_INFERENCE")

#7 dimensions:
# Endpoint for model 1: classifies items into the 7 EEMM dimensions.
API_URL1 = "https://api-inference.huggingface.co/models/chernandezc/EEMM_7_categories_WB" #Api endpoint.
headers = {"Authorization": f"Bearer {api_key}"} #Auth header, shared (equal) for both endpoints.


def query1(payload, timeout=30):
    """POST *payload* to the 7-dimension model endpoint and return parsed JSON.

    Args:
        payload: JSON-serializable dict, e.g. ``{"inputs": "some text"}``.
        timeout: seconds to wait for the request before raising a requests
            timeout error.  The original call had no timeout and could hang
            forever on a stalled connection.

    Returns:
        The decoded JSON response: a list of scored labels on success, or a
        dict containing an "error" key while the model is still loading.
    """
    response = requests.post(API_URL1, headers=headers, json=payload, timeout=timeout)
    return response.json()  # Return decoded JSON.


#3 levels:
# Endpoint for model 2: classifies items into the 3 EEMM levels.
API_URL2 = "https://api-inference.huggingface.co/models/chernandezc/EEMM_3_dimensions_1201" #Api endpoint.


def query2(payload, timeout=30):
    """POST *payload* to the 3-level model endpoint and return parsed JSON.

    Args:
        payload: JSON-serializable dict, e.g. ``{"inputs": "some text"}``.
        timeout: seconds to wait for the request before raising a requests
            timeout error.  The original call had no timeout and could hang
            forever on a stalled connection.

    Returns:
        The decoded JSON response: a list of scored labels on success, or a
        dict containing an "error" key while the model is still loading.
    """
    response = requests.post(API_URL2, headers=headers, json=payload, timeout=timeout)
    return response.json()  # Return decoded JSON.

# ##########################################################
# Function to process the output and print classifications #
############################################################
def classify_output(item):
    """Classify *item* on EEMM dimensions and levels; return a summary string.

    The text is sent to the two Inference API models — ``query1`` (7
    dimensions) and ``query2`` (3 levels).  While either endpoint answers
    with an "error" payload (model still warming up) the call is retried
    after a short sleep.  Labels scoring >= 0.5 are reported; if a model has
    none, its single most probable label is reported with a caution note.

    Args:
        item: the item/phrase to classify.

    Returns:
        A multi-line, human-readable classification summary.

    Raises:
        RuntimeError: if either endpoint still returns an error after the
            retry budget is exhausted (the original code looped forever here).
    """
    # Raw model label -> EEMM dimension name.
    label_dict1 = {
        'LABEL_0': 'Cognition',
        'LABEL_1': 'Affect',
        'LABEL_2': 'Self',
        'LABEL_3': 'Motivation',
        'LABEL_4': 'Attention',
        'LABEL_5': 'Overt Behavior',
        'LABEL_6': 'Context'
    }

    # Raw model label -> EEMM level name.
    label_dict2 = {
        'LABEL_0': 'Social',
        'LABEL_1': 'Psychological',
        'LABEL_2': 'Physical'
    }

    output1 = query1({"inputs": item})
    output2 = query2({"inputs": item})

    # A cold model answers {"error": ..., "estimated_time": ...}; sleep a
    # little (capped at min_delay per attempt) and re-query.  The retry
    # budget replaces the original unbounded while-loop, which spun forever
    # on persistent errors (bad token, deleted model, ...).
    min_delay = 5     # seconds; cap on each sleep between retries
    max_retries = 24  # ~2 min worst case; warm-up is typically ~20 s
    attempts = 0
    while 'error' in output1 or 'error' in output2:
        attempts += 1
        if attempts > max_retries:
            raise RuntimeError(
                f"Inference API kept failing: {output1!r} / {output2!r}")
        if 'error' in output1:
            time.sleep(min(output1.get("estimated_time", min_delay), min_delay))
            output1 = query1({"inputs": item})
        if 'error' in output2:
            time.sleep(min(output2.get("estimated_time", min_delay), min_delay))
            output2 = query2({"inputs": item})

    # Fallback candidates: each model's single most probable label.
    highest_score_item1 = max(output1[0], key=lambda x: x['score'])
    highest_score_item2 = max(output2[0], key=lambda x: x['score'])

    # (name, score) pairs for every label at or above the 0.5 threshold.
    # Loop variables renamed: the original reused (and shadowed) *item*.
    classifications1 = [(label_dict1[res['label']], res['score'])
                        for res in output1[0] if res['score'] >= 0.5]
    classifications2 = [(label_dict2[res['label']], res['score'])
                        for res in output2[0] if res['score'] >= 0.5]

    def _fmt(pairs):
        # "Affect (0.92), Context (0.61)"-style listing.
        return ', '.join(f"{label} ({score:.2f})" for label, score in pairs)

    if classifications1 and classifications2:
        # Both models produced confident labels.
        return f"For dimensions: {_fmt(classifications1)}\nFor levels: {_fmt(classifications2)}"

    if classifications1:
        # Dimensions only; hedge the best level guess.
        return (f"For dimensions: {_fmt(classifications1)}\n"
                f"For levels: No classifications with a score of 0.5 or higher were found.\n"
                f"However, the highest probability was for: '{label_dict2[highest_score_item2['label']]}' ({round(highest_score_item2['score'],2)})\n"
                f" Use this classification with caution due to uncertainty")

    if classifications2:
        # Levels only; hedge the best dimension guess.
        return (f"For levels: {_fmt(classifications2)}\n"
                f"For dimensions: No classifications with a score of 0.5 or higher were found.\n"
                f"However, the highest probability was for: '{label_dict1[highest_score_item1['label']]}' ({round(highest_score_item1['score'],2)}) \n"
                f"  Use this classification with caution due to uncertainty")

    # Neither model cleared the threshold; report both best guesses.
    # Fix: the original f-string dropped the closing ')' after each score.
    return (f"No classification with a score of 0.5 or higher were found for both levels and dimensions\n"
            f"The highest probability for dimensions was: '{label_dict1[highest_score_item1['label']]}' ({round(highest_score_item1['score'],2)})\n"
            f"The highest probability for level was: '{label_dict2[highest_score_item2['label']]}' ({round(highest_score_item2['score'],2)}) \n"
            f"  Use this classification with caution due to uncertainty")
    
    #########################################
    ######## RUN GRADIO APP #################
    #########################################



# Markdown rendered below the interface (the "article" slot).
article_text = "### Have fun!"

# The app's UI components.
# NOTE(review): container = 'True' passes the *string* 'True', not the bool —
# it is truthy so it behaves like True, but a bool is what the API expects.
txtbx = gr.Textbox(value = 'I would like to feel better', label = 'Please enter your item:', container = 'True')
txtbxopt = gr.Textbox(label = 'The item you provided was classified as:', container = 'True')
# Flagging callback: appends flagged items and chosen options to the
# 'flagging_EEMM_V05' dataset on the Hub, authenticated with the same token.
hf_writer = gr.HuggingFaceDatasetSaver(api_key, 'flagging_EEMM_V05')
# Single-input/single-output interface wired to classify_output above.
# NOTE(review): allow_flagging / flagging_callback were deprecated in
# Gradio 4.x — confirm the pinned gradio version still accepts them.
demo = gr.Interface(fn=classify_output, inputs=txtbx, outputs=txtbxopt,
                    theme = gr.themes.Soft(primary_hue='orange'),
                    title = 'EEMM Item Classification Machine V 0.5',
                    description = """
                    
                    ¡Hello! This app showcases two machine learning models to classify items and phrases into the EEMM grid. 
                    It will output a classification for dimensions as *Cognition, Self, Affect, Motivation, Attention, Overt Behavior or Context*.
                    It will also output levels as *Social, Psychological or Physical*. 
                    You will also see the probability assigned by the model in parentheses (e.g: Affect (0.92)) 

                    This is a proof of concept, so the model will not be perfect right away and may make some mistakes. 
                    **You can help us improve the models by flagging wrong answers**. 
                    Using the buttons below the output you can tell us the output lacks a category, 
                    if levels and/or dimensions are wrong, or if you want to tell us that the model did well. When flagging, your item/phrase will be saved with your answer. 
                    Only flagged items will be saved for model improvement.

                    **Please note that the machine goes idle after a period of inactivity.** 
                    If this occurs, waking it up may take around 20 seconds. Be patient ;)""",
                    article = article_text,
                    allow_flagging = 'manual',
                    flagging_options = ['wrong dimension/level','lacks dimension/level', 'wrong and lacking :(', 'spot on! :)'],
                    flagging_callback= hf_writer
)
# Start the local Gradio server (blocking call).
demo.launch()