Ariel Hsieh committed on
Commit
bf6bc58
1 Parent(s): b2c3df8

update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -5
app.py CHANGED
@@ -40,9 +40,7 @@ if selection == "Ariel8/toxic-tweets-classification":
40
  outputs = model(**batch)
41
  predictions = torch.sigmoid(outputs.logits)*100
42
  probs = predictions[0].tolist()
43
- # for i in range(len(probs)):
44
- # st.write(f"{labels[i]}: {round(probs[i], 3)}%")
45
- # results.append(probs)
46
  first_max = max(probs)
47
  fm_index = probs.index(first_max)
48
  main_class.append((first_max,fm_index))
@@ -51,8 +49,8 @@ if selection == "Ariel8/toxic-tweets-classification":
51
  toxic_types.append((second_max,sm_index))
52
 
53
 
54
- d = {'Tweet':tweets,'Main Classification':[labels[main_class[i][1]] for i in range(len(main_class))],'Score':[round(main_class[i][0],3) for i in range(len(main_class))],
55
- 'Toxicity Type':[labels[toxic_types[i][1]] for i in range(len(toxic_types))],'Toxicity Score':[round(toxic_types[i][0],3) for i in range(len(toxic_types))]}
56
  dataframe = pd.DataFrame(data=d)
57
  st.table(dataframe)
58
  else:
 
40
  outputs = model(**batch)
41
  predictions = torch.sigmoid(outputs.logits)*100
42
  probs = predictions[0].tolist()
43
+
 
 
44
  first_max = max(probs)
45
  fm_index = probs.index(first_max)
46
  main_class.append((first_max,fm_index))
 
49
  toxic_types.append((second_max,sm_index))
50
 
51
 
52
+ d = {'Tweet':tweets,'Highest Class':[labels[main_class[i][1]] for i in range(len(main_class))],'Classification Score':[round(main_class[i][0],3) for i in range(len(main_class))],
53
+ 'Toxicity Type':[labels[toxic_types[i][1]] for i in range(len(toxic_types))],'Toxicity Type Score':[round(toxic_types[i][0],3) for i in range(len(toxic_types))]}
54
  dataframe = pd.DataFrame(data=d)
55
  st.table(dataframe)
56
  else: