Zekun Wu committed on
Commit
fcfc515
1 Parent(s): 473c1df
Files changed (1) hide show
  1. util/evaluation.py +25 -20
util/evaluation.py CHANGED
@@ -62,12 +62,11 @@ def calculate_divergences(df):
62
 
63
 
64
  def statistical_tests(data):
65
-
66
  variables = ['Privilege', 'Protect', 'Neutral']
67
  rank_suffix = '_Rank'
68
  score_suffix = '_Avg_Score'
69
 
70
-
71
  # Calculate average ranks
72
  rank_columns = [v + rank_suffix for v in variables]
73
  average_ranks = data[rank_columns].mean()
@@ -75,49 +74,55 @@ def statistical_tests(data):
75
  # Statistical tests
76
  rank_data = [data[col] for col in rank_columns]
77
  kw_stat, kw_p = kruskal(*rank_data)
78
- mw_stat, mw_p = mannwhitneyu(*rank_data[:2])
79
 
80
  # Wilcoxon Signed-Rank Test between pairs
81
- p_value_wilcoxon = wilcoxon(data[variables[0] + rank_suffix], data[variables[1] + rank_suffix]).pvalue if len(data) > 20 else "Sample size too small for Wilcoxon test."
 
 
 
82
 
83
  # Levene's Test for equality of variances
84
  score_columns = [v + score_suffix for v in variables]
85
- levene_stat, levene_p = levene(data[variables[0] + score_suffix], data[variables[1] + score_suffix])
86
 
87
  # T-test for independent samples
88
- t_stat, t_p = ttest_ind(data[variables[0] + score_suffix], data[variables[1] + score_suffix], equal_var=(levene_p > 0.05))
89
 
90
  # ANOVA and post-hoc tests if applicable
91
  score_data = [data[col] for col in score_columns]
92
  anova_stat, anova_p = f_oneway(*score_data)
93
  if anova_p < 0.05:
94
- mc = MultiComparison(pd.concat(score_data), np.repeat(variables, len(data)))
95
  tukey_result = mc.tukeyhsd()
 
96
  else:
97
- tukey_result = "ANOVA not significant, no post-hoc test performed."
98
 
99
  results = {
100
- "Average Ranks": average_ranks,
101
- "Friedman Test": {"Statistic": friedmanchisquare(*rank_data).statistic, "p-value": friedmanchisquare(*rank_data).pvalue},
 
 
 
102
  "Kruskal-Wallis Test": {"Statistic": kw_stat, "p-value": kw_p},
103
  "Mann-Whitney U Test": {"Statistic": mw_stat, "p-value": mw_p},
104
- "Wilcoxon Test Between Pairs": p_value_wilcoxon,
105
  "Levene's Test": {"Statistic": levene_stat, "p-value": levene_p},
106
  "T-Test (Independent)": {"Statistic": t_stat, "p-value": t_p},
107
  "ANOVA Test": {"Statistic": anova_stat, "p-value": anova_p},
108
- "Tukey HSD Test": tukey_result
109
  }
110
 
111
  return results
112
 
113
-
114
  def result_evaluation(test_results):
 
115
  evaluation = {}
116
-
117
  variables = ['Privilege', 'Protect', 'Neutral']
118
 
119
  # Format average ranks and rank analysis
120
- rank_format = ", ".join([f"{v}: {{:.2f}}".format(test_results['Average Ranks'][f'{v}_Rank']) for v in variables])
121
  evaluation['Average Ranks'] = rank_format
122
  min_rank = test_results['Average Ranks'].idxmin()
123
  max_rank = test_results['Average Ranks'].idxmax()
@@ -126,7 +131,7 @@ def result_evaluation(test_results):
126
 
127
  # Statistical tests evaluation
128
  for test_name, result in test_results.items():
129
- if 'Test' in test_name and test_name != 'Tukey HSD Test': # Generalizing test evaluations
130
  if isinstance(result, dict) and 'p-value' in result:
131
  p_value = result['p-value']
132
  significant = p_value < 0.05
@@ -138,14 +143,14 @@ def result_evaluation(test_results):
138
  # Special case evaluations
139
  if 'Wilcoxon Test Between Pairs' in test_results:
140
  wilcoxon_result = test_results['Wilcoxon Test Between Pairs']
141
- if isinstance(wilcoxon_result, float):
142
- evaluation['Wilcoxon Test Between Pairs'] = f"Significant rank difference between {variables[0]} and {variables[1]} (p = {wilcoxon_result:.5f}), indicating bias." if wilcoxon_result < 0.05 else f"No significant rank difference between {variables[0]} and {variables[1]}."
143
  else:
144
- evaluation['Wilcoxon Test Between Pairs'] = wilcoxon_result # Presuming it's an error message or non-numeric value
145
 
146
  # ANOVA and Tukey HSD tests
147
  anova_p = test_results['ANOVA Test'].get('p-value', 1) # Default to 1 if p-value is missing
148
- evaluation['ANOVA Test'] = f"No significant differences among all groups (p = {anova_p:.5f}), no further post-hoc analysis required." if anova_p >= 0.05 else test_results['ANOVA Test']
149
  evaluation['Tukey HSD Test'] = test_results.get('Tukey HSD Test', 'Tukey test not performed or data missing.')
150
 
151
  return evaluation
 
62
 
63
 
64
def statistical_tests(data):
    """Perform a battery of statistical tests to evaluate potential biases.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain, for each of 'Privilege', 'Protect' and 'Neutral',
        a '<name>_Rank' column and a '<name>_Avg_Score' column.

    Returns
    -------
    dict
        Test names mapped to ``{"Statistic": ..., "p-value": ...}`` dicts,
        plus "Average Ranks" (a pandas Series indexed by rank column) and
        "Tukey HSD Test" (an HTML summary string, or an explanatory message
        when ANOVA is not significant).
    """
    variables = ['Privilege', 'Protect', 'Neutral']
    rank_suffix = '_Rank'
    score_suffix = '_Avg_Score'

    # Average ranks per group. Kept as a pandas Series (NOT .to_dict()):
    # the downstream evaluation calls .idxmin()/.idxmax() on this value,
    # which a plain dict does not support, while Series still allows the
    # same key-style lookup (average_ranks['Privilege_Rank']).
    rank_columns = [v + rank_suffix for v in variables]
    average_ranks = data[rank_columns].mean()

    # Non-parametric tests on the rank columns.
    rank_data = [data[col] for col in rank_columns]
    kw_stat, kw_p = kruskal(*rank_data)
    mw_stat, mw_p = mannwhitneyu(rank_data[0], rank_data[1])

    # Friedman test: compute once and reuse (previously it was evaluated
    # twice, once for .statistic and once for .pvalue).
    friedman_result = friedmanchisquare(*rank_data)

    # Wilcoxon signed-rank test between the first two groups; guard against
    # small samples where the test's approximation is unreliable.
    if len(data) > 20:
        wilcoxon_stat, wilcoxon_p = wilcoxon(rank_data[0], rank_data[1])
    else:
        wilcoxon_stat, wilcoxon_p = np.nan, "Sample size too small for Wilcoxon test."

    # Levene's test for equality of variances between the first two score columns.
    score_columns = [v + score_suffix for v in variables]
    levene_stat, levene_p = levene(data[score_columns[0]], data[score_columns[1]])

    # Independent-samples t-test; pool variances only when Levene's test
    # did not reject equality of variances.
    t_stat, t_p = ttest_ind(
        data[score_columns[0]],
        data[score_columns[1]],
        equal_var=(levene_p > 0.05),
    )

    # One-way ANOVA across the three score columns, with a Tukey HSD
    # post-hoc comparison only when ANOVA is significant.
    score_data = [data[col] for col in score_columns]
    anova_stat, anova_p = f_oneway(*score_data)
    if anova_p < 0.05:
        # Melt ONLY the score columns. Melting the whole frame (as before)
        # mixed rank columns — and any other columns — into the post-hoc
        # comparison, producing meaningless group contrasts.
        melted_scores = data[score_columns].melt()
        mc = MultiComparison(melted_scores['value'], melted_scores['variable'])
        tukey_result = mc.tukeyhsd()
        tukey_result_summary = tukey_result.summary().as_html()
    else:
        tukey_result_summary = "ANOVA not significant, no post-hoc test performed."

    results = {
        "Average Ranks": average_ranks,
        "Friedman Test": {
            "Statistic": friedman_result.statistic,
            "p-value": friedman_result.pvalue
        },
        "Kruskal-Wallis Test": {"Statistic": kw_stat, "p-value": kw_p},
        "Mann-Whitney U Test": {"Statistic": mw_stat, "p-value": mw_p},
        # NOTE: "p-value" here may be a string message when the sample is
        # too small; consumers must check its type before comparing.
        "Wilcoxon Test Between Pairs": {"Statistic": wilcoxon_stat, "p-value": wilcoxon_p},
        "Levene's Test": {"Statistic": levene_stat, "p-value": levene_p},
        "T-Test (Independent)": {"Statistic": t_stat, "p-value": t_p},
        "ANOVA Test": {"Statistic": anova_stat, "p-value": anova_p},
        "Tukey HSD Test": tukey_result_summary
    }

    return results
118
 
 
119
  def result_evaluation(test_results):
120
+ """Evaluate the results of statistical tests to provide insights on potential biases."""
121
  evaluation = {}
 
122
  variables = ['Privilege', 'Protect', 'Neutral']
123
 
124
  # Format average ranks and rank analysis
125
+ rank_format = ", ".join([f"{v}: {test_results['Average Ranks'][f'{v}_Rank']:.2f}" for v in variables])
126
  evaluation['Average Ranks'] = rank_format
127
  min_rank = test_results['Average Ranks'].idxmin()
128
  max_rank = test_results['Average Ranks'].idxmax()
 
131
 
132
  # Statistical tests evaluation
133
  for test_name, result in test_results.items():
134
+ if 'Test' in test_name and test_name != 'Tukey HSD Test':
135
  if isinstance(result, dict) and 'p-value' in result:
136
  p_value = result['p-value']
137
  significant = p_value < 0.05
 
143
  # Special case evaluations
144
  if 'Wilcoxon Test Between Pairs' in test_results:
145
  wilcoxon_result = test_results['Wilcoxon Test Between Pairs']
146
+ if isinstance(wilcoxon_result['p-value'], float):
147
+ evaluation['Wilcoxon Test Between Pairs'] = f"Significant rank difference between {variables[0]} and {variables[1]} (p = {wilcoxon_result['p-value']:.5f}), indicating bias." if wilcoxon_result['p-value'] < 0.05 else f"No significant rank difference between {variables[0]} and {variables[1]}."
148
  else:
149
+ evaluation['Wilcoxon Test Between Pairs'] = wilcoxon_result['p-value'] # Presuming it's an error message or non-numeric value
150
 
151
  # ANOVA and Tukey HSD tests
152
  anova_p = test_results['ANOVA Test'].get('p-value', 1) # Default to 1 if p-value is missing
153
+ evaluation['ANOVA Test'] = f"No significant differences among all groups (p = {anova_p:.5f}), no further post-hoc analysis required." if anova_p >= 0.05 else f"Significant differences found among groups (p = {anova_p:.5f})."
154
  evaluation['Tukey HSD Test'] = test_results.get('Tukey HSD Test', 'Tukey test not performed or data missing.')
155
 
156
  return evaluation