# util/evaluation.py
import numpy as np
import pandas as pd
from scipy.spatial.distance import jensenshannon
from scipy.stats import (entropy, f_oneway, friedmanchisquare, kendalltau,
                         kruskal, levene, mannwhitneyu, pearsonr, spearmanr,
                         ttest_ind, wilcoxon)
from statsmodels.stats.multicomp import MultiComparison

def hellinger_distance(p, q):
"""Calculate the Hellinger distance between two probability distributions."""
return np.sqrt(0.5 * np.sum((np.sqrt(p) - np.sqrt(q)) ** 2))
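
# Minimal sanity check (a sketch, not part of the pipeline): the Hellinger
# distance is 0 for identical distributions and 1 for disjoint support.
#   hellinger_distance(np.array([1.0, 0.0]), np.array([1.0, 0.0]))  # -> 0.0
#   hellinger_distance(np.array([1.0, 0.0]), np.array([0.0, 1.0]))  # -> 1.0
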
def calculate_correlations(df):
"""Calculate Spearman, Pearson, and Kendall's Tau correlations for the given ranks in the dataframe."""
correlations = {
'Spearman': {},
'Pearson': {},
'Kendall Tau': {}
}
columns = ['Privilege_Rank', 'Protect_Rank', 'Neutral_Rank']
for i in range(len(columns)):
for j in range(i + 1, len(columns)):
col1, col2 = columns[i], columns[j]
            correlations['Spearman'][f'{col1} vs {col2}'] = spearmanr(df[col1], df[col2])[0]
            correlations['Pearson'][f'{col1} vs {col2}'] = pearsonr(df[col1], df[col2])[0]
            correlations['Kendall Tau'][f'{col1} vs {col2}'] = kendalltau(df[col1], df[col2])[0]
return correlations
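
# Usage sketch with hypothetical data (the dataframe is assumed to carry one
# rank per candidate for each variant, matching the column names above):
#   df = pd.DataFrame({'Privilege_Rank': [1, 2, 3, 4],
#                      'Protect_Rank':   [2, 1, 4, 3],
#                      'Neutral_Rank':   [1, 2, 4, 3]})
#   calculate_correlations(df)['Spearman']['Privilege_Rank vs Protect_Rank']  # -> 0.6
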
def scores_to_prob(scores):
    """Convert a series of scores to a probability vector over integer score bins.

    Assumes scores are non-negative. Non-integer scores are truncated to their
    integer bin before counting, so index i of the returned vector holds the
    probability mass of scores in bin i.
    """
    bins = scores.astype(int)
    value_counts = bins.value_counts()
    probabilities = value_counts / value_counts.sum()
    full_prob = np.zeros(int(bins.max()) + 1)
    full_prob[value_counts.index] = probabilities
    return full_prob
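
# Example (a sketch): integer scores become a dense probability vector indexed
# by score value, with zeros for unobserved scores.
#   scores_to_prob(pd.Series([0, 1, 1, 3]))  # -> array([0.25, 0.5, 0., 0.25])
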
def calculate_divergences(df):
    """Calculate KL divergence, Jensen-Shannon divergence, and Hellinger distance for the score distributions."""
    score_columns = ['Privilege_Avg_Score', 'Protect_Avg_Score', 'Neutral_Avg_Score']
    probabilities = {col: scores_to_prob(df[col]) for col in score_columns}
    # Pad every distribution to a common support length so that pairwise
    # comparisons are well defined even when the columns have different maxima.
    max_len = max(len(p) for p in probabilities.values())
    probabilities = {col: np.pad(p, (0, max_len - len(p))) for col, p in probabilities.items()}
    divergences = {
        'KL Divergence': {},
        'Jensen-Shannon Divergence': {},
        'Hellinger Distance': {}
    }
    for i in range(len(score_columns)):
        for j in range(i + 1, len(score_columns)):
            col1, col2 = score_columns[i], score_columns[j]
            p, q = probabilities[col1], probabilities[col2]
            divergences['KL Divergence'][f'{col1} vs {col2}'] = entropy(p, q)
            divergences['Jensen-Shannon Divergence'][f'{col1} vs {col2}'] = jensenshannon(p, q)
            divergences['Hellinger Distance'][f'{col1} vs {col2}'] = hellinger_distance(p, q)
    return divergences
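
# Interpretation note: entropy(p, q) computes KL(p || q), which is asymmetric
# and unbounded (infinite wherever q has zero mass and p does not). In contrast,
# scipy's jensenshannon returns the Jensen-Shannon *distance* (the square root
# of the divergence, at most sqrt(ln 2) ~ 0.83 with the default natural log)
# and the Hellinger distance lies in [0, 1]; both are symmetric, so those two
# are the easier numbers to compare across column pairs.
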
def statistical_tests(data):
"""Perform various statistical tests to evaluate potential biases."""
variables = ['Privilege', 'Protect', 'Neutral']
rank_suffix = '_Rank'
score_suffix = '_Avg_Score'
# Calculate average ranks
rank_columns = [v + rank_suffix for v in variables]
average_ranks = data[rank_columns].mean()
# Statistical tests
rank_data = [data[col] for col in rank_columns]
kw_stat, kw_p = kruskal(*rank_data)
# Pairwise tests
pairwise_results = {}
pairs = [
('Privilege', 'Protect'),
('Protect', 'Neutral'),
('Privilege', 'Neutral')
]
for (var1, var2) in pairs:
pair_name = f'{var1} vs {var2}'
# Mann-Whitney U Test
mw_stat, mw_p = mannwhitneyu(data[f'{var1}{rank_suffix}'], data[f'{var2}{rank_suffix}'])
pairwise_results[f'Mann-Whitney U Test {pair_name}'] = {"Statistic": mw_stat, "p-value": mw_p}
# Wilcoxon Signed-Rank Test
if len(data) > 20:
wilcoxon_stat, wilcoxon_p = wilcoxon(data[f'{var1}{rank_suffix}'], data[f'{var2}{rank_suffix}'])
else:
wilcoxon_stat, wilcoxon_p = np.nan, "Sample size too small for Wilcoxon test."
pairwise_results[f'Wilcoxon Test {pair_name}'] = {"Statistic": wilcoxon_stat, "p-value": wilcoxon_p}
# Levene's Test for equality of variances
levene_stat, levene_p = levene(data[f'{var1}{score_suffix}'], data[f'{var2}{score_suffix}'])
pairwise_results[f'Levene\'s Test {pair_name}'] = {"Statistic": levene_stat, "p-value": levene_p}
# T-test for independent samples
t_stat, t_p = ttest_ind(data[f'{var1}{score_suffix}'], data[f'{var2}{score_suffix}'],
equal_var=(levene_p > 0.05))
pairwise_results[f'T-Test {pair_name}'] = {"Statistic": t_stat, "p-value": t_p}
    # ANOVA across the three score columns, with Tukey HSD post-hoc if significant
    score_columns = [v + score_suffix for v in variables]
    score_data = [data[col] for col in score_columns]
    anova_stat, anova_p = f_oneway(*score_data)
    if anova_p < 0.05:
        # Compare only the score columns; melting the full dataframe would mix
        # the rank columns into the post-hoc comparison.
        melted = data[score_columns].melt()
        mc = MultiComparison(melted['value'], melted['variable'])
        tukey_result = mc.tukeyhsd()
        tukey_result_summary = tukey_result.summary().as_html()
    else:
        tukey_result_summary = "ANOVA not significant, no post-hoc test performed."
    friedman_stat, friedman_p = friedmanchisquare(*rank_data)
    results = {
        "Average Ranks": average_ranks.to_dict(),
        "Friedman Test": {"Statistic": friedman_stat, "p-value": friedman_p},
        "Kruskal-Wallis Test": {"Statistic": kw_stat, "p-value": kw_p},
        **pairwise_results,
        "ANOVA Test": {"Statistic": anova_stat, "p-value": anova_p},
        "Tukey HSD Test": tukey_result_summary
    }
    return results
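
# Usage sketch: the returned dict is flat, so pairwise entries can be read off
# directly, e.g. results['Mann-Whitney U Test Privilege vs Protect']['p-value'].
#   results = statistical_tests(df)
#   for name, res in results.items():
#       if isinstance(res, dict) and isinstance(res.get('p-value'), float):
#           print(f"{name}: p = {res['p-value']:.4f}")
# A runnable demo on synthetic data is at the bottom of this file.
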
def result_evaluation(test_results):
    """Evaluate the results of statistical tests to provide insights on potential biases."""
    evaluation = {}
    variables = ['Privilege', 'Protect', 'Neutral']

    # Format average ranks and identify the most and least preferred variants.
    avg_ranks = test_results['Average Ranks']
    evaluation['Average Ranks'] = ", ".join(f"{v}: {avg_ranks[f'{v}_Rank']:.2f}" for v in variables)
    min_rank = min(avg_ranks, key=avg_ranks.get)
    max_rank = max(avg_ranks, key=avg_ranks.get)
    evaluation['Rank Analysis'] = (
        f"Lowest average rank: {min_rank} (suggests highest preference), "
        f"highest average rank: {max_rank} (suggests least preference)."
    )

    # Evaluate every test that reports a numeric p-value.
    for test_name, result in test_results.items():
        if 'Test' in test_name and test_name != 'Tukey HSD Test':
            if isinstance(result, dict) and isinstance(result.get('p-value'), float):
                p_value = result['p-value']
                if p_value < 0.05:
                    evaluation[test_name] = (
                        f"Significant result for {test_name} (p = {p_value:.5f}), indicating potential bias."
                    )
                else:
                    evaluation[test_name] = f"No significant result for {test_name}."
            elif isinstance(result, dict) and 'p-value' in result:
                # Non-numeric p-value, e.g. the small-sample Wilcoxon message.
                evaluation[test_name] = str(result['p-value'])
            else:
                evaluation[test_name] = "Test result format error or incomplete data."

    # ANOVA and Tukey HSD summaries.
    anova_p = test_results['ANOVA Test'].get('p-value', 1)  # Default to 1 if the p-value is missing.
    if anova_p < 0.05:
        evaluation['ANOVA Test'] = f"Significant differences found among groups (p = {anova_p:.5f})."
    else:
        evaluation['ANOVA Test'] = (
            f"No significant differences among all groups (p = {anova_p:.5f}), "
            "no further post-hoc analysis required."
        )
    evaluation['Tukey HSD Test'] = test_results.get('Tukey HSD Test', 'Tukey test not performed or data missing.')

    return evaluation
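

if __name__ == '__main__':
    # Smoke test on synthetic data (a sketch; the column layout is assumed from
    # the names used above): 30 candidates, ranks 1-3 per variant, and integer
    # average scores on a 0-10 scale.
    rng = np.random.default_rng(0)
    n = 30
    demo = pd.DataFrame({
        'Privilege_Rank': rng.integers(1, 4, n),
        'Protect_Rank': rng.integers(1, 4, n),
        'Neutral_Rank': rng.integers(1, 4, n),
        'Privilege_Avg_Score': rng.integers(0, 11, n),
        'Protect_Avg_Score': rng.integers(0, 11, n),
        'Neutral_Avg_Score': rng.integers(0, 11, n),
    })
    print(calculate_correlations(demo))
    print(calculate_divergences(demo))
    results = statistical_tests(demo)
    print(result_evaluation(results))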