|
import os
|
|
import json
|
|
import logging
|
|
from scripts.get_prediction_result import get_prediction_result
|
|
from scripts.helper import ensure_directory_exists
|
|
|
|
|
|
|
|
# Configure the root logger once at import time: timestamped INFO-level
# messages used for progress and score reporting below.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
|
|
|
|
|
def evaluate_noise_robustness(config):
    """Evaluate noise robustness, persisting per-sample predictions and scores.

    Runs predictions on the robustness dataset named in ``config``, writes
    the per-sample results as JSON Lines, computes accuracy metrics, logs
    them, and saves a score summary JSON alongside the predictions.

    Args:
        config (dict): Expected keys (as read below): 'result_path',
            'noise_rate', 'output_file_extension', 'robustness_file_name',
            'model_name'.

    Returns:
        list: Per-sample prediction results from get_prediction_result();
            each item is a dict with at least a 'label' list.
    """
    # NOTE(review): plain concatenation assumes config['result_path'] ends
    # with a path separator — confirm against callers before switching to
    # os.path.join (which would change the path when the slash is absent).
    result_path = config['result_path'] + 'Noise Robustness/'
    noise_rate = config['noise_rate']

    filename = os.path.join(result_path, f"prediction_{config['output_file_extension']}.json")
    ensure_directory_exists(filename)

    results = get_prediction_result(config, config['robustness_file_name'], filename)

    # Persist one JSON object per line (JSON Lines format).
    with open(filename, 'w', encoding='utf-8') as f:
        for result in results:
            f.write(json.dumps(result, ensure_ascii=False) + '\n')

    # A sample counts as correct when its label list contains a 1 and no 0.
    correct_count = sum(1 for res in results if 0 not in res['label'] and 1 in res['label'])
    accuracy = correct_count / len(results) if results else 0

    # 'tt' additionally credits samples whose first label is -1 when every
    # retrieved document is noise (noise_rate == 1) — presumably a rejection
    # marker, so refusing to answer is the correct behavior there.
    tt = sum(1 for i in results if (noise_rate == 1 and i['label'][0] == -1) or (0 not in i['label'] and 1 in i['label']))
    all_rate = tt / len(results) if results else 0

    scores = {
        'model': config['model_name'],
        'accuracy': accuracy,
        'noise_rate': noise_rate,
        'correct_count': correct_count,
        'total': len(results),
        'all_rate': all_rate,
        'tt': tt
    }
    logging.info(f"Noise Robustness Score: {scores}")
    logging.info(f"Accuracy: {accuracy:.2%}")

    score_filename = os.path.join(result_path, f"scores_{config['output_file_extension']}.json")
    # BUGFIX: specify UTF-8 explicitly. With ensure_ascii=False the dump may
    # contain non-ASCII text, which would raise UnicodeEncodeError (or be
    # mojibaked) under a platform-default codec such as cp1252 on Windows.
    # This also matches the predictions file written above.
    with open(score_filename, 'w', encoding='utf-8') as f:
        json.dump(scores, f, ensure_ascii=False, indent=4)

    return results
|
|
|