import json, re

data_folder = '../data'

def get_gsm8k():
	"""Load the full GSM8K test split from the shared data folder."""
	path = f'{data_folder}/GSM8K/test_all.json'
	with open(path) as fp:
		return json.load(fp)

def eval_gsm8k(filename):
	"""Score a GSM8K/AIME/MATH500 result file and print its accuracy.

	*filename* is a JSON list of records, each carrying the ground-truth
	'solution' and the model's 'answer'; equivalence is decided by the
	project's math checker (exp.math_eval.check).
	"""
	from exp import math_eval
	with open(filename) as fp:
		res = json.load(fp)
	num_correct = 0
	for r in res:
		gt, ans = r['solution'], r.get('answer', '')
		# check() also returns the normalised forms, useful when debugging.
		eq, gt_norm, ans_norm = math_eval.check(gt, ans)
		if eq:
			num_correct += 1
		# else:
		# 	print(gt_norm, ans_norm)
	total = len(res)
	if not total:
		# Guard against ZeroDivisionError on an empty result file.
		print('ACC = 0/0 (empty result file)')
		return
	print(f'ACC = {num_correct}/{total} = {num_correct/total*100:0.2f}%')

def get_AIME():
	"""Load the AIME 1983-2024 problem set from its CSV file.

	Returns a list of dicts with 'id', 'problem', 'solution' and a
	sequential 'pid'.
	"""
	import pandas as pd
	df = pd.read_csv(f'{data_folder}/AIME_Dataset_1983_2024.csv')
	# Iterate row dicts instead of per-cell .loc lookups (same output, clearer).
	return [
		{
			'id': row['ID'],
			'problem': row['Question'],
			'solution': row['Answer'],
			'pid': pid,
		}
		for pid, row in enumerate(df.to_dict(orient='records'))
	]

def get_gpqa():
	"""Load the GPQA-diamond set from CSV.

	Each entry keeps all four options in 'choices' with the correct answer
	first (index 0), plus the dataset's 'record_id' and a 1-based 'id'.
	"""
	import pandas as pd
	df = pd.read_csv(f'{data_folder}/gpqa_diamond.csv')
	data = []
	# Iterate row dicts instead of per-cell .loc lookups (same output, clearer).
	for i, row in enumerate(df.to_dict(orient='records'), start=1):
		choices = [
			row['Correct Answer'],
			row['Incorrect Answer 1'],
			row['Incorrect Answer 2'],
			row['Incorrect Answer 3'],
			]
		data.append({
					'id': f'{i}',
					'record_id': row['Record ID'],
					'question': row['Question'],
					'choices': choices,
					})
	return data

# Matches one multiple-choice letter; char class is equivalent to r'A|B|C|D'.
pattern_gpqa = re.compile(r'[ABCD]')
def eval_gpqa(filename):
	"""Score a GPQA/ARC result file (JSON list of records) and print accuracy.

	Each record needs 'gt_answer' (a letter A-D) and the model's free-form
	'answer'; the predicted letter is extracted heuristically.
	"""
	def extract_label(text):
		# Returns 'A'-'D' or None when no letter is found.
		# Drop everything before the last answer marker, if any.
		prefixes = ['- Result:', '####', 'answer:']
		for p in prefixes:
			if p in text:
				text = text.split(p)[-1]
		choices = pattern_gpqa.findall(text)
		if not choices:
			return None
		# With an explicit marker still present, the first letter after it is
		# the answer; otherwise take the last letter mentioned.
		if '\n####' in text or 'The answer is' in text:
			return choices[0]
		else :
			return choices[-1]
	with open(filename) as fp:
		res = json.load(fp)
	num_correct = 0
	for r in res:
		gt = r['gt_answer']
		ans = extract_label(r.get('answer',''))
		if ans and ans == gt:
			num_correct += 1
	total = len(res)
	if not total:
		# Guard against ZeroDivisionError on an empty result file.
		print('ACC = 0/0 (empty result file)')
		return
	print(f'ACC = {num_correct}/{total} = {num_correct/total*100:0.2f}%')

def get_hard_hotpot():
	"""Load the 'hard' HotpotQA subset from CSV.

	Returns a list of dicts with 'question', 'gt_answer' and a 'context'
	list of [title, newline-joined sentences] pairs.  Rows whose 'context'
	column fails to parse are skipped after printing the error.
	"""
	import pandas as pd
	# 'array' must be in scope: the CSV's 'context' column stores the repr of
	# a dict of numpy arrays, which is re-parsed with eval() below.
	from numpy import array
	df = pd.read_csv(f'{data_folder}/hard_hotpot.csv')
	data = []
	for i in range(len(df)):
		entry = df.iloc[i].to_dict()
		# Test sets may not have an answer
		# NOTE(review): assert is stripped under `python -O`; this is
		# validation, consider raising ValueError instead.
		assert entry.get('answer'), entry.get('answer')
		try:
			# SECURITY: eval() on file contents — acceptable only because the
			# CSV is a trusted local artifact; never run this on untrusted data.
			context_dict = eval(entry["context"])
		except Exception as e:
			print(e)
			continue
		# Convert title array to list
		titles = context_dict["title"].tolist()
		# List of numpy arrays of sentences 
		paragraphs = context_dict["sentences"] 
		context = []
		for title, sentences_array in zip(titles, paragraphs):
			# Join each paragraph's sentences into one newline-separated string.
			context.append([title, "\n".join(sentences_array.tolist())])
		data.append({
					'id':f'{i+1}', 
					'record_id':entry['id'], 
					'question':entry['question'], 
					'gt_answer':entry['answer'], 
					'context':context, 
					})
	return data

def extract_label(text) -> str:
	"""Return the portion of *text* after the last answer marker.

	Each marker in turn ('- Result:', '####', 'answer:') truncates the text
	to whatever follows its final occurrence; text without any marker is
	returned unchanged.
	"""
	for marker in ('- Result:', '####', 'answer:'):
		if marker in text:
			_, _, text = text.rpartition(marker)
	return text

def eval_hotpot(filename):
	"""Score a HotpotQA result file and print averaged EM/F1/precision/recall.

	*filename* is a JSON list of records with 'gt_answer' and the model's
	'answer'; update_answer accumulates per-example metrics in place.
	"""
	from eval.hotpot_evaluate_v1 import update_answer
	with open(filename) as fp:
		res = json.load(fp)
	metrics = {'em': 0, 'f1': 0, 'prec': 0, 'recall': 0}
	for r in res:
		pred = extract_label(r.get('answer', ''))
		update_answer(metrics, pred, r['gt_answer'])
	# Average over examples; guard against ZeroDivisionError on an empty file.
	N = len(res)
	if N:
		for k in metrics.keys():
			metrics[k] /= N
	print(metrics)

def get_arc():
	"""Load the ARC-Challenge test split, rotating the gold answer to index 0."""
	import pandas as pd
	parquet_file_path = f'{data_folder}/ai2_arc/ARC-Challenge/test-00000-of-00001.parquet'
	df = pd.read_parquet(parquet_file_path, engine='pyarrow')
	data = []
	for record in df.to_dict(orient='records'):
		key = record['answerKey']
		# The answer key is either a digit ('1'-'5') or a letter ('A'-'E').
		gold = int(key) - 1 if key in '12345' else 'ABCDE'.index(key)
		original = record['choices']['text'].tolist()
		reordered = original[:]
		assert len(reordered) <= 5, reordered
		# Swap the gold choice into the first slot.
		if gold != 0:
			reordered[0], reordered[gold] = reordered[gold], reordered[0]
		assert original[gold] == reordered[0]
		data.append({
			'question': record['question'],
			'choices': reordered,
			})
	return data

def get_math500():
	"""Load the MATH-500 test set from its JSONL file."""
	path = f'{data_folder}/MATH500-test.jsonl'
	data = []
	with open(path) as fp:
		# One JSON record per line; pid is the 0-based line index.
		for pid, line in enumerate(fp):
			record = json.loads(line)
			data.append({
				'problem': record['problem'],
				'cot': record['solution'],
				'solution': record['answer'],
				'subject': record['subject'],
				'pid': pid,
				})
	return data

def get_data(task_name, cache=True):
	"""Return the dataset for *task_name*, preferring a local JSON cache.

	When *cache* is true and '{task_name}.json' exists it is loaded from
	disk; otherwise the matching loader builds the dataset from source.
	"""
	import os
	cache_path = f'{task_name}.json'
	if cache and os.path.isfile(cache_path):
		with open(cache_path) as fp:
			return json.load(fp)
	loaders = {
		'gsm8k': get_gsm8k,
		'AIME': get_AIME,
		'gpqa': get_gpqa,
		'hard_hotpot': get_hard_hotpot,
		'ARC': get_arc,
		'MATH500': get_math500,
		}
	return loaders[task_name]()

def eval_any(task_name, filename):
	"""Dispatch *filename* to the evaluator that matches *task_name*."""
	print(filename)
	# The task groups are disjoint, so an elif chain is equivalent.
	if task_name in ('gsm8k', 'AIME', 'MATH500'):
		eval_gsm8k(filename)
	elif task_name in ('gpqa', 'ARC'):
		eval_gpqa(filename)
	elif task_name == 'hard_hotpot':
		eval_hotpot(filename)

def eval_set(size):
	"""Build fixed-size evaluation subsets for several tasks.

	For each task, loads the full dataset (bypassing the cache), shuffles
	it, keeps the first *size* entries, and writes them to '{task}.json'.
	"""
	# json is already imported at module level; only random is needed here,
	# and the import is loop-invariant so it is hoisted out of the loop.
	import random
	for task_name in ['gsm8k', 'AIME', 'hard_hotpot']:
		data = get_data(task_name, False)
		random.shuffle(data)
		data = data[:size]
		with open(f'{task_name}.json', 'w') as fp:
			json.dump(data, fp, ensure_ascii=False, indent='\t')

if __name__ == '__main__':
	# Quick sanity check: report the size of each freshly built dataset.
	for task in ('ARC', 'MATH500'):
		data = get_data(task)
		print(task, len(data))
