# import warnings
# warnings.filterwarnings("ignore", category=UserWarning)

import json, os, tqdm, time
from util import color, parallel
from llm import emb
from exp import data

def load_examples(paths):
	"""Load and concatenate JSONL example records from every path in *paths*."""
	print('load_examples ...')
	all_records = []
	for path in tqdm.tqdm(paths):
		all_records += load_jsonl(path)
	return all_records

def dump_jsonl(res, file):
	"""Serialize each record in *res* as one JSON line into *file* (overwrites).

	ensure_ascii=False keeps non-ASCII text readable in the output file.
	"""
	with open(file, 'w') as out:
		out.writelines(json.dumps(record, ensure_ascii=False) + '\n' for record in res)

def load_jsonl(file):
	"""Load a JSONL file and return a list of parsed records.

	Blank or whitespace-only lines are skipped. The original check
	`line == ''` never matched because iterated lines keep their
	trailing '\n', so a blank line crashed json.loads.
	"""
	res = []
	with open(file) as fp:
		for line in fp:
			if not line.strip():
				continue
			res.append(json.loads(line))
	return res

def gen_emb(situation, retries=10, retry_delay=30):
	"""Return the embedding for *situation*, retrying on transient failure.

	Args:
		situation: text to embed (forwarded to emb.emb).
		retries: maximum number of attempts.
		retry_delay: seconds to sleep between attempts.

	Returns:
		The embedding from emb.emb, or None after all attempts fail.
	"""
	for attempt in range(retries):
		try:
			return emb.emb(situation)
		except Exception:
			# Bare `except:` also swallowed KeyboardInterrupt/SystemExit;
			# catch Exception only, and don't sleep after the final attempt.
			if attempt + 1 < retries:
				time.sleep(retry_delay)
	return None

# Calculate the embedding for each Insight created in step 1
def gen_embs(examples, outfile, retries=10, retry_delay=30):
	"""Embed every insight 'Situation' of every example and dump to *outfile*.

	Each output line is the list of embeddings (None on failure) for the
	corresponding example, in the original example order.
	"""
	print('gen_embs ...')
	def task(example):
		# First element is the example's original position so results can be
		# re-ordered after the unordered parallel run, then stripped below.
		res = [example['index']]
		for idea in example['insights']:
			# Bug fix: retries/retry_delay were accepted by gen_embs but
			# never forwarded to gen_emb.
			e = gen_emb(idea['Situation'], retries=retries, retry_delay=retry_delay)
			if e is None:
				print('Fail to obtain embedding for:', idea['Situation'])
			res.append(e)
		return res
	for i, e in enumerate(examples):
		e['index'] = i
	res = parallel.run(task, examples, num_tasks=50)
	res.sort(key=lambda r: r[0])
	res = [r[1:] for r in res]  # drop the index column before dumping
	dump_jsonl(res, outfile)

def load_emb(emb_path, max_size=None, emb_dim=1024):
	"""Load per-example embedding lists from a JSONL file.

	Each line holds a list of embeddings (one per insight); a None entry
	(a failed embedding dumped as JSON null) is replaced with a zero vector
	of *emb_dim* so the resulting tensor stays rectangular.

	Args:
		emb_path: path to the embeddings JSONL file.
		max_size: stop after this many lines (None = read all).
		emb_dim: dimensionality used for the zero-vector placeholder
			(default 1024, matching the previous hard-coded value).

	Returns:
		(embs, emb_index): embs is a (total_insights, emb_dim) float tensor;
		emb_index[i] == [line_num, pos] locates row i in the file.
	"""
	print('load_emb ...')
	embs = []
	emb_index = []
	# `total` is only a progress-bar estimate; 5000 is the expected corpus size.
	total = max_size or 5000
	with open(emb_path) as fp:
		line_num = 0
		for line in tqdm.tqdm(fp, total=total):
			# Iterated lines keep '\n', so the original `line == ''` check
			# never matched; skip blank lines robustly instead.
			if not line.strip():
				continue
			es = json.loads(line)
			for i, e in enumerate(es):
				emb_index.append([line_num, i])
				if e is None:
					e = [.0] * emb_dim
				embs.append(e)
			line_num += 1
			if max_size is not None and line_num >= max_size:
				break
	import torch
	return torch.Tensor(embs), emb_index

def retrieve_insights(emb, pid, examples, embs, emb_index, topk):
	"""Return up to *topk* [line, pos] entries most similar to *emb*,
	excluding insights that belong to the query problem *pid* itself.

	Similarity is the raw dot product of *emb* against every row of *embs*.
	Because same-pid hits are filtered out post-topk, the candidate pool is
	widened (`extra`) and grown until enough foreign-pid hits are found.
	"""
	if isinstance(emb, list):
		import torch
		emb = torch.Tensor(emb)
	score = embs @ emb.unsqueeze(1)
	n = embs.size(0)
	if topk > n:
		topk = n
	extra = 50
	while True:
		k = min(topk + extra, n)
		top_index = score.view(-1).topk(k=k)[1].tolist()
		ideas = [emb_index[i] for i in top_index if examples[emb_index[i][0]]['pid'] != pid]
		if len(ideas) >= topk:
			return ideas[:topk]
		if k == n:
			# Bug fix: when fewer than topk foreign-pid insights exist at all,
			# the original doubled `extra` forever with k capped at n — an
			# infinite loop. Return everything available instead.
			return ideas
		extra *= 2

def exam_similarity_acc_1(examples, embs, emb_index, topk=50):
	"""Build a subject-vs-subject retrieval matrix for a single topk setting.

	For every stored embedding, retrieve the topk most similar foreign-pid
	insights and tally (query subject, retrieved subject) pairs, then
	normalize each cell by the retrieved subject's population and each row
	to sum to 1. Returns the normalized matrix (list of lists of floats).
	"""
	math500 = data.get_data('MATH500')
	# subject_count: subject -> number of insight embeddings in that subject
	# subject_index: subject -> row/column position in the matrix
	subject_count = {}
	subject_index = {}
	for i,j in emb_index:
		pid = examples[i]['pid']
		subject = math500[pid]['subject']
		if subject not in subject_count:
			subject_index[subject] = len(subject_count)
			subject_count[subject] = 0
		subject_count[subject] += 1
	print('subject_count:', subject_count)
	# acc[s1][s2] counts retrievals with query subject s1 and hit subject s2
	acc = [[0 for _ in subject_index] for _ in subject_index]
	for i in tqdm.trange(embs.size(0)):
		# NOTE(review): this local shadows the imported `emb` module inside the loop
		emb = embs[i]
		pid = examples[emb_index[i][0]]['pid']
		ideas = retrieve_insights(emb, pid, examples, embs, emb_index, topk)
		for j,k in ideas:
			subject1 = math500[pid]['subject']
			pid2 = examples[j]['pid']
			subject2 = math500[pid2]['subject']
			acc[subject_index[subject1]][subject_index[subject2]] += 1
	for a in acc:
		# First divide each column by that subject's population (corrects for
		# unequal subject sizes), then renormalize the row to sum to 1.
		for s,i in subject_index.items():
			count = subject_count[s]
			a[i] /= count
		sum_a = sum(a)
		for i,ai in enumerate(a):
			a[i] = ai / sum_a
	print('subject_index:', subject_index)
	print('acc:', acc)
	return acc

def exam_similarity_acc(examples, embs, emb_index, acc_file):
	"""Sweep topk from 1 to 50, collect one subject-confusion matrix per
	setting, and dump all matrices to *acc_file* as JSONL."""
	matrices = []
	for k in range(1, 51):
		print('topk:', k)
		matrices.append(exam_similarity_acc_1(examples, embs, emb_index, k))
	dump_jsonl(matrices, acc_file)

def plot_curve(res, xlabel, ylabel, title=None, file=None):
	"""Plot *res* as a line over x = 1..len(res).

	Saves to *file* when given, otherwise shows interactively.
	"""
	import matplotlib.pyplot as plt
	# Bug fix: the old `plt.rcParams.update(...)` permanently multiplied the
	# global font size by 1.5 on EVERY call; rc_context keeps the scaling
	# local to this plot and restores rcParams on exit.
	with plt.rc_context({'font.size': plt.rcParams['font.size'] * 1.5}):
		x = list(range(1, len(res) + 1))
		plt.plot(x, res, marker='o', linestyle='-', color='b')
		plt.xlabel(xlabel)
		plt.ylabel(ylabel)
		if title is not None:
			plt.title(title)
		# Add grid for better readability
		plt.grid(True, linestyle='--', alpha=0.7)
		if file is not None:
			plt.savefig(file)
		else:
			plt.show()
		plt.close()

def plot_heatmap(data, row_names, col_names, title=None, xlabel=None, ylabel=None, file=None):
	"""Render *data* (2-D list/array) as an annotated seaborn heatmap.

	Saves to *file* when given, otherwise shows interactively.
	"""
	import seaborn as sns
	import matplotlib.pyplot as plt
	import numpy as np
	data_array = np.array(data)
	# Bug fix: the old `plt.rcParams.update(...)` permanently grew the global
	# font size by 1.5x on every call; rc_context scopes the scaling to this
	# plot only. Inside the context, plt.rcParams reflects the scaled size.
	with plt.rc_context({'font.size': plt.rcParams['font.size'] * 1.5}):
		plt.figure(figsize=(15, 12))  # Increased figure size to accommodate larger fonts
		ax = sns.heatmap(
			data_array,
			annot=True,
			fmt=".2f",
			cmap="YlGnBu",
			linewidths=.5,
			xticklabels=col_names,  # Set column names
			yticklabels=row_names,  # Set row names
			cbar_kws={"label": "Value"}
		)
		# Set plot title and axis labels
		if title is not None:
			plt.title(title, fontsize=plt.rcParams['font.size'] * 1.2)
		if xlabel is not None:
			plt.xlabel(xlabel, fontsize=plt.rcParams['font.size'])
		if ylabel is not None:
			plt.ylabel(ylabel, fontsize=plt.rcParams['font.size'])
		# Rotate x-axis labels for better readability
		if col_names is not None:
			plt.xticks(rotation=45, ha='right')
		# Display the plot
		plt.tight_layout()
		if file is not None:
			plt.savefig(file)
		else:
			plt.show()
		plt.close()

examples_path = '../OUT/created-examples'

# - Step 2 (emb.py):
# 	- Calculate the embedding for each Insight created in step 1
# 	- Estimate the accuracy of matching with embeddings with the "subject" category of each problem
def step2(test_set='MATH500', topk=1):
	"""Run step 2: embed insights, measure subject-retrieval accuracy, plot.

	Each stage is skipped when its output file already exists, so the
	pipeline is resumable.

	Args:
		test_set: dataset name used to derive input/output file names.
		topk: which topk setting's confusion matrix to render as a heatmap.
	"""
	acc_file = f'{examples_path}/step2_{test_set}_topk_acc.jsonl'
	if not os.path.isfile(acc_file):
		paths = [f'{examples_path}/step1_{test_set}.jsonl']
		examples = load_examples(paths)
		print(f'#examples {len(examples)}')
		emb_file = f'{examples_path}/step2_{test_set}_emb.jsonl'
		if not os.path.isfile(emb_file):
			gen_embs(examples, emb_file)
		embs, emb_index = load_emb(emb_file)
		exam_similarity_acc(examples, embs, emb_index, acc_file)
	acc_pdf = f'{examples_path}/step2_topk_acc.pdf'
	heatmap_pdf = f'{examples_path}/step2_acc_heatmap.pdf'
	if (not os.path.isfile(acc_pdf)) or (not os.path.isfile(heatmap_pdf)):
		accs = load_jsonl(acc_file)
		acc_ = []
		best_acc = 0
		heatmap = None
		for acc in accs:
			# diagonal mean = fraction of retrievals whose subject matches the query's
			ca = [a[i] for i,a in enumerate(acc)]
			ca = sum(ca)/len(ca)
			acc_.append(ca)
			if ca >= acc_[best_acc]:
				best_acc = len(acc_) - 1
			if len(acc_) == topk:
				heatmap = acc
		plot_curve(acc_, 'topk', 'Acc', file=acc_pdf)
		if heatmap is None:
			# Bug fix: when topk exceeds the number of stored settings the
			# original passed None to plot_heatmap and crashed; fall back to
			# the best-performing topk's matrix (best_acc was computed but
			# previously unused).
			heatmap = accs[best_acc]
		subject_index = {'Precalculus': 0, 'Intermediate Algebra': 1, 'Algebra': 2, 'Number Theory': 3, 'Prealgebra': 4, 'Geometry': 5, 'Counting & Probability': 6}
		row_names = {v:k for k,v in subject_index.items()}
		row_names = [row_names[i] for i in range(len(row_names))]
		plot_heatmap(heatmap, row_names, row_names, ylabel='Acc', file=heatmap_pdf)

if __name__ == '__main__':
	step2()