import json, random, time, os, copy
from util import parallel, color
from exp import data
from llm import gen

DEBUG = True  # when True, verbal calls in generate() also dump the full rendered prompt

# Cache of loaded prompt templates keyed by (prompt name, examples prefix):
# maps key -> [template_text, examples_list_or_None].
prompt_args = {}
def make_prompt(prompt_path, prompt_name, examples_prefix=None, **kwargs):
	"""Load `{prompt_path}/{prompt_name}.md`, optionally attach few-shot examples,
	and return the template formatted with `kwargs`.

	Examples come either from files in `prompt_path` starting with
	`examples_prefix` (mutually exclusive with a non-None kwargs['examples']),
	or from kwargs['examples'] directly. They are shuffled and numbered as
	'## Example N' sections, then substituted into the '{examples}' slot.

	BUG FIX: the template/examples were never stored into `prompt_args`, so the
	cache-hit branch referenced an undefined name and the cache never worked;
	the entry is now written on first load. The cached examples list is copied
	before shuffling so the cache is not mutated, and kwargs-supplied examples
	are formatted the same way on every call.
	"""
	assert (examples_prefix is None) or ('examples' not in kwargs) or (kwargs['examples'] is None)
	key = f'{prompt_name}-+-+-{examples_prefix}'
	if key not in prompt_args:
		with open(f'{prompt_path}/{prompt_name}.md') as fp:
			prompt = fp.read()
		examples = None
		if examples_prefix is not None:
			examples = []
			for file in os.listdir(prompt_path):
				if file.startswith(examples_prefix) and file.endswith('.md'):
					with open(os.path.join(prompt_path, file)) as fp:
						examples.append(fp.read())
		prompt_args[key] = [prompt, examples]
	prompt, examples = prompt_args[key]
	if examples is None:
		# No file-based examples: fall back to caller-supplied ones, if any.
		examples = kwargs.get('examples', None)
	if examples is not None:
		examples = list(examples)  # copy so shuffling never mutates the cache
		random.shuffle(examples)
		kwargs['examples'] = '\n\n'.join(f'## Example {i+1}\n\n{e}' for i, e in enumerate(examples))
	return prompt.format(**kwargs)

def generate(model_id, question_, prompt_path, prompt_name, examples_prefix=None, verbal=False, **kwargs):
	"""Render the named prompt, query `model_id` through `gen`, record the token
	usage on `question_.context`, and return the raw response text."""
	rendered = make_prompt(prompt_path, prompt_name, examples_prefix, **kwargs)
	if verbal:
		if DEBUG:
			color.text(f'\nPrompt ({prompt_name}):', 'yellow', style='bright')
			print(rendered)
		color.text(f'\nResponse ({prompt_name}):', 'yellow', style='bright')
	messages = gen.make_prompt(user_prompt=rendered)
	resp, _, usage = gen.generate(model_id, messages, max_tokens=1024, verbal=verbal)
	question_.context.add_costs(*usage)
	return resp

class CostContext:
	"""Accumulates token usage and call timestamps for one question's run."""
	def __init__(self):
		# First timestamp marks the start of the run; later calls append to it.
		self.ts = [time.time()]
		self.prompt_tokens = 0
		self.completion_tokens = 0
	def add_costs(self, prompt_tokens, completion_tokens, timeout=60*15):
		"""Record one call's usage; asserts the run stays under `timeout` seconds."""
		now = time.time()
		self.ts.append(now)
		assert now - self.ts[0] < timeout
		self.prompt_tokens += prompt_tokens
		self.completion_tokens += completion_tokens
	def get_costs(self):
		"""Return the run window and accumulated token counts as a dict."""
		return dict(
			start_time=self.ts[0],
			end_time=self.ts[-1],
			prompt_tokens=self.prompt_tokens,
			completion_tokens=self.completion_tokens,
		)

class MathQuestion:
	"""Wraps one math problem sample and extracts the ground-truth answer
	from a GSM8K-style solution (text ending in '#### <answer>')."""
	def __init__(self, sample):
		self.pid = sample['pid']
		self.question = sample['problem']
		self.cot = sample.get('cot', None)
		self.solution = sample.get('solution', None)
		self.gt_answer = None
		self.context = CostContext()
		if self.solution is not None:
			# Everything after the final '####' marker is the ground-truth answer.
			self.gt_answer = self.solution.split('####')[-1].strip()
		# NOTE: misspelled name kept for existing callers; prefer `answer_format`.
		self.asnwer_format = 'The answer is: <num>'
		self.answer_format = self.asnwer_format
	def extra_asnwer(self, answer):
		"""Return the text following the final 'The answer is:' marker
		(the whole string, stripped, if the marker is absent)."""
		return answer.split('The answer is:')[-1].strip()
	# Correctly-spelled alias; `extra_asnwer` is kept for backward compatibility.
	extract_answer = extra_asnwer
	def clone(self):
		"""Copy of this question sharing the same cost context.
		BUG FIX: `cot` is now carried over (it was silently dropped before)."""
		q = MathQuestion({'pid': self.pid, 'problem': self.question,
			'solution': self.solution, 'cot': self.cot})
		q.context = self.context
		return q
	def key(self):
		"""Unique id used to index results files."""
		return self.pid

def _update_count(count, samples, q_type, outfile):
	index = {}
	for i, sample in enumerate(samples):
		q = q_type(sample)
		assert q.key() not in index, q.key()
		index[q.key()] = i
	with open(outfile) as fp:
		for line in fp:
			line = line.strip()
			if line:
				sample = json.loads(line)
				q = q_type(sample)
				i = index[q.key()]
				count[i][0] += 1
				count[i][1] += 1

# Candidate model identifiers; only the first entry is active — the commented
# lines are alternatives used in earlier runs, kept for easy switching.
model_ids = [
	'Qwen/Qwen2.5-7B-Instruct',
	# 'internlm/internlm2_5-7b-chat',
	# 'THUDM/glm-4-9b-chat',
	# 'Qwen/Qwen3-8B',
	# 'THUDM/GLM-Z1-9B-0414',
	# 'THUDM/GLM-4-9B-0414',
	# 'deepseek-ai/DeepSeek-R1-Distill-Qwen-7B',
	# 'deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B',
	# 'Qwen/Qwen2.5-7B-Instruct',
	# 'Qwen/Qwen2.5-Coder-7B-Instruct',
	# 'Qwen/Qwen2-7B-Instruct',
	# 'Qwen/Qwen2-1.5B-Instruct',
	# 'THUDM/chatglm3-6b',
	]
	
def run_exp(samples, solve, q_type=MathQuestion, num_samples=10, max_retries=50, 
		model_id=model_ids[0], outfile='result.jsonl', parallel_tasks=None, **kwargs):
	"""Run `solve` over `samples` until every sample has `num_samples` successful
	results (or its attempt count reaches `max_retries`), appending each success
	as a JSON line to `outfile`. Resumable: existing results in `outfile` are
	counted before scheduling new work. Failures log their traceback to
	'exceptions.txt'. Returns the exception flag from the last `parallel.run`
	batch (falsy on clean completion)."""
	def add_result(r):
		# Append-only JSONL so partial runs are never lost.
		with open(outfile, 'a') as fp:
			fp.write(json.dumps(r, ensure_ascii=False) + '\n')
	# count[i] = [successes, attempts] for samples[i]
	count = {i: [0, 0] for i in range(len(samples))}
	if os.path.isfile(outfile):
		_update_count(count, samples, q_type, outfile)
	def make_samples():
		# One queued task per still-missing success, shuffled to spread load.
		input_samples = []
		for i, sample in enumerate(samples):
			if count[i][0] < num_samples and count[i][1] < max_retries:
				for _ in range(num_samples - count[i][0]):
					input_samples.append([i, copy.deepcopy(sample)])
		random.shuffle(input_samples)
		return input_samples
	def task(args):
		i, sample = args
		question = q_type(sample)
		try:
			answer, target, reasoning_steps = solve(model_id, question, verbal=False, **kwargs)
			sample['steps'] = reasoning_steps
			sample['costs'] = question.context.get_costs()
			sample['pred'] = [target, answer]
			return i, sample
		# Deliberate best-effort: log the traceback and report failure for this
		# sample. Was a bare `except:`, which also swallowed KeyboardInterrupt
		# and SystemExit; narrowed to Exception.
		except Exception:
			import traceback
			with open('exceptions.txt', 'a') as fp:
				fp.write(traceback.format_exc())
				fp.write('\n--------------------\n')
			return i, None
	ex = False
	while not ex:
		input_samples = make_samples()
		if not input_samples:
			print('Done')
			break
		print('# input samples:', len(input_samples))
		if parallel_tasks is None:
			# Default parallelism to the number of available API keys.
			from llm import siliconflow
			parallel_tasks = len(siliconflow.api_keys)
		res, ex = parallel.run(task, input_samples, num_tasks=parallel_tasks)
		for ir in res:
			if ir is None:
				continue
			i, r = ir
			if r is not None:
				add_result(r)
				count[i][0] += 1
			# Every completed task — success or failure — counts as an attempt.
			count[i][1] += 1
	return ex

def test(sample, solve, q_type=MathQuestion, model_id=model_ids[0], **kwargs):
	"""Solve a single sample verbosely, printing the question, ground truth,
	prediction, step count, and token costs. Exceptions from `solve` are
	printed rather than propagated (KeyboardInterrupt still propagates)."""
	question = q_type(sample)
	color.text(model_id, 'red')
	color.text('Question:', 'red')
	print(question.question)
	def print_gt_ans():
		# BUG FIX: this used `hasattr(question, 'solution')`, which is always
		# True for MathQuestion (the attribute is set to None in __init__), so
		# the GT-answer branch was unreachable; test the value instead.
		if getattr(question, 'solution', None) is not None:
			color.text('Solution:', 'red')
			print(question.solution)
		else:
			color.text('GT Answer:', 'red')
			print(question.gt_answer)
	print_gt_ans()
	try:
		answer, target, reasoning_steps = solve(model_id, question, verbal=True, **kwargs)
		print(f'# steps {len(reasoning_steps)}')
		print(question.context.get_costs())
		print(f'{target}: {answer}')
		print_gt_ans()
	except KeyboardInterrupt:
		raise
	except Exception:
		import traceback
		traceback.print_exc()

if __name__ == '__main__':
	# NOTE(review): `exp_math` is not defined anywhere in this file — running the
	# module directly raises NameError. Confirm the intended entry point (it may
	# live in a sibling module) and either import or define it here.
	exp_math()
