
def make_prompt(system_prompt=None, user_prompt=None, assitant_prompt=None, assistant_prompt=None):
	"""Build an OpenAI-style chat message list from the given prompts.

	Each non-None prompt becomes one {"role", "content"} dict, in the
	order system -> user -> assistant.

	Args:
		system_prompt: optional system message content.
		user_prompt: optional user message content.
		assitant_prompt: deprecated misspelling, kept so existing callers
			that pass it by keyword keep working; prefer assistant_prompt.
		assistant_prompt: optional assistant message content. Ignored if
			the legacy assitant_prompt is also given.

	Returns:
		list[dict]: the messages list (possibly empty).
	"""
	# Legacy misspelled keyword wins so existing call sites behave unchanged.
	if assitant_prompt is not None:
		assistant_prompt = assitant_prompt
	messages = []
	if system_prompt is not None:
		messages.append({"role": "system", "content": system_prompt})
	if user_prompt is not None:
		messages.append({"role": "user", "content": user_prompt})
	if assistant_prompt is not None:
		messages.append({"role": "assistant", "content": assistant_prompt})
	return messages

next_api_key_index_ = None
def next_api_key(api_keys):
	"""Return the next API key from *api_keys*, rotating round-robin.

	The rotation position is kept in the module-level variable
	``next_api_key_index_``; the first call starts at a random index so
	that load is spread across keys between process runs.
	"""
	import random
	global next_api_key_index_
	idx = next_api_key_index_
	if idx is None:
		# First use: pick a random starting slot.
		idx = random.randrange(len(api_keys))
	else:
		# Subsequent uses: advance one slot, wrapping around.
		idx = (idx + 1) % len(api_keys)
	next_api_key_index_ = idx
	return api_keys[idx]

def generate_stream(messages, model, verbal=True, api_key=None, timeout=60, **args):
	"""Stream a chat completion for *messages* from the provider serving *model*.

	Sends a streaming (SSE) request to the first provider whose
	``model_ids`` contains *model*, accumulating the assistant text and
	any reasoning ("thinking") text as chunks arrive. When *verbal* is
	true, chunks are echoed to the terminal (reasoning in cyan, content
	in green).

	Args:
		messages: OpenAI-style list of {"role", "content"} dicts.
		model: provider model identifier.
		verbal: echo streamed chunks to the terminal.
		api_key: explicit key; otherwise one is taken round-robin from
			the provider's key pool via next_api_key().
		timeout: request timeout in seconds, passed to requests.post.
		**args: extra JSON body fields (temperature, max_tokens, ...).

	Returns:
		tuple: (content, reasoning, [prompt_tokens, completion_tokens]).

	Raises:
		ValueError: if no known provider serves *model*.
		RuntimeError: on a non-200 HTTP response.
	"""
	import requests, json
	from util import color
	from . import siliconflow
	for sp in [siliconflow]:
		if model in sp.model_ids:
			api_key_ = api_key or next_api_key(sp.api_keys)
			headers = sp.make_headers(api_key_)
			api_url = sp.api_url
			break
	else:
		# Fix: previously fell through and hit a NameError on headers/api_url.
		raise ValueError(f'No provider found for model: {model}')
	data = {
		"model": model,
		"messages": messages,
		"stream": True,
		**args,
	}
	reasoning = ''
	resp = ''
	prompt_tokens = 0
	completion_tokens = 0
	with requests.post(api_url, headers=headers, json=data, stream=True, timeout=timeout) as response:
		if response.status_code == 200:
			for chunk in response.iter_lines():
				if not chunk: continue
				chunk = chunk.decode('utf-8')
				# A physical line may carry several SSE events.
				lines = chunk.split('\n\n')
				for line in lines:
					line = line.strip()
					if not line.startswith('data:'): continue
					line = line[5:].strip()
					if line == '[DONE]':
						break
					line = json.loads(line)
					# Fix: usage may arrive only in the final chunk (often with
					# empty choices) and may be absent elsewhere — read it with
					# .get() before the choices early-continue, instead of
					# indexing line['usage'] unconditionally.
					usage = line.get('usage')
					if usage:
						prompt_tokens = usage.get('prompt_tokens', prompt_tokens)
						completion_tokens = usage.get('completion_tokens', completion_tokens)
					if 'choices' not in line or not line['choices']: continue
					delta = line['choices'][0]['delta']
					reasoning_content = delta.get('reasoning_content')
					content = delta.get('content')
					if reasoning_content is not None:
						reasoning += reasoning_content
						if verbal:
							color.text(reasoning_content, 'cyan', end='')
					if content is not None:
						resp += content
						if verbal:
							color.text(content, 'green', end='')
		else:
			err_msg = f'{model} Error #{response.status_code}: {response.text}'
			if verbal:
				color.text(err_msg, 'red')
			raise RuntimeError(err_msg)
	return resp, reasoning, [prompt_tokens, completion_tokens]

def generate(
	model,
	messages,
	max_tokens=None,
	temperature=0.2,
	top_k=40,
	top_p=0.7,
	stop=None,
	n=1,
	response_format=None,
	verbal=True,
	api_key=None,
	**args,
):
	"""Call generate_stream() with retry-on-error and exponential backoff.

	Builds the sampling-parameter dict and keeps retrying the streamed
	request until it succeeds. On failure it waits 5s, doubling up to a
	30s ceiling (then resetting to 5s), so transient provider/network
	errors are retried forever. KeyboardInterrupt is never swallowed.

	Args:
		model: provider model identifier.
		messages: OpenAI-style messages list.
		max_tokens, temperature, top_k, top_p, stop, n: sampling params
			forwarded in the request body (top_k is accepted for API
			symmetry; it is not forwarded unless passed via **args).
		response_format: optional structured-output spec.
		verbal: echo streamed output and retry diagnostics.
		api_key: explicit key, else rotated from the provider pool.
		**args: extra request-body overrides (take precedence).

	Returns:
		tuple: (content, reasoning, [prompt_tokens, completion_tokens]).
	"""
	import time
	from util import color
	args_ = {
		"temperature": temperature,
		"max_tokens": max_tokens,
		"top_p": top_p,
		"stop": stop,
		"seed": 1,  # fixed seed for reproducibility
		"n": n,
	}
	args_.update(args)
	if response_format is not None:
		args_['response_format'] = response_format
	if model == 'Qwen/Qwen3-8B':
		# This model defaults to "thinking" mode; disable it.
		args_['enable_thinking'] = False
	timeout = 0
	while True:
		try:
			if timeout > 0:
				time.sleep(timeout)
			return generate_stream(messages, model, verbal, api_key, **args_)
		except KeyboardInterrupt:
			raise
		except Exception:
			# Fix: was a bare `except:`, which also swallowed SystemExit
			# and GeneratorExit; narrow to Exception.
			if timeout == 0 or timeout > 30: timeout = 5
			else: timeout *= 2
			if verbal:
				import traceback
				color.text('\n'+traceback.format_exc().strip().split('\n')[-1], 'red')
				color.text(f'Will retry after {timeout} seconds ...', 'red')
