# Listen port for the HTTP API started at the bottom of this file.
PORT=8000
# Bearer token compared against the `Authorization` header in `task`.
# NOTE(review): hardcoded credential in source — consider loading it from an
# environment variable instead.
API_KEY='orange'

import json, os, torch
import transformers
from typing import List

# Code adapted from:
# lm-evaluation-harness/lm_eval/base.py
# lm-evaluation-harness/lm_eval/models/huggingface.py
# lm-evaluation-harness/lm_eval/models/textsynth.py

class MultiTokenEOSCriteria(transformers.StoppingCriteria):
	"""Stop generation once every batch element has produced `sequence`.

	Adapted from lm-evaluation-harness. A per-element done flag is kept so
	that `generate` only halts after *all* elements contain the stop string.
	"""

	def __init__(
		self,
		sequence: str,
		tokenizer: transformers.PreTrainedTokenizer,
		initial_decoder_input_length: int,
		batch_size: int,
	):
		# Prompt length; generated tokens start after this offset.
		self.initial_decoder_input_length = initial_decoder_input_length
		# One "done" flag per batch element; flips to True permanently.
		self.done_tracker = [False] * batch_size
		self.sequence = sequence
		self.sequence_ids = tokenizer.encode(sequence, add_special_tokens=False)
		self.sequence_id_len = len(self.sequence_ids)
		self.tokenizer = tokenizer

	def __call__(self, input_ids, scores, **kwargs) -> bool:
		# For efficiency, only decode the last `sequence_id_len` generated
		# tokens per element.
		# NOTE(review): a stop string that tokenizes to more tokens in context
		# than it does standalone can be missed by this window — a known
		# limitation inherited from the harness.
		lookback_ids_batch = input_ids[:, self.initial_decoder_input_length :][
			:, -self.sequence_id_len :
		]

		lookback_tokens_batch = self.tokenizer.batch_decode(lookback_ids_batch)

		for i, done in enumerate(self.done_tracker):
			if not done:
				self.done_tracker[i] = self.sequence in lookback_tokens_batch[i]
		# Idiomatic form of `False not in self.done_tracker`: stop only when
		# every batch element has hit the stop sequence.
		return all(self.done_tracker)
	
def stop_sequences_criteria(
	tokenizer: transformers.PreTrainedTokenizer,
	stop_sequences: List[str],
	initial_decoder_input_length: int,
	batch_size: int,
) -> transformers.StoppingCriteriaList:
	"""Build a StoppingCriteriaList with one MultiTokenEOSCriteria per stop string."""
	# The original wrapped the comprehension in a redundant `[*[...]]`;
	# a plain list comprehension is equivalent.
	return transformers.StoppingCriteriaList(
		[
			MultiTokenEOSCriteria(
				sequence, tokenizer, initial_decoder_input_length, batch_size
			)
			for sequence in stop_sequences
		]
	)

def get_model(model_id, device_map="auto"):
	"""Load a local HF causal-LM checkpoint plus its tokenizer in bfloat16.

	Args:
		model_id: path to a local model directory.
		device_map: forwarded to `from_pretrained` (default "auto").

	Returns:
		A `(tokenizer, model)` tuple.

	Raises:
		FileNotFoundError: if `model_id` is not an existing directory.
	"""
	# `assert` is stripped under `python -O`; validate with an explicit raise,
	# and do it before the heavy transformers import so failure is cheap.
	if not os.path.isdir(model_id):
		raise FileNotFoundError(f'model directory not found: {model_id}')
	from transformers import AutoTokenizer, AutoModelForCausalLM
	tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
	model = AutoModelForCausalLM.from_pretrained(
		model_id,
		device_map=device_map,
		torch_dtype=torch.bfloat16,
		trust_remote_code=True,
	)
	return tokenizer, model

class Llama3_8B_Adapter:
	"""Greedy-decoding adapter around a local Llama-3-8B-Instruct checkpoint."""

	def __init__(self, model_id='Meta-Llama-3-8B-Instruct', device='cuda:0'):
		# NOTE(review): `device` is forwarded as `get_model`'s `device_map`
		# argument; HF accepts a device string there, but the differing names
		# suggest this should be confirmed against the intended placement.
		self.tokenizer, self.model = get_model(model_id, device)
		self.device = self.model.device

	def _model_generate(self, inputs, stop, max_gen_toks=256):
		"""Run `model.generate` greedily and return only the new tokens.

		Args:
			inputs: tokenizer output carrying `input_ids` and `attention_mask`.
			stop: list of stop strings used to build the stopping criteria.
			max_gen_toks: cap on the number of newly generated tokens.

		Returns:
			Tensor of generated token ids with the prompt prefix stripped.
		"""
		input_ids = inputs["input_ids"].to(self.device)
		attention_mask = inputs["attention_mask"].to(self.device)
		stopping_criteria = stop_sequences_criteria(
			self.tokenizer, stop, input_ids.shape[1], input_ids.shape[0]
		)
		generations = self.model.generate(
			input_ids=input_ids,
			attention_mask=attention_mask,
			# GPT style models require the `generate` `max_length` arg to include
			# the context length, so we instead set `max_new_tokens`, which counts
			# only the new tokens to generate.
			max_new_tokens=max_gen_toks,
			stopping_criteria=stopping_criteria,
			do_sample=False,
			top_p=1,
			temperature=1,
			pad_token_id=self.tokenizer.eos_token_id,
			eos_token_id=self.tokenizer.eos_token_id,
		)
		# Strip the prompt so callers receive only the completion tokens.
		return generations[:, input_ids.size(1):]

	def greedy_until(self, context, stop, max_gen_toks=None):
		"""Greedily decode `context` until a stop sequence or EOS.

		Args:
			context: prompt string(s) accepted by the tokenizer.
			stop: a stop string, a list of stop strings, or None for EOS-only.
			max_gen_toks: optional int cap on new tokens (default 256).

		Returns:
			{'text': completion} for the first batch element, with any stop
			sequence and everything after it trimmed off.
		"""
		# BUG FIX: the original `stop if isinstance(stop, list) else [stop]`
		# turned `stop=None` into `[None]`, making the None branch below
		# unreachable and later crashing `tokenizer.encode(None)`.
		if stop is None:
			stop_sequences = None
		elif isinstance(stop, list):
			stop_sequences = stop
		else:
			stop_sequences = [stop]
		# `assert` is stripped under `python -O`; validate explicitly.
		if not (max_gen_toks is None or isinstance(max_gen_toks, int)):
			raise TypeError(f'max_gen_toks must be int or None, got {max_gen_toks!r}')
		if stop_sequences is None:
			until = [self.tokenizer.eos_token]
		else:
			until = stop_sequences + [self.tokenizer.eos_token]
		if max_gen_toks is None:
			max_gen_toks = 256
		token_context = self.tokenizer(
			context,
			add_special_tokens=False,
			return_tensors="pt",
		)
		responses = self._model_generate(
			inputs=token_context,
			stop=until,
			max_gen_toks=max_gen_toks,
		)
		responses = self.tokenizer.batch_decode(responses.tolist(), skip_special_tokens=True)
		response = responses[0]
		# Trim at each stop sequence so nothing past the first match leaks out.
		for term in until:
			response = response.split(term)[0]
		return {'text': response}


class Phi3_mini_Adapter(Llama3_8B_Adapter):
	"""Adapter for a local Phi-3-mini checkpoint; reuses all Llama-3 generation logic."""

	def __init__(self, model_id='Phi-3-mini-128k-instruct', device='cuda:0'):
		super().__init__(model_id=model_id, device=device)

# Registry of available model adapters, keyed by the `{engine}` URL segment.
# NOTE: constructing an adapter loads its model immediately (via `get_model`),
# so each enabled entry costs GPU memory at import time — disabled entries
# stay commented out.
adapters = {
	'llama3_8B': Llama3_8B_Adapter(),
	# 'phi3_mini': Phi3_mini_Adapter(),
	}

def write_response_json(response, resp):
	"""Serialize `resp` as JSON and send it as an HTTP 200 response.

	Writes status line, headers, and body on the project `response` object,
	then flushes the underlying socket file. Always returns True so route
	handlers can `return write_response_json(...)`.
	"""
	response.write_first_line_(200)
	response.write_content_type_header_("application/json", "UTF-8")
	response.write_header_("Connection", "close")
	response.write_end_header_()
	payload = json.dumps(resp) + '\n\n'
	response.write_(payload)
	response.request_.socket_file_.flush()
	return True

def task(request, response, route_args):
	"""Route handler for POST /engines/{engine}/{api}.

	Verifies the bearer token, then dispatches `completions` requests to the
	matching adapter and writes the JSON result.

	Raises:
		PermissionError: when the Authorization header does not match API_KEY.
		ValueError: for non-greedy sampling requests or an unknown `{api}`.
	"""
	engine = route_args['engine'].strip()
	api = route_args['api'].strip()
	auth = request.headers_['authorization']
	max_gen_toks = request.params_['max_gen_toks']
	# SECURITY: the original used `assert` here, which is stripped under
	# `python -O` and would silently disable authentication entirely.
	if auth != f'Bearer {API_KEY}':
		raise PermissionError(f'bad authorization header: {auth}')
	if api == 'completions':
		prompt = request.params_['prompt']
		stop = request.params_['stop']
		top_k = request.params_['top_k']
		# Only greedy decoding is implemented (see Llama3_8B_Adapter).
		if top_k != 1:
			raise ValueError(f'only greedy decoding (top_k=1) is supported, got {top_k}')
		resp = adapters[engine].greedy_until([prompt], stop, max_gen_toks)
		return write_response_json(response, resp)
	raise ValueError(f'Wrong API: {api}')


from serv.lib.http_ import Http_
# Start the project HTTP server and register the single completions endpoint.
# NOTE(review): binds to 0.0.0.0, so the API is reachable from any network
# interface — the bearer-token check in `task` is the only access control.
http = Http_(ip_='0.0.0.0', port_=PORT, web_path_='web', max_threads_=100)
http.add_route_('/engines/{engine}/{api}', task, 'POST')
print(f'服务器已启动,端口:{PORT}')
print('LLMs:',' '.join(sorted(adapters.keys())))
http.start_()
