import requests
import json
from abc import abstractmethod
import traceback
import time


DEFAULT_SYSTEM_MESSAGE = "你是一个人工智能助手！"

class BaseChat:
	"""Base class for chat-style LLM HTTP clients.

	Stores the endpoint URL, model name, and the running message list
	(system prompt followed by dialogue turns). Subclasses must implement
	``_ouput_response`` to parse the server's specific reply format.
	"""

	def __init__(self, system_message=DEFAULT_SYSTEM_MESSAGE,
			  url="",
			  model_name=""):
		self.url = url
		self.model_name = model_name
		self.system_message = {
			"role": "system",
			"content": f"""{system_message}"""
		}
		self.message = [self.system_message]
		self.headers = {
			"Content-Type": "application/json"
		}

	@abstractmethod
	def _ouput_response(self, response, stream=False):
		# NOTE: the class does not use abc.ABCMeta, so @abstractmethod is not
		# enforced at instantiation time; this raise is the actual guard.
		raise NotImplementedError('Please implement the _ouput_response function.')

	def chat(self, prompt, message=None, stream=False, system_message=None, max_message_num=-1, **options):
		"""
		Chat Model. Automatically record contexts.
			prompt: Type Str, User input prompt words.
			message: Type List, Dialogue History. role in [system, user, assistant]
			stream: Type Boolean, Is it streaming output. if `True` streaming output, otherwise not streaming output.
			system_message: Type Str, System Prompt. Default self.system_message.
			max_message_num: For controlling the number of contexts(Exclude System Prompt). Default -1(Means: No control over context length)
			**options: option items. Example temperature, max_tokens, top_p, etc.
		"""
		if message is None:
			# Avoid the shared mutable-default-argument pitfall: a `message=[]`
			# default is one list object reused across every call.
			message = []
		if system_message:
			self.system_message["content"] = system_message
		self.message = [self.system_message] + message
		self.message.append({"role": "user", "content": prompt})
		if 'max_tokens' in options:
			# Ollama reads the context size from `num_ctx`, so mirror max_tokens.
			options['num_ctx'] = options['max_tokens']
		if max_message_num != -1 and len(self.message) >= (max_message_num*2-1):
			# NOTE(review): by operator precedence, -max_message_num*2-1 is
			# -(2*max_message_num + 1), so this keeps the system prompt plus the
			# last 2*max_message_num+1 entries — confirm this matches the
			# intended "max_message_num dialogue pairs" limit.
			self.message = self.message[0:1] + self.message[-max_message_num*2-1:]
		data = {
			"model": self.model_name,
			"messages": self.message,
			"options": options,  # Ollama-style option passing
			"stream": stream,
			"parameters": options  # Qwen-style option passing
		}
		data.update(options)
		responses = requests.post(self.url, headers=self.headers, json=data, stream=stream)
		try:
			return_text = self._ouput_response(responses, stream)
		except Exception:
			# NOTE(review): if _ouput_response is a generator function (as in
			# OllamaChat), this call only creates the generator; parsing errors
			# surface during iteration, outside this try — confirm desired
			# error handling.
			print("Response:", responses.text)
			traceback.print_exc()
			return_text = "出错了，请向开发人员反馈！"
		return return_text

	def generate(self, prompt, stream=False, system_message=None, **options):
		'''
		Generate Model. No record contexts.
			prompt: Type Str, User input prompt words.
			stream: Type Boolean, Is it streaming output. if `True` streaming output, otherwise not streaming output.
			**options: option items. Example temperature, max_tokens, top_p, etc.
		'''
		return_text = self.chat(prompt, message=[], stream=stream, system_message=system_message, **options)
		return return_text
	
class OllamaChat(BaseChat):
	'''
	Chat client for an Ollama server.
	API docs: https://github.com/ollama/ollama/blob/main/docs/api.md
	'''
	def __init__(self, system_message=DEFAULT_SYSTEM_MESSAGE,
			  url="http://localhost:6008/api/chat",
			  model_name="qwen2.5:7b"):
		super(OllamaChat, self).__init__(system_message, url, model_name)

	def _ouput_response(self, response, stream=False):
		"""Parse an Ollama /api/chat HTTP response, yielding text chunks.

			response: a `requests.Response` from the /api/chat endpoint.
			stream: if True, the server sends newline-delimited JSON objects;
				each chunk's message content is yielded as it arrives.
				If False, the full reply text is yielded once.
		"""
		if stream:
			# Ollama streams newline-delimited JSON. iter_lines() yields exactly
			# one JSON document per iteration; the previous
			# iter_content(chunk_size=2048) could split one JSON object across
			# two chunks (or merge several into one), making json.loads fail.
			for line in response.iter_lines():
				if line:
					payload = json.loads(line.decode('utf-8'))
					# The final `done` object may carry no message content;
					# .get keeps it from raising KeyError.
					yield payload.get('message', {}).get('content', '')
		else:
			return_text = ''.join(
				json.loads(line)['message']['content']
				for line in response.text.split('\n')
				if len(line) != 0
			)
			yield return_text

if __name__ == "__main__":
	# Manual smoke test: send one prompt and time the round trip.
	client = OllamaChat()
	started = time.time()
	reply = client.chat("你好", stream=False)
	for piece in reply:
		print(piece)
	finished = time.time()
	print("耗时：", finished - started)
