"""
update time: 2025.01.06
"""
import json
import requests


def get_models():
    """Return the static model catalog as a JSON string."""
    models = {
        "object": "list",
        "data": [
            {"id": "Qwen2.5-72B", "object": "model", "created": 0, "owned_by": "Qwen"},
            {"id": "Llama3.3-70B", "object": "model", "created": 0, "owned_by": "Nemotron"},
            {"id": "Pixtral-124B", "object": "model", "created": 0, "owned_by": "Pixtral"},
            {"id": "Qwen2.5-Coder-32B", "object": "model", "created": 0, "owned_by": "Qwen"},
        ]
    }
    return json.dumps(models)
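
# Example (sketch, not part of the original code): get_models() returns a
# JSON *string*, so callers must parse it before use, e.g.:
#   ids = [m["id"] for m in json.loads(get_models())["data"]]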


def is_model_available(model_id):
    # Get the models JSON
    models_json = get_models()
    # Parse the JSON string into a Python dictionary
    models_data = json.loads(models_json)
    # Loop through the model list to check if the model ID exists
    for model in models_data.get("data", []):
        if model["id"] == model_id:
            return True  # Model ID found
    return False  # Model ID not found


def get_auto_model(model_id=None):
    """Return model_id if it names an available model, otherwise fall back
    to the default Llama3.3-70B."""
    if model_id and is_model_available(model_id):
        return model_id
    return "Llama3.3-70B"


# New helper: check whether the model is valid and return its id.
def get_model_by_autoupdate(model_id=None):
    # Fetch the full model catalog
    models_data = json.loads(get_models())["data"]
    # Collect all valid model ids
    valid_ids = [model["id"] for model in models_data]
    # If model_id is invalid, fall back to the default Llama3.3-70B
    if model_id not in valid_ids:
        model_id = "Llama3.3-70B"
    # Look up the catalog entry matching model_id
    model_data = next((model for model in models_data if model["id"] == model_id), None)
    # Return the id field of the matched entry
    return model_data["id"] if model_data else None
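
# Usage sketch (behavior follows directly from the code above): a valid id
# is returned unchanged, anything else falls back to the default:
#   get_model_by_autoupdate("Qwen2.5-72B")  # -> "Qwen2.5-72B"
#   get_model_by_autoupdate("not-a-model")  # -> "Llama3.3-70B"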


def chat_completion_message(
        user_prompt,
        user_id: str = None,
        session_id: str = None,
        system_prompt="You are a helpful assistant.",
        model="Llama3.3-70B",
        project="DecentralGPT", stream=False,
        temperature=0.3, max_tokens=1024, top_p=0.5,
        frequency_penalty=0, presence_penalty=0):
    """Session isolation (single-user, single-session conversations) will be added in the future."""
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]
    # Pass keyword arguments so user_id/session_id are not misread as the
    # model/project parameters of chat_completion_messages().
    return chat_completion_messages(
        messages=messages, user_id=user_id, session_id=session_id,
        model=model, project=project, stream=stream,
        temperature=temperature, max_tokens=max_tokens, top_p=top_p,
        frequency_penalty=frequency_penalty, presence_penalty=presence_penalty)


def chat_completion_messages(
        messages,
        model="Llama3.3-70B",
        user_id: str = None,
        session_id: str = None,
        project="DecentralGPT", stream=False, temperature=0.3, max_tokens=1024, top_p=0.5,
        frequency_penalty=0, presence_penalty=0):
    # user_id and session_id are accepted for future session isolation and
    # are currently unused.
    url = 'https://usa-chat.degpt.ai/api/v0/chat/completion/proxy'
    headers = {
        'sec-ch-ua-platform': '"macOS"',
        'Referer': 'https://www.degpt.ai/',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
        'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
        'DNT': '1',
        'Content-Type': 'application/json',
        'sec-ch-ua-mobile': '?0'
    }
    payload = {
        # Resolve the requested model, falling back to the default if invalid
        "model": get_auto_model(model),
        "messages": messages,
        "project": project,
        "stream": stream,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty
    }
    return chat_completion(url, headers, payload)


def chat_completion(url, headers, payload):
    """Send the chat request and return the parsed JSON response."""
    try:
        response = requests.post(url, headers=headers, json=payload)
        response.encoding = 'utf-8'
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Request failed: {e}")
        return "Request failed; check the network or the request parameters."
    except ValueError as e:
        # response.json() raises ValueError (json.JSONDecodeError) on bad JSON
        print(f"Error parsing the response: {e}")
        return "Failed to parse the response body."
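
# Note (sketch): chat_completion() returns a dict on success but a plain
# error string on failure, so callers can distinguish the two cases with
# is_chatgpt_format() below, e.g.:
#   resp = chat_completion_message("hello")
#   if is_chatgpt_format(resp): ...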


def is_chatgpt_format(data):
    """Check if the data is in the expected ChatGPT format"""
    try:
        # If the data is a string, try to parse it as JSON
        if isinstance(data, str):
            try:
                data = json.loads(data)
            except json.JSONDecodeError:
                return False  # If the string can't be parsed, it's not in the expected format
        # Now check if data is a dictionary and contains the necessary structure
        if isinstance(data, dict):
            # Ensure 'choices' is a list and the first item has a 'message' field
            if "choices" in data and isinstance(data["choices"], list) and len(data["choices"]) > 0:
                if "message" in data["choices"][0]:
                    return True
    except Exception as e:
        print(f"Error checking ChatGPT format: {e}")
    return False
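

# Convenience sketch (not part of the original API): once a response has
# passed is_chatgpt_format(), the assistant text can be extracted as below.
# The nested "message"/"content" layout is assumed from the standard
# ChatGPT response schema.
def extract_reply_content(data):
    """Return the assistant message content from a ChatGPT-format response,
    or None if the data is not in the expected format."""
    if not is_chatgpt_format(data):
        return None
    if isinstance(data, str):
        data = json.loads(data)
    return data["choices"][0]["message"].get("content")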


# if __name__ == '__main__':
#     print(get_auto_model("Llama3.3-70B"))
#     print(get_auto_model("Qwen"))
#     print(is_model_available("Llama3.3-70B"))
#     print(is_model_available("Qwen"))
#     print("========= model catalog test ======")
#     print(get_models())
#     print("========= first chat test ======")
#     messages = [
#         {'role': 'user', 'content': 'Hello, who are you?'},
#     ]
#     response_content = chat_completion_messages(
#         messages=messages,
#         user_id="user_id",
#         temperature=0.5,
#         max_tokens=500,
#         stream=False,
#         model="Llama3.3-70B"
#     )
#     print(response_content)
#     print(f"is chatgpt format: {is_chatgpt_format(response_content)}")
#     print("========= chat test ======")
#     resp = chat_completion_message("What model are you?")
#     print(resp)
#     print(f"is chatgpt format: {is_chatgpt_format(resp)}")
#     # support Chinese
#     if isinstance(response_content, str):  # if it is already a JSON string
#         return Response(response_content, content_type="application/json; charset=utf-8")