# ling-series-spaces/smart_writer_kit/agent_for_streaming_completion.py
from model_handler import ModelHandler
from config import LING_FLASH_2_0


def fetch_flow_suggestion_agent(editor_content: str, style: str = "", kb=None, short_outline=None, long_outline=None):
    """
    Agent for fetching a short, real-time continuation.
    This agent calls a real LLM.
    """
    if not editor_content or len(editor_content.strip()) < 4:
        return "(请输入更多内容以获取建议...)"

    try:
        model_handler = ModelHandler()

        # For a simple continuation, we can use a concise system prompt.
        system_prompt = f"""你是一个写作助手,根据用户输入的内容,紧接着写一句 **简短、流畅** 的续写。
- 不要重复用户已输入的内容,直接开始写你续写的部分。
- 遵循 **整体章程** 中的风格和指导原则。
整体章程:
{style}"""

        # editor_content is used as the user prompt. The context is arranged
        # around the current cursor position, which decides where the
        # continuation starts:
        # ---
        # <之前的内容>...</之前的内容>{continuation goes here}
        # <之后的内容>...</之后的内容>
        # The model writes only the {continuation goes here} part.
        # ---
        # Only the last 80 characters before the cursor are sent as context.
        user_prompt = f"""===之前的内容===\n{editor_content[-80:]}"""

        # generate_code is used as a simple streaming generator for direct
        # content; model_choice takes the model constant (its display name).
        response_generator = model_handler.generate_code(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            model_choice=LING_FLASH_2_0,
        )

        # Assemble the streamed chunks into the full response.
        full_response = "".join(response_generator)
        return full_response.strip()
    except Exception as e:
        print(f"[Agent] Error fetching flow suggestion: {e}")
        return f"(获取建议时出错: {e})"


def accept_flow_suggestion_agent(current_text: str, suggestion: str):
    """
    Agent for accepting a flow suggestion.

    Placeholder and error messages must never be merged into the draft.
    """
    # "请输入更多内容" matches the placeholder returned by
    # fetch_flow_suggestion_agent above; "等待输入" and "出错" cover the UI's
    # waiting state and the error message.
    rejected_markers = ("等待输入", "请输入更多内容", "出错")
    if not suggestion or any(marker in suggestion for marker in rejected_markers):
        return current_text
    return current_text + suggestion
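

# Minimal manual smoke test (a sketch): it assumes ModelHandler picks up valid
# model credentials from this repo's environment. The draft and style strings
# below are illustrative only.
if __name__ == "__main__":
    draft = "深秋的清晨,街道上"
    suggestion = fetch_flow_suggestion_agent(draft, style="简洁、克制的现代散文。")
    print(f"[suggestion] {suggestion}")
    print(f"[merged] {accept_flow_suggestion_agent(draft, suggestion)}")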