pabloce committed on
Commit
532ca99
·
verified ·
1 Parent(s): 4a09280

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -1
app.py CHANGED
@@ -122,6 +122,26 @@ def respond(
122
  outputs += output
123
  yield outputs
124
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
  demo = gr.ChatInterface(
126
  respond,
127
  additional_inputs=[
@@ -174,7 +194,8 @@ demo = gr.ChatInterface(
174
  undo_btn="Undo",
175
  clear_btn="Clear",
176
  submit_btn="Send",
177
- description="Llama-cpp-agent: Chat multi llm selection"
 
178
  )
179
 
180
  if __name__ == "__main__":
 
122
  outputs += output
123
  yield outputs
124
 
125
+ PLACEHOLDER = """
126
+ <div class="container" style="max-width: 600px; margin: 0 auto; padding: 30px; background-color: #fff; box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);">
127
+ <h1 style="font-size: 28px; margin-bottom: 15px;">llama-cpp-agent: Simplify LLM Interactions</h1>
128
+ <p style="font-size: 16px; line-height: 1.5; margin-bottom: 15px;">The llama-cpp-agent framework simplifies interactions with Large Language Models (LLMs), providing an interface for chatting, executing function calls, generating structured output, performing retrieval augmented generation, and processing text using agentic chains with tools.</p>
129
+ <p style="font-size: 16px; line-height: 1.5; margin-bottom: 15px;">The framework uses guided sampling to constrain model output to user-defined structures, enabling models not fine-tuned for function calling and JSON output to do so. It is compatible with llama.cpp server, llama-cpp-python and its server, TGI, and vllm servers.</p>
130
+ <h2 style="font-size: 22px; margin-bottom: 10px;">Key Features</h2>
131
+ <ul style="list-style-type: none; padding: 0;">
132
+ <li style="font-size: 16px; line-height: 1.5; margin-bottom: 8px;"><strong>Simple Chat Interface</strong>: Engage in seamless conversations with LLMs.</li>
133
+ <li style="font-size: 16px; line-height: 1.5; margin-bottom: 8px;"><strong>Structured Output</strong>: Generate structured output (objects) from LLMs.</li>
134
+ <li style="font-size: 16px; line-height: 1.5; margin-bottom: 8px;"><strong>Function Calling</strong>: Execute functions using LLMs.</li>
135
+ <li style="font-size: 16px; line-height: 1.5; margin-bottom: 8px;"><strong>RAG</strong>: Perform retrieval augmented generation with colbert reranking.</li>
136
+ <li style="font-size: 16px; line-height: 1.5; margin-bottom: 8px;"><strong>Agent Chains</strong>: Process text using agent chains with tools.</li>
137
+ <li style="font-size: 16px; line-height: 1.5; margin-bottom: 8px;"><strong>Guided Sampling</strong>: Allows most 7B LLMs to do function calling and structured output.</li>
138
+ <li style="font-size: 16px; line-height: 1.5; margin-bottom: 8px;"><strong>Multiple Providers</strong>: Works with various servers and providers.</li>
139
+ <li style="font-size: 16px; line-height: 1.5; margin-bottom: 8px;"><strong>Compatibility</strong>: Works with python functions, pydantic tools, llama-index tools, and OpenAI tool schemas.</li>
140
+ <li style="font-size: 16px; line-height: 1.5;"><strong>Flexibility</strong>: Suitable for various applications, from casual chatting to specific function executions.</li>
141
+ </ul>
142
+ </div>
143
+ """
144
+
145
  demo = gr.ChatInterface(
146
  respond,
147
  additional_inputs=[
 
194
  undo_btn="Undo",
195
  clear_btn="Clear",
196
  submit_btn="Send",
197
+ description="Llama-cpp-agent: Chat multi llm selection",
198
+ chatbot=gr.Chatbot(scale=1, placeholder=PLACEHOLDER)
199
  )
200
 
201
  if __name__ == "__main__":