Update app.py
app.py
CHANGED
@@ -22,14 +22,16 @@ chunk_size = 2500 # Adjust this size as needed
 info_md_chunks = textwrap.wrap(info_md_content, chunk_size)
 info2_md_chunks = textwrap.wrap(info2_md_content, chunk_size)
 
-
-
+# Combine both sets of chunks
+all_chunks = info_md_chunks + info2_md_chunks
 
-
+# Initialize history with these chunks
+history = []
+for chunk in all_chunks:
+    history.append(("System Information", chunk))
+
+def format_prompt_mixtral(message, history):
     prompt = "<s>"
-    # Sequentially add chunks from both files
-    all_chunks = get_all_chunks(info_md_chunks + info2_md_chunks)
-    prompt += f"{all_chunks}\n\n"  # Add all chunks at the beginning
     prompt += f"{system_prompt_text}\n\n"  # Add the system prompt
 
     if history:
@@ -49,7 +51,7 @@ def chat_inf(prompt, history, seed, temp, tokens, top_p, rep_p):
         seed=seed,
     )
 
-    formatted_prompt = format_prompt_mixtral(prompt, history
+    formatted_prompt = format_prompt_mixtral(prompt, history)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
     for response in stream:
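Note on the change: the commit seeds the chat history with the documentation chunks and fixes the missing closing parenthesis in the format_prompt_mixtral call. The body of format_prompt_mixtral beyond its first lines is not shown in this diff, so the following is only a minimal, self-contained sketch of how the seeded history could be folded into a Mixtral-style prompt. The [INST]/[/INST] turn loop, the placeholder file contents, and the example question are assumptions; identifiers such as info_md_content, system_prompt_text, and format_prompt_mixtral are taken from the diff.

import textwrap

# Illustrative stand-ins; in app.py these come from the loaded markdown files
# and the configured system prompt (the variable names match the diff).
info_md_content = "First documentation file contents. " * 200
info2_md_content = "Second documentation file contents. " * 200
system_prompt_text = "You answer questions using the provided documentation."

chunk_size = 2500  # matches the hunk header in the diff

info_md_chunks = textwrap.wrap(info_md_content, chunk_size)
info2_md_chunks = textwrap.wrap(info2_md_content, chunk_size)

# Combine both sets of chunks and seed the history with them, as the update does
all_chunks = info_md_chunks + info2_md_chunks
history = [("System Information", chunk) for chunk in all_chunks]

def format_prompt_mixtral(message, history):
    # The first lines mirror the diff; the history loop is an assumption based
    # on the usual Mixtral [INST] ... [/INST] turn format, since the rest of
    # the function body is not shown in this commit.
    prompt = "<s>"
    prompt += f"{system_prompt_text}\n\n"
    if history:
        for user_turn, bot_turn in history:
            prompt += f"[INST] {user_turn} [/INST] {bot_turn}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

formatted_prompt = format_prompt_mixtral("Summarise the documentation.", history)
print(formatted_prompt[:300])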