Update app.py
app.py CHANGED
@@ -17,6 +17,8 @@ except Exception as e:
 tokenizer = None
 base_model = None
 peft_model = None
+
+generated_text = None
 
 def respond(message, history, system_message, max_tokens, temperature, top_p):
     global tokenizer, peft_model
@@ -47,20 +49,24 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
 
         # Get the last message from the generated text
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-        def
-
-
-
-
-
-
-
-
-
+        def extract_content_after_output(text):
+            """
+            Extracts and returns the content that follows the word 'output' in the given text.
+            If 'output' is not found, returns a fallback message instead.
+            """
+            # Define the regular expression pattern to match 'output' and capture everything after it
+            pattern = re.compile(r'output\s*:\s*(.*)', re.IGNORECASE | re.DOTALL)
+            match = pattern.search(text)
+            if match:
+                # Return the captured group, which is the content after 'output'
+                return match.group(1).strip()
+            else:
+                # If 'output' is not found, return a fallback message
+                return "Retry to get output, the model failed to generate the required output (this occurs rarely 🤷‍♂️)"
 
-
+        lines = extract_content_after_output(generated_text)
 
-        return
+        return lines
 
     except Exception as e:
         return f"Generation error: {e}"
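For reference, a minimal standalone sketch of the regex extraction this commit adds. Only the pattern itself comes from the diff; the sample text is made up for illustration, the fallback is simplified to an empty string, and `import re` is assumed to already exist at the top of app.py.

import re

def extract_content_after_output(text):
    # Same pattern as in the commit: match 'output:' case-insensitively
    # and capture everything after it, including newlines (DOTALL).
    pattern = re.compile(r'output\s*:\s*(.*)', re.IGNORECASE | re.DOTALL)
    match = pattern.search(text)
    if match:
        return match.group(1).strip()
    return ""  # the commit returns a "retry" message here instead

# Hypothetical decoded model output, for illustration only
sample = "instruction: greet the user\nOutput: Hello! How can I help you today?"
print(extract_content_after_output(sample))  # -> Hello! How can I help you today?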