Solshine committed
Commit 841129f
1 Parent(s): 6f5234b

Added DSPy synthetic prompt modifications

Files changed (1)
  1. app.py +60 -4
app.py CHANGED
@@ -20,12 +20,68 @@ phi2 = pipeline(
     eos_token_id=tokenizer.eos_token_id,
     device_map="cpu"
 )
+# DSPy-based prompt generation
+from dspy.agents import Agent
+from dspy import spawn_processes
+from dspy.utils import SentenceSplitter, SentimentAnalyzer, NamedEntityRecognizer
+
+def dspy_generate_agent_prompts(prompt):
+    """
+    Generates prompts for different agents based on the provided prompt and DSPy functionalities.
+
+    Args:
+        prompt (str): The user-provided prompt (e.g., customer reviews).
+
+    Returns:
+        list: A list containing agent-specific prompts.
+    """
+
+    # 1. Split the prompt into individual sentences
+    sentences = SentenceSplitter().process(prompt)
+
+    # 2. Analyze sentiment for each sentence
+    sentiment_analyzer = SentimentAnalyzer()
+    sentiment_labels = []
+    for sentence in sentences:
+        sentiment_labels.append(sentiment_analyzer.analyze(sentence))
+
+    # 3. Extract named entities related to specific topics
+    ner = NamedEntityRecognizer(model_name="en_core_web_sm")
+    extracted_entities = {}
+    for sentence in sentences:
+        entities = ner.process(sentence)
+        for entity in entities:
+            if entity.label_ in ["FOOD", "ORG", "LOCATION"]:  # Customize entity labels based on needs
+                extracted_entities.setdefault(entity.label_, []).append(entity.text)
+
+    # 4. Craft prompts for each agent (incomplete)
+    agent_prompts = []
+
+    # **Sentiment Analyzer Prompt:**
+    sentiment_prompt = f"Analyze the sentiment of the following sentences:\n" + "\n".join(sentences)
+    agent_prompts.append(sentiment_prompt)
+
+    # **Topic Extractor Prompt:** (Modify based on your specific topics)
+    topic_prompt = f"Extract the main topics discussed in the following text, focusing on food, service, and ambiance:\n{prompt}"
+    agent_prompts.append(topic_prompt)
+
+    # **Recommendation Generator Prompt:** (Modify based on your requirements)
+    positive_count = sum(label == "POSITIVE" for label in sentiment_labels)
+    negative_count = sum(label == "NEGATIVE" for label in sentiment_labels)
+    neutral_count = sum(label == "NEUTRAL" for label in sentiment_labels)
+    topic_mentions = "\n".join(f"{k}: {','.join(v)}" for k, v in extracted_entities.items())
+
+    recommendation_prompt = f"""Based on the sentiment analysis (positive: {positive_count}, negative: {negative_count}, neutral: {neutral_count}) and extracted topics ({topic_mentions}), suggest recommendations for organic farming methods to address the user's concerns in their location."""
+    agent_prompts.append(recommendation_prompt)
+
+    return agent_prompts
 
 # Function that accepts a prompt and generates text using the phi2 pipeline
 def generate(message, chat_history, max_new_tokens):
 
-    instruction = "You are a helpful assistant to 'User'. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'."
-    final_prompt = f"Instruction: {instruction}\n"
+    synth_message = dspy_generate_agent_prompts(message)
+    instruction = "You are a helpful organic farming assistant to 'User'. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'. You are an agricultural assistant committed to regenerative practices. You are being supplied with a list of tasks which you will need to walk the user through with the compassionate heart of a teacher and using easily understandable language."
+    final_prompt = f"Instruction: {instruction} {synth_message}\n"
 
     for sent, received in chat_history:
         final_prompt += "User: " + sent + "\n"
@@ -58,8 +114,8 @@ def generate(message, chat_history, max_new_tokens):
 # Chat interface with gradio
 with gr.Blocks() as demo:
     gr.Markdown("""
-    # Phi-2 Chatbot Demo
-    This chatbot was created using Microsoft's 2.7 billion parameter [phi-2](https://huggingface.co/microsoft/phi-2) Transformer model.
+    # LEAP Phi-2 Agentic Chatbot Demo
+    This multi-agent chatbot was created for the LEAP hackathon to offer an interface to a team of experts for organic farming advice, using Microsoft's 2.7 billion parameter [phi-2](https://huggingface.co/microsoft/phi-2) Transformer model and DSPy synthetic prompt agentics.
 
     In order to reduce the response time on this hardware, `max_new_tokens` has been set to `21` in the text generation pipeline. With this default configuration, it takes approximately `60 seconds` for the response to start being generated, and streamed one word at a time. Use the slider below to increase or decrease the length of the generated text.
     """)