from huggingface_hub import InferenceClient
import gradio as gr

client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.2"
)
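# Note (not part of the original script): if the hosted model is gated or rate-limited,
# InferenceClient also accepts an access token, e.g.
#   client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2", token=os.environ["HF_TOKEN"])
# where "HF_TOKEN" is a hypothetical environment-variable name and `import os` would be needed.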

"""
def format_prompt(message, history):
  prompt = "<s>"
  for user_prompt, bot_response in history:
    prompt += f"[INST] {user_prompt} [/INST]"
    prompt += f" {bot_response}</s> "
  prompt += f"[INST] {message} [/INST]"
  return prompt
"""

def format_prompt(message, history):
  """
  Formats a prompt for the chatbot based on history and a provided template.

  Args:
      message (str): The user's current message.
      history (list): A list of tuples containing past user prompts and bot responses.

  Returns:
      str: The formatted prompt for the chatbot.
  """

  prompt = "<s>[INST]\n"  # Newline after opening prompt tag for readability

  # Introduction (replace placeholders with actual content if needed)
  prompt += f"""**Introduction:**
  Perform survye on `model` Wireless Headphones`: This name is broad enough to encompass various headphone types (over-ear, on-ear, earbuds) while highlighting a key feature (wireless) that's relevant across categories.
  **Asking Questions (One at a Time) in very short and concise way and don't tell about exmple response while asking questions:**
  * Feel free to rephrase or clarify the questions based on user.
    
  * Acknowledge my feedback and show appreciation for my participation.
  \n
  Some sample questions:
0. **Which model have you purchased, and from which brand?**
1. **How satisfied are you with the overall sound quality of your `model` Wireless Headphones?** (Very satisfied, Somewhat satisfied, Neutral, Somewhat dissatisfied, Very dissatisfied)
2. **On a scale of 1 (strongly disagree) to 5 (strongly agree), how comfortable are your `model` Wireless Headphones to wear for extended periods?**
3. **Did the battery life of your `model` Wireless Headphones meet your expectations based on the product description?** (Yes/No)
4. **How easy was it to connect your `model` Wireless Headphones to your devices via Bluetooth?** (Very easy, Somewhat easy, Neutral, Somewhat difficult, Very difficult)
5. **Do the controls on your `model` Wireless Headphones (volume, play/pause, etc.) function smoothly and intuitively?** (Yes/No)
6. **For over-ear or on-ear headphones only: How well do the ear cups of your `model` Wireless Headphones block out external noise?** (Very well, Somewhat well, Neutral, Not very well, Not at all)
7. **For earbuds only: How secure is the fit of your `model` Wireless Headphones in your ears?** (Very secure, Somewhat secure, Neutral, Not very secure, Not secure at all)
8. **Do the features advertised for your `model` Wireless Headphones (noise cancellation, microphone quality, etc.) function as expected?** (Yes/No)
9. **Overall, how likely are you to recommend these `model` Wireless Headphones to a friend or family member?** (Very likely, Somewhat likely, Neutral, Somewhat unlikely, Not likely at all)
10. **(Optional) Is there any additional feedback you'd like to share about your experience with your `model` Wireless Headphones?** (Open ended text response)
# End the conversation with a few final questions and give a summary of the survey feedback
11. "What do you like most about our product?"
12. "How can we improve our product?"
13. "Would you recommend our product to others?"
"""

  # Add past conversation history
  for user_prompt, bot_response in history:
    prompt += f"[INST] {user_prompt} [/INST]\n{bot_response}\n"

  # Add current user message
  prompt += f"[INST] {message} [/INST]\n"

  # Track how many questions have been asked so far (one per history turn).
  current_question = len(history) + 1
  if current_question <= 13:
    prompt += "next question"
  else:
    prompt += "End the conversation with a survey summary. Thank you for your participation! The survey is now complete."

  prompt += "\n[/INST]</s>"  # Closing prompt and tags

  return prompt
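# A minimal sketch (not in the original) of how the prompt builder behaves on the first turn,
# assuming an empty chat history:
#
#   preview = format_prompt("I bought the NovaBlast Over-Ear Wireless Headphones.", [])
#   print(preview)  # survey instructions, then the user's message, then the "next question" cue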


def generate(
    prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history)

    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""

    for response in stream:
        output += response.token.text
        yield output
    return output
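# A minimal sketch (not part of the original app) of consuming the streaming generator directly,
# e.g. for debugging outside the Gradio UI; each yielded value is the accumulated output so far,
# which is the shape gr.ChatInterface expects:
#
#   for partial in generate("I bought the SonicFit Wireless Earbuds.", [], max_new_tokens=64):
#       print(partial)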


additional_inputs=[
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=256,
        minimum=0,
        maximum=1024,
        step=64,
        interactive=True,
        info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    )
]
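# These sliders are forwarded by gr.ChatInterface (via additional_inputs below) as the
# temperature, max_new_tokens, top_p and repetition_penalty arguments of generate(), in order.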

css = """
  #mkd {
    height: 500px; 
    overflow: auto; 
    border: 1px solid #ccc; 
  }
"""
title = "Share Your Experience with Versatile Wireless Headphones & Help Us Improve"

example = ["I recently purchased the NovaBlast Over-Ear Wireless Headphones. They looked really comfortable and have great noise cancellation features.",  # (This response specifies the type (over-ear) and highlights a desired feature (noise cancellation).)
           # (This response specifies the type (earbuds) and mentions a specific activity where the wireless feature is beneficial.)
           "I bought a pair of the SonicFit Wireless Earbuds. They're perfect for running because they stay in my ears securely.",
           # (This response focuses on the wireless aspect and highlights the user's consideration of both headphone types.)
           "I got the RhythmPlus Wireless Headphones. I wasn't sure if I wanted over-ear or earbuds, so these seemed like a good compromise."]
with gr.Blocks(css=css) as demo:
    gr.ChatInterface(
        generate,
        title=title,
        additional_inputs=additional_inputs,
        # examples=example,  # the example prompts defined above could be passed here (ChatInterface expects a list)
    )

demo.queue().launch(debug=True)
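# Optional (not used here): launch(share=True) would also expose a temporary public URL.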