Yash Malviya committed
Commit 31600e0 · 0 Parent(s)

Added Hugging Face Space code

Files changed (4)
  1. .gitattributes +35 -0
  2. README.md +12 -0
  3. app.py +75 -0
  4. requirements.txt +4 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
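
These are the standard Hugging Face Space defaults: any file matching one of the patterns is stored through Git LFS instead of plain Git. A minimal sketch of how the globbing behaves (file names are hypothetical, and Python's `fnmatch` only approximates git's pattern semantics):

```python
from fnmatch import fnmatch

# A subset of the LFS patterns declared in the .gitattributes above.
lfs_patterns = ["*.bin", "*.safetensors", "*.h5", "*.onnx", "*.pt", "saved_model/**/*"]

def is_lfs_tracked(path: str) -> bool:
    # True if the path matches any LFS-tracked pattern.
    return any(fnmatch(path, pattern) for pattern in lfs_patterns)

print(is_lfs_tracked("model.safetensors"))  # True  -> stored via Git LFS
print(is_lfs_tracked("app.py"))             # False -> stored as a normal text file
```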
README.md ADDED
@@ -0,0 +1,12 @@
+ ---
+ title: Mlops
+ emoji: 💬
+ colorFrom: yellow
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 4.36.1
+ app_file: app.py
+ pinned: false
+ ---
+
+ An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
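
The README points at the Inference API call that app.py wraps. A minimal, non-streaming sketch of that call, assuming the same `HuggingFaceH4/zephyr-7b-beta` model used in app.py and an illustrative prompt (depending on the deployment, a Hugging Face token may be required):

```python
from huggingface_hub import InferenceClient

# Same model the Space talks to; a token can be passed via InferenceClient(..., token=...).
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

result = client.chat_completion(
    [
        {"role": "system", "content": "You are a friendly Chatbot."},
        {"role": "user", "content": "Hello!"},  # illustrative prompt
    ],
    max_tokens=128,
)
print(result.choices[0].message.content)
```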
app.py ADDED
@@ -0,0 +1,75 @@
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+ from transformers import pipeline
+
+ """
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+ """
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+ # Initialize a sentiment analysis pipeline (downloads the default English sentiment model on first run)
+ sentiment_analyzer = pipeline("sentiment-analysis")
+
+
+ def respond(
+     message,
+     history: list[tuple[str, str]],
+     system_message,
+     max_tokens,
+     temperature,
+     top_p,
+ ):
+     messages = [{"role": "system", "content": system_message}]
+
+     # Run sentiment analysis on the incoming user message before responding
+     sentiment_result = sentiment_analyzer(message)[0]
+     sentiment = sentiment_result['label']
+     confidence = sentiment_result['score']
+
+     # Summarize the sentiment so it can be appended to the user message
+     sentiment_info = f"(Sentiment: {sentiment}, Confidence: {confidence:.2f})"
+
+     # Replay the existing chat history as alternating user/assistant turns
+     for val in history:
+         if val[0]:
+             messages.append({"role": "user", "content": val[0]})
+         if val[1]:
+             messages.append({"role": "assistant", "content": val[1]})
+
+     # Append the current user message together with its sentiment annotation
+     messages.append({"role": "user", "content": message + " " + sentiment_info})
+
+     response = ""
+
+     # Stream the response from the chatbot via Hugging Face's Inference API
+     for chunk in client.chat_completion(
+         messages,
+         max_tokens=max_tokens,
+         stream=True,
+         temperature=temperature,
+         top_p=top_p,
+     ):
+         token = chunk.choices[0].delta.content
+         if token:  # the final streamed chunk may carry no content
+             response += token
+             yield response
+
+
+ """
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+ """
+ demo = gr.ChatInterface(
+     respond,
+     additional_inputs=[
+         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+         gr.Slider(
+             minimum=0.1,
+             maximum=1.0,
+             value=0.95,
+             step=0.05,
+             label="Top-p (nucleus sampling)",
+         ),
+     ],
+ )
+
+
+ if __name__ == "__main__":
+     demo.launch()
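
Once the Space is running, the `ChatInterface` can also be driven programmatically. A sketch using `gradio_client`, assuming a hypothetical `yash-malviya/mlops` Space path; the exact endpoint name and parameter order can be confirmed with `client.view_api()`:

```python
from gradio_client import Client

# Hypothetical Space path; replace with the actual <user>/<space> once deployed.
client = Client("yash-malviya/mlops")

# gr.ChatInterface typically exposes a "/chat" endpoint taking the message
# followed by the additional_inputs in the order they were declared.
reply = client.predict(
    "Hello!",                       # message
    "You are a friendly Chatbot.",  # system_message
    512,                            # max_tokens
    0.7,                            # temperature
    0.95,                           # top_p
    api_name="/chat",
)
print(reply)
```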
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ gradio==4.39.*
+ torch==2.4.*
+ transformers==4.43.*
+ accelerate==0.33.*
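
A small sanity-check sketch (not part of the commit) that could be run inside the Space to confirm the pinned packages resolved as expected:

```python
from importlib.metadata import version

# Print the installed version of each dependency pinned in requirements.txt.
for pkg in ("gradio", "torch", "transformers", "accelerate"):
    print(pkg, version(pkg))
```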