mylessss committed on
Commit
95e304b
0 Parent(s):

initial commit

Browse files
Files changed (6) hide show
  1. .gitattributes +35 -0
  2. Dockerfile +16 -0
  3. README.md +13 -0
  4. app.py +152 -0
  5. development.md +8 -0
  6. requirements.txt +6 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Run a marimo app on Hugging Face Spaces (the CMD serves on port 7860).
FROM python:3.12
# Pull the standalone `uv` binary from the official image.
COPY --from=ghcr.io/astral-sh/uv:0.4.20 /uv /bin/uv

# Spaces convention: run as a non-root user with UID 1000.
RUN useradd -m -u 1000 user
ENV PATH="/home/user/.local/bin:$PATH"
# Let `uv pip` install into the system interpreter (no venv in the image).
ENV UV_SYSTEM_PYTHON=1

WORKDIR /app

# Install dependencies first so this layer is cached across code-only changes.
COPY --chown=user ./requirements.txt requirements.txt
RUN uv pip install -r requirements.txt

COPY --chown=user . /app
USER user

CMD ["marimo", "run", "app.py", "--host", "0.0.0.0", "--port", "7860"]
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: marimo chatbot template
3
+ emoji: 🤖
4
+ colorFrom: red
5
+ colorTo: blue
6
+ sdk: docker
7
+ pinned: true
8
+ license: mit
9
+ short_description: Template for deploying a marimo chatbot to HF
10
+ ---
11
+
12
+ Check out marimo at <https://github.com/marimo-team/marimo>
13
+ Check out the configuration reference at <https://huggingface.co/docs/hub/spaces-config-reference>
app.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# marimo notebook: a chatbot app backed by the Hugging Face Inference API.
import marimo

# Version of marimo this file was generated with (read by marimo tooling).
__generated_with = "0.9.14"
app = marimo.App(width="medium")
5
+
6
+
7
@app.cell
def __():
    # Notebook-wide imports; marimo cells expose names to other cells
    # via the tuple they return.
    import marimo as mo
    import os
    from huggingface_hub import InferenceClient
    return InferenceClient, mo, os
13
+
14
+
15
@app.cell
def __():
    # Hugging Face model id queried through the Inference API.
    MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
    return (MODEL_NAME,)
19
+
20
+
21
@app.cell(hide_code=True)
def __(MODEL_NAME, mo):
    # Page banner; hide_code=True hides this cell's source in the running app.
    mo.md(f"""
    # Chat with **{MODEL_NAME}**
    """)
    return
27
+
28
+
29
@app.cell
def __(max_tokens, mo, system_message, temperature, top_p):
    # Lay out the chat controls: system prompt on the left, the three
    # sampling sliders stacked on the right.
    _sampling_controls = mo.vstack([temperature, top_p, max_tokens], align="end")
    mo.hstack([system_message, _sampling_controls])
    return
38
+
39
+
40
@app.cell
def __(mo, respond):
    # Build the chat UI; `respond` receives the running message history
    # each time the user sends a message.
    _starter_prompts = [
        "Tell me a joke.",
        "What is the square root of {{number}}?",
    ]
    chat = mo.ui.chat(model=respond, prompts=_starter_prompts)
    chat
    return (chat,)
48
+
49
+
50
@app.cell
def __(InferenceClient, MODEL_NAME, os):
    """Create the Hugging Face Inference API client for MODEL_NAME.

    For details on `huggingface_hub` Inference API support, see:
    https://huggingface.co/docs/huggingface_hub/v0.26.2/en/guides/inference
    """
    # A missing/empty token still works, but requests may be rate-limited.
    hf_token = os.environ.get("HF_TOKEN")
    if not hf_token:
        print("HF_TOKEN not set, may have limited access.")

    client = InferenceClient(MODEL_NAME, token=hf_token)
    return client, hf_token
65
+
66
+
67
@app.cell
def __(client, mo):
    # --- UI controls -----------------------------------------------------
    system_message = mo.ui.text_area(
        value="You are a friendly Chatbot.",
        label="System message",
    )
    max_tokens = mo.ui.slider(
        start=1,
        stop=2048,
        value=512,
        step=1,
        label="Max new tokens",
        show_value=True,
    )
    temperature = mo.ui.slider(
        start=0.1,
        stop=4.0,
        value=0.7,
        step=0.1,
        label="Temperature",
        show_value=True,
    )
    top_p = mo.ui.slider(
        start=0.1,
        stop=1.0,
        value=0.95,
        step=0.05,
        label="Top-p (nucleus sampling)",
        show_value=True,
    )

    # Add more configuration options if needed.

    def _to_chat_message(message):
        """Convert one marimo chat message to chat-completion format.

        Text-only messages are sent with plain string content; text-only
        chat endpoints (e.g. zephyr-7b-beta) can reject the OpenAI-style
        list-of-parts form, which the original code sent unconditionally.
        Messages with image attachments use the list-of-parts form.

        Raises:
            ValueError: if an attachment is not an image.
        """
        attachments = message.attachments or []
        if not attachments:
            return {"role": message.role, "content": message.content}

        parts = [{"type": "text", "text": message.content}]
        for attachment in attachments:
            content_type = attachment.content_type or ""
            # This example only supports image attachments
            if not content_type.startswith("image"):
                raise ValueError(f"Unsupported content type {content_type}")
            parts.append(
                {
                    "type": "image_url",
                    "image_url": {"url": attachment.url},
                }
            )
        return {"role": message.role, "content": parts}

    # Create chat callback
    def respond(messages: list[mo.ai.ChatMessage], config):
        """Forward the chat history (plus system prompt) to the model.

        `config` is supplied by mo.ui.chat but unused here; generation
        parameters come from the sliders above instead.
        """
        chat_messages = [{"role": "system", "content": system_message.value}]
        chat_messages.extend(_to_chat_message(m) for m in messages)

        response = client.chat_completion(
            chat_messages,
            max_tokens=max_tokens.value,
            temperature=temperature.value,
            top_p=top_p.value,
            stream=False,
        )

        # You can return strings, markdown, charts, tables, dataframes, and more.
        return response.choices[0].message.content

    return max_tokens, respond, system_message, temperature, top_p
141
+
142
+
143
@app.cell
def __():
    # Placeholder for downstream reactions to the conversation.
    # If you need to do anything _reactively_ to the chat messages,
    # you can access the chat messages using the `chat.value` attribute.
    # chat.value
    return
149
+
150
+
151
# Run the app when this file is executed directly as a script.
if __name__ == "__main__":
    app.run()
development.md ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Development
2
+
3
+ ## Testing your Dockerfile locally
4
+
5
+ ```bash
6
+ docker build -t marimo-app .
7
+ docker run -it --rm -p 7860:7860 marimo-app
8
+ ```
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ huggingface-hub==0.26.2
2
+ marimo
3
+ # Or a specific version
4
+ # marimo>=0.9.0
5
+
6
+ # Add other dependencies as needed