fierval committed on
Commit
b02efd5
1 Parent(s): 317b9d3

drop of the space

Browse files
Files changed (6) hide show
  1. .gitattributes +2 -10
  2. Dockerfile +24 -0
  3. README.md +8 -5
  4. app.py +118 -0
  5. packages.txt +5 -0
  6. requirements.txt +4 -0
.gitattributes CHANGED
@@ -1,35 +1,27 @@
1
  *.7z filter=lfs diff=lfs merge=lfs -text
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
 
4
  *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
  *.ftz filter=lfs diff=lfs merge=lfs -text
7
  *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
  *.model filter=lfs diff=lfs merge=lfs -text
13
  *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
  *.onnx filter=lfs diff=lfs merge=lfs -text
17
  *.ot filter=lfs diff=lfs merge=lfs -text
18
  *.parquet filter=lfs diff=lfs merge=lfs -text
19
  *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
  *.pt filter=lfs diff=lfs merge=lfs -text
23
  *.pth filter=lfs diff=lfs merge=lfs -text
24
  *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
  *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
  *.tflite filter=lfs diff=lfs merge=lfs -text
30
  *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
  *.xz filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
  *.7z filter=lfs diff=lfs merge=lfs -text
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
  *.bz2 filter=lfs diff=lfs merge=lfs -text
 
6
  *.ftz filter=lfs diff=lfs merge=lfs -text
7
  *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
 
11
  *.model filter=lfs diff=lfs merge=lfs -text
12
  *.msgpack filter=lfs diff=lfs merge=lfs -text
 
 
13
  *.onnx filter=lfs diff=lfs merge=lfs -text
14
  *.ot filter=lfs diff=lfs merge=lfs -text
15
  *.parquet filter=lfs diff=lfs merge=lfs -text
16
  *.pb filter=lfs diff=lfs merge=lfs -text
 
 
17
  *.pt filter=lfs diff=lfs merge=lfs -text
18
  *.pth filter=lfs diff=lfs merge=lfs -text
19
  *.rar filter=lfs diff=lfs merge=lfs -text
 
20
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
  *.tar.* filter=lfs diff=lfs merge=lfs -text
 
22
  *.tflite filter=lfs diff=lfs merge=lfs -text
23
  *.tgz filter=lfs diff=lfs merge=lfs -text
 
24
  *.xz filter=lfs diff=lfs merge=lfs -text
25
  *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
  *tfevents* filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# GPU-capable base image with PyTorch 1.13.1 / CUDA 11.6 preinstalled.
FROM pytorch/pytorch:1.13.1-cuda11.6-cudnn8-runtime
WORKDIR /app

# Copy dependency manifests first so the expensive install layers cache
# independently of application-code changes.
COPY ./requirements.txt /app/requirements.txt
COPY ./packages.txt /app/packages.txt

# Install the system packages listed in packages.txt (-r skips an empty file),
# then remove apt lists to keep the image small.
RUN apt-get update && xargs -r -a /app/packages.txt apt-get install -y && \
    rm -rf /var/lib/apt/lists/*

RUN pip3 install --no-cache-dir -r /app/requirements.txt
# Pinned separately: jinja2 3.0.1 for compatibility with the stack above.
RUN pip3 install --no-cache-dir jinja2==3.0.1

# Run as a non-root user (uid 1000), as required by Hugging Face Spaces.
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user
ENV PATH=$HOME/.local/bin:$PATH

# WORKDIR creates the directory itself; no separate `mkdir` needed.
WORKDIR $HOME/app
# --chown so the non-root user owns the files (plain COPY keeps root ownership
# even after USER, which breaks writes at runtime).
COPY --chown=user . $HOME/app

EXPOSE 8501
# Exec form so streamlit is PID 1 and receives SIGTERM for graceful shutdown.
CMD ["streamlit", "run", "app.py"]
README.md CHANGED
@@ -1,10 +1,13 @@
1
  ---
2
- title: Docker Demo Example
3
- emoji: 💻
4
- colorFrom: pink
5
- colorTo: blue
6
  sdk: docker
 
7
  pinned: false
 
 
8
  ---
9
 
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: docker-demo-example
3
+ emoji: 🐳
4
+ colorFrom: green
5
+ colorTo: purple
6
  sdk: docker
7
+ app_file: app.py
8
  pinned: false
9
+ license: afl-3.0
10
+ app_port: 8501
11
  ---
12
 
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Streamlit demo app: fitting a quadratic regression model with JAX.

Generates noisy quadratic data (with occasional outliers), lets the user pick
sample size, noise level and a cost function from the sidebar, then fits the
model by gradient descent (RMSE branch) and plots the result.
"""
import streamlit as st
import numpy as np
import jax.numpy as jnp
import jax
import matplotlib.pyplot as plt

# Set random key (JAX PRNG key; kept for reproducibility of jax.random usage)
seed = 321
key = jax.random.PRNGKey(seed)

st.title('Fitting simple models with JAX')
# NOTE: fixed typo "quadratric" -> "quadratic" in the displayed header.
st.header('A quadratic regression example')

st.markdown("[The original streamlit space](https://huggingface.co/spaces/alkzar90/streamlit-demo-example)")

st.markdown('*\"Parametrised models are simply functions that depend on inputs and trainable parameters. There is no fundamental difference between the two, except that trainable parameters are shared across training samples whereas the input varies from sample to sample.\"* [(Yann LeCun, Deep learning course)](https://atcold.github.io/pytorch-Deep-Learning/en/week02/02-1/#Parametrised-models)')

# Model: h(x, w) = sum_k w_k * phi_k(x), a linear model over basis functions.
st.latex(r'''h(\boldsymbol x, \boldsymbol w)= \sum_{k=1}^{K}\boldsymbol w_{k} \phi_{k}(\boldsymbol x)''')


# Sidebar inputs
number_of_observations = st.sidebar.slider('Number of observations', min_value=50, max_value=150, value=100)
noise_standard_deviation = st.sidebar.slider('Standard deviation of the noise', min_value=0.0, max_value=2.0, value=1.0)
cost_function = st.sidebar.radio('What cost function you want to use for the fitting?', options=('RMSE-Loss', 'Huber-Loss'))

# Generate random data (NumPy seed fixed so the dataset is reproducible)
np.random.seed(2)

w = jnp.array([3.0, -20.0, 32.0])  # true coefficients [bias, x, x^2]
X = np.column_stack((np.ones(number_of_observations),
                     np.random.random(number_of_observations)))
X = jnp.column_stack((X, X[:, 1] ** 2))  # add x**2 column

# ~3% of points get a +8 jump, simulating outliers.
additional_noise = 8 * np.random.binomial(1, 0.03, size=number_of_observations)
y = jnp.array(np.dot(X, w) + noise_standard_deviation * np.random.randn(number_of_observations)
              + additional_noise)

# Plot the data
fig, ax = plt.subplots(dpi=320)
ax.set_xlim((0, 1))
ax.set_ylim((-5, 26))
ax.scatter(X[:, 1], y, c='#e76254', edgecolors='firebrick')

st.pyplot(fig)

st.subheader('Train a model')

st.markdown('*\"A Gradient Based Method is a method/algorithm that finds the minima of a function, assuming that one can easily compute the gradient of that function. It assumes that the function is continuous and differentiable almost everywhere (it need not be differentiable everywhere).\"* [(Yann LeCun, Deep learning course)](https://atcold.github.io/pytorch-Deep-Learning/en/week02/02-1/#Parametrised-models)')

st.markdown('Using gradient descent we find the minima of the loss adjusting the weights in each step given the following formula:')

st.latex(r'''\bf{w}\leftarrow \bf{w}-\eta \frac{\partial\ell(\bf{X},\bf{y}, \bf{w})}{\partial \bf{w}}''')

st.markdown('The training loop:')

# Illustrative snippet shown to the user.
# NOTE: fixed typo in the displayed variable name: "learning_rte" -> "learning_rate"
# (the actual code below uses `learning_rate`, so the snippet now matches).
code = '''NUM_ITER = 1000
# initialize parameters
w = np.array([3., -2., -8.])
for i in range(NUM_ITER):
    # update parameters
    w -= learning_rate * grad_loss(w)'''

st.code(code, language='python')

# Fitting by the respective cost_function
w = jnp.array(np.random.random(3))  # random initial weights
learning_rate = 0.05
NUM_ITER = 1000

if cost_function == 'RMSE-Loss':

    def loss(w):
        """Mean squared error: (1/m) * ||Xw - y||^2 (X, y closed over)."""
        return 1 / X.shape[0] * jax.numpy.linalg.norm(jnp.dot(X, w) - y) ** 2

    st.write('You selected the RMSE loss function.')
    st.latex(r'''\ell(X, y, w)=\frac{1}{m}||Xw - y||_{2}^2''')
    st.latex(r'''\ell(X, y, w)=\frac{1}{m}\big(\sqrt{(Xw - y)\cdot(Xw - y)}\big)^2''')
    st.latex(r'''\ell(X, y, w)= \frac{1}{m}\sum_1^m (\hat{y}_i - y_i)^2''')

    progress_bar = st.progress(0)
    status_text = st.empty()
    grad_loss = jax.grad(loss)  # autodiff gradient of the loss w.r.t. w

    # Perform gradient descent
    progress_counter = 0
    for i in range(1, NUM_ITER + 1):
        if i % 10 == 0:
            # Update progress bar: 1000 iterations / 10 -> counts 1..100,
            # exactly the 0-100 range st.progress expects.
            progress_counter += 1
            progress_bar.progress(progress_counter)

        # Update parameters.
        w -= learning_rate * grad_loss(w)

        # Update status text.
        if i % 100 == 0:
            # report the loss at the current epoch
            status_text.text(
                'Trained loss at epoch %s is %s' % (i, loss(w)))
    # Plot the final line (sort by predicted value so the curve is connected)
    fig, ax = plt.subplots(dpi=120)
    ax.set_xlim((0, 1))
    ax.set_ylim((-5, 26))
    ax.scatter(X[:, 1], y, c='#e76254', edgecolors='firebrick')
    ax.plot(X[jnp.dot(X, w).argsort(), 1], jnp.dot(X, w).sort(), 'k-', label='Final line')
    st.pyplot(fig)
    status_text.text('Done!')


else:
    # Huber branch only displays the loss definition; no fitting is performed.
    st.write("You selected the Huber loss function.")
    st.latex(r'''
    \ell_{H} =
    \begin{cases}
    (y^{(i)}-\hat{y}^{(i)})^2 & \text{for }\quad |y^{(i)}-\hat{y}^{(i)}|\leq \delta \\
    2\delta|y^{(i)}-\hat{y}^{(i)}| - \delta^2 & \text{otherwise}
    \end{cases}''')
packages.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ libgraphviz-dev
2
+ graphviz
3
+ git
4
+ gcc
5
+ build-essential
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ streamlit==1.24.1
2
+ pandas
3
+ jax
4
+ jaxlib