Krzysiek111 committed on
Commit
89c4568
1 Parent(s): 7b37229

removed unnecessary code

Browse files
Files changed (2) hide show
  1. app.py +2 -3
  2. predict.py +4 -57
app.py CHANGED
@@ -2,13 +2,12 @@ import streamlit as st
2
  import numpy as np
3
  import matplotlib.pyplot as plt
4
  import seaborn as sns
5
- import requests
6
- import json
7
 
8
  from predict import predict_series
9
 
 
 
10
  st.set_page_config(page_title='RNN Playground')
11
- #st.set_option('deprecation.showPyplotGlobalUse', False)
12
 
13
  pages = {'Intro': 0, 'Implementation details': 1, 'The model': 2}
14
  choice = pages[st.sidebar.radio("Select the chapter: ", tuple(pages.keys()))]
 
2
  import numpy as np
3
  import matplotlib.pyplot as plt
4
  import seaborn as sns
 
 
5
 
6
  from predict import predict_series
7
 
8
+ #TODO: Refactor this module
9
+
10
  st.set_page_config(page_title='RNN Playground')
 
11
 
12
  pages = {'Intro': 0, 'Implementation details': 1, 'The model': 2}
13
  choice = pages[st.sidebar.radio("Select the chapter: ", tuple(pages.keys()))]
predict.py CHANGED
@@ -4,6 +4,7 @@ from sklearn.preprocessing import StandardScaler
4
 
5
  verbose = 0
6
 
 
7
 
8
  def predict_series(values, r1_nodes=5, r2_nodes=0, fc1_nodes=0, steps=20, use_lstm=True, *args, **kwargs):
9
 
@@ -25,56 +26,11 @@ def predict_series(values, r1_nodes=5, r2_nodes=0, fc1_nodes=0, steps=20, use_ls
25
  X = np.array(X).reshape(-1, T, 1)
26
  Y = np.array(Y)
27
 
28
- nb_stats = 0
29
- """
30
- X_temp = np.zeros(X.size + nb_stats * len(X)).reshape(-1, T + nb_stats)
31
-
32
- step_size = 1 / (len(X) + steps)
33
-
34
-
35
- def update_stats(row):
36
- new_stat = row[T:]
37
- new_stat[0] += step_size # number of sample
38
-
39
- minimum = min(row[:T]) # minimum value, and when it occurred
40
- if minimum < row[T + 1]:
41
- new_stat[1], new_stat[2] = minimum, new_stat[0]
42
-
43
- maximum = max(row[:T]) # maximum value, and when it occurred
44
- if maximum > row[T + 3]:
45
- new_stat[3], new_stat[4] = maximum, new_stat[0]
46
-
47
- new_stat[5] = (row[T + 5] * row[T] + row[T - 1]) / (new_stat[0]) # rolling average
48
-
49
- difference10 = row[T - 1] - row[T - 11] # the biggest difference within 10 items
50
- if difference10 > row[T + 6]:
51
- new_stat[6], new_stat[7] = difference10, new_stat[0]
52
- if difference10 < row[T + 8]:
53
- new_stat[8], new_stat[9] = difference10, new_stat[0]
54
-
55
- abs_difference10 = abs(difference10) # the biggest absolute difference within 10 items
56
- if abs_difference10 > row[T + 10]:
57
- new_stat[10], new_stat[11] = abs_difference10, new_stat[0]
58
- if abs_difference10 < row[T + 12]:
59
- new_stat[12], new_stat[13] = abs_difference10, new_stat[0]
60
-
61
- return new_stat
62
-
63
- X_temp[0] = X[0] #np.append(X[0])#, [0, np.inf, 0, -np.inf, 0]) #, 0, -np.inf, 0, +np.inf, 0, 0, 0, np.inf, 0])
64
- for i in range(1, len(X)):
65
- X_temp[i] = np.append(X[i][:T], X_temp[i - 1][T:])
66
- X_temp[i][T:] = update_stats(X_temp[i])
67
- """
68
- #X = X_temp[1:].reshape(-1, T + nb_stats, 1)
69
- #Y = Y[1:]
70
-
71
- i = tf.layers.Input(shape=(T + nb_stats, 1))
72
-
73
  if use_lstm:
74
  rnn_layer = tf.layers.LSTM
75
  else:
76
  rnn_layer = tf.layers.GRU
77
-
78
  if r2_nodes:
79
  x = rnn_layer(r1_nodes, return_sequences=True)(i)
80
  x = rnn_layer(r2_nodes)(x)
@@ -85,16 +41,13 @@ def predict_series(values, r1_nodes=5, r2_nodes=0, fc1_nodes=0, steps=20, use_ls
85
  x = tf.layers.Dense(1)(x)
86
  model = tf.models.Model(i, x)
87
 
88
-
89
  """lr_schedule = tf.optimizers.schedules.ExponentialDecay(
90
  initial_learning_rate=0.2,
91
  decay_steps=10,
92
  decay_rate=0.8)
93
  optimizer = tf.optimizers.Ftrl(learning_rate=0.001, learning_rate_power=-0.1)"""
94
- #for i in range(0, 500, 10):
95
- #print('{}: {}'.format(i, lr_schedule(i)))
96
-
97
-
98
  model.compile(
99
  loss='mse', #tf.losses.LogCosh(),
100
  optimizer=tf.optimizers.Adamax(lr=0.1) #LogCosh()'sgd'
@@ -116,16 +69,10 @@ def predict_series(values, r1_nodes=5, r2_nodes=0, fc1_nodes=0, steps=20, use_ls
116
  for _ in range(steps):
117
  p = model.predict(last_x.reshape(1, -1, 1))[0, 0]
118
  pred = np.append(pred, p)
119
- #last_x[:T] = np.roll(last_x[:T], -1)
120
- #last_x[T - 1] = p
121
- #last_x[T:] = update_stats(last_x)
122
  last_x = np.roll(last_x, -1)
123
  last_x[-1] = p
124
 
125
  pred = sc.inverse_transform(pred.reshape(-1, 1))
126
- # pred = np.array(pred).astype('float64')
127
- # pred = list(pred)
128
- # logging.info(pred)
129
 
130
  pred.reshape(-1)
131
  pred[0] = train_last_value + pred[0]
 
4
 
5
  verbose = 0
6
 
7
+ # TODO: Refactor this module
8
 
9
  def predict_series(values, r1_nodes=5, r2_nodes=0, fc1_nodes=0, steps=20, use_lstm=True, *args, **kwargs):
10
 
 
26
  X = np.array(X).reshape(-1, T, 1)
27
  Y = np.array(Y)
28
 
29
+ i = tf.layers.Input(shape=(T, 1))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
  if use_lstm:
31
  rnn_layer = tf.layers.LSTM
32
  else:
33
  rnn_layer = tf.layers.GRU
 
34
  if r2_nodes:
35
  x = rnn_layer(r1_nodes, return_sequences=True)(i)
36
  x = rnn_layer(r2_nodes)(x)
 
41
  x = tf.layers.Dense(1)(x)
42
  model = tf.models.Model(i, x)
43
 
44
+ # TODO: optimize execution time
45
  """lr_schedule = tf.optimizers.schedules.ExponentialDecay(
46
  initial_learning_rate=0.2,
47
  decay_steps=10,
48
  decay_rate=0.8)
49
  optimizer = tf.optimizers.Ftrl(learning_rate=0.001, learning_rate_power=-0.1)"""
50
+
 
 
 
51
  model.compile(
52
  loss='mse', #tf.losses.LogCosh(),
53
  optimizer=tf.optimizers.Adamax(lr=0.1) #LogCosh()'sgd'
 
69
  for _ in range(steps):
70
  p = model.predict(last_x.reshape(1, -1, 1))[0, 0]
71
  pred = np.append(pred, p)
 
 
 
72
  last_x = np.roll(last_x, -1)
73
  last_x[-1] = p
74
 
75
  pred = sc.inverse_transform(pred.reshape(-1, 1))
 
 
 
76
 
77
  pred.reshape(-1)
78
  pred[0] = train_last_value + pred[0]