Add source files
- src/elm/__init__.py +1 -0
- src/elm/elm.py +83 -0
src/elm/__init__.py
ADDED
@@ -0,0 +1 @@
from .elm import ELMClassifier
src/elm/elm.py
ADDED
@@ -0,0 +1,83 @@
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin


def softmax(x):
    x_max = np.max(x, axis=1, keepdims=True)    # Row-wise max, dims kept for broadcasting
    e_x = np.exp(x - x_max)                     # Shift each row by its max so np.exp cannot overflow
    e_sum = np.sum(e_x, axis=1, keepdims=True)  # Row-wise sum of the exponentials
    return e_x / e_sum
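The max subtraction matters once the logits get large: softmax is shift-invariant per row, so subtracting the row maximum changes nothing mathematically but keeps np.exp finite. A quick illustrative check (the import path assumes the src/ layout above is importable as the elm package):

import numpy as np
from elm.elm import softmax

x = np.array([[1000.0, 1001.0, 1002.0]])  # naive np.exp(1000.0) would overflow to inf
print(softmax(x))           # ~ [[0.090 0.245 0.665]]
print(softmax(x - 1000.0))  # identical output: the shift cancels out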
class ELMClassifier(BaseEstimator, ClassifierMixin):

    def __init__(self, L, random_state=None):
        self.L = L                        # Number of hidden neurons
        self.random_state = random_state  # Seed for the random input weights
    def fit(self, X, y):
        M = np.size(X, axis=0)  # Number of examples
        N = np.size(X, axis=1)  # Number of features

        rng = np.random.RandomState(self.random_state)  # Local RNG; avoids reseeding the global state

        # Random, never-trained input weights, with one extra column for the bias
        self.w1 = rng.uniform(low=-1, high=1, size=(self.L, N + 1))

        bias = np.ones(M).reshape(-1, 1)        # Bias column
        Xa = np.concatenate((bias, X), axis=1)  # Input with bias, M x (N+1)

        S = Xa.dot(self.w1.T)  # Weighted sum of the hidden layer
        H = np.tanh(S)         # Activation f(x) = tanh(x), M x L

        bias = np.ones(M).reshape(-1, 1)
        Ha = np.concatenate((bias, H), axis=1)  # Hidden activations with bias, M x (L+1)

        # One-hot encode the targets (assumes integer labels 0..n_classes-1)
        n_classes = len(np.unique(y))
        D = np.eye(n_classes)[y]

        # Output weights in closed form: w2' = pinv(Ha) @ D
        self.w2 = np.linalg.pinv(Ha).dot(D).T

        return self
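The pseudoinverse line is the whole of ELM training: with the random input weights w1 frozen, fitting w2 is an ordinary least-squares problem, minimizing ||Ha.dot(w2.T) - D||^2, and pinv(Ha).dot(D) is its minimum-norm solution. A small self-contained check of that equivalence (the shapes and data below are made up for illustration):

import numpy as np

rng = np.random.RandomState(0)
Ha = rng.uniform(-1, 1, size=(20, 6))      # stand-in hidden activations, M x (L+1)
D = np.eye(3)[rng.randint(0, 3, size=20)]  # stand-in one-hot targets, M x n_classes

w2_pinv = np.linalg.pinv(Ha).dot(D)                # the step used in fit
w2_lstsq, *_ = np.linalg.lstsq(Ha, D, rcond=None)  # explicit least-squares solve
print(np.allclose(w2_pinv, w2_lstsq))              # True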
    def predict(self, X):
        y_proba = self.predict_proba(X)

        # Revert the one-hot encoding: argmax returns, for each row,
        # the index of the most probable class
        return np.argmax(y_proba, axis=1)
    def predict_proba(self, X):
        M = np.size(X, axis=0)  # Number of examples

        bias = np.ones(M).reshape(-1, 1)        # Bias column
        Xa = np.concatenate((bias, X), axis=1)  # Input with bias, M x (N+1)

        S = Xa.dot(self.w1.T)  # Weighted sum of the hidden layer
        H = np.tanh(S)         # Activation f(x) = tanh(x), M x L

        bias = np.ones(M).reshape(-1, 1)
        Ha = np.concatenate((bias, H), axis=1)  # Hidden activations with bias, M x (L+1)

        return softmax(Ha.dot(self.w2.T))  # Class scores mapped to probabilities, M x n_classes
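A minimal usage sketch (the dataset, scaling, and choice of L are illustrative assumptions, not part of the commit):

from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split

from elm import ELMClassifier  # assumes src/ is on the import path

X, y = load_digits(return_X_y=True)
X = X / 16.0  # scale pixels to [0, 1] so tanh does not saturate
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = ELMClassifier(L=500, random_state=0).fit(X_train, y_train)
print((clf.predict(X_test) == y_test).mean())  # test accuracy

One caveat on predict_proba: softmax over a least-squares fit gives a usable ranking of classes, but the values should not be read as calibrated probabilities.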