import numpy as np
import cv2
from sklearn.base import BaseEstimator, TransformerMixin

def visual_words(X, bovw):

    # X = cv2.cvtColor(X, cv2.COLOR_RGB2GRAY)

    N = len(X) # Number of images
    K = bovw.n_clusters # Number of visual words

    # SIFT object
    sift = cv2.SIFT_create()

    # Feature matrix: bag-of-visual-words histogram representation of the images
    feature_vector = np.zeros((N, K))
    visual_word_pos = 0 # Row of the feature matrix for the current image

    # For each image
    for i in range(N):

        # Extract the keypoint descriptors of the current image
        _, curr_des = sift.detectAndCompute(X[i], None)

        # Histogram of visual words for the current image
        feature_vector_curr = np.zeros(K, dtype=np.float32)

        # Skip images where SIFT finds no keypoints (descriptor set is None):
        # the histogram stays all zeros
        if curr_des is None:
            feature_vector[visual_word_pos] = feature_vector_curr
            visual_word_pos += 1
            continue

        # Use the BoVW model to predict the visual word of each keypoint descriptor
        word_vector = bovw.predict(np.asarray(curr_des, dtype=float))

        # For each unique visual word appearing in the image
        for word in np.unique(word_vector):
            res = list(word_vector).count(word) # Number of occurrences of this word
            feature_vector_curr[word] = res # Histogram count for that word

        # Normalize the current histogram (L2 norm, in place)
        cv2.normalize(feature_vector_curr, feature_vector_curr, norm_type=cv2.NORM_L2)

        feature_vector[visual_word_pos] = feature_vector_curr # Assign the current histogram to the feature matrix
        visual_word_pos += 1 # Move to the next row

    return feature_vector
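
# --- Example (illustrative sketch, not part of the original pipeline) -------
# The `bovw` argument above is expected to expose `n_clusters` and `predict`,
# i.e. a fitted clustering model acting as the visual vocabulary. The helper
# below shows one plausible way to build it with scikit-learn's
# MiniBatchKMeans on SIFT descriptors pooled from the training images;
# `build_bovw` and `n_clusters=100` are assumptions, not the original choices.
def build_bovw(X_train, n_clusters=100, random_state=None):
    from sklearn.cluster import MiniBatchKMeans

    sift = cv2.SIFT_create()
    all_descriptors = []

    # Pool the SIFT descriptors of every training image
    for img in X_train:
        _, des = sift.detectAndCompute(img, None)
        if des is not None:
            all_descriptors.append(des)

    # Cluster the pooled descriptors: each cluster centre is one visual word
    all_descriptors = np.vstack(all_descriptors).astype(float)
    bovw = MiniBatchKMeans(n_clusters=n_clusters, random_state=random_state)
    bovw.fit(all_descriptors)

    return bovw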

class ELMClassifier(BaseEstimator, TransformerMixin):

    def __init__(self, L, random_state=None):

        self.L = L # Number of hidden neurons
        self.random_state = random_state # Seed for the random hidden-layer weights

    def fit(self, X, y=None):

        M = np.size(X, axis=0) # Number of examples
        N = np.size(X, axis=1) # Number of features

        np.random.seed(seed=self.random_state) # Set the random seed for reproducibility

        self.w1 = np.random.uniform(low=-1, high=1, size=(self.L, N+1)) # Random hidden-layer weights, including the bias column

        bias = np.ones(M).reshape(-1, 1) # Bias column
        Xa = np.concatenate((bias, X), axis=1) # Input with bias

        S = Xa.dot(self.w1.T) # Weighted sum of the hidden layer
        H = np.tanh(S) # Activation function f(x) = tanh(x), dimension M x L

        bias = np.ones(M).reshape(-1, 1) # Bias column
        Ha = np.concatenate((bias, H), axis=1) # Hidden-layer activations with bias

        # One-hot encoding (assumes integer labels 0, 1, ..., n_classes-1)
        n_classes = len(np.unique(y))
        y = np.eye(n_classes)[y]

        self.w2 = (np.linalg.pinv(Ha).dot(y)).T # Output weights: least-squares solution w2' = pinv(Ha)*D

        return self

    def predict(self, X):

        M = np.size(X, axis=0) # Number of examples

        bias = np.ones(M).reshape(-1, 1) # Bias column
        Xa = np.concatenate((bias, X), axis=1) # Input with bias

        S = Xa.dot(self.w1.T) # Weighted sum of the hidden layer
        H = np.tanh(S) # Activation function f(x) = tanh(x), dimension M x L

        bias = np.ones(M).reshape(-1, 1) # Bias column
        Ha = np.concatenate((bias, H), axis=1) # Hidden-layer activations with bias

        y_pred = Ha.dot(self.w2.T) # Output scores, dimension M x n_classes

        # Revert the one-hot encoding: the predicted class is the index of the maximum score in each row
        y_pred = np.argmax(y_pred, axis=1)

        return y_pred

    def predict_proba(self, X):

        M = np.size(X, axis=0) # Number of examples

        bias = np.ones(M).reshape(-1, 1) # Bias column
        Xa = np.concatenate((bias, X), axis=1) # Input with bias

        S = Xa.dot(self.w1.T) # Weighted sum of the hidden layer
        H = np.tanh(S) # Activation function f(x) = tanh(x), dimension M x L

        bias = np.ones(M).reshape(-1, 1) # Bias column
        Ha = np.concatenate((bias, H), axis=1) # Hidden-layer activations with bias

        y_pred = Ha.dot(self.w2.T) # Raw output scores (not normalized probabilities), dimension M x n_classes

        return y_pred
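
# --- Example usage (illustrative sketch, not part of the original file) -----
# A minimal, self-contained demonstration of the ELMClassifier API on random
# synthetic data; the shapes, the number of classes and L=50 are assumptions
# chosen only for illustration. With real data, X would be the BoVW feature
# matrix returned by visual_words().
if __name__ == "__main__":
    rng = np.random.default_rng(0)

    # 200 examples, 100 BoVW features, 3 classes with integer labels 0..2
    X_demo = rng.normal(size=(200, 100))
    y_demo = rng.integers(low=0, high=3, size=200)

    clf = ELMClassifier(L=50, random_state=0)
    clf.fit(X_demo, y_demo)

    y_hat = clf.predict(X_demo)
    print("Training accuracy:", np.mean(y_hat == y_demo))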