#!/usr/bin/env python3

__author__ = 'z.Wick.Tone.Burst'
__doc__ = """Perceptron Learning Algorithm of Machine Learning."""


from zt.ML.utils import *



class PLA:
    """The Perceptron Learning Algorithm.

    Weights live in ``self.W`` with the bias (minus the threshold) in
    ``W[0]``; every input vector ``x`` is implicitly prefixed with a
    constant 1, so the raw score is the dot product of W and [1] + x.
    """

    def __init__(self, w, y0, eta=1):
        """args:
            w: initial `W[0]`, i.e. minus the classification threshold
            y0: `h(0)`, the value used to break ties when the score is 0
                (assumed non-zero; a zero score takes this sign)
            eta: learning rate, scales every weight correction
        raises:
            ValueError: if eta is not strictly positive
        """
        self.W = [w, ]
        self.y0 = y0
        # `assert` is stripped under `python -O`; validate explicitly.
        if eta <= 0:
            raise ValueError('eta must be > 0, got %r' % (eta,))
        self.eta = eta
        self.ctr = 0  # updates performed during the latest training pass

    def init(self, x):
        """Lazily grow W to len(x) + 1 entries on first use."""
        if len(self.W) < 2:
            self.W += [0] * len(x)
        assert len(self.W) == len(x) + 1

    def copy(self):
        """Return an independent clone with the same weights and settings."""
        pla = PLA(self.W[0], self.y0, self.eta)
        pla.W = self.W.copy()
        return pla

    def sign(self, x):
        """Return the predicted label (+1 or -1) for the sample x."""
        self.init(x)
        y = sum(a * b for a, b in zip(self.W, [1] + x))
        if y == 0:
            # A score of exactly zero is resolved by the critical value
            # chosen at construction time.
            y = self.y0
        return int(y / abs(y))

    def update(self, x, y):
        """Correct W once if x is misclassified.

        returns: 1 when a correction was applied, 0 otherwise.
        """
        if self.sign(x) == y:
            return 0
        self.W = [w + self.eta * y * xi for w, xi in zip(self.W, [1] + x)]
        return 1

    def train(self, dset):
        """Run passes over dset until one full pass makes no update.

        NOTE: this loop only terminates when dset is linearly separable.
        returns: (final weight vector, total number of updates made)
        """
        ctr = 0
        while True:
            self.ctr = 0
            for x, y in dset:
                self.ctr += self.update(x, y)
            ctr += self.ctr
            if self.ctr == 0:  # a clean pass: every sample classified
                break
        return self.W, ctr

    def mistakes(self, tdset):
        """Count the misclassified samples of tdset.

        The indices of the misclassified samples are stored on the
        instance (``self.eset``) rather than returned, so that the
        Pocket algorithm always sees the error set matching the current W.
        """
        # BUG FIX: labels must be compared with `!=`, not `is not`:
        # identity comparison only "works" for CPython's cached small
        # ints and silently breaks for float or large-int labels.
        self.eset = [i for i, (x, y) in enumerate(tdset) if self.sign(x) != y]
        return len(self.eset)




class Pocket:
    """Pocket algorithm: a running PLA plus the best weights seen so far.

    Useful when the data is not linearly separable and plain PLA would
    never converge; `best` holds the W with the fewest training mistakes.
    """

    def __init__(self, pla):
        # `pla` keeps learning; `best` is a frozen copy of the best W so far.
        self.pla = pla
        self.best = pla.copy()

    def update(self, dset):
        """Try one random correction; pocket it if it improved.

        returns: 1 when `best` was replaced, 0 otherwise.
        """
        # BUG FIX: once the running PLA classifies everything correctly
        # its error set is empty and random.choice([]) would raise
        # IndexError; there is nothing left to correct then.
        if not self.pla.eset:
            return 0
        x, y = dset[random.choice(self.pla.eset)]
        if self.pla.update(x, y) == 0:
            # Sampled index was stale: already classified correctly.
            return 0
        # Both mistakes() calls also refresh the instances' error sets
        # (see PLA.mistakes), which later updates rely on.
        if self.pla.mistakes(dset) < self.best.mistakes(dset):
            self.best = self.pla.copy()
            return 1
        return 0

    def train(self, dset, updates, rounds):
        """Run at most `rounds` pocket steps or `updates` improvements.

        returns: the best PLA found (also kept in self.best).
        """
        ups = 0
        self.pla.mistakes(dset)  # populate the initial error set
        for _ in range(rounds):
            ups += self.update(dset)
            # Stop on enough improvements, or early on a perfect classifier.
            if ups >= updates or not self.pla.eset:
                break
        return self.best




def test_PLA():
    """Train a fresh PLA on the shuffled training file; return the update count."""
    learner = PLA(0, -1, eta=0.5)
    samples = load_data('./data/pla_train.txt')
    _weights, n_updates = learner.train(randomize(samples))
    return n_updates


def test_pocket():
    """Run the pocket algorithm; return its error rate on the test file."""
    learner = Pocket(PLA(0, -1, eta=0.5))
    training = load_data('./data/pocket_train.txt')
    learner.train(training, 50, 2000)
    testing = load_data('./data/pocket_test.txt')
    return learner.best.mistakes(testing) / len(testing)





if __name__ == '__main__':

    import time

    def run():
        """Select which experiment to benchmark."""
        # return test_PLA()
        return test_pocket()

    n_rounds = 1
    total = 0
    start = time.time()
    for _ in range(n_rounds):
        total += run()

    print(total / n_rounds)
    print(time.time() - start)