import numpy as np


class LinearRegression:
    """Ordinary least-squares linear regression fitted by batch gradient descent."""

    def __init__(self, alpha=0.3, threshold=0.01):
        # alpha: gradient-descent learning rate.
        # threshold: stop descending once the cost J drops below this value.
        # (Both were previously hard-coded inside gradient_descent.)
        self.coef_ = 0.0
        self.intercept = 0.0
        self.theta = None
        self.alpha = alpha
        self.threshold = threshold

    def hypothetic(self, x):
        """Model prediction theta . x + intercept for one sample x."""
        return np.dot(self.theta, x) + self.intercept

    def error_dist(self, x, y):
        """Signed residual (prediction - target) for one sample."""
        return self.hypothetic(x) - y

    def Jfunction(self):
        """Cost J(theta) = 1/(2m) * sum of squared residuals over the training set."""
        total = 0.0  # renamed from `sum`, which shadowed the builtin
        for i in range(self.m):
            err = self.error_dist(self.x[i], self.y[i])
            total += np.dot(err, err)
        return total / (2 * self.m)

    def partialderiv_J_func(self):
        """Gradient of J with respect to theta: 1/m * sum(err_i * x_i)."""
        total = 0.0
        for i in range(self.m):
            err = self.error_dist(self.x[i], self.y[i])
            total += np.dot(self.x[i], err)
        return total / self.m

    def partialderiv_J_func_for_intersect(self):
        """Gradient of J with respect to the intercept: 1/m * sum(err_i)."""
        total = 0.0
        for i in range(self.m):
            total += self.error_dist(self.x[i], self.y[i])
        return total / self.m

    def gradient_descent(self):
        """Run batch gradient descent until the cost falls below the threshold.

        NOTE(review): there is no iteration cap — a learning rate too large
        for the data makes this loop forever; confirm alpha against the data.
        """
        cost = float('inf')

        # Loop until convergence.
        while cost > self.threshold:
            self.theta = self.theta - self.alpha * self.partialderiv_J_func()
            self.intercept = self.intercept - self.alpha * self.partialderiv_J_func_for_intersect()
            cost = self.Jfunction()

    def fit(self, x, y):
        """Fit on samples x of shape (m, n_features) and targets y of shape (m,).

        Raises ValueError when x and y disagree on the sample count.
        """
        if x.shape[0] != y.shape[0]:
            # Raising a plain string is a TypeError in Python 3; raise a real exception.
            raise ValueError('x, y have different length!')

        self.x = x
        self.y = y
        self.m = x.shape[0]
        self.theta = np.zeros(x[0].size)

        self.gradient_descent()

        self.coef_ = self.theta
        return self

    def predict(self, x):
        """Return a list with one prediction per row of x."""
        return [self.hypothetic(sample) for sample in x]


class LogisticRegression:
    """Binary logistic regression fitted by batch gradient descent.

    When `normalize` is True, each feature is mean-centred and min-max
    scaled before fitting; the same transform is applied in predict().
    """

    def __init__(self, normalize=True, alpha=0.3):
        # normalize: mean-centre and min-max scale features before fitting.
        # alpha: gradient-descent learning rate.
        self.coef_ = 0.0
        self.intercept = 0.0
        self.theta = None
        # BUG FIX: this boolean used to shadow a method also named `normalize`,
        # so self.normalize(x) raised "'bool' object is not callable". The
        # scaling helper is now the private method _scale.
        self.normalize = normalize
        self.offset = 1.0
        self.scalar = 1.0
        self.alpha = alpha
        self.iter_count = 0
        self.cost_history = []

    def sigmoid(self, z):
        """Numerically stable logistic function 1 / (1 + e^-z) for scalar z.

        The original exp(z) / (1 + exp(z)) overflows to inf/inf = nan for
        large positive z; split on the sign so np.exp never overflows.
        """
        if z >= 0:
            return 1.0 / (1.0 + np.exp(-z))
        ez = np.exp(z)
        return ez / (1.0 + ez)

    def hypothetic(self, x):
        """Predicted probability P(y = 1 | x) for one sample x."""
        z = np.dot(self.theta, x) + self.intercept
        return self.sigmoid(z)

    def error_dist(self, x, y):
        """Signed residual (predicted probability - label) for one sample."""
        return self.hypothetic(x) - y

    def loglikelihood(self, x, y):
        """Log-likelihood contribution of one sample under the current model."""
        h = self.hypothetic(x)
        # Clip away exact 0/1 so np.log never returns -inf (which would
        # poison the convergence test in gradient_descent).
        h = min(max(h, 1e-15), 1.0 - 1e-15)
        return y * np.log(h) + (1 - y) * np.log(1 - h)

    def Jfunction(self):
        """Mean log-likelihood over the training set (higher is better)."""
        total = 0.0  # renamed from `sum`, which shadowed the builtin
        for i in range(self.m):
            total += self.loglikelihood(self.x[i], self.y[i])
        return total / self.m

    def partialderiv_J_func(self):
        """Gradient of the negated mean log-likelihood w.r.t. theta.

        Returns a 1-D array of length n_feature. (The original used the
        deprecated np.mat, producing a (1, n) matrix that silently broadcast
        theta into a 2-D array on the first update.)
        """
        h = np.array([self.hypothetic(xi) for xi in self.x])
        dist = h - self.y
        return np.dot(dist, self.x) / self.m

    def partialderiv_J_func_for_intersect(self):
        """Gradient w.r.t. the intercept: mean residual."""
        total = 0.0
        for i in range(self.m):
            total += self.error_dist(self.x[i], self.y[i])
        return total / self.m

    def gradient_descent(self):
        """Iterate gradient steps until |delta cost| falls below 1e-4.

        cost is the negated mean log-likelihood, so it decreases as the fit
        improves; each value is appended to self.cost_history.
        """
        cost = 100000.0
        last_cost = 200000.0

        self.iter_count = 0
        # Loop until convergence.
        while abs(cost - last_cost) > 0.0001:
            last_cost = cost
            self.theta = self.theta - self.alpha * self.partialderiv_J_func()
            self.intercept = self.intercept - self.alpha * self.partialderiv_J_func_for_intersect()
            cost = -self.Jfunction()
            self.cost_history.append(cost)
            print('iter=%d deltaCost=%f' % (self.iter_count, last_cost - cost))
            self.iter_count += 1

    def norm_params(self, x):
        """Per-feature (offset, scalar) so that scaled = (x - offset) * scalar.

        offset is the column mean; scalar is 1 / (max - min) for varying
        columns, else 1 / max.
        """
        offset = np.zeros(self.n_feature)
        scalar = np.ones(self.n_feature)
        for j in range(self.n_feature):
            col = x[:, j]
            col_min = col.min()  # renamed from `min`/`max`, which shadowed builtins
            col_max = col.max()

            if col_min != col_max:
                scalar[j] = 1.0 / (col_max - col_min)
            else:
                # Constant column: fall back to 1 / max.
                # NOTE(review): still divides by zero for an all-zero column;
                # confirm whether such input can occur upstream.
                scalar[j] = 1.0 / col_max

            offset[j] = col.mean()

        return offset, scalar

    def _scale(self, x):
        """Apply the fitted normalization: (x - offset) * scalar."""
        return (x - self.offset) * self.scalar

    def fit(self, x, y):
        """Fit on samples x of shape (m, n_feature) and binary labels y of shape (m,).

        Raises ValueError when x and y disagree on the sample count.
        """
        if x.shape[0] != y.shape[0]:
            # Raising a plain string is a TypeError in Python 3; raise a real exception.
            raise ValueError('x, y have different length!')

        self.m = x.shape[0]
        self.n_feature = x.shape[1]
        self.theta = np.zeros(x[0].size)

        if self.normalize:
            self.offset, self.scalar = self.norm_params(x)
            self.x = self._scale(x)
        else:
            self.x = x

        self.y = y

        self.gradient_descent()

        self.coef_ = self.theta
        return self

    def predict(self, x):
        """Return a list of predicted probabilities, one per row of x."""
        y_pred = []
        for element in x:
            xi = self._scale(element) if self.normalize else element
            y_pred.append(self.hypothetic(xi))
        return y_pred