import numpy as np
import random
from decimal import *



class LinearRegression(object):
    """Multivariate linear regression trained with batch gradient descent.

    The feature vectors are expected to already contain the bias term.
    ``gradient_descent`` performs its parameter updates in ``Decimal``
    arithmetic with ``self.precision`` significant digits, so the initial
    ``theta`` passed to it must contain ``Decimal`` values.
    """

    def __init__(self):
        self.precision = 3  # significant digits used by Decimal updates
        self.alpha = 0.01   # default learning rate (gradient_descent's alpha argument wins)
        self.x = []
        self.y = []

    def hypothesis(self, x, theta):
        """Return the linear prediction: the dot product of x and theta.

        theta entries may be Decimal; they are coerced to float so the
        result is a plain float.
        """
        h = 0
        for xi, ti in zip(x, theta):
            h += xi * float(ti)
        return h

    def cost_function(self, X, y, theta):
        """Return the mean-squared-error cost J = sum((h - y)^2) / (2m).

        Also caches m and the cost on the instance (self.m, self.j),
        matching the original implementation's side effects.
        """
        self.m = len(X)
        self.j = 0

        for i in range(self.m):
            # BUG FIX: the residual is (prediction - target); the original
            # added y[i] instead of subtracting it.
            self.j += (self.hypothesis(X[i], theta) - y[i]) ** 2

        self.j = self.j / (2 * self.m)
        return self.j

    def gradient_descent(self, X, y, theta, alpha=0.03, num_iters=400):
        """Run batch gradient descent for at most num_iters iterations.

        Stops early if an iteration leaves theta unchanged (at the working
        Decimal precision).  Returns (theta, last_iteration_index).

        theta must be a list of Decimal values.
        """
        getcontext().prec = self.precision

        m = len(y)
        temp = [0] * len(theta)
        t = 0  # guard: keeps the return well-defined when num_iters == 0

        for t in range(num_iters):
            for j in range(len(theta)):
                # Partial derivative of the cost w.r.t. theta[j].
                d = 0
                for i in range(m):
                    d += (self.hypothesis(X[i], theta) - y[i]) * X[i][j]

                # BUG FIX: honour the alpha argument; the original silently
                # ignored it and used self.alpha instead.
                temp[j] = theta[j] - Decimal(str((alpha / m) * d))

            # Converged: the update changed nothing at this precision.
            if theta == temp:
                break

            theta = list(temp)

        return theta, t


'''
    This class provides methods to implement logistic regression.
    ATTENTION: always include the bias term in the feature vectors.
'''


class LogisticRegression():
    """Logistic regression with thresholded (0/1) predictions.

    alpha: learning rate
    l (lambda): regularization strength
    """

    def __init__(self, alpha=0.03, l=0):
        self.alpha = alpha
        self.l = l

    def hypothesis(self, x, theta):
        """Return the hard class prediction for x: 1 if sigmoid(theta . x)
        is at least 0.5, otherwise 0.

        Note: the dot product is symmetric, so the (x, theta) argument
        order does not affect the result.
        """
        z = 0
        for i in range(len(x)):
            z += theta[i] * x[i]

        if 1 / (1 + np.exp(-z)) >= 0.5:
            return 1
        return 0

    def cost_function(self, theta, x, y):
        """Return the mean squared error of the thresholded predictions:
        sum((h - y)^2) / (2 * len(x)).
        """
        j = 0
        for i in range(len(x)):
            # BUG FIX: the original called self.hipothesys, a method that
            # does not exist (AttributeError at runtime).
            j += (self.hypothesis(x[i], theta) - y[i]) ** 2

        return j / (2 * len(x))

    def gradient(self, x, y, theta):
        """Perform one gradient step and return the updated parameters.

        BUG FIX: regularization is now applied once per *parameter* and
        skips the bias term theta[0].  The original keyed it on the sample
        index (``if j > 0``) and subtracted it once per sample, which is
        not a regularization term at all.  With the default l=0 this
        rewrite is behaviorally identical to the original.
        """
        m = len(x)
        temp = [0.0] * len(theta)

        for i in range(len(theta)):
            grad = 0
            for j in range(m):
                grad += (self.hypothesis(x[j], theta) - y[j]) * x[j][i]

            if i > 0:
                # Do not regularize the bias parameter theta[0].
                grad += self.l * theta[i]

            temp[i] = theta[i] - (self.alpha * grad)

        return temp

    def init_theta(self, d, min, max):
        """Return d random integer parameters drawn uniformly from [min, max]."""
        theta = []
        for i in range(d):
            theta.append(random.randint(min, max))
        return theta

    def get_parameters(self, x, y, theta=None, iteration=400):
        """Run gradient descent for at most `iteration` steps.

        Stops early when an update leaves theta unchanged.  Returns
        (theta, last_iteration_index).
        """
        # BUG FIX: identity check for None (== comparison can misbehave
        # with array-like theta values).
        if theta is None:
            theta = self.init_theta(len(x[0]), -50, 50)

        i = 0  # guard: keeps the return well-defined when iteration == 0
        for i in range(iteration):
            temp = theta
            theta = self.gradient(x, y, theta)
            if theta == temp:
                break

        return theta, i


class k_means():
    """Naive k-means clustering.

    x: training set (list of equal-length numeric lists)
    c: number of clusters requested
    """

    def __init__(self, x, c=1):
        self.x = x
        self.c = c
        self.mu = self.rnd_clusters()

    def rnd_clusters(self):
        """Pick up to self.c distinct training points as initial centroids.

        NOTE(review): because duplicate draws are skipped rather than
        retried, this can return fewer than self.c centroids.
        """
        centroids = []
        for _ in range(self.c):
            j = random.randint(0, len(self.x) - 1)
            if self.x[j] not in centroids:
                centroids.append(self.x[j])
        return centroids

    def min_distance(self, x):
        """Return (1-based index of the closest centroid, squared distance).

        x is a single training example, not the whole training set.
        """
        best = None
        c = 0

        for i, m in enumerate(self.mu):
            diff = np.array(x) - np.array(m)
            dist = np.linalg.norm(diff) ** 2  # squared Euclidean distance

            # BUG FIX: check for None FIRST.  The original evaluated
            # `dist < best or best == None`, and `dist < None` raises
            # TypeError in Python 3 on the very first iteration.
            if best is None or dist < best:
                best = dist
                c = i

        return c + 1, best

    def cluster_assignement(self):
        """Return the 1-based closest-centroid index for every training point."""
        return [self.min_distance(point)[0] for point in self.x]

    def move_centroids(self):
        """Recompute each centroid as the mean of its assigned points.

        Clusters that received no points are dropped, so the result may
        contain fewer centroids than self.mu.
        """
        new_centroid = []
        c = self.cluster_assignement()

        for k, m in enumerate(self.mu):
            total = np.zeros(len(self.x[0]))
            n_points = 0  # number of points assigned to cluster k+1
            for i, j in enumerate(c):
                if j == k + 1:
                    n_points += 1
                    total += np.array(self.x[i])
            if n_points > 0:
                new_centroid.append((total / n_points).tolist())

        return new_centroid

    def run_clustering(self, n=20):
        """Main loop: random init, then alternate assignment and update
        steps for at most n iterations, stopping early on convergence.

        Returns (iterations_used, centroids).
        """
        self.mu = prev = self.rnd_clusters()

        for i in range(n):
            self.mu = self.move_centroids()
            if self.mu == prev:
                return i, self.mu
            prev = self.mu

        return n, self.mu
