import numpy as np
from scipy.optimize import minimize
import sys
import os
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_dir, ".."))
from data_process import prepare_for_training as pr
from torch import sigmoid

class Logistic_Regression:
    """One-vs-rest multi-class logistic regression trained with scipy CG.

    One binary classifier (one row of ``self.w``) is fitted per unique
    label; ``predict`` picks the class whose sigmoid score is highest.
    """

    def __init__(self, data, labels):
        """Normalize the training data and allocate one weight row per class.

        :param data: raw feature matrix, shape (samples, raw_features).
        :param labels: class labels aligned with rows of ``data``
            (assumed shape (samples, 1) — TODO confirm against callers).
        """
        # prepare_for_training is project code; presumably it standardizes the
        # features and may append a bias column — verify against data_process.
        (data_processed, data_mean, data_std) = pr.prepare_for_training(data)
        self.data = data_processed
        self.labels = labels
        self.unique_labels = np.unique(self.labels)
        self.features_num = self.data.shape[1]
        self.samples_num = self.data.shape[0]
        # Stored so prediction-time data could be normalized consistently.
        self.data_mean = data_mean
        self.data_std = data_std
        self.unique_labels_num = self.unique_labels.shape[0]
        # One weight vector per class (one-vs-rest layout).
        self.w = np.zeros([self.unique_labels_num, self.features_num])

    @staticmethod
    def _sigmoid(z):
        """Element-wise logistic function on numpy arrays.

        BUGFIX: the original code called ``sigmoid.sigmoid(...)`` while the
        file imports ``from torch import sigmoid`` — ``torch.sigmoid`` is a
        function, not a module, so that raised AttributeError (and torch's
        sigmoid would not accept the numpy arrays used here anyway).
        """
        return 1.0 / (1.0 + np.exp(-z))

    def train(self, num_iterations=1000):
        """Fit one binary classifier per unique label.

        :param num_iterations: maximum CG iterations per class.
        :returns: list with one per-class cost history (list of floats).
        :raises ArithmeticError: if the optimizer reports failure for a class.
        """
        all_cost_history = []
        for index, unique_label in enumerate(self.unique_labels):
            current_class_w = np.copy(self.w[index].reshape([self.features_num, 1]))
            # Binary 0/1 targets: 1 where the sample belongs to this class.
            current_labels = (self.labels == unique_label).astype(float)
            (optimized_w, cost_history) = self.gradient_descent(
                num_iterations, current_class_w, current_labels
            )
            self.w[index] = optimized_w
            all_cost_history.append(cost_history)
        print("training finish!")
        return all_cost_history

    def gradient_descent(self, num_iterations, current_class_w, current_labels):
        """Minimize the binary cross-entropy for one class with CG.

        :param num_iterations: maximum optimizer iterations.
        :param current_class_w: initial weights, shape (features, 1).
        :param current_labels: 0/1 targets, shape (samples, 1).
        :returns: (optimized flat weight vector, per-iteration cost history).
        :raises ArithmeticError: if scipy reports the optimization failed
            (e.g. hit maxiter without converging).
        """
        cost_history = []
        result = minimize(
            fun=lambda w: self.cost_function(current_labels, w),
            x0=current_class_w,
            method='CG',
            jac=lambda w: self.gradient_step(current_labels, w),
            # Record the cost after every optimizer iteration.
            callback=lambda w: cost_history.append(self.cost_function(current_labels, w)),
            options={'maxiter': num_iterations}
        )
        if not result.success:
            raise ArithmeticError(result.message)
        optimized_w = result.x
        return optimized_w, cost_history

    def cost_function(self, current_labels, current_w):
        """Mean binary cross-entropy for one one-vs-rest classifier.

        :param current_labels: 0/1 targets, shape (samples, 1).
        :param current_w: weight vector, flat (features,) or (features, 1).
        :returns: scalar cost (float).
        """
        predictions = self._sigmoid(np.dot(self.data, current_w))
        predictions = predictions.reshape((predictions.shape[0], 1))
        # Split positive/negative samples so log() is only evaluated on the
        # term that actually contributes — avoids 0 * log(0) = nan.
        pos_num = current_labels[current_labels == 1].shape[0]
        pos_labels = current_labels[current_labels == 1].reshape((pos_num, 1))
        neg_labels = current_labels[current_labels == 0].reshape((self.samples_num - pos_num, 1))
        pos_predictions = predictions[current_labels == 1].reshape((pos_num, 1))
        neg_predictions = predictions[current_labels == 0].reshape((self.samples_num - pos_num, 1))
        y_positive = np.dot(pos_labels.T, np.log(pos_predictions))
        y_negative = np.dot(1 - neg_labels.T, np.log(1 - neg_predictions))
        cost = -(1 / self.samples_num) * (y_positive + y_negative)
        return cost[0][0]

    def gradient_step(self, current_labels, current_w):
        """Gradient of the cross-entropy cost w.r.t. the weights.

        :param current_labels: 0/1 targets, shape (samples, 1).
        :param current_w: weight vector, flat (features,) or (features, 1).
        :returns: flat gradient vector, shape (features,).
        """
        predictions = self._sigmoid(np.dot(self.data, current_w))
        predictions = predictions.reshape((predictions.shape[0], 1))
        diff = predictions - current_labels
        gradients = (1 / self.samples_num) * np.dot(self.data.T, diff)
        # scipy's CG expects a flat jacobian.
        return gradients.flatten()

    def predict(self, data):
        """Predict a class label for each row of ``data``.

        :param data: raw feature matrix, shape (samples, raw_features).
        :returns: column vector of predicted labels, shape (samples, 1).
        """
        # NOTE(review): this normalizes with the *test* data's own statistics;
        # for consistent results it should reuse self.data_mean/self.data_std
        # from training — confirm what prepare_for_training supports.
        data_processed = pr.prepare_for_training(data)[0]
        prob = self._sigmoid(np.dot(data_processed, self.w.T))
        # Highest one-vs-rest score wins.
        max_indexes = np.argmax(prob, axis=1)
        class_predictions = np.empty(max_indexes.shape, dtype=object)
        for i, label in enumerate(self.unique_labels):
            class_predictions[max_indexes == i] = label
        return class_predictions.reshape((class_predictions.shape[0], 1))