import csv
import random
import numpy as np


def load_csv(filename):
    """Load a CSV file into a list of rows (each row a list of strings).

    Blank rows are skipped. The file is opened with ``newline=''`` as the
    csv module documentation requires, so quoted fields containing embedded
    newlines are parsed correctly on every platform.
    """
    dataset = []
    # newline='' lets the csv module do its own universal-newline handling.
    with open(filename, 'r', newline='') as file:
        for row in csv.reader(file):
            if row:  # skip completely blank lines
                dataset.append(row)
    return dataset


def split_dataset(dataset, split_ratio):
    """Randomly partition *dataset* into a train and a test list.

    ``split_ratio`` is the fraction of rows that go into the training set
    (truncated to an integer count). Rows are drawn without replacement
    using ``random.randrange``; the remainder becomes the test set.
    """
    remaining = list(dataset)
    target = int(len(dataset) * split_ratio)
    train = []
    for _ in range(target):
        pick = random.randrange(len(remaining))
        train.append(remaining.pop(pick))
    return train, remaining


def euclidean_distance(x1, x2):
    """Return the Euclidean (L2) distance between two numeric arrays."""
    return np.sqrt(np.sum(np.square(x1 - x2)))


def get_k_nearest_neighbors(train_set, test_instance, k):
    """Return the k training rows closest to *test_instance*.

    Distances are computed on the feature columns only (the trailing label
    column of each row is excluded). Results are ordered nearest-first.
    If ``k`` exceeds the number of training rows, all rows are returned
    instead of raising IndexError (the original bug).
    """
    distances = []
    for train_instance in train_set:
        dist = euclidean_distance(train_instance[:-1], test_instance[:-1])
        distances.append((train_instance, dist))
    distances.sort(key=lambda pair: pair[1])
    # Slicing clamps k to the available rows rather than indexing past the end.
    return [instance for instance, _ in distances[:k]]


def predict_classification(train_set, test_instance, k):
    """Predict the label of *test_instance* by majority vote of its k
    nearest neighbors (each neighbor's label is its last element)."""
    neighbors = get_k_nearest_neighbors(train_set, test_instance, k)
    votes = {}
    for neighbor in neighbors:
        label = neighbor[-1]
        votes[label] = votes.get(label, 0) + 1
    # Most frequent label wins; ties were already resolved arbitrarily.
    return max(votes, key=votes.get)


def knn(train_set, test_set, k):
    """Classify every row of *test_set* against *train_set* and return the
    list of predicted labels, one per test row, in order."""
    return [predict_classification(train_set, row, k) for row in test_set]


def standardize_dataset(dataset, mean=None, std=None):
    """Z-score the feature columns of *dataset*, keeping the label column.

    Each row's leading columns are treated as features and the final column
    as a numeric label. Returns a 2-D float array with standardized features
    and the untouched labels stacked as the last column.

    Parameters
    ----------
    dataset : rows whose values are convertible to float.
    mean, std : optional precomputed per-feature statistics (e.g. from the
        training split). When omitted, they are computed from *dataset*
        itself, matching the original behavior.

    A constant feature (std == 0) previously produced NaN via division by
    zero; its divisor is now replaced with 1 so the column standardizes to 0.
    """
    X = np.array([row[:-1] for row in dataset], dtype=float)
    y = np.array([row[-1] for row in dataset], dtype=float)
    if mean is None:
        mean = np.mean(X, axis=0)
    if std is None:
        std = np.std(X, axis=0)
    # Guard constant features: dividing by zero would yield NaN columns.
    safe_std = np.where(std == 0, 1.0, std)
    X_std = (X - mean) / safe_std
    return np.column_stack((X_std, y))


def main():
    """Run the full KNN pipeline on iris.csv and report test accuracy.

    Fix over the original: both splits are now standardized with statistics
    computed from the TRAINING rows only. Fitting the scaler on the test
    split as well leaked test information into preprocessing.
    """
    filename = 'iris.csv'
    split_ratio = 0.67
    k = 3

    dataset = load_csv(filename)
    train_set, test_set = split_dataset(dataset, split_ratio)

    # Scaler statistics come from the training split only (no leakage).
    # NOTE(review): assumes the label column is numeric, as the original
    # dtype=float conversion in standardize_dataset already required.
    train_X = np.array([row[:-1] for row in train_set], dtype=float)
    mean = np.mean(train_X, axis=0)
    std = np.std(train_X, axis=0)
    std = np.where(std == 0, 1.0, std)  # guard constant features

    def _standardize(rows):
        # Apply the training-set statistics to any split's feature columns.
        X = np.array([row[:-1] for row in rows], dtype=float)
        y = np.array([row[-1] for row in rows], dtype=float)
        return np.column_stack(((X - mean) / std, y))

    train_set = _standardize(train_set)
    test_set = _standardize(test_set)

    predictions = knn(train_set, test_set, k)

    correct = sum(1 for actual, predicted in zip(test_set, predictions)
                  if actual[-1] == predicted)
    accuracy = correct / len(test_set)
    print('Accuracy:', accuracy)


if __name__ == '__main__':
    main()
