import math
from collections import Counter

def calc_entropy(y):
    """Return the Shannon entropy (in bits) of the label sequence *y*.

    Args:
        y: sequence of hashable class labels.

    Returns:
        float: entropy in bits; 0.0 for an empty or single-class sequence.
    """
    if not y:
        # An empty sample carries no information; also avoids division by zero.
        return 0.0
    n = len(y)
    counts = Counter(y)
    # H(y) = -sum over classes of p * log2(p)
    return -sum((c / n) * math.log2(c / n) for c in counts.values())

def calc_info_gain(X, y, feature_index):
    """Return the information gain of splitting *X* on *feature_index*.

    Rows of *X* are expected to carry the class label in their LAST
    column (``row[-1]``), and *y* is the full label list for *X*.

    Args:
        X: list of example rows (label in the last column).
        y: list of class labels, one per row of X.
        feature_index: column index of the candidate split feature.

    Returns:
        float: entropy(y) minus the size-weighted entropy of the split
        partitions; 0.0 when y is empty.
    """
    if not y:
        # No examples: nothing to gain; avoids division by zero below.
        return 0.0
    n = len(y)  # hoisted: invariant across the loop
    original_entropy = calc_entropy(y)
    weighted_entropy = 0.0
    for value in {row[feature_index] for row in X}:
        sub_y = [row[-1] for row in X if row[feature_index] == value]
        weighted_entropy += (len(sub_y) / n) * calc_entropy(sub_y)
    return original_entropy - weighted_entropy

def id3(X, y, features):
    """Recursively build an ID3 decision tree.

    Rows of *X* must hold the class label in their last column; *y* is
    the label list for the current example set. Returns either a bare
    label (leaf) or a nested dict {feature_index: {value: subtree}}.
    """
    distinct_labels = set(y)
    # Pure node: every remaining example shares one label.
    if len(distinct_labels) == 1:
        return y[0]
    # Features exhausted: fall back to a majority vote.
    if not features:
        return max(distinct_labels, key=y.count)

    # Greedy step: split on the feature with the highest information gain.
    best = max(features, key=lambda f: calc_info_gain(X, y, f))
    remaining = [f for f in features if f != best]

    branches = {}
    for value in {row[best] for row in X}:
        subset = [row for row in X if row[best] == value]
        branches[value] = id3(subset, [row[-1] for row in subset], remaining)
    return {best: branches}

def load_wine_dataset(filename):
    """Read a comma-separated data file into a list of string-value rows.

    Args:
        filename: path to a CSV-like file with no header row.

    Returns:
        list[list[str]]: one list of field strings per non-blank line.

    Blank lines (e.g. a trailing newline at end of file) are skipped so
    they do not produce bogus [''] rows.
    """
    dataset = []
    with open(filename, 'r') as file:
        for line in file:  # iterate lazily; readlines() would load it all at once
            line = line.strip()
            if line:
                dataset.append(line.split(','))
    return dataset

if __name__ == "__main__":

    # UCI wine.data rows are: class label FIRST, then 13 attribute columns.
    # The tree functions (id3 / calc_info_gain) expect the label in the LAST
    # column, so rotate each row; the original code took row[-1] (proline, an
    # attribute) as the label, which trained on the wrong target.
    raw_rows = load_wine_dataset('wine.data')
    wine_dataset = [row[1:] + row[:1] for row in raw_rows]

    features = list(range(13))  # attribute columns 0..12 after rotation
    labels = [row[-1] for row in wine_dataset]

    wine_tree = id3(wine_dataset, labels, features)
    print(wine_tree)