import csv
import math
from collections import Counter

import numpy as np
import pandas as pd

class Node:
    """A binary decision-tree node.

    Internal nodes carry a (feature, value) split; leaves carry a class label.
    """

    def __init__(self, feature=None, value=None, label=None, left=None, right=None):
        self.feature = feature  # index of the feature used to split (internal nodes)
        self.value = value      # split threshold: samples with x[feature] <= value go left
        self.label = label      # class label (leaf nodes only; None for internal nodes)
        self.left = left        # subtree for x[feature] <= value
        self.right = right      # subtree for x[feature] > value


class DecisionTree:
    """ID3-style binary decision tree using information gain over threshold splits.

    Splitting stops when a node is pure, no features remain, or the best
    achievable information gain falls below `epsilon` (pre-pruning).
    """

    def __init__(self, epsilon=0.1):
        self.epsilon = epsilon  # minimum information gain required to keep splitting
        self.tree = None        # root Node, populated by fit()

    def fit(self, X, y):
        """Build the tree from training samples X (2-D) and labels y (1-D)."""
        self.tree = self.build_tree(X, y)

    def predict(self, X):
        """Return a list of predicted labels, one per row of X.

        BUGFIX: the tree is grown with `<= value` splits (see split_dataset),
        so traversal must use the same comparison. The original `==` test sent
        every sample with a feature value strictly below the threshold down
        the wrong (right) branch.
        """
        y_pred = []
        for x in X:
            node = self.tree
            while node.label is None:
                if x[node.feature] <= node.value:
                    node = node.left
                else:
                    node = node.right
            y_pred.append(node.label)
        return y_pred

    def build_tree(self, X, y):
        """Recursively grow a subtree for (X, y); return its root Node."""
        if len(set(y)) == 1:            # pure node -> leaf with that label
            return Node(label=y[0])
        if len(X[0]) == 0:              # no features left -> majority-vote leaf
            return Node(label=self.majority_class(y))
        best_feature, best_value = self.choose_best_feature(X, y)
        if best_feature is None:        # best gain below epsilon -> stop splitting
            return Node(label=self.majority_class(y))
        left_X, left_y, right_X, right_y = self.split_dataset(X, y, best_feature, best_value)
        return Node(feature=best_feature, value=best_value,
                    left=self.build_tree(left_X, left_y),
                    right=self.build_tree(right_X, right_y))

    def choose_best_feature(self, X, y):
        """Return (feature_index, threshold) maximizing information gain.

        Returns (None, None) when the best gain is below self.epsilon, which
        signals the caller to create a leaf instead of splitting.
        """
        base_entropy = self.entropy(y)
        n = len(y)
        best_info_gain = 0.0
        best_feature = None
        best_value = None
        for i in range(len(X[0])):
            for value in set(x[i] for x in X):
                _, left_y, _, right_y = self.split_dataset(X, y, i, value)
                if len(left_y) == 0 or len(right_y) == 0:
                    continue            # degenerate split, nothing separated
                info_gain = (base_entropy
                             - len(left_y) / n * self.entropy(left_y)
                             - len(right_y) / n * self.entropy(right_y))
                if info_gain > best_info_gain:
                    best_info_gain = info_gain
                    best_feature = i
                    best_value = value
        if best_info_gain < self.epsilon:
            return None, None
        return best_feature, best_value

    def split_dataset(self, X, y, feature_index, threshold):
        """Partition (X, y) into (left: x[f] <= threshold, right: x[f] > threshold).

        Vectorized with boolean masks instead of a per-row Python loop;
        returns numpy arrays exactly as the original did.
        """
        X = np.asarray(X)
        y = np.asarray(y)
        mask = X[:, feature_index] <= threshold
        return X[mask], y[mask], X[~mask], y[~mask]

    def entropy(self, y):
        """Return the Shannon entropy (base 2) of the label sequence y."""
        n = len(y)
        return -sum((c / n) * math.log2(c / n) for c in Counter(y).values())

    def majority_class(self, y):
        """Return the most frequent label in y (first seen wins ties,
        matching the original dict-iteration tie-breaking)."""
        return Counter(y).most_common(1)[0][0]

def load_data(path='wine_data.csv'):
    """Load a CSV dataset whose FIRST column is the class label.

    path: CSV file path; the default keeps the original hard-coded behavior,
          so existing `load_data()` callers are unaffected.
    Returns (X, y): feature matrix (all columns except the first) and the
    label vector, both as numpy arrays.
    """
    data = pd.read_csv(path)
    X = data.iloc[:, 1:].values  # every column after the label
    y = data.iloc[:, 0].values   # first column is assumed to be the label
    return X, y


def k_fold_cross_validation(X, y, model, k=5):
    """Evaluate `model` with k-fold cross-validation; return the mean accuracy.

    X, y : numpy-indexable samples and labels.
    model: object exposing fit(X, y) and predict(X).
    k    : number of folds. NOTE: the `len(X) % k` leftover samples are never
           placed in a test fold (they always remain in the training split).

    Prints per-fold and average accuracy, and returns the average.
    """
    num_samples = len(X)
    fold_size = num_samples // k
    indices = np.arange(num_samples)
    np.random.shuffle(indices)
    accuracies = []

    for i in range(k):
        lo, hi = i * fold_size, (i + 1) * fold_size
        test_indices = indices[lo:hi]
        train_indices = np.concatenate((indices[:lo], indices[hi:]))

        model.fit(X[train_indices], y[train_indices])
        y_pred = model.predict(X[test_indices])

        # predict() may return a plain list; make the elementwise comparison
        # explicit instead of relying on numpy's list coercion.
        accuracy = np.mean(np.asarray(y_pred) == y[test_indices])
        accuracies.append(accuracy)
        print(f'Fold {i + 1} Accuracy:', accuracy)

    avg_accuracy = np.mean(accuracies)
    print(f'Average Accuracy: {avg_accuracy}')
    # BUGFIX: the original computed the average but never returned it, so
    # callers had no programmatic access to the result.
    return avg_accuracy
if __name__ == '__main__':
    # Guard the driver so importing this module does not immediately read
    # 'wine_data.csv' and train; the original ran these statements as
    # module-level side effects on every import.
    X, y = load_data()
    tree = DecisionTree()
    k_fold_cross_validation(X, y, tree, k=5)