import numpy as np
import math

def load_wine_data(file_path):
    """Read the comma-separated wine data file into a 2-D numpy array.

    The first field of each row (the class label) is parsed as int, every
    remaining field as float; blank lines are skipped.  The resulting array
    is float because numpy promotes the mixed row to a common dtype.
    """
    rows = []
    with open(file_path, 'r') as handle:
        for raw_line in handle:
            stripped = raw_line.strip()
            if not stripped:
                continue
            fields = stripped.split(',')
            row = [int(fields[0])]
            row.extend(float(field) for field in fields[1:])
            rows.append(row)
    return np.array(rows)

def filter_classes(data, classes=(1, 2)):
    """Return only the rows whose class label (column 0) is in *classes*.

    The default was a mutable list; a tuple is the safe idiom for default
    arguments (same values, so callers relying on the default are unaffected).
    """
    return data[np.isin(data[:, 0], classes)]

def normalize_features(data):
    """Z-score the feature columns (everything after column 0).

    The label column is carried through unchanged in column 0 of the result.
    """
    labels = data[:, :1]
    feats = data[:, 1:]
    mu = feats.mean(axis=0)
    sigma = feats.std(axis=0)
    standardized = (feats - mu) / sigma
    return np.hstack((labels, standardized))

# --- Script: load the raw data, keep a binary problem, standardize features ---
file_path = "wine.data"  # assumed to be the UCI wine dataset next to this script -- TODO confirm
raw_data = load_wine_data(file_path)
filtered_data = filter_classes(raw_data)  # default keeps classes 1 and 2 only
normalized_data = normalize_features(filtered_data)


def pca(data, n_components=2):
    """Project the feature columns onto their top principal components.

    Parameters
    ----------
    data : ndarray with the class label in column 0 and features after it.
    n_components : number of principal components to keep.

    Returns
    -------
    ndarray with the label column followed by the projected features.
    """
    features = data[:, 1:]
    cov_matrix = np.cov(features.T)
    # eigh is the correct routine for a symmetric covariance matrix: unlike
    # eig it guarantees real eigenvalues/eigenvectors and is more stable.
    eigenvalues, eigenvectors = np.linalg.eigh(cov_matrix)
    # eigh returns eigenvalues in ascending order; take the largest ones.
    order = np.argsort(eigenvalues)[::-1]
    top_eigenvectors = eigenvectors[:, order[:n_components]]
    # NOTE(review): features are projected without re-centering; upstream
    # callers pass standardized (zero-mean) data, so this matches centered PCA.
    reduced_features = features @ top_eigenvectors
    return np.hstack((data[:, :1], reduced_features))

pca_data = pca(normalized_data)  # labels in column 0, 2-D PCA projection after

def lda(data, n_components=1):
    """Fisher linear discriminant projection of the feature columns.

    Builds the within-class scatter S_w and between-class scatter S_b, then
    projects onto the leading eigenvectors of S_w^{-1} S_b.

    Parameters
    ----------
    data : ndarray with the class label in column 0 and features after it.
    n_components : number of discriminant directions to keep (at most
        n_classes - 1 directions are meaningful).

    Returns
    -------
    ndarray with the label column followed by the projected features.
    """
    labels = data[:, 0]
    features = data[:, 1:]
    class_labels = np.unique(labels)
    n_features = features.shape[1]

    # Within-class scatter: sum of per-class centered scatter matrices.
    mean_vectors = []
    S_within = np.zeros((n_features, n_features))
    for label in class_labels:
        class_features = features[labels == label]
        mean_vector = class_features.mean(axis=0)
        mean_vectors.append(mean_vector)
        centered = class_features - mean_vector
        S_within += centered.T @ centered

    # Between-class scatter: class-size-weighted outer products of the
    # class-mean offsets from the overall mean.
    overall_mean = features.mean(axis=0)
    S_between = np.zeros((n_features, n_features))
    for mean_vector, label in zip(mean_vectors, class_labels):
        n = np.count_nonzero(labels == label)
        mean_diff = (mean_vector - overall_mean).reshape(-1, 1)
        S_between += n * (mean_diff @ mean_diff.T)

    # solve(S_w, S_b) computes S_w^{-1} S_b without forming the explicit
    # inverse -- numerically more stable and accurate than inv() @.
    eigvals, eigvecs = np.linalg.eig(np.linalg.solve(S_within, S_between))
    sorted_indices = np.argsort(-eigvals.real)
    # The generalized eigenproblem of a non-symmetric product can yield tiny
    # imaginary parts; the discriminants are real, so keep the real part.
    top_eigenvectors = eigvecs[:, sorted_indices[:n_components]].real
    reduced_features = features @ top_eigenvectors
    return np.hstack((labels.reshape(-1, 1), reduced_features))

lda_data = lda(normalized_data)  # computed but unused below -- NOTE(review): PCA data is what gets classified

class DecisionTree:
    """ID3-style decision tree classifier.

    A feature with more than 10 distinct values is treated as continuous and
    split on the best midpoint threshold; otherwise it is treated as
    categorical and split into one branch per observed value.
    """

    def __init__(self):
        # Learned model: nested dicts keyed by feature index; leaves are labels.
        self.tree = None

    @staticmethod
    def _majority_label(labels):
        # Most frequent label. np.unique handles float labels; the original
        # np.bincount(labels) required integer labels and crashed on the
        # float labels this script produces.
        values, counts = np.unique(labels, return_counts=True)
        return values[np.argmax(counts)]

    def entropy(self, labels):
        """Shannon entropy (in bits) of the label distribution."""
        _, counts = np.unique(labels, return_counts=True)
        probabilities = counts / len(labels)
        return -np.sum(probabilities * np.log2(probabilities))

    def information_gain(self, data, labels, feature_index):
        """Return (gain, threshold) for splitting on *feature_index*.

        *threshold* is None for a categorical (<= 10 distinct values) split.
        """
        total_entropy = self.entropy(labels)
        feature_values = data[:, feature_index]
        unique_values = np.unique(feature_values)

        if len(unique_values) > 10:
            # Continuous: evaluate every midpoint between consecutive values.
            thresholds = (unique_values[:-1] + unique_values[1:]) / 2
            best_gain = 0.0
            best_threshold = None
            total = len(labels)
            for threshold in thresholds:
                left = feature_values <= threshold
                right = ~left
                weighted_entropy = (
                    (np.count_nonzero(left) / total) * self.entropy(labels[left])
                    + (np.count_nonzero(right) / total) * self.entropy(labels[right])
                )
                gain = total_entropy - weighted_entropy
                if gain > best_gain:
                    best_gain = gain
                    best_threshold = threshold
            return best_gain, best_threshold

        # Categorical: weighted entropy over each distinct value.
        weighted_entropy = 0.0
        for value in unique_values:
            subset = labels[feature_values == value]
            weighted_entropy += (len(subset) / len(labels)) * self.entropy(subset)
        return total_entropy - weighted_entropy, None

    def build_tree(self, data, labels):
        """Recursively build the tree; returns a leaf label or a nested dict."""
        # Pure node -> leaf.
        if len(np.unique(labels)) == 1:
            return labels[0]
        # No features left -> majority leaf.
        if data.shape[1] == 0:
            return self._majority_label(labels)

        best_feature = None
        best_threshold = None
        best_gain = 0.0
        for i in range(data.shape[1]):
            gain, threshold = self.information_gain(data, labels, i)
            if gain > best_gain:
                best_gain = gain
                best_feature = i
                best_threshold = threshold

        # No split improves purity (e.g. duplicate rows with conflicting
        # labels): stop here instead of recursing forever on the same data.
        if best_feature is None:
            return self._majority_label(labels)

        if best_threshold is not None:
            # Binary continuous split.
            tree = {best_feature: {'threshold': best_threshold, 'left': None, 'right': None}}
            left_indices = data[:, best_feature] <= best_threshold
            right_indices = ~left_indices
            tree[best_feature]['left'] = self.build_tree(data[left_indices], labels[left_indices])
            tree[best_feature]['right'] = self.build_tree(data[right_indices], labels[right_indices])
        else:
            # Multiway categorical split: one branch per observed value.
            tree = {best_feature: {}}
            for value in np.unique(data[:, best_feature]):
                mask = data[:, best_feature] == value
                tree[best_feature][value] = self.build_tree(data[mask], labels[mask])

        return tree

    def fit(self, data, labels):
        """Train on a (n_samples, n_features) array and its label vector."""
        self.tree = self.build_tree(data, labels)

    def predict(self, sample):
        """Classify a single sample.

        Returns None when a categorical value was never seen during training.
        """
        node = self.tree
        while isinstance(node, dict):
            feature = next(iter(node))
            branch = node[feature]
            if 'threshold' in branch:
                side = 'left' if sample[feature] <= branch['threshold'] else 'right'
                node = branch[side]
            else:
                node = branch.get(sample[feature])
                if node is None:
                    return None
        return node

# --- Script: train/test a decision tree on the 2-D PCA projection ---
features = pca_data[:, 1:]
labels = pca_data[:, 0]
indices = np.arange(len(features))
# NOTE(review): no random seed is set, so the split (and the reported
# accuracy) differs from run to run.
np.random.shuffle(indices)

features = features[indices]
labels = labels[indices]

# Fixed split: first 100 shuffled samples train, the remainder test.
train_data, test_data = features[:100], features[100:]
train_labels, test_labels = labels[:100], labels[100:]

tree = DecisionTree()
tree.fit(train_data, train_labels)
# predict() may return None for an unseen categorical value; None never
# equals a float label, so such samples simply count as misclassified.
predictions = [tree.predict(sample) for sample in test_data]
accuracy = np.mean(predictions == test_labels)
print(f"分类准确率: {accuracy * 100:.2f}%")  # message means "classification accuracy"
