
import csv, math, random, os
from collections import Counter
from typing import List, Dict, Any, Tuple


def load_wine(path: str = "wine.data") -> List[List[float]]:
    """Load the UCI wine dataset from a CSV file.

    Each row becomes a list of floats; column 0 is the class label and
    columns 1+ are the continuous features.

    Args:
        path: Path to the CSV data file (no header row expected).

    Returns:
        A list of rows, each a list of floats.
    """
    data = []
    # BUG FIX: the original passed a Windows file path as the ``newline``
    # argument.  Per the csv module docs, files handed to csv.reader must
    # be opened with newline="".
    with open(path, newline="") as f:
        for row in csv.reader(f):
            if row:  # skip blank lines (e.g. a trailing newline)
                data.append([float(x) for x in row])
    return data

def normalize(dataset: List[List[float]]) -> List[List[float]]:
    """Min-max scale every feature column into [0, 1].

    Column 0 (the class label) is passed through unchanged.  A constant
    column (max == min) is mapped to 0.0 to avoid division by zero.

    Args:
        dataset: Rows of [label, feature1, feature2, ...].

    Returns:
        A new dataset with the same shape and scaled features.
    """
    columns = list(zip(*dataset))
    # Per-feature range statistics, label column excluded.
    lows = [min(col) for col in columns[1:]]
    highs = [max(col) for col in columns[1:]]
    scaled = []
    for row in dataset:
        new_row = [row[0]]
        for value, lo, hi in zip(row[1:], lows, highs):
            new_row.append(0.0 if hi == lo else (value - lo) / (hi - lo))
        scaled.append(new_row)
    return scaled

def cross_validation_split(
    dataset: List[Any], k: int = 10
) -> List[Tuple[List[Any], List[Any]]]:
    """Shuffle the dataset and produce k (train, test) splits.

    BUG FIX: the original set ``fold_size = len(data)`` (missing ``// k``),
    which made fold 0 the entire dataset and every other fold empty.

    Args:
        dataset: Rows to split; the input list is not modified.
        k: Number of folds.

    Returns:
        A list of k (train, test) pairs; together, each pair covers the
        whole dataset exactly once.
    """
    data = dataset[:]
    random.shuffle(data)
    fold_size = len(data) // k  # integer fold size; remainder handled below
    folds = [data[i * fold_size : (i + 1) * fold_size] for i in range(k)]
    folds[-1].extend(data[k * fold_size :])  # leftover rows join the last fold
    splits = []
    for i in range(k):
        test = folds[i]
        train = [row for j, fold in enumerate(folds) if j != i for row in fold]
        splits.append((train, test))
    return splits

#KNN
def euclidean(a: List[float], b: List[float]) -> float:
    """Euclidean distance over the feature columns of two rows.

    Index 0 (the class label) is excluded from the distance.
    """
    return math.dist(a[1:], b[1:])

def knn_predict(train: List[List[float]], sample: List[float], k: int = 5) -> float:
    """Predict a label for ``sample`` by majority vote of its k nearest rows.

    Args:
        train: Training rows of [label, feature1, ...].
        sample: Row to classify (its label column is ignored).
        k: Number of neighbors to vote.

    Returns:
        The most common label among the k closest training rows.
    """
    scored = [(euclidean(sample, row), row[0]) for row in train]
    # Stable sort on distance only, so ties keep training order.
    scored.sort(key=lambda pair: pair[0])
    nearest_labels = [label for _, label in scored[:k]]
    return Counter(nearest_labels).most_common(1)[0][0]

def knn_evaluate(train: List[List[float]], test: List[List[float]], k: int = 5) -> float:
    """Return the fraction of test rows classified correctly by KNN.

    A row counts as correct when the predicted label equals its own
    column-0 label.
    """
    hits = sum(1 for row in test if knn_predict(train, row, k) == row[0])
    return hits / len(test)

# ID3 decision tree
def entropy(labels: List[float]) -> float:
    """Shannon entropy (in bits) of a multiset of labels.

    Returns 0.0 for an empty input.
    """
    total = len(labels)
    if total == 0:
        return 0.0
    result = 0.0
    for count in Counter(labels).values():
        p = count / total
        result -= p * math.log2(p)
    return result

def best_split_continuous(data: List[List[float]], feat_idx: int) -> Tuple[float, float]:
    """Find the best binary threshold on a continuous feature.

    Candidate thresholds are the midpoints between consecutive distinct
    sorted feature values.  The best threshold maximizes information gain
    of the two-way split against the parent entropy.

    Args:
        data: Rows of [label, feature1, ...].
        feat_idx: Index of the feature column to split on.

    Returns:
        (threshold, gain); (0.0, 0.0) when the feature is constant.
    """
    sorted_vals = sorted(row[feat_idx] for row in data)
    thresholds = [
        (lo + hi) / 2 for lo, hi in zip(sorted_vals, sorted_vals[1:]) if lo != hi
    ]
    if not thresholds:
        return (0.0, 0.0)
    parent_entropy = entropy([row[0] for row in data])
    n = len(data)
    best_gain = -1
    best_th = thresholds[0]
    for th in thresholds:
        left_labels = [row[0] for row in data if row[feat_idx] <= th]
        right_labels = [row[0] for row in data if row[feat_idx] > th]
        if not left_labels or not right_labels:
            continue  # degenerate split; should not occur with midpoints
        children = (
            len(left_labels) / n * entropy(left_labels)
            + len(right_labels) / n * entropy(right_labels)
        )
        gain = parent_entropy - children
        if gain > best_gain:
            best_gain, best_th = gain, th
    return (best_th, best_gain)

class ID3Tree:
    """ID3-style decision tree with binary splits on continuous features.

    The fitted tree is a nested dict: internal nodes have keys
    "feat", "th", "left", "right"; leaves have the single key "leaf".
    """

    def __init__(self):
        # Root node; populated by fit().
        self.tree = None

    def _majority_leaf(self, labels: List[float]) -> Dict[str, Any]:
        """Leaf node carrying the most common label."""
        return {"leaf": Counter(labels).most_common(1)[0][0]}

    def _build(self, data: List[List[float]], feats: List[int]) -> Dict[str, Any]:
        """Recursively grow a subtree over ``data`` using features ``feats``."""
        labels = [row[0] for row in data]
        # Pure node: nothing left to split.
        if len(set(labels)) == 1:
            return {"leaf": labels[0]}
        # Out of features: fall back to a majority vote.
        if not feats:
            return self._majority_leaf(labels)
        # Score every remaining feature; keep the first highest-gain one.
        scored = [(best_split_continuous(data, feat), feat) for feat in feats]
        (chosen_th, chosen_gain), chosen_feat = max(scored, key=lambda item: item[0][1])
        if chosen_gain <= 0:
            return self._majority_leaf(labels)
        left_rows = [row for row in data if row[chosen_feat] <= chosen_th]
        right_rows = [row for row in data if row[chosen_feat] > chosen_th]
        if not left_rows or not right_rows:
            return self._majority_leaf(labels)
        child_feats = [feat for feat in feats if feat != chosen_feat]
        return {
            "feat": chosen_feat,
            "th": chosen_th,
            "left": self._build(left_rows, child_feats),
            "right": self._build(right_rows, child_feats),
        }

    def fit(self, train: List[List[float]]):
        """Build the tree from training rows of [label, feature1, ...]."""
        feature_indices = list(range(1, len(train[0])))  # skip the label column
        self.tree = self._build(train, feature_indices)

    def _predict_one(self, sample: List[float], node: Dict[str, Any]) -> float:
        """Walk from ``node`` down to a leaf for ``sample``."""
        while "leaf" not in node:
            branch = "left" if sample[node["feat"]] <= node["th"] else "right"
            node = node[branch]
        return node["leaf"]

    def predict(self, sample: List[float]) -> float:
        """Return the predicted label for a single row."""
        return self._predict_one(sample, self.tree)

    def accuracy(self, test: List[List[float]]) -> float:
        """Fraction of test rows whose prediction matches their label."""
        n_correct = sum(self.predict(row) == row[0] for row in test)
        return n_correct / len(test)

def main():
    """Run 10-fold cross-validation for KNN and ID3 on the wine dataset."""
    raw = load_wine("wine.data")
    data = normalize(raw)
    folds = cross_validation_split(data, k=10)

    # KNN evaluation
    acc_knn = [knn_evaluate(train, test, k=5) for train, test in folds]
    print("KNN 10-fold accuracy:", [f"{a:.3f}" for a in acc_knn])
    # FIX: divide by len() instead of a hard-coded 10, so the mean stays
    # correct if the fold count above ever changes.
    print("KNN mean:", f"{sum(acc_knn)/len(acc_knn):.3f}")

    # ID3 evaluation
    acc_id3 = []
    for train, test in folds:
        tree = ID3Tree()
        tree.fit(train)
        acc_id3.append(tree.accuracy(test))
    print("ID3 10-fold accuracy:", [f"{a:.3f}" for a in acc_id3])
    print("ID3 mean:", f"{sum(acc_id3)/len(acc_id3):.3f}")


if __name__ == "__main__":
    main()