import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_moons

# Generate a 2D dataset
def generate_data():
    X, y = make_moons(n_samples=100, noise=0.2, random_state=42)
    return X, y

# Plot decision boundary for each split
def plot_decision_boundary(clf, X, y, step=0.02, title=""):
    """Draw the classifier's decision regions with the samples overlaid.

    A dense grid covering the data (padded by 1 unit on every side) is
    classified point-by-point; predictions are rendered as filled contours
    and the training points are scattered on top. The figure is redrawn
    and paused briefly so this can serve as one animation frame.
    """
    # Build the evaluation grid with a 1-unit margin around the data.
    pad = 1
    x_lo, x_hi = X[:, 0].min() - pad, X[:, 0].max() + pad
    y_lo, y_hi = X[:, 1].min() - pad, X[:, 1].max() + pad
    grid_x, grid_y = np.meshgrid(
        np.arange(x_lo, x_hi, step),
        np.arange(y_lo, y_hi, step),
    )

    # Classify every grid point, then restore the 2D grid shape.
    flat_points = np.column_stack((grid_x.ravel(), grid_y.ravel()))
    preds = clf.predict(flat_points).reshape(grid_x.shape)

    # Filled regions for the model's predictions, scatter for the samples.
    plt.contourf(grid_x, grid_y, preds, alpha=0.3, cmap='coolwarm')
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap='coolwarm', edgecolor='k', s=40)
    plt.title(title)
    plt.xlabel("Feature 1")
    plt.ylabel("Feature 2")
    plt.draw()
    plt.pause(1)  # Adjust to control speed of animation

# Train decision tree and animate each split
def animate_decision_tree(X, y, max_depth=3):
    """Animate decision-tree training by refitting at increasing depths.

    Parameters
    ----------
    X : ndarray of shape (n_samples, 2)
        2D feature matrix.
    y : ndarray of shape (n_samples,)
        Class labels.
    max_depth : int
        Deepest tree to fit; one frame is drawn per depth in 1..max_depth.
    """
    # max_depth is set via set_params before every fit, so no initial
    # value is needed in the constructor.
    clf = DecisionTreeClassifier(random_state=42)

    _, ax = plt.subplots(figsize=(8, 6))
    plt.ion()  # Turn on interactive mode so each frame renders immediately

    # sklearn trees cannot be grown incrementally, so refit from scratch
    # at each depth and redraw the boundary as one animation frame.
    for depth in range(1, max_depth + 1):
        clf.set_params(max_depth=depth)
        clf.fit(X, y)

        ax.clear()  # Wipe the previous frame before redrawing
        plot_decision_boundary(clf, X, y, title=f"Decision Tree - Depth {depth}")

    plt.ioff()  # Turn off interactive mode
    plt.show()  # Keep the final frame on screen

if __name__ == "__main__":
    # Build the dataset, then watch the tree's boundary evolve per depth.
    features, labels = generate_data()
    animate_decision_tree(features, labels, max_depth=3)