import math
from data_loader import load_wine_data, standardize, train_test_split

def matrix_multiply(A, B):
    """Multiply matrix A (m x k) by matrix B (k x n); return the m x n product.

    Raises:
        ValueError: if the inner dimensions of A and B do not match.
    """
    if len(A[0]) != len(B):
        raise ValueError("矩阵维度不匹配")

    # Pre-transpose B so each output entry is a plain dot product of two rows.
    B_cols = list(zip(*B))
    return [
        [sum(a * b for a, b in zip(row, col)) for col in B_cols]
        for row in A
    ]

def matrix_transpose(M):
    """Return the transpose of matrix M (a list of equal-length rows)."""
    if not M:
        return []

    # zip(*M) iterates columns; materialize each as a list to match the
    # list-of-lists convention used elsewhere in this module.
    return [list(column) for column in zip(*M)]

def covariance_matrix(X):
    """Return the sample covariance matrix (n_features x n_features) of X.

    X is a list of samples (rows of features). Uses the unbiased estimator
    with denominator (n_samples - 1), so X must contain at least 2 samples.
    Returns [] for empty input.
    """
    if not X:
        return []

    n_samples = len(X)
    n_features = len(X[0])

    # Per-feature means.
    means = [sum(sample[i] for sample in X) / n_samples
             for i in range(n_features)]

    # Center the data once, instead of re-subtracting means inside the
    # O(n_features^2) loop below.
    centered = [[sample[i] - means[i] for i in range(n_features)]
                for sample in X]

    # The covariance matrix is symmetric: compute only the upper triangle
    # and mirror it, halving the work.
    cov = [[0.0] * n_features for _ in range(n_features)]
    for i in range(n_features):
        for j in range(i, n_features):
            s = sum(row[i] * row[j] for row in centered)
            cov[i][j] = cov[j][i] = s / (n_samples - 1)

    return cov

def eigenvalues_and_eigenvectors(A, max_iter=1000, tolerance=1e-10):
    """Estimate the dominant eigenpair of square matrix A via power iteration.

    Starting from a random unit vector, repeatedly applies A and normalizes,
    stopping when the vector stops changing (L1 change < `tolerance`) or after
    `max_iter` iterations.

    Returns:
        (eigenvalue, eigenvector): the dominant eigenvalue (Rayleigh quotient)
        and a unit-norm eigenvector estimate. Returns (0.0, []) for an empty
        matrix; returns (0.0, start_vector) when max_iter == 0.
    """
    import random

    n = len(A)
    if n == 0:
        # Empty matrix: avoid dividing by a zero norm below.
        return 0.0, []

    # Random start vector: with probability 1 it has a nonzero component
    # along the dominant eigenvector, so the iteration converges to it.
    v = [random.random() for _ in range(n)]
    norm = math.sqrt(sum(x * x for x in v))
    v = [x / norm for x in v]

    eigenvalue = 0.0  # defined even if the loop body never runs (max_iter == 0)
    for _ in range(max_iter):
        # Matrix-vector product A @ v.
        Av = [sum(A[i][j] * v[j] for j in range(n)) for i in range(n)]

        # Rayleigh-quotient eigenvalue estimate (v is unit-norm).
        eigenvalue = sum(Av[i] * v[i] for i in range(n))

        norm = math.sqrt(sum(x * x for x in Av))
        if norm == 0:
            # v lies in the null space of A; cannot iterate further.
            break
        new_v = [x / norm for x in Av]

        # Converged once the direction stops changing.
        if sum(abs(new_v[i] - v[i]) for i in range(n)) < tolerance:
            break

        v = new_v

    return eigenvalue, v

def pca(X, n_components=2):
    """Project X onto its top `n_components` principal components.

    Uses power iteration on the covariance matrix. After each dominant
    eigenpair is extracted, the matrix is deflated (cov -= lambda * v v^T,
    Hotelling deflation) so the next power iteration converges to the NEXT
    component instead of returning the same dominant eigenvector every time
    (which was the previous behavior and made all output columns identical).

    Returns:
        (X_pca, W): the projected data (n_samples x n_components) and the
        projection matrix W (n_features x n_components, eigenvectors as
        columns).
    """
    # Covariance of the (ideally standardized) input.
    cov = covariance_matrix(X)
    n = len(cov)

    eigen_pairs = []
    for _ in range(n_components):
        eigenvalue, eigenvector = eigenvalues_and_eigenvectors(cov)
        eigen_pairs.append((eigenvalue, eigenvector))

        # Deflation: remove the found component so subsequent iterations
        # find eigenvectors orthogonal to the ones already extracted.
        for i in range(n):
            for j in range(n):
                cov[i][j] -= eigenvalue * eigenvector[i] * eigenvector[j]

    # Projection matrix with one eigenvector per column.
    W = matrix_transpose([pair[1] for pair in eigen_pairs])

    # Project the data onto the principal components.
    X_pca = matrix_multiply(X, W)

    return X_pca, W

def lda(X, y, n_components=1):
    """Two-class Fisher linear discriminant analysis.

    Projects X onto the Fisher direction w = Sw^-1 (m1 - m2), where Sw is the
    within-class scatter matrix and m1, m2 are the class means. (The previous
    implementation computed Sw and then discarded it, projecting onto a
    hard-coded all-ones vector.) With C classes LDA yields at most C-1
    components, so for the two-class case exactly one component is produced;
    `n_components` values other than 1 are ignored.

    Fallbacks: if Sw is singular the raw mean difference is used as the
    direction; if there are not exactly two classes, the all-ones projection
    is kept. The direction is normalized to unit length.

    Returns:
        (X_lda, W): projected data (n_samples x 1) and the projection
        matrix W (n_features x 1).
    """
    n_features = len(X[0])
    classes = sorted(set(y))  # sorted for deterministic sign of the direction

    # Per-class means and the within-class scatter matrix Sw.
    Sw = [[0.0] * n_features for _ in range(n_features)]
    class_means = {}
    for c in classes:
        X_c = [X[i] for i in range(len(X)) if y[i] == c]
        if not X_c:
            continue
        mean_c = [sum(s[i] for s in X_c) / len(X_c) for i in range(n_features)]
        class_means[c] = mean_c
        for sample in X_c:
            d = [sample[i] - mean_c[i] for i in range(n_features)]
            for i in range(n_features):
                for j in range(n_features):
                    Sw[i][j] += d[i] * d[j]

    if len(classes) == 2:
        m1, m2 = class_means[classes[0]], class_means[classes[1]]
        diff = [m1[i] - m2[i] for i in range(n_features)]
        w = _solve_linear_system(Sw, diff)
        if w is None:
            # Sw is singular: fall back to the un-whitened mean difference.
            w = diff
    else:
        # Not a two-class problem: keep the original all-ones projection.
        w = [1.0] * n_features

    # Normalize so projections are independent of the direction's scale.
    norm = math.sqrt(sum(x * x for x in w))
    if norm > 0:
        w = [x / norm for x in w]

    W = [[w[i]] for i in range(n_features)]
    X_lda = [[sum(sample[i] * w[i] for i in range(n_features))] for sample in X]

    return X_lda, W


def _solve_linear_system(A, b):
    """Solve A x = b by Gaussian elimination with partial pivoting.

    Returns the solution vector, or None when A is (numerically) singular.
    Neither A nor b is modified.
    """
    n = len(b)
    # Work on an augmented copy so the caller's matrices stay untouched.
    M = [list(A[i]) + [b[i]] for i in range(n)]

    for col in range(n):
        # Partial pivoting: bring the largest remaining entry to the diagonal.
        pivot = max(range(col, n), key=lambda r: abs(M[r][col]))
        if abs(M[pivot][col]) < 1e-12:
            return None
        M[col], M[pivot] = M[pivot], M[col]
        for r in range(col + 1, n):
            factor = M[r][col] / M[col][col]
            for k in range(col, n + 1):
                M[r][k] -= factor * M[col][k]

    # Back substitution.
    x = [0.0] * n
    for i in range(n - 1, -1, -1):
        x[i] = (M[i][n] - sum(M[i][k] * x[k] for k in range(i + 1, n))) / M[i][i]
    return x

def main():
    """Run PCA and LDA on the Wine data (classes 1 and 2) and print the results."""
    print("=== Wine数据PCA和LDA降维（纯Python实现） ===\n")

    # Load the raw data.
    X, y = load_wine_data()

    # Keep only samples labeled class 1 or class 2.
    X_filtered = []
    y_filtered = []
    for sample, label in zip(X, y):
        if label in (1, 2):
            X_filtered.append(sample)
            y_filtered.append(label)

    print(f"数据形状: {len(X_filtered)} 样本, {len(X_filtered[0])} 特征")
    print(f"类别分布: 类别1 - {y_filtered.count(1)} 样本, 类别2 - {y_filtered.count(2)} 样本")

    # Standardize features before projecting.
    X_std = standardize(X_filtered)

    # PCA down to 2 components.
    print("\n进行PCA降维...")
    X_pca, W_pca = pca(X_std, n_components=2)

    print("PCA降维后的前5个样本(两维特征):")
    for i in range(min(5, len(X_pca))):
        print(f"样本{i+1}: [{X_pca[i][0]:.4f}, {X_pca[i][1]:.4f}]")

    # LDA down to 1 component.
    print("\n进行LDA降维...")
    X_lda, W_lda = lda(X_std, y_filtered, n_components=1)

    print("LDA降维后的前5个样本:")
    for i in range(min(5, len(X_lda))):
        print(f"样本{i+1}: [{X_lda[i][0]:.4f}]")

    # Crude text-mode scatter plot of the PCA result.
    print("\nPCA结果文本可视化:")
    print("类别1: ★, 类别2: ●")
    print("-" * 50)

    # Bounding box of the PCA projections.
    pca_x = [sample[0] for sample in X_pca]
    pca_y = [sample[1] for sample in X_pca]
    min_x, max_x = min(pca_x), max(pca_x)
    min_y, max_y = min(pca_y), max(pca_y)

    # Guard against a degenerate axis (all points identical along it),
    # which previously divided by zero when mapping to grid cells.
    span_x = (max_x - min_x) or 1.0
    span_y = (max_y - min_y) or 1.0

    # Rasterize the points onto a small character grid.
    grid_size = 20
    grid = [[' ' for _ in range(grid_size)] for _ in range(grid_size)]

    for i in range(len(X_pca)):
        x_idx = int((X_pca[i][0] - min_x) / span_x * (grid_size - 1))
        y_idx = int((X_pca[i][1] - min_y) / span_y * (grid_size - 1))
        x_idx = max(0, min(grid_size - 1, x_idx))
        y_idx = max(0, min(grid_size - 1, y_idx))

        # Later samples overwrite earlier ones landing in the same cell.
        if y_filtered[i] == 1:
            grid[y_idx][x_idx] = '★'
        else:
            grid[y_idx][x_idx] = '●'

    # Print top rows last so the Y axis points upward.
    for row in reversed(grid):
        print(''.join(row))

# Run the demo only when executed as a script (not when imported).
if __name__ == "__main__":
    main()