# test_cnn.py (you can run this from the project root directory)
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

import numpy as np
from ml_lib.core.tensor import Tensor
from ml_lib.nn.layers import Conv2d, ReLU, MaxPool2d, Flatten, Linear
from ml_lib.nn.module import  Module, Parameter
from ml_lib.optim import SGD
from ml_lib.nn.loss import CrossEntropyLoss
import time
import pickle
import gzip
import urllib.request
from typing import Tuple, List

def download_mnist_files():
    """
    Download the raw MNIST data files from the official site.

    Files already present in the cache directory are not re-downloaded.

    Returns:
        Path to the local cache directory containing the four .gz files.
    """
    # NOTE(review): yann.lecun.com has been unreliable in recent years;
    # consider switching to a mirror if downloads start failing.
    base_url = 'http://yann.lecun.com/exdb/mnist/'
    files = [
        'train-images-idx3-ubyte.gz',
        'train-labels-idx1-ubyte.gz',
        't10k-images-idx3-ubyte.gz',
        't10k-labels-idx1-ubyte.gz'
    ]

    cache_dir = os.path.join(os.path.dirname(__file__), "data", "mnist_raw")
    os.makedirs(cache_dir, exist_ok=True)

    for filename in files:
        filepath = os.path.join(cache_dir, filename)
        if not os.path.exists(filepath):
            # Fix: progress messages previously printed the literal text
            # "(unknown)" instead of the file being downloaded.
            print(f"下载 {filename}...")
            urllib.request.urlretrieve(base_url + filename, filepath)
            print(f"下载完成: {filename}")

    return cache_dir

def load_mnist_from_files(data_dir):
    """
    Read MNIST train/test splits from downloaded IDX files in `data_dir`.

    Args:
        data_dir: directory containing the four gzipped IDX files.

    Returns:
        ((train_images, train_labels), (test_images, test_labels)) as
        numpy uint8 arrays; images have shape (N, rows, cols), labels (N,).

    Raises:
        ValueError: if a file's magic number is not an IDX image/label code.
    """
    def read_idx_file(filename):
        """Parse one gzipped IDX file (big-endian header + uint8 payload)."""
        with gzip.open(os.path.join(data_dir, filename), 'rb') as f:
            # The magic number identifies the record type
            magic = int.from_bytes(f.read(4), 'big')
            if magic == 2051:  # image file
                num_images = int.from_bytes(f.read(4), 'big')
                rows = int.from_bytes(f.read(4), 'big')
                cols = int.from_bytes(f.read(4), 'big')
                data = np.frombuffer(f.read(), dtype=np.uint8)
                return data.reshape(num_images, rows, cols)
            elif magic == 2049:  # label file
                num_labels = int.from_bytes(f.read(4), 'big')
                data = np.frombuffer(f.read(), dtype=np.uint8)
                return data
            # Fix: previously an unknown magic number fell through and
            # silently returned None, which crashed callers much later.
            raise ValueError(f"Unexpected IDX magic number {magic} in {filename}")

    # Training split
    train_images = read_idx_file('train-images-idx3-ubyte.gz')
    train_labels = read_idx_file('train-labels-idx1-ubyte.gz')

    # Test split
    test_images = read_idx_file('t10k-images-idx3-ubyte.gz')
    test_labels = read_idx_file('t10k-labels-idx1-ubyte.gz')

    return (train_images, train_labels), (test_images, test_labels)

def load_mnist() -> Tuple[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]]:
    """
    Load the real MNIST handwritten-digit dataset.

    Resolution order: local pickle cache -> TensorFlow-hosted mnist.npz
    -> sklearn's fetch_openml -> synthetic fallback data.

    Returns:
        ((train_images, train_labels), (test_images, test_labels)) as
        Tensor objects; images are (N, 784) float32 in [0, 1] and labels
        are int32 class indices.
    """
    # Ensure the cache directory exists
    cache_dir = os.path.join(os.path.dirname(__file__), "data")
    os.makedirs(cache_dir, exist_ok=True)
    cache_file = os.path.join(cache_dir, "real_mnist_dataset.pkl")
    
    # If the cache file exists, load from it and skip the downloads
    if os.path.exists(cache_file):
        print(f"从本地缓存加载真实MNIST数据: {cache_file}")
        # NOTE(review): pickle.load is only safe because this cache is
        # written by this script itself; never point it at untrusted files.
        with open(cache_file, 'rb') as f:
            data_dict = pickle.load(f)
            # Convert the raw arrays back into Tensor objects
            train_images = Tensor(data_dict['train_images'])
            train_labels = Tensor(data_dict['train_labels'])
            test_images = Tensor(data_dict['test_images'])
            test_labels = Tensor(data_dict['test_labels'])
            return (train_images, train_labels), (test_images, test_labels)
    
    print("尝试下载真实的MNIST手写数字数据集...")
    
    # Sentinels: remain None until one of the download paths succeeds
    train_images, train_labels, test_images, test_labels = None, None, None, None
    
    # Method 1: try downloading the pre-packaged numpy archive
    try:
        import urllib.request
        print("尝试从numpy格式源下载MNIST数据...")
        
        base_url = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
        
        # First try the TensorFlow-format archive
        npz_file = os.path.join(cache_dir, "mnist.npz")
        if not os.path.exists(npz_file):
            print("下载mnist.npz...")
            urllib.request.urlretrieve(base_url + "mnist.npz", npz_file)
        
        # Load the archive
        with np.load(npz_file) as data:
            train_images = data['x_train'].astype(np.float32)
            train_labels = data['y_train'].astype(np.int32)
            test_images = data['x_test'].astype(np.float32) 
            test_labels = data['y_test'].astype(np.int32)
            
        print("成功从numpy格式源下载MNIST数据")
            
    except Exception as e:
        print(f"从numpy格式源下载失败: {e}")
        
        # Method 2: fall back to sklearn's OpenML fetcher
        try:
            from sklearn.datasets import fetch_openml
            print("尝试使用sklearn下载MNIST数据...")
            mnist = fetch_openml('mnist_784', version=1, as_frame=False, parser='auto')
            
            # sklearn returns one flat array; re-split it ourselves
            images = mnist.data.astype(np.float32)
            labels = mnist.target.astype(np.int32)
            
            # Shuffle, then carve out the train/test splits
            indices = np.random.permutation(len(images))
            train_size = 60000
            
            train_indices = indices[:train_size]
            test_indices = indices[train_size:train_size+10000]
            
            train_images = images[train_indices].reshape(-1, 28, 28)
            train_labels = labels[train_indices]
            test_images = images[test_indices].reshape(-1, 28, 28)
            test_labels = labels[test_indices]
            
            print("使用sklearn成功下载MNIST数据")
        except ImportError:
            print("sklearn不可用")
        except Exception as e:
            print(f"sklearn下载失败: {e}")
    
    # If every download path failed, fall back to synthetic data
    if train_images is None:
        print("无法下载真实MNIST数据，将生成模拟的手写数字数据...")
        return generate_digit_like_data()
    
    # Preprocessing
    print(f"原始数据形状: 训练集 {train_images.shape}, 测试集 {test_images.shape}")
    
    # Flatten each image to a 784-vector and normalize to the [0, 1] range
    train_images = train_images.reshape(train_images.shape[0], -1).astype(np.float32) / 255.0
    test_images = test_images.reshape(test_images.shape[0], -1).astype(np.float32) / 255.0
    
    # Cast labels to int32
    train_labels = train_labels.astype(np.int32)
    test_labels = test_labels.astype(np.int32)
    
    print(f"预处理后数据形状: 训练集 {train_images.shape}, 测试集 {test_images.shape}")
    print(f"标签范围: 训练集 {train_labels.min()}-{train_labels.max()}, 测试集 {test_labels.min()}-{test_labels.max()}")
    
    # Optional: use a smaller subset for quick training (uncomment below)
    # train_images = train_images[:10000]  # first 10000 training samples
    # train_labels = train_labels[:10000]
    # test_images = test_images[:2000]     # first 2000 test samples
    # test_labels = test_labels[:2000]
    
    # Wrap in Tensor objects
    train_images_tensor = Tensor(train_images)
    train_labels_tensor = Tensor(train_labels)
    test_images_tensor = Tensor(test_images)
    test_labels_tensor = Tensor(test_labels)
    
    print(f"创建训练集: {train_images_tensor.shape[0]} 个样本")
    print(f"创建测试集: {test_images_tensor.shape[0]} 个样本")
    
    # Persist to the local cache for the next run
    data_dict = {
        'train_images': train_images,
        'train_labels': train_labels,
        'test_images': test_images,
        'test_labels': test_labels
    }
    print(f"缓存数据集到: {cache_file}")
    with open(cache_file, 'wb') as f:
        pickle.dump(data_dict, f)
        
    return (train_images_tensor, train_labels_tensor), (test_images_tensor, test_labels_tensor)

def generate_digit_like_data() -> Tuple[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]]:
    """
    Generate synthetic handwritten-digit-like data (fallback used when
    the real MNIST dataset cannot be downloaded).

    Returns:
        ((train_images, train_labels), (test_images, test_labels)) as
        Tensor objects; images are flattened float32 vectors in [0, 1].
    """
    print("生成模拟手写数字数据...")

    n_train = 6000
    n_test = 1000
    img_size = 28
    input_dim = img_size * img_size  # 784

    # Axis-aligned rectangles (row0, row1, col0, col1) forming each digit's
    # rough silhouette; digits 0, 1 and 8 need special (circle/bar) handling.
    rect_templates = {
        2: ((2, 6, 8, 20), (6, 12, 14, 26), (12, 18, 2, 14), (18, 22, 8, 20)),
        3: ((2, 22, 2, 6), (2, 6, 6, 18), (10, 14, 6, 14), (18, 22, 6, 18)),
        4: ((2, 14, 2, 6), (10, 14, 6, 18), (2, 22, 14, 18)),
        5: ((2, 6, 2, 18), (6, 14, 2, 6), (10, 14, 6, 14),
            (14, 22, 14, 18), (18, 22, 2, 14)),
        6: ((6, 18, 2, 6), (2, 6, 6, 14), (6, 10, 6, 14),
            (14, 18, 6, 14), (18, 22, 8, 12)),
        7: ((2, 6, 2, 18), (6, 22, 14, 18)),
        9: ((2, 6, 8, 18), (6, 14, 2, 6), (6, 14, 14, 18),
            (10, 14, 6, 14), (14, 22, 14, 18)),
    }

    def create_digit_pattern(digit, size=28):
        """Draw a crude template for `digit` on a size x size canvas."""
        canvas = np.zeros((size, size))
        mid = size // 2

        if digit == 0:
            # A ring: filled disc with a smaller disc cut out
            yy, xx = np.ogrid[:size, :size]
            canvas[(xx - mid) ** 2 + (yy - mid) ** 2 <= (mid - 3) ** 2] = 1.0
            canvas[(xx - mid) ** 2 + (yy - mid) ** 2 <= (mid - 7) ** 2] = 0.0
        elif digit == 1:
            # A centered vertical bar
            canvas[:, mid - 1:mid + 2] = 1.0
        elif digit == 8:
            # Two stacked rings
            yy, xx = np.ogrid[:size, :size]
            for cy in (8, 20):
                canvas[(xx - mid) ** 2 + (yy - cy) ** 2 <= 36] = 1.0
                canvas[(xx - mid) ** 2 + (yy - cy) ** 2 <= 16] = 0.0
        else:
            # Remaining digits are unions of rectangles
            for r0, r1, c0, c1 in rect_templates[digit]:
                canvas[r0:r1, c0:c1] = 1.0

        return canvas

    def build_split(count, noise_scale):
        """Stack noisy copies of the digit templates; labels cycle 0-9."""
        flat_images = []
        labels = []
        for idx in range(count):
            label = idx % 10
            noisy = (create_digit_pattern(label, img_size)
                     + np.random.randn(img_size, img_size) * noise_scale)
            flat_images.append(np.clip(noisy, 0, 1).flatten())
            labels.append(label)
        return (np.array(flat_images, dtype=np.float32),
                np.array(labels, dtype=np.int32))

    # Training split gets stronger noise than the test split
    train_images, train_labels = build_split(n_train, 0.1)
    test_images, test_labels = build_split(n_test, 0.08)

    # Wrap everything in Tensor objects
    train_images_tensor = Tensor(train_images)
    train_labels_tensor = Tensor(train_labels)
    test_images_tensor = Tensor(test_images)
    test_labels_tensor = Tensor(test_labels)

    print(f"生成训练集: {train_images_tensor.shape[0]} 个样本")
    print(f"生成测试集: {test_images_tensor.shape[0]} 个样本")

    return (train_images_tensor, train_labels_tensor), (test_images_tensor, test_labels_tensor)

# class CNNMnist(Module):
#     def __init__(self):
#         super().__init__()
#         self.conv1 = Conv2d(in_channels=1, out_channels=8, kernel_size=3, stride=1, padding=1)
#         self.relu1 = ReLU()
#         self.pool1 = MaxPool2d(kernel_size=2, stride=2)
        
#         self.conv2 = Conv2d(in_channels=8, out_channels=16, kernel_size=3, stride=1, padding=1)
#         self.relu2 = ReLU()
#         self.pool2 = MaxPool2d(kernel_size=2, stride=2)
        
#         self.flatten = Flatten()
#         self.fc1 = Linear(in_features=7*7*16, out_features=128)
#         self.relu3 = ReLU()
#         self.fc2 = Linear(in_features=128, out_features=10)

#     def forward(self, x: Tensor) -> Tensor:
#         x = self.conv1(x)
#         x = self.relu1(x)
#         x = self.pool1(x)
        
#         x = self.conv2(x)
#         x = self.relu2(x)
#         x = self.pool2(x)
        
#         x = self.flatten(x)
#         x = self.fc1(x)
#         x = self.relu3(x)
#         x = self.fc2(x)
#         return x

class CNNMnist(Module):
    """
    Minimal CNN classifier for MNIST: one conv block plus a linear head.

    Architecture: Conv2d(1->8, 3x3, pad 1) -> ReLU -> MaxPool2d(2x2)
    -> Flatten -> Linear(14*14*8 -> 10 logits).
    """

    def __init__(self):
        super().__init__()
        # padding=1 keeps the 28x28 spatial size through the convolution
        self.conv = Conv2d(in_channels=1, out_channels=8, kernel_size=3, stride=1, padding=1)
        self.relu = ReLU()
        self.pool = MaxPool2d(kernel_size=2, stride=2)
        self.flatten = Flatten()
        # 14*14*8: pooled 8-channel 14x14 feature map, flattened; 10 classes
        self.fc = Linear(in_features=14*14*8, out_features=10)

    def forward(self, x: Tensor) -> Tensor:
        """Return class logits for a batch of (N, 1, 28, 28) images."""
        x = self.conv(x)
        x = self.relu(x)
        x = self.pool(x)   # from (28x28) -> (14x14)
        x = self.flatten(x)
        x = self.fc(x)
        return x

def train(model: Module, criterion: Module, optimizer: SGD,
          train_data: Tuple[Tensor, Tensor], test_data: Tuple[Tensor, Tensor],
          num_epochs: int = 30, batch_size: int = 32,
          lr_decay: float = 0.9) -> Tuple[List[float], float, int]:
    """
    Train `model` with mini-batch SGD and exponential LR decay, keeping a
    snapshot of the parameters that score best on the test set.

    Args:
        model: network to train (modified in place).
        criterion: loss module, called as criterion(outputs, targets).
        optimizer: SGD instance wrapping model.parameters().
        train_data: (images, labels) Tensors; images are flat (N, 784).
        test_data: (images, labels) Tensors evaluated after each epoch.
        num_epochs: number of passes over the training set.
        batch_size: mini-batch size.
        lr_decay: per-epoch multiplicative learning-rate decay factor.

    Returns:
        (epoch_losses, best_accuracy, best_epoch); the model is left
        holding the best-scoring parameters.
    """
    train_images, train_labels = train_data
    n_samples = train_images.shape[0]
    indices = np.arange(n_samples)

    epoch_losses = []
    base_lr = optimizer.param_groups[0]['lr']

    best_accuracy = 0.0
    best_model_state = None
    best_epoch = 0

    for epoch in range(num_epochs):
        # Exponential learning-rate decay, applied to all param groups
        current_lr = base_lr * (lr_decay ** epoch)
        for group in optimizer.param_groups:
            group['lr'] = current_lr

        np.random.shuffle(indices)
        total_loss = 0.0
        correct = 0
        num_batches = (n_samples + batch_size - 1) // batch_size

        start_time = time.time()
        for i in range(num_batches):
            batch_start_time = time.time()

            start_idx = i * batch_size
            end_idx = min(start_idx + batch_size, n_samples)
            batch_indices = indices[start_idx:end_idx]

            # Data preparation: reshape flat vectors to NCHW (N, 1, 28, 28)
            data_prep_start = time.time()
            inputs = Tensor(train_images.data[batch_indices].reshape(-1, 1, 28, 28))  # 1 channel
            targets = Tensor(train_labels.data[batch_indices])
            data_prep_end = time.time()

            # Forward pass
            forward_start = time.time()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            forward_end = time.time()

            # Backward pass and parameter update
            backward_start = time.time()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            backward_end = time.time()

            total_loss += loss.data.item() if np.isscalar(loss.data) else loss.data.sum()
            predictions = np.argmax(outputs.data, axis=1)
            correct += np.sum(predictions == targets.data)

            batch_end_time = time.time()

            print(f"Epoch {epoch+1}, Batch {i+1}/{num_batches} — "
                  f"Data prep: {(data_prep_end - data_prep_start):.4f}s, "
                  f"Forward: {(forward_end - forward_start):.4f}s, "
                  f"Backward: {(backward_end - backward_start):.4f}s, "
                  f"Total batch: {(batch_end_time - batch_start_time):.4f}s, "
                  f"Loss: {loss.data:.4f}")

            # Progress summary roughly every 10% of an epoch
            if (i+1) % max(1, num_batches // 10) == 0:
                batch_acc = np.sum(predictions == targets.data) / len(targets.data)
                print(f"Epoch {epoch+1}/{num_epochs}, Batch {i+1}/{num_batches}, Loss: {loss.data:.4f}, Acc: {batch_acc:.4f}")

        avg_loss = total_loss / num_batches
        train_acc = correct / n_samples
        epoch_losses.append(avg_loss)

        current_accuracy = evaluate(model, test_data, batch_size)

        # Snapshot parameters whenever test accuracy improves
        if current_accuracy > best_accuracy:
            best_accuracy = current_accuracy
            best_epoch = epoch + 1
            best_model_state = {name: param.data.data.copy() for name, param in model.named_parameters()}

        end_time = time.time()
        print(f"Epoch {epoch+1}/{num_epochs} completed, Avg Loss: {avg_loss:.4f}, Train Acc: {train_acc:.4f}, Test Acc: {current_accuracy:.4f}, LR: {current_lr:.6f}, Time: {end_time-start_time:.2f}s")
        print(f"Best Accuracy so far: {best_accuracy:.4f} (Epoch {best_epoch})")

    # Restore the best-performing parameters.
    # Fix: guard against best_model_state being None (e.g. num_epochs == 0,
    # or accuracy never exceeding 0.0), which previously crashed here.
    if best_model_state is not None:
        for name, param in model.named_parameters():
            if name in best_model_state:
                if isinstance(param, Parameter):
                    param.data = Tensor(best_model_state[name], requires_grad=True)
                else:
                    param.data = best_model_state[name]

    return epoch_losses, best_accuracy, best_epoch


def evaluate(model: Module, test_data: Tuple[Tensor, Tensor], batch_size: int = 32) -> float:
    """
    Compute the classification accuracy of `model` on `test_data`.

    Args:
        model: trained network returning per-class logits.
        test_data: (images, labels) Tensors; images are flat (N, 784).
        batch_size: evaluation mini-batch size.

    Returns:
        Fraction of samples classified correctly; 0.0 for an empty set.
    """
    test_images, test_labels = test_data
    n_samples = test_images.shape[0]
    # Fix: an empty test set previously caused ZeroDivisionError below
    if n_samples == 0:
        return 0.0
    num_batches = (n_samples + batch_size - 1) // batch_size

    correct = 0
    total = 0

    for i in range(num_batches):
        start_idx = i * batch_size
        end_idx = min(start_idx + batch_size, n_samples)
        # Reshape flat 784-vectors into NCHW (N, 1, 28, 28) for the CNN
        inputs = Tensor(test_images.data[start_idx:end_idx].reshape(-1, 1, 28, 28))
        targets = test_labels.data[start_idx:end_idx]

        outputs = model(inputs)
        predictions = np.argmax(outputs.data, axis=1)
        correct += np.sum(predictions == targets)
        total += len(targets)

    accuracy = correct / total
    return accuracy



def main() -> None:
    """End-to-end script: load data, train the CNN, evaluate, save the model."""
    # Load the dataset (returned as Tensor objects)
    train_data, test_data = load_mnist()

    # Build the CNN model
    model = CNNMnist()

    # Loss function and optimizer
    criterion = CrossEntropyLoss()
    optimizer = SGD(model.parameters(), lr=0.01, momentum=0.9)

    # Train
    print("开始训练 CNN 模型...")
    losses, best_accuracy, best_epoch = train(
        model, criterion, optimizer,
        train_data, test_data,
        num_epochs=10, batch_size=64, lr_decay=0.99
    )

    # Final evaluation (train() restores the best parameters on return)
    print("评估最佳模型...")
    accuracy = evaluate(model, test_data)
    print(f"最佳模型在测试集上的准确率为: {accuracy:.4f} (第 {best_epoch} 轮)")

    # Fix: the original printed "saved" without ever writing a file.
    # Persist the best parameters as plain numpy arrays.
    model_file = os.path.join(os.path.dirname(__file__), "models", "best_mnist_cnn.pkl")
    os.makedirs(os.path.dirname(model_file), exist_ok=True)
    with open(model_file, 'wb') as f:
        pickle.dump({name: param.data.data.copy() for name, param in model.named_parameters()}, f)
    print(f"最佳模型已保存至: {model_file}")

if __name__ == "__main__":
    main()



