import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from pathlib import Path

class NumpyDataset(Dataset):
    """Dataset of ``.npy`` feature files organized in per-class subdirectories.

    Expects the layout ``data_dir/<class_name>/*.npy`` where ``<class_name>``
    is one of the keys of ``label_dict``. Each sample is loaded lazily in
    ``__getitem__`` and returned as a float tensor with a leading channel
    dimension, paired with its integer class label.
    """

    def __init__(self, data_dir):
        self.data_dir = data_dir
        root = Path(data_dir)
        # Class-name -> integer label mapping.
        self.label_dict = {"frown": 0, "open_eye": 1, "close_eye": 2, "open_mouth": 3}
        self.file_paths = []
        self.labels = []
        # Sort directory entries for a deterministic sample order
        # (os.listdir/iterdir order is arbitrary), and skip anything that is
        # not a known class directory (e.g. stray files like .DS_Store),
        # which previously raised KeyError.
        for cate_path in sorted(root.iterdir()):
            label = self.label_dict.get(cate_path.name)
            if label is None or not cate_path.is_dir():
                continue
            for file_path in sorted(cate_path.iterdir()):
                self.labels.append(label)
                self.file_paths.append(file_path)

    def __len__(self):
        """Return the number of samples found on disk."""
        return len(self.file_paths)

    def __getitem__(self, idx):
        """Return ``(tensor, label)`` for sample ``idx``.

        The tensor has shape ``(1, *data.shape)`` — a channel dimension is
        prepended so the sample can feed a 2-D convolution directly.
        """
        data = np.load(self.file_paths[idx])
        data = torch.from_numpy(data).float()
        data = data.unsqueeze(0)  # add channel dimension
        return data, self.labels[idx]

# Build the training dataset and its DataLoader.
train_dataset = NumpyDataset('points/train')  # replace with your data directory
train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True)
print(next(iter(train_dataloader))[0].shape)
# Build the validation dataset and its DataLoader.
val_dataset = NumpyDataset('points/val')  # replace with your data directory
val_dataloader = DataLoader(val_dataset, batch_size=32, shuffle=True)
# print(len(next(iter(train_dataloader))[0][0]))
print("训练集数量：", len(train_dataset), "\n验证集数量：", len(val_dataset))
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchsummary import summary
import torch
import torch.nn as nn

class LiteNet(nn.Module):
    """Lightweight CNN classifier for single-channel 480x640 inputs.

    Four conv/ReLU/maxpool stages halve the spatial size each time
    (480x640 -> 30x40), followed by a small fully-connected head.
    """

    def __init__(self, num_classes=4):
        super(LiteNet, self).__init__()
        # Build the feature extractor stage by stage; layer order matches
        # the channel progression 1 -> 8 -> 16 -> 8 -> 8.
        stages = []
        in_ch = 1
        for out_ch in (8, 16, 8, 8):
            stages.append(nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1))
            stages.append(nn.ReLU(inplace=True))
            stages.append(nn.MaxPool2d(kernel_size=2, stride=2))
            in_ch = out_ch
        self.features = nn.Sequential(*stages)
        # After four 2x2 poolings a 480x640 input is 30x40 with 8 channels.
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(8 * 30 * 40, 16),  # Adjust input size if needed
            nn.ReLU(inplace=True),
            nn.Linear(16, num_classes),
        )

    def forward(self, x):
        """Map a (N, 1, 480, 640) batch to (N, num_classes) logits."""
        return self.classifier(self.features(x))

# Select the device (GPU if available, otherwise CPU).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Instantiate the network and move it to the device.
model = LiteNet().to(device)
# Print a layer-by-layer summary for a (1, 480, 640) input.
summary(model, (1, 480, 640))
# Loss and optimizer.
criterion = nn.CrossEntropyLoss()  # mean cross-entropy over the batch
optimizer = optim.Adam(model.parameters(), lr=0.001)

num_epochs = 10  # number of training epochs
for epoch in range(num_epochs):
    # ---- Training phase ----
    model.train()
    train_loss = 0.0
    correct = 0
    for batch_idx, (data, target) in enumerate(train_dataloader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        # loss.item() is the MEAN over this batch; weight it by the batch
        # size so dividing by the dataset size below yields the true
        # per-sample mean (the last batch may be smaller than batch_size).
        train_loss += loss.item() * data.size(0)
        pred = output.argmax(dim=1, keepdim=True)  # predicted class per sample
        correct += pred.eq(target.view_as(pred)).sum().item()
    train_loss /= len(train_dataloader.dataset)
    train_accuracy = 100. * correct / len(train_dataloader.dataset)
    print(f"Epoch [{epoch+1}/{num_epochs}], Train Loss: {train_loss:.4f}, Train Accuracy: {train_accuracy:.2f}%")

    # ---- Validation phase ----
    model.eval()
    val_loss = 0.0
    correct = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for data, target in val_dataloader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # Weight the per-batch mean loss by batch size (see note above).
            val_loss += criterion(output, target).item() * data.size(0)
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()

    val_loss /= len(val_dataloader.dataset)  # per-sample mean loss
    val_accuracy = 100. * correct / len(val_dataloader.dataset)
    print(f"Epoch [{epoch+1}/{num_epochs}], Val Loss: {val_loss:.4f}, Val Accuracy: {val_accuracy:.2f}%")
