import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, random_split
from torchvision import transforms
from PIL import Image
from model.vit import ViT
from model.utils import batch_image_segmentation
from tqdm import tqdm
from SE import ConvNetWithSELiteOptimized as cnn
#from torch.utils.tensorboard import SummaryWriter

class CustomDataset(Dataset):
    """Image-classification dataset laid out as ``root_dir/<label>/<image>``.

    Each immediate subdirectory of ``root_dir`` is one class label; every
    image file inside it becomes one sample. Samples are returned as
    ``(image_tensor, label_index)`` pairs.
    """

    # Accepted image extensions (compared case-insensitively).
    _IMAGE_EXTS = ('.png', '.jpg', '.jpeg')

    def __init__(self, root_dir, transform=None):
        self.root_dir = root_dir
        self.transform = transform
        self.data = []          # list of (image_path, label_name) pairs
        self.label_to_idx = {}  # label name -> integer class index

        # Sort directory listings: os.listdir order is unspecified, so without
        # sorting the label->index mapping (and hence any saved model's class
        # ordering) would differ between runs and platforms.
        for label in sorted(os.listdir(root_dir)):
            label_dir = os.path.join(root_dir, label)
            if os.path.isdir(label_dir) and label != '__pycache__':
                if label not in self.label_to_idx:
                    self.label_to_idx[label] = len(self.label_to_idx)
                for f in sorted(os.listdir(label_dir)):
                    # Case-insensitive check so e.g. ".JPG" files are not skipped.
                    if f.lower().endswith(self._IMAGE_EXTS):
                        self.data.append((os.path.join(label_dir, f), label))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        image_path, label = self.data[idx]
        image = Image.open(image_path).convert('RGB')  # force 3-channel RGB
        if self.transform:
            image = self.transform(image)
        return image, self.label_to_idx[label]
    
# Preprocessing pipeline: resize to a fixed 256x256, convert to a float
# tensor, and normalize with the standard ImageNet channel statistics.
transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Root directory of the training data (one subfolder per class label).
dataset_dir = r'data'

# Build the full dataset from the directory tree.
dataset = CustomDataset(root_dir=dataset_dir, transform=transform)

# 80/20 random train/validation split.
# NOTE(review): random_split is unseeded here, so the split differs between
# runs — pass a seeded torch.Generator if reproducibility matters.
train_size = int(0.8 * len(dataset))
val_size = len(dataset) - train_size
train_dataset, val_dataset = random_split(dataset, [train_size, val_size])

# Batched loaders; only the training set is shuffled.
# NOTE(review): num_workers=4 at module level needs an
# `if __name__ == "__main__":` guard under the spawn start method (Windows/macOS).
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=4)
val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False, num_workers=4)

def train_model(model, train_loader, val_loader, criterion, optimizer, num_epochs=10):
    """Train ``model`` and print per-epoch train/validation loss and accuracy.

    Args:
        model: network producing class logits of shape (batch, num_classes).
        train_loader: DataLoader yielding (inputs, labels) training batches.
        val_loader: DataLoader yielding (inputs, labels) validation batches.
        criterion: loss function applied to (logits, labels).
        optimizer: optimizer stepping over ``model.parameters()``.
        num_epochs: number of full passes over ``train_loader``.
    """
    use_cuda = torch.cuda.is_available()
    for epoch in range(num_epochs):
        # ---- training phase ----
        model.train()
        running_loss = 0.0
        running_corrects = 0

        for inputs, labels in tqdm(train_loader):
            # Bug fix: move *both* tensors to the GPU. The original moved only
            # the labels, so model(inputs) fed CPU tensors to CUDA weights and
            # crashed whenever a GPU was available.
            if use_cuda:
                inputs, labels = inputs.to('cuda'), labels.to('cuda')

            outputs = model(inputs)
            loss = criterion(outputs, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Weight the mean batch loss by batch size so dividing by the
            # dataset size below yields a correct per-sample average (the last
            # batch may be smaller); this also makes the train accounting
            # consistent with the validation phase.
            running_loss += loss.item() * inputs.size(0)
            _, preds = torch.max(outputs, 1)
            running_corrects += torch.sum(preds == labels.data)

        epoch_loss = running_loss / len(train_loader.dataset)
        epoch_acc = running_corrects.double() / len(train_loader.dataset)

        print(f'Epoch {epoch+1}/{num_epochs} Train Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')

        # ---- validation phase ----
        model.eval()
        running_loss = 0.0
        running_corrects = 0

        with torch.no_grad():
            for inputs, labels in val_loader:
                # Same device-placement fix as in the training loop.
                if use_cuda:
                    inputs, labels = inputs.to('cuda'), labels.to('cuda')

                outputs = model(inputs)
                loss = criterion(outputs, labels)

                running_loss += loss.item() * inputs.size(0)
                _, preds = torch.max(outputs, 1)
                running_corrects += torch.sum(preds == labels.data)

        epoch_loss = running_loss / len(val_loader.dataset)
        epoch_acc = running_corrects.double() / len(val_loader.dataset)

        print(f'Epoch {epoch+1}/{num_epochs} Val Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')

def test_model(model, test_loader):
    """Evaluate ``model`` on ``test_loader`` and print the overall accuracy.

    Uses the module-level ``device`` for tensor placement; no gradients are
    computed during evaluation.
    """
    model.eval()
    running_corrects = 0

    with torch.no_grad():
        for batch_inputs, batch_labels in test_loader:
            batch_inputs = batch_inputs.to(device)
            batch_labels = batch_labels.to(device)

            # Forward pass; predicted class = index of the largest logit.
            logits = model(batch_inputs)
            preds = logits.argmax(dim=1)
            running_corrects += torch.sum(preds == batch_labels.data)

    total_acc = running_corrects.double() / len(test_loader.dataset)
    print(f'Test Acc: {total_acc:.4f}')
    
# Select the compute device once; the model lives here, and batches are moved
# to the matching device inside the train/test loops.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Model under training (ViT); the SE-based CNN alternative is kept for reference.
model = ViT().to(device)
#model=cnn().to(device)

# Standard multi-class setup: cross-entropy loss + Adam.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Train on the train/validation split built above.
train_model(model, train_loader, val_loader, criterion, optimizer, num_epochs=10)

# Bug fix: torch.save does not create missing directories, so ensure the
# checkpoint folder exists before writing (otherwise a whole training run is
# lost to a FileNotFoundError here).
os.makedirs('params', exist_ok=True)
torch.save(model.state_dict(), r'params/model.pth')

# Held-out test set (placeholder path — point this at real data before use).
test_dir = 'path/to/your/test/dataset'
test_dataset = CustomDataset(root_dir=test_dir, transform=transform)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4)

# Final evaluation on the test set.
test_model(model, test_loader)
