﻿# import argparse  # 用于解析命令行参数
# import torch
# import torch.optim as optim  # PyTorch中的优化器
# from torch.utils.data import DataLoader  # PyTorch中用于加载数据的工具
# from tqdm import tqdm  # 用于在循环中显示进度条
# from torch.optim.lr_scheduler import CosineAnnealingLR  # 余弦退火学习率调度器
# import torch.nn.functional as F  # PyTorch中的函数库
# from torchvision import datasets  # PyTorch中的视觉数据集
# import torchvision.transforms as transforms  # PyTorch中的数据变换操作

import os
import math
import random
import argparse
from time import time
import glob
import sys
from pathlib import Path
from typing import Iterable, Optional
import numpy as np
import torch
import torch.multiprocessing

from Pytorch.ResNet.resnet18 import ResNet18

torch.multiprocessing.set_sharing_strategy("file_system")
import torch.nn as nn
import torchvision

from torch.utils.tensorboard import SummaryWriter
from PIL import Image

import timm

from timm.utils import accuracy
from Pytorch.ResNet.util import misc
from Pytorch.ResNet.util.misc import NativeScalerWithGradNormCount as NativeScaler

txtName = "log.txt"


def printf(str1):
    """Echo *str1* to stdout and append the same line to the log file."""
    with open(txtName, "a", encoding="utf-8") as log_file:
        # file=None targets stdout; the second pass appends to the log.
        for stream in (None, log_file):
            print(str1, file=stream)


def get_args_parser():
    """Build the argparse parser for training / inference options.

    Returns:
        argparse.ArgumentParser: parser with all options registered
        (``add_help=False`` so it can be composed into a parent parser).
    """
    parser = argparse.ArgumentParser("MAE pre-training", add_help=False)
    parser.add_argument(
        "--batch_size",
        default=72,
        type=int,
        help="Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)",
    )
    parser.add_argument("--epochs", default=400, type=int)
    parser.add_argument(
        "--accum_iter",
        default=1,
        type=int,
        help="Accumulate gradient iterations (for increasing the effective batch size under memory constraints)",
    )
    # Model parameters
    parser.add_argument("--input_size", default=128, type=int, help="images input size")
    # Optimizer parameters
    # NOTE: help text previously claimed "default:0.05"; the actual default
    # is 0.0001 as below.
    parser.add_argument(
        "--weight_decay",
        type=float,
        default=0.0001,
        help="weight decay (default: 0.0001)",
    )
    parser.add_argument(
        "--lr",
        type=float,
        default=0.0001,
        metavar="LR",
        help="learning rate (absolute lr)",
    )
    # This is the dataset root (joined with "train"/"test" in build_dataset),
    # not a save path — the old help string was a copy-paste mistake.
    parser.add_argument(
        "--root_path",
        default="C:\\Users\\MaxShannon\\source\\repos\\Ai\\2048\\SysHandle\\Pytorch\\ResNet\\fruit",
        help="dataset root directory containing train/ and test/ subfolders",
    )
    parser.add_argument(
        "--output_dir",
        default="./output_dir_pretrained",
        help="path where to save, empty for no saving",
    )
    parser.add_argument(
        "--log_dir",
        default="./output_dir_pretrained",
        help="path where to tensorboard log",
    )
    parser.add_argument(
        "--resume",
        default="output_dir_pretrained/checkpoint-356.pth",
        help="resume from checkpoint",
    )
    parser.add_argument(
        "--start_epoch", default=0, type=int, metavar="N", help="start epoch"
    )
    parser.add_argument("--num_workers", default=5, type=int)
    parser.add_argument(
        "--pin_mem",
        action="store_true",
        help="Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.",
    )
    parser.add_argument("--no_pin_mem", action="store_false", dest="pin_mem")
    parser.set_defaults(pin_mem=True)
    return parser


def build_transform(is_train, args):
    """Build the torchvision preprocessing pipeline.

    Training gets flip / perspective / blur augmentations; evaluation only
    resizes and converts to tensor.  No mean/std normalization is applied
    (the commented-out Normalize step was intentionally left out).
    """
    size = (args.input_size, args.input_size)

    if not is_train:
        printf("eval transform")
        return torchvision.transforms.Compose(
            [
                torchvision.transforms.Resize(size),
                torchvision.transforms.ToTensor(),
            ]
        )

    printf("train transform")
    augmentations = [
        # Fixed-size resize so batches stack cleanly.
        torchvision.transforms.Resize(size),
        # Random flips for orientation invariance.
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.RandomVerticalFlip(),
        # Perspective warp simulates viewpoint changes (always applied, p=1.0).
        torchvision.transforms.RandomPerspective(distortion_scale=0.6, p=1.0),
        # Mild-to-strong blur to simulate defocus.
        torchvision.transforms.GaussianBlur(kernel_size=(5, 9), sigma=(0.1, 5)),
        torchvision.transforms.ToTensor(),
    ]
    return torchvision.transforms.Compose(augmentations)


def build_dataset(is_train, args):
    """Create an ImageFolder dataset for the train or test split under args.root_path."""
    split = "train" if is_train else "test"
    path = os.path.join(args.root_path, split)

    dataset = torchvision.datasets.ImageFolder(
        path, transform=build_transform(is_train, args)
    )
    classes, class_to_idx = dataset.find_classes(path)

    printf(f"finding classes from {path}:\t{classes}")
    printf(f"mapping classes from {path} to indexes:\t{class_to_idx}")

    return dataset


# Module-level compute device: GPU when available, otherwise CPU.
# evaluate()/train_one_epoch() move batches here; main() uses it for the model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


@torch.no_grad()
def evaluate(data_loader, model, device):
    """Evaluate *model* over *data_loader* and return averaged metrics.

    Returns:
        dict mapping meter name ("loss", "acc1", "acc5") to its globally
        averaged value across all processes.
    """
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = misc.MetricLogger(delimiter="  ")
    header = "Test:"

    model.eval()  # switch to evaluation mode

    for batch in metric_logger.log_every(data_loader, 10, header):
        images = batch[0].to(device, non_blocking=True)
        target = batch[-1].to(device, non_blocking=True)

        logits = model(images)
        loss = criterion(logits, target)
        # Softmax is monotonic, so top-k accuracy is unchanged; applied for parity
        # with the original implementation.
        probabilities = torch.nn.functional.softmax(logits, dim=-1)
        acc1, acc5 = accuracy(probabilities, target, topk=(1, 5))

        n = images.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters["acc1"].update(acc1.item(), n=n)
        metric_logger.meters["acc5"].update(acc5.item(), n=n)

    # Gather the stats from all processes before reporting.
    metric_logger.synchronize_between_processes()
    printf(
        "* Acc@1{top1.global_avg:.3f} Acc@5{top5.global_avg:.3f} loss {losses.global_avg:.3f}".format(
            top1=metric_logger.acc1,
            top5=metric_logger.acc5,
            losses=metric_logger.loss,
        )
    )

    return {name: meter.global_avg for name, meter in metric_logger.meters.items()}


def train_one_epoch(
    model: torch.nn.Module,
    criterion: torch.nn.Module,
    data_loader: Iterable,
    optimizer: torch.optim.Optimizer,
    device: torch.device,
    epoch: int,
    loss_scaler,
    max_norm: float = 0,
    log_writer=None,
    args=None,
):
    """Run one training epoch over *data_loader*.

    Args:
        model: network to train (switched to train mode here).
        criterion: loss function (e.g. CrossEntropyLoss).
        data_loader: sized iterable yielding (samples, targets) batches.
        optimizer: optimizer whose first param group's lr is overwritten
            with ``args.lr`` every step (warmup schedule left disabled).
        device: device the batches are moved to.
        epoch: epoch index, used only for logging.
        loss_scaler: NativeScaler-style callable that performs backward and
            (when ``update_grad``) the optimizer step.
        max_norm: gradient-clipping norm forwarded to ``loss_scaler``.
        log_writer: optional TensorBoard SummaryWriter.
        args: namespace providing ``lr`` and ``accum_iter``; gradients are
            accumulated for ``accum_iter`` iterations per optimizer update.
    """
    model.train(True)
    accum_iter = args.accum_iter
    if log_writer is not None:
        printf("log_dir:{}".format(log_writer.log_dir))
    for data_iter_step, (samples, targets) in enumerate(data_loader):
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        outputs = model(samples)
        # warmup_lr=args.lr*(min(1.0,epoch/2.))  # warmup disabled: constant lr
        warmup_lr = args.lr
        optimizer.param_groups[0]["lr"] = warmup_lr
        loss = criterion(outputs, targets)
        loss_value = loss.item()
        # FIX: check for NaN/Inf BEFORE backprop/step — previously the bad
        # loss was already applied to the weights before the process exited.
        if not math.isfinite(loss_value):
            printf("Loss is {}, stopping training".format(loss_value))
            sys.exit(1)
        # Scale down so gradients accumulated over accum_iter steps average out.
        loss /= accum_iter
        loss_scaler(
            loss,
            optimizer,
            clip_grad=max_norm,
            parameters=model.parameters(),
            create_graph=False,
            update_grad=(data_iter_step + 1) % accum_iter == 0,
        )
        if (data_iter_step + 1) % accum_iter == 0:
            optimizer.zero_grad()
        if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
            # x-axis in thousandths of an epoch so runs with different batch
            # sizes land on a comparable scale.
            epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
            log_writer.add_scalar("loss", loss_value, epoch_1000x)
            log_writer.add_scalar("lr", warmup_lr, epoch_1000x)
            # Log the scalar loss value (previously printed the tensor repr).
            printf(
                f"Epoch:{epoch},Step:{data_iter_step},Loss:{loss_value},Lr:{warmup_lr}"
            )


def main(args, mode="train", test_image_path=""):
    """Train the fruit classifier or classify a single image.

    Args:
        args: namespace produced by ``get_args_parser``.
        mode: "train" runs the full training loop; any other value runs
            single-image inference on ``test_image_path``.
        test_image_path: image file to classify when ``mode != "train"``.
    """
    printf(f"{mode} mode...")
    if mode == "train":
        # Build the train/test datasets and their loaders.
        dataset_train = build_dataset(is_train=True, args=args)
        dataset_val = build_dataset(is_train=False, args=args)

        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)

        data_loader_train = torch.utils.data.DataLoader(
            dataset_train,
            sampler=sampler_train,
            batch_size=args.batch_size,
            num_workers=args.num_workers,
            pin_memory=args.pin_mem,
            drop_last=True,
        )

        data_loader_val = torch.utils.data.DataLoader(
            dataset_val,
            sampler=sampler_val,
            batch_size=args.batch_size,
            num_workers=args.num_workers,
            pin_memory=args.pin_mem,
            drop_last=False,
        )

        # Build the model and move it to the compute device BEFORE creating
        # the optimizer, so optimizer state lives on the same device as the
        # parameters.  FIX: the model previously stayed on the CPU while
        # evaluate()/train_one_epoch() fed it CUDA tensors, which crashes on
        # GPU machines.
        model = ResNet18(num_classes=36)
        model.to(device)

        n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
        printf("number of trainable params(M):%.2f" % (n_parameters / 1.0e6))

        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.AdamW(
            model.parameters(), lr=args.lr, weight_decay=args.weight_decay
        )
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = SummaryWriter(log_dir=args.log_dir)
        loss_scaler = NativeScaler()
        # Resume from args.resume if set (no-op when --resume is empty).
        misc.load_model(
            args=args,
            model_without_ddp=model,
            optimizer=optimizer,
            loss_scaler=loss_scaler,
        )
        for epoch in range(args.start_epoch, args.epochs):
            printf(f"Epoch {epoch}")
            printf(f"length of data_loader_train is {len(data_loader_train)}")
            # Always true with modulus 1; raise it to evaluate less often.
            if epoch % 1 == 0:
                printf("Evaluating...")
                model.eval()
                test_stats = evaluate(data_loader_val, model, device)
                printf(
                    f"Accuracy of the network on the {len(dataset_val)} test images:{test_stats['acc1']:.1f}%"
                )
                if log_writer is not None:
                    log_writer.add_scalar("perf/test_acc1", test_stats["acc1"], epoch)
                    log_writer.add_scalar("perf/test_acc5", test_stats["acc5"], epoch)
                    log_writer.add_scalar("perf/test_loss", test_stats["loss"], epoch)
                model.train()

            printf("Training...")
            # NOTE: the logged epoch is offset by one from the loop index.
            train_one_epoch(
                model,
                criterion,
                data_loader_train,
                optimizer,
                device,
                epoch + 1,
                loss_scaler,
                None,
                log_writer=log_writer,
                args=args,
            )

            if args.output_dir:
                printf("Saving checkpoints...")
                misc.save_model(
                    args=args,
                    model=model,
                    model_without_ddp=model,
                    optimizer=optimizer,
                    loss_scaler=loss_scaler,
                    epoch=epoch,
                )

    else:
        # Inference: rebuild the model, restore the checkpoint, classify one image.
        model = ResNet18(num_classes=36)
        model.to(device)  # FIX: keep model and input tensor on the same device
        # Label -> index mapping; must match the class order used at training
        # time (ImageFolder sorts class folder names alphabetically).
        class_dict = {
            "apple": 0,
            "banana": 1,
            "beetroot": 2,
            "bell pepper": 3,
            "cabbage": 4,
            "capsicum": 5,
            "carrot": 6,
            "cauliflower": 7,
            "chilli pepper": 8,
            "corn": 9,
            "cucumber": 10,
            "eggplant": 11,
            "garlic": 12,
            "ginger": 13,
            "grapes": 14,
            "jalepeno": 15,
            "kiwi": 16,
            "lemon": 17,
            "lettuce": 18,
            "mango": 19,
            "onion": 20,
            "orange": 21,
            "paprika": 22,
            "pear": 23,
            "peas": 24,
            "pineapple": 25,
            "pomegranate": 26,
            "potato": 27,
            "raddish": 28,
            "soy beans": 29,
            "spinach": 30,
            "sweetcorn": 31,
            "sweetpotato": 32,
            "tomato": 33,
            "turnip": 34,
            "watermelon": 35,
        }

        n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)

        printf("number of trainable params (M): %.2f" % (n_parameters / 1.0e6))

        optimizer = torch.optim.AdamW(
            model.parameters(), lr=args.lr, weight_decay=args.weight_decay
        )
        os.makedirs(args.log_dir, exist_ok=True)
        loss_scaler = NativeScaler()

        misc.load_model(
            args=args,
            model_without_ddp=model,
            optimizer=optimizer,
            loss_scaler=loss_scaler,
        )

        model.eval()

        image = Image.open(test_image_path).convert("RGB")
        image = image.resize(
            (args.input_size, args.input_size), Image.Resampling.LANCZOS
        )
        # Add the batch dimension and move the input to the model's device.
        image = torchvision.transforms.ToTensor()(image).unsqueeze(0).to(device)
        with torch.no_grad():
            output = model(image)
        output = torch.nn.functional.softmax(output, dim=-1)
        class_idx = torch.argmax(output, dim=1)[0]
        score = torch.max(output, dim=1)[0][0]
        printf(f"image path is{test_image_path}")
        # Use .item() so the reverse lookup compares plain ints, not tensors.
        printf(
            f"score is {score.item()}, class id is {class_idx.item()}, class name is {list(class_dict.keys())[list(class_dict.values()).index(class_idx.item())]}"
        )


if __name__ == "__main__":
    parser = get_args_parser()
    args = parser.parse_args()
    if args.output_dir:
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)

    # Mode is hard-coded here rather than taken from the CLI.
    mode = "infer"  # infer or train
    if mode == "train":
        main(args, mode=mode)
    else:
        # Test-only glob: adjust the pattern to your own dataset layout.
        test_images = glob.glob("./Pytorch/ResNet/Fruit/test/*/*.jpg")
        for test_image in test_images:
            printf("\n")
            main(args, mode=mode, test_image_path=test_image)