# Copyright (c) 2024 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#      http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time
import warnings
from copy import deepcopy
from pathlib import Path
from typing import List, Tuple
import openvino as ov
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from fastdownload import FastDownload
from torch.jit import TracerWarning

from nncf import compress_weights, CompressWeightsMode
import nncf
import nncf.torch
from nncf.common.logging.track_progress import track

# Silence noisy torch.jit tracing warnings and generic UserWarnings emitted
# during model tracing/export so the example's console output stays readable.
warnings.filterwarnings("ignore", category=TracerWarning)
warnings.filterwarnings("ignore", category=UserWarning)

# Model / training hyper-parameters for the Tiny ImageNet example.
BASE_MODEL_NAME = "resnet18"
IMAGE_SIZE = 64  # Tiny ImageNet images are 64x64
BATCH_SIZE = 128
TRAINING_EPOCHS = 2

# Filesystem layout and remote artifacts.
ROOT = Path(__file__).parent.resolve()  # directory containing this script
BEST_CKPT_NAME = "resnet18_int8_best.pt"
CHECKPOINT_URL = (
    "https://storage.openvinotoolkit.org/repositories/nncf/openvino_notebook_ckpts/302_resnet18_fp32_v1.pth"
)
DATASET_URL = "http://cs231n.stanford.edu/tiny-imagenet-200.zip"
DATASET_PATH = "./nncf/datasets"


def download_dataset() -> Path:
    """Download and extract the Tiny ImageNet archive; return the extracted path."""
    fetcher = FastDownload(base=DATASET_PATH, archive="downloaded", data="extracted")
    return fetcher.get(DATASET_URL)


def load_checkpoint(model: torch.nn.Module) -> Tuple[torch.nn.Module, float]:
    """Load pre-trained FP32 weights into *model* from CHECKPOINT_URL.

    The checkpoint is fetched via torch.hub (cached locally, CPU map location).

    Args:
        model: Model whose state dict will be populated in place.

    Returns:
        The same model plus the top-1 accuracy ("acc1") stored in the checkpoint.
        (The original annotation claimed a bare Module, but a tuple is returned.)
    """
    checkpoint = torch.hub.load_state_dict_from_url(CHECKPOINT_URL, map_location=torch.device("cpu"), progress=False)
    model.load_state_dict(checkpoint["state_dict"])
    return model, checkpoint["acc1"]


def get_resnet18_model(device: torch.device) -> torch.nn.Module:
    """Build a ResNet-18 with a 200-class head (Tiny ImageNet) on *device*."""
    tiny_imagenet_classes = 200  # torchvision default head has 1000 classes (ImageNet)
    resnet = models.resnet18(weights=None)
    # Swap the final fully-connected layer for the Tiny ImageNet class count.
    resnet.fc = nn.Linear(in_features=512, out_features=tiny_imagenet_classes, bias=True)
    # Module.to() moves in place and returns the same module.
    return resnet.to(device)


def train_epoch(
        train_loader: torch.utils.data.DataLoader,
        model: torch.nn.Module,
        criterion: torch.nn.Module,
        optimizer: torch.optim.Optimizer,
        device: torch.device,
):
    """Run one fine-tuning epoch of *model* over *train_loader*."""
    model.train()  # enable training-mode behavior (dropout, batch-norm updates)

    for batch_images, batch_labels in track(train_loader, total=len(train_loader), description="Fine tuning:"):
        batch_images = batch_images.to(device)
        batch_labels = batch_labels.to(device)

        # Forward pass and loss.
        predictions = model(batch_images)
        batch_loss = criterion(predictions, batch_labels)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()


def validate(val_loader: torch.utils.data.DataLoader, model: torch.nn.Module, device: torch.device) -> float:
    """Return the mean per-batch top-1 accuracy (percent) of *model* on *val_loader*."""
    model.eval()  # evaluation mode: freeze dropout / batch-norm statistics

    accuracy_sum = 0.0
    with torch.no_grad():
        for batch_images, batch_labels in track(val_loader, total=len(val_loader), description="Validation:"):
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)

            predictions = model(batch_images)

            # accuracy() returns a one-element list for topk=(1,).
            [batch_top1] = accuracy(predictions, batch_labels, topk=(1,))
            accuracy_sum += batch_top1.item()

    # NOTE(review): len(val_loader) is the number of *batches*, so this is a
    # mean of per-batch accuracies (a short final batch weighs the same as a
    # full one) — preserved as-is from the original implementation.
    return accuracy_sum / len(val_loader)


def accuracy(output: torch.Tensor, target: torch.Tensor, topk: Tuple[int, ...] = (1,)) -> List[torch.Tensor]:
    """Compute top-k accuracy (in percent) for each requested k.

    Args:
        output: Class scores/logits of shape (batch, num_classes).
        target: Ground-truth class indices of shape (batch,).
        topk: The k values to evaluate (e.g. (1, 5)).

    Returns:
        One single-element tensor per k, each holding an accuracy in [0, 100].

    Note:
        The *target* annotation previously read ``torch.tensor`` (the factory
        function); it is corrected here to the ``torch.Tensor`` type.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # Indices of the maxk highest-scoring classes per sample: (batch, maxk).
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()  # (maxk, batch): row i holds everyone's (i+1)-th guess
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # A sample is correct at k if the target appears among its top-k guesses.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res


def create_data_loaders():
    """Download/prepare Tiny ImageNet and build train, validation, and calibration loaders."""
    dataset_path = download_dataset()

    prepare_tiny_imagenet_200(dataset_path)
    print(f"Successfully downloaded and prepared dataset at: {dataset_path}")

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    train_transform = transforms.Compose(
        [
            transforms.Resize(IMAGE_SIZE),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]
    )
    val_transform = transforms.Compose(
        [
            transforms.Resize(IMAGE_SIZE),
            transforms.ToTensor(),
            normalize,
        ]
    )

    train_dataset = datasets.ImageFolder(dataset_path / "train", train_transform)
    val_dataset = datasets.ImageFolder(dataset_path / "val", val_transform)

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=0, pin_memory=True, sampler=None
    )

    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=0, pin_memory=True
    )

    # Separate loader with batch size 1: calibration with batches > 1
    # is not supported yet.
    calibration_dataset = torch.utils.data.DataLoader(
        val_dataset, batch_size=1, shuffle=False, num_workers=0, pin_memory=True
    )

    return train_loader, val_loader, calibration_dataset


def prepare_tiny_imagenet_200(dataset_dir: Path):
    """Reorganize the Tiny ImageNet validation split into ImageFolder layout.

    The archive ships validation images flat in ``val/images`` with labels in
    ``val/val_annotations.txt``.  This moves every image into its own
    ``val/<class_label>/`` subdirectory (matching the train split layout),
    then removes the annotations file and the emptied ``images`` directory.
    A no-op if ``val/images`` is absent (already converted).

    Args:
        dataset_dir: Root of the extracted tiny-imagenet-200 dataset.
    """
    val_data_dir = dataset_dir / "val"
    val_images_dir = val_data_dir / "images"
    if not val_images_dir.exists():
        return  # already in ImageFolder layout

    val_annotations_file = val_data_dir / "val_annotations.txt"
    with open(val_annotations_file, "r", encoding="utf-8") as f:
        # Each line: <filename>\t<class_label>\t<bbox coords...>; keep the first two fields.
        val_annotation_data = [line.split("\t")[:2] for line in f]

    for image_filename, image_label in val_annotation_data:
        to_image_dir = val_data_dir / image_label
        # exist_ok avoids the racy exists()-then-mkdir pattern of the original.
        to_image_dir.mkdir(exist_ok=True)
        (val_images_dir / image_filename).rename(to_image_dir / image_filename)

    val_annotations_file.unlink()
    val_images_dir.rmdir()  # must be empty now that all images are moved


def get_model_size(ir_path: str, m_type: str = "Mb") -> float:
    """Return the total on-disk size of an OpenVINO IR (.xml + .bin pair).

    Args:
        ir_path: Path to the IR ``.xml`` file; the matching ``.bin`` file is
            expected alongside it with the same stem.
        m_type: Unit of the result: "bytes", "Kb", or "Mb" (default).

    Returns:
        Combined size of the .xml and .bin files in the requested unit.

    Raises:
        ValueError: If *m_type* is not a supported unit (the original silently
            fell through to Mb for unknown units).
    """
    divisors = {"bytes": 1, "Kb": 1024, "Mb": 1024 ** 2}
    if m_type not in divisors:
        raise ValueError(f"Unsupported size unit: {m_type!r}; expected one of {sorted(divisors)}")
    xml_size = os.path.getsize(ir_path)
    bin_size = os.path.getsize(os.path.splitext(ir_path)[0] + ".bin")
    return (xml_size + bin_size) / divisors[m_type]


def main():
    """End-to-end NNCF weight-compression demo on ResNet-18 / Tiny ImageNet.

    Loads a pre-trained FP32 model, compresses its weights to INT8 symmetric,
    compares validation accuracy and timing of both models, exports both to
    OpenVINO IR, and reports the on-disk IR sizes.
    """
    torch.manual_seed(0)  # reproducible shuffling / example input
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using {device} device")

    ###############################################################################
    # Step 1: Prepare model and dataset
    print(os.linesep + "[Step 1] Prepare model and dataset")

    model = get_resnet18_model(device)
    model, acc1_fp32 = load_checkpoint(model)

    train_loader, val_loader, calibration_dataset = create_data_loaders()

    def transform_fn(data_item):
        # The dataloader yields (image, label); NNCF only needs the model input.
        return data_item[0].to(device)

    quantization_dataset = nncf.Dataset(calibration_dataset, transform_fn)

    ###############################################################################
    # Step 2: Compress weights to INT8 symmetric.
    # Backend PyTorch: only INT8 support.
    # Backend OpenVINO: INT4_SYM stands for a mixed-precision weights quantization
    # with 4-bit integer as a primary precision.
    compress_model = compress_weights(deepcopy(model), mode=CompressWeightsMode.INT8_SYM, dataset=quantization_dataset)

    ###############################################################################
    # Step 3: Validate accuracy of the original and the compressed model.
    t1 = time.perf_counter()
    acc_uncompress = validate(val_loader, model, device)
    t2 = time.perf_counter()
    acc_compress = validate(val_loader, compress_model, device)
    t3 = time.perf_counter()
    # Reference numbers from a previous run:
    #   Accuracy@1 of uncompress fp32 model: 55.36985759493671, time cost: 7.6188611s
    #   Accuracy@1 of compress int8 model: 55.270965189873415, time cost: 9.425898799999999s
    print(f"Accuracy@1 of uncompress fp32 model: {acc_uncompress}, time cost: {t2 - t1}s")
    print(f"Accuracy@1 of compress int8 model: {acc_compress}, time cost: {t3 - t2}s")

    ###############################################################################
    # Step 4: Export both models to OpenVINO IR.
    input_shape = (1, 3, IMAGE_SIZE, IMAGE_SIZE)
    example_input = torch.randn(*input_shape).cpu()

    # Export the uncompressed model.
    uncompress_ir_path = f"{ROOT}/{BASE_MODEL_NAME}_uncompress.xml"
    ov_model = ov.convert_model(model.cpu(), example_input=example_input, input=input_shape)
    ov.save_model(ov_model, uncompress_ir_path, compress_to_fp16=False)
    print(f"Original model path: {uncompress_ir_path}")

    # Export the compressed model.
    compress_ir_path = f"{ROOT}/{BASE_MODEL_NAME}_compress.xml"
    ov_model = ov.convert_model(compress_model.cpu(), example_input=example_input, input=input_shape)
    ov.save_model(ov_model, compress_ir_path, compress_to_fp16=False)
    # Fixed copy-paste bug: this previously printed "Original model path".
    print(f"Compressed model path: {compress_ir_path}")

    # Reference sizes from a previous run:
    #   uncompress model size: 43.06930065155029MB
    #   compress model size: 10.956914901733398MB
    print("uncompress model size: {}MB".format(get_model_size(uncompress_ir_path)))
    print("compress model size: {}MB".format(get_model_size(compress_ir_path)))


# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
