# Copyright 2020 Cloudera, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torchvision
import torchvision.transforms as T
import config_classical_features_oil as config
import torch_classical_features_oil_engine as torch_engine
from torch_classical_features_model_oil import ClassicalFeaturesEncoder
from tools.faissindex import FaissIndex

import numpy as np
import os


def get_ids_from_dataset(train_dataset):
    """Extract per-sample identifiers and labels from a torchvision dataset.

    Args:
        train_dataset: a ``torchvision.datasets.ImageFolder``, ``DatasetFolder``
            or ``VisionDataset`` instance.
    Returns:
        tuple: (image_links, image_labels) — identifiers (paths or raw data)
        and the matching labels, in dataset order.
    Raises:
        TypeError: if the dataset type is not one of the supported classes.
    """
    if isinstance(train_dataset, torchvision.datasets.ImageFolder):
        # .imgs is a list of (path, class_index) tuples
        datas = train_dataset.imgs
        image_links = [item[0] for item in datas]
        image_labels = [item[1] for item in datas]
    elif isinstance(train_dataset, torchvision.datasets.DatasetFolder):
        # DatasetFolder must be tested BEFORE VisionDataset: it is a subclass,
        # so the broader VisionDataset check would otherwise shadow this branch.
        image_links = train_dataset.samples
        image_labels = train_dataset.targets
    elif isinstance(train_dataset, torchvision.datasets.VisionDataset):
        image_links = train_dataset.data
        image_labels = train_dataset.targets
    else:
        # raising a bare string is invalid in Python 3; raise a real exception
        raise TypeError("unknown dataset type!")
    return image_links, image_labels



def create_index(image_dir, encoder_model, transforms, index_save_dir, deivce="cuda"):
    """Create a FAISS index from image features and save it to disc.

    Args:
        image_dir (str): path to directory of images whose features are added to the index
        encoder_model: feature-extraction model handed to the engine
        transforms: torchvision transforms applied to each image
        index_save_dir (str): directory to save index on disc.
        deivce (str): compute device ("cuda" or "cpu"). NOTE(review): parameter
            name is a typo ("device") kept for backward compatibility.
    Returns:
        lib.FaissIndex:  lib.FaissIndex object.
    """
    train_dataset = torchvision.datasets.ImageFolder(root=image_dir, transform=transforms)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=config.TRAIN_BATCH_SIZE, shuffle=False, drop_last=True
    )
    ids, labels = get_ids_from_dataset(train_dataset)
    train_features, train_labels = torch_engine.feature_extrat_step(
        encoder_model, train_loader, deivce
    )
    # drop_last=True may discard a trailing incomplete batch, leaving fewer
    # feature rows than dataset ids; truncate so ids stay aligned with rows
    # (same compensation create_index_basedon_features applies).
    ids = ids[:len(train_features)]
    index = FaissIndex(train_features.shape[1])
    index.add(train_features, id_strings=ids)
    index.save(index_save_dir)
    return index


def update_index(index, encoder_model, transforms, image_dir, index_save_dir, device="cuda"):
    """Update an existing lib.FaissIndex with features from a new image directory.

    Args:
        index (lib.FaissIndex): index to be updated
        encoder_model: feature-extraction model handed to the engine
        transforms: torchvision transforms applied to each image
        image_dir (str): directory of images to be added to index
        index_save_dir (str): directory to save index on disc.
        device (str): compute device ("cuda" or "cpu").
    """
    val_dataset = torchvision.datasets.ImageFolder(root=image_dir, transform=transforms)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=config.VAL_BATCH_SIZE, shuffle=False, drop_last=True
    )
    ids, labels = get_ids_from_dataset(val_dataset)
    val_features, val_labels = torch_engine.feature_extrat_step(
        encoder_model, val_loader, device
    )
    # drop_last=True may discard a trailing incomplete batch, leaving fewer
    # feature rows than dataset ids; truncate so ids stay aligned with rows.
    ids = ids[:len(val_features)]
    print(val_features.shape, len(ids))
    index.add(val_features, id_strings=ids)
    index.save(index_save_dir)


def create_index_basedon_features(dataset, features, index_save_dir):
    """Build a FAISS index from pre-computed features and persist it to disc.

    Args:
        dataset (torchvision.datasets.folder): torchvision.datasets.ImageFolder | VisionFolder | DatasetFolder,
            used to recover image paths for the index id strings.
        features (tensor): feature matrix added to the index, one row per item.
        index_save_dir (str): directory to save index on disc.
    Returns:
        lib.FaissIndex:  lib.FaissIndex object.
    """
    image_links, labels = get_ids_from_dataset(dataset)
    index = FaissIndex(features.shape[1])
    n_rows = len(features)
    if len(image_links) < n_rows:
        # one fewer path than feature rows: pad with a sentinel entry
        image_links.insert(0, "no image")
    elif len(image_links) > n_rows:
        # more paths than feature rows can occur because the loader used
        # drop_last; keep only the paths that actually have features
        image_links = image_links[:n_rows]
    index.add(features, id_strings=image_links)
    index.save(index_save_dir)
    return index


def update_index_basedon_features(index, val_features ,val_dataset, index_save_dir):
    """Update an existing lib.FaissIndex with pre-computed features.

    Args:
        index (lib.FaissIndex): index to be updated
        val_features (tensor): feature matrix to add, one row per item.
        val_dataset (torchvision.datasets.folder): torchvision.datasets.ImageFolder | VisionFolder | DatasetFolder,
            used to recover image paths for the index id strings.
        index_save_dir (str): directory to save index on disc.
    """
    image_links, labels = get_ids_from_dataset(val_dataset)
    if len(image_links) < len(val_features):
        # one fewer path than feature rows: pad with a sentinel entry
        image_links.insert(0, "no image")
    elif len(image_links) > len(val_features):
        # consistency fix: mirror create_index_basedon_features — drop_last in
        # the loader can leave more image paths than feature rows
        image_links = image_links[:len(val_features)]
    print(val_features.shape, len(image_links))
    index.add(val_features, id_strings=image_links)
    index.save(index_save_dir)


def create_labels_index_basedon_features(features, labels, index_save_dir, index_name, index_map):
    """Build a FAISS index keyed by class labels and persist it to disc.

    Args:
        features (tensor): feature matrix added to the index, one row per item.
        labels (ndarray): labels used as the id strings of the index entries.
        index_save_dir (str): directory to save index on disc.
        index_name (str): label and features index name.
        index_map (str): label and features index map, config.
    Returns:
        lib.FaissIndex:  lib.FaissIndex object.
    """
    label_index = FaissIndex(features.shape[1])
    # pad with a sentinel label when there is one fewer label than feature rows
    if len(labels) < len(features):
        labels.insert(0, "-1")
    label_index.add(features, id_strings=labels)
    label_index.save(index_save_dir, index_name, index_map)
    return label_index


def update_labels_index_basedon_features(index, val_features , val_labels,
                                         index_save_dir, index_name, index_map):
    """Update an existing lib.FaissIndex with label-keyed features.

    Args:
        index (lib.FaissIndex): index to be updated
        val_features (tensor): feature matrix to add, one row per item.
        val_labels (ndarray): labels used as the id strings of the new entries.
        index_save_dir (str): directory to save index on disc.
        index_name (str): label and features index name.
        index_map (str): label and features index map, config.
    """
    # pad with a sentinel label when there is one fewer label than feature rows
    if len(val_labels) < len(val_features):
        val_labels.insert(0, "-1")
    index.add(val_features, id_strings=val_labels)
    index.save(index_save_dir, index_name, index_map)


if __name__ == "__main__":

    # Force CPU execution: hide all GPUs BEFORE probing availability so that
    # `device` is consistent with the visible devices. (Previously the env var
    # was set AFTER the check, so on a GPU machine device was "cuda" while the
    # GPU was hidden, and later CUDA calls would fail.)
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    # Train/test image directories come from the config module.
    fashion_images_dir = config.TRAIN_DATA_PATH
    iconic_images_dir = config.TEST_DATA_PATH
    encoder_model = ClassicalFeaturesEncoder(config.FEATURE_LIST)

    image_resize_size = config.IMG_HEIGHT
    # Single-channel uint8 tensors: the classical-feature encoder works on
    # grayscale crops, hence no normalization / ToTensor scaling here.
    transforms = T.Compose([
        T.RandomResizedCrop(image_resize_size),
        T.Grayscale(num_output_channels=1),
        T.PILToTensor()
    ])

    index = create_index(fashion_images_dir, encoder_model, transforms, config.MODEL_DIR_PATH, device)
    update_index(index, encoder_model, transforms, iconic_images_dir, config.MODEL_DIR_PATH, device)
