from __future__ import absolute_import
from __future__ import print_function

import os
import numpy as np
import dicom
import tarfile

from utils import image_utils
from utils import s3_utils

# Global preprocessing switch: when True, load_images() center-crops each
# DICOM frame and resizes it to (size, size) via image_utils.
resize = True  # True to crop and resize images
# NOTE: the module-level size constant was replaced by the size=64 keyword
# argument on create_train_data / create_validation_data / load_images.
#size = 64  # shape to resize image (size, size)


def create_train_data(data_dir, size=64):
    """
    Build flat training arrays from the per-study image data.

    Loads every study under ``<data_dir>/train`` via :func:`load_images`,
    pairs each 30-frame slice stack with the study's regression targets
    from ``<data_dir>/train.csv``, and saves three aligned arrays
    (``X_train.npy``, ``y_train.npy``, ``m_train.npy``) to the
    ``npy_files`` directory next to this script.

    Args:
        data_dir: root directory containing the ``train`` folder and
            ``train.csv``.
        size: edge length images were resized to (forwarded to
            :func:`load_images`).
    """
    curr_dir = os.path.dirname(os.path.abspath(__file__))
    # load images and their ids, plus the study-id -> targets mapping
    ids, images, metadata = load_images(data_dir + '/train', size=size)
    studies_to_results = map_studies_results(data_dir)

    X = []
    y = []
    m = []

    # flatten study-level arrays into one sample per sax slice; the study's
    # targets are repeated for every slice of that study
    for study_id in ids:  # renamed from `id` to avoid shadowing the builtin
        study = images[study_id]
        study_metadata = metadata[study_id]
        outputs = studies_to_results[study_id]
        for i in range(study.shape[0]):
            X.append(study[i, :, :, :])
            y.append(outputs)
            m.append(study_metadata[i])
    X = np.array(X)
    y = np.array(y)
    m = np.array(m)
    np.save(curr_dir + '/npy_files/' + 'X_train.npy', X)
    np.save(curr_dir + '/npy_files/' + 'y_train.npy', y)
    np.save(curr_dir + '/npy_files/' + 'm_train.npy', m)


def create_validation_data(data_dir, size=64):
    """
    Build flat validation arrays from the per-study image data.

    Loads every study under ``<data_dir>/validate`` via :func:`load_images`
    and saves three aligned arrays (``X_validate.npy``, ``m_validate.npy``,
    ``ids_validate.npy``) to the ``npy_files`` directory next to this
    script. ``ids_validate`` carries one study id per slice so predictions
    can be grouped back into studies (no targets exist for validation).

    Args:
        data_dir: root directory containing the ``validate`` folder.
        size: edge length images were resized to (forwarded to
            :func:`load_images`).
    """
    curr_dir = os.path.dirname(os.path.abspath(__file__))
    ids, images, metadata = load_images(data_dir + '/validate', size=size)

    X = []
    study_ids = []
    m = []

    # one sample per sax slice, with its owning study id recorded alongside
    for study_id in ids:  # renamed from `id` to avoid shadowing the builtin
        study = images[study_id]
        study_metadata = metadata[study_id]
        for i in range(study.shape[0]):
            study_ids.append(study_id)
            X.append(study[i, :, :, :])
            m.append(study_metadata[i])
    X = np.array(X)
    m = np.array(m)
    np.save(curr_dir + '/npy_files/' + 'X_validate.npy', X)
    np.save(curr_dir + '/npy_files/' + 'm_validate.npy', m)
    np.save(curr_dir + '/npy_files/' + 'ids_validate.npy', study_ids)


def _pad_frames(images):
    """Normalize a slice's frame list to exactly 30 frames.

    Frames are repeated cyclically from the start until 30 exist, then
    truncated to 30. An empty list is returned unchanged (the original
    code reached this state via a swallowed IndexError).
    """
    if not images:
        return images
    x = 0
    while len(images) < 30:
        images.append(images[x])
        x += 1
    if len(images) > 30:
        images = images[0:30]
    return images


def load_images(data_dir, verbose=True, size=64):
    """
    Load images in the form study x image x width x length.

    Each sax slice is padded/truncated to 30 frames so it is ready for the
    convolutional network. Frames are scaled to [0, 255] float32 and,
    when the module-level ``resize`` flag is set, center-cropped and
    resized to (size, size).

    Args:
        data_dir: the root directory (``.../train`` or ``.../validate``).
        verbose: if True, print progress every 1000 files.
        size: edge length for the resized frames.

    Returns:
        (ids, study_to_images, study_to_metadata) where ids lists study
        folder names, study_to_images maps id -> array of slices x frames,
        and study_to_metadata maps id -> per-slice DICOM metadata rows.
    """
    current_study_sub = ""  # sax sub-folder currently being accumulated
    current_study = ""  # study folder currently being accumulated
    current_study_images = []  # finished slices of the current study
    current_study_metadata = []
    ids = []  # ids of completed studies
    study_to_images = dict()
    study_to_metadata = dict()
    total = 0
    images = []  # frames of the current sax slice
    images_dicom = []  # raw DICOM objects for the current sax slice
    data_dir = data_dir if data_dir.endswith('/') else data_dir + '/'
    for subdir, dirs, files in os.walk(data_dir):
        subdir = subdir.replace('\\', '/')  # normalize Windows separators
        bits = subdir.split("/")
        # assumes layout <root>/<study_id>/study/sax_N — TODO confirm
        study_id = bits[-3]
        if "sax" in subdir:
            for f in files:
                image_path = os.path.join(subdir, f)
                if not image_path.endswith('.dcm'):
                    continue

                image_dicom = dicom.read_file(image_path)
                image = image_dicom.pixel_array.astype(np.float32)
                image /= np.max(image)  # scale to [0,1]

                if resize:
                    image = image_utils.crop_center(image)
                    image = image_utils.resize(image, size)

                image *= 255

                # a new sax sub-folder: flush the previous slice's frames
                if current_study_sub != subdir:
                    images = _pad_frames(images)
                    current_study_sub = subdir
                    current_study_images.append(images)
                    if len(images_dicom) > 0:
                        current_study_metadata.append(extract_metadata(images_dicom[0]))
                    else:
                        current_study_metadata.append([])
                    images_dicom = []
                    images = []

                # a new study: store the previous study's slices
                if current_study != study_id:
                    study_to_images[current_study] = np.array(current_study_images)
                    study_to_metadata[current_study] = np.array(current_study_metadata)
                    if current_study != "":
                        ids.append(current_study)
                    current_study = study_id
                    current_study_images = []
                    current_study_metadata = []

                images.append(image)
                images_dicom.append(image_dicom)
                if verbose:
                    if total % 1000 == 0:
                        print("Files proccessed " + str(total))
                total += 1

    # flush the final slice and study; guard the metadata lookup exactly
    # like the in-loop branch does (bug fix: the original indexed
    # images_dicom[0] unconditionally and crashed on a directory that
    # contained no .dcm files)
    images = _pad_frames(images)
    current_study_images.append(images)
    if len(images_dicom) > 0:
        current_study_metadata.append(extract_metadata(images_dicom[0]))
    else:
        current_study_metadata.append([])
    study_to_images[current_study] = np.array(current_study_images)
    study_to_metadata[current_study] = np.array(current_study_metadata)
    if current_study != "":
        ids.append(current_study)

    return ids, study_to_images, study_to_metadata


def extract_metadata(dicom_image):
    """Return [SliceThickness, SliceLocation, PixelSpacing[0], PixelSpacing[1]]
    read from a single DICOM object."""
    spacing = dicom_image.PixelSpacing
    return [
        dicom_image.SliceThickness,
        dicom_image.SliceLocation,
        spacing[0],
        spacing[1],
    ]


def map_studies_results(data_dir):
    """
    Map studies to their respective targets.

    Reads ``<data_dir>/train.csv`` (header row skipped); each remaining
    row must be ``id,diastole,systole``.

    Returns:
        dict mapping the id string to [diastole, systole] as floats.

    Raises:
        IOError/FileNotFoundError if train.csv is missing, ValueError on
        malformed rows.
    """
    id_to_results = dict()
    # `with` guarantees the handle is closed even if a row fails to parse
    # (the original never closed the file)
    with open(os.path.join(data_dir, "train.csv")) as fo:
        next(fo, None)  # skip the CSV header line; tolerate an empty file
        for line in fo:
            # renamed `id` -> study_id to avoid shadowing the builtin
            study_id, diastole, systole = line.replace("\n", "").split(",")
            id_to_results[study_id] = [float(diastole), float(systole)]

    return id_to_results


def download_train_from_s3():
    """
    Download the 13 training batches (X tarballs and y arrays) from the
    S3 bucket into ``<this_dir>/train_npy_files/`` and extract each X
    archive in place.
    """
    print('Downloading training data from S3 bucket.')
    # renamed from `dir` to avoid shadowing the builtin
    base_dir = os.path.dirname(os.path.abspath(__file__))
    for i in range(13):
        batch_to_load = str(i).zfill(2)  # batches are named 00..12
        print('-' * 50)
        print('Downloading X batch {0}'.format(batch_to_load))
        x_name = "X_train_" + batch_to_load + ".npy.tar.gz"
        s3_utils.get_from_s3("/keras-ndsb/train_npy_files/" + x_name,
                             base_dir + "/train_npy_files/" + x_name)
        print('Downloading y batch {0}'.format(batch_to_load))
        y_name = "y_train_" + batch_to_load + ".npy"
        s3_utils.get_from_s3("/keras-ndsb/train_npy_files/" + y_name,
                             base_dir + "/train_npy_files/" + y_name)

        print('Extracting X batch {0}'.format(batch_to_load))
        # context manager closes the archive even if extraction raises
        # (the original leaked the handle on error)
        with tarfile.open(base_dir + "/train_npy_files/" + x_name) as tar:
            # NOTE(review): extractall on an untrusted archive can write
            # outside the target dir; acceptable here as the bucket is ours
            tar.extractall(path=base_dir + "/train_npy_files/")
        print('-' * 50)

    print('Done!')


def download_test_from_s3():
    """
    Download the 5 validation X batches plus the id array from the S3
    bucket into ``<this_dir>/validation_npy_files/`` and extract each
    tarball in place.
    """
    print('Downloading validation data from S3 bucket.')
    # renamed from `dir` to avoid shadowing the builtin
    base_dir = os.path.dirname(os.path.abspath(__file__))
    val_dir = base_dir + "/validation_npy_files/"
    for i in range(5):
        batch_to_load = str(i).zfill(2)  # batches are named 00..04
        print('-' * 50)
        print('Downloading X batch {0}'.format(batch_to_load))
        x_name = "X_val" + batch_to_load + ".npy.tar.gz"
        s3_utils.get_from_s3("/keras-ndsb/validation_npy_files/" + x_name,
                             val_dir + x_name)

        print('Extracting X batch {0}'.format(batch_to_load))
        # context manager closes the archive even if extraction raises
        # (the original leaked the handle on error)
        with tarfile.open(val_dir + x_name) as tar:
            tar.extractall(path=val_dir)
        print('-' * 50)

    print('Downloading ids')
    s3_utils.get_from_s3("/keras-ndsb/validation_npy_files/id_val.npy.tar.gz",
                         val_dir + "id_val.npy.tar.gz")
    print('Extracting ids')
    with tarfile.open(val_dir + "id_val.npy.tar.gz") as tar:
        tar.extractall(path=val_dir)

    print('Done!')
