#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 2022/5/12 14:11
#@Author  : Xuewen Hou
Function: Save all data and info as numpy files
          Split all data into train and validation datasets and save them as numpy and tfrecord files
"""

import numpy as np
import tensorflow as tf
import cmr_fid
import csv
import os
from random import shuffle
import matplotlib.pyplot as plt
from sklearn import preprocessing
import random



# The following functions can be used to convert a value to a type compatible
# with tf.Example.
def _bytes_feature(value):
    """Wrap a single bytes/string value in a tf.train.Feature."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)

def _float_feature(value):
    """Wrap a single float/double value in a tf.train.Feature."""
    float_list = tf.train.FloatList(value=[value])
    return tf.train.Feature(float_list=float_list)

def _int64_feature(value):
    """Wrap a single bool/enum/int/uint value in a tf.train.Feature."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)


def get_fid_paths(data_folder, series_list_file, channel_count):
    """Collect fid file paths for the series listed in a csv file.

    Created on Sun Jul 15 10:41:24 2018 by Xiwei.

    Args:
        data_folder: root folder of fid files
        series_list_file: csv file, specify series in data_folder
            col1: site uid
            col2: study_id
            col[3-n] series number
            example:
            '1.3.6.1.4.1.31636.7484848481., 180713016, 2, 3, 4'
            '1.3.6.1.4.1.31636.7484848481., 180713017, 2, 3'
        channel_count: fid file number in series

    Returns:
        (file_paths, file_names): flat, parallel lists with one entry per
        channel of every complete series.  Series missing any channel file
        are skipped with a console warning.
    """
    # Per-channel file names inside a series folder: ChannelData01.fid, ...
    channel_files = ['ChannelData%02d.fid' % (ch + 1) for ch in range(channel_count)]

    file_paths = []
    file_names = []
    with open(series_list_file) as f:
        reader = csv.reader(f)
        for row in reader:
            # A valid row needs at least uid, study id and one series number.
            if len(row) < 3:
                print('series_list_file row ', reader.line_num, ' error')
                continue

            for index in range(2, len(row)):
                # NOTE: paths are deliberately built by string concatenation
                # ('//' separators) so the generated ids stay consistent with
                # the rest of this file.
                folder = data_folder + '//' + row[0] + row[1] + '//' + row[index] + '//'
                fids = []
                ids = []
                for ch_index, name in enumerate(channel_files):
                    if os.path.exists(folder + name):
                        fids.append(folder + name)
                        ids.append(row[0] + row[1] + '.' + row[index] + ('.ch%02d' % (ch_index + 1)))
                # Keep a series only when every channel file is present.
                if len(fids) != channel_count:
                    print(folder, ' miss file')
                    continue

                file_paths.extend(fids)
                file_names.extend(ids)

    return file_paths, file_names


def amend_fid_shape(fid, target=256):
    """Zero-pad or centre-crop a 2-D k-space array to target x target.

    Args:
        fid: 2-D complex array (rows x cols).
        target: desired size for both axes (default 256, the original
            hard-coded value — kept as a default for backward compatibility).

    Returns:
        Array of shape (target, target); padding is symmetric zeros, cropping
        keeps the centre.

    BUG FIX: the original padded columns with zeros of shape
    (residual, cols), which has the wrong number of rows for an axis=1
    concatenate and crashed for any cols < 256; odd size differences also
    produced off-by-one output shapes.  Both axes are now amended
    independently and exactly.
    """
    new_fid = fid
    rows, cols = new_fid.shape

    # Amend the row axis first.
    if rows < target:
        before = (target - rows) // 2
        after = target - rows - before  # handles odd differences exactly
        new_fid = np.concatenate(
            [np.zeros((before, cols), dtype=complex),
             new_fid,
             np.zeros((after, cols), dtype=complex)], axis=0)
    elif rows > target:
        off = (rows - target) // 2
        new_fid = new_fid[off:off + target, :]

    # Then the column axis, using the (possibly updated) row count.
    rows = new_fid.shape[0]
    if cols < target:
        before = (target - cols) // 2
        after = target - cols - before
        new_fid = np.concatenate(
            [np.zeros((rows, before), dtype=complex),
             new_fid,
             np.zeros((rows, after), dtype=complex)], axis=1)
    elif cols > target:
        off = (cols - target) // 2
        new_fid = new_fid[:, off:off + target]

    return new_fid


def fid_to_img(fid_data, mask, method='max'):
    """Reconstruct full and mask-undersampled images from one k-space slice.

    Args:
        fid_data: 2-D complex k-space slice.
        mask: undersampling mask, multiplied elementwise onto the k-space.
        method: 'T2_max' for a plain reconstruction, or 'T1_max' to apply an
            alternate-row phase correction first.

    Returns:
        (img, cs_img, fid_data, cs_fid, img_max_value, cs_img_max_value):
        the max-normalized full image, max-normalized undersampled image,
        the (possibly phase-corrected, shape-amended) k-space, the masked
        k-space, and the two max values used for normalization.

    Raises:
        ValueError: for an unknown method.  (BUG FIX: the original fell
        through both branches and crashed with UnboundLocalError on return.)
    """
    if method == 'T1_max':
        # Alternate-line phase correction: multiply every other phase-encode
        # row by exp(i*pi) == -1.  BUG FIX: the original assigned a complex
        # value into a float array, which relies on NumPy silently discarding
        # the imaginary part (ComplexWarning); use the exact real value.
        phase = np.ones(fid_data.shape)
        phase[1:fid_data.shape[0]:2, :] = -1.0
        fid_data = fid_data * phase
    elif method != 'T2_max':
        raise ValueError("method must be 'T2_max' or 'T1_max', got %r" % (method,))

    # Common reconstruction path (the two branches were duplicates of it).
    fid_data = amend_fid_shape(fid_data)
    cs_fid = np.multiply(mask, fid_data)

    # Undersampled (compressed-sensing) image, normalized to max 1.
    cs_img = abs(np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(cs_fid))))
    cs_img_max_value = np.max(cs_img)
    cs_img = cs_img / cs_img_max_value

    # Fully sampled reference image, normalized to max 1.
    img = abs(np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(fid_data))))
    img_max_value = np.max(img)
    img = img / img_max_value

    return img, cs_img, fid_data, cs_fid, img_max_value, cs_img_max_value

# Reconstruct every slice of every fid file and save images, k-space data
# and normalization info as numpy files.
def alldata_save_to_numpy(fid_file_paths, fid_file_names, mask, save_path, method):
    """Save full and masked reconstructions of all slices as .npy files.

    Args:
        fid_file_paths: list of fid file paths (see get_fid_paths).
        fid_file_names: parallel list of id strings used to tag each slice.
        mask: undersampling mask forwarded to fid_to_img.
        save_path: output folder/prefix for the .npy files.
        method: reconstruction method name ('T2_max' or 'T1_max').

    Returns:
        0 on success.

    Raises:
        RuntimeError: when the two input lists differ in length.
    """
    if len(fid_file_paths) != len(fid_file_names):
        raise RuntimeError('Input Size Error')

    # Flatten every file into per-slice 2-D arrays plus a matching id.
    fid_slice_list = []
    fid_slice_info = []
    for idx, path in enumerate(fid_file_paths):
        fid, _ = cmr_fid.read(path)
        shape = fid.shape
        fid = fid.reshape(-1, shape[-2], shape[-1])
        for s in range(fid.shape[0]):  # renamed: `slice` shadowed a builtin
            fid_slice_info.append(fid_file_names[idx] + ('.s%02d' % (s + 1)))
            fid_slice_list.append(fid[s])

    # Reconstruct each slice once and collect all six outputs in parallel.
    img_list_cs = []
    img_list = []
    fid_list_cs = []
    fid_list = []
    img_value_list_cs = []
    img_value_list = []
    for slice_fid in fid_slice_list:
        img, cs_img, fid_DC, cs_fid_DC, img_value, cs_img_value = fid_to_img(slice_fid, mask, method=method)
        img_list_cs.append(cs_img)
        img_list.append(img)
        fid_list_cs.append(cs_fid_DC)
        fid_list.append(fid_DC)
        img_value_list_cs.append(cs_img_value)
        img_value_list.append(img_value)

    # np.save appends '.npy' automatically, so the suffixed and unsuffixed
    # names below produce the same kind of file (kept as-is for downstream
    # loaders).
    np.save(save_path + method + "_all_img_cs.npy", np.array(img_list_cs))
    np.save(save_path + method + "_all_img", np.array(img_list))
    np.save(save_path + method + "_all_img_info.npy", np.array(fid_slice_info))
    np.save(save_path + method + "_all_fid_cs.npy", np.array(fid_list_cs))
    np.save(save_path + method + "_all_fid", np.array(fid_list))
    np.save(save_path + method + "_all_img_value_cs.npy", np.array(img_value_list_cs))
    np.save(save_path + method + "_all_img_value", np.array(img_value_list))

    return 0

def alldata_split_and_save_two_tfrecords(fid_file_paths, fid_file_names, mask, save_path, method):
    """Shuffle all fid files, then write train/valid tfrecord and .npy files.

    The actual train/valid split appears to happen inside tfrecord_write
    (selected via its `name` argument — defined elsewhere; confirm); this
    function only loads, shuffles and forwards the data.

    Args:
        fid_file_paths: list of fid file paths (see get_fid_paths).
        fid_file_names: parallel list of id strings.
        mask: undersampling mask forwarded to tfrecord_write.
        save_path: output folder/prefix.
        method: reconstruction method name ('T2_max' or 'T1_max').

    Returns:
        0 on success.

    Raises:
        RuntimeError: when the two input lists differ in length.
    """
    if len(fid_file_paths) != len(fid_file_names):
        raise RuntimeError('Input Size Error')

    fid_list = []
    for path in fid_file_paths:
        fid, _ = cmr_fid.read(path)
        shape = fid.shape
        fid_list.append(fid.reshape(-1, shape[-2], shape[-1]))

    # Shuffle data and ids together so they stay aligned.
    # BUG FIX: shuffle into fresh lists instead of assigning back through an
    # alias of fid_file_names, which silently reordered the caller's list.
    fid_data = list(zip(fid_list, fid_file_names))
    shuffle(fid_data)
    if fid_data:
        fid_data_list, fid_data_info = (list(t) for t in zip(*fid_data))
    else:  # zip(*[]) would raise on empty input
        fid_data_list, fid_data_info = [], []

    train_img_data_slice_list, train_img_data_slice_list_cs, train_img_data_slice_info = tfrecord_write(
        fid_data_list, fid_data_info, mask, name='train', method=method, save_path=save_path)
    np.save(save_path + method + "_train_img_cs.npy", np.array(train_img_data_slice_list_cs))
    np.save(save_path + method + "_train_img", np.array(train_img_data_slice_list))
    np.save(save_path + method + "_train_img_info.npy", np.array(train_img_data_slice_info))

    val_img_data_slice_list, val_img_data_slice_list_cs, val_img_data_slice_info = tfrecord_write(
        fid_data_list, fid_data_info, mask, name='valid', method=method, save_path=save_path)
    np.save(save_path + method + "_validation_img_cs.npy", np.array(val_img_data_slice_list_cs))
    np.save(save_path + method + "_validation_img", np.array(val_img_data_slice_list))
    # NOTE(review): '_validation_info' (not '_validation_img_info') kept
    # as-is for compatibility with downstream loaders — confirm intended.
    np.save(save_path + method + "_validation_info", np.array(val_img_data_slice_info))

    return 0

def gen_mask_uniform(kspace_shape, accel_factor=8, seed=None):
    """Generate a uniform-random undersampling mask along the phase axis.

    Inspired by
    https://github.com/facebookresearch/fastMRI/blob/master/common/subsample.py

    Args:
        kspace_shape: 2-D k-space shape; shape[-2] is the sampled (row) axis.
        accel_factor: acceleration factor R; the mask keeps ~1/R of the rows.
        seed: seed for the NumPy random generator.

    Returns:
        2-D float mask of shape (shape[-2], shape[1]); selected rows are 1.0.
    """
    shape = kspace_shape
    num_cols = shape[-2]

    # Fraction of fully sampled low-frequency (centre) rows, e.g. R=8 -> 4%.
    center_fraction = (32 // accel_factor) / 100
    acceleration = accel_factor

    # Choose the remaining rows uniformly so the overall rate is ~1/R.
    num_low_freqs = int(round(num_cols * center_fraction))
    prob = (num_cols / acceleration - num_low_freqs) / (num_cols - num_low_freqs)
    mask = np.random.default_rng(seed).uniform(size=num_cols) < prob

    # Always keep the centre band fully sampled.
    pad = (num_cols - num_low_freqs + 1) // 2
    mask[pad:pad + num_low_freqs] = True

    # Column vector replicated across the readout axis.
    # BUG FIX: np.float was removed in NumPy 1.24; use builtin float.
    mask = mask.reshape(num_cols, 1)
    fourier_mask = np.repeat(mask.astype(float), shape[1], axis=1)

    return fourier_mask

def gen_mask_gaussian(kspace_shape, accel_factor=8, seed=None):
    """Generate a Gaussian-density undersampling mask along the phase axis.

    Inspired by
    https://github.com/facebookresearch/fastMRI/blob/master/common/subsample.py

    Args:
        kspace_shape: 2-D k-space shape; shape[-2] is the sampled (row) axis.
        accel_factor: acceleration factor R; the mask keeps ~1/R of the rows.
        seed: seed for both the NumPy and stdlib random generators.

    Returns:
        2-D float mask of shape (shape[-2], shape[1]); selected rows are 1.0.
    """
    shape = kspace_shape
    num_cols = shape[-2]

    center_fraction = (32 // accel_factor) / 100
    acceleration = accel_factor

    num_low_freqs = int(round(num_cols * center_fraction))
    num_high_freqs = int(round(num_cols / acceleration - num_low_freqs))

    # Over-sample candidate indices from a normal distribution centred on
    # the middle of the high-frequency index range [0, num_cols - num_low).
    high_idx_over = np.round(
        np.random.default_rng(seed).normal(
            (num_cols - num_low_freqs) // 2,
            (num_cols - num_low_freqs) // 6,
            num_high_freqs * 2
        )
    ).astype(int)
    # Deduplicate and clip to the valid high-frequency index range.
    high_idx_over = np.array(list(set(high_idx_over.squeeze().tolist())))
    high_idx_over = np.delete(high_idx_over, np.where(high_idx_over < 0))
    high_idx_over = np.delete(
        high_idx_over, np.where(high_idx_over >= (num_cols - num_low_freqs))
    )
    if len(high_idx_over) <= num_high_freqs:
        high_idx_tmp = high_idx_over
    else:
        # Too many candidates: drop a random subset down to num_high_freqs.
        # BUG FIX: seed the stdlib RNG so `seed` makes the mask reproducible
        # (consistent with gen_mask_gaussian2).
        len_over = len(high_idx_over) - num_high_freqs
        random.seed(seed)
        idx_over = random.sample(range(1, len(high_idx_over)), len_over)
        high_idx_tmp = np.delete(high_idx_over, idx_over)
    # Shift candidates above the centre past the fully sampled block.
    high_idx_low = high_idx_tmp[
        np.where(high_idx_tmp <= (num_cols - num_low_freqs) // 2)
    ]
    high_idx_high = high_idx_tmp[
        np.where(high_idx_tmp > (num_cols - num_low_freqs) // 2)
    ] + num_low_freqs
    high_idx = np.concatenate((high_idx_low, high_idx_high))

    mask = np.zeros(num_cols)
    pad = (num_cols - num_low_freqs + 1) // 2
    mask[pad: pad + num_low_freqs] = True
    mask[high_idx] = True

    # Column vector replicated across the readout axis.
    # BUG FIX: np.float was removed in NumPy 1.24; use builtin float.
    mask = mask.reshape(num_cols, 1)
    fourier_mask = np.repeat(mask.astype(float), shape[1], axis=1)
    return fourier_mask

def gen_mask_gaussian2(kspace_shape, dividend=32, accel_factor=2, seed=None):
    """Gaussian undersampling mask (variant 2) plus its low-frequency part.

    Inspired by
    https://github.com/facebookresearch/fastMRI/blob/master/common/subsample.py

    Args:
        kspace_shape: 2-D k-space shape; shape[-2] is the sampled (row) axis.
        dividend: centre-band percentage numerator, e.g. dividend=32 gives a
            16% centre band at accel 2 and 8% at accel 4.
        accel_factor: acceleration factor R; the mask keeps ~1/R of the rows.
        seed: seed for both the NumPy and stdlib random generators.

    Returns:
        (fourier_mask, fourier_mask_low): the full mask and the mask of only
        the fully sampled centre band, both 2-D float arrays of shape
        (shape[-2], shape[1]).
    """
    shape = kspace_shape
    num_cols = shape[-2]

    center_fraction = (dividend // accel_factor) / 100
    acceleration = accel_factor

    # Force an even number of centre rows so the band is symmetric.
    num_low_freqs = int(round(num_cols * center_fraction))
    if num_low_freqs % 2:
        num_low_freqs += 1
    num_high_freqs = int(round(num_cols / acceleration - num_low_freqs))

    # Over-sample candidate rows from a Gaussian centred on the middle row.
    high_idx_over = np.round(
        np.random.default_rng(seed).normal(
            num_cols // 2,
            num_cols // 4,
            int(num_high_freqs * 2.5)
        )
    ).astype(int)

    # Deduplicate, clip to [0, num_cols), and drop candidates that fall
    # inside the always-sampled centre band.
    high_idx_over = np.array(list(set(high_idx_over.squeeze().tolist())))
    high_idx_over = np.delete(high_idx_over, np.where(high_idx_over < 0))
    high_idx_over = np.delete(high_idx_over, np.where(high_idx_over >= num_cols))
    low = high_idx_over >= (num_cols // 2 - num_low_freqs // 2)
    high = high_idx_over < (num_cols // 2 + num_low_freqs // 2)
    high_idx_over = np.delete(high_idx_over, np.where(low & high))

    if len(high_idx_over) <= num_high_freqs:
        high_idx = high_idx_over
    else:
        # Too many candidates: drop a random (seeded) subset.
        len_over = len(high_idx_over) - num_high_freqs
        random.seed(seed)
        idx_over = random.sample(range(1, len(high_idx_over)), len_over)
        high_idx = np.delete(high_idx_over, idx_over)

    mask = np.zeros(num_cols)
    pad = (num_cols - num_low_freqs + 1) // 2
    mask[pad: pad + num_low_freqs] = True
    mask[high_idx] = True

    # Column vectors replicated across the readout axis.
    # BUG FIX: np.float was removed in NumPy 1.24; use builtin float.
    mask = mask.reshape(num_cols, 1)
    fourier_mask = np.repeat(mask.astype(float), shape[1], axis=1)

    mask_low = np.zeros(num_cols)
    mask_low[pad: pad + num_low_freqs] = True
    mask_low = mask_low.reshape(num_cols, 1)
    fourier_mask_low = np.repeat(mask_low.astype(float), shape[1], axis=1)
    return fourier_mask, fourier_mask_low



def main():
    """Build the mask, then save all data plus train/valid splits to disk."""
    # Manually configured dataset folder, csv list file and output names.
    dataset_path = 'E:/HXW/python/MRI-Reconstruction/CS_Head1.5T/rawdata_head'
    T1_csv_filename = 'head_data_list_T2.CSV'
    channel_num = 8
    processing_method = 'T2_max'

    save_path = './Experiment-20221111/'
    seeds = [200]
    seeds_idx = ['psnr']

    T1_file_paths, T1_file_names = get_fid_paths(dataset_path, T1_csv_filename, channel_num)
    for path in T1_file_paths:
        print(path)
    for name in T1_file_names:
        print(name)

    FileDim, FileVersion = cmr_fid.read(T1_file_paths[0])
    print('FileVersion: ', FileVersion)
    print('FileDims: ', FileDim.shape)

    for seed, seed_idx in zip(seeds, seeds_idx):
        # BUG FIX: build each iteration's output folder from the base path;
        # the original reassigned save_path itself, so a second seed would
        # have accumulated path segments from the first.
        seed_save_path = save_path + seed_idx + '/dataset/'
        # NOTE(review): the mask seed is hard-coded to 1108 and the loop
        # variable `seed` is currently unused — confirm whether the per-seed
        # value should be passed instead.
        mask, mask_low = gen_mask_gaussian2([256, 256], dividend=32, accel_factor=4, seed=1108)
        alldata_save_to_numpy(T1_file_paths, T1_file_names, mask, seed_save_path, processing_method)
        alldata_split_and_save_two_tfrecords(T1_file_paths, T1_file_names, mask, seed_save_path, processing_method)

if __name__ == '__main__':
    main()