# Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
create train or eval dataset.
"""
import os
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.vision as C
import mindspore.dataset.transforms as C2
from mindspore.communication.management import init, get_rank, get_group_size
import pandas as pd
resize = 224

def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32,
                   target="Ascend", distribute=False):
    """
    create a train or eval imagenet2012 dataset for resnet50

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend
        distribute(bool): data for distribute or not. Default: False

    Returns:
        dataset
    """
    # Resolve sharding info. On Ascend it comes from environment variables;
    # elsewhere the communication framework is initialized on demand.
    if target == "Ascend":
        device_num, rank_id = _get_rank_info()
    elif distribute:
        init()
        rank_id = get_rank()
        device_num = get_group_size()
    else:
        device_num = 1

    if device_num != 1:
        data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True,
                                         num_shards=device_num, shard_id=rank_id)
    else:
        data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True)

    crop_size = 224
    # Per-channel ImageNet statistics, scaled to the [0, 255] pixel range.
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]

    # Shared tail of the image pipeline; train/eval differ only in the
    # decode/crop stage at the front.
    common_ops = [
        C.Normalize(mean=mean, std=std),
        C.HWC2CHW()
    ]
    if do_train:
        trans = [
            C.RandomCropDecodeResize(crop_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5)
        ] + common_ops
    else:
        trans = [
            C.Decode(),
            C.Resize(256),
            C.CenterCrop(crop_size)
        ] + common_ops

    data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=8)
    data_set = data_set.map(operations=C2.TypeCast(mstype.int32), input_columns="label",
                            num_parallel_workers=8)

    # Batch (dropping the ragged remainder), then repeat.
    data_set = data_set.batch(batch_size, drop_remainder=True)
    return data_set.repeat(repeat_num)



def _get_rank_info():
    """
    get rank size and rank id
    """
    # rank_size = int(os.getenv("RANK_SIZE", default=1))
    rank_size = int(os.getenv("RANK_SIZE"))

    if rank_size > 1:
        rank_size = get_group_size()
        rank_id = get_rank()
    else:
        rank_size = 1
        rank_id = 0

    return rank_size, rank_id

def _creat_pji():
    """
    Read negative and positive image folders into `data`/`label` arrays
    (label 0 = negative, label 1 = positive), resizing each image to
    (resize, resize).

    NOTE(review): this function is broken as written — `np` and `cv2` are
    never imported in this module, so calling it raises NameError; and
    `data`/`label` are locals that are discarded on return (nothing is
    returned or stored). Presumably pasted from a notebook where these
    were globals — confirm the intended data flow before using.
    """

    neg_path = 'BCNN_x//all/neg_xcv_m//0'  # dataset location (negatives)
    pos_path = 'BCNN_x//all//pos_xcv_m//0'  # dataset location (positives)
    path= 'BCNN_x//all//'
    imgs = os.listdir(pos_path)
    imgss=os.listdir(neg_path)
    pos_num = len(os.listdir(pos_path))
    neg_num =len(os.listdir(neg_path))
    num=neg_num+pos_num

    print(neg_num,pos_num)
    # Pre-allocate one HWC int32 slot per image plus a flat label vector.
    data = np.empty((num, resize, resize, 3), dtype="int32")
    label = np.empty((num, ), dtype="int32")
    k=0
    # Negatives fill indices [0, neg_num) with label 0.
    for i in range(neg_num):
        a=cv2.imread(path+ 'neg_xcv_m//0//' + imgss[i])
    #     print(path+ 'neg_xcv_m//0//' + imgss[i])
        data[i] = cv2.resize(a, (resize, resize))
        label[i] = 0
        k=k+1
    #     print(k,i)

    k=0
    # Positives fill indices [neg_num, num) with label 1; `k` indexes the
    # positive file list while `i` indexes the destination array.
    for i in range(neg_num,neg_num+pos_num):
        a=cv2.imread(path+ 'pos_xcv_m//0//' + imgs[k])
    #     print(path+ 'pos//' + imgs[k])
        data[i] = cv2.resize(a, (resize, resize))
        label[i] = 1
        k=k+1
def _dataxy():
    """
    Split `data`/`label` into train/test sets, scale pixels to [0, 1] and
    one-hot encode the labels.

    NOTE(review): this function is broken as written — it references
    `data`, `label` and `img_t`, which are not defined in this module, and
    `train_test_split` (sklearn), `to_categorical` (keras) and `np` are
    never imported, so calling it raises NameError. It also computes
    everything into locals and returns nothing. Presumably pasted from a
    notebook — confirm the intended data flow before using.
    """
    x_train, x_test, y_train, y_test,train_img_t,test_img_t = train_test_split(data, label, img_t,test_size=0.2, random_state=1)
    # NOTE(review): y_test_end / y_train_end are assigned but never used.
    y_test_end=y_test
    y_train_end=y_train
    print(test_img_t.shape,y_test.shape,"print(y_train,y_test)",y_train,y_test,train_img_t,test_img_t.shape,train_img_t.shape)
    print("print(x_train,x_test)",x_train,x_test)
    # Scale raw pixel values into [0, 1].
    x_train = np.array(x_train) / 255.0
    x_test = np.array(x_test) / 255.0

    # NOTE(review): ndarray.reshape returns a new view — these two results
    # are discarded, so the reshape calls are no-ops as written.
    x_train.reshape(-1, resize, resize, 1)
    y_train = np.array(y_train)

    x_test.reshape(-1, resize, resize, 1)
    y_test = np.array(y_test)

    print("print(y_train,y_test)",y_train,y_test)
    print("print(x_train,x_test)",x_train,x_test)

    # from sklearn.model_selection import train_test_split 
    # from keras.utils import to_categorical

    # x_train, x_test, y_train, y_test = train_test_split(data, label, test_size=0.2, random_state=1)
    print("print(y_train,y_test)",y_train,y_test)
    print("print(x_train,x_test)",x_train,x_test)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    y_train = to_categorical(y_train, 2)  # convert labels to one-hot
    y_test = to_categorical(y_test, 2)  # convert labels to one-hot
    print(y_train,y_test)
    print("print(x_train,x_test)",x_train,x_test)
