import numpy as np
import cv2
import csv
import keras.preprocessing.image
import os, sys

from sklearn.preprocessing import LabelBinarizer

import get_data_utils
from keras_preprocessing.image import load_img, img_to_array
import parameter

# This variant returns labels WITHOUT one-hot encoding (see get_data()).
# Restrict visible GPUs to devices 2 and 3. The list must be strictly
# comma-separated: the original value '2, 3' contained a space, and CUDA's
# parsing of whitespace inside CUDA_VISIBLE_DEVICES is not reliable across
# versions — entries after a malformed token can be silently dropped.
os.environ['CUDA_VISIBLE_DEVICES'] = '2,3'

def get_data():
    """Load the MCM 2021 Problem C dataset: images, note texts, and labels.

    Reads the preprocessed dataset CSV and the image-to-GlobalID mapping CSV,
    then loads and resizes every image in the picture directory, building
    three parallel arrays (one entry per image).

    Returns:
        pic_np (np.ndarray): float array, shape (num_pic, width, height, 3),
            holding the resized images.
        text_np (np.ndarray): note text per image (column 4 of the matching
            dataset row).
        label_np (np.ndarray): label per image (column 5 of the matching
            dataset row), converted to ints via
            get_data_utils.label_string_to_int.

    Note: labels are returned as plain integer codes, deliberately NOT
    one-hot encoded (callers one-hot themselves if needed).
    """
    new_DataSet_file_path = "F:\\MCM-ICM\\2021\\data\\2021_MCM_Problem_C_Data\\new_DataSet.csv"
    Images_by_GlobalID_file_path = "F:\\MCM-ICM\\2021\\data\\2021_MCM_Problem_C_Data\\2021MCM_ProblemC_ Images_by_GlobalID.csv"
    pic_file_path = "F:\\MCM-ICM\\2021\\data\\2021MCM_ProblemC_Files2"

    def _read_csv_as_array(path):
        """Read a CSV file into a 2-D numpy array of strings (rows x cols)."""
        # The original rebuilt the ndarray inside the read loop (O(n^2)) and
        # never closed the file handle; build once after reading, inside `with`.
        with open(path, encoding="utf-8") as csv_file:
            return np.array(list(csv.reader(csv_file)))

    # Dataset table: one row of metadata per report/image.
    new_DataSet = _read_csv_as_array(new_DataSet_file_path)
    # print(new_DataSet)

    # Mapping of image file names to GlobalIDs. Loaded for parity with the
    # original code; not referenced further in this function.
    Images_by_GlobalID = _read_csv_as_array(Images_by_GlobalID_file_path)
    # print(Images_by_GlobalID)

    # Image file names.
    pic_dirs = os.listdir(pic_file_path)

    # Number of images and their common target size.
    num_pic = 3195
    width = parameter.width    # 460
    height = parameter.height  # 460
    channel = 3
    pic_np = np.zeros((num_pic, width, height, channel))  # image store
    text_list = [0] * num_pic   # note text per image
    label_list = [0] * num_pic  # raw label string per image

    for i, pic_name in enumerate(pic_dirs):
        img = load_img(pic_file_path + "\\" + pic_name)
        img = img_to_array(img)
        # Resize every image to a common size. NOTE(review): cv2.resize's
        # dsize is (cols, rows), so the result is (height, width, 3); this
        # only matches pic_np's (width, height, 3) slot because
        # width == height (460) — confirm if the sizes ever diverge.
        img = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)

        # 1. store the image
        pic_np[i] = img

        # Look up this image's dataset row once (the original called
        # get_row twice with identical arguments).
        row = get_data_utils.get_row(pic_name, new_DataSet)
        # 2. store the note text (column 4)
        text_list[i] = row[4]
        # 3. store the label (column 5)
        label_list[i] = row[5]

    print(pic_np.shape)
    print(text_list)
    text_np = np.array(text_list)
    print(text_np.shape)
    print(label_list)
    label_np = np.array(label_list)
    print("label_np")
    print(label_np.shape)
    print(label_np[1])
    print("int(label_np[1])")
    # Convert label strings to integer codes.
    label_np = get_data_utils.label_string_to_int(label_np)
    print(label_np[1])
    print(type(label_np[1]))

    return pic_np, text_np, label_np  # plain arrays here, no one-hot encoding


# Randomly draw batch_size elements from the dataset and return them.
def get_batch(x_pic, y, batch_size):
    """Sample a random mini-batch (with replacement) from the dataset.

    Args:
        x_pic: image array of shape (N, width, height, channel).
        y: label array of length N, aligned with x_pic.
        batch_size: number of samples to draw.

    Returns:
        Tuple (images, labels), each with batch_size entries, indexed by the
        same random positions so the pairing is preserved.
    """
    # Derive the dataset size from the input instead of hard-coding 3195,
    # so the function works for datasets of any length. Identical behavior
    # for the original 3195-image dataset.
    num_pic = x_pic.shape[0]
    index = np.random.randint(0, num_pic, batch_size)  # batch_size random indices
    return x_pic[index, :, :, :], y[index]
