import torch 
import torch.nn as nn
import os 
import numpy as np
import h5py
from PIL import Image
import matplotlib.pyplot as plt 
import matplotlib
from torch.utils.data import Dataset, DataLoader
# Target (width, height) used when resizing every raw/label slice below.
# NOTE(review): removed an import-time debug print of the matplotlib
# backend and a commented-out os._exit(0) left over from debugging.
img_size = (256, 256)
class Self2DDataset(Dataset):
    """2D image/label pairs read from an HDF5 file, z-score normalized per image.

    The HDF5 file must contain two datasets indexed along the first axis:
      - "raw":   images
      - "label": matching labels (same leading length)
    Both are loaded fully into memory up front, so the file handle is
    closed as soon as construction finishes.
    """

    def __init__(self, file_path):
        super(Self2DDataset, self).__init__()
        # Read everything eagerly and close the handle.  The original code
        # kept the h5py.File open for the dataset's whole lifetime (leak).
        with h5py.File(file_path, "r") as h5_file:
            self.imgs = h5_file["raw"][()]
            self.labels = h5_file["label"][()]

    def __getitem__(self, idx):
        """Return (normalized image tensor, raw label) for sample *idx*."""
        img = self.imgs[idx]
        label = self.labels[idx]
        # Per-image z-score normalization.  Guard the constant-image case
        # (std == 0), which would otherwise divide by zero and yield NaN/Inf.
        mean_v = img.mean()
        std_v = img.std()
        if std_v > 0:
            img = (img - mean_v) / std_v
        else:
            img = img - mean_v
        return torch.tensor(img), label

    def __len__(self):
        return len(self.imgs)

if __name__ == "__main__":
    # Build a balanced 2D training set:
    #   1. resize every raw/label slice found under data_path,
    #   2. keep all foreground slices (non-empty label) plus an equal number
    #      of randomly sampled background slices,
    #   3. write the result into a single compressed HDF5 file.
    data_path = "./self_data_train/"
    new_raw = []
    new_label = []
    for each_file in os.listdir(data_path):
        path = os.path.join(data_path, each_file)
        # Context manager guarantees the read handle is closed even if a
        # dataset lookup fails (original used a manual close()).
        with h5py.File(path, "r") as file_h5:
            raw = file_h5["raw"][()]
            label = file_h5["label"][()]
        for each_img, each_label in zip(raw, label):
            each_img = each_img.astype(np.float32)
            each_label = each_label.astype(np.float32)
            # PIL resize requires going through Image objects; both the
            # image and its label must be resized consistently.
            raw_img = Image.fromarray(each_img).resize(img_size)
            label_img = Image.fromarray(each_label).resize(img_size)
            new_raw.append(np.array(raw_img))
            new_label.append(np.array(label_img))

    # Split slices into foreground (label has any nonzero pixel) and
    # background so the two classes can be balanced below.
    fg_img = []
    fg_img_label = []
    bg_img = []
    bg_img_label = []
    for each_img, each_label in zip(new_raw, new_label):
        if each_label.sum() != 0:
            fg_img.append(each_img)
            fg_img_label.append(each_label)
        else:
            bg_img.append(each_img)
            bg_img_label.append(each_label)

    print(len(fg_img))

    # Sample (with replacement) as many background slices as there are
    # foreground slices.  Guard the empty cases: np.random.randint raises
    # ValueError when high == 0, and sampling 0 indices is pointless.
    if fg_img and bg_img:
        random_idx = np.random.randint(0, len(bg_img), len(fg_img))
        for ix in random_idx:
            fg_img.append(bg_img[ix])
            fg_img_label.append(bg_img_label[ix])

    raw_array = np.stack(fg_img)
    label_array = np.stack(fg_img_label)

    print(raw_array.shape)
    print(label_array.shape)

    # Write handle is also managed by a context manager so the file is
    # flushed/closed even if a create_dataset call raises.
    with h5py.File("./self_data_2d_train.h5", "w") as save_h5:
        save_h5.create_dataset("raw", data=raw_array, compression="gzip")
        save_h5.create_dataset("label", data=label_array, compression="gzip")