'''
Semi-supervised dataset loading: supervised (image + label) datasets and
unlabeled datasets for semi-supervised training.
'''
import paddle
from dataset.transforms import *
import numpy as np
import random
from dataset.transforms import Compose
def worker_init_fn(worker_id):
    """Re-seed numpy's RNG in each DataLoader worker so workers do not share a random stream."""
    seed = random.randint(0, 100000)
    np.random.seed(seed)

# Unsupervised (unlabeled) dataset loader
class SemiDataset(paddle.io.Dataset):
    """Dataset of unlabeled images for semi-supervised training.

    Reads image file names from ``<root>/train_list.txt`` (one name per
    line) and resolves them under ``<root>/JPEGImages/``. Each item is
    returned as ``(image, 1)`` — the constant ``1`` is a dummy label,
    since this split has no ground truth.

    Args:
        root (str): Dataset root directory.
    """

    def __init__(self, root):
        self.root = root
        # Always use the training-time transform pipeline for unlabeled data.
        self.compose = Compose(mode='train')
        self.file_list = []
        with open(self.root + "/train_list.txt", 'r') as f:
            for line in f:
                name = line.strip()
                if not name:
                    # Skip blank lines; the previous splitlines()[0]
                    # approach raised IndexError on an empty line.
                    continue
                self.file_list.append(self.root + "/JPEGImages/" + name)

    def __len__(self):
        return len(self.file_list)

    def __getitem__(self, idx):
        image_path = self.file_list[idx]
        # No label file exists: call Compose with label_path=None and
        # discard the label it returns.
        im, _ = self.compose(im_path=image_path, label_path=None)
        return (im, 1)
# Supervised dataset loader
class Dataset(paddle.io.Dataset):
    """Labeled image/mask dataset for supervised training or validation.

    Each non-empty line of ``<root>/train_list.txt`` (or
    ``<root>/val_list.txt`` for validation) must contain exactly two
    space-separated relative paths: ``<image_path> <label_path>``.

    Args:
        root (str): Dataset root directory.
        mode (str): Either ``'train'`` or ``'val'``.

    Raises:
        ValueError: If ``mode`` is invalid, or a list-file line does not
            split into exactly two fields.
    """

    def __init__(self, root, mode='train'):
        # Validate mode before doing any other work.
        if mode != 'train' and mode != 'val':
            raise ValueError(
                "mode must be set train or val")
        self.compose = Compose(mode=mode)
        self.root = root
        self.train_path = root + '/train_list.txt'
        self.val_path = root + '/val_list.txt'
        self.mode = mode
        self.file_list_path = self.train_path if mode == "train" else self.val_path
        self.file_list = []
        with open(self.file_list_path, 'r') as f:
            for line in f:
                stripped = line.strip()
                if not stripped:
                    # Tolerate blank lines; previously these produced
                    # [''] and raised a misleading format error.
                    continue
                items = stripped.split(' ')
                if len(items) != 2:
                    # Message kept verbatim (Chinese): "separate the
                    # dataset paths with a space".
                    raise ValueError(
                        "数据集路径请用空格分割")
                image_path = self.root + "/" + items[0]
                label_path = self.root + "/" + items[1]
                self.file_list.append([image_path, label_path])

    def __len__(self):
        return len(self.file_list)

    def __getitem__(self, idx):
        image_path = self.file_list[idx][0]
        label_path = self.file_list[idx][1]
        # Transforms are applied jointly to the image and its mask.
        im, label = self.compose(im_path=image_path, label_path=label_path)
        return (im, label)

def load_data_loader(mode, root, args):
    """Build a DataLoader over the supervised ``Dataset``.

    Args:
        mode (str): ``'train'`` (shuffled, ``args.batch_size``, ragged
            final batch dropped) or ``'val'`` (sequential, batch size 1).
        root (str): Dataset root directory.
        args: Namespace providing ``batch_size`` and ``num_workers``.

    Returns:
        paddle.io.DataLoader: Loader driven by a DistributedBatchSampler.
    """
    dataset = Dataset(root, mode)
    is_train = mode == "train"
    batch_sampler = paddle.io.DistributedBatchSampler(
        dataset,
        batch_size=args.batch_size if is_train else 1,
        shuffle=is_train,
        # Only drop the incomplete final batch while training. With the
        # validation batch size of 1 every batch is complete, so this is
        # a no-op for 'val' today, but it keeps all samples if the val
        # batch size ever changes.
        drop_last=is_train,
    )
    return paddle.io.DataLoader(
        dataset=dataset,
        batch_sampler=batch_sampler,
        worker_init_fn=worker_init_fn,
        num_workers=args.num_workers,
    )
def load_semi_data_loader(root, args):
    """Build a shuffled DataLoader over the unlabeled ``SemiDataset``.

    Args:
        root (str): Dataset root directory.
        args: Namespace providing ``batch_size`` and ``num_workers``.

    Returns:
        paddle.io.DataLoader: Loader driven by a DistributedBatchSampler.
    """
    semi_dataset = SemiDataset(root=root)
    sampler = paddle.io.DistributedBatchSampler(
        semi_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        drop_last=True,
    )
    loader = paddle.io.DataLoader(
        dataset=semi_dataset,
        batch_sampler=sampler,
        worker_init_fn=worker_init_fn,
        num_workers=args.num_workers,
    )
    return loader





