from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import random
import numpy as np
from PIL import Image
import torch
import os
import time
from global_var import IS_SUBSET, NUM_SUBSET

# class imagenet_dataset(Dataset):
#     def __init__(self, root_dir, transform, num_class):
#         self.root = root_dir+'imagenet/val/'
#         self.transform = transform
#         self.val_data = []

#         for c in range(num_class):
#             imgs = os.listdir(self.root+str(c))
#             for img in imgs:
#                 self.val_data.append([c,os.path.join(self.root,str(c),img)])                
                
#     def __getitem__(self, index):
#         data = self.val_data[index]
#         target = data[0]
#         image = Image.open(data[1]).convert('RGB')   
#         img = self.transform(image) 
#         return img, target
    
#     def __len__(self):
#         return len(self.val_data)

class KNN_solver:
    """Chunked brute-force 1-nearest-neighbor search from set A into set B.

    Pairwise Euclidean distances are computed block-by-block on `device`
    so that large embedding matrices never have to be resident on the GPU
    all at once; the assembled distance matrix is accumulated on the CPU.
    """

    def __init__(self, A, B, device) -> None:
        # A: (m, d) query embeddings; B: (n, d) reference embeddings.
        self.A = A
        self.B = B
        self.device = device

    def compute_distances_tensor(self, A, B, device):
        """Return the (m, n) matrix of Euclidean distances between rows of A and B."""
        A = A.to(device)
        B = B.to(device)
        m = A.shape[0]
        n = B.shape[0]
        # ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, expanded to full matrices.
        M = A @ B.T
        H = torch.square(A).sum(dim=1, keepdim=True).repeat(1, n)
        K = torch.square(B).sum(dim=1, keepdim=True).T.repeat(m, 1)
        # Clamp at 0: floating-point cancellation can leave the squared
        # distance slightly negative, which would become NaN under sqrt.
        return torch.sqrt(torch.clamp(-2 * M + H + K, min=0))

    def nearest_neighbor(self, num_subset_A = 10000, num_subset_B = 10000):
        """For each row of A return the index of its nearest row in B.

        Distances are computed in chunks of at most
        num_subset_A x num_subset_B entries at a time.
        Returns a numpy int array of length A.shape[0].
        """
        def gen_start_end(num_set, num_subset):
            # Yield (start, end) pairs tiling [0, num_set) in chunks of
            # size num_subset; the last chunk may be shorter.
            len_gen = (num_set + num_subset - 1) // num_subset
            for i in range(len_gen):
                start_idx = i * num_subset
                end_idx = min((i + 1) * num_subset, num_set)
                yield start_idx, end_idx

        num_set_A, num_set_B = self.A.shape[0], self.B.shape[0]

        C = torch.zeros((num_set_A, num_set_B), dtype=torch.float32)
        for A_start_idx, A_end_idx in gen_start_end(num_set_A, num_subset_A):
            A_sub = self.A[A_start_idx:A_end_idx]
            # Bug fix: the B chunk generator must be re-created for every
            # A chunk.  The original built it once, so it was exhausted
            # after the first A chunk and every remaining row of C stayed
            # all-zero, silently corrupting the argmin below.
            for B_start_idx, B_end_idx in gen_start_end(num_set_B, num_subset_B):
                B_sub = self.B[B_start_idx:B_end_idx]
                C_sub = self.compute_distances_tensor(A_sub, B_sub, self.device)
                C[A_start_idx:A_end_idx, B_start_idx:B_end_idx] = C_sub.detach().cpu()

        nearest_idx = C.argmin(dim=1).numpy()
        return nearest_idx
       
class imagenet_dataset(Dataset):
    """ImageNet validation split restricted to the first `num_class` classes.

    Reads `<root_dir>imagenet/val.txt` ("filename label" per line) and serves
    images from `<root_dir>imagenet/val/`.
    """

    def __init__(self, root_dir, transform, num_class):
        self.root = root_dir+'imagenet/'
        self.transform = transform

        self.val_imgs = []
        self.val_labels = {}
        with open(self.root+'val.txt') as f:
            for line in f:
                img, target = line.split()
                label = int(target)
                # Keep only samples belonging to the first num_class classes.
                if label < num_class:
                    self.val_imgs.append(img)
                    self.val_labels[img] = label
        # Optional random subsampling for quick debugging runs.
        if IS_SUBSET:
            self.val_imgs = random.sample(self.val_imgs, k=min(NUM_SUBSET, len(self.val_imgs)))

    def __getitem__(self, index):
        """Return (transformed image tensor, int label) for sample `index`."""
        img_path = self.val_imgs[index]
        label = self.val_labels[img_path]
        image = Image.open(self.root+'val/'+img_path).convert('RGB')
        return self.transform(image), label

    def __len__(self):
        return len(self.val_imgs)

class webvision_dataset(Dataset): 
    """WebVision dataset supporting DivideMix-style training modes.

    mode:
        'test'      -- validation images listed in info/val_filelist.txt
        'all'       -- the full (noisy) Google training split
        'labeled'   -- training samples flagged clean by `pred`
        'unlabeled' -- the complement of 'labeled'
        'bpl'       -- full training split; the extra neighbor/reliability
                       structures are built afterwards by create_bpl()
    """

    def __init__(self, root_dir, transform, mode, num_class, 
                 pred=[], probability=[], 
                 log=''): 
        '''
            root_dir: dataset root (contains info/ and the image folders)
            transform: transform applied to each PIL image in __getitem__
            mode: one of 'test', 'all', 'labeled', 'unlabeled', 'bpl'
            num_class: keep only samples whose label is < num_class
            pred, probability: clean-sample mask and clean probabilities
                for the 'labeled'/'unlabeled'/'bpl' modes
                (pred is assumed to be a numpy 0/1 array -- .nonzero() and
                .sum() are called on it; TODO confirm against caller)
            log: writable log file object (only used in 'labeled' mode)
        '''
        self.root = root_dir
        self.transform = transform
        self.mode = mode  
     
        if self.mode=='test':
            # Parse "filename label" lines, keeping the first num_class classes.
            with open(self.root+'info/val_filelist.txt') as f:
                lines=f.readlines()
            self.val_imgs = []
            self.val_labels = {}
            for line in lines:
                img, target = line.split()
                target = int(target)
                if target<num_class:
                    self.val_imgs.append(img)
                    self.val_labels[img]=target
            if IS_SUBSET:
                # Optional random subsampling for quick debugging runs.
                self.val_imgs = random.sample(self.val_imgs, k=min(NUM_SUBSET, len(self.val_imgs)))
        else:    
            with open(self.root+'info/train_filelist_google.txt') as f:
                lines=f.readlines()    
            train_imgs = []
            self.train_labels = {}
            for line in lines:
                img, target = line.split()
                target = int(target)
                if target<num_class:
                    train_imgs.append(img)
                    self.train_labels[img]=target
            if IS_SUBSET:
                train_imgs = random.sample(train_imgs, k=min(NUM_SUBSET, len(train_imgs)))
            if self.mode == 'all':
                self.train_imgs = train_imgs
            else:                   
                if self.mode == "labeled":
                    # Keep only the samples predicted clean, with their
                    # corresponding clean probabilities.
                    pred_idx = pred.nonzero()[0]
                    self.train_imgs = [train_imgs[i] for i in pred_idx]                
                    self.probability = [probability[i] for i in pred_idx]            
                    print("%s data has a size of %d"%(self.mode,len(self.train_imgs)))            
                    log.write('Numer of labeled samples:%d \n'%(pred.sum()))
                    log.flush()                          
                elif self.mode == "unlabeled":
                    # Complement of the clean mask.
                    pred_idx = (1-pred).nonzero()[0]                                               
                    self.train_imgs = [train_imgs[i] for i in pred_idx]                           
                    print("%s data has a size of %d"%(self.mode,len(self.train_imgs)))
                elif self.mode == "bpl":
                    self.pred_clean = pred
                    self.probability_clean = probability
                    # labeled and unlabeled
                    self.labeled_idx, self.unlabeled_idx = self.pred_clean.nonzero()[0], (1 - self.pred_clean).nonzero()[0]
                    self.train_imgs = train_imgs

    def create_bpl(self, 
                   pred=[], probability=[], 
                   embeddings=None,
                   pred_reliable=[], probability_reliable=[],
                   min_ratio_reliable=0.1, max_ratio_reliable=0.8,
                   knn_device=None,
                   log=None,
                   *args, **kargs):
        """Build the index structures consumed by the 'bpl' __getitem__ branch.

        Steps (all on the full training split of len(self.train_imgs) samples):
          1. Split indices into labeled/unlabeled (from `pred`) and
             reliable/unreliable (from `pred_reliable`), re-thresholding
             `probability_reliable` so that the reliable fraction lies in
             [min_ratio_reliable, max_ratio_reliable].
          2. For every reliable sample, find its nearest *unreliable*
             neighbor in embedding space (chunked KNN on `knn_device`).
          3. For every sample, find its nearest neighbor over the whole
             dataset; for reliable samples that neighbor is replaced by the
             paired unreliable sample from step 2.
          4. Pad the unlabeled arrays up to the labeled length so that one
             __getitem__ index addresses one labeled + one unlabeled sample.

        pred / pred_reliable are assumed to be numpy 0/1 arrays and
        probability / probability_reliable numpy float arrays
        (np.partition and .nonzero() are called on them) -- TODO confirm.
        `log` and *args/**kargs are currently unused here.
        """

        self.embeddings = embeddings
        self.pred_reliable = pred_reliable
        self.probability_reliable = probability_reliable
        self.knn_device = knn_device
        self.pred_clean = pred
        self.probability_clean = probability

        # labeled and unlabeled
        self.labeled_idx, self.unlabeled_idx = self.pred_clean.nonzero()[0], (1 - self.pred_clean).nonzero()[0]

        # reliable and unreliable
        n_samples = len(self.train_imgs)
        self.all_idx = [ i for i in range(n_samples)]
        self.reliable_idx = self.pred_reliable.nonzero()[0].tolist()
        kth_min, kth_max = int(n_samples * min_ratio_reliable), int(n_samples * max_ratio_reliable)
        # If the mask selects too few reliable samples, lower the bar: take the
        # kth_min highest probabilities (partition threshold) instead.
        if len(self.reliable_idx) < kth_min:
            threshold = np.partition(self.probability_reliable, kth=n_samples - kth_min)[n_samples - kth_min]
            self.pred_reliable = (self.probability_reliable > threshold)
            self.reliable_idx = self.pred_reliable.nonzero()[0].tolist()
        # Symmetrically, cap the reliable set at kth_max samples.
        if len(self.reliable_idx) > kth_max:
            threshold = np.partition(self.probability_reliable, kth=n_samples - kth_max)[n_samples - kth_max]
            self.pred_reliable = (self.probability_reliable > threshold)
            self.reliable_idx = self.pred_reliable.nonzero()[0].tolist()

        self.unreliable_idx = list(set(self.all_idx) - set(self.reliable_idx))

        self.unreliable_embeddings = self.embeddings[self.unreliable_idx,:]

        self.reliable_embeddings = self.embeddings[self.reliable_idx,:]

        print("reliable num: {}".format(len(self.reliable_embeddings)))

        # + KNN: find the unreliable sample nearest to each reliable one (GPU version)
        t1 = time.time()
        with torch.no_grad():
            solver = KNN_solver(self.reliable_embeddings, self.unreliable_embeddings, knn_device)
            neighbor_idx = solver.nearest_neighbor(5000, 5000)
        print("Nearest Neighbor Module Use {} seconds. ".format(time.time()-t1))
        # - KNN: find the unreliable sample nearest to each reliable one (GPU version)

        # Map chunk-local neighbor positions back to dataset-wide indices;
        # unreliable_idx[i] is now the unreliable partner of reliable_idx[i].
        unreliable_sub_idx = [ self.unreliable_idx[i] for i in neighbor_idx ]
        self.unreliable_idx = np.array(unreliable_sub_idx, dtype=np.int64)
        self.reliable_idx = np.array(self.reliable_idx, dtype=np.int64)

        # + KNN: nearest neighbor of every sample over the whole dataset
        t1 = time.time()
        with torch.no_grad():
            solver = KNN_solver(self.embeddings, self.embeddings, knn_device)
            neighbor_idx = solver.nearest_neighbor()
        print("Nearest Neighbor Module Use {} seconds. ".format(time.time()-t1))
        # - KNN: nearest neighbor of every sample over the whole dataset

        self.nn_idx = np.array([ i for i in neighbor_idx ], dtype=np.int64)
        self.labeled_nn_idx, self.unlabeled_nn_idx = self.nn_idx[self.labeled_idx], self.nn_idx[self.unlabeled_idx]
        # 0/1 masks aligned with labeled_idx / unlabeled_idx (zeros_like keeps
        # the integer dtype of the index arrays).
        self.labeled_reliable_mask = np.zeros_like(self.labeled_idx)
        self.unlabeled_reliable_mask = np.zeros_like(self.unlabeled_idx)
        self.labeled_unreliable_mask = np.zeros_like(self.labeled_idx)
        self.unlabeled_unreliable_mask = np.zeros_like(self.unlabeled_idx)
        # For reliable entries of labeled_idx, replace the corresponding
        # labeled_nn_idx with the paired unreliable index.
        for i in range(len(self.labeled_idx)):
            labeled_index = self.labeled_idx[i]
            if np.isin(labeled_index, self.reliable_idx):
                pos = np.where(self.reliable_idx == labeled_index)
                self.labeled_nn_idx[i] = self.unreliable_idx[pos]
                self.labeled_reliable_mask[i] = 1
            if np.isin(labeled_index, self.unreliable_idx):
                self.labeled_unreliable_mask[i] = 1
        # For reliable entries of unlabeled_idx, replace the corresponding
        # unlabeled_nn_idx with the paired unreliable index.
        for i in range(len(self.unlabeled_idx)):
            unlabeled_index = self.unlabeled_idx[i]
            if np.isin(unlabeled_index, self.reliable_idx):
                pos = np.where(self.reliable_idx == unlabeled_index)
                self.unlabeled_nn_idx[i] = self.unreliable_idx[pos]
                self.unlabeled_reliable_mask[i] = 1
            if np.isin(unlabeled_index, self.unreliable_idx):
                self.unlabeled_unreliable_mask[i] = 1

        # Pad unlabeled_idx (and its parallel arrays) up to len(labeled_idx)
        # by resampling, so a single __getitem__ index works for both.
        labeled_len = len(self.labeled_idx)
        unlabeled_len = len(self.unlabeled_idx)
        if labeled_len > unlabeled_len:
            diff_len = labeled_len - unlabeled_len
            unlabeled_len_idx = [ i for i in range(len(self.unlabeled_idx))]
            com_idx = [ random.choice(unlabeled_len_idx) for _ in range(diff_len)]
            self.unlabeled_idx, self.unlabeled_nn_idx, self.unlabeled_reliable_mask, self.unlabeled_unreliable_mask = \
                map(lambda x: np.concatenate((x, x[com_idx]), axis=0),
                    (self.unlabeled_idx, self.unlabeled_nn_idx, self.unlabeled_reliable_mask, self.unlabeled_unreliable_mask))

        # Sanity print: all labeled-side arrays should share one length, and
        # all unlabeled-side arrays another (equal after padding above).
        print(len(self.labeled_idx),
              len(self.labeled_nn_idx),
              len(self.labeled_reliable_mask),
              len(self.labeled_unreliable_mask),
              len(self.unlabeled_idx),
              len(self.unlabeled_nn_idx),
              len(self.unlabeled_reliable_mask),
              len(self.unlabeled_unreliable_mask))


                    
    def __getitem__(self, index):
        """Return a mode-dependent tuple for sample `index`.

        'labeled'   -> (img1, img2, target, prob)       two augmented views
        'unlabeled' -> (img1, img2)                     two augmented views
        'all'       -> (img, target, index)
        'test'      -> (img, target)                    from val_images_256/
        'bpl'       -> 12-tuple pairing one labeled and one unlabeled sample
                       plus their (possibly substituted) nearest neighbors
                       and reliability masks; `index` addresses labeled_idx.
        """
        if self.mode=='labeled':
            img_path = self.train_imgs[index]
            target = self.train_labels[img_path] 
            prob = self.probability[index]
            image = Image.open(self.root+img_path).convert('RGB')    
            img1 = self.transform(image) 
            img2 = self.transform(image) 
            return img1, img2, target, prob              
        elif self.mode=='unlabeled':
            img_path = self.train_imgs[index]
            image = Image.open(self.root+img_path).convert('RGB')    
            img1 = self.transform(image) 
            img2 = self.transform(image) 
            return img1, img2  
        elif self.mode=='all':
            img_path = self.train_imgs[index]
            target = self.train_labels[img_path]     
            image = Image.open(self.root+img_path).convert('RGB')   
            img = self.transform(image)
            return img, target, index        
        elif self.mode=='test':
            img_path = self.val_imgs[index]
            target = self.val_labels[img_path]     
            image = Image.open(self.root+'val_images_256/'+img_path).convert('RGB')   
            img = self.transform(image) 
            return img, target
        elif self.mode == 'bpl':
            # `index` simultaneously addresses the labeled arrays and the
            # (padded) unlabeled arrays -- see create_bpl.
            labeled_index, unlabeled_index, labeled_nn_index, unlabeled_nn_index = \
                map(lambda x: x[index], (self.labeled_idx, self.unlabeled_idx, self.labeled_nn_idx, self.unlabeled_nn_idx))
            labeled_image, unlabeled_image, labeled_nn_image, unlabeled_nn_image = \
                map(lambda x: Image.open(self.root+self.train_imgs[x]).convert('RGB'), 
                    (labeled_index, unlabeled_index, labeled_nn_index, unlabeled_nn_index))
            # Two independent augmentations per main image, one per neighbor.
            (labeled_img1, labeled_img2), (unlabeled_img1, unlabeled_img2) = \
                map(lambda x: (self.transform(x), self.transform(x)), (labeled_image, unlabeled_image))

            labeled_nn_img, unlabeled_nn_img = map(lambda x: self.transform(x), (labeled_nn_image, unlabeled_nn_image))

            target = self.train_labels[self.train_imgs[labeled_index]] 
            prob = self.probability_clean[labeled_index]
            labeled_reliable_mask, unlabeled_reliable_mask = \
                self.labeled_reliable_mask[index], self.unlabeled_reliable_mask[index]
            labeled_unreliable_mask, unlabeled_unreliable_mask = \
                self.labeled_unreliable_mask[index], self.unlabeled_unreliable_mask[index]
            return \
                labeled_img1, labeled_img2, target, prob, \
                unlabeled_img1, unlabeled_img2, \
                labeled_nn_img, unlabeled_nn_img, \
                labeled_reliable_mask, unlabeled_reliable_mask, \
                labeled_unreliable_mask, unlabeled_unreliable_mask
        
    def __len__(self):
        # 'bpl' iterates over labeled/unlabeled pairs, hence labeled_idx.
        if self.mode!='test':
            if self.mode == 'bpl':
                return len(self.labeled_idx)
            return len(self.train_imgs)
        else:
            return len(self.val_imgs)    


class webvision_dataloader():  
    """Factory producing the DataLoaders used by the training loop.

    Modes returned by run(): 'warmup', 'train' (labeled + unlabeled pair),
    'bpl', 'test', 'eval_train', 'imagenet'.
    """

    def __init__(self, batch_size, num_class, num_workers, root_dir, log):
        """
        batch_size: per-loader batch size ('eval_train' uses half of it)
        num_class: number of classes kept by the underlying datasets
        num_workers: DataLoader worker count
        root_dir: dataset root directory
        log: writable log file object, forwarded to the datasets
        """
        self.batch_size = batch_size
        self.num_class = num_class
        self.num_workers = num_workers
        self.root_dir = root_dir
        self.log = log

        # Standard Inception-resolution augmentation (299 crop) with
        # ImageNet normalization statistics.
        self.transform_train = transforms.Compose([
                transforms.Resize(320),
                transforms.RandomResizedCrop(299),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize((0.485, 0.456, 0.406),(0.229, 0.224, 0.225)),
            ]) 
        self.transform_test = transforms.Compose([
                transforms.Resize(320),
                transforms.CenterCrop(299),
                transforms.ToTensor(),
                transforms.Normalize((0.485, 0.456, 0.406),(0.229, 0.224, 0.225)),
            ])  
        self.transform_imagenet = transforms.Compose([
                transforms.Resize(320),
                transforms.CenterCrop(299),
                transforms.ToTensor(),
                transforms.Normalize((0.485, 0.456, 0.406),(0.229, 0.224, 0.225)),
            ])         

    def run(self, mode, \
            pred=[],prob=[],
            embeddings=None,
            pred_reliable=[], prob_reliable=[],
            min_ratio_reliable=0.1, max_ratio_reliable=0.8,
            knn_device=None,):
        """Build and return the DataLoader(s) for `mode`.

        pred/prob: clean mask and probabilities ('train' and 'bpl' modes).
        embeddings, pred_reliable/prob_reliable, min/max_ratio_reliable,
        knn_device: forwarded to webvision_dataset.create_bpl ('bpl' only).
        Returns a single DataLoader, except 'train' which returns the
        (labeled_trainloader, unlabeled_trainloader) pair.
        """
        if mode=='warmup':
            all_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_train, mode="all", num_class=self.num_class)                
            trainloader = DataLoader(
                dataset=all_dataset, 
                batch_size=self.batch_size,
                shuffle=True,
                num_workers=self.num_workers,
                pin_memory=True)                 
            return trainloader
                                     
        elif mode=='train':
            labeled_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_train, mode="labeled",num_class=self.num_class,pred=pred,probability=prob,log=self.log)              
            labeled_trainloader = DataLoader(
                dataset=labeled_dataset, 
                batch_size=self.batch_size,
                shuffle=True,
                num_workers=self.num_workers,
                pin_memory=True)        
            
            unlabeled_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_train, mode="unlabeled",num_class=self.num_class,pred=pred,log=self.log)                    
            unlabeled_trainloader = DataLoader(
                dataset=unlabeled_dataset, 
                batch_size=self.batch_size,
                shuffle=True,
                num_workers=self.num_workers,
                pin_memory=True)     
            return labeled_trainloader, unlabeled_trainloader
        
        elif mode=='bpl':
            train_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_train, mode="bpl", num_class=self.num_class, pred=pred, probability=prob, log=self.log)
            # Bug fix: forward the reliable-ratio bounds. Previously run()
            # accepted min/max_ratio_reliable but never passed them on, so
            # create_bpl always fell back to its own defaults.
            train_dataset.create_bpl(
                pred, prob,
                embeddings,
                pred_reliable, prob_reliable,
                min_ratio_reliable=min_ratio_reliable,
                max_ratio_reliable=max_ratio_reliable,
                knn_device=knn_device,
                log=self.log
            )
            trainloader = DataLoader(
                dataset=train_dataset, 
                batch_size=self.batch_size,
                shuffle=True,
                num_workers=self.num_workers,
                pin_memory=True
            )
            return trainloader
        elif mode=='test':
            test_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_test, mode='test', num_class=self.num_class)      
            test_loader = DataLoader(
                dataset=test_dataset, 
                batch_size=self.batch_size,
                shuffle=False,
                num_workers=self.num_workers,
                pin_memory=True)               
            return test_loader
        
        elif mode=='eval_train':
            eval_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_test, mode='all', num_class=self.num_class)      
            eval_loader = DataLoader(
                dataset=eval_dataset, 
                # Half batch size: eval-time forward passes keep extra
                # per-sample state, so the batch is reduced.
                batch_size=self.batch_size // 2,
                shuffle=False,
                num_workers=self.num_workers,
                pin_memory=True)               
            return eval_loader     
        
        elif mode=='imagenet':
            imagenet_val = imagenet_dataset(root_dir=self.root_dir, transform=self.transform_imagenet, num_class=self.num_class)      
            imagenet_loader = DataLoader(
                dataset=imagenet_val, 
                batch_size=self.batch_size,
                shuffle=False,
                num_workers=self.num_workers,
                pin_memory=True)               
            return imagenet_loader     

