

import random
import numpy as np
import torch as pt

from torch.utils.data import Dataset, Sampler, DataLoader

class trainSet(Dataset):
    """Dataset over per-matrix training features.

    Each sample bundles the matrix name, its density, a manual feature
    vector, and the nonzero count, together with its own index.
    """

    def __init__(self, sparse_name_list, density, manual_feature, nnz_num):
        self.sparse_name_list = sparse_name_list
        self.density = density
        self.manual_feature = manual_feature
        self.nnz_num = nnz_num
        # Dataset length follows the manual-feature container.
        self.size = len(manual_feature)

    def __getitem__(self, idx):
        """Return the idx-th sample as a dict (index included)."""
        return {
            'sparse_name': self.sparse_name_list[idx],
            'density': self.density[idx],
            'manual_feature': self.manual_feature[idx],
            'nnz_num': self.nnz_num[idx],
            'idx': idx,
        }

    def __len__(self):
        return self.size


class Dataset_Buffer(Dataset):
    """Thin Dataset wrapper around an in-memory data buffer."""

    def __init__(self, data_buffer):
        self.data_buffer = data_buffer
        self.size = len(data_buffer)

    def __getitem__(self, idx):
        # Entries are returned exactly as stored.
        return self.data_buffer[idx]

    def __len__(self):
        return self.size
    
    
class Dataset_Buffer_load(Dataset):
    """Buffer Dataset that joins each sample with its source matrix.

    Position 6 of each ``data_buffer`` entry is an index into
    ``data_buffer_matrix``; ``__getitem__`` returns the sample paired
    with that matrix.
    """

    def __init__(self, data_buffer, data_buffer_matrix):
        self.data_buffer = data_buffer
        self.data_buffer_matrix = data_buffer_matrix
        self.size = len(data_buffer)

    def __getitem__(self, idx):
        sample = self.data_buffer[idx]
        # Field 6 of the sample addresses its associated matrix.
        matrix = self.data_buffer_matrix[sample[6]]
        return [sample, matrix]

    def __len__(self):
        return self.size
    
    
    
    
def collate_buffer_fn(mini_batch):
    """Transpose a batch of 5-field samples into five per-field lists.

    Fields by position: state, mcts_probs, mcts_value_probs, winner, move.
    Returns them as a list in that order.
    """
    # Gather each positional field across the whole mini-batch.
    return [[sample[field] for sample in mini_batch] for field in range(5)]

def collate_buffer_fn_load(mini_batch):
    """Collate ``[sample, matrix]`` pairs (as produced by Dataset_Buffer_load).

    The state entry pairs the matrix with the stored state (``sample[0]``);
    the remaining fields are taken straight from the stored sample.
    Returns [states, mcts_probs, mcts_value_probs, winners, moves].
    """
    states, probs, value_probs, winners, moves = [], [], [], [], []
    for sample, matrix in mini_batch:
        states.append([matrix, sample[0]])
        probs.append(sample[1])
        value_probs.append(sample[2])
        winners.append(sample[3])
        moves.append(sample[4])
    return [states, probs, value_probs, winners, moves]
    

    
    
# csr_store_length = cache['csr_store_length'][()]
# feature_buffer_all = cache['feature_buffer'][()]
# coordinate_buffer_all = cache['coordinate_buffer'][()]
# number_buffer_all = cache['number_buffer'][()]

# density_map_all = cache['density_map'][()]
# density_local_map_all = cache['density_local_map'][()]

class Voxel_trainSet(Dataset):
    """Dataset of sparse matrices stored as flattened per-matrix buffers.

    ``csr_store_length`` acts as a prefix-offset array (assumed length
    ``size + 1``): the buffers belonging to matrix ``idx`` occupy the
    half-open slice ``[csr_store_length[idx], csr_store_length[idx + 1])``
    of each flat buffer (feature/coordinate/number/density).
    """

    def __init__(self, csr_store_length, feature_buffer, coordinate_buffer, number_buffer,
                 density_buffer, nnz_num, sparse_name_list, csv_data,
                 mean_gflops_list, std_gflops_list):
        self.nnz_num = nnz_num
        self.sparse_name_list = sparse_name_list
        self.size = len(sparse_name_list)
        self.csv_data = csv_data

        self.csr_store_length = csr_store_length
        self.feature_buffer = feature_buffer
        self.coordinate_buffer = coordinate_buffer
        self.number_buffer = number_buffer
        self.density_buffer = density_buffer

        self.mean_gflops_list = mean_gflops_list
        self.std_gflops_list = std_gflops_list

    def __getitem__(self, idx):
        """Return scalar metadata plus the idx-th slice of every flat buffer."""
        # Offsets delimiting this matrix's region in the flat buffers.
        lo = self.csr_store_length[idx]
        hi = self.csr_store_length[idx + 1]

        return {
            'idx': idx,
            'sparse_name': self.sparse_name_list[idx],
            'mean_gflops': self.mean_gflops_list[idx],
            'std_gflops': self.std_gflops_list[idx],
            'csv_data': self.csv_data[idx],
            'nnz_num': self.nnz_num[idx],
            'feature_buffer': self.feature_buffer[lo:hi],
            'coordinate_buffer': self.coordinate_buffer[lo:hi],
            'number_buffer': self.number_buffer[lo:hi],
            'density_buffer': self.density_buffer[lo:hi],
        }

    def __len__(self):
        return self.size
    
def collate_fn(rets):
    """Identity collate for Voxel_trainSet: return the batch as-is.

    The per-matrix buffers have variable lengths, so they cannot be
    stacked by the default DataLoader collate; the batch stays a plain
    list of sample dicts and padding/tensorization is left to the consumer.

    NOTE(review): the original body carried ~65 lines of unreachable code
    after the return (an old padding/tensorizing collate wrapped in a
    triple-quoted string). It never executed and has been removed.
    """
    return rets
 
   
class FeedbackSampler(Sampler):
    """Infinite weighted index sampler whose weights adapt to feedback.

    Indices the model predicts correctly drift toward ``weight_min`` (drawn
    less often); mispredicted indices drift toward ``weight_max`` (drawn
    more often). ``__iter__`` yields forever; the consumer decides when to
    stop.
    """

    def __init__(self, nnz_number, chunk_size=32):
        """
        nnz_number: per-sample container; only its length is used here.
        chunk_size: how many indices to draw per weighted draw
                    (previously hard-coded to 32 inside __iter__).
        """
        self.nnz_number = nnz_number
        self.size = len(self.nnz_number)
        # float64 is exact for the integer weight range [1, 12] and, unlike
        # np.float128, is available on every platform (np.float128 does not
        # exist on Windows and some ARM builds, crashing at construction).
        self.weight = np.ones(self.size, dtype=np.float64) * 6
        self.weight_max = 12
        self.weight_min = 1
        self.chunk_size = chunk_size

    def __iter__(self):
        indices = list(range(self.size))
        while True:  # endless stream of indices
            drawn = random.choices(indices, weights=self.weight, k=self.chunk_size)
            yield from drawn

    def __len__(self):
        return self.size

    def update(self, index, reward):
        """Adjust weights from paired batches of indices and rewards.

        index/reward: iterables of array-like batches exposing ``.tolist()``
        (e.g. tensors). A non-negative reward means the prediction was right.
        """
        for idx_batch, rew_batch in zip(index, reward):
            for idx, rew in zip(idx_batch.tolist(), rew_batch.tolist()):
                if rew >= 0:
                    # correct prediction -> weight decays toward the floor
                    self.weight[idx] = max(self.weight[idx] - 1, self.weight_min)
                else:
                    # wrong prediction -> weight grows toward the cap
                    self.weight[idx] = min(self.weight[idx] + 1, self.weight_max)


class trainSet_databuffer(Dataset):
    """Dataset over a buffer of 5-field training records."""

    def __init__(self, databuffer):
        self.databuffer = databuffer
        self.size = len(databuffer)

    def __getitem__(self, idx):
        record = self.databuffer[idx]
        # Fields by position: state, mcts_probs, mcts_value_probs, winner, move.
        return record[0], record[1], record[2], record[3], record[4]

    def __len__(self):
        return self.size
    

    
