'''
Author: devis.dong
Email: devis.dong@gmail.com
Date: 2022-01-21 09:25:49
LastEditTime: 2022-11-18 01:03:36
LastEditors: devis.dong
FilePath: \retrieval-3d\datasets.py
Description:
'''

from cProfile import label
from time import sleep
from myutils import *
import torch
import random
import math
import numpy as np
from torch.utils.data import DataLoader, Dataset

def read_datafile(txt_file):
    """Read one sample per line from a whitespace-separated data file.

    Each line is: ``<pt_0> <pt_1> ... <pt_K> <label>`` where every point is a
    comma-separated tuple of floats and the final field is an integer class
    label.

    Args:
        txt_file: path of the text file to read.

    Returns:
        (data, label): list of preprocessed point sets and list of int labels.
    """
    data, label = [], []
    logger.info('reading file %s ...' % txt_file)
    with open(txt_file, 'r') as f:
        # Iterate the file object lazily instead of materializing the whole
        # file with readlines().
        for line in f:
            fields = line.strip().split(' ')
            label.append(int(fields[-1]))
            x = [list(map(float, item.split(','))) for item in fields[0:-1]]
            data.append(preprocess_data(x))
    logger.info('reading file %s done.' % txt_file)
    return data, label

def read_datafiles(data_root, listfile):
    """Read many point-cloud files named by a list file.

    Each line of ``listfile`` is ``<relative_path> <label>``; every named file
    is loaded as comma-separated floats, truncated to its first 3 columns
    (x, y, z) and preprocessed.

    Args:
        data_root: directory the relative paths in ``listfile`` are joined to.
        listfile: path of the list file.

    Returns:
        (points, labels): nested lists shaped [B, N, 3] and [B,].
    """
    logger.info('reading files list from %s ....' % listfile)

    points, labels = [], []  # [B, N, 3], [B,]
    with open(listfile, 'r') as f:
        lines = f.readlines()
        N = len(lines)
        # enumerate from 1 so the progress log reads 1/N .. N/N
        # (the original printed 0/N .. N-1/N).
        for i, line in enumerate(lines, start=1):
            file_label = line.strip().split(' ')
            shapefile = os.path.join(data_root, file_label[0])
            logger.info("%i/%i, reading points file %s ..." % (i, N, shapefile))
            data = np.loadtxt(shapefile, dtype=np.float32, delimiter=',')[:, :3]
            data = preprocess_data(data)
            points.append(data.tolist())
            labels.append(int(file_label[1]))

    logger.info('reading files list from %s done.' % listfile)
    return points, labels

def normalize_data(data):
    """Normalize the data: center the cloud at the origin and scale it so the
    farthest point lies on the unit sphere.

    Input:
        NxC array
    Output:
        NxC array

    Robustness fix: a degenerate cloud (single point, or all points identical)
    previously divided by zero and produced NaNs; it now returns the centered
    (all-zero) cloud unchanged.
    """
    centroid = np.mean(data, axis=0)
    normal_data = data - centroid
    m = np.max(np.sqrt(np.sum(normal_data ** 2, axis=1)))
    if m > 0:  # guard against division by zero on degenerate input
        normal_data = normal_data / m
    return normal_data

def preprocess_data(data: np.ndarray):
    """Run the preprocessing pipeline on one point set.

    data: [N, C] array-like of point coordinates.
    Currently this is just recentering/rescaling via normalize_data.
    """
    return normalize_data(data)

def rotate_randomly(x: torch.Tensor):
    """Rotate every cloud in the batch by one random rotation.

    A random axis is excluded, then the batch is rotated about the two
    remaining axes by independent random angles in [0, pi).

    Args:
        x: point tensor of shape [B, N, 3]; lower-rank input is unsqueezed
           to 3 dims first.

    Returns:
        Rotated tensor of shape [B, N, 3], same dtype and device as x.
    """
    while x.dim() < 3:
        x = x.unsqueeze(0)
    B = x.shape[0]
    # Draw all random values up front so the RNG consumption matches the
    # original implementation regardless of which axes end up being used.
    ax = random.randint(0, 2)  # the axis whose rotation is skipped
    alpha = random.random() * math.pi
    beta = random.random() * math.pi
    theta = random.random() * math.pi

    def _rx():
        # rotation about the x axis
        c, s = math.cos(alpha), math.sin(alpha)
        return torch.tensor([[1.0, 0.0, 0.0],
                             [0.0, c, -s],
                             [0.0, s, c]], dtype=x.dtype, device=x.device)

    def _ry():
        # rotation about the y axis (same sign convention as the original)
        c, s = math.cos(beta), math.sin(beta)
        return torch.tensor([[c, 0.0, -s],
                             [0.0, 1.0, 0.0],
                             [s, 0.0, c]], dtype=x.dtype, device=x.device)

    def _rz():
        # rotation about the z axis
        c, s = math.cos(theta), math.sin(theta)
        return torch.tensor([[c, -s, 0.0],
                             [s, c, 0.0],
                             [0.0, 0.0, 1.0]], dtype=x.dtype, device=x.device)

    # Build only the two matrices actually applied (the original built all
    # three and copied each B times with repeat); expand() broadcasts the
    # [3, 3] matrix across the batch without allocating B copies, and
    # dtype=x.dtype lets float64 inputs work (the original hard-coded the
    # default float32 and bmm would fail on a float64 x).
    if ax == 0:
        mats = (_ry(), _rz())
    elif ax == 1:
        mats = (_rx(), _rz())
    else:
        mats = (_rx(), _ry())
    for R in mats:
        x = torch.bmm(x, R.unsqueeze(0).expand(B, 3, 3))
    return x

def corrcoef(x: torch.Tensor):
    """Return the Pearson correlation-coefficient matrix of x.

    Args:
        x: [m, n] tensor — rows are observations, columns are variables.

    Returns:
        [n, n] tensor of pairwise correlation coefficients.

    Bug fix: the original computed the matrix but never returned it, so the
    function always returned None.
    """
    m = x.shape[0]
    f = (m - 1) / m  # scales the unbiased variance down to the biased one
    x_reducemean = x - torch.mean(x, axis=0)
    # biased covariance matrix (divides by m)
    numerator = torch.matmul(x_reducemean.T, x_reducemean) / m
    # torch.var is unbiased by default, hence the factor f in the denominator
    var_ = x.var(axis=0).reshape(x.shape[1], 1)
    denominator = torch.sqrt(torch.matmul(var_, var_.T)) * f
    return numerator / denominator

class GeneralDataset(Dataset):
    """Map-style dataset of (points, label) samples loaded from one text file."""

    def __init__(self, data_file):
        self.data, self.label = read_datafile(data_file)

    def __len__(self):
        return len(self.label)

    def __getitem__(self, idx):
        sample = torch.tensor(self.data[idx], dtype=torch.float32)
        target = torch.tensor(self.label[idx], dtype=torch.long)
        return sample, target

    def channels(self):
        """Number of coordinate channels per point (C in [N, C])."""
        return np.shape(self.data[0])[-1]

    def cls_num(self):
        """Number of classes, assuming labels run 0..max."""
        return np.max(self.label) + 1

class GeneralCollater():
    """collate_fn turning [(data, label), ...] into a padded batch.

    Every sample's data has shape (N_i, C) with a common C but varying N_i;
    each sample is preprocessed and then zero-padded up to the longest N in
    the batch before being stacked into a single tensor.
    """

    def __init__(self):
        self.params = None

    def __call__(self, data):
        # longest point count in this batch (0 for an empty batch)
        longest = max((len(sample[0]) for sample in data), default=0)
        data_list, label_list = [], []
        for sample, target in data:
            sample = preprocess_data(sample)
            n_pts, n_ch = np.shape(sample)
            if n_pts < longest:  # zero-fill up to the batch maximum
                pad = np.zeros((longest - n_pts, n_ch))
                sample = np.concatenate((sample, pad), axis=0).tolist()
            data_list.append(sample)
            label_list.append(target)
        data_tensor = torch.tensor(data_list, dtype=torch.float32)
        label_tensor = torch.tensor(label_list, dtype=torch.long)
        return data_tensor, label_tensor

class GeneralDataLoader(DataLoader):
    """DataLoader preconfigured with the padding collater GeneralCollater.

    Defect fixed: the default collate_fn was a single GeneralCollater()
    instance constructed once at class-definition time and shared by every
    loader (mutable-default-argument anti-pattern); a fresh collater is now
    built per loader via a None sentinel. Passing an explicit collate_fn
    behaves exactly as before.
    """

    def __init__(self, dataset, batch_size=32, shuffle=True, collate_fn=None) -> None:
        if collate_fn is None:
            collate_fn = GeneralCollater()
        super().__init__(dataset, batch_size=batch_size, shuffle=shuffle, collate_fn=collate_fn)

class SiameseCollater():
    """collate_fn for Siamese pairs: [(x0, x1, lb), ...] -> three batch tensors.

    The two sides of each pair are preprocessed and zero-padded
    independently, each up to the longest sample on its own side.
    """

    def __init__(self):
        self.params = None

    def __call__(self, data):
        pad_to0 = max((len(a) for a, _, _ in data), default=0)
        pad_to1 = max((len(b) for _, b, _ in data), default=0)
        x0_list, x1_list, lb_list = [], [], []
        for a, b, lb in data:
            a = preprocess_data(a)
            b = preprocess_data(b)
            n0, ch = np.shape(a)
            n1, ch = np.shape(b)
            # zero-fill each side up to its own batch maximum
            if n0 < pad_to0:
                a = np.concatenate((a, np.zeros((pad_to0 - n0, ch))), axis=0).tolist()
            if n1 < pad_to1:
                b = np.concatenate((b, np.zeros((pad_to1 - n1, ch))), axis=0).tolist()
            x0_list.append(a)
            x1_list.append(b)
            lb_list.append(lb)
        x0_tensor = torch.tensor(x0_list, dtype=torch.float32)
        x1_tensor = torch.tensor(x1_list, dtype=torch.float32)
        lb_tensor = torch.tensor(lb_list, dtype=torch.long)
        return x0_tensor, x1_tensor, lb_tensor

class SiameseDataset(Dataset):
    """Dataset yielding (anchor, other, same_flag) pairs for Siamese training."""

    def __init__(self, data_file, negative=4):
        self.data, self.label = read_datafile(data_file)
        self.negative = negative  # ratio of different-class to same-class pairs

    def __len__(self):
        return len(self.label)

    def __getitem__(self, index):
        n = self.__len__()
        # "same class" is chosen with probability 1/(negative+1)
        same = 1 if random.randint(0, self.negative) == 0 else 0
        partner = random.randint(0, n - 1)
        if same:  # resample until the partner shares the anchor's class
            while self.label[index] != self.label[partner]:
                partner = random.randint(0, n - 1)
        else:  # resample until the partner has a different class
            while self.label[index] == self.label[partner]:
                partner = random.randint(0, n - 1)

        anchor = torch.tensor(self.data[index], dtype=torch.float32)
        other = torch.tensor(self.data[partner], dtype=torch.float32)
        flag = torch.tensor(same, dtype=torch.long)
        return anchor, other, flag

    def channels(self):
        """Number of coordinate channels per point (C in [N, C])."""
        return np.shape(self.data[0])[-1]

class SiameseDataLoader(DataLoader):
    """DataLoader preconfigured with the pair-padding collater SiameseCollater.

    Defect fixed: the default collate_fn was a single SiameseCollater()
    instance constructed once at class-definition time and shared by every
    loader (mutable-default-argument anti-pattern); a fresh collater is now
    built per loader via a None sentinel. Passing an explicit collate_fn
    behaves exactly as before.
    """

    def __init__(self, dataset, batch_size=32, shuffle=True, collate_fn=None) -> None:
        if collate_fn is None:
            collate_fn = SiameseCollater()
        super().__init__(dataset, batch_size=batch_size, shuffle=shuffle, collate_fn=collate_fn)

