'''
Author: devis.dong
Email: devis.dong@gmail.com
Date: 2022-11-09 22:37:17
LastEditTime: 2022-12-14 04:15:30
LastEditors: devis.dong
FilePath: \20221210\sample.py
Description:
'''

import os
import cv2
import time
import torch
import random
import shutil
import json
import numpy as np
from mydata import read_datafiles, rotate_randomly


def k_nearest_sample(xyz, npoint):
    """
    Input:
        xyz: pointcloud data, [B, N, 3]
        npoint: number of samples
    Return:
        centroids: sampled pointcloud index, [B, npoint]
    """
    if torch.cuda.is_available():
        xyz = xyz.to("cuda")

    while xyz.dim() < 3:
        xyz = xyz.unsqueeze(0)

    device = xyz.device
    B, N, C = xyz.shape

    centroids = torch.zeros(B, npoint, dtype=torch.float).to(device)     # 采样点矩阵（B, npoint）
    distance = torch.ones(B, N, dtype=torch.float).to(device) * 1e10     # 采样点到所有点距离（B, N）

    batch_indices = torch.arange(B, dtype=torch.long).to(device)        # batch_size 数组

    nearest = torch.randint(0, N, (B,), dtype=torch.long).to(device)  # 初始时随机选择一点

    # # barycenter = torch.sum((xyz), 1)                                    #计算重心坐标 及 距离重心最远的点
    # # barycenter = barycenter/xyz.shape[1]
    # barycenter = torch.mean(xyz, dim=1, keepdim=True) #[B, 1, 3]
    # # barycenter = barycenter.view(B, 1, 3)

    # dist = torch.sum((xyz - barycenter) ** 2, -1, dtype=torch.float) # [B, N, 1]
    # nearest = torch.max(dist,1)[1]                                     #将距离重心最远的点作为第一个点

    centroid = xyz[batch_indices, nearest, :].view(B, 1, 3)        # 取出这个中心点的xyz坐标
    distance = torch.sum((xyz - centroid) ** 2, -1, dtype=torch.float)  # 计算所有点到中心点的距离 [B, N,]
    sorted_indices = torch.sort(distance, dim=1)[1] # [B, N]
    topn_indices = sorted_indices[:, :npoint] # [B, npoint]

    return topn_indices.cpu().int().tolist()


def farthest_point_sample(xyz, npoint):
    """Iterative farthest point sampling (FPS).

    Input:
        xyz: pointcloud data, [B, N, 3] (lower-rank input is unsqueezed to 3-D)
        npoint: number of samples
    Return:
        centroids: sampled pointcloud indices as a nested Python list, [B, npoint]

    The first sample is the point farthest from the cloud's barycenter, which
    makes the result deterministic (no RNG involved).
    """
    if torch.cuda.is_available():
        xyz = xyz.to("cuda")

    # Promote [N, 3] (or lower-rank) input to a batched [B, N, 3] tensor.
    while xyz.dim() < 3:
        xyz = xyz.unsqueeze(0)

    device = xyz.device
    B, N, C = xyz.shape

    # Index matrix of the selected samples (B, npoint); torch.long because it
    # holds indices (the original float tensor is lossy for indices > 2**24).
    centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
    # Running minimum squared distance from each point to the selected set (B, N).
    distance = torch.ones(B, N, dtype=torch.float).to(device) * 1e10

    batch_indices = torch.arange(B, dtype=torch.long).to(device)

    # Start from the point farthest from the barycenter of each cloud.
    barycenter = torch.mean(xyz, dim=1, keepdim=True)                    # [B, 1, 3]
    dist = torch.sum((xyz - barycenter) ** 2, -1, dtype=torch.float)     # [B, N]
    farthest = torch.max(dist, 1)[1]

    for i in range(npoint):
        centroids[:, i] = farthest                                       # record the i-th sample
        centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)         # its coordinates
        dist = torch.sum((xyz - centroid) ** 2, -1, dtype=torch.float)   # squared dist to it, [B, N]
        # Keep, per point, the minimum distance to any already-selected sample.
        mask = dist < distance
        distance[mask] = dist[mask]
        # Next sample: the point farthest from the whole selected set.
        farthest = torch.max(distance, -1)[1]

    return centroids.cpu().int().tolist()


def fps(data, npoints):
    """Return the `npoints` rows of `data` chosen by farthest point sampling.

    `data` is a single [N, C] array-like; only the first batch element of the
    sampler's output is used.
    """
    sampled_idx = farthest_point_sample(torch.tensor(data), npoints)[0]
    return data[sampled_idx, :]


def fileslist2pointdata(root, file_fileslist, file_pointdata):
    """Flatten every (points, label) sample into one text line and save them.

    Each output line is the space-joined list of comma-joined point
    coordinates (6 decimal places) followed by the sample's label.
    """
    points, labels = read_datafiles(root, file_fileslist)
    lines = []
    for sample, label in zip(points, labels):
        flat = ' '.join(','.join("%.6f" % coord for coord in pt) for pt in sample)
        lines.append(flat + ' ' + str(label) + '\n')
    with open(file_pointdata, "w") as fout:
        fout.writelines(lines)


def sample_data(npoints, filename_fileslist, batchsize, root_in, root_out, fsample=farthest_point_sample):
    """Two-stage re-sampling of the point cloud files listed in `filename_fileslist`.

    Loads `batchsize` files at a time from `root_in`, keeps the 7500 points
    nearest to a random seed (k_nearest_sample), farthest-point-samples those
    down to 2048 points, and writes each result under `root_out`, mirroring
    the first path component of the listed file.

    NOTE(review): the `npoints` and `fsample` parameters are accepted but
    never used — the 7500/2048 stage sizes and the two samplers are
    hard-coded below. Kept as-is to preserve existing behavior.
    """
    with open(filename_fileslist, 'r') as fi:
        shapefiles = fi.readlines()
        for i in range(0, len(shapefiles), batchsize):
            print("sampling files %i - %i" % (i, i+batchsize))
            files, datas = [], []  # [B, N, 6]
            for j in range(i, min(i+batchsize, len(shapefiles))):
                # Each list line is "<relative path> <label>"; keep the path.
                shapefile = shapefiles[j].strip().split(' ')[0]
                print("loading file %s" % shapefile)
                points = np.loadtxt(os.path.join(root_in, shapefile), delimiter=',')  # [N, 6]
                datas.append(points.tolist())
                files.append(shapefile)
            datas = np.array(datas)
            print("knearest sampling ....")
            centroids = k_nearest_sample(torch.tensor(datas[:, :, :3]), 7500)  # [B, npoints]
            print("knearest sampling done.")

            datas_knearest = []
            for k in range(len(centroids)):
                datas_knearest.append(datas[k, centroids[k], :3].tolist())
            datas_knearest = np.array(datas_knearest)

            print("farthest sampling ....")
            centroids = farthest_point_sample(torch.tensor(datas_knearest), 2048)  # [B, npoints]
            print("farthest sampling done.")

            for k in range(len(files)):
                print("saving file %s" % files[k])
                subdir = os.path.join(root_out, files[k].split('/')[0])
                # makedirs(exist_ok=True) avoids the exists()/mkdir() race and
                # also creates any missing parent directories.
                os.makedirs(subdir, exist_ok=True)
                np.savetxt(os.path.join(root_out, files[k]), datas_knearest[k, centroids[k], :], delimiter=',', fmt='%.6f')

def sample_data1(filename_fileslist, root_in, root_out, fsample=farthest_point_sample, npoints=2048, batchsize=40):
    """Single-stage re-sampling of the point cloud files listed in `filename_fileslist`.

    Loads `batchsize` files at a time from `root_in`, sub-samples the xyz
    columns of each cloud to `npoints` points with `fsample`, and writes each
    result under `root_out`, mirroring the first path component of the listed
    file. Note that the full 6-column rows of the sampled points are saved.
    """
    with open(filename_fileslist, 'r') as fi:
        shapefiles = fi.readlines()
        for i in range(0, len(shapefiles), batchsize):
            print("sampling files %i - %i" % (i, i+batchsize))
            files, datas = [], []  # [B, N, 6]
            for j in range(i, min(i+batchsize, len(shapefiles))):
                # Each list line is "<relative path> <label>"; keep the path.
                shapefile = shapefiles[j].strip().split(' ')[0]
                print("loading file %s" % shapefile)
                points = np.loadtxt(os.path.join(root_in, shapefile), delimiter=',')  # [N, 6]
                datas.append(points.tolist())
                files.append(shapefile)
            datas = np.array(datas)

            print("farthest sampling ....")
            centroids = fsample(torch.tensor(datas[:, :, :3]), npoints)  # [B, npoints]
            print("farthest sampling done.")

            for k in range(len(files)):
                print("saving file %s" % files[k])
                subdir = os.path.join(root_out, files[k].split('/')[0])
                # makedirs(exist_ok=True) avoids the exists()/mkdir() race and
                # also creates any missing parent directories.
                os.makedirs(subdir, exist_ok=True)
                np.savetxt(os.path.join(root_out, files[k]), datas[k, centroids[k], :], delimiter=',', fmt='%.6f')

def data_augment_by_rotation(num_of_cls: dict, batchsize, root_in, root_out):
    """Create rotated copies of randomly picked per-class point cloud files.

    For each class `name` in `num_of_cls`, draws `num` files (with
    replacement) from `root_in/name`, then rotates the clouds in batches of
    `batchsize` via rotate_randomly and saves each result to `root_out` as
    "<original stem>_<i>.txt" (xyz columns only, 6 decimal places).
    """
    loaded = 0
    out_names, clouds = [], []  # [B, N, 3]
    for cls_name, count in num_of_cls.items():
        cls_dir = os.path.join(root_in, cls_name)
        candidates = os.listdir(cls_dir)
        for i in range(count):
            pick = random.randint(0, len(candidates) - 1)
            src_name = candidates[pick]
            pts = np.loadtxt(os.path.join(cls_dir, src_name), delimiter=',')[:, :3]
            clouds.append(pts.tolist())
            out_names.append("%s_%d.txt" % (src_name.split(".")[0], i))
            loaded += 1
            print("loaded file", loaded)
    clouds = np.array(clouds)
    saved = 0
    for start in range(0, len(out_names), batchsize):
        end = min(len(out_names), start + batchsize)
        batch = rotate_randomly(torch.tensor(clouds[start:end], dtype=torch.float))  # [B, N, 3]
        for out_name, rotated in zip(out_names[start:end], batch):
            np.savetxt(os.path.join(root_out, out_name), rotated, delimiter=',', fmt='%.6f')
            saved += 1
            print("saved file", saved)

def segment_data(num_of_cls: dict, batchsize, root_in, root_out):
    """Generate extra per-class training clouds by two-stage re-sampling.

    For each class `name` in `num_of_cls`, cycles through the class's
    training files and produces `num` new clouds: keep the 7500 points
    nearest to a random seed (k_nearest_sample), then farthest-point-sample
    down to 2048 points, saving each result to `root_out` as
    "<original stem>_<i>.txt". Clouds are processed `batchsize` at a time.

    NOTE(review): the shape-name and file lists are read from hard-coded
    modelnet40 paths; a class with no listed files would crash on
    `i % len(files_list)` — confirm the inputs before reuse.
    """
    cnt = 0
    files, datas = [], []  # accumulated batch: names + [B, N, 3] clouds

    # Build name <-> label maps from the shape-names file (one name per line).
    name2label = {}
    label2name = {}
    with open("E:/datasets/modelnet40/modelnet40_shapenames.txt") as f:
        for i, line in enumerate(f.readlines()):
            name = line.strip().split(' ')[0]
            name2label[name] = i
            label2name[i] = name

    for k, (name, num) in enumerate(num_of_cls.items()):
        subdir = os.path.join(root_in, name)
        # Collect the training files belonging to this class's label.
        files_list = []
        lb = name2label[name]
        with open("E:/datasets/modelnet40/modelnet40_files_train.txt", 'r') as f:
            for line in f.readlines():
                ln = line.strip().split(' ')
                if int(ln[1]) == lb:
                    filename = ln[0].split('/')[1]
                    files_list.append(filename)

        for i in range(0, num):
            j = i % len(files_list)  # cycle deterministically through the class files
            filename = files_list[j]
            filepath = os.path.join(subdir, filename)
            points = np.loadtxt(filepath, delimiter=',')[:, :3]
            datas.append(points.tolist())
            files.append("%s_%d.txt" % (filename.split(".")[0], i))
            cnt += 1
            print("loaded file", cnt)
            # Flush a full batch, or the remainder at the very last class/file.
            if len(datas) == batchsize or (k == len(num_of_cls)-1 and i == num-1):
                datas = np.array(datas)
                print("knearest sampling ....")
                centroids = k_nearest_sample(torch.tensor(datas, dtype=torch.float), 7500)  # [B, npoints]
                print("knearest sampling done.")

                # BUG FIX: these inner loops previously used `k` as their loop
                # variable, clobbering the enclosing class index `k` and
                # corrupting the last-class flush condition above after the
                # first batch; they now use a separate index `b`.
                datas_knearest = []
                for b in range(len(centroids)):
                    datas_knearest.append(datas[b, centroids[b], :3].tolist())
                datas_knearest = np.array(datas_knearest)

                print("farthest sampling ....")
                centroids = farthest_point_sample(torch.tensor(datas_knearest), 2048)  # [B, npoints]
                print("farthest sampling done.")

                for b in range(len(files)):
                    np.savetxt(os.path.join(root_out, files[b]), datas_knearest[b, centroids[b], :], delimiter=',', fmt='%.6f')
                    print("saved file", files[b])

                files, datas = [], []  # start a fresh batch

if __name__ == '__main__':
    time_start = time.time()
    print("start at %.3f" % time_start)

    # NOTE(review): a large body of commented-out, one-off data-wrangling
    # snippets (modelnet40/shapenet conversions, renames, copies) that used
    # to live here has been removed; recover it from version control if any
    # of those jobs need to be re-run.

    # Map each synset offset (directory id) to its human-readable class name.
    id2name = {}
    with open("E:/download/shapenetcore_partanno_segmentation_benchmark_v0/synsetoffset2category.txt", 'r') as f:
        for line in f.readlines():
            # split() with no argument tolerates tabs as well as spaces; the
            # original split(' ') broke on tab-separated category files.
            fields = line.strip().split()
            id2name[fields[1]] = fields[0]

    with open("E:/download/shapenetcore_partanno_segmentation_benchmark_v0/train_test_split/shuffled_test_file_list.json", 'r') as f:
        files_test = json.load(f)

    root = "E:/download/shapenetcore_partanno_segmentation_benchmark_v0"
    for i, filepath in enumerate(files_test):
        # Entries look like "shape_data/<synset id>/<file token>".
        parts = filepath.split('/')
        synset_id = parts[1]  # renamed from `id`, which shadowed the builtin
        name = id2name[synset_id]
        filename = parts[2]

        # Farthest-point-sample each .pts cloud down to 2048 points and save
        # it as "<class name>_<running index>.txt" under root/test.
        ptsfile = root + '/' + synset_id + '/points/' + filename + '.pts'
        print("processing file", ptsfile)
        points = np.loadtxt(ptsfile, dtype=float, delimiter=' ')
        points = fps(points, 2048)
        target = root + '/test/' + name + '_' + str(i) + '.txt'
        np.savetxt(target, points, delimiter=',', fmt="%.5f")

    time_end = time.time()
    print("end at %.3f" % time_end)
    print("time cost %.3f" % (time_end - time_start))