#!/usr/bin/python
# coding=utf-8

import random
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression
from multiprocessing import Pool
# from fancyimpute import MICE
import scipy
import scipy.sparse
import scipy.sparse.linalg
import numpy as np
import json
import sys
import time
import os
import utils
import xlwt, xlrd

DATA_DIR = "../new_data"
OBJ_DATA_DIR = 'D:\\开发\\Python工程\\measure\\static\\data_file\\humanModel'

# calculate normals
def compute_normals(vertex, facet):
  """Compute per-vertex unit normals by averaging incident facet normals.

  Args:
    vertex: sequence of 3D points, indexable as vertex[i] -> (x, y, z).
    facet: sequence of triangles, each a triple of vertex indices.
      NOTE(review): indices are used as-is (0-based) here, while other
      functions in this file subtract 1 from facet entries — confirm callers.

  Returns:
    List of [nx, ny, nz] float lists, one per vertex.
  """
  normals = []
  # collect the unit normal of every facet incident to each vertex
  vertexNormalLists = [[] for i in range(0, len(vertex))]
  for face in facet:
    AB = np.array(vertex[face[0]]) - np.array(vertex[face[1]])
    AC = np.array(vertex[face[0]]) - np.array(vertex[face[2]])
    n = np.cross(AB, -AC)
    n /= np.linalg.norm(n)
    for i in range(0, 3):
      vertexNormalLists[face[i]].append(n)
  for idx, normalList in enumerate(vertexNormalLists):
    if not normalList:
      # robustness: a vertex with no incident facet would divide by zero;
      # emit a zero normal instead of crashing
      normals.append([0.0, 0.0, 0.0])
      continue
    normalSum = np.zeros(3)
    for normal in normalList:
      normalSum += normal
    normal = normalSum / float(len(normalList))
    normal /= np.linalg.norm(normal)
    # bug fix: map() is lazy in Python 3 — materialize it so the result is
    # a concrete [x, y, z] list rather than a one-shot iterator
    normals.append(list(map(float, normal.tolist())))
  return normals

# load facet information from the template obj: gather every face definition
# into an (F_NUM, 3) int array and save it to facet.npy
def convert_template():
  """Parse the 'f' lines of the template obj into an (F_NUM, 3) index array."""
  print("starting convert_template")
  facet = np.zeros((utils.F_NUM, 3), dtype=int)
  row = 0
  with open(os.path.join(DATA_DIR, 'body_part.obj'), 'r') as obj_file:
    for line in obj_file:
      if line[0] != 'f':
        continue
      facet[row, :] = [int(tok) for tok in line[1:].split()]
      row += 1
  np.save(open(os.path.join(DATA_DIR,"facet.npy"), "wb"), facet)
  print("finish convert template from txt to npy")
  return facet

# loading data: file_list, vertex, mean, std
def obj2npy(label="female"):
  """Load every obj file of one gender, center each body, and save npy data.

  Reads all obj files under OBJ_DATA_DIR/<label>, keeps their 'v' lines,
  subtracts each body's centroid, and saves the vertex array plus the
  across-body mean/std (needed later to de-normalize).

  Args:
    label: gender tag, selects the obj sub-directory and output file names.

  Returns:
    [vertex, mean_vertex, std_vertex, file_list] with vertex of shape
    (len(file_list), V_NUM, 3).
  """
  print(' [**] begin obj2npy about %s... '%label)
  start = time.time()
  obj_file_dir = os.path.join(OBJ_DATA_DIR, label)
  file_list = os.listdir(obj_file_dir)

  # load original data: collect the vertex lines of every obj in the dataset
  vertex = []
  for i, obj in enumerate(file_list):
    sys.stdout.write('\r>> Converting %s body %d'%(label, i))
    sys.stdout.flush()
    # bug fix: use a context manager — the original leaked one file handle
    # per obj file
    with open(os.path.join(obj_file_dir, obj), 'r') as f:
      for line in f:
        if line[0] == '#':
          continue
        elif "v " in line:
          # bug fix: the original called line.replace('\n', ' ') and threw
          # the result away (str is immutable); split() handles the newline
          vertex.append(list(map(float, line[1:].split())))
        else:
          break
  # vertex is a flat list of [x, y, z] rows; regroup it per body
  vertex = np.array(vertex).reshape(len(file_list), utils.V_NUM, 3)
  # center each body on its own centroid
  for i in range(len(file_list)):
    v_mean = np.mean(vertex[i,:,:], axis=0)
    vertex[i,:,:] -= v_mean
  # per-vertex mean/std across bodies (used to de-normalize later)
  mean_vertex = np.array(vertex.mean(axis=0)).reshape(utils.V_NUM, 3)
  std_vertex = np.array(vertex.std(axis=0)).reshape(utils.V_NUM, 3)
  np.save(open(os.path.join(DATA_DIR, "%s_vertex.npy"%label), "wb"), vertex)
  np.save(open(os.path.join(DATA_DIR, "%s_mean_vertex.npy"%label), "wb"), mean_vertex)
  np.save(open(os.path.join(DATA_DIR, "%s_std_vertex.npy"%label), "wb"), std_vertex)
  print('\n [**] finish obj2npy in %fs.' %(time.time() - start))
  return [vertex, mean_vertex, std_vertex, file_list]

# convert cp from txt to npy
def convert_cp():
  """Parse the control-point definition file into a ragged list of paths.

  Blocks in body_control_points.txt are separated by '#' lines; single-token
  lines are skipped; every other line becomes a list of floats.

  Returns:
    List of paths, each a list of float records (ragged — paths differ in
    length).
  """
  print(' [**] begin load_cp ... ')
  start = time.time()
  f = open(os.path.join(DATA_DIR, 'body_control_points.txt'), "r")
  tmplist = []
  cp = []
  for line in f:
    if '#' in line:
      # '#' starts a new block: flush the previous one if non-empty
      if len(tmplist) != 0:
        cp.append(tmplist)
        tmplist = []
    elif len(line.split()) == 1:
      continue
    else:
      tmplist.append(list(map(float, line.strip().split())))
  cp.append(tmplist)
  # bug fix: cp is a ragged list of lists; build the object array explicitly —
  # NumPy >= 1.24 refuses to create an array from ragged sequences implicitly
  np.save(open(os.path.join(DATA_DIR, "cp.npy"), "wb"), np.array(cp, dtype=object))
  print(' [**] finish convert_cp from in %fs' %(time.time() - start))
  return cp

# load measure data from an xls workbook (custom helper)
def load_measure_from_xls(xls_path, sheet_name):
  """Read a measurement sheet and return an (M_NUM, body_num) array.

  The sheet's first row (header) and first column (label) are skipped;
  only the first M_NUM measurement columns are kept.
  """
  sheet = xlrd.open_workbook(xls_path).sheet_by_name(sheet_name)
  body_num = sheet.nrows - 1
  rows = [
    [sheet.cell_value(r + 1, c) for c in range(1, sheet.ncols)]
    for r in range(body_num)
  ]
  data = np.array(rows)[:, 0:utils.M_NUM]
  return data.T.reshape(utils.M_NUM, body_num)

# calculate the measure data and convert to npy
def convert_measure(label="female"):
  """Load the measurement table from xls, z-score it, and save npy files.

  Args:
    label: gender tag; selects the xls workbook and prefixes output names.

  Returns:
    [measure, mean_measure, std_measure, t_measure] where measure is the
    (M_NUM, body_num) raw table and t_measure its z-scored copy.
  """
  print(' [**] begin load_measure_data ... ')
  start = time.time()
  # read the custom measurement sheet (replaces the old calc_measure path)
  measure = load_measure_from_xls('D:/桌面/body template/program/excel_data/' + label + 'All.xls', 'measure_data')
  # z-score per measure dimension (across bodies)
  mean_measure = np.array(measure.mean(axis=1)).reshape(utils.M_NUM, 1)
  std_measure = np.array(measure.std(axis=1)).reshape(utils.M_NUM, 1)
  t_measure = measure - mean_measure
  t_measure /= std_measure
  # bug fix: the raw measure array was saved twice (before and after the
  # normalization); a single save is enough
  np.save(open(os.path.join(DATA_DIR, "%s_measure_all.npy"%label), "wb"), measure)
  np.save(open(os.path.join(DATA_DIR, "%s_mean_measure_all.npy"%label), "wb"), mean_measure)
  np.save(open(os.path.join(DATA_DIR, "%s_std_measure_all.npy"%label), "wb"), std_measure)
  np.save(open(os.path.join(DATA_DIR, "%s_t_measure_all.npy"%label), "wb"), t_measure)
  print(' [**] finish load_measure for %s in %fs' %(label, time.time() - start))
  return [measure, mean_measure, std_measure, t_measure]

# calculate measure data from given vertex by control points
def calc_measure(cp, vertex, facet):
  """Compute one body's measure vector: weight plus one polyline length per
  control-point path.

  Args:
    cp: control-point paths; each record's first field encodes the point
      type (1 = single vertex, 2 = blend of two vertices, otherwise a blend
      of three vertices); the remaining fields hold vertex indices and
      interpolation weights.
    vertex: (V_NUM, 3) vertex array of one body.
    facet: (F_NUM, 3) triangle index array (1-based, see the `c - 1` below).

  Returns:
    (M_NUM, 1) array: [weight, length_1 * 1000, length_2 * 1000, ...].
  """
  measure_list = []
  # weight: the signed tetrahedron volumes of all facets sum to the mesh
  # volume; 1026.0 is presumably the body density in kg/m^3 — TODO confirm
  vol = 0.0
  kHumanbodyIntensity = 1026.0
  for i in range(0, utils.F_NUM):
    # facet indices are 1-based
    f = [c - 1 for c in facet[i, :]]
    v0 = vertex[f[0], :]
    v1 = vertex[f[1], :]
    v2 = vertex[f[2], :]
    vol += np.cross(v0, v1).dot(v2)
  vol = abs(vol) / 6.0
  weight = kHumanbodyIntensity * vol
  # cube root keeps "weight" on a length-like scale; *1000 matches the mm
  # scaling of the other measures
  weight = weight**(1.0 / 3.0) * 1000
  measure_list.append(weight)
  # calc other measures: accumulate segment lengths along each path
  for measure in cp:
    length = 0.0
    p2 = vertex[int(measure[0][1]), :]
    for i in range(1, len(measure)):
      p1 = p2
      if measure[i][0] == 1:
        # point is a single mesh vertex
        p2 = vertex[int(measure[i][1]), :]
      elif measure[i][0] == 2:
        # point interpolated between two vertices
        p2 = vertex[int(measure[i][1]), :] * measure[i][3] + \
          vertex[int(measure[i][2]), :] * measure[i][4]
      else:
        # point interpolated between three vertices
        p2 = vertex[int(measure[i][1]), :] * measure[i][4] + \
          vertex[int(measure[i][2]), :] * measure[i][5] + \
          vertex[int(measure[i][3]), :] * measure[i][6]
      length += np.sqrt(np.sum((p1 - p2)**2.0))
    # lengths stored in millimetres
    measure_list.append(length * 1000)
  return np.array(measure_list).reshape(utils.M_NUM, 1)

# loading deform-based data
def load_d_data(vertex, facet, label="female"):
  """Compute the per-facet deformation representation of every body.

  For each facet, a body's edge matrix is multiplied by the inverse of the
  mean body's edge matrix, giving one 3x3 deformation Q per (body, facet).

  Args:
    vertex: (body_num, V_NUM, 3) vertex array of all bodies.
    facet: (F_NUM, 3) 1-based triangle index array.
    label: gender tag used in the saved npy file names.

  Returns:
    [d_inv_mean, deform, dets, mean_deform, std_deform], with deform of
    shape (body_num, F_NUM, 9) and dets of shape (F_NUM, body_num) holding
    the determinants of the deformation matrices.
  """
  print(" [**] begin load_d_data ...")
  start = time.time()
  dets = []
  # mean body, shape (V_NUM, 3); all deformations are measured relative to it
  mean_vertex = np.array(vertex.mean(axis=0)).reshape(utils.V_NUM, 3)
  d_inv_mean = get_inv_mean(mean_vertex, facet)
  deform = np.zeros((vertex.shape[0], utils.F_NUM, 9))
  # calculate deformation mat of each body shape
  for i in range(0, utils.F_NUM):
    sys.stdout.write('\r>> loading %s deformation of facet %d'%(label, i))
    sys.stdout.flush()
    # 0-based vertex indices of this facet (facet entries are 1-based)
    v = [k - 1 for k in facet[i, :]]
    # for the current facet, compute every body's deformation matrix
    for j in range(0, vertex.shape[0]):
      v1 = vertex[j, v[0], :]
      v2 = vertex[j, v[1], :]
      v3 = vertex[j, v[2], :]
      # deformation of facet i for body j, relative to the mean body
      Q = assemble_face(v1, v2, v3).dot(d_inv_mean[i])
      dets.append(np.linalg.det(Q))
      Q.shape = (9, 1)
      # deform has shape (body_num, F_NUM, 9)
      deform[j, i, :] = Q.flat
  # dets were appended facet-major, hence the (F_NUM, body_num) reshape
  dets = np.array(dets).reshape(utils.F_NUM, vertex.shape[0])
  # print(dets.shape, deform.shape)
  np.save(open(os.path.join(DATA_DIR, "%s_dets.npy"%label), "wb"), dets)
  np.save(open(os.path.join(DATA_DIR, "%s_d_inv_mean.npy"%label), "wb"), d_inv_mean)
  np.save(open(os.path.join(DATA_DIR, "%s_deform.npy"%label), "wb"), deform)
  mean_deform = np.array(deform.mean(axis=0))
  mean_deform.shape = (utils.F_NUM, 9)
  std_deform = np.array(deform.std(axis=0))
  std_deform.shape = (utils.F_NUM, 9)
  np.save(open(os.path.join(DATA_DIR, "%s_mean_deform.npy"%label), "wb"), mean_deform)
  np.save(open(os.path.join(DATA_DIR, "%s_std_deform.npy"%label), "wb"), std_deform)
  print('\n [**] finish load_deformation of %s in %fs' % (label, time.time() - start))
  return[d_inv_mean, deform, dets, mean_deform, std_deform]

# calculating the inverse of mean vertex matrix, v^-1
def get_inv_mean(mean_vertex, facet):
  """Invert the assembled edge matrix of every facet of the mean mesh."""
  print(" [**] begin get_inv_mean ...")
  start = time.time()
  d_inv_mean = np.zeros((utils.F_NUM, 3, 3))
  for idx in range(0, utils.F_NUM):
    # 0-based vertex indices of this facet (facet entries are 1-based)
    corners = [c - 1 for c in facet[idx, :]]
    face_mat = assemble_face(mean_vertex[corners[0], :],
                             mean_vertex[corners[1], :],
                             mean_vertex[corners[2], :])
    d_inv_mean[idx] = np.linalg.inv(face_mat)
  print(' [**] finish get_inv_mean in %fs' % (time.time() - start))
  return d_inv_mean

# build the per-facet 3x3 matrix [v2-v1 | v3-v1 | scaled normal]; the third
# column is a synthetic fourth point derived from the triangle's normal
def assemble_face(v1, v2, v3):
  """Return the 3x3 edge matrix of a triangle with its scaled normal."""
  edge_a = np.asarray(v2 - v1)
  edge_b = np.asarray(v3 - v1)
  normal = np.cross(edge_a.ravel(), edge_b.ravel())
  # scale the normal by 1/sqrt(|n|) (as in the SCAPE-style formulation)
  normal = normal / np.sqrt(np.linalg.norm(normal))
  return np.column_stack((edge_a, edge_b, normal))

# build the block-structured sparse design matrix used by the least-squares
# solvers in this module
def build_equation(m_datas, basis_num):
  """Return a sparse COO design matrix of shape
  (m_datas.shape[1]*basis_num, m_datas.shape[0]*basis_num).

  Row basis_num*i + j holds sample i's measurement column in the j-th block
  of columns, so solving M x = b fits one basis_num-sized coefficient block
  per measurement dimension.
  """
  n_rows, n_cols = m_datas.shape
  shape = (n_cols * basis_num, n_rows * basis_num)
  data, rowid, colid = [], [], []
  for i in range(n_cols):
    column = [c for c in m_datas[:, i].flat]
    for j in range(basis_num):
      data.extend(column)
      rowid.extend([basis_num * i + j] * n_rows)
      colid.extend(range(j * n_rows, (j + 1) * n_rows))
  return scipy.sparse.coo_matrix((data, (rowid, colid)), shape)

# calculating vertex-based presentation(PCA) using t-vertex
def get_v_basis(vertex, label="female"):
  """PCA of the normalized vertex data.

  `vertex` (body_num, V_NUM, 3) is z-scored and flattened in place for the
  SVD, then restored before returning, so the caller's array is unchanged.

  Returns:
    [v_basis, v_coeff, v_pca_mean, v_pca_std] where v_basis is
    (3*V_NUM, V_BASIS_NUM) and v_coeff is (V_BASIS_NUM, body_num).
  """
  print(" [**] begin get_v_basis of %s ..."%label)
  start = time.time()
  body_num = vertex.shape[0]
  mean_vertex = np.array(vertex.mean(axis=0)).reshape(utils.V_NUM, 3)
  vertex -= mean_vertex
  std_vertex = np.array(vertex.std(axis=0)).reshape(utils.V_NUM, 3)
  vertex /= std_vertex
  # flatten each body to one row of length 3*V_NUM, then put bodies in columns
  vertex.shape = (vertex.shape[0], 3 * utils.V_NUM)
  v = vertex.transpose()
  # principal component analysis: the leading left-singular vectors span the
  # dominant shape variations
  v_basis, v_sigma, V = np.linalg.svd(v, full_matrices=0)
  v_basis = np.array(v_basis[:, :utils.V_BASIS_NUM]).reshape(3 * utils.V_NUM, utils.V_BASIS_NUM)
  # coefficients of every body in the reduced basis
  v_coeff = np.dot(v_basis.transpose(), v)
  v_pca_mean = np.array(np.mean(v_coeff, axis=1))
  v_pca_mean.shape = (v_pca_mean.size, 1)
  v_pca_std = np.array(np.std(v_coeff, axis=1))
  v_pca_std.shape = (v_pca_std.size, 1)
  # undo the in-place normalization so the caller's vertex array is unchanged
  vertex.shape = (body_num, utils.V_NUM, 3)
  vertex *= std_vertex
  vertex += mean_vertex
  # bug fix: v_basis was saved twice (before and after computing v_coeff);
  # one save is enough
  np.save(open(os.path.join(DATA_DIR, "%s_v_basis.npy"%label),"wb"), v_basis)
  np.save(open(os.path.join(DATA_DIR, "%s_v_coeff.npy"%label),"wb"), v_coeff)
  print(' [**] finish get_v_basis in %fs' % (time.time() - start))
  return [v_basis, v_coeff, v_pca_mean, v_pca_std]

# calculating deform-based presentation(PCA)
def get_d_basis(deform, label="female"):
  """PCA of the normalized deformation data.

  `deform` (body_num, F_NUM, 9) is z-scored and flattened in place for the
  SVD, then restored before returning, so the caller's array is unchanged.

  Returns:
    [d_basis, d_coeff, d_pca_mean, d_pca_std] where d_basis is
    (9*F_NUM, D_BASIS_NUM) and d_coeff is (D_BASIS_NUM, body_num).
  """
  print(" [**] begin get_d_basis of %s..."%label)
  start = time.time()
  body_num = deform.shape[0]
  mean_deform = np.array(deform.mean(axis=0))
  mean_deform.shape = (utils.F_NUM, 9)
  std_deform = np.array(deform.std(axis=0))
  std_deform.shape = (utils.F_NUM, 9)
  deform -= mean_deform
  deform /= std_deform
  # flatten: (body_num, F_NUM, 9) -> (body_num, 9*F_NUM)
  deform.shape = (deform.shape[0], 9 * utils.F_NUM)
  # transpose so bodies are columns: (9*F_NUM, body_num)
  d = deform.transpose()

  # principal component analysis via SVD; singular values come sorted in
  # descending order, so the leading columns are the dominant axes
  d_basis, d_sigma, V = np.linalg.svd(d, full_matrices=0)
  d_basis = np.array(d_basis[:, :utils.D_BASIS_NUM]).reshape(9 * utils.F_NUM, utils.D_BASIS_NUM)

  # project every body onto the reduced basis: (D_BASIS_NUM, body_num)
  d_coeff = np.dot(d_basis.transpose(), d)
  d_pca_mean = np.array(np.mean(d_coeff, axis=1))
  d_pca_mean.shape = (d_pca_mean.size, 1)
  d_pca_std = np.array(np.std(d_coeff, axis=1))
  d_pca_std.shape = (d_pca_std.size, 1)

  np.save(open(os.path.join(DATA_DIR, "%s_d_basis.npy"%label), "wb"), d_basis)
  np.save(open(os.path.join(DATA_DIR, "%s_d_coeff.npy"%label), "wb"), d_coeff)
  # undo the in-place normalization (restores the caller's deform array)
  deform.shape = (body_num, utils.F_NUM, 9)
  deform *= std_deform
  deform += mean_deform
  print(' [**] finish get_d_basis of %s in %fs' % (label, time.time() - start))
  return [d_basis, d_coeff, d_pca_mean, d_pca_std]

# construct the related matrix A to change deformation into vertex
def get_d2v_matrix(d_inv_mean, facet, label="female"):
  """Assemble the sparse matrix A that maps vertex coordinates (plus one
  auxiliary point per facet) to per-facet deformation entries.

  Args:
    d_inv_mean: (F_NUM, 3, 3) inverses of the mean body's facet matrices.
    facet: (F_NUM, 3) 1-based triangle index array.
    label: gender tag for the saved npz file.

  Returns:
    A scipy.sparse COO matrix of shape (F_NUM*9, (V_NUM + F_NUM)*3),
    also saved (as its COO components) to %s_d2v.npz.
  """
  print(' [**] begin reload A&lu maxtrix of %s'%label)
  start = time.time()
  data = []
  rowidx = []
  colidx = []
  r = 0
  # columns past `off` address the auxiliary (4th) point of each facet
  off = utils.V_NUM * 3
  shape = (utils.F_NUM * 9, (utils.V_NUM + utils.F_NUM) * 3)
  for i in range(0, utils.F_NUM):
    # 3x4 coefficient block for this facet (12 values)
    coeff = construct_coeff_mat(d_inv_mean[i])
    # 0-based vertex indices of this facet
    v = [c - 1 for c in facet[i, :]]
    # column index ranges of the x/y/z components of each triangle vertex
    v1 = range(v[0] * 3, v[0] * 3 + 3)
    v2 = range(v[1] * 3, v[1] * 3 + 3)
    v3 = range(v[2] * 3, v[2] * 3 + 3)
    # the auxiliary point columns follow all vertices, ordered by facet index
    v4 = range(off + i * 3, off + i * 3 + 3)
    for j in range(0, 3):
      data += [c for c in coeff.flat]
      rowidx += [r, r, r, r, r + 1, r + 1, r + 1,
        r + 1, r + 2, r + 2, r + 2, r + 2]
      colidx += [v1[j], v2[j], v3[j], v4[j], v1[j],
        v2[j], v3[j], v4[j], v1[j], v2[j], v3[j], v4[j]]
      r += 3
  d2v = scipy.sparse.coo_matrix((data, (rowidx, colidx)), shape=shape)
  np.savez(os.path.join(DATA_DIR, "%s_d2v"%label), row=d2v.row,
    col=d2v.col,data=d2v.data, shape=d2v.shape)
  # lu = scipy.sparse.linalg.splu(d2v.transpose().dot(d2v).tocsc())
  print(' [**] finish load A&lu of %s in %fs.' % (label, time.time() - start))
  # return [d2v, lu]
  return d2v


# construct the matrix = v_mean_inv.dot(the matrix consists of 0 -1...)
def construct_coeff_mat(mat):
  """Return the transposed stack of (-column sums, mat): a 3x4 block.

  Equivalent to prefixing `mat` with a row holding the negated column sums
  and transposing, i.e. ((-sum, mat))^T.
  """
  tmp = -mat.sum(0)
  # bug fix: np.row_stack is an alias that was removed in NumPy 2.0;
  # np.vstack is the supported equivalent
  return np.vstack((tmp, mat)).transpose()

# calculate the mapping matrix from measures to vertex-based coefficients
def get_m2v(v_coeff, t_measure, label="female"):
  """Solve the least-squares map from measures to vertex PCA coefficients.

  Args:
    v_coeff: (V_BASIS_NUM, body_num) vertex PCA coefficients.
    t_measure: (M_NUM, body_num) normalized measures.
    label: gender tag for the output file name.

  Returns:
    (V_BASIS_NUM, M_NUM) mapping matrix, also saved as %s_m2v.npy.
  """
  print(' [**] begin load_m2v of %s... '%label)
  start = time.time()
  target = v_coeff.copy()
  target.shape = (target.size, 1)
  design = build_equation(t_measure, utils.V_BASIS_NUM)
  # normal equations: (M^T M) ans = M^T V
  lhs = design.transpose().dot(design)
  rhs = design.transpose().dot(target)
  ans = np.array(scipy.sparse.linalg.spsolve(lhs, rhs))
  ans.shape = (utils.V_BASIS_NUM, utils.M_NUM)
  np.save(open(os.path.join(DATA_DIR, "%s_m2v.npy"%label), "wb"), ans)
  print(' [**] finish get_m2v of %s in %fs' % (label, time.time() - start))
  return ans

# calculate global mapping from measure to deformation PCA coeff
def get_m2d(d_coeff, t_measure, label="female"):
  """Solve the least-squares map from measures to deform PCA coefficients.

  Args:
    d_coeff: (D_BASIS_NUM, body_num) deformation PCA coefficients.
    t_measure: (M_NUM, body_num) normalized measures.
    label: gender tag for the output file name.

  Returns:
    (D_BASIS_NUM, M_NUM) mapping matrix, also saved as %s_m2d.npy.
  """
  print(' [**] begin load_m2d of %s... '%label)
  start = time.time()
  D = d_coeff.copy()
  D.shape = (D.size, 1)
  M = build_equation(t_measure, utils.D_BASIS_NUM)
  # solve transform matrix via the normal equations M'M ans = M'D
  MtM = M.transpose().dot(M)
  MtD = M.transpose().dot(D)
  ans = np.array(scipy.sparse.linalg.spsolve(MtM, MtD))
  ans.shape = (utils.D_BASIS_NUM, utils.M_NUM)
  np.save(open(os.path.join(DATA_DIR, "%s_m2d.npy"%label), "wb"), ans)
  # bug fix: the finish message said 'load_n'; report the actual function
  print(' [**] finish get_m2d of %s in %fs' % (label, time.time() - start))
  return ans

# get color dict & mask
def get_map(facet):
  """Derive the measure/facet relevance mask from the colored template obj.

  Vertex colors in body_part.obj encode body parts; utils.PART maps a color
  triplet to a part index and utils.P2M maps a part to the measures it
  influences.

  Args:
    facet: (F_NUM, 3) 1-based triangle index array.

  Returns:
    [utils.PART, mask] with mask of shape (19, F_NUM); mask[m, f] is True
    when measure m relates to facet f. Also saved to mask.npy.
  """
  tmp = open(os.path.join(DATA_DIR, 'body_part.obj'), 'r').read()
  # cut out the vertex section; note .replace('v', '') strips EVERY 'v'
  # character in the slice, not just the line prefixes
  tmp = tmp[tmp.index('\nv'): tmp.index("\n#!") - 1].replace('v', '')
  tmp = list(map(float, tmp.replace('\n', ' ').split()))
  # each vertex line holds x y z r g b; keep only the color triplet
  body = np.array(tmp).reshape(utils.V_NUM, 6)
  body = np.array(body[:, 3:])
  color_list = []
  for i in range(0, utils.V_NUM):
    color_list.append((body[i, 0], body[i, 1], body[i, 2]))
  mask = np.zeros((19, utils.F_NUM), dtype=bool)
  for i in range(0, utils.F_NUM):
    sys.stdout.write('\r>> get map of facet %d'%(i))
    sys.stdout.flush()
    # 0-based vertex indices of this facet
    v = facet[i, :] - 1
    tmp = set()
    # union of the measures of the parts the three vertices belong to
    for j in v:
      c = utils.PART.index(color_list[j])
      for k in utils.P2M[c]:
        tmp.add(k)
    for j in tmp:
      mask[j, i] = 1
  np.save(open(os.path.join(DATA_DIR, "mask.npy"), "wb"), mask)
  return [utils.PART, mask]

# local map matrix: measure->deform
def local_matrix(mask, deform, measure, label="female"):
  """Fit, per facet, a least-squares map from its masked measures to the
  facet's 9 deformation values; save all maps to %s_local.npy.

  Args:
    mask: (M_NUM, F_NUM) boolean matrix; mask[m, f] marks measure m as
      relevant for facet f.
    deform: (body_num, F_NUM, 9) deformation array.
    measure: (M_NUM, body_num) measure array.
    label: gender tag for the output file name.
  """
  print(' [**] begin solve local_matrix of %s'%label)
  start = time.time()
  L_tosave = []
  body_num = deform.shape[0]
  for i in range(0, utils.F_NUM):
    sys.stdout.write('\r>> calc local map of %s NO.%d'%(label, i))
    sys.stdout.flush()
    # this facet's deformation entries for all bodies, flattened to a column
    S = np.array(deform[:, i, :])
    S.shape = (S.size, 1)
    # replicate this facet's measure mask across bodies
    t_mask = np.array(mask[:, i])
    t_mask.shape = (utils.M_NUM, 1)
    t_mask = t_mask.repeat(body_num, axis=1)
    # keep only the masked measures -> (selected_count, body_num)
    m = np.array(measure[t_mask])
    m.shape = (m.size // body_num, body_num)
    M = build_equation(m, 9)
    # solve transform matrix via the normal equations M'M ans = M'S
    MtM = M.transpose().dot(M)
    MtS = M.transpose().dot(S)
    ans = np.array(scipy.sparse.linalg.spsolve(MtM, MtS))
    ans.shape = (9, m.size // body_num)
    L_tosave.append(list(ans))
  np.save(open(os.path.join(DATA_DIR, "%s_local.npy"%label), "wb"), L_tosave)
  print('\n [**] finish solve local_matrix of %s in %fs' % (label, time.time() - start))

# calculate relationship directly
def rfe_local(dets, deform, measure, label="female", k_features=9):
  """For every facet, run recursive feature elimination in parallel to pick
  the k_features most relevant measures and fit the local linear map.

  Args:
    dets: (F_NUM, body_num) determinants of the deformation matrices.
    deform: (body_num, F_NUM, 9) deformation array.
    measure: (M_NUM, body_num) raw measures.
    label: gender tag for the output file names.
    k_features: number of measures each facet keeps.

  Returns:
    [dets, mask, rfe_mat] with mask (M_NUM, F_NUM) and
    rfe_mat (F_NUM, 9, k_features).
  """
  print(' [**] begin rfe_local of %s'%label)
  start = time.time()
  body_num = deform.shape[0]
  # z-score the measures per dimension
  mean_measure = np.array(measure.mean(axis=1)).reshape(utils.M_NUM, 1)
  std_measure = np.array(measure.std(axis=1)).reshape(utils.M_NUM, 1)
  t_measure = measure - mean_measure
  t_measure /= std_measure
  # x: (body_num, M_NUM) — samples in rows, as sklearn expects
  x = t_measure.transpose()

  # worker pool, one task per facet
  pool = Pool(processes = 8)
  # per task: facet index, its determinants, its deformations, body count,
  # normalized measures, raw measures, number of features to keep
  tasks = [(i, dets[i,:], deform[:,i,:], body_num, x, measure, k_features) for i in range(utils.F_NUM)]
  results = pool.starmap(rfe_multiprocess, tasks)
  pool.close()
  pool.join()

  rfe_mat = np.array([ele[0] for ele in results]).reshape(utils.F_NUM, 9, k_features)
  mask = np.array([ele[1] for ele in results]).reshape(utils.F_NUM, utils.M_NUM).transpose()

  np.save(open(os.path.join(DATA_DIR, "%s_rfemat.npy"%label), "wb"), rfe_mat)
  np.save(open(os.path.join(DATA_DIR, "%s_rfemask.npy"%label), "wb"), mask)
  print("[**] finish rfe_mat calc of %s in %fs"%(label, time.time()-start))
  return [dets, mask, rfe_mat]

# worker: for one facet, select the k most relevant measures via RFE and fit
# a linear map from those measures to the facet's deformation entries
def rfe_multiprocess(i, dets, deform, body_num, x, measure, k_features):
  """RFE worker for facet i.

  Args:
    i: facet index (used only for progress output).
    dets: (body_num,) determinants of this facet's deformation matrices.
    deform: (body_num, 9) deformation entries of this facet for all bodies.
    body_num: number of bodies in the dataset.
    x: (body_num, M_NUM) normalized measures (samples in rows).
    measure: (M_NUM, body_num) raw measures.
    k_features: number of measures to keep.

  Returns:
    [ans, support] where ans is the (9, k_features) mapping matrix and
    support is RFE's boolean measure-selection mask.
  """
  sys.stdout.write('>> calc rfe map NO.%d\n'%(i))
  sys.stdout.flush()
  y = np.array(dets).reshape(body_num, 1)
  model = LinearRegression()
  # recursive feature elimination: keep the k_features measures that best
  # predict this facet's deformation determinant
  # bug fix: pass n_features_to_select by keyword — the positional form was
  # deprecated and then removed in scikit-learn 1.x
  rfe = RFE(model, n_features_to_select=k_features)
  rfe.fit(x, y.ravel())
  # support_ flags the selected measures (True = kept, False = eliminated)
  flag = np.array(rfe.support_).reshape(utils.M_NUM, 1)
  # replicate the selection across bodies: flag becomes (M_NUM, body_num)
  flag = flag.repeat(body_num, axis=1)

  # calculate the linear mapping matrix by least squares
  S = np.array(deform)
  S.shape = (S.size, 1)
  # keep only the selected measures -> (k_features, body_num)
  m = np.array(measure[flag])
  m.shape = (k_features, body_num)
  # sparse design matrix of shape (body_num*9, k_features*9)
  M = build_equation(m, 9)
  # normal equations M'M ans = M'S — the closed-form least-squares estimate,
  # solved directly instead of via sklearn's fitting machinery
  MtM = M.transpose().dot(M)
  MtS = M.transpose().dot(S)
  # Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
  ans = np.array(scipy.sparse.linalg.spsolve(MtM, MtS))
  ans.shape = (9, k_features)
  return [ans, rfe.support_]

def random_multiprocess(i, dets, deform, body_num, x, measure, k_features):
  """Baseline worker mirroring rfe_multiprocess: select k_features of the 19
  measures at random (instead of via RFE) and fit the same least-squares
  map from the selected measures to this facet's deformation entries.

  Returns [ans, flag]: the (9, k_features) mapping and the selection mask.
  """
  sys.stdout.write('>> calc random map NO.%d\n'%(i))
  # randomly mark k_features of the 19 measures as selected
  flag = np.zeros((19, 1), dtype=bool)
  for picked in random.sample(range(19), k_features):
    flag[picked, 0] = True
  F = flag.repeat(body_num, axis=1)

  # calculate the linear mapping matrix by least squares
  S = np.array(deform).reshape(-1, 1)
  m = np.array(measure[F]).reshape(k_features, body_num)
  M = build_equation(m, 9)
  gram = M.transpose().dot(M)
  rhs = M.transpose().dot(S)
  ans = np.array(scipy.sparse.linalg.spsolve(gram, rhs)).reshape(9, k_features)
  return [ans, flag]

# ===========================================================================

def global_train():
  """Fit the global measure -> deformation-PCA mapping for each gender."""
  genders = ["female"]
  for gender in genders:
    deform = np.load(open(os.path.join(DATA_DIR, gender + "_deform.npy"),"rb"))
    d_basis, d_coeff, _, _ = get_d_basis(deform, label=gender)
    # bug fix: convert_measure writes "%s_measure_all.npy"; nothing in this
    # module ever writes the old "_measure.npy" name
    # NOTE(review): get_m2d's second parameter is named t_measure
    # (normalized), but the raw measure matrix is passed here — confirm
    measure = np.load(open(os.path.join(DATA_DIR, gender + "_measure_all.npy"),"rb"))
    get_m2d(d_coeff, measure, label=gender)

# train all data
def local_train():
  """Full local-mapping training pipeline per gender: parse the template,
  convert obj bodies to npy, compute deformations, load measures, and fit
  the per-facet RFE mapping."""
  genders = ["male"] #, "male"]
  for gender in genders:
    # control points are not used: measures come from the xls sheet instead

    # extract the template facet information
    facet = convert_template()
    # bug fix: convert_template saves "facet.npy" — the old "facets.npy"
    # name is never written and np.load raised FileNotFoundError
    # facet has shape [F_num, 3]
    facet = np.load(open(os.path.join(DATA_DIR, "facet.npy"),"rb"))
    # convert each sample's obj to centered vertex data; index 0 is vertex
    vertex = obj2npy(gender)[0]
    # vertex has shape [p_num, V_num, 3]
    vertex = np.load(open(os.path.join(DATA_DIR, gender + "_vertex.npy"),"rb"))
    d_inv_mean, deform, dets, _, _ = load_d_data(vertex, facet, label=gender)

    d_inv_mean = np.load(open(os.path.join(DATA_DIR, gender + "_d_inv_mean.npy"),"rb"))

    # deform: (p_NUM, F_NUM, 9), dets: (F_NUM, p_num)
    deform = np.load(open(os.path.join(DATA_DIR, gender + "_deform.npy"),"rb"))
    dets = np.load(open(os.path.join(DATA_DIR, gender + "_dets.npy"),"rb"))
    # measure: (m_Num, p_Num)
    # bug fix: convert_measure returns [measure, mean, std, t_measure] —
    # take element 0 instead of keeping the whole list
    measure = convert_measure(label=gender)[0]
    # bug fix: reload from "%s_measure_all.npy" (the file convert_measure
    # actually writes); "_measure.npy" is never created
    measure = np.load(open(os.path.join(DATA_DIR, gender + "_measure_all.npy"),"rb"))

    rfe_local(dets, deform, measure, label=gender, k_features=9)
    


# Script entry point: by default run the local (per-facet) training pipeline.
if __name__ == "__main__":
  local_train()
  # arr = np.array([[2, 3], [4, 5], [6, 7]])
  # print(arr.shape)
  # print(build_equation(arr, 3).shape)

  # global_train()
