// !!!WARNING: There are some unfinished items:
// !!!WARNING: initialize() and finalize() must be finished.
// !!!WARNING: Every block marked "// do something to handle" must be filled in.

// !!!WARNING: Every block marked "// debug" (together with the lines that follow it up to the next blank line) that changed any state must be deleted.
// !!!WARNING: Every block marked "// debug" (together with the lines that follow it up to the next blank line) that changed nothing may be deleted.
// !!!WARNING:   ...and others.

// Finished: load
// Finished: divide ------------------------------ unfinished: user-supplied divide
// Finished: train ------------------------------ unfinished: user-supplied train

#ifndef _M3_CPP
#define _M3_CPP

#include "M3.h"
using namespace std;


#define TIME_DEBUG_OUT debug_out << "TIME " <<  MPI_Wtime()-m3_start_time << "s ~~~~~: "
//////////////////////////added by hoss
namespace M3
{
	std::size_t trainLen;
	std::size_t testLen;

	// Count the lines in `filename` (like `wc -l`).
	// Returns 0 when the file cannot be opened or is empty.
	std::size_t wc_l(const std::string &filename)
	{
		std::ifstream inf(filename.c_str(),std::ios::in|std::ios::binary);
		std::string line;
		std::size_t count=0;
		while (std::getline(inf,line))
			++count;
		return count;	// inf closed by its destructor (RAII)
	}
}
/////////////////////////////////////////////////////////////////////////

// Test whether this process is the master (coordinator) process.
bool M3::rank_master(int rank){
  const bool is_master = (rank == M3_MASTER_RANK);
  return is_master;
}

// Test whether this process is a data slave.
// Data slaves occupy every rank from m3_start_slave_process_rank up.
bool M3::rank_slave(int rank){
  return !(rank < m3_start_slave_process_rank);
}

// Test whether this process is a run (train/test worker) slave:
// any non-master rank below the first data-slave rank.
bool M3::rank_run(int rank){
  if (rank == 0)
    return false;
  return rank < m3_start_slave_process_rank;
}


void M3::initialize(int argc,
		    char * argv[]){

  int init_flag;
  MPI_Initialized(&init_flag);
  if (init_flag){
    // do something to handle this error
  }

  // Initialize and get some process informaiton
  MPI_Init(&argc,
	   &argv);

  m3_start_time=MPI_Wtime();

  MPI_Comm_size(MPI_COMM_WORLD,
		&m3_all_process_num);
  MPI_Comm_rank(MPI_COMM_WORLD,
		&m3_my_rank);

  // Middle infomation file name
  string debug_name="out.debug_";
  char name_tmp[20];
  sprintf(name_tmp,
	  "%d",
	  m3_my_rank);
  debug_name+=name_tmp;
  debug_out.open(debug_name.c_str());

  // There are some parameter need to be determined.
  // For easy to test, I only to let them be a small const
  m3_start_slave_process_rank=4;
  m3_continue_subset_size=10;
  m3_subset_size=50;

  //Commit Data_Sample & Data_Node
  int data_node_len[2];
  data_node_len[0]=1;
  data_node_len[1]=1;
  MPI_Datatype data_node_type[2];
  data_node_type[0]=MPI_INT;
  data_node_type[1]=MPI_FLOAT;
  MPI_Aint data_node_offset[2];
  data_node_offset[0]=0;
  data_node_offset[1]=sizeof(int);
  MPI_Type_struct(2,
		  data_node_len,
		  data_node_offset,
		  data_node_type,
		  &MPI_Data_Node);
  MPI_Type_commit(&MPI_Data_Node);

  int data_sample_len[4];
  data_sample_len[0]=1;
  data_sample_len[1]=1;
  data_sample_len[2]=1;
  data_sample_len[3]=1;
  MPI_Datatype data_sample_type[4];
  data_sample_type[0]=MPI_INT;
  data_sample_type[1]=MPI_FLOAT;
  data_sample_type[2]=MPI_INT;
  data_sample_type[3]=MPI_INT;	// pointer as int
  MPI_Aint data_sample_offset[4];
  data_sample_offset[0]=0;
  data_sample_offset[1]=data_sample_offset[0]+sizeof(int);
  data_sample_offset[2]=data_sample_offset[1]+sizeof(float);
  data_sample_offset[3]=data_sample_offset[2]+sizeof(int);
  MPI_Type_struct(4,
		  data_sample_len,
		  data_sample_offset,
		  data_sample_type,
		  &MPI_Data_Sample);
  MPI_Type_commit(&MPI_Data_Sample);

  int subset_info_len[13];
  for (int i=0;i<13;i++)
    subset_info_len[i]=1;
  MPI_Datatype subset_info_type[13];
  subset_info_type[0]=MPI_FLOAT;
  subset_info_type[1]=MPI_FLOAT;
  for (int i=2;i<13;i++)
    subset_info_type[i]=MPI_INT;
  MPI_Aint subset_info_offset[13];
  subset_info_offset[0]=0;
  subset_info_offset[1]=subset_info_offset[0]+sizeof(float);
  subset_info_offset[2]=subset_info_offset[1]+sizeof(float);
  for (int i=3;i<13;i++)
    subset_info_offset[i]=subset_info_offset[i-1]+sizeof(int);
  MPI_Type_struct(13,
		  subset_info_len,
		  subset_info_offset,
		  subset_info_type,
		  &MPI_Subset_Info);
  MPI_Type_commit(&MPI_Subset_Info);

  // Make divider and others(havn't new)
  m3_divider=new Hyper_Plane();

  if (rank_master(m3_my_rank))
    m3_master=new M3_Master;
  else if (rank_slave(m3_my_rank))
    m3_slave=new M3_Slave;
  else if (rank_run(m3_my_rank))
    m3_run=new M3_Run;
}

// Tear down everything initialize() created: committed datatypes, the
// debug log, MPI itself, and finally this rank's role object.
void M3::finalize(){
  // Release the committed MPI datatypes.
  MPI_Type_free(&MPI_Data_Node);
  MPI_Type_free(&MPI_Data_Sample);
  MPI_Type_free(&MPI_Subset_Info);

  debug_out.close();

  MPI_Finalize();

  // Delete the role object built in initialize().
  if (rank_master(m3_my_rank)){
    delete m3_master;
  }
  else if (rank_slave(m3_my_rank)){
    delete m3_slave;
  }
  else if (rank_run(m3_my_rank)){
    delete m3_run;
  }
}

// Load-phase dispatcher: the master reads `filename` and streams the
// samples out; data slaves receive and store them; a run slave reaching
// here is an error.  All ranks synchronize before returning.
void M3::load_train_data(const string &filename){
  if (rank_master(m3_my_rank)){
    m3_master->load_train_data(filename);
  }
  else if (rank_slave(m3_my_rank)){
    m3_slave->load_train_data();
  }
  else if (rank_run(m3_my_rank)){
    // do something to handle this error
  }

  MPI_Barrier(MPI_COMM_WORLD);
}

// Divide-phase dispatcher (size added by hoss): data slaves split their
// stored data into subsets of `size`; the master collects the resulting
// subset counts; a run slave reaching here is an error.
void M3::divide_train_data(int size){
  if (rank_master(m3_my_rank)){
    m3_master->divide_train_data();
  }
  else if (rank_slave(m3_my_rank)){
    m3_slave->divide_train_data(size);	// added by hoss
  }
  else if (rank_run(m3_my_rank)){
    // do something to handle this error
  }

  MPI_Barrier(MPI_COMM_WORLD);
}

// Train-phase dispatcher: every role participates (master schedules,
// data slaves ship subsets, run slaves train), then all synchronize.
void M3::training_train_data(){
  if (rank_master(m3_my_rank)){
    m3_master->training_train_data();
  }
  else if (rank_slave(m3_my_rank)){
    m3_slave->training_train_data();
  }
  else if (rank_run(m3_my_rank)){
    m3_run->training_train_data();
  }

  MPI_Barrier(MPI_COMM_WORLD);
}

// Test-phase dispatcher: only the master (file reader) and the run
// slaves (classifiers) take part; data slaves simply wait at the
// barrier.
void M3::classify_test_data(const string &filename){
  if (rank_master(m3_my_rank)){
    m3_master->classify_test_data(filename);
  }
  else if (rank_run(m3_my_rank)){
    m3_run->classify_test_data();
  }

  MPI_Barrier(MPI_COMM_WORLD);
}

// Score-phase dispatcher: scoring is a purely local computation done on
// the master; every other rank is a no-op (note: no barrier here).
void M3::score_test_data(){
  if (!rank_master(m3_my_rank))
    return;
  m3_master->score_test_data();
}





























//M3_Master


// Master initialize.
// Every bookkeeping table starts empty; the next free data-slave rank
// starts at the first slave process rank.
M3::M3_Master::M3_Master(){
  m_free_process=m3_start_slave_process_rank;

  m_label_to_process.clear();
  m_process_to_label.clear();
  m_process_train_data_num.clear();
  m_process_train_subset_num.clear();
  m_label_to_index.clear();
  m_index_to_label.clear();
}

// As name: convert the slice str[ll..rr) to a float.
// The slice is NUL-terminated in place so atof can parse it without a
// copy, then the clobbered byte is restored.  (added by hoss)
float M3::M3_Master::string_to_float(char * str,
				     int ll,
				     int rr){
	const char saved = str[rr];
	str[rr] = '\0';
	const float value = atof(str + ll);
	str[rr] = saved;
	return value;
}

// As name: convert the slice str[ll..rr) to an int.
// Same in-place NUL-terminate trick as string_to_float.  (added by hoss)
int M3::M3_Master::string_to_int(char * str,
				 int ll,
				 int rr){
	const char saved = str[rr];
	str[rr] = '\0';
	const int value = atoi(str + ll);
	str[rr] = saved;
	return value;
}

// Parse one input line ("label index:value index:value ...") into a
// Data_Sample.  dsp->data_vector must already point at a node buffer
// large enough for every pair on the line; dsp->data_vector_length is
// set to the number of pairs parsed.
// The '\0' guards below are a fix: the original scanned for ' '/':'
// unbounded, so a label-only line, a trailing space, or otherwise
// malformed input ran past the end of rbuf.  Well-formed input parses
// exactly as before.
void M3::M3_Master::parse_data(char * rbuf,
			       Data_Sample * dsp){
  int i=0,pri=0;
  int len=0;

  // Label runs up to the first space (or end of line).
  while (rbuf[i] && rbuf[i]!=' ') i++;
  dsp->label=string_to_float(rbuf,
			     pri,
			     i);

  while (rbuf[i]){
    pri=++i;
    while (rbuf[i] && rbuf[i]!=':') i++;
    if (!rbuf[i]) break;	// no more "index:value" pairs
    dsp->data_vector[len].index=string_to_int(rbuf,
					      pri,
					      i);
    pri=++i;
    while (rbuf[i] && rbuf[i]!=' ') i++;
    dsp->data_vector[len].value=string_to_float(rbuf,
						pri,
						i);
    len++;
  }
  dsp->data_vector_length=len;
}

// Pack every staged sample whose label equals sp_l (and its nodes) into
// the contiguous send buffers, so a single MPI_Send can ship them.
// Marks each packed sample in been_sent; sbs_len/nbs_len receive the
// packed sample and node counts.
void M3::M3_Master::data_package(Data_Sample * sample_buf,
				 Data_Node * node_buf,
				 Data_Sample * sample_buf_send,
				 Data_Node * node_buf_send,
				 int len,
				 float sp_l,
				 bool * been_sent,
				 int & sbs_len,
				 int & nbs_len){
  sbs_len=0;
  nbs_len=0;
  for (int i=0;i<len;i++){
    if (sample_buf[i].label!=sp_l)
      continue;

    been_sent[i]=true;
    sample_buf_send[sbs_len++]=sample_buf[i];

    const int vec_len=sample_buf[i].data_vector_length;
    for (int j=0;j<vec_len;j++)
      node_buf_send[nbs_len+j]=sample_buf[i].data_vector[j];
    nbs_len+=vec_len;
  }
}

// Print middle information
void M3::M3_Master::check_load_data(){
  TIME_DEBUG_OUT << "Master use " 
		 << m_free_process-m3_start_slave_process_rank 
		 << " to store data!\n"
		 << endl;
  
  int i;
  for (i=m3_start_slave_process_rank;i<m_free_process;i++){
    TIME_DEBUG_OUT << "Process " 
		   << i 
		   << " store the data with label: " 
		   << m_process_to_label[i]
		   << " and num: "
		   << m_process_train_data_num[i]
		   << endl;
  }

  map<float,vector<int> >::iterator it;
  for (it=m_label_to_process.begin();it!=m_label_to_process.end();it++){
    vector<int> pr=(*it).second;
    TIME_DEBUG_OUT << "Label " 
		   << (*it).first
		   << " has store in "
		   << pr.size()
		   << " process(s) "
		   << endl;
    TIME_DEBUG_OUT << "They are: ";
    int i;
    for (i=0;i<pr.size();i++)
      debug_out << pr[i] 
		<< " ";
    debug_out << endl;
  }
}

// Main load block.
// file_name is the name of data file.
// need_train_index is bool arr that determine which data need to be think as train data.
//
// Flow: the file is read in chunks of at most SAMPLE_BUF_SIZE samples.
// For each chunk the master repeatedly picks the first not-yet-sent
// sample, packs every chunk sample sharing its label into contiguous
// buffers, chooses (or newly allocates) a data-slave rank for that
// label, negotiates memory with it, sends the package, and records the
// label/process bookkeeping.  When the file is exhausted every slave
// rank gets CTRL_READ_DONE.
// NOTE(review): need_train_index is indexed by absolute line number, so
// it is assumed to hold at least one entry per file line -- confirm at
// the call sites (the public overload sizes it with wc_l).
void M3::M3_Master::load_train_data(string file_name,
				    vector<bool> need_train_index){

	
	
	// debug
  TIME_DEBUG_OUT << "master being to init its ele" << endl;

  // Line buffer plus sample/node staging arrays and their packed
  // (contiguous) counterparts used for MPI_Send; been_sent flags which
  // staged samples have already been shipped this chunk.
  const int READ_BUF_SIZE=1024;
  char * read_buf=new char[READ_BUF_SIZE];

  Data_Node * node_buf=new Data_Node[NODE_BUF_SIZE];
  Data_Node * node_buf_send=new Data_Node[NODE_BUF_SIZE];
  Data_Sample * sample_buf=new Data_Sample[SAMPLE_BUF_SIZE];
  Data_Sample * sample_buf_send=new Data_Sample[SAMPLE_BUF_SIZE];
  bool * been_sent=new bool[SAMPLE_BUF_SIZE];

  bool read_done_flag=false;
  int total_index=0;		// absolute line number in the input file

  ifstream file_in(file_name.c_str());

  // debug
  TIME_DEBUG_OUT << "master all ele has bee init & file:" 
		 << file_name 
		 << " has been opend" 
		 << endl;

  // One iteration per chunk of at most SAMPLE_BUF_SIZE train samples.
  while (!read_done_flag){

    // debug
    TIME_DEBUG_OUT << "master loop beging now!" << endl;

    int index=0,nb_offset=0;
    // Read data from data file.
    // This loop get out only when read over or the buf is full.
    while (index<SAMPLE_BUF_SIZE){

      // Read a line buf.
      memset(read_buf,
	     0,
	     sizeof(char)*READ_BUF_SIZE);
      if (!file_in.getline(read_buf,
			   READ_BUF_SIZE)){
	read_done_flag=true;	// the input is over
	file_in.close();

	// debug
	TIME_DEBUG_OUT << "master has close the file:" << file_name << endl;

	break;
      }      
      
      // Only lines flagged for training are parsed; their nodes are
      // appended to node_buf starting at nb_offset.
      if (need_train_index[total_index]){
	sample_buf[index].data_vector=&node_buf[nb_offset];
	parse_data(read_buf,
		   &(sample_buf[index])); // parser data to our struct
	nb_offset+=sample_buf[index].data_vector_length;
	sample_buf[index++].index=total_index;
      }
      total_index++;
    }

    // debug
    TIME_DEBUG_OUT << "master has read_buf done!index && total_index: " 
		   << index 
		   << " " 
		   << total_index 
		   << endl;

    // Ship the chunk label-by-label until every sample is sent.
    memset(been_sent,0,sizeof(bool)*SAMPLE_BUF_SIZE);
    while (true){
      int i;
      // Determine whether there is data which has not be send.
      for (i=0;i<index && been_sent[i];i++);
      if (i>=index) {

	// debug
	TIME_DEBUG_OUT << "this loop, no data need to be sent now !" << endl;

	break;	// all data has been send to salve;
      }

      // The i is the data's index that has not be send.
      // sp_l is the label of index i.
      float sp_l=sample_buf[i].label;
      int sbs_len,nbs_len;

      // debug
      TIME_DEBUG_OUT << "now, master sent lable:@" 
		     << i 
		     << " " 
		     << sample_buf[i].label 
		     << endl;

      // Package the all data in buf that label is sp_l as continue memory.
      data_package(sample_buf,
		   node_buf,
		   sample_buf_send,
		   node_buf_send,
		   index,
		   sp_l,
		   been_sent,
		   sbs_len,
		   nbs_len);

      // debug
      TIME_DEBUG_OUT << "Master has package down!" << endl;

      // Find the first slave_process to be ask to store data that lable sp_l
      int slave_rank=0;
      if (m_label_to_process.find(sp_l)!=m_label_to_process.end()){
	// If the sp_l is an old label that there is some process store.
	// Let the asked slave_process to be the last one that store data which is label sp_l.
	int stack_l=m_label_to_process[sp_l].size();
	slave_rank=m_label_to_process[sp_l][stack_l-1];
      }
      else {
	if (m_free_process==m3_all_process_num){
	  // all memory are not enough
	  // do something to handle this error
	}
	// Unless let asked slave_process be a new free process(if there exits one).
	slave_rank=m_free_process++;
      }

      // Memory negotiation: keep asking (moving to a fresh slave on
      // CTRL_MEMORY_SCARCITY) until some slave accepts the allocation.
      int ask_len[2];
      ask_len[0]=sbs_len;
      ask_len[1]=nbs_len;
      int slave_alloc_flag=0;
      while (true){		// ask slave to alloc

	// debug
	TIME_DEBUG_OUT << "Master ask slave_process: " << slave_rank << endl;

	// Send the ask space control.
	MPI_Send(&CTRL_ALLOC_MEMORY,
		 1,
		 MPI_INT,
		 slave_rank,
		 M3_TAG,
		 MPI_COMM_WORLD);	

	// debug
	TIME_DEBUG_OUT << "Master ask memory: " 
		       << ask_len[0] 
		       << " " 
		       << ask_len[1] 
		       << endl;

	// Send the space information.
	MPI_Send(ask_len,
		 2,
		 MPI_INT,
		 slave_rank,
		 M3_TAG+1,
		 MPI_COMM_WORLD);

	// Get the answer from asked slave.
	MPI_Status mpi_status;
	MPI_Recv(&slave_alloc_flag,
		 1,
		 MPI_INT,
		 slave_rank,
		 M3_TAG,
		 MPI_COMM_WORLD,
		 &mpi_status);

	// If the asked slave haa no space to locate data.
	// Let the asked slave be a new free process(if exits one).
	if (slave_alloc_flag==CTRL_MEMORY_SCARCITY){
	  if (m_free_process==m3_all_process_num){
	    // all memory are not enough
	    // do something to handle this error
	  }
	  slave_rank=m_free_process++;
	}
	else break;		// now slave_rank has enough memory
      }

      // debug
      TIME_DEBUG_OUT << "master decide to sent " 
		     << sp_l 
		     << " to slave_process: " 
		     <<slave_rank 
		     << endl;

      // Save some information table.
      // (label<->index, per-process sample counts, label->process list;
      // the last entry of m_label_to_process[sp_l] is always the slave
      // currently accepting that label.)
      m_process_to_label[slave_rank]=sp_l;
      if (m_label_to_index.find(sp_l)==m_label_to_index.end()){
	m_label_to_index[sp_l]=m_index_to_label.size();
	m_index_to_label.push_back(sp_l);
      }
      if (m_process_train_data_num.find(slave_rank)==m_process_train_data_num.end())
	m_process_train_data_num[slave_rank]=sbs_len;
      else m_process_train_data_num[slave_rank]+=sbs_len;
      if (m_label_to_process.find(sp_l)==m_label_to_process.end()){
	vector<int> tmp;
	tmp.clear();
	tmp.push_back(slave_rank);
	m_label_to_process[sp_l]=tmp;
      }
      else {
	int tmp_l=m_label_to_process[sp_l].size();
	if (m_label_to_process[sp_l][tmp_l-1]!=slave_rank)
	  m_label_to_process[sp_l].push_back(slave_rank);
      }

      // debug
      TIME_DEBUG_OUT << "master begin to sent data && the CTRL:" 
		     << CTRL_GET_DATA 
		     << endl;

      // debug
      // check_send_buf
      TIME_DEBUG_OUT << "master check sent buf" << endl;
      for (int i=0;i<sbs_len;i++)
      	TIME_DEBUG_OUT << sample_buf_send[i].index << " "
		       << sample_buf_send[i].label << " "
		       << sample_buf_send[i].data_vector_length << endl;
      for (int i=0;i<nbs_len;i++){
      	if (i%8==0) debug_out << endl;
      	debug_out << "(" << node_buf_send[i].index << "," << node_buf_send[i].value << ")";
      }
      debug_out << endl;

      // Send data.
      // Order matters: control word (M3_TAG), then the packed samples
      // (M3_TAG+1), then the packed nodes (M3_TAG+2).
      MPI_Send(&CTRL_GET_DATA,
	       1,
	       MPI_INT,
	       slave_rank,
	       M3_TAG,
	       MPI_COMM_WORLD);
      MPI_Send(sample_buf_send,
	       sbs_len,
	       MPI_Data_Sample,
	       slave_rank,
	       M3_TAG+1,
	       MPI_COMM_WORLD);
      MPI_Send(node_buf_send,
	       nbs_len,
	       MPI_Data_Node,
	       slave_rank,
	       M3_TAG+2,
	       MPI_COMM_WORLD);
    }

    // debug
    TIME_DEBUG_OUT << "master loop over now ! " << endl;

  }

  // debug
  TIME_DEBUG_OUT << "master has read done! " << endl;


  // Tell every slave rank (used or not) that loading is finished.
  for (int i=m3_start_slave_process_rank;i!=m3_all_process_num;i++){
    
    // debug
    TIME_DEBUG_OUT << "master sent to slave_process: " 
		   << i 
		   << " to load over" 
		   << endl;

    MPI_Send(&CTRL_READ_DONE,
	     1,
	     MPI_INT,
	     i,
	     M3_TAG,
	     MPI_COMM_WORLD); //sent load ok!
  }

  // debug
  TIME_DEBUG_OUT << "master sent loadover to all slave done" << endl;
  
  // Delete buf.
  delete [] read_buf;
  delete [] sample_buf;
  delete [] sample_buf_send;
  delete [] node_buf;
  delete [] node_buf_send;
  delete [] been_sent;

  // debug
  check_load_data();

};
void M3::M3_Master::load_train_data(const string &filename){

  // debug
  TIME_DEBUG_OUT << "come in the master load_train_data" << endl;

  vector<bool> tmp;
  tmp.clear();
  int i;
  size_t len = M3::trainLen = M3::wc_l(filename);
  for (i=0;i<len;i++) tmp.push_back(true);
  load_train_data(filename.c_str(),tmp);
  
  // debug
  TIME_DEBUG_OUT << "go out the master load_train_data" << endl;

}

// Print middle information after the divide step: how many subsets
// (blocks) each data slave reported.
void M3::M3_Master::check_divide_data(){
  TIME_DEBUG_OUT << "Master has get all block infomation" << endl;
  for (int proc=m3_start_slave_process_rank;proc<m_free_process;proc++){
    TIME_DEBUG_OUT << "Process " 
		   << proc
		   << " has "
		   << m_process_train_subset_num[proc]
		   << " block(s) " 
		   << endl;
  }
}

// Build the pairwise training task list: one Train_Task_Info per
// ordered pair (pa,pb) of data slaves with label(pa) < label(pb), so
// every unordered label pair is covered exactly once.
void M3::M3_Master::make_train_info(){
  m_train_task_info.clear();
  for (int pa=m3_start_slave_process_rank;pa<m_free_process;pa++){
    for (int pb=m3_start_slave_process_rank;pb<m_free_process;pb++){
      if (!(m_process_to_label[pa]<m_process_to_label[pb]))
	continue;

      // Make a new task struct that handle train task information.
      Train_Task_Info tsi(pa,
			  pb,
			  m_process_train_subset_num[pa],
			  m_process_train_subset_num[pb],
			  m3_continue_subset_size);
      m_train_task_info.push_back(tsi);

      // debug
      TIME_DEBUG_OUT << "The task pair " << m_train_task_info.size()
		     << " is LABEL " << m_process_to_label[pa]
		     << " VS " << m_process_to_label[pb] << endl;
      TIME_DEBUG_OUT << " and the subset num pair is "
		     << tsi.subset_num_1
		     << " VS " 
		     << tsi.subset_num_2 << endl;
      TIME_DEBUG_OUT << " and the task_num pair is "
		     << tsi.task_num_1
		     << " VS " 
		     << tsi.task_num_2 << endl;
    }
  }
}

// Collect the divide results: each data slave reports a 2-int record
// (only element [1], its subset count, is kept here), then the train
// task list is rebuilt from those counts.
void M3::M3_Master::divide_train_data(){
  MPI_Status mpi_status;
  int block_info[2];
  for (int src=m3_start_slave_process_rank;src<m_free_process;src++){
    MPI_Recv(block_info,
	     2,
	     MPI_INT,
	     src,
	     M3_TAG,
	     MPI_COMM_WORLD,
	     &mpi_status);
    m_process_train_subset_num[src]=block_info[1];
  }

  // debug
  check_divide_data();

  make_train_info();
}

// Master-side training scheduler.
// Hands out sub-tasks from m_train_task_info one at a time: wait for a
// free train slave to announce itself (M3_SUBSET_INFO_TAG), record the
// finished subset it reports (if any) into the subset config file, then
// tell the free slave and the two data slaves of the current task to
// start the next continue-chunk.  When all tasks are done, drain the
// remaining slaves' final reports and broadcast CTRL_TRAIN_DONE.
// NOTE(review): m_train_task_info[index] is evaluated before the bounds
// check below, so an empty task list indexes past the end -- confirm
// divide_train_data always produces at least one task before this runs.
void M3::M3_Master::training_train_data(){

  int index=0;			// current task pair being scheduled

  ofstream subset_config(SUBSET_CONFIG.c_str());
  int subset_config_index=0;
  Subset_Info subset_info;

  while (1){
    // Task pair label changed.
    if (m_train_task_info[index].task_over())
      index++;

    // Task over.
    if (index>=m_train_task_info.size())
      break;

    int free_process_rank;
    MPI_Status mpi_status;

    // debug
    TIME_DEBUG_OUT << "Master recv free process" << endl;

    // Get the free process rank.
    // !!!!WARNING: M3_SUBSET_INFO_TAG=9999 is must be only used here.
    MPI_Recv(&subset_info,
	     1,
	     MPI_Subset_Info,
	     MPI_ANY_SOURCE,
	     M3_SUBSET_INFO_TAG,
	     MPI_COMM_WORLD,
	     &mpi_status);
    free_process_rank=subset_info.process_rank;

    // save_index < 0 means the slave has nothing finished to report
    // (e.g. its first announcement).
    if (subset_info.save_index>=0){
      // Save subset_config
      // Format: filename_index label_1 label_2 subset_num_1 subset_num_2 memory(Byte)
      // Format: (last line) process_1 process_2 start_1 start_2 end_1 end_2
      subset_config << subset_info.save_index
		    << " "
		    << subset_info.label_1
		    << " " 
		    << subset_info.label_2
		    << " " 
		    << subset_info.subset_num_1
		    << " "
		    << subset_info.subset_num_2
		    << " "
		    << subset_info.subset_memory
		    << " "
		    << subset_info.process_1
		    << " "
		    << subset_info.process_2
		    << " "
		    << subset_info.start_1
		    << " "
		    << subset_info.start_2
		    << " "
		    << subset_info.end_1
		    << " "
		    << subset_info.end_2
		    << endl;
    }


    // debug
    TIME_DEBUG_OUT << "Master find the process " 
		   << free_process_rank 
		   << " is free" 
		   << endl;

    Train_Task_Info tsi=m_train_task_info[index];

    // debug
    TIME_DEBUG_OUT << "Master send CTRL" << endl;

    // debug
    TIME_DEBUG_OUT << "Master send data_process infomation:" << endl;
    TIME_DEBUG_OUT << "process " << tsi.process_rank_1
		   << " must sent from subset " << tsi.left_1() 
		   << " to " << tsi.right_1() 
		   << " to process: " << free_process_rank << endl;
    TIME_DEBUG_OUT << "process " << tsi.process_rank_2
		   << " must sent from subset " << tsi.left_2() 
		   << " to " << tsi.right_2() 
		   << " to process: " << free_process_rank << endl;

    // Send to the relate data_slave and train_slave there is task.
    MPI_Send(&CTRL_TRAIN_CONTINUE,
	     1,
	     MPI_INT,
	     free_process_rank,
	     M3_TAG,
	     MPI_COMM_WORLD);
    MPI_Send(&CTRL_TRAIN_CONTINUE,
	     1,
	     MPI_INT,
	     tsi.process_rank_1,
	     M3_TAG,
	     MPI_COMM_WORLD);
    MPI_Send(&CTRL_TRAIN_CONTINUE,
	     1,
	     MPI_INT,
	     tsi.process_rank_2,
	     M3_TAG,
	     MPI_COMM_WORLD);

    // debug
    TIME_DEBUG_OUT << "master CTRL send over " << endl;
    TIME_DEBUG_OUT << "master send relate process information" << endl;

    // Send to the data_slave which data(or subset) it will be sent.
    // And which train_slave will need them.
    // Layout of each triple: {first subset, last subset, target rank}.
    int data_process_1[3],data_process_2[3];
    data_process_1[0]=tsi.left_1();
    data_process_1[1]=tsi.right_1();
    data_process_1[2]=free_process_rank;
    data_process_2[0]=tsi.left_2();
    data_process_2[1]=tsi.right_2();
    data_process_2[2]=free_process_rank;
    MPI_Send(data_process_1,
	     3,
	     MPI_INT,
	     tsi.process_rank_1,
	     M3_TAG,
	     MPI_COMM_WORLD);
    MPI_Send(data_process_2,
	     3,
	     MPI_INT,
	     tsi.process_rank_2,
	     M3_TAG,
	     MPI_COMM_WORLD);

    // debug
    TIME_DEBUG_OUT << "master send infomation to free process " << free_process_rank << endl;

    // Send to the train_slave which data_slave it will get data from.
    // And how many data.
    Subset_Info si;
    si.label_1=m_process_to_label[tsi.process_rank_1];
    si.label_2=m_process_to_label[tsi.process_rank_2];
    si.subset_num_1=tsi.right_1()-tsi.left_1()+1;
    si.subset_num_2=tsi.right_2()-tsi.left_2()+1;
    si.save_index=subset_config_index++;
    si.process_rank=free_process_rank;
    si.process_1=tsi.process_rank_1;
    si.process_2=tsi.process_rank_2;
    si.start_1=tsi.left_1();
    si.start_2=tsi.left_2();
    si.end_1=tsi.right_1();
    si.end_2=tsi.right_2();
    MPI_Send(&si,
	     1,
	     MPI_Subset_Info,
	     free_process_rank,
	     M3_TAG,
	     MPI_COMM_WORLD);

    // New task
    m_train_task_info[index].free_task++;
  }

  // debug
  TIME_DEBUG_OUT << "Master train done " << endl;

  // Wait for all train_slave free.
  // Each train slave sends one final report; record it like above.
  for (int i=M3_MASTER_RANK+1;i<m3_start_slave_process_rank;i++){
    MPI_Status mpi_status;
    MPI_Recv(&subset_info,
	     1,
	     MPI_Subset_Info,
	     i,
	     M3_SUBSET_INFO_TAG,
	     MPI_COMM_WORLD,
	     &mpi_status);

    if (subset_info.save_index>=0){
      // Save subset_config
      // Format: filename_index label_1 label_2 subset_num_1 subset_num_2 memory(Byte)
      // Format: (last line) process_1 process_2 start_1 start_2 end_1 end_2
      subset_config << subset_info.save_index
		    << " "
		    << subset_info.label_1
		    << " " 
		    << subset_info.label_2
		    << " " 
		    << subset_info.subset_num_1
		    << " "
		    << subset_info.subset_num_2
		    << " "
		    << subset_info.subset_memory
		    << " "
		    << subset_info.process_1
		    << " "
		    << subset_info.process_2
		    << " "
		    << subset_info.start_1
		    << " "
		    << subset_info.start_2
		    << " "
		    << subset_info.end_1
		    << " "
		    << subset_info.end_2
		    << endl;
    }
  }

  // Close the subset config file
  subset_config.close();

  // Send to all slave that there is no task.
  for (int i=M3_MASTER_RANK+1;i<m3_all_process_num;i++)
    MPI_Send(&CTRL_TRAIN_DONE,
	     1,
	     MPI_INT,
	     i,
	     M3_TAG,MPI_COMM_WORLD);

  // debug
  TIME_DEBUG_OUT << "Master train over " << endl;

}

// Broadcast one test-phase control word to every run/test slave
// (ranks 1 .. m3_start_slave_process_rank-1).
void M3::M3_Master::test_ctrl(int ctrl){
  for (int dest=1;dest<m3_start_slave_process_rank;dest++){
    MPI_Send(&ctrl,
	     1,
	     MPI_INT,
	     dest,
	     M3_TAG,
	     MPI_COMM_WORLD);
  }
}

// Master loads the subset config written during training: one record
// per trained sub-problem, extracted in the exact field order used when
// the records were saved.
void M3::M3_Master::load_subset_config(){
  m_test_subset.clear();

  ifstream config_in(SUBSET_CONFIG.c_str());
  Subset_Info ts;
  for (;;){
    if (!(config_in 
	  >> ts.save_index 
	  >> ts.label_1 >> ts.label_2
	  >> ts.subset_num_1 >> ts.subset_num_2
	  >> ts.subset_memory
	  >> ts.process_1 >> ts.process_2
	  >> ts.start_1 >> ts.start_2
	  >> ts.end_1 >> ts.end_2))
      break;
    m_test_subset.push_back(ts);
  }
  config_in.close();
}

// Ask a free test process to load subset `index`.
// Waits for any test slave to announce itself, offers it the subset,
// and reads back its memory answer.  Returns true once some process
// accepted the subset; each CTRL_MEMORY_SCARCITY answer permanently
// retires one process for this round (m_test_process_num--), and the
// function returns false when no test process has memory left.
bool M3::M3_Master::ask_load_subset(int index){
  MPI_Status mpi_status;
  Subset_Info si;
  int free_process_rank;
  int memory_res;
  while (true){

    // Every process this round has already refused for lack of memory.
    if (m_test_process_num<=0)
      break;

    // Wait for any free test slave to report in.
    MPI_Recv(&si,
	     1,
	     MPI_Subset_Info,
	     MPI_ANY_SOURCE,
	     M3_SUBSET_INFO_TAG,
	     MPI_COMM_WORLD,
	     &mpi_status);
    free_process_rank=si.process_rank;

    // debug
    TIME_DEBUG_OUT << "master find process " << free_process_rank
		   << " is free to load subset " << index << endl;

    // Offer it the subset record (with the rank filled in).
    si=m_test_subset[index];
    si.process_rank=free_process_rank;
    MPI_Send(&si,
	     1,
	     MPI_Subset_Info,
	     free_process_rank,
	     M3_TAG,
	     MPI_COMM_WORLD);

    // Read back whether the slave could allocate the subset.
    MPI_Recv(&memory_res,
	     1,
	     MPI_INT,
	     free_process_rank,
	     M3_TAG,
	     MPI_COMM_WORLD,
	     &mpi_status);

    if (memory_res==CTRL_MEMORY_ENOUGH){

      // debug
      TIME_DEBUG_OUT << "master find process " << free_process_rank 
		     << "'s memory is enough" << endl;

      // Remember which process now holds this subset.
      m_test_subset[index].process_rank=free_process_rank;
      return true;
    }
    else {

      // debug
      TIME_DEBUG_OUT << "master find process " << free_process_rank 
		     << "'s memory is not enough" << endl;

      m_test_process_num--;
    }

  }
  return false;
}

// Master-side test driver.
// Outer loop: distribute as many trained subsets as fit into the test
// slaves' memory, wait until every slave is settled, then stream each
// flagged test line to all test slaves for classification; clear the
// slaves' memory and repeat until every subset has had a pass.
// test_flag selects which file lines are test samples (by line number).
// NOTE(review): the file is re-read from the current stream position on
// each outer iteration, so with more than one memory round only the
// remaining lines are classified against the later subsets -- confirm
// this matches how the per-subset score files are merged downstream.
void M3::M3_Master::classify_test_data(string file_name,
				       vector<bool> test_flag){

  const int READ_BUF_SIZE=1024*1024;
  char * read_buf=new char[READ_BUF_SIZE];

  ifstream file_in(file_name.c_str());

  int subset_index=0;		// next subset to hand out
  int index=0;			// current line number in the test file

  // One reusable sample whose nodes live in node_buf.
  Data_Sample * sample_buf=new Data_Sample;
  Data_Node * node_buf=new Data_Node[NODE_BUF_SIZE];
  sample_buf->data_vector=node_buf;

  
  while (true){
    
    if (subset_index>=m_test_subset.size()){

      // debug
      TIME_DEBUG_OUT << "master test over" << endl;

      break;
    }
    
    // Number of test slaves still willing to accept subsets this round.
    m_test_process_num=min(m3_start_slave_process_rank,
			   m3_all_process_num)-1;

    // debug
    TIME_DEBUG_OUT << "master let slave load subset " << endl;
    
    // Hand out subsets until memory runs out or all are placed.
    test_ctrl(CTRL_LOAD_SUBSET);
    while (subset_index<m_test_subset.size() && ask_load_subset(subset_index))
      subset_index++;

    for (int i=0;i<m_test_process_num;i++){
      // Wait for all test process free
      // (answer each one with save_index=-1 meaning "nothing to load").
      Subset_Info si;
      MPI_Status mpi_status;
      MPI_Recv(&si,
	       1,
	       MPI_Subset_Info,
	       MPI_ANY_SOURCE,
	       M3_SUBSET_INFO_TAG,
	       MPI_COMM_WORLD,
	       &mpi_status);
      si.save_index=-1;
      MPI_Send(&si,
	       1,
	       MPI_Subset_Info,
	       mpi_status.MPI_SOURCE,
	       M3_TAG,
	       MPI_COMM_WORLD);
    }

    // debug
    TIME_DEBUG_OUT << "now begin to load test & classify" << endl;
    
    while (true){

      // debug
      TIME_DEBUG_OUT << "master begin to load test data" << endl;
      
      if (!file_in.getline(read_buf,
			   READ_BUF_SIZE)){
	
	// debug
	TIME_DEBUG_OUT << "test file is over!" << endl;

	break;
      }

      // Skip lines that are out of range or not flagged as test data.
      if (index>=test_flag.size() || (!test_flag[index++]))
	continue;

      test_ctrl(CTRL_CLASSIFY_DATA);

      parse_data(read_buf,
		 sample_buf);
      sample_buf->index=index-1;	// index was already advanced

      // debug
      TIME_DEBUG_OUT << "master bcast data to all slave to classify" << endl;

      // debug
      TIME_DEBUG_OUT << "master bcast information:" 
		     << sample_buf->index << " "
		     << sample_buf->label << " "
		     << sample_buf->data_vector_length << endl;
      TIME_DEBUG_OUT;
      for (int i=0;i<sample_buf->data_vector_length;i++)
	debug_out << "(" << sample_buf->data_vector[i].index
		  << "," << sample_buf->data_vector[i].value << ")";
      debug_out << endl;

      // Send the sample header, then its nodes, to every test slave.
      for (int i=1;i<m3_start_slave_process_rank;i++){
	MPI_Send(sample_buf,
		 1,
		 MPI_Data_Sample,
		 i,
		 M3_TAG,
		 MPI_COMM_WORLD);
	MPI_Send(sample_buf->data_vector,
		 sample_buf->data_vector_length,
		 MPI_Data_Node,
		 i,
		 M3_TAG,
		 MPI_COMM_WORLD);
      }

      // debug
      TIME_DEBUG_OUT << "master bcast data finished" << endl;

    }

    // debug
    TIME_DEBUG_OUT << "master has over one loop test " << endl;

    // debug
    TIME_DEBUG_OUT << "master send all slave clear memory" << endl;

    test_ctrl(CTRL_TEST_CLEAR);
  }

  test_ctrl(CTRL_TEST_DONE);  

  delete [] read_buf;
  delete [] node_buf;
  delete sample_buf;

  // debug
  TIME_DEBUG_OUT << "master test done" << endl;

}

void M3::M3_Master::score_test_data(vector<bool> test_flag){
///////////////////////////added by hoss///////////////////////////////////	
	struct labelInfo
	{
		float BLabel;
		float SLabel;
		labelInfo():BLabel(0),SLabel(0){}
	};
///////////////////////////////////////////////////////////////////////////
  vector<ifstream*> score_file;
  score_file.clear();
  for (int i=0;i<m_test_subset.size();i++){
    char tmp[10];
    sprintf(tmp,"%d",m_test_subset[i].save_index);
    string sn=SCORE_DIR+tmp;
    ifstream * file=new ifstream(sn.c_str());
    score_file.push_back(file);
  }

  int label_len=m_index_to_label.size();

///////////////////////////added by hoss///////////////////////////////////	
	labelInfo** info = new labelInfo*[label_len]; 
	for(int i=0;i<label_len;++i)
	{
		info[i] = new labelInfo[label_len];
	}
///////////////////////////////////////////////////////////////////////////
  double ** score_matrix=new double*[label_len];
  for (int i=0;i<label_len;i++)
    score_matrix[i]=new double[label_len];
  map<Block_Index,double> ** score_vector=new map<Block_Index,double>* [label_len];
  for (int i=0;i<label_len;i++)
    score_vector[i]=new map<Block_Index,double>[label_len];
  map<Block_Index,double>::iterator it;

///////////////////////////added by hoss(too bad)///////////////////////////////////	
  	size_t hossSetLen = m_test_subset.size();
	int hossli,hosslj;
	float hossl_1,hossl_2;
	for(int i=0;i<hossSetLen;++i)
	{
		hossl_1 = m_test_subset[i].label_1;
		hossl_2 = m_test_subset[i].label_2;
		hossli = m_label_to_index[hossl_1];
		hosslj = m_label_to_index[hossl_2];
		info[hossli][hosslj].BLabel = hossl_1>hossl_2?hossl_1:hossl_2;
		info[hossli][hosslj].SLabel = hossl_1>hossl_2?hossl_2:hossl_1;
		info[hosslj][hossli] = info[hossli][hosslj];
	}
///////////////////////////////////////////////////////////////////////////
   for (int tms=0;tms<test_flag.size();tms++)
     if (test_flag[tms]){

      for (int i=0;i<label_len;i++)
	for (int j=0;j<label_len;j++)
	  score_vector[i][j].clear();
      for (int i=0;i<m_test_subset.size();i++){
	ifstream * fin=score_file[i];
	int test_data_index;
	int li=m_label_to_index[m_test_subset[i].label_1];
	int lj=m_label_to_index[m_test_subset[i].label_2];
	
	(*fin) >> test_data_index;
	
	for (int j=m_test_subset[i].start_1;j<=m_test_subset[i].end_1;j++){
	  Block_Index bi;
	  bi.process=m_test_subset[i].process_1;
	  bi.index=j;

	  double sm=0;
	  double flag=(score_vector[li][lj].find(bi)==score_vector[li][lj].end());

	  for (int k=m_test_subset[i].start_2;k<=m_test_subset[i].end_2;k++){
	    double sc;
	    (*fin) >> sc;
	    if (flag)
	      sm=sc;
	    else sm=min(sm,sc);	// MIN-MAX: MIN
	  }
	  
	  // debug
// 	  cout << sm << " " << bi.process << " " << bi.index;
// 	  if (score_vector[li][lj].find(bi)!=score_vector[li][lj].end())
// 	    cout << " " << score_vector[li][lj][bi] << endl;
// 	  else cout << " nil" << endl;

	  score_vector[lj][li][bi] =  score_vector[li][lj][bi] = sm;//added by hoss(too bad)

	  // debug
// 	  cout << score_vector[li][lj][bi] << endl;

	}
      }
      for (int i=0;i<label_len;i++)
	for (int j=0;j<label_len;j++){
	  double sm;
	  it=score_vector[i][j].begin();
	  sm=(*it).second;
	  for (;it!=score_vector[i][j].end();it++)
	    sm=max(sm,(*it).second); // MIN-MAX: MAX

	  score_matrix[j][i] = score_matrix[i][j]=sm;//added by hoss(too bad)
	}
/*added by hoss
      // debug
      for (int i=0;i<label_len;i++) 
	  {
	for (int j=0;j<label_len;j++)
	{
	  cout << "label pair:" << m_index_to_label[i] << " " << m_index_to_label[j] << endl;
	  for (it=score_vector[i][j].begin();it!=score_vector[i][j].end();it++)
	  {
	    cout << "(" << (*it).first.process << "," << (*it).first.index 
		 << ":" << (*it).second << ")";
	  }
	  cout << endl;
	}
      }
*/
//////////////////////////added by hoss//////////////////////
	map<float,float> final_res;
	final_res.clear();
	for(int i=0;i<label_len;++i)
	{
		for(int j=0;j<label_len;++j)
		{
			final_res[info[i][j].BLabel] = 0;
			final_res[info[i][j].SLabel] = 0;
		}
	}
	float temphoss = 0;
	for(int hossI=0;hossI<label_len;++hossI)
	{
		for(int hossJ=hossI+1;hossJ<label_len;++hossJ)
		{
			temphoss = score_matrix[hossJ][hossI];
			if(temphoss > 0)
			{
				final_res[info[hossJ][hossI].BLabel] += 1;
			}
			else
			{
				final_res[info[hossJ][hossI].SLabel] += 1;
			}
		}
	}
	map<float,float>::iterator hossIter = final_res.begin();
	float hossMaxVal = hossIter->second,hossMaxKey = hossIter->first;
	while(hossIter != final_res.end())
	{
		++hossIter;
		if(hossIter->second > hossMaxVal)	
		{
			hossMaxVal = hossIter->second;
			hossMaxKey = hossIter->first;
		}
	}
	cout<<hossMaxKey<<endl;
/////////////////////////////////////////////////////////////
  }

  for (int i=0;i<score_file.size();i++){
    (*(score_file[i])).close();
    delete score_file[i];
  }
  score_file.clear();
  for (int i=0;i<label_len;i++){
    for (int j=0;j<label_len;j++)
      score_vector[i][j].clear();
    delete [] score_matrix[i];
    delete [] score_vector[i];
	delete[] info[i];//added by hoss
  }

  delete [] score_matrix;
  delete [] score_vector;
  delete[] info;
}

// Master entry point: classify every sample in the given test file.
// Counts the file's lines (one sample per line), records the total in
// M3::testLen for later phases, builds an all-true flag vector and
// delegates to the two-argument overload.
void M3::M3_Master::classify_test_data(const string &filename){

  load_subset_config();

  // One flag per line of the test file; classify all of them.
  const size_t sample_count = M3::wc_l(filename);
  M3::testLen = sample_count;
  vector<bool> test_flag(sample_count, true);

  string file_name = filename;
  classify_test_data(file_name,
		     test_flag);
}

// Score every test sample: reuse the test-set length recorded by
// classify_test_data(filename) to build an all-true flag vector, then
// delegate to the flag-vector overload.
void M3::M3_Master::score_test_data(){

  vector<bool> test_flag(M3::testLen, true);
  score_test_data(test_flag);

}






























//M3_Slave




// Construct an idle data slave with an empty sample chain.
M3::M3_Slave::M3_Slave(){
  m_sample_link_head=NULL;
  m_sample_link_tail=NULL;

  // Fix: the destructor unconditionally delete[]s m_sample_arr, but it
  // was previously left uninitialized until pre_divide() ran — tearing
  // down a slave that never divided data was undefined behavior.
  // delete[] on NULL is a safe no-op.
  m_sample_arr=NULL;

  m_train_data_num=0;
  m_memory_enough=true;
}

// Release every buffer this slave owns.
M3::M3_Slave::~M3_Slave(){
  // NOTE(review): m_sample_arr is only assigned in pre_divide(); if the
  // constructor does not NULL it out and pre_divide() never ran, this
  // delete[] hits an uninitialized pointer — confirm the ctor inits it.
  delete [] m_sample_arr;

  // Walk the chain of receive buffers. Each link owns one contiguous
  // Data_Sample array whose first sample's data_vector points at the
  // head of the matching contiguous Data_Node array (see
  // data_unpackage), so deleting ds[0].data_vector frees the whole
  // node buffer in one shot.
  Sample_Link * sl,* sl_tmp;
  for (sl=m_sample_link_head;sl!=NULL;){
    Data_Sample * ds=sl->sample_head;
    delete [] (ds[0].data_vector);
    delete [] ds;
    sl_tmp=sl;            // advance before deleting the link node
    sl=sl->next;
    delete sl_tmp;
  }
}

// Unpackage the load data to our struct.
// Unpackage a freshly received data block into the slave's structures.
// Appends a new link that takes ownership of sample_buf/node_buf, then
// re-points every sample's data_vector into the local node buffer
// (the pointer values received over MPI referenced the sender's
// address space). nd_buf_len is kept for interface compatibility; the
// node layout is implied by the per-sample vector lengths.
void M3::M3_Slave::data_unpackage(Data_Sample * sample_buf,
				  Data_Node * node_buf,
				  int sp_buf_len,
				  int nd_buf_len){
  // Append one link to the chain (single code path for empty and
  // non-empty chains).
  Sample_Link * link=new Sample_Link;
  link->sample_head=sample_buf;
  link->next=NULL;
  if (m_sample_link_tail==NULL)
    m_sample_link_head=link;
  else
    m_sample_link_tail->next=link;
  m_sample_link_tail=link;

  m_sample_link_tail->length=sp_buf_len;
  m_train_data_num+=sp_buf_len;

  // Fix up the data_vector pointers to local addresses.
  int node_offset=0;
  for (int s=0;s<sp_buf_len;s++){
    sample_buf[s].data_vector=&node_buf[node_offset];
    node_offset+=sample_buf[s].data_vector_length;
  }
}

// Print middle information.
void M3::M3_Slave::check_load_data(){
  Sample_Link * sl;
  int tms=0;
  for (sl=m_sample_link_head;sl!=NULL;sl=sl->next){
    TIME_DEBUG_OUT << "In slave_process: " 
		   << m3_my_rank 
		   << " sample_link: " 
		   << ++tms 
		   << endl;
    int i;
    Data_Sample * ds=sl->sample_head;
    for (i=0;i<sl->length;i++){
      TIME_DEBUG_OUT << "slave_process[" 
		     << m3_my_rank 
		     << "][" <<tms << "][" << i << "]:"
		     << ds[i].index << " " 
		     << ds[i].label << " " 
		     << ds[i].data_vector_length << endl;
      int j;
      Data_Node * dn=ds[i].data_vector;
      for (j=0;j<ds[i].data_vector_length;j++)
	debug_out << "(" << dn[j].index << "," << dn[j].value << ")";
      debug_out << endl;
    }
  }
}

// Main load block.
void M3::M3_Slave::load_train_data(){

  // debug
  TIME_DEBUG_OUT << "come in the process: " 
		 << m3_my_rank 
		 <<" load_train_data" 
		 << endl;

  MPI_Status mpi_status;
  int will_do;
  bool read_done_flag=false;
  Data_Sample * sample_buf;
  Data_Node * node_buf;
  int sb_len,nb_len;

  while (!read_done_flag){		// ask & answer
    // Get the control.
    MPI_Recv(&will_do,
	     1,
	     MPI_INT,
	     M3_MASTER_RANK,
	     M3_TAG,
	     MPI_COMM_WORLD,
	     &mpi_status);

    // debug
    TIME_DEBUG_OUT << "slave_process:" 
		   << m3_my_rank 
		   << " receive the CTRL: " 
		   << will_do 
		   << endl;

    // Load is over.
    if (will_do==CTRL_READ_DONE){ // will_do==0
      read_done_flag=true;
    }
    // Must to ask whether the memory is enough.
    else if (will_do==CTRL_ALLOC_MEMORY){ // will_do==1
      // Handle the memory is not enough
      void (* old_handler)()=set_new_handler(M3::M3_Slave::local_memory_scarcity);

      int ask_len[2];
      // Get the ask information.
      MPI_Recv(ask_len,
	       2,
	       MPI_INT,
	       M3_MASTER_RANK,
	       M3_TAG+1,
	       MPI_COMM_WORLD,
	       &mpi_status);
      sb_len=ask_len[0];
      nb_len=ask_len[1];

      // debug
      TIME_DEBUG_OUT << "slave_procss: " 
		     << m3_my_rank 
		     << " require memroy: " 
		     << sb_len 
		     << " & " 
		     << nb_len 
		     << endl;

      // Ask memory.
      try{
	sample_buf=new Data_Sample[sb_len];
      } catch (exception e){
	m_memory_enough=false;
      }
      try{
	node_buf=new Data_Node[nb_len];
      } catch (exception e){
	delete [] sample_buf;
	m_memory_enough=false;
      }
      set_new_handler(old_handler);


      // debug
      // for test if there is no enough memory on one process
      // if (sb_len+m_train_data_num>DEBUG_MEMORY_CAPACITY)
      // 	m_memory_enough=false;
      //       else 
      // 	m_train_data_num+=sb_len;

      // Return response.
      if (m_memory_enough)
	MPI_Send(&CTRL_MEMORY_ENOUGH,
		 1,
		 MPI_INT,
		 M3_MASTER_RANK,
		 M3_TAG,
		 MPI_COMM_WORLD);
      else 
	MPI_Send(&CTRL_MEMORY_SCARCITY,
		 1,
		 MPI_INT,
		 M3_MASTER_RANK,
		 M3_TAG,
		 MPI_COMM_WORLD);
    }
    // Get data and unpackage them.
    else if (will_do==CTRL_GET_DATA){	// will_do==2

      // debug
      TIME_DEBUG_OUT << "slave_process: " 
		     << m3_my_rank 
		     << " begin to receive sample ,len:" 
		     <<sb_len << endl;

      MPI_Recv(sample_buf,
	       sb_len,
	       MPI_Data_Sample,
	       M3_MASTER_RANK,
	       M3_TAG+1,
	       MPI_COMM_WORLD,
	       &mpi_status);

      // debug
      TIME_DEBUG_OUT << "slave_process: " 
		     << m3_my_rank 
		     << " begin to receive node ,len:" 
		     << nb_len 
		     << endl;

      MPI_Recv(node_buf,
	       nb_len,
	       MPI_Data_Node,
	       M3_MASTER_RANK,
	       M3_TAG+2,
	       MPI_COMM_WORLD,
	       &mpi_status);
      data_unpackage(sample_buf,
		     node_buf,
		     sb_len,
		     nb_len);

      // debug
      // check receive buf
      TIME_DEBUG_OUT << "slave_process: " 
		     << m3_my_rank 
		     << " check receive buf" 
		     << endl;
      for (int i=0;i<sb_len;i++)
	TIME_DEBUG_OUT << sample_buf[i].index << " "
		       << sample_buf[i].label << " "
		       << sample_buf[i].data_vector_length << endl;
      for (int i=0;i<nb_len;i++){
	if (i%8==0) debug_out << endl;
	debug_out << "(" << node_buf[i].index << "," << node_buf[i].value << ")";
      }
      debug_out << endl;

    }
  }

  // debug
  TIME_DEBUG_OUT << "slave_process: " 
		 << m3_my_rank 
		 << " read done " 
		 << endl;

  // debug
  check_load_data();

}

// Make the data to be ** Data_Sample to divide.
// Make the data to be ** Data_Sample to divide.
// Flattens the chained receive buffers into one pointer array
// (m_sample_arr) so the divider can address all samples uniformly.
// NOTE(review): a second call would leak the previous array; the train
// flow calls this exactly once, from divide_train_data().
void M3::M3_Slave::pre_divide(){
  m_sample_arr=new Data_Sample*[m_train_data_num];
  int index=0;
  // Fix: dropped the unused local `ds` (it was assigned each link but
  // never read) and scoped the loop variables where they are used.
  for (Sample_Link * sl=m_sample_link_head;sl!=NULL;sl=sl->next)
    for (int i=0;i<sl->length;i++)
      m_sample_arr[index++]=&(sl->sample_head[i]);
}

// Print middle information.
void M3::M3_Slave::check_divide_data(){
  TIME_DEBUG_OUT << "Process " 
		 << m3_my_rank
		 << " has already divid "
		 << m_train_data_num 
		 << " data(s) " 
		 << " to "
		 << m_divide_situation.size()
		 << " block(s)" 
		 << endl;

  int i;
  for (i=0;i<m_divide_situation.size();i++){
    Divide_Info di=m_divide_situation[i];
    TIME_DEBUG_OUT << "Block "
		   << i 
		   << " : "
		   << di.start_offset
		   << " to "
		   << di.end_offset
		   << endl;
  }
}

// Divide the locally loaded training data into subsets of the given
// size and report the resulting block count back to the master.
void M3::M3_Slave::divide_train_data(int hoss_size)//added by hoss
{
  m3_subset_size = hoss_size;//added by hoss
  pre_divide();

  // debug
  TIME_DEBUG_OUT << "slave_process " << m3_my_rank << " now begin to divide data " << endl;

  // Delegate the actual partitioning to the configured divider; with no
  // local data there is simply nothing to divide.
  if (m_train_data_num>0){
    m_divide_situation=m3_divider->divide(m_sample_arr,
					  m_train_data_num,
					  m3_subset_size);
  }
  else {
    m_divide_situation.clear();
  }

  // debug
  TIME_DEBUG_OUT << "slave_process " 
		 << m3_my_rank 
		 << " now finish the divide " 
		 << endl;

  // Report (our rank, number of blocks) to the master.
  int send_info[2];
  send_info[0]=m3_my_rank;
  send_info[1]=static_cast<int>(m_divide_situation.size());
  MPI_Send(send_info, 2, MPI_INT, M3_MASTER_RANK, M3_TAG, MPI_COMM_WORLD);

  // debug
  TIME_DEBUG_OUT << "slave_process " 
		 << m3_my_rank 
		 << " now has sent all info " 
		 << endl;

  // debug
  check_divide_data();

}

// Package sample which is be train as continue memory space to send.
void M3::M3_Slave::subset_sample_package(int ll,
					 int rr,
					 Data_Sample * sample_buf,
					 int & nb_len){
  nb_len=0;
  int len=0;
  int i;
  for (i=ll;i<=rr;i++){
    int j;
    for (j=m_divide_situation[i].start_offset;
	 j<=m_divide_situation[i].end_offset;
	 j++){
      sample_buf[len++]=(*(m_sample_arr[j]));
      nb_len+=(*(m_sample_arr[j])).data_vector_length;
    }
  }
}

// Package node which is be train as continue memory space to send.
void M3::M3_Slave::subset_node_package(int sb_len,
				       Data_Sample * sample_buf,
				       Data_Node * node_buf){
  int len=0;
  int i;
  for (i=0;i<sb_len;i++){
    int j;
    for (j=0;j<sample_buf[i].data_vector_length;j++)
      node_buf[len++]=sample_buf[i].data_vector[j];
  }
}

// Main train block.
// Main train block.
// Data-slave side of the training phase: on each master command, ship
// the requested range of locally divided subsets to a designated train
// process, in three MPI messages (per-subset lengths, then samples,
// then nodes), until CTRL_TRAIN_DONE arrives. The message order here
// must match the receive order in M3_Run::training_train_data.
void M3::M3_Slave::training_train_data(){

  int will_do;
  MPI_Status mpi_status;

  while (1){
    // Get control.
    MPI_Recv(&will_do,
	     1,
	     MPI_INT,
	     M3_MASTER_RANK,
	     M3_TAG,
	     MPI_COMM_WORLD,
	     &mpi_status);

    // debug
    TIME_DEBUG_OUT << "slave_process " 
		   << m3_my_rank 
		   << " receive the CTRL: " 
		   << will_do 
		   << endl;

    // Train is over.
    if (will_do==CTRL_TRAIN_DONE){

      // debug
      TIME_DEBUG_OUT << "slave_process " << m3_my_rank << " train done" << endl;

      break;
    }
    // Train is not over.
    else if (will_do==CTRL_TRAIN_CONTINUE){

      int sent_process;
      int subset_left,subset_right;
      Data_Sample * sample_buf;
      int * subset_len;
      Data_Node * node_buf;
      int sb_len,nb_len;
      int subset_num;
      
      // debug
      TIME_DEBUG_OUT << "slave_process " << m3_my_rank << " get relate information" << endl;

      // Receive send information : which data send to which process.
      // cmd = { first subset index, last subset index, destination rank }.
      int cmd[3];
      MPI_Recv(cmd,
	       3,
	       MPI_INT,
	       M3_MASTER_RANK,
	       M3_TAG,
	       MPI_COMM_WORLD,
	       &mpi_status);
      subset_left=cmd[0];
      subset_right=cmd[1];
      sent_process=cmd[2];
      subset_num=subset_right-subset_left+1;

      // debug
      TIME_DEBUG_OUT << "slave_process " << m3_my_rank 
		     << " recv the infomation: send to process " << sent_process
		     << " the subset from " << subset_left
		     << " to " << subset_right << endl;

      // debug
      TIME_DEBUG_OUT << "slave_process " << m3_my_rank << " send subset length arr" << endl;

      // Send to the train slave the every subset length.
      // sb_len accumulates the total sample count across the range.
      subset_len=new int[subset_num];
      sb_len=0;
      int i;
      for (i=subset_left;i<=subset_right;i++){
	subset_len[i-subset_left]=m_divide_situation[i].length;
	sb_len+=m_divide_situation[i].length;
      }
      MPI_Send(subset_len,
	       subset_num,
	       MPI_INT,
	       sent_process,
	       M3_TAG,
	       MPI_COMM_WORLD);

      // debug
      TIME_DEBUG_OUT << "slave_process " 
		     << m3_my_rank 
		     << " package sample_buf to send " 
		     << endl;

      // Package sample_buf.
      // subset_sample_package also computes nb_len (total node count).
      sample_buf=new Data_Sample[sb_len];
      subset_sample_package(subset_left,
			    subset_right,
			    sample_buf,
			    nb_len);

      // debug
      TIME_DEBUG_OUT << "slave_process " 
		     << m3_my_rank 
		     << " package sample_buf ok! now to send " 
		     << endl;
      TIME_DEBUG_OUT << "slave_process " << m3_my_rank
		     << " need to send " << sb_len
		     << " sample(s) & " << nb_len
		     << " node(s) to process " << sent_process << endl;

      // Send sample_buf.
      MPI_Send(sample_buf,
	       sb_len,
	       MPI_Data_Sample,
	       sent_process,
	       M3_TAG,
	       MPI_COMM_WORLD);

      // debug
      TIME_DEBUG_OUT << "slave_process " << m3_my_rank << " package node_buf to send " << endl;

      // Package node_buf.
      node_buf=new Data_Node[nb_len];
      subset_node_package(sb_len,
			  sample_buf,
			  node_buf);
      // Send node_buf.
      MPI_Send(node_buf,
	       nb_len,
	       MPI_Data_Node,
	       sent_process,
	       M3_TAG,
	       MPI_COMM_WORLD);

      // debug
      TIME_DEBUG_OUT << "slave_process " 
		     << m3_my_rank 
		     << " package node_buf ok! now to send " 
		     << endl;

      // The send buffers are per-request scratch space; release them.
      delete [] subset_len;
      delete [] sample_buf;
      delete [] node_buf;

      // debug
      TIME_DEBUG_OUT << "slave_process " 
		     << m3_my_rank 
		     << " all things has been send & buf has been deleted " 
		     << endl;

    }
  }

  // debug
  TIME_DEBUG_OUT << "slave_process " << m3_my_rank << " train over!" << endl;

}






































// M3_Run

// !!!WARNING: The train process should allocate its buffers before the load block.
// !!!WARNING: Currently they are only allocated while running.
// !!!WARNING: Mistakes may arise that are currently papered over via parameters.
// !!!WARNING: All raw "new" and "delete" calls should eventually be replaced. ^_^

// Unpackage sample_buf as our struct.
void M3::M3_Run::subset_sample_unpackage(int subset_num,
					 int * subset_len,
					 Data_Sample * sample_buf,
					 Data_Sample ** sample_subset,
					 int & node_len){
  node_len=0;
  int i;
  int index=0;
  for (i=0;i<subset_num;i++){
    sample_subset[i]=&sample_buf[index];
    int j;
    for (j=0;j<subset_len[i];j++)
      node_len+=sample_buf[index+j].data_vector_length;
    index+=subset_len[i];

  }
}

// Unpackage sample_buf as our struct.
// Unpackage sample_buf as our struct.
// Re-points each received sample's data_vector at its slice of the
// contiguous local node buffer (the pointers sent over MPI referenced
// the sender's address space).
void M3::M3_Run::subset_node_unpackage(Data_Sample * sample_buf,
				       Data_Node * node_buf,
				       int sb_len){
  Data_Node * cursor=node_buf;
  for (int s=0;s<sb_len;s++){
    sample_buf[s].data_vector=cursor;
    cursor+=sample_buf[s].data_vector_length;
  }
}

// Main train block.
// Main train block.
// Train-process side of the training phase. Repeatedly reports to the
// master that it is free; the master answers either CTRL_TRAIN_DONE or
// CTRL_TRAIN_CONTINUE plus a Subset_Info describing one class pair.
// For each pair we receive subset lengths, samples, and nodes from two
// data slaves (receive order must match M3_Slave::training_train_data's
// send order), train a classifier and save its model under SUBSET_DIR.
void M3::M3_Run::training_train_data(){
  int will_do;
  MPI_Status mpi_status;
  Subset_Info subset_info;
  subset_info.process_rank=m3_my_rank;
  subset_info.save_index=-1;	// -1 marks "no finished work yet" on the first report
  while (1){

    // debug
    TIME_DEBUG_OUT << "train_process " << m3_my_rank << " send free" << endl;

    // Send to master that I'm free.
    MPI_Send(&subset_info,
	     1,
	     MPI_Subset_Info,
	     M3_MASTER_RANK,
	     M3_SUBSET_INFO_TAG,
	     MPI_COMM_WORLD); // I'm free!

    // Get control.
    MPI_Recv(&will_do,
	     1,
	     MPI_INT,
	     M3_MASTER_RANK,
	     M3_TAG,
	     MPI_COMM_WORLD,
	     &mpi_status);

    // debug
    TIME_DEBUG_OUT << "train_process " << m3_my_rank << " recv CTRL: " << will_do << endl;

    if (will_do==CTRL_TRAIN_DONE){

      // debug
      TIME_DEBUG_OUT << "train_process " << m3_my_rank << " train done " << endl;

      break;
    }

    else if (will_do==CTRL_TRAIN_CONTINUE){

      int save_index;

      // debug
      TIME_DEBUG_OUT << "train_process " 
		     << m3_my_rank 
		     << " recv relate information" << endl;

      // Get data information: which data from which process.
      MPI_Recv(&subset_info,
	       1,
	       MPI_Subset_Info,
	       M3_MASTER_RANK,
	       M3_TAG,
	       MPI_COMM_WORLD,
	       &mpi_status);
      m_data_process_1=subset_info.process_1;
      m_data_process_2=subset_info.process_2;
      m_data_subset_num_1=subset_info.subset_num_1;
      m_data_subset_num_2=subset_info.subset_num_2;
      save_index=subset_info.save_index;

      // debug
      TIME_DEBUG_OUT << "train_process " << m3_my_rank 
		     << " relate information_1: " 
		     << "from process " << m_data_process_1
		     << " to get " << m_data_subset_num_1 << " subset(s)" << endl;
      TIME_DEBUG_OUT << "train_process " << m3_my_rank 
		     << " relate information_2: " 
		     << "from process " << m_data_process_2
		     << " to get " << m_data_subset_num_2 << " subset(s)" << endl;

      // debug
      TIME_DEBUG_OUT << "train_process " << m3_my_rank << " get subset len arr " << endl;

      // Get every subset length from two data_slave.
      m_sample_subset_len_1=new int[m_data_subset_num_1];
      m_sample_subset_len_2=new int[m_data_subset_num_2];
      MPI_Recv(m_sample_subset_len_1,
	       m_data_subset_num_1,
	       MPI_INT,
	       m_data_process_1,
	       M3_TAG,
	       MPI_COMM_WORLD,
	       &mpi_status);
      MPI_Recv(m_sample_subset_len_2,
	       m_data_subset_num_2,
	       MPI_INT,
	       m_data_process_2,
	       M3_TAG,
	       MPI_COMM_WORLD,
	       &mpi_status);

      // Total sample counts, needed to size the receive buffers.
      m_sample_len_1=0;
      m_sample_len_2=0;
      for (int i=0;i<m_data_subset_num_1;i++)
	m_sample_len_1+=m_sample_subset_len_1[i];
      for (int i=0;i<m_data_subset_num_2;i++)
	m_sample_len_2+=m_sample_subset_len_2[i];

      // debug
      TIME_DEBUG_OUT << "train_process " << m3_my_rank << " has revc the len arr " << endl;

      // debug
      TIME_DEBUG_OUT << "train_process " << m3_my_rank << " get sample " 
		     << m_sample_len_1 << " & " << m_sample_len_2<< endl;

      // Get sample_buf from two data_slave.
      m_sample_buf_1=new Data_Sample[m_sample_len_1];
      m_sample_buf_2=new Data_Sample[m_sample_len_2];
      MPI_Recv(m_sample_buf_1,
	       m_sample_len_1,
	       MPI_Data_Sample,
	       m_data_process_1,
	       M3_TAG,
	       MPI_COMM_WORLD,
	       &mpi_status);
      MPI_Recv(m_sample_buf_2,
	       m_sample_len_2,
	       MPI_Data_Sample,
	       m_data_process_2,
	       M3_TAG,
	       MPI_COMM_WORLD,
	       &mpi_status);
      // Each side's samples all carry the same label; the first sample
      // is representative.
      m_data_label_1=m_sample_buf_1[0].label;
      m_data_label_2=m_sample_buf_2[0].label;

      // debug
      TIME_DEBUG_OUT <<"train_process " 
		     << m3_my_rank 
		     << " has recv sample!now to unpackage " 
		     << endl;

      // Unpackage sample_buf.
      m_sample_subset_1=new Data_Sample * [m_data_subset_num_1];
      m_sample_subset_2=new Data_Sample * [m_data_subset_num_2];
      subset_sample_unpackage(m_data_subset_num_1,
			      m_sample_subset_len_1,
			      m_sample_buf_1,
			      m_sample_subset_1,
			      m_node_len_1);
      subset_sample_unpackage(m_data_subset_num_2,
			      m_sample_subset_len_2,
			      m_sample_buf_2,
			      m_sample_subset_2,
			      m_node_len_2);

      // debug
      TIME_DEBUG_OUT << "train_process " << m3_my_rank << "has unpackage sample!" << endl;
      TIME_DEBUG_OUT << "train_process " << m3_my_rank 
		     << " from process " << m_data_process_1 
		     << " recv " << m_sample_len_1 
		     << " sample(s) & " << m_node_len_1
		     << " node(s) " << endl;
      TIME_DEBUG_OUT << "train_process " << m3_my_rank 
		     << " from process " << m_data_process_2 
		     << " recv " << m_sample_len_2 
		     << " sample(s) & " << m_node_len_2
		     << " node(s) " << endl;

      // debug
      TIME_DEBUG_OUT << "train_process " << m3_my_rank << " get node " << endl;

      // Get node_buf from two data_slave.
      m_node_buf_1=new Data_Node[m_node_len_1];
      m_node_buf_2=new Data_Node[m_node_len_2];
      MPI_Recv(m_node_buf_1,
	       m_node_len_1,
	       MPI_Data_Node,
	       m_data_process_1,
	       M3_TAG,
	       MPI_COMM_WORLD,
	       &mpi_status);
      MPI_Recv(m_node_buf_2,
	       m_node_len_2,
	       MPI_Data_Node,
	       m_data_process_2,
	       M3_TAG,
	       MPI_COMM_WORLD,
	       &mpi_status);

      // debug
      TIME_DEBUG_OUT << "train_process " 
		     << m3_my_rank 
		     << "has recv node!now unpackage!" << endl;

      // Unpackage node_buf.
      subset_node_unpackage(m_sample_buf_1,
			    m_node_buf_1,
			    m_sample_len_1);
      subset_node_unpackage(m_sample_buf_2,
			    m_node_buf_2,
			    m_sample_len_2);

      // debug
      TIME_DEBUG_OUT << "train_process " << m3_my_rank << "hsa unpackaged! " << endl;

      // debug
      TIME_DEBUG_OUT << "train_process " << m3_my_rank << "begin to train!" << endl;

      // Train one classifier for this pair and save its model under
      // SUBSET_DIR/<save_index>.
      char save_tmp[20];
      sprintf(save_tmp,"%d",save_index);
      string save_dir=SUBSET_DIR+save_tmp;
	  //////////////////added by hoss/////////////////////////////
	  libsvm_parameter temparam;
	  Classifier* tempClass = new libsvm(temparam);
	  tempClass->train(m_sample_subset_1,m_sample_subset_2,
			  			m_data_subset_num_1,m_data_subset_num_2,
						m_sample_subset_len_1,m_sample_subset_len_2,
						save_dir.c_str());
	  // Fix: the classifier was never freed, leaking one libsvm
	  // instance per trained pair. Deleting through Classifier*
	  // matches classify_test_data(), which already does so —
	  // NOTE(review): confirm Classifier declares a virtual dtor.
	  delete tempClass;
	  ////////////////////////////////////////////////////////////
      // debug
      subset_info.subset_memory=m3_my_rank;
      // debug
      TIME_DEBUG_OUT << "train_process " << m3_my_rank << "this times is train ok!" << endl;

      // Release all per-pair receive buffers.
      delete [] m_sample_subset_1;
      delete [] m_sample_subset_2;
      delete [] m_sample_subset_len_1;
      delete [] m_sample_subset_len_2;
      delete [] m_sample_buf_1;
      delete [] m_sample_buf_2;
      delete [] m_node_buf_1;
      delete [] m_node_buf_2;
    }
  }

  // debug
  TIME_DEBUG_OUT << "train_process " << m3_my_rank << " train over! " << endl;

}

// Test-process side of the classification phase. The master drives:
//   CTRL_LOAD_SUBSET   - load a batch of saved subset models from disk
//                        and open one middle-score file per subset;
//   CTRL_CLASSIFY_DATA - receive one test sample, score it with every
//                        loaded classifier, append scores to the files;
//   CTRL_TEST_CLEAR    - free the loaded classifiers and close files;
//   CTRL_TEST_DONE     - testing is finished.
void M3::M3_Run::classify_test_data(){

  int will_do;
  MPI_Status mpi_status;
  vector<Classifier*> classifier;
  vector<Subset_Info> subset_info;
  vector<ofstream*> middle_score;

  classifier.clear();
  subset_info.clear();
  middle_score.clear();

  // One reusable receive slot: a single sample plus a fixed-capacity
  // node buffer its data_vector points into.
  Data_Sample * sample_buf=new Data_Sample;
  Data_Node * node_buf=new Data_Node[NODE_BUF_SIZE];
  sample_buf->data_vector=node_buf;

  while (true){
    MPI_Recv(&will_do,
	     1,
	     MPI_INT,
	     M3_MASTER_RANK,
	     M3_TAG,
	     MPI_COMM_WORLD,
	     &mpi_status);

    // debug
    TIME_DEBUG_OUT << "test_process " << m3_my_rank << " recv CTRL: " << will_do << endl;

    if (will_do==CTRL_TEST_DONE){

      // debug
      TIME_DEBUG_OUT << "test_process " << m3_my_rank << " test over! " << endl;

      break;
    }

    else if (will_do==CTRL_TEST_CLEAR){

      // debug
      TIME_DEBUG_OUT << "test_process " << m3_my_rank << " begin to clear memory! " << endl;

      for (int i=0;i<classifier.size();i++)
	delete classifier[i];
      classifier.clear();

      for (int i=0;i<middle_score.size();i++){
	(*(middle_score[i])).close();
	delete middle_score[i];
      }
      middle_score.clear();

      subset_info.clear();

      // debug
      TIME_DEBUG_OUT << "test_process " << m3_my_rank << " clear memory! " << endl;

    }

    else if (will_do==CTRL_LOAD_SUBSET){
      Subset_Info si;
      si.process_rank=m3_my_rank;
      while (true) {

	// debug
	TIME_DEBUG_OUT << "test_process " << m3_my_rank << " send free! " << endl;

	MPI_Send(&si,
		 1,
		 MPI_Subset_Info,
		 M3_MASTER_RANK,
		 M3_SUBSET_INFO_TAG,
		 MPI_COMM_WORLD);
	MPI_Recv(&si,
		 1,
		 MPI_Subset_Info,
		 M3_MASTER_RANK,
		 M3_TAG,
		 MPI_COMM_WORLD,
		 &mpi_status);

	// debug
	TIME_DEBUG_OUT << "test_process " 
		       << m3_my_rank << " get subset information:" << endl;
	TIME_DEBUG_OUT << "subset " << si.save_index << " : "
		       << si.label_1 << " "
		       << si.label_2 << " "
		       << si.subset_num_1 << " "
		       << si.subset_num_2 << " "
		       << si.subset_memory << endl;
	
	// A negative save_index is the master's end-of-batch marker.
	if (si.save_index<0){

	  // debug
	  TIME_DEBUG_OUT << "test_process " 
			 << m3_my_rank 
			 << " load subset over! " << endl;

	  break;
	}

	// NOTE(review): memory_flag is never set false here, so the
	// scarcity branch below is currently dead — the load does not
	// check available memory. TODO: have the classifier report it.
	bool memory_flag=true;
////////////////////////////////////added by hoss(no concern memory lack)/////////////
	libsvm_parameter hossTemPara;	
	Classifier* cler = new libsvm(hossTemPara);
	char hossStr[20];
	sprintf(hossStr,"%d",si.save_index);
    string save_dir = SUBSET_DIR + hossStr;
	cler->load_model(save_dir.c_str());
	classifier.push_back(cler);
//////////////////////////////////////////////////////////////////////////////////////
	if (memory_flag){
	  subset_info.push_back(si);

	  // Open the middle-score output file for this subset.
	  char tmp[20];
	  sprintf(tmp,"%d",si.save_index);
	  string file_name=SCORE_DIR+tmp;
	  ofstream * out_tmp=new ofstream(file_name.c_str());
	  middle_score.push_back(out_tmp);

	  MPI_Send(&CTRL_MEMORY_ENOUGH,
		   1,
		   MPI_INT,
		   M3_MASTER_RANK,
		   M3_TAG,
		   MPI_COMM_WORLD);
	}
	else {
	  MPI_Send(&CTRL_MEMORY_SCARCITY,
		   1,
		   MPI_INT,
		   M3_MASTER_RANK,
		   M3_TAG,
		   MPI_COMM_WORLD);
	  break;
	}
      }
    }

    else if (will_do==CTRL_CLASSIFY_DATA){

      // debug
      TIME_DEBUG_OUT << "test_process " << m3_my_rank << " get test data! " << endl;

      MPI_Recv(sample_buf,
	       1,
	       MPI_Data_Sample,
	       M3_MASTER_RANK,
	       M3_TAG,
	       MPI_COMM_WORLD,
	       &mpi_status);
      // The received data_vector pointer referenced the master's
      // memory; re-point it at our local node buffer before receiving
      // the nodes into it.
      sample_buf->data_vector=node_buf;
      MPI_Recv(sample_buf->data_vector,
	       sample_buf->data_vector_length,
	       MPI_Data_Node,
	       M3_MASTER_RANK,
	       M3_TAG,
	       MPI_COMM_WORLD,
	       &mpi_status);

      // debug
      TIME_DEBUG_OUT << "test_process " << m3_my_rank << " has recved data " 
		     << sample_buf->index << endl;
      
      for (int i=0;i<subset_info.size();i++){
	int len=subset_info[i].subset_num_1*subset_info[i].subset_num_2;
	// Fix: the old code allocated "new double[len]" and then
	// immediately overwrote the pointer with predict()'s result,
	// leaking len doubles per sample per classifier. predict()'s
	// array is still released via the delete[] below, as before.
	double * score = classifier[i]->predict(sample_buf);
	(*(middle_score[i])) << sample_buf->index << " ";

	for (int j=0;j<len;j++)
	  (*(middle_score[i])) << score[j] << " ";
	(*(middle_score[i])) << endl;
	
	delete [] score;
      }

      // debug
      TIME_DEBUG_OUT << "test_process " << m3_my_rank << " has scored over " << endl;

    }
  }

  delete sample_buf;
  delete [] node_buf;
}

#undef TIME_DEBUG_OUT

#endif
