// GraphRegAllocator.cpp
// Implements class GraphRegAllocator (original author: czj).
// Transplanted by lixiaolong.
// First builds the interference graph, then fills reg4vars[], which holds the allocation results.
#include "crossbit/GraphRegAllocator.h"
#include "crossbit/VBlock.h"
#include "crossbit/VInst.h"

using namespace crossbit;

//#define PRINT
// Reset allocator state for a fresh block, scan it for virtual-register
// usage, and -- only when the block has more vregs than target registers --
// run the graph-coloring pre-allocation pass.
void GraphRegAllocator::init(VBlock *vb)
{
	// Reset per-block bookkeeping.
	lower_bound = 0;
	upper_bound = -1;
	size = 0;
	canGraph = 0;
	vinst_seq = 0;

	// Every target register starts out FREE and unmapped.
	struct RegUsage blank;
	blank.status = RegAllocator::FREE;
	blank.mapped_to = -1;
	blank.inst = 0;
	for (XTRegNum reg = 0; reg < ra_num_of_reg; ++reg)
		ra_reg_table[reg] = blank;

	// No vreg is resident in a register yet.
	UTIL::XTMemset(ra_vreg_spilledIn, 0, sizeof(ra_vreg_spilledIn));

	// Collect per-instruction live sets and the vreg numbering range (size).
	collectGraphInfo(vb);

	// Graph coloring only pays off when vregs outnumber target registers;
	// otherwise the simple on-demand allocator is used and running the
	// coloring algorithm would be a waste of time.
	canGraph = (size > ra_num_of_reg) ? 1 : 0;
	if (canGraph == 1)
	{
		interfereGraphGen();
		regPreAllocAllVars(size, ra_num_of_reg); // czj 2009.4.6
	}
}

// Backward liveness scan over one VBlock.  For every instruction it records
// the set of virtual registers live at that point (collected into reg_lists,
// which ends up in forward instruction order), counts each vreg's
// appearances in vreg_count, and widens [lower_bound, upper_bound] to cover
// every vreg number seen.  "size" is then the extent of that range.
// NOTE(review): vreg_count is only incremented here -- the initialisation
// loop below is commented out, so counts accumulate across calls unless the
// array is reset elsewhere; confirm.
void GraphRegAllocator::collectGraphInfo(VBlock * vblock)
{

 //#ifdef PRINT
 //vblock->print(std::cout);//czj 2009.4.8
 //#endif
 //std::cout<<"enter SPC: "<<std::setiosflags(std::ios::showbase)<<std::hex<<vblock->enterSPC()<<std::endl;
    
 /*for(XTInt32 i = 0; i < VREG_MAX_NUM; i++)
 {
  vreg_count[i] = 0;
  vreg_degree[i] = 0;
 }*/
    
 reg_lists.clear();
	std::set<XTRegNum> regList;    // current live set, maintained bottom-up
 std::set<XTRegNum> regList4def;//2009.5.5  vregs DEFed by the previously-scanned (lower) instruction
 std::set<XTRegNum> regList4use;//2009.5.5  vregs USEd by the previously-scanned (lower) instruction
	VBlock::vb_const_reverse_iterator I = vblock->rbegin(),E = vblock->rend();
	for(; I != E; ++I)
	{
	 VInst *inst = *I;
		XTInt32 n = inst->getOperandNum();
        
  // A vreg both USEd and DEFed by the lower instruction stays live above
  // it, so strip such vregs from the pure-definition set first.
  std::set<XTRegNum>::iterator it_use = regList4use.begin();//2009.5.5
  for(; it_use != regList4use.end(); it_use++)
  {
   XTInt32 temp = *it_use;
   std::set<XTRegNum>::iterator it_def = regList4def.find(temp);
   if(it_def != regList4def.end())
    regList4def.erase(it_def);
  }
            
  // A pure definition kills liveness: walking upwards, a vreg written
  // (and not read) by the lower instruction is dead above it, so drop it
  // from the live set.
  // NOTE: the it_def declared inside the loop body shadows the loop
  // iterator; the inner one indexes regList, the outer one regList4def.
  std::set<XTRegNum>::iterator it_def = regList4def.begin();
  for(; it_def != regList4def.end(); it_def++)
  {
   XTInt32 temp = *it_def;
   std::set<XTRegNum>::iterator it_def = regList.find(temp);
   if(it_def != regList.end())
    regList.erase(it_def);
  }

  regList4use.clear();
  regList4def.clear();
   //_reverse_ iterate the operand, so that operands DEFed 
   //will be handled prior to operands USEed
  for(XTInt32 i = n; i >= 1; --i)
  {
   Operand opr = inst->getOperand(i);
   XTRegNum r;

   if(opr.type == Operand::VREG_USE ||opr.type == Operand::VREG_DEF)
   {
    r = opr.reg;           
    vreg_count[r] = vreg_count[r] + 1;//VREG r use times, 2009.5.7, czj
                
			 if(r > upper_bound)  // track the highest vreg number seen
				 upper_bound = r;
                
    if(regList.find(r) == regList.end())
    {
     regList.insert(r);  // every referenced vreg is live at this instruction
    }
               
    // Remember USEs and DEFs separately; they are reconciled at the top
    // of the next (higher) instruction's iteration.
    if(opr.type == Operand::VREG_USE)
    {
     std::set<XTRegNum>::iterator it = regList4use.find(r);
     if(it == regList4use.end())
      regList4use.insert(r);
    }
               
    if(opr.type == Operand::VREG_DEF)
    {
     std::set<XTRegNum>::iterator it = regList4def.find(r);
     if(it == regList4def.end())
      regList4def.insert(r);
    }
   }
           /* if(opr.type == Operand::VREG_USE ||opr.type == Operand::VREG_DEF)
            {
                if(opr.type == Operand::VREG_USE)
                {
                    r = opr.reg;
			//modefied by czj 2008-7-31
			        if(r > upper_bound)
				        upper_bound = r;
                    if(regList.find(r) == regList.end())
                    {
                        regList.insert(r);
                    }
                }
                if(opr.type == Operand::VREG_DEF)
                {
                    r = opr.reg;
			        //modefied by czj 2008-7-31
			        if(r > upper_bound)
				        upper_bound = r;
                    std::set<XTRegNum>::iterator it = regList.find(r);
                    if(it != regList.end())
                        regList.erase(it);
                }
            }*/
  }
  reg_lists.push_front(regList);  // push_front => front of reg_lists is the first instruction's live set
 }
 size = upper_bound - lower_bound + 1;  // lower_bound is 0 (set by init)
}  

//XTInt32 ** GraphRegAllocator::interfereGraphGen()
void GraphRegAllocator::interfereGraphGen()
{
 for(XTInt32 tempV = 0; tempV < size; tempV++)
  for(XTInt32 tempH = 0; tempH < size; tempH++)
   interfere_graph[tempV][tempH] = 0;

 #ifdef PRINT
 std::cout<<"interfere info is as follows: "<<std::endl;
 std::cout<<"regLists size is: "<<reg_lists.size()<<std::endl;
 RegLists::iterator iter_tmp = reg_lists.begin(); // czj 2009.4.8
	for(int loop = 0; iter_tmp != reg_lists.end(); iter_tmp++, loop++)
	{
  std::set<XTRegNum>::iterator it_tmp = (*iter_tmp).begin();
		std::cout<<"line "<<loop<<" has "<<(*iter_tmp).size()<<" elements"<<std::endl;
		for(; it_tmp != (*iter_tmp).end(); it_tmp++)
		{
            std::cout<<(*it_tmp)-0<<' ';
		}
		std::cout<<std::endl;
	}
 #endif
    
 RegLists::iterator iter = reg_lists.begin();
 for(; iter != reg_lists.end(); iter++)
 {
  std::set<XTRegNum>::iterator it = (*iter).begin();
  for(; it != (*iter).end(); it++)
  {
   XTInt32 temp = *it - lower_bound;
   std::set<XTRegNum>::iterator it_next = it;
   it_next++;
   for(; it_next != (*iter).end(); it_next++)
   {
    XTInt32 temp_next = (*it_next) - lower_bound;
    interfere_graph[temp][temp_next] = 1;
    interfere_graph[temp_next][temp] = 1;
   }
  }
 }
    
 //collect each vreg's interfere edges, 2009.5.7, czj
 /*for(XTInt32 i = 0; i < size; i++){
  for(XTInt32 j = 0; j < size; j++)
  {
   if(interfere_graph[i][j] == 1)
    vreg_degree[i + lower_bound] += 1;
  }
 }*/
    
 #ifdef PRINT
 std::cout<<"lower bound is: "<<lower_bound<<std::endl;//czj 2009.4.8
	std::cout<<"interfere graph is: "<<std::endl;
 std::cout<<"     ";
 for(XTInt32 temp = 0; temp < size; temp++){
  std::cout<<std::setw(2)<<temp+lower_bound<<' ';
 }
 std::cout<<std::endl;
 for(XTInt32 tempV = 0; tempV < size; tempV++){
  std::cout<<"v"<<std::setw(2)<<tempV+lower_bound<<": ";
  for(XTInt32 tempH = 0; tempH < size; tempH++){
   std::cout<<std::setw(2)<<interfere_graph[tempV][tempH]<<' ';
	 }
	 std::cout<<std::endl;
 }
 #endif
}

// Chaitin-style simplify/spill/select pass over the interference graph.
//   graph_dim -- number of graph nodes (== size; node = vreg - lower_bound)
//   t_reg_num -- number of target registers
//
// Phase 1 (simplify): repeatedly push any node with fewer than t_reg_num-3
// neighbours onto varStack and strip its edges.  The "-3" accounts for the
// three target registers (1, 3 and 4) that the select phase below never
// hands out.  When a full round finds no simplifiable node, the node with
// the highest degree seen in that round is spilled: recorded in spilledVReg,
// its edges removed, and stackDepth shrunk so it is never coloured.
//
// Phase 2 (select): pop nodes off varStack and give each the lowest-numbered
// target register not used by any already-coloured neighbour (skipping
// registers 1, 3 and 4).  Results land in reg4vars[], indexed by absolute
// vreg number; spilled or uncolourable vregs keep -1 (handled on demand by
// regAlloc's preemption path).
//
// interfere_graph is destroyed during simplification and restored from
// interfere_graph_backup before returning.
void GraphRegAllocator::regPreAllocAllVars(XTInt32 graph_dim, XTInt32 t_reg_num)
{
 XTInt32 ** interfere_graph_backup = new XTInt32 * [graph_dim];
	XTInt32 * varStack = new XTInt32[graph_dim];
	assert(VREG_MAX_NUM >= graph_dim);
	XTInt32 neighborNum = 0;  //the current node's neighbor t_reg_number
	XTInt32 stackPoint = 0;	

	for(XTInt32 i = 0; i < graph_dim; i++)
		interfere_graph_backup[i] = new XTInt32[graph_dim];

	// Snapshot the graph; the simplify loop below clears edges destructively.
	for(XTInt32 i = 0; i < graph_dim; i++)
		for(XTInt32 j = 0; j < graph_dim; j++)
			interfere_graph_backup[i][j] = interfere_graph[i][j];

	for(XTInt32 i = 0; i < graph_dim; i++)
		varStack[i] = -1;  //initiate the stack

	for(XTInt32 i=0; i < VREG_MAX_NUM; i++)
		reg4vars[i] = -1; //initiate the register assigned to these nodes

	XTInt32 i = 0;             // current candidate node (graph index, not absolute vreg)
	XTInt32 count = 0; //this is used to detect the graph'state
	XTInt32 stackDepth = graph_dim;  // nodes still to be coloured; shrinks on each spill
    //modified by czj 2009.5.8
 // spilledVReg is indexed by absolute vreg number (node + lower_bound).
 XTInt32 spilledVReg[VREG_MAX_NUM];
 for(XTInt32 initial = 0; initial < VREG_MAX_NUM; initial++)
  spilledVReg[initial] = 0;
        
 XTInt32 maxNeighborNum = 0;   // worst degree seen since the last simplification
 XTInt32 maxNeighborNode = -1; // node carrying that degree (spill candidate)
	while(stackPoint < stackDepth ){	
		
  // Spilled nodes are out of the game entirely.
  if(spilledVReg[i+lower_bound] == 1){
   i = (i+1)%graph_dim;
   continue;
  }
        
		// Skip nodes already pushed (linear scan of the stack).
		XTInt32 inStack = 0;
		for(XTInt32 j = 0; j < stackDepth; j++){
			if(varStack[j] == i){
				inStack = 1;//i # node has been in stack	
				break;
			}
		}		
							
		if(inStack == 0){
			// Count the node's remaining neighbours.
			neighborNum = 0;
			for(XTInt32 j = 0; j < graph_dim; j++){
				if(interfere_graph[i][j] == 1)
					neighborNum += 1;			
			}
		
			// Low-degree node: guaranteed colourable with the registers the
			// select phase may use, so push it and strip its edges.
			if(neighborNum < t_reg_num - 3){ //czj 2009.4.3
				count = 0; 
    maxNeighborNum = 0;
    maxNeighborNode = -1;
			 varStack[stackPoint++] = i; //the i # node was inserted into the stack. Empty increament mode stack.
			 for(XTInt32 j = 0; j < graph_dim; j++){
				 interfere_graph[i][j] = 0;
				 interfere_graph[j][i] = 0;
			 }	
		 }
		 else{
			 // High-degree node: remember the worst one seen this round.
			 count++;
    //double currentNeighborRate = 0.0;
    //currentNeighborRate = neighborNum / vreg_count[i+lower_bound];
    if(neighborNum > maxNeighborNum){
     maxNeighborNum = neighborNum;
     maxNeighborNode = i;
    }
			 // A full round over every un-stacked node found nothing
			 // simplifiable: spill the highest-degree node recorded above.
			 if(count == stackDepth - stackPoint){
                    
     //select a proper vreg to spilled out, from test result, this strategy is not good. ##2009.5.7, czj
     /*int current_prio = vreg_count[lower_bound + i];
     XTInt32 spill_node = i; //which node to be spilled out
     for(XTInt32 temp_spill = 0; temp_spill < graph_dim; temp_spill++)
     {
      if(vreg_count[temp_spill + lower_bound] == 0)
       continue;
                            
      XTInt32 inStack = 0;
      for(XTInt32 temp = 0; temp < stackDepth; temp++){
       if(varStack[temp] == temp_spill){
        inStack = 1;
        break;
       }
      }
      if(inStack == 1)
       continue;

      if(vreg_count[lower_bound + temp_spill] < current_prio){
       current_prio = vreg_count[lower_bound + temp_spill];
       spill_node = temp_spill;
      }
     }
     i = spill_node; //select over, 2009.5.7, czj*/
     
     // Redirect i to the spill victim (falls back to the current
     // node if none was recorded).
     if(maxNeighborNode >= 0)
      i = maxNeighborNode;
    
     spilledVReg[i+lower_bound] = 1;//modefied by czj 2009.5.8 
				 for(XTInt32 j = 0; j < graph_dim; j++){
					 interfere_graph[i][j] = 0;
					 interfere_graph[j][i] = 0;
				 }//need something to do with spilled var***
				 stackDepth -= 1;
				 count = 0;
			 }
		 }	
	 }		
		
	 i++;
	 i %= graph_dim;
 }
 //for debug,czj,2009.5.8
 /*std::cout<<"spilled node:"<<std::endl;
 for(XTInt32 ii=0; ii < VREG_MAX_NUM; ii++)
 {
  if(spilledVReg[ii] == 1)
   std::cout<<std::dec<<ii<<' ';
 }
 std::cout<<std::endl;
 std::cout<<"assigned result:"<<std::endl;
 for(XTInt32 ii=0; ii < stackDepth; ii++)
 {
  std::cout<<std::dec<<(varStack[ii]+lower_bound)<<' ';
 }
 std::cout<<std::endl;*/

	// Scratch array: which tregs are taken by already-coloured neighbours.
	XTInt32 * treg_status = new XTInt32[t_reg_num];

	// Select phase: colour nodes in reverse push order.
	// NOTE: the inner loop below re-declares i, shadowing this loop counter.
	for(XTInt32 i = stackDepth; i > 0; i--){
		XTInt32 current_var = -1;
		current_var = varStack[i-1]; //get the first node of the node stack
		for(XTInt32 i = 0; i < t_reg_num; i++)
			treg_status[i] = 0; //0 indicate this reg of the index hasn't been assigned, 1 for other condition
		// Mark the registers held by coloured neighbours (using the backup,
		// since the working graph was emptied during simplification).
		for(XTInt32 j = 0; j < graph_dim; j++){
			if(reg4vars[j + lower_bound] >= 0 && interfere_graph_backup[current_var][j] == 1)
			{
				XTInt32 t_reg = 0;
				t_reg = reg4vars[j + lower_bound];
				treg_status[t_reg] = 1;
			}		
		}
		// Pick the lowest free register, never handing out 1, 3 or 4
		// (presumably reserved for fixed-register requests -- TODO confirm).
		// If none is free, reg4vars stays -1 and regAlloc preempts at runtime.
		for(XTInt32 j = 0; j < t_reg_num; j++){
			if((j==1) || (j==3) || (j==4))//czj 2009.4.3
			 continue;
   if(treg_status[j] == 0){
				reg4vars[current_var + lower_bound] = j;//czj 2009.4.9
				break;
			}		 
		}
	}
	
	/*restore the interference graph clobbered during simplification*/	
	for(XTInt32 i = 0; i < graph_dim; i++){
	 for(XTInt32 j = 0; j < graph_dim; j++)
	 {
	  interfere_graph[i][j] = interfere_graph_backup[i][j];
	 }
	}

 #ifdef PRINT
 std::cout<<"reg alloc result: "<<std::endl;
 for(XTInt32 i = 0; i < VREG_MAX_NUM; i++)
  std::cout<<std::setw(2)<<i<<' ';
 std::cout<<std::endl;
 for(XTInt32 i = 0; i < VREG_MAX_NUM; i++)
  std::cout<<std::setw(2)<<reg4vars[i]<<' ';
 std::cout<<std::endl;
 #endif

	/*release the memory allocated dynamically*/
	for(XTInt32 i = 0; i < graph_dim; i++)
		delete[] interfere_graph_backup[i];
	delete[] interfere_graph_backup;
	delete[] varStack;
	delete[] treg_status;
}
/*void GraphRegAllocator::phaseTask()
{
}
*/
// Allocate a target register for virtual register "vreg".
// mode==USE additionally reloads the vreg's value from the spill pool into
// the chosen register unless it is already resident (ra_vreg_spilledIn).
//
// Order of attempts:
//  1. vreg already mapped to some treg -> reuse it.
//  2. canGraph==0 (no graph colouring for this block) -> first FREE treg.
//  3. graph-colouring hint reg4vars[vreg], shared or evicted as needed.
//  4. preemption: round-robin probe starting at vreg % ra_num_of_reg.
XTRegNum GraphRegAllocator::regAlloc(XTRegNum vreg, RegAccessMode mode)
{
	XTRegNum alloc = -1;
	// Attempt 1: vreg is already resident in a target register.
	for(XTRegNum i=0;i<ra_num_of_reg;i++)
	{
		if(ra_reg_table[i].status==ALLOCATED && ra_reg_table[i].mapped_to==vreg)//czj 2009.4.5
		{
   // First USE since the value was spilled out: reload it.
   if(ra_vreg_spilledIn[vreg]==false&&mode==USE)
   {
    regSpillIn(vreg,i);
    ra_vreg_spilledIn[vreg]=true;
   }
			ra_reg_table[i].inst=vinst_seq;
			return i;
		}
	}
 // Attempt 2: simple allocator for small blocks -- grab any FREE register.
 // NOTE(review): if no register is FREE here, control falls through to the
 // graph path below and reads reg4vars/interfere_graph that were not
 // computed for this block (regPreAllocAllVars only runs when canGraph==1);
 // confirm that state is intended or unreachable.
 if (canGraph==0)
 { 
		for(XTRegNum i=0;i<ra_num_of_reg;i++)
		{  
		 if(ra_reg_table[i].status==FREE)
   {
    if(ra_vreg_spilledIn[vreg]==false&&mode==USE)
    { 
     regSpillIn(vreg,i);
					ra_vreg_spilledIn[vreg]=true;
    }
				ra_reg_table[i].status=ALLOCATED;
				ra_reg_table[i].mapped_to=vreg;
				ra_reg_table[i].inst=vinst_seq;
				return i;
			}
  } 
 }
 // Attempt 3: use the register the graph-colouring pass recommended.
	XTInt32 recommend_treg = -1;
	recommend_treg = reg4vars[vreg];
	XTUint32 preempt = 0; //whether preempt reg of other variables, 0 no, 1 yes  
	if(recommend_treg < 0)
		preempt = 1;	 //when tReg is reserved or not have a tReg for the vreg, set mark to true
	else{
		struct RegUsage & tRegUsg = ra_reg_table[recommend_treg];
		if(tRegUsg.status == FREE)
		{	
   alloc = recommend_treg;
  }
	    //else if(tRegUsg.status != ALLOCATED || tRegUsg.inst == vinst_seq)
		else if(tRegUsg.status == RESERVED || tRegUsg.inst == vinst_seq)
		{
   // Hinted register is reserved or pinned to the current instruction:
   // fall back to preemption below.
   preempt = 1;
  }
  else	//tRegUsg.status == ALLOCATED && tRegUsg.inst != vinst_seq
  {
			// The hinted register's occupant does not interfere with vreg, so
			// the occupant is not live at this point: take the register over,
			// discarding the occupant's value (only marked non-resident, no
			// spill-out).  NOTE(review): relies on the interference graph
			// being exact for this block -- verify.
			if(interfere_graph[tRegUsg.mapped_to - lower_bound][vreg - lower_bound] == 0){
				alloc = recommend_treg;
			 ra_vreg_spilledIn[tRegUsg.mapped_to] = false;
			}
			else{
    // Occupant is live: spill it out and reuse the register.
    alloc = regSpillOut(recommend_treg);
   }
  }                        
	}                                     
 
	// Attempt 4: no usable hint -- probe every register, starting at
	// vreg % ra_num_of_reg, taking the first FREE one.  Remember the first
	// preemptable register (ALLOCATED and not pinned to this instruction)
	// as the spill victim in case nothing is FREE.
	if(preempt == 1){  //can not allocated
		XTInt32 treg4spill = -1; //czj 2009.4.3
  XTInt32 treg4try = 0;
  treg4try = vreg % ra_num_of_reg; //try all the tregs, begin from treg whose # equals to vreg%ra_num_of_reg
  for(XTInt32 i=0; i<ra_num_of_reg;i++)  //czj 2009.4.2
  {    //find the one vreg % ra_num_of_reg
   struct RegUsage & tRegUsg = ra_reg_table[treg4try];
   if(tRegUsg.status == FREE)
   {
    alloc = treg4try;
    break;
   }
		 else//czj 2009.4.3
   {
    if(treg4spill == -1){
     if(tRegUsg.status == ALLOCATED && tRegUsg.inst != vinst_seq)
     {
      treg4spill = treg4try;
					}
				}
    treg4try = (treg4try+1) % ra_num_of_reg;
			}
  }
  // alloc == treg4try exactly when the FREE probe above broke out;
  // otherwise evict the remembered victim.
  // NOTE(review): if every register is RESERVED or pinned to the current
  // instruction, both the probe and treg4spill fail and alloc stays -1,
  // making ra_reg_table[alloc] below an out-of-bounds access -- confirm
  // this state is unreachable.
  if(alloc != treg4try && treg4spill != -1){
			alloc = regSpillOut(treg4spill);
		}
	}	
 if (mode==USE)
 {
		regSpillIn(vreg, alloc);
		ra_vreg_spilledIn[vreg]=true;
 }
	ra_reg_table[alloc].status = ALLOCATED;//czj 2009.4.5
	ra_reg_table[alloc].mapped_to = vreg;
	ra_reg_table[alloc].inst = vinst_seq; 
 return alloc;
}

// Force allocate target register "expect" to "vreg".
//
// Algorithm:
//  if (vreg is already allocated to expect)
//	return immediately
//  else 
//	empty "expect" register for "vreg";
//	if (vreg is currently allocated to another target register)
//	    move it to "expect"
//	if (vreg is spilled)
//	    load vreg from spill pool to expect
//	...
// 
XTRegNum GraphRegAllocator::regAllocForce(XTRegNum vreg, XTRegNum expect, RegAccessMode mode)
{
 // By-value snapshot of the slot; all updates below go through ra_reg_table.
 RegUsage etru=ra_reg_table[expect];
	assert(etru.status != RegAllocator::RESERVED); // dead-lock
				
	if (etru.status == RegAllocator::ALLOCATED) 
	{
  // Already holding vreg: just refresh the pin and reload if needed.
  if(etru.mapped_to==vreg)
  {
   ra_reg_table[expect].inst=vinst_seq;
   if(ra_vreg_spilledIn[vreg]==false&&mode==USE)
   {
    regSpillIn(vreg,expect);
    ra_vreg_spilledIn[vreg]=true;
   }
   return expect;
  }
  
  // The occupant is pinned to the current instruction: evicting it here
  // would lose a value the instruction still needs.
  if (etru.inst==vinst_seq)
   printf("In Function regAllocForce: deadlock occur!!!\n");
		
  //force_spillout_no++;//czj 2009.4.3
  // Evict the current occupant to the spill pool.
  regSpillOut(expect);
 }
 // vreg lives in some other register: move it into "expect" with the
 // reg-to-reg callback and free the old slot.
 for(XTRegNum i=0;i<ra_num_of_reg;i++)
	{
		if(ra_reg_table[i].status==ALLOCATED&&ra_reg_table[i].mapped_to==vreg)//czj 2009.4.5
  {
   (*ra_cb_reg_to_reg)(i,expect);
			ra_reg_table[expect].status = ALLOCATED;
   ra_reg_table[expect].inst=vinst_seq;
			ra_reg_table[expect].mapped_to=vreg;
   ra_reg_table[i].status=FREE;
   ra_reg_table[i].mapped_to=-1;
   return expect;
		}
	}
 // vreg is resident nowhere: mark it non-resident, then (for USE) load it
 // from the spill pool into "expect".
 ra_vreg_spilledIn[vreg]=false;
	if (mode==USE) 
	{
  regSpillIn(vreg, expect);
  ra_vreg_spilledIn[vreg]=true;
 }

	ra_reg_table[expect].status=ALLOCATED;
	ra_reg_table[expect].mapped_to=vreg;
 ra_reg_table[expect].inst=vinst_seq;
	return expect;
}

// Allocate "vreg" with a caller-supplied register-mask preference.
// NOTE(review): reg_mask is ignored -- the request is always forced into
// target register 3 (one of the registers 1/3/4 that regPreAllocAllVars
// excludes from graph colouring, presumably reserving them for fixed
// requests like this one); confirm the hard-coding is intentional.
XTRegNum GraphRegAllocator::regAllocExpect(XTRegNum vreg, XTUint8 reg_mask, RegAccessMode mode)
{
 return regAllocForce(vreg,3,mode);
}
// Allocate a target register for "vreg" while guaranteeing the result is
// NOT "except".
//
// Algorithm: temporarily mark "except" RESERVED around a normal regAlloc()
// call so the allocator cannot pick it, then restore its previous status.
// If vreg currently lives in "except", it is spilled out first (the slot
// then counts as FREE) so regAlloc() must place it elsewhere.
XTRegNum GraphRegAllocator::regAllocForceExcept(XTRegNum vreg, XTRegNum except, RegAccessMode mode)
{
	XTInt32 saved_status = ra_reg_table[except].status;

	// "except" already holds vreg: evict it so the allocator picks another.
	if (saved_status == ALLOCATED && ra_reg_table[except].mapped_to == vreg)
	{
		regSpillOut(except);
		saved_status = FREE;
	}

	ra_reg_table[except].status = RESERVED;      // block this register
	XTRegNum chosen = regAlloc(vreg, mode);
	ra_reg_table[except].status = saved_status;  // undo the reservation

	return chosen;
}

// Pin target register "treg" so the allocator cannot hand it out: spill its
// current occupant (if any) to the pool and mark the slot RESERVED.
// Reserving an already-RESERVED register is a programming error (asserted).
void GraphRegAllocator::regAllocReserve(XTRegNum treg)
{
	struct RegUsage &entry = ra_reg_table[treg];
	assert(entry.status != RegAllocator::RESERVED && "regAllocReserve");

	// A register touched by the current instruction must stay available.
	if (entry.inst == vinst_seq)
		printf("Can't reserve the reg here!!\n");

	if (entry.status == RegAllocator::ALLOCATED)//czj 2009.4.5
		regSpillOut(treg);

	entry.status = RegAllocator::RESERVED;
	entry.mapped_to = 0;
}

// Drop a reservation made by regAllocReserve(): only RESERVED registers may
// be released (asserted); the slot returns to the FREE pool.
void GraphRegAllocator::regAllocRelease(XTRegNum treg)
{
	struct RegUsage &entry = ra_reg_table[treg];
	assert(entry.status == RESERVED);
	entry.status = RegAllocator::FREE;
}

// Load virtual register "vreg" from its slot in the spill pool into target
// register "treg" via the back-end spill-in callback.
void GraphRegAllocator::regSpillIn(XTRegNum vreg, XTRegNum treg)
{
	XTMemAddr slot = (XTMemAddr)(ra_spill_pool + vreg);
	(*ra_cb_spill_in)(slot, treg);
	//spillin_no++;
}

// Store the vreg currently held in target register "treg" back into its
// spill-pool slot, mark it as no longer resident, and return the freed treg.
// The register must actually be ALLOCATED (asserted).
// NOTE: the table entry's status/mapping are left untouched -- callers are
// responsible for updating them.
XTRegNum GraphRegAllocator::regSpillOut(XTRegNum treg)
{
	//spillout_no++;
	assert(ra_reg_table[treg].status == RegAllocator::ALLOCATED);

	XTRegNum victim = ra_reg_table[treg].mapped_to;
	XTMemAddr slot = (XTMemAddr)(ra_spill_pool + victim);

	(*ra_cb_spill_out)(treg, slot);
	ra_vreg_spilledIn[victim] = false;
	return treg;
}
