// GraphRegAllocator.cpp
// Implements class GraphRegAllocator. Originally by czj.
// Transplanted by lixiaolong.
// First builds the interference graph, then builds reg4vars[], which holds the allocation results.
#include "crossbit/GraphRegAllocator.h"
#include "crossbit/VBlock.h"
#include "crossbit/VInst.h"

using namespace crossbit;

// Initialise allocator state for a new basic block and, when the block
// uses more virtual registers than there are target registers, run the
// graph-colouring pre-allocation pass to fill reg4vars[].
//
// Side effects: resets ra_reg_table, ra_vreg_spilledIn, the liveness
// bookkeeping (lower_bound/upper_bound/size) and canGraph.
void GraphRegAllocator::init(VBlock *vb)
{
	lower_bound = 0;
	upper_bound = -1;
	size = 0;
	canGraph = 0;
	vinst_seq = 0;

	// Reset the target-register table: every register FREE, mapped to no vreg.
	struct RegUsage ru;
	ru.status = RegAllocator::FREE;
	ru.mapped_to = -1;
	ru.inst = 0;
	for (XTRegNum i = 0; i < ra_num_of_reg; ++i)
		ra_reg_table[i] = ru;
	UTIL::XTMemset(ra_vreg_spilledIn, 0, sizeof(ra_vreg_spilledIn));

	// Scan the block backwards to collect live sets and the vreg range.
	collectGraphInfo(vb);

	// Graph colouring only pays off when there are more virtual registers
	// than target registers; otherwise the simple allocator is cheaper.
	canGraph = (size > ra_num_of_reg) ? 1 : 0;

	if (canGraph == 1)
	{
		XTInt32 **interfere_graph = interfereGraphGen();
		if (interfere_graph == NULL) {
			std::cerr << "Interfer graph generating failed!\n";
			// BUG FIX: was exit(0), which reports success on a fatal error.
			exit(1);
		}
		// regPreAllocAllVars() takes ownership of interfere_graph and frees it.
		regPreAllocAllVars(interfere_graph, size, ra_num_of_reg);
	}
}
// Backward liveness scan over one basic block.
//
// Walks the instructions in reverse and maintains regList, the set of
// virtual registers live at the current program point: a USE of r makes
// r live, a DEF of r kills it. After each operand is processed the
// current live set is snapshotted into reg_lists (push_front keeps
// reg_lists in forward program order). interfereGraphGen() later derives
// interference edges from these snapshots.
//
// Also tracks upper_bound (the largest vreg number seen; lower_bound
// stays 0 from init()) and sets size = number of vreg slots.
void GraphRegAllocator::collectGraphInfo(VBlock * vblock)
{
		reg_lists.clear();
		std::set<XTRegNum> regList;
		// Iterate instructions last-to-first: liveness flows backwards.
		VBlock::vb_const_reverse_iterator I = vblock->rbegin(),
																					E = vblock->rend();
		for(; I != E; ++I)
		{
				VInst *inst = *I;
				XTInt32 n = inst->getOperandNum();
    //_reverse_ iterate the operand, so that operands DEFed 
    //will be handled prior to operands USEed
    for(XTInt32 i = n; i >= 1; --i)
    {
      Operand opr = inst->getOperand(i);
      XTRegNum r;

      if(opr.type == Operand::VREG_USE ||
         opr.type == Operand::VREG_DEF)
      {
         if(opr.type == Operand::VREG_USE)
         {
            // A use makes r live from here backwards.
            r = opr.reg;
			// Track the highest vreg number seen (czj 2008-7-31).
			if(r > upper_bound)
				upper_bound = r;
            if(regList.find(r) == regList.end())
            {
               regList.insert(r);
            }
         }
         if(opr.type == Operand::VREG_DEF)
         {
            // A definition kills r: it is not live above this point.
            r = opr.reg;
			// Track the highest vreg number seen (czj 2008-7-31).
			if(r > upper_bound)
				upper_bound = r;
            std::set<XTRegNum>::iterator it = regList.find(r);
            if(it != regList.end())
               regList.erase(it);
         }
      }
      // Snapshot the live set at this operand position; push_front so the
      // final list reads in forward program order.
      reg_lists.push_front(regList);
    }
  }
  // Number of vreg slots; -1 + 0 + 1 == 0 when the block has no vregs.
  size = upper_bound - lower_bound + 1;
}  

// Build the interference (adjacency) matrix for the virtual registers
// collected by collectGraphInfo(). Two registers interfere when they
// appear together in one of the live sets recorded in reg_lists.
// Returns a heap-allocated size x size 0/1 matrix; the caller owns it
// (regPreAllocAllVars() releases it).
XTInt32 ** GraphRegAllocator::interfereGraphGen()
{
  // Allocate and zero-fill the size x size matrix in one pass.
  XTInt32 **graph = new XTInt32*[size];
  for (XTInt32 row = 0; row < size; ++row)
  {
    graph[row] = new XTInt32[size];
    for (XTInt32 col = 0; col < size; ++col)
      graph[row][col] = 0;
  }

  // For every live set, connect each pair of members symmetrically.
  for (RegLists::iterator ls = reg_lists.begin(); ls != reg_lists.end(); ++ls)
  {
    for (std::set<XTRegNum>::iterator a = ls->begin(); a != ls->end(); ++a)
    {
      XTInt32 node_a = *a - lower_bound;
      std::set<XTRegNum>::iterator b = a;
      for (++b; b != ls->end(); ++b)
      {
        XTInt32 node_b = *b - lower_bound;
        graph[node_a][node_b] = 1;
        graph[node_b][node_a] = 1;
      }
    }
  }

  return graph;
}


// Chaitin-style graph-colouring pre-allocation over the whole block.
//
// Phase 1 (simplify): repeatedly pick a node with degree < t_reg_num,
// push it on varStack and remove its edges from the working graph.
// When no such node exists (detected via the stall counter "count"),
// the current node is "spilled": its edges are removed and stackDepth
// is shrunk so it never receives a colour (reg4vars stays -1).
//
// Phase 2 (select): pop nodes off varStack and assign each the lowest
// target register not used by any of its neighbours in the original
// (backup) graph.
//
// Ownership: this function frees interfere_graph; the caller's pointer
// is dead afterwards. Results are written into the member reg4vars[]
// (-1 means "no register assigned"; regAlloc() handles that case).
void GraphRegAllocator::regPreAllocAllVars(XTInt32 **interfere_graph, XTInt32 graph_dim, XTInt32 t_reg_num)
{
    XTInt32 ** interfere_graph_backup = new XTInt32 * [graph_dim];
	XTInt32 * varStack = new XTInt32[graph_dim];
    //reg4vars = new XTInt32[graph_dim];
	assert(VREG_MAX_NUM >= graph_dim);
	XTInt32 neighborNum = 0;  //the current node's neighbor t_reg_number
	XTInt32 stackPoint = 0;	

	// Keep an untouched copy of the graph: phase 1 destroys the working
	// copy, but phase 2 needs the original adjacency to check conflicts.
	for(XTInt32 i = 0; i < graph_dim; i++)
		interfere_graph_backup[i] = new XTInt32[graph_dim];

	for(XTInt32 i = 0; i < graph_dim; i++)
		for(XTInt32 j = 0; j < graph_dim; j++)
			interfere_graph_backup[i][j] = interfere_graph[i][j];

	for(XTInt32 i = 0; i < graph_dim; i++)
		varStack[i] = -1;  //initiate the stack

	for(XTInt32 i=0; i < graph_dim; i++)
		reg4vars[i] = -1; //initiate the register assigned to these nodes

	XTInt32 i = 0;
	XTInt32 count = 0; //this is used to detect the graph'state
	XTInt32 stackDepth = graph_dim;
	// Phase 1: round-robin over the nodes until stackDepth nodes are stacked.
	// stackDepth shrinks by one each time a node has to be spilled.
	while(stackPoint < stackDepth ){	
		
		// Skip nodes that are already on the stack.
		XTInt32 inStack = 0;
		for(XTInt32 j = 0; j < stackDepth; j++){
			if(varStack[j] == i){
				inStack = 1;//i # node has been in stack	
				break;
			}
		}		
							
		if(inStack == 0){
			// Current degree of node i in the (shrinking) working graph.
			neighborNum = 0;
			for(XTInt32 j = 0; j < graph_dim; j++){
				if(interfere_graph[i][j] == 1)
					neighborNum += 1;			
			}
		
			if(neighborNum < t_reg_num){
				// Low-degree node: guaranteed colourable. Stack it and
				// remove its edges so its neighbours' degrees drop.
				count = 0;
				varStack[stackPoint++] = i; //the i # node was inserted into the stack. Empty increament mode stack.
				for(XTInt32 j = 0; j < graph_dim; j++){
					interfere_graph[i][j] = 0;
					interfere_graph[j][i] = 0;
				}	
			}
			else{
				// Stalled on a high-degree node. Once every remaining
				// unstacked node has stalled in a row, spill this one:
				// drop its edges and shrink stackDepth so it never gets
				// a colour (its reg4vars entry stays -1).
				count++;
				if(count == stackDepth - stackPoint){
					for(XTInt32 j = 0; j < graph_dim; j++){
						interfere_graph[i][j] = 0;
						interfere_graph[j][i] = 0;
					}
					stackDepth -= 1;
					count = 0;
				}
			}	
		}		
		
		// Advance round-robin over all nodes.
		i++;
		i %= graph_dim;
	}

	// Phase 2: colour the stacked nodes in reverse push order.
	XTInt32 * treg_status = new XTInt32[t_reg_num];

	for(XTInt32 i = stackDepth; i > 0; i--){
			XTInt32 current_var = -1;
			current_var = varStack[i-1]; //get the first node of the node stack
		// NOTE: the inner loop index deliberately shadows the outer i.
		for(XTInt32 i = 0; i < t_reg_num; i++)
			treg_status[i] = 0; //0 indicate this reg of the index hasn't been assigned, 1 for other condition
		// Mark every register already taken by an interfering neighbour
		// (checked against the original, un-destroyed graph).
		for(XTInt32 j = 0; j < graph_dim; j++){
			if(reg4vars[j] >= 0 && interfere_graph_backup[current_var][j] == 1)
			{
				XTInt32 t_reg = 0;
				t_reg = reg4vars[j];
				treg_status[t_reg] = 1;
			}		
		
		}
		// Take the lowest-numbered free register.
		for(XTInt32 j = 0; j < t_reg_num; j++){
			if(treg_status[j] == 0){
				reg4vars[current_var] = j;
				break;
			}		 
		}
	}
	

	/*release the memory allocated dynamically*/	
	for(XTInt32 i = 0; i < graph_dim; i++)
		delete[] interfere_graph_backup[i];
	for(XTInt32 i = 0; i< graph_dim; i++)
		delete[] interfere_graph[i];
	delete[] interfere_graph_backup;
    delete[] interfere_graph;
	delete[] varStack;
	delete[] treg_status;
}
/*void GraphRegAllocator::phaseTask()
{
}
*/
// Allocate target register for virtual register
// Allocate a target register for virtual register "vreg".
//
// Order of attempts:
//   1) vreg already resides in a target register -> reuse it.
//   2) graph allocation disabled (canGraph == 0) -> first FREE register.
//   3) the register recommended by the colouring result (reg4vars[vreg]),
//      evicting its current occupant if necessary.
//   4) otherwise probe all registers starting at vreg % ra_num_of_reg.
// When mode == USE, the vreg's value is loaded from the spill pool into
// the chosen register before returning.
XTRegNum GraphRegAllocator::regAlloc(XTRegNum vreg, RegAccessMode mode)
{
	XTRegNum alloc;

	// 1) vreg already mapped: refresh its instruction stamp and reuse.
	for (XTRegNum i = 0; i < ra_num_of_reg; i++)
	{
		if (ra_reg_table[i].status == ALLOCATED && ra_reg_table[i].mapped_to == vreg)
		{
			if (ra_vreg_spilledIn[vreg] == false && mode == USE)
			{
				regSpillIn(vreg, i);
				ra_vreg_spilledIn[vreg] = true;
			}
			ra_reg_table[i].inst = vinst_seq;
			return i;
		}
	}

	// 2) Simple allocation path when graph colouring was not run.
	// BUG FIX: the original tested "canGraph = 0" (assignment, always
	// false), which both skipped this fast path forever and clobbered
	// canGraph for the rest of the block.
	if (canGraph == 0)
	{
		for (XTRegNum i = 0; i < ra_num_of_reg; i++)
		{
			if (ra_reg_table[i].status == FREE)
			{
				if (ra_vreg_spilledIn[vreg] == false && mode == USE)
				{
					regSpillIn(vreg, i);
					ra_vreg_spilledIn[vreg] = true;
				}
				ra_reg_table[i].status = ALLOCATED;
				ra_reg_table[i].mapped_to = vreg;
				ra_reg_table[i].inst = vinst_seq;
				return i;
			}
		}
	}

	// 3) Try the register the colouring pass recommended for this vreg.
	XTInt32 recommend_treg = reg4vars[vreg];
	XTUint32 preempt = 0; // 1 => must preempt some other register below
	if (recommend_treg < 0)
	{
		// vreg was spilled by the colouring pass (or has no colour).
		preempt = 1;
	}
	else
	{
		struct RegUsage &tRegUsg = ra_reg_table[recommend_treg];
		if (tRegUsg.status == FREE)
		{
			alloc = recommend_treg;
		}
		else if (tRegUsg.status != ALLOCATED || tRegUsg.inst == vinst_seq)
		{
			// RESERVED, or in use by the current instruction: untouchable.
			preempt = 1;
		}
		else // ALLOCATED to another vreg not used by this instruction
		{
			alloc = regSpillOut(recommend_treg);
		}
	}

	// 4) Preempt: probe all registers starting at vreg % ra_num_of_reg.
	if (preempt == 1)
	{
		XTInt32 treg4try = vreg % ra_num_of_reg;
		for (XTInt32 i = 0; i < ra_num_of_reg; i++)
		{
			struct RegUsage &tRegUsg = ra_reg_table[treg4try];
			if (tRegUsg.status == FREE)
			{
				alloc = treg4try;
				break;
			}
			else if (tRegUsg.status != ALLOCATED || tRegUsg.inst == vinst_seq)
			{
				treg4try = (treg4try + 1) % ra_num_of_reg;
			}
			else
			{
				alloc = regSpillOut(treg4try);
				break;
			}
		}
		// NOTE(review): if every register is RESERVED or pinned to the
		// current instruction, the loop can exit without setting alloc
		// (pre-existing behaviour) — TODO confirm this cannot happen.
	}

	if (mode == USE)
	{
		regSpillIn(vreg, alloc);
		ra_vreg_spilledIn[vreg] = true;
	}
	ra_reg_table[alloc].status = ALLOCATED;
	ra_reg_table[alloc].mapped_to = vreg;
	ra_reg_table[alloc].inst = vinst_seq;
	return alloc;
}

// Force allocate target register "expect" to "vreg"
//
// Algorithms:
//  if (vreg is already allocated to expect)
//	return immediately
//  else 
//	empty "expect" register for "Vreg";
//	if (vreg is currently allocated to another target register)
//	    move it to "expect"
//	if (vreg is spilled)
//	    load vreg from spill pool to expect
//	...
// 
XTRegNum GraphRegAllocator::regAllocForce(XTRegNum vreg, XTRegNum expect, RegAccessMode mode)
{
    // Snapshot of the expect register's usage (by value: later table
    // writes below do not change etru).
    RegUsage etru=ra_reg_table[expect];
				assert(etru.status != RegAllocator::RESERVED); // dead-lock
				
			  if (etru.status == RegAllocator::ALLOCATED) 
								{
            // Already holding exactly this vreg: just refresh the stamp
            // (and spill the value in on first USE).
            if(etru.mapped_to==vreg)
           {
            ra_reg_table[expect].inst=vinst_seq;
            if(ra_vreg_spilledIn[vreg]==false&&mode==USE)
           {
            regSpillIn(vreg,expect);
            ra_vreg_spilledIn[vreg]=true;
           }
            return expect;
           }
            // Holding some other vreg: evict it to the spill pool.
            // NOTE(review): evicting a register pinned to the current
            // instruction only emits this debug print — verify intended.
            if (etru.inst==vinst_seq)
                printf("longlong!");
            regSpillOut(expect);
        }
            // If vreg currently lives in a different target register,
            // move it into "expect" with a reg-to-reg copy and free the
            // old register.
            for(XTRegNum i=0;i<ra_num_of_reg;i++)
												{
														if(ra_reg_table[i].status==ALLOCATED&&ra_reg_table[i].mapped_to==vreg)
             {
                (*ra_cb_reg_to_reg)(i,expect);
                ra_reg_table[expect].status=ALLOCATED;
                ra_reg_table[expect].inst=vinst_seq;
																ra_reg_table[expect].mapped_to=vreg;
                ra_reg_table[i].status=FREE;
                ra_reg_table[i].mapped_to=-1;
                return expect;
												}
							   	}
        // vreg is not in any register: it must be (re)loaded from the
        // spill pool on USE.
        ra_vreg_spilledIn[vreg]=false;
								if (mode==USE) 
											{
            	regSpillIn(vreg, expect);
             ra_vreg_spilledIn[vreg]=true;
           }

							 ra_reg_table[expect].status=ALLOCATED;
								ra_reg_table[expect].mapped_to=vreg;
        ra_reg_table[expect].inst=vinst_seq;
				return expect;
}

// Force "vreg" NOT to be allocated to "except"
//
// Algorithm:
//	if vreg is already allocated and it's not "except"
//	    return immediately
//	else 
//	    "reserve" "except" register
//	    alloc = regAlloc(vreg, mode)
//	    "release" "except" register
//
// Allocate a target register for "vreg" while guaranteeing the result is
// not "except": temporarily mark "except" RESERVED, run the normal
// allocator, then restore except's previous status.
XTRegNum GraphRegAllocator::regAllocForceExcept(XTRegNum vreg, XTRegNum except, RegAccessMode mode)
{
    XTInt32 saved_status = ra_reg_table[except].status;

    // If vreg currently occupies "except", evict it first so the normal
    // allocator is forced to place it somewhere else.
    if (saved_status == ALLOCATED && ra_reg_table[except].mapped_to == vreg)
    {
        regSpillOut(except);
        saved_status = FREE;
    }

    ra_reg_table[except].status = RESERVED;
    XTRegNum alloc = regAlloc(vreg, mode);
    ra_reg_table[except].status = saved_status;

    return alloc;
}

// Reserve target register "treg" for exclusive use by the caller,
// spilling whatever virtual register currently occupies it.
// Reserving an already-RESERVED register is a programming error.
void GraphRegAllocator::regAllocReserve(XTRegNum treg)
{
    struct RegUsage &usage = ra_reg_table[treg];
    assert(usage.status != RegAllocator::RESERVED && "regAllocReserve");
    // A register touched by the current instruction should not be taken.
    if (usage.inst == vinst_seq)
        printf("Can't reserve the reg here!!\n");
    if (usage.status == RegAllocator::ALLOCATED)
        regSpillOut(treg);
    usage.status = RegAllocator::RESERVED;
    usage.mapped_to = 0;
}


// Return a previously RESERVED target register to the FREE pool.
// Only RESERVED registers may be released.
void GraphRegAllocator::regAllocRelease(XTRegNum treg)
{
    struct RegUsage &usage = ra_reg_table[treg];
    assert(usage.status == RESERVED);
    usage.status = RegAllocator::FREE;
}

// Load virtual register "vreg" from its spill-pool slot into target
// register "treg" via the spill-in callback.
void GraphRegAllocator::regSpillIn(XTRegNum vreg, XTRegNum treg)
{
    XTMemAddr slot = (XTMemAddr)(ra_spill_pool + vreg);
    ra_cb_spill_in(slot, treg);
}


// Write the virtual register currently held in "treg" back to its
// spill-pool slot and mark it as no longer resident. The register-table
// entry itself is left for the caller to update. Returns "treg".
XTRegNum GraphRegAllocator::regSpillOut(XTRegNum treg)
{
    assert(ra_reg_table[treg].status == RegAllocator::ALLOCATED);

    XTRegNum victim = ra_reg_table[treg].mapped_to;
    XTMemAddr slot = (XTMemAddr)(ra_spill_pool + victim);
    ra_cb_spill_out(treg, slot);
    ra_vreg_spilledIn[victim] = false;

    return treg;
}
