#include <petsc/private/vecscatterimpl.h> /*I   "petscvec.h"    I*/
#if defined(PETSC_HAVE_CUDA)
#include <petsc/private/cudavecimpl.h>
#endif
/* ------------------------------------------------------------------*/
/*@
   VecScatterGetMerged - Returns true if the scatter is completed in the VecScatterBegin()
      and the VecScatterEnd() does nothing

   Not Collective

   Input Parameter:
.   ctx - scatter context created with VecScatterCreate()

   Output Parameter:
.   flg - PETSC_TRUE if the VecScatterBegin/End() are all done during the VecScatterBegin()

   Level: developer

.seealso: VecScatterCreate(), VecScatterEnd(), VecScatterBegin()
@*/
PetscErrorCode  VecScatterGetMerged(VecScatter ctx,PetscBool  *flg)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(ctx,VEC_SCATTER_CLASSID,1);
  PetscValidPointer(flg,2); /* validate the output pointer before dereferencing it, matching the file's argument-checking style */
  *flg = ctx->beginandendtogether;
  PetscFunctionReturn(0);
}

/*@
   VecScatterBegin - Begins a generalized scatter from one vector to
   another. Complete the scattering phase with VecScatterEnd().

   Neighbor-wise Collective on VecScatter

   Input Parameters:
+  ctx - scatter context generated by VecScatterCreate()
.  x - the vector from which we scatter
.  y - the vector to which we scatter
.  addv - either ADD_VALUES, MAX_VALUES, MIN_VALUES or INSERT_VALUES, with INSERT_VALUES mode any location
          not scattered to retains its old value; i.e. the vector is NOT first zeroed.
-  mode - the scattering mode, usually SCATTER_FORWARD.  The available modes are:
    SCATTER_FORWARD or SCATTER_REVERSE


   Level: intermediate

   Options Database: See VecScatterCreate()

   Notes:
   The vectors x and y need not be the same vectors used in the call
   to VecScatterCreate(), but x must have the same parallel data layout
   as that passed in as the x to VecScatterCreate(), similarly for the y.
   Most likely they have been obtained from VecDuplicate().

   You cannot change the values in the input vector between the calls to VecScatterBegin()
   and VecScatterEnd().

   If you use SCATTER_REVERSE the two arguments x and y should be reversed, from
   the SCATTER_FORWARD.

   y[iy[i]] = x[ix[i]], for i=0,...,ni-1

   This scatter is far more general than the conventional
   scatter, since it can be a gather or a scatter or a combination,
   depending on the indices ix and iy.  If x is a parallel vector and y
   is sequential, VecScatterBegin() can serve to gather values to a
   single processor.  Similarly, if y is parallel and x sequential, the
   routine can scatter from one processor to many processors.


.seealso: VecScatterCreate(), VecScatterEnd()
@*/
PetscErrorCode  VecScatterBegin(VecScatter ctx,Vec x,Vec y,InsertMode addv,ScatterMode mode)
{
  PetscErrorCode ierr;
  PetscInt       to_n,from_n;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(ctx,VEC_SCATTER_CLASSID,1);
  PetscValidHeaderSpecific(x,VEC_CLASSID,2);
  PetscValidHeaderSpecific(y,VEC_CLASSID,3);
  /* a context can serve only one scatter at a time: Begin without a matching End is a usage error */
  if (ctx->inuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE," Scatter ctx already in use");

  if (PetscDefined(USE_DEBUG)) {
    /*
     Error checking to make sure these vectors match the vectors used
     to create the vector scatter context. -1 in the from_n and to_n indicate the
     vector lengths are unknown (for example with mapped scatters) and thus
     no error checking is performed.
     */
    if (ctx->from_n >= 0 && ctx->to_n >= 0) {
      ierr = VecGetLocalSize(x,&from_n);CHKERRQ(ierr);
      ierr = VecGetLocalSize(y,&to_n);CHKERRQ(ierr);
      /* in reverse mode the roles of x and y are swapped, so each vector is compared against the opposite stored size */
      if (mode & SCATTER_REVERSE) {
        if (to_n != ctx->from_n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vector wrong size %D for scatter %D (scatter reverse and vector to != ctx from size)",to_n,ctx->from_n);
        if (from_n != ctx->to_n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vector wrong size %D for scatter %D (scatter reverse and vector from != ctx to size)",from_n,ctx->to_n);
      } else {
        if (to_n != ctx->to_n)     SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vector wrong size %D for scatter %D (scatter forward and vector to != ctx to size)",to_n,ctx->to_n);
        if (from_n != ctx->from_n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vector wrong size %D for scatter %D (scatter forward and vector from != ctx from size)",from_n,ctx->from_n);
      }
    }
  }

  /* mark busy before dispatching so a nested/overlapping Begin is caught by the check above */
  ctx->inuse = PETSC_TRUE;
  ierr = PetscLogEventBegin(VEC_ScatterBegin,ctx,x,y,0);CHKERRQ(ierr);
  ierr = (*ctx->ops->begin)(ctx,x,y,addv,mode);CHKERRQ(ierr);
  /* merged scatters complete the communication here, making the subsequent VecScatterEnd() a no-op */
  if (ctx->beginandendtogether && ctx->ops->end) {
    ctx->inuse = PETSC_FALSE;
    ierr = (*ctx->ops->end)(ctx,x,y,addv,mode);CHKERRQ(ierr);
  }
  ierr = PetscLogEventEnd(VEC_ScatterBegin,ctx,x,y,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* --------------------------------------------------------------------*/
/*@
   VecScatterEnd - Completes a generalized scatter from one vector to another.
   Must be called after a matching VecScatterBegin().

   Neighbor-wise Collective on VecScatter

   Input Parameters:
+  ctx - scatter context generated by VecScatterCreate()
.  x - the vector from which we scatter
.  y - the vector to which we scatter
.  addv - one of ADD_VALUES, MAX_VALUES, MIN_VALUES or INSERT_VALUES
-  mode - the scattering mode, usually SCATTER_FORWARD.  The available modes are:
     SCATTER_FORWARD, SCATTER_REVERSE

   Level: intermediate

   Notes:
   If you use SCATTER_REVERSE the arguments x and y should be reversed, from the SCATTER_FORWARD.

   y[iy[i]] = x[ix[i]], for i=0,...,ni-1

.seealso: VecScatterBegin(), VecScatterCreate()
@*/
PetscErrorCode  VecScatterEnd(VecScatter ctx,Vec x,Vec y,InsertMode addv,ScatterMode mode)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(ctx,VEC_SCATTER_CLASSID,1);
  PetscValidHeaderSpecific(x,VEC_CLASSID,2);
  PetscValidHeaderSpecific(y,VEC_CLASSID,3);
  ctx->inuse = PETSC_FALSE; /* this communication round is over; the context may be reused */
  /* nothing to do when the implementation has no end phase, or when it already ran inside VecScatterBegin() */
  if (ctx->ops->end && !ctx->beginandendtogether) {
    ierr = PetscLogEventBegin(VEC_ScatterEnd,ctx,x,y,0);CHKERRQ(ierr);
    ierr = (*ctx->ops->end)(ctx,x,y,addv,mode);CHKERRQ(ierr);
    ierr = PetscLogEventEnd(VEC_ScatterEnd,ctx,x,y,0);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/*@
   VecScatterDestroy - Destroys a scatter context created by VecScatterCreate()

   Collective on VecScatter

   Input Parameter:
.  ctx - the scatter context (set to NULL on return)

   Level: intermediate

.seealso: VecScatterCreate(), VecScatterCopy()
@*/
PetscErrorCode VecScatterDestroy(VecScatter *ctx)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (!*ctx) PetscFunctionReturn(0);
  PetscValidHeaderSpecific(*ctx,VEC_SCATTER_CLASSID,1);
  /* destroying the LAST reference while a scatter is between Begin and End is a usage error */
  if ((*ctx)->inuse && ((PetscObject)(*ctx))->refct == 1) SETERRQ(((PetscObject)(*ctx))->comm,PETSC_ERR_ARG_WRONGSTATE,"Scatter context is in use");
  /* other references remain: just drop ours and null the caller's pointer */
  if (--((PetscObject)(*ctx))->refct > 0) {*ctx = 0; PetscFunctionReturn(0);}

  /* if memory was published with SAWs then destroy it */
  ierr = PetscObjectSAWsViewOff((PetscObject)(*ctx));CHKERRQ(ierr);
  /* let the implementation free its todata/fromdata before the header goes away */
  if ((*ctx)->ops->destroy) {ierr = (*(*ctx)->ops->destroy)(*ctx);CHKERRQ(ierr);}
#if defined(PETSC_HAVE_CUDA)
  /* release GPU index buffers stored in spptr (created by VecScatterInitializeForGPU()) */
  ierr = VecScatterCUDAIndicesDestroy((PetscCUDAIndices*)&((*ctx)->spptr));CHKERRQ(ierr);
#endif
  ierr = PetscHeaderDestroy(ctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
   VecScatterSetUp - Sets up the VecScatter so it can actually move data
   between vectors

   Collective on VecScatter

   Input Parameter:
.  ctx - the scatter context

   Level: intermediate

.seealso: VecScatterCreate(), VecScatterCopy()
@*/
PetscErrorCode VecScatterSetUp(VecScatter ctx)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(ctx,VEC_SCATTER_CLASSID,1);
  /* defer all real work to the type-specific setup routine */
  ierr = (*ctx->ops->setup)(ctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
   VecScatterCopy - Makes a duplicate of a scatter context.

   Collective on VecScatter

   Input Parameter:
.  sctx - the scatter context to copy

   Output Parameter:
.  ctx - the newly created copy

   Level: advanced

.seealso: VecScatterCreate(), VecScatterDestroy()
@*/
PetscErrorCode  VecScatterCopy(VecScatter sctx,VecScatter *ctx)
{
  PetscErrorCode ierr;
  VecScatterType scattertype;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(sctx,VEC_SCATTER_CLASSID,1);
  PetscValidPointer(ctx,2);
  if (!sctx->ops->copy) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Cannot copy this type");
  /* build an empty header on the same communicator, then let the implementation clone its internal data */
  ierr = PetscHeaderCreate(*ctx,VEC_SCATTER_CLASSID,"VecScatter","VecScatter","Vec",PetscObjectComm((PetscObject)sctx),VecScatterDestroy,VecScatterView);CHKERRQ(ierr);
  (*ctx)->to_n   = sctx->to_n;
  (*ctx)->from_n = sctx->from_n;
  ierr = (*sctx->ops->copy)(sctx,*ctx);CHKERRQ(ierr);

  /* the copy must advertise the same type name as the source */
  ierr = VecScatterGetType(sctx,&scattertype);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)(*ctx),scattertype);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
   VecScatterViewFromOptions - Views the scatter context based on values in the
   options database

   Collective on VecScatter

   Input Parameters:
+  A - the scatter context
.  obj - optional object that provides the options prefix
-  name - command line option to trigger the viewing

   Level: intermediate

.seealso:  VecScatter, VecScatterView, PetscObjectViewFromOptions(), VecScatterCreate()
@*/
PetscErrorCode  VecScatterViewFromOptions(VecScatter A,PetscObject obj,const char name[])
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A,VEC_SCATTER_CLASSID,1);
  /* thin wrapper; the generic PetscObject machinery does the option lookup and viewing */
  ierr = PetscObjectViewFromOptions((PetscObject)A,obj,name);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* ------------------------------------------------------------------*/
/*@C
   VecScatterView - Views a vector scatter context.

   Collective on VecScatter

   Input Parameters:
+  ctx - the scatter context
-  viewer - the viewer for displaying the context (NULL selects stdout on the
            context's communicator)

   Level: intermediate

.seealso: VecScatterViewFromOptions(), VecScatterCreate()
@*/
PetscErrorCode  VecScatterView(VecScatter ctx,PetscViewer viewer)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(ctx,VEC_SCATTER_CLASSID,1);
  /* default to ASCII stdout when the caller does not supply a viewer */
  if (!viewer) {ierr = PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)ctx),&viewer);CHKERRQ(ierr);}
  PetscValidHeaderSpecific(viewer,PETSC_VIEWER_CLASSID,2);
  /* viewing is optional for a scatter implementation */
  if (ctx->ops->view) {ierr = (*ctx->ops->view)(ctx,viewer);CHKERRQ(ierr);}
  PetscFunctionReturn(0);
}

/*@C
   VecScatterRemap - Remaps the "from" and "to" indices in a
   vector scatter context. FOR EXPERTS ONLY!

   Collective on VecScatter

   Input Parameters:
+  scat    - vector scatter context
.  tomap   - remapping plan for "to" indices (may be NULL).
-  frommap - remapping plan for "from" indices (may be NULL)

   Level: developer

   Notes:
     In the parallel case the todata contains indices from where the data is taken
     (and then sent to others)! The fromdata contains indices from where the received
     data is finally put locally.

     In the sequential case the todata contains indices from where the data is put
     and the fromdata contains indices from where the data is taken from.
     This is backwards from the parallel case!

@*/
PetscErrorCode  VecScatterRemap(VecScatter scat,PetscInt tomap[],PetscInt frommap[])
{
  VecScatter_MPI_General *to,*from;
  VecScatter_Seq_General *sgto,*sgfrom;
  VecScatter_Seq_Stride  *ssto;
  PetscInt               i;
  PetscErrorCode         ierr;   /* was declared PetscInt; use the proper type expected by CHKERRQ() */

  PetscFunctionBegin;
  PetscValidHeaderSpecific(scat,VEC_SCATTER_CLASSID,1);
  if (tomap)   PetscValidIntPointer(tomap,2);
  if (frommap) PetscValidIntPointer(frommap,3);

  if (scat->ops->remap) {
    /* implementation provides its own remap: delegate entirely */
    ierr = (*scat->ops->remap)(scat,tomap,frommap);CHKERRQ(ierr);
  } else {
    /* the actual layout of todata/fromdata depends on the format field; cast to all candidates */
    to     = (VecScatter_MPI_General*)scat->todata;
    from   = (VecScatter_MPI_General*)scat->fromdata;
    ssto   = (VecScatter_Seq_Stride*)scat->todata;
    sgto   = (VecScatter_Seq_General*)scat->todata;
    sgfrom = (VecScatter_Seq_General*)scat->fromdata;

    /* remap indices from where we take/read data */
    if (tomap) {
      if (to->format == VEC_SCATTER_MPI_TOALL) {
        SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Not for to all scatter");
      } else if (to->format == VEC_SCATTER_MPI_GENERAL) {
        /* handle off processor parts */
        for (i=0; i<to->starts[to->n]; i++) to->indices[i] = tomap[to->indices[i]];

        /* handle local part */
        for (i=0; i<to->local.n; i++) to->local.vslots[i] = tomap[to->local.vslots[i]];

        /* the memcpy optimizations in vecscatter was based on index patterns it has.
           They need to be recalculated when indices are changed (remapped).
         */
        ierr = VecScatterMemcpyPlanDestroy_PtoP(to,from);CHKERRQ(ierr);
        ierr = VecScatterMemcpyPlanCreate_PtoP(to,from);CHKERRQ(ierr);
      } else if (sgfrom->format == VEC_SCATTER_SEQ_GENERAL) {
        /* remap indices*/
        for (i=0; i<sgfrom->n; i++) sgfrom->vslots[i] = tomap[sgfrom->vslots[i]];
        /* update optimizations, which happen when it is a Stride1toSG, SGtoStride1 or SGToSG vecscatter */
        if (ssto->format == VEC_SCATTER_SEQ_STRIDE && ssto->step == 1) {
          PetscInt tmp[2];
          tmp[0] = 0; tmp[1] = sgfrom->n;
          ierr = VecScatterMemcpyPlanDestroy(&sgfrom->memcpy_plan);CHKERRQ(ierr);
          ierr = VecScatterMemcpyPlanCreate_Index(1,tmp,sgfrom->vslots,1/*bs*/,&sgfrom->memcpy_plan);CHKERRQ(ierr);
        } else if (sgto->format == VEC_SCATTER_SEQ_GENERAL) {
          ierr = VecScatterMemcpyPlanDestroy(&sgto->memcpy_plan);CHKERRQ(ierr);
          ierr = VecScatterMemcpyPlanDestroy(&sgfrom->memcpy_plan);CHKERRQ(ierr);
          ierr = VecScatterMemcpyPlanCreate_SGToSG(1/*bs*/,sgto,sgfrom);CHKERRQ(ierr);
        }
      } else if (sgfrom->format == VEC_SCATTER_SEQ_STRIDE) {
        /* renamed from ssto: it aliases the FROM data and previously shadowed the outer ssto */
        VecScatter_Seq_Stride *ssfrom = (VecScatter_Seq_Stride*)sgfrom;

        /* if the remapping is the identity and stride is identity then skip remap */
        if (ssfrom->step == 1 && ssfrom->first == 0) {
          for (i=0; i<ssfrom->n; i++) {
            if (tomap[i] != i) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Unable to remap such scatters");
          }
        } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Unable to remap such scatters");
      } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Unable to remap such scatters");
    }
  }
  if (frommap) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Unable to remap the FROM in scatters yet");

  /*
    Mark the vector lengths as unknown because we do not know the
    lengths of the remapped vectors
  */
  scat->from_n = -1;
  scat->to_n   = -1;
  PetscFunctionReturn(0);
}

/* Given a parallel VecScatter context, return the number of processors and vector entries
   involved in remote (i.e., off-process) communication.

  Input Parameters:
+ ctx   - the context (must be a parallel vecscatter)
- send  - true to select the send info (i.e., todata), otherwise to select the recv info (i.e., fromdata)

  Output parameters:
+ num_procs   - number of remote processors
- num_entries - number of vector entries to send or recv

  Notes:
  Sometimes PETSc internally needs to use the matrix-vector-multiply vecscatter context for other
  purposes. The client code usually only uses MPI_Send/Recv. This group of subroutines provides
  the information needed for such uses.

  .seealso: VecScatterGetRemote_Private(), VecScatterGetRemoteOrdered_Private()
 */
PetscErrorCode VecScatterGetRemoteCount_Private(VecScatter ctx,PetscBool send,PetscInt *num_procs,PetscInt *num_entries)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (ctx->ops->getremotecount) {
    ierr = (*ctx->ops->getremotecount)(ctx,send,num_procs,num_entries);CHKERRQ(ierr);
  } else {
    /* fall back to reading the MPI_General layout directly; non-parallel formats have no remotes */
    VecScatter_MPI_General *data   = (VecScatter_MPI_General*)(send ? ctx->todata : ctx->fromdata);
    PetscBool              remote  = (data->format == VEC_SCATTER_MPI_GENERAL) ? PETSC_TRUE : PETSC_FALSE;
    if (num_procs)   *num_procs   = remote ? data->n               : 0;
    if (num_entries) *num_entries = remote ? data->starts[data->n] : 0;
  }
  PetscFunctionReturn(0);
}

/* Given a parallel VecScatter context, return a plan that represents the remote communication.
   Any output parameter can be NULL.

  Input Parameters:
+ ctx   - the context
- send  - true to select the send info (i.e., todata), otherwise to select the recv info (i.e., fromdata)

  Output parameters:
+ n        - number of remote processors
. starts   - starting point in indices for each proc. ATTENTION: starts[0] is not necessarily zero.
             Therefore, expressions like starts[i+1]-starts[i] and indices[starts[i]+j] work as
             expected for a CSR structure but buf[starts[i]+j] may be out of range if buf was allocated
             with length starts[n]-starts[0]. One should use buf[starts[i]-starts[0]+j] instead.
. indices  - indices of entries to send/recv
. procs    - ranks of remote processors
- bs       - block size

  .seealso: VecScatterRestoreRemote_Private(), VecScatterGetRemoteOrdered_Private()
 */
PetscErrorCode VecScatterGetRemote_Private(VecScatter ctx,PetscBool send,PetscInt *n,const PetscInt **starts,const PetscInt **indices,const PetscMPIInt **procs,PetscInt *bs)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (ctx->ops->getremote) {
    ierr = (*ctx->ops->getremote)(ctx,send,n,starts,indices,procs,bs);CHKERRQ(ierr);
  } else {
    /* fall back to exposing the MPI_General arrays directly; non-parallel formats have no remotes */
    VecScatter_MPI_General *data   = (VecScatter_MPI_General*)(send ? ctx->todata : ctx->fromdata);
    PetscBool              remote  = (data->format == VEC_SCATTER_MPI_GENERAL) ? PETSC_TRUE : PETSC_FALSE;
    if (n)       *n       = remote ? data->n       : 0;
    if (starts)  *starts  = remote ? data->starts  : NULL;
    if (indices) *indices = remote ? data->indices : NULL;
    if (procs)   *procs   = remote ? data->procs   : NULL;
    if (bs)      *bs      = remote ? data->bs      : 0;
  }
  PetscFunctionReturn(0);
}


/* Given a parallel VecScatter context, return a plan that represents the remote communication. Ranks of remote
   processors returned in procs must be sorted in ascending order. Any output parameter can be NULL.

  Input Parameters:
+ ctx   - the context
- send  - true to select the send info (i.e., todata), otherwise to select the recv info (i.e., fromdata)

  Output parameters:
+ n        - number of remote processors
. starts   - starting point in indices for each proc. ATTENTION: starts[0] is not necessarily zero.
             Therefore, expressions like starts[i+1]-starts[i] and indices[starts[i]+j] work as
             expected for a CSR structure but buf[starts[i]+j] may be out of range if buf was allocated
             with length starts[n]-starts[0]. One should use buf[starts[i]-starts[0]+j] instead.
. indices  - indices of entries to send/recv
. procs    - ranks of remote processors
- bs       - block size

  Notes:
  Output parameters like starts, indices must also be adapted according to the sorted ranks.

  .seealso: VecScatterRestoreRemoteOrdered_Private(), VecScatterGetRemote_Private()
 */
PetscErrorCode VecScatterGetRemoteOrdered_Private(VecScatter ctx,PetscBool send,PetscInt *n,const PetscInt **starts,const PetscInt **indices,const PetscMPIInt **procs,PetscInt *bs)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (ctx->ops->getremoteordered) {
    ierr = (*ctx->ops->getremoteordered)(ctx,send,n,starts,indices,procs,bs);CHKERRQ(ierr);
  } else {
    /* fall back to the MPI_General arrays; non-parallel formats have no remotes */
    VecScatter_MPI_General *data   = (VecScatter_MPI_General*)(send ? ctx->todata : ctx->fromdata);
    PetscBool              remote  = (data->format == VEC_SCATTER_MPI_GENERAL) ? PETSC_TRUE : PETSC_FALSE;
    if (n)       *n       = remote ? data->n       : 0;
    if (starts)  *starts  = remote ? data->starts  : NULL;
    if (indices) *indices = remote ? data->indices : NULL;
    if (procs)   *procs   = remote ? data->procs   : NULL;
    if (bs)      *bs      = remote ? data->bs      : 0;
  }
  if (PetscUnlikelyDebug(n && procs)) {
    PetscInt i;
    /* walk backwards so the loop is trivially correct when *n is 0 */
    for (i=*n-1; i>0; i--) { if ((*procs)[i-1] > (*procs)[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"procs[] are not ordered"); }
  }
  PetscFunctionReturn(0);
}

/* Given a parallel VecScatter context, restore the plan returned by VecScatterGetRemote_Private. This gives
   an implementation the chance to free any memory it allocated in the VecScatterGetRemote_Private call.

  Input Parameters:
+ ctx   - the context
- send  - true to select the send info (i.e., todata), otherwise to select the recv info (i.e., fromdata)

  Output parameters:
+ n        - number of remote processors
. starts   - starting point in indices for each proc
. indices  - indices of entries to send/recv
. procs    - ranks of remote processors
- bs       - block size

  .seealso: VecScatterGetRemote_Private()
 */
PetscErrorCode VecScatterRestoreRemote_Private(VecScatter ctx,PetscBool send,PetscInt *n,const PetscInt **starts,const PetscInt **indices,const PetscMPIInt **procs,PetscInt *bs)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (ctx->ops->restoreremote) {
    ierr = (*ctx->ops->restoreremote)(ctx,send,n,starts,indices,procs,bs);CHKERRQ(ierr);
  } else {
    /* default Get returned internal arrays, so no memory to free: just invalidate the caller's pointers */
    if (starts)  *starts  = NULL;
    if (indices) *indices = NULL;
    if (procs)   *procs   = NULL;
  }
  PetscFunctionReturn(0);
}

/* Given a parallel VecScatter context, restore the plan returned by VecScatterGetRemoteOrdered_Private. This
   gives an implementation the chance to free any memory it allocated in the
   VecScatterGetRemoteOrdered_Private call.

  Input Parameters:
+ ctx   - the context
- send  - true to select the send info (i.e., todata), otherwise to select the recv info (i.e., fromdata)

  Output parameters:
+ n        - number of remote processors
. starts   - starting point in indices for each proc
. indices  - indices of entries to send/recv
. procs    - ranks of remote processors
- bs       - block size

  .seealso: VecScatterGetRemoteOrdered_Private()
 */
PetscErrorCode VecScatterRestoreRemoteOrdered_Private(VecScatter ctx,PetscBool send,PetscInt *n,const PetscInt **starts,const PetscInt **indices,const PetscMPIInt **procs,PetscInt *bs)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (ctx->ops->restoreremoteordered) {
    ierr = (*ctx->ops->restoreremoteordered)(ctx,send,n,starts,indices,procs,bs);CHKERRQ(ierr);
  } else {
    /* the unordered restore does the right thing for the default (internal-array) case */
    ierr = VecScatterRestoreRemote_Private(ctx,send,n,starts,indices,procs,bs);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#if defined(PETSC_HAVE_CUDA)

/*@C
   VecScatterInitializeForGPU - Initializes a generalized scatter from one vector
   to another for GPU based computation.

   Input Parameters:
+  inctx - scatter context generated by VecScatterCreate()
-  x - the vector from which we scatter

  Level: intermediate

  Notes:
   Effectively, this function creates all the necessary indexing buffers and work
   vectors needed to move only those data points in a vector which need to
   be communicated across ranks. This is done at the first time this function is
   called. Currently, this only used in the context of the parallel SpMV call in
   MatMult_MPIAIJCUSPARSE.

   This function is executed before the call to MatMult. This enables the memory
   transfers to be overlapped with the MatMult SpMV kernel call.

.seealso: VecScatterFinalizeForGPU(), VecScatterCreate(), VecScatterEnd()
@*/
PETSC_EXTERN PetscErrorCode VecScatterInitializeForGPU(VecScatter inctx,Vec x)
{
  PetscErrorCode ierr;
  PetscInt       i,nrecvs,nsends,sbs,rbs,ns,nr;
  const PetscInt *sstarts,*rstarts,*sindices,*rindices;
  VecScatterType type;
  PetscBool      isSF;

  PetscFunctionBegin;
  /* the SF-based scatter implementation manages its own GPU data; nothing to do for it */
  ierr = VecScatterGetType(inctx,&type);CHKERRQ(ierr);
  ierr = PetscStrcmp(type,VECSCATTERSF,&isSF);CHKERRQ(ierr);
  if (isSF) PetscFunctionReturn(0);

  ierr = VecScatterGetRemote_Private(inctx,PETSC_TRUE/*send*/, &nsends,&sstarts,&sindices,NULL/*procs*/,&sbs);CHKERRQ(ierr);
  ierr = VecScatterGetRemote_Private(inctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,&rindices,NULL/*procs*/,&rbs);CHKERRQ(ierr);
  ns   = nsends ? sstarts[nsends]-sstarts[0] : 0; /* s/rstarts[0] is not necessarily zero */
  nr   = nrecvs ? rstarts[nrecvs]-rstarts[0] : 0;

  if (x->offloadmask != PETSC_OFFLOAD_UNALLOCATED && (nsends>0 || nrecvs>0)) {
    /* spptr caches the GPU indices; build them only on the first call */
    if (!inctx->spptr) {
      PetscInt k,*tindicesSends,*sindicesSends,*tindicesRecvs,*sindicesRecvs;
      /* Here we create indices for both the senders and receivers. */
      ierr = PetscMalloc1(ns,&tindicesSends);CHKERRQ(ierr);
      ierr = PetscMalloc1(nr,&tindicesRecvs);CHKERRQ(ierr);

      /* s/rindices and s/rstarts could be NULL when ns or nr is zero */
      if (ns) {ierr = PetscArraycpy(tindicesSends,&sindices[sstarts[0]],ns);CHKERRQ(ierr);}
      if (nr) {ierr = PetscArraycpy(tindicesRecvs,&rindices[rstarts[0]],nr);CHKERRQ(ierr);}

      /* NOTE: these calls shrink ns and nr to the number of unique indices */
      ierr = PetscSortRemoveDupsInt(&ns,tindicesSends);CHKERRQ(ierr);
      ierr = PetscSortRemoveDupsInt(&nr,tindicesRecvs);CHKERRQ(ierr);

      ierr = PetscMalloc1(sbs*ns,&sindicesSends);CHKERRQ(ierr);
      ierr = PetscMalloc1(rbs*nr,&sindicesRecvs);CHKERRQ(ierr);

      /* sender indices: expand each block index into its bs consecutive entries */
      for (i=0; i<ns; i++) {
        for (k=0; k<sbs; k++) sindicesSends[i*sbs+k] = tindicesSends[i]+k;
      }
      ierr = PetscFree(tindicesSends);CHKERRQ(ierr);

      /* receiver indices: same block expansion on the receive side */
      for (i=0; i<nr; i++) {
        for (k=0; k<rbs; k++) sindicesRecvs[i*rbs+k] = tindicesRecvs[i]+k;
      }
      ierr = PetscFree(tindicesRecvs);CHKERRQ(ierr);

      /* create GPU indices, work vectors, ...; freed later via VecScatterCUDAIndicesDestroy() in VecScatterDestroy() */
      ierr = VecScatterCUDAIndicesCreate_PtoP(ns*sbs,sindicesSends,nr*rbs,sindicesRecvs,(PetscCUDAIndices*)&inctx->spptr);CHKERRQ(ierr);
      ierr = PetscFree(sindicesSends);CHKERRQ(ierr);
      ierr = PetscFree(sindicesRecvs);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}

/*@C
   VecScatterFinalizeForGPU - Finalizes a generalized scatter from one vector to
   another for GPU based computation.

   Input Parameter:
.  inctx - scatter context generated by VecScatterCreate()

  Level: intermediate

  Notes:
   Effectively, this function resets the temporary buffer flags. Currently, it is
   only used in the context of the parallel SpMV call in MatMult_MPIAIJCUSPARSE.
   Once the MatMultAdd is finished, the GPU temporary buffers used for messaging are no longer valid.

.seealso: VecScatterInitializeForGPU(), VecScatterCreate(), VecScatterEnd()
@*/
PETSC_EXTERN PetscErrorCode VecScatterFinalizeForGPU(VecScatter inctx)
{
  PetscFunctionBegin;
  /* nothing to clean up at present; kept as the hook paired with VecScatterInitializeForGPU() */
  PetscFunctionReturn(0);
}

#endif
