/*
 * ====================================================
 * Copyright (C) 2007 by Ellips BV. All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this
 * software is freely granted, provided that this notice
 * is preserved.
 * ====================================================
 */

  #include "x86_64mach.h"

  .global SYM (memcpy)
  SOTYPE_FUNCTION(memcpy)

/*
 * void *memcpy(void *dest, const void *src, size_t n)
 *
 * Native Client (NaCl) x86-64 sandboxed implementation.  Pointers are
 * 32-bit offsets into the sandbox; the sandbox base lives in r15 and
 * addresses are materialized either with the %nacl:(r15, reg) form or
 * with the naclrestbp/nacljmp pseudo-instructions from x86_64mach.h.
 *
 * In:   edi = dest offset, esi = src offset, edx = byte count
 * Out:  rax = dest (returned unchanged, per the C contract)
 *
 * Strategy: n < 16  -> byte loop; 16 <= n < 256 -> 8-byte loop;
 * n >= 256 -> align dest to 8, then copy 128 bytes per iteration
 * through eight registers, with byte loops for head and tail.
 */
SYM (memcpy):
  movl    edi, eax                /* Store destination in return value */
  cmpl    $16, edx
  jb      .Lbyte_copy             /* Tiny copy: byte loop only */

  movl    edi, r8d                /* Align destination on quad word boundary */
  andl    $7, r8d
  jz      .Lquadword_aligned      /* Already 8-byte aligned */
  movl    $8, ecx
  subl    r8d, ecx                /* ecx = 8 - (dest & 7) = head bytes */
  subl    ecx, edx                /* edx = bytes remaining after the head */

.Lheader_loop:                    /* Copy ecx head bytes, one at a time */
  movb    %nacl:(r15, rsi), r8b
  inc     esi
  movb    r8b, %nacl:(r15, rdi)
  inc     edi
  dec     ecx
  jnz     .Lheader_loop

.Lquadword_aligned:
  cmpl    $256, edx               /* Medium copies skip the 128-byte loop */
  jb      .Lquadword_copy

  pushq    rax                    /* Save return value; rax is clobbered below */
  pushq    r12                    /* r12-r14 are callee-saved in this ABI */
  pushq    r13
  pushq    r14

  movl    edx, ecx                /* Copy 128 bytes at a time */
  shrl    $7, ecx                 /* ecx = number of 128-byte chunks */

  /*
   * Avoid revealing the sandbox base address.
   * In particular this means that we don't do the following:
   *   movq 32(r15,rsi), r11
   *   ...
   *   movq r11, %nacl:32(r15,rdi)
   * because the latter instruction might be reached via a direct or
   * indirect jump when r11 contains the sandbox base address in its
   * top 32 bits, and this would write the sandbox base address into
   * memory.  We treat r11 as a write-only register to avoid
   * revealing the sandbox base address to user code.
   * Instead, we spill rdx and use that. Additionally, we avoid string
   * instructions (movs) because they leave the full 64 bits in rsi/rdi.
   */
  /*
   * Spill rbp without writing its upper 32 bits (the sandbox base) to
   * memory: pre-zero a stack slot, then store only the low 32 bits.
   */
  pushq   $0
  movl    ebp, (rsp)
  pushq   rdx  /* Save byte count */
  .p2align 4

.Lloop:
  naclrestbp esi, r15             /* rbp = r15 + esi: materialized src ptr */
  movq       (rbp), rax           /* Load bytes 0..63 into eight registers */
  movq     8 (rbp), r8
  movq    16 (rbp), r9
  movq    24 (rbp), r10
  movq    32 (rbp), rdx
  movq    40 (rbp), r12
  movq    48 (rbp), r13
  movq    56 (rbp), r14

  naclrestbp edi, r15             /* rbp = r15 + edi: materialized dest ptr */
  movq    rax,    (rbp)           /* Store bytes 0..63 */
  movq    r8 ,  8 (rbp)
  movq    r9 , 16 (rbp)
  movq    r10, 24 (rbp)
  movq    rdx, 32 (rbp)
  movq    r12, 40 (rbp)
  movq    r13, 48 (rbp)
  movq    r14, 56 (rbp)

  naclrestbp esi, r15             /* Re-materialize src for bytes 64..127 */
  movq    64  (rbp), rax
  movq    72  (rbp), r8
  movq    80  (rbp), r9
  movq    88  (rbp), r10
  movq    96  (rbp), rdx
  movq    104 (rbp), r12
  movq    112 (rbp), r13
  movq    120 (rbp), r14

  naclrestbp edi, r15             /* Re-materialize dest for bytes 64..127 */
  movq    rax,  64 (rbp)
  movq    r8 ,  72 (rbp)
  movq    r9 ,  80 (rbp)
  movq    r10,  88 (rbp)
  movq    rdx,  96 (rbp)
  movq    r12, 104 (rbp)
  movq    r13, 112 (rbp)
  movq    r14, 120 (rbp)

  leal    128 (rsi), esi          /* Advance 32-bit offsets; flags preserved */
  leal    128 (rdi), edi

  dec     ecx
  jnz     .Lloop

  popq    rcx /* Restore byte count */
  popq    rax                     /* Reload the spilled 32-bit ebp value */
  naclrestbp eax, r15             /* Restore rbp = sandbox base + saved ebp */
  /* Copy the remaining bytes */
  andl    $127, ecx               /* Bytes left over from the 128-byte loop */
  jz      .Lrep1_end
.Lrep1:
  movb    %nacl:(r15, rsi), r8b
  inc     esi
  movb    r8b, %nacl:(r15, rdi)
  inc     edi
  dec     ecx
  jnz     .Lrep1
.Lrep1_end:
  popq    r14                     /* Unwind saves in reverse order */
  popq    r13
  popq    r12
  popq    rax                     /* Restore return value (dest) */
  pop     r11                     /* Sandboxed return: NaCl forbids ret, so */
  nacljmp r11d, r15               /* pop the return address and nacljmp to it */


.Lbyte_copy:                      /* n < 16: copy byte by byte */
  testl   edx, edx
  jz      .Lbyte_copy_end         /* n == 0: nothing to do */
.Lbyte_copy_loop:
  movb    %nacl:(r15, rsi), r8b
  inc     esi
  movb    r8b, %nacl:(r15, rdi)
  inc     edi
  dec     edx
  jnz     .Lbyte_copy_loop
.Lbyte_copy_end:
  pop     r11                     /* Sandboxed return (see above) */
  nacljmp r11d, r15


.Lquadword_copy:                  /* 16 <= n < 256: 8 bytes at a time */
  movl    edx, ecx
  shrl    $3, ecx                 /* ecx = number of quadwords */
  jz      .Lrep2_end
  .p2align 4
.Lrep2:
  movq    %nacl:(r15, rsi), r8
  add     $8, esi
  movq    r8, %nacl:(r15, rdi)
  add     $8, edi
  dec     ecx
  jnz     .Lrep2
.Lrep2_end:
  andl    $7, edx                 /* Bytes left over from the quadword loop */
  jz      .Lrep3_end
.Lrep3:
  /* Copy the remaining bytes */
  movb    %nacl:(r15, rsi), r8b
  inc     esi
  movb    r8b, %nacl:(r15, rdi)
  inc     edi
  dec     edx
  jnz     .Lrep3
.Lrep3_end:
  pop     r11                     /* Sandboxed return (see above) */
  nacljmp r11d, r15