; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mattr=-bmi -mtriple=x86_64-linux | FileCheck %s -check-prefix=X86-64 -check-prefix=X64
; RUN: llc < %s -mattr=-bmi -mtriple=x86_64-linux-gnux32 | FileCheck %s -check-prefix=X86-64 -check-prefix=X32
; RUN: llc < %s -mattr=-bmi -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
; RUN: llc < %s -mattr=-bmi -mtriple=i686--    | FileCheck %s -check-prefix=X86-32

; Use h registers. On x86-64, codegen doesn't support general allocation
; of h registers yet, due to x86 encoding complications.

define void @bar64(i64 inreg %x, ptr inreg %p) nounwind {
; X64-LABEL: bar64:
; X64:       # %bb.0:
; X64-NEXT:    shrq $8, %rdi
; X64-NEXT:    incb %dil
; X64-NEXT:    movb %dil, (%rsi)
; X64-NEXT:    retq
;
; X32-LABEL: bar64:
; X32:       # %bb.0:
; X32-NEXT:    shrq $8, %rdi
; X32-NEXT:    incb %dil
; X32-NEXT:    movb %dil, (%esi)
; X32-NEXT:    retq
;
; WIN64-LABEL: bar64:
; WIN64:       # %bb.0:
; WIN64-NEXT:    shrq $8, %rcx
; WIN64-NEXT:    incb %cl
; WIN64-NEXT:    movb %cl, (%rdx)
; WIN64-NEXT:    retq
;
; X86-32-LABEL: bar64:
; X86-32:       # %bb.0:
; X86-32-NEXT:    incb %ah
; X86-32-NEXT:    movb %ah, (%ecx)
; X86-32-NEXT:    retl

; Increment byte 1 (bits 15:8) of %x and store it to %p.
; The 32-bit target reads the byte directly via %ah (no shift); the
; 64-bit targets must shift because the h-register of an argument in
; rdi/rcx is not generally allocatable (REX encoding restriction).

; See FIXME: on regclass GR8.
; It could be optimally transformed like: incb %ch; movb %ch, (%rdx)

  %t0 = lshr i64 %x, 8
  %t1 = trunc i64 %t0 to i8
  %t2 = add i8 %t1, 1
  store i8 %t2, ptr %p
  ret void
}

define void @bar32(i32 inreg %x, ptr inreg %p) nounwind {
; X64-LABEL: bar32:
; X64:       # %bb.0:
; X64-NEXT:    shrl $8, %edi
; X64-NEXT:    incb %dil
; X64-NEXT:    movb %dil, (%rsi)
; X64-NEXT:    retq
;
; X32-LABEL: bar32:
; X32:       # %bb.0:
; X32-NEXT:    shrl $8, %edi
; X32-NEXT:    incb %dil
; X32-NEXT:    movb %dil, (%esi)
; X32-NEXT:    retq
;
; WIN64-LABEL: bar32:
; WIN64:       # %bb.0:
; WIN64-NEXT:    shrl $8, %ecx
; WIN64-NEXT:    incb %cl
; WIN64-NEXT:    movb %cl, (%rdx)
; WIN64-NEXT:    retq
;
; X86-32-LABEL: bar32:
; X86-32:       # %bb.0:
; X86-32-NEXT:    incb %ah
; X86-32-NEXT:    movb %ah, (%edx)
; X86-32-NEXT:    retl

; i32 variant of bar64: increment byte 1 (bits 15:8) of %x and store it
; to %p.  Only the 32-bit target accesses the byte via %ah; 64-bit
; targets fall back to shrl + incb on the low byte.

  %t0 = lshr i32 %x, 8
  %t1 = trunc i32 %t0 to i8
  %t2 = add i8 %t1, 1
  store i8 %t2, ptr %p
  ret void
}

define void @bar16(i16 inreg %x, ptr inreg %p) nounwind {
; X64-LABEL: bar16:
; X64:       # %bb.0:
; X64-NEXT:    shrl $8, %edi
; X64-NEXT:    incb %dil
; X64-NEXT:    movb %dil, (%rsi)
; X64-NEXT:    retq
;
; X32-LABEL: bar16:
; X32:       # %bb.0:
; X32-NEXT:    shrl $8, %edi
; X32-NEXT:    incb %dil
; X32-NEXT:    movb %dil, (%esi)
; X32-NEXT:    retq
;
; WIN64-LABEL: bar16:
; WIN64:       # %bb.0:
; WIN64-NEXT:    # kill: def $cx killed $cx def $ecx
; WIN64-NEXT:    shrl $8, %ecx
; WIN64-NEXT:    incb %cl
; WIN64-NEXT:    movb %cl, (%rdx)
; WIN64-NEXT:    retq
;
; X86-32-LABEL: bar16:
; X86-32:       # %bb.0:
; X86-32-NEXT:    incb %ah
; X86-32-NEXT:    movb %ah, (%edx)
; X86-32-NEXT:    retl

; i16 variant: increment the high byte of %x and store it to %p.
; On WIN64 the shift is widened to 32 bits, hence the implicit-def
; "kill" marker on $cx/$ecx before shrl.

  %t0 = lshr i16 %x, 8
  %t1 = trunc i16 %t0 to i8
  %t2 = add i8 %t1, 1
  store i8 %t2, ptr %p
  ret void
}

define i64 @qux64(i64 inreg %x) nounwind {
; X86-64-LABEL: qux64:
; X86-64:       # %bb.0:
; X86-64-NEXT:    movq %rdi, %rax
; X86-64-NEXT:    movzbl %ah, %eax
; X86-64-NEXT:    retq
;
; WIN64-LABEL: qux64:
; WIN64:       # %bb.0:
; WIN64-NEXT:    movzbl %ch, %eax
; WIN64-NEXT:    retq
;
; X86-32-LABEL: qux64:
; X86-32:       # %bb.0:
; X86-32-NEXT:    movzbl %ah, %eax
; X86-32-NEXT:    xorl %edx, %edx
; X86-32-NEXT:    retl

; Return (x >> 8) & 0xff: the lshr+and pair should be matched as a
; single movzbl from an h register.  On i686 the upper half of the i64
; result is zeroed separately (xorl %edx, %edx).

  %t0 = lshr i64 %x, 8
  %t1 = and i64 %t0, 255
  ret i64 %t1
}

define i32 @qux32(i32 inreg %x) nounwind {
; X86-64-LABEL: qux32:
; X86-64:       # %bb.0:
; X86-64-NEXT:    movl %edi, %eax
; X86-64-NEXT:    movzbl %ah, %eax
; X86-64-NEXT:    retq
;
; WIN64-LABEL: qux32:
; WIN64:       # %bb.0:
; WIN64-NEXT:    movzbl %ch, %eax
; WIN64-NEXT:    retq
;
; X86-32-LABEL: qux32:
; X86-32:       # %bb.0:
; X86-32-NEXT:    movzbl %ah, %eax
; X86-32-NEXT:    retl

; i32 variant: (x >> 8) & 0xff folds to a single movzbl from the
; h register of the incoming argument (after a copy to rax on SysV,
; since %dil has no h register).

  %t0 = lshr i32 %x, 8
  %t1 = and i32 %t0, 255
  ret i32 %t1
}

define i16 @qux16(i16 inreg %x) nounwind {
; X86-64-LABEL: qux16:
; X86-64:       # %bb.0:
; X86-64-NEXT:    movl %edi, %eax
; X86-64-NEXT:    movzbl %ah, %eax
; X86-64-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-64-NEXT:    retq
;
; WIN64-LABEL: qux16:
; WIN64:       # %bb.0:
; WIN64-NEXT:    movzwl %cx, %eax
; WIN64-NEXT:    shrl $8, %eax
; WIN64-NEXT:    # kill: def $ax killed $ax killed $eax
; WIN64-NEXT:    retq
;
; X86-32-LABEL: qux16:
; X86-32:       # %bb.0:
; X86-32-NEXT:    movzbl %ah, %eax
; X86-32-NEXT:    retl

; i16 variant: x >> 8 is the high byte, so it should lower to a movzbl
; from an h register.  WIN64 currently emits movzwl + shrl instead of
; movzbl %ch — a missed h-register use on this path.

  %t0 = lshr i16 %x, 8
  ret i16 %t0
}
