; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; MemCpy optimizations should take place even in presence of invariant.start
; RUN: opt < %s -passes=memcpyopt -S -verify-memoryssa | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"

target triple = "i686-apple-darwin9"

%0 = type { x86_fp80, x86_fp80 }
declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)

declare ptr @llvm.invariant.start.p0(i64, ptr nocapture) nounwind readonly

; The intermediate alloca and one of the memcpy's should be eliminated, the
; other should be transformed to a memmove.
define void @test1(ptr %P, ptr %Q) nounwind  {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    [[MEMTMP:%.*]] = alloca [[TMP0:%.*]], align 16
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 16 [[MEMTMP]], ptr align 16 [[P:%.*]], i32 32, i1 false)
; CHECK-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 32, ptr [[P]])
; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i32(ptr align 16 [[Q:%.*]], ptr align 16 [[P]], i32 32, i1 false)
; CHECK-NEXT:    ret void
;
  ; Stack temporary used as the middle hop of a P -> memtmp -> Q copy chain.
  %memtmp = alloca %0, align 16
  call void @llvm.memcpy.p0.p0.i32(ptr align 16 %memtmp, ptr align 16 %P, i32 32, i1 false)
  ; invariant.start marks the 32 bytes at %P as unchanging from here on, so
  ; memcpyopt may forward %P through %memtmp: the second copy is rewritten to
  ; read directly from %P (as a memmove, since %P and %Q may alias).
  %i = call ptr @llvm.invariant.start.p0(i64 32, ptr %P)
  call void @llvm.memcpy.p0.p0.i32(ptr align 16 %Q, ptr align 16 %memtmp, i32 32, i1 false)
  ret void
}


; The invariant.start intrinsic does not inhibit transforming the memcpy to a
; memset.
define void @test2(ptr %dst1, ptr %dst2, i8 %c) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr [[DST1:%.*]], i8 [[C:%.*]], i64 128, i1 false)
; CHECK-NEXT:    [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 32, ptr [[DST1]])
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[DST2:%.*]], i8 [[C]], i64 128, i1 false)
; CHECK-NEXT:    ret void
;
  ; %dst1 is filled entirely with %c, so a later copy from it is equivalent
  ; to another memset of %c.
  call void @llvm.memset.p0.i64(ptr %dst1, i8 %c, i64 128, i1 false)
  ; invariant.start covers only the first 32 of the 128 bytes; it must not
  ; block memcpyopt from rewriting the memcpy below into a memset of %dst2.
  %i = call ptr @llvm.invariant.start.p0(i64 32, ptr %dst1)
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst2, ptr align 8 %dst1, i64 128, i1 false)
  ret void
}
