; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=licm -S < %s | FileCheck %s
; RUN: opt -passes='loop-mssa(licm)' -S %s | FileCheck %s

; If we can prove that a local allocation is thread-local, we can insert
; stores during promotion which wouldn't be legal otherwise.

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-linux-generic"

@p = external global ptr

declare ptr @malloc(i64)

; Exercise the TLS case
define ptr @test(i32 %n) {
; CHECK-LABEL: @test(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[MEM:%.*]] = call noalias dereferenceable(16) ptr @malloc(i64 16)
; CHECK-NEXT:    br label [[FOR_BODY_LR_PH:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[ADDR_PROMOTED:%.*]] = load i32, ptr [[MEM]], align 4
; CHECK-NEXT:    br label [[FOR_HEADER:%.*]]
; CHECK:       for.header:
; CHECK-NEXT:    [[NEW1:%.*]] = phi i32 [ [[ADDR_PROMOTED]], [[FOR_BODY_LR_PH]] ], [ [[NEW:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT:    [[I_02:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[GUARD:%.*]] = load atomic ptr, ptr @p monotonic, align 8
; CHECK-NEXT:    [[EXITCMP:%.*]] = icmp eq ptr [[GUARD]], null
; CHECK-NEXT:    br i1 [[EXITCMP]], label [[FOR_BODY]], label [[EARLY_EXIT:%.*]]
; CHECK:       early-exit:
; CHECK-NEXT:    [[NEW1_LCSSA:%.*]] = phi i32 [ [[NEW1]], [[FOR_HEADER]] ]
; CHECK-NEXT:    store i32 [[NEW1_LCSSA]], ptr [[MEM]], align 4
; CHECK-NEXT:    ret ptr null
; CHECK:       for.body:
; CHECK-NEXT:    [[NEW]] = add i32 [[NEW1]], 1
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_02]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[N:%.*]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_HEADER]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    [[NEW_LCSSA:%.*]] = phi i32 [ [[NEW]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[SPLIT:%.*]] = phi ptr [ [[MEM]], [[FOR_BODY]] ]
; CHECK-NEXT:    store i32 [[NEW_LCSSA]], ptr [[MEM]], align 4
; CHECK-NEXT:    ret ptr null
;
entry:
  ;; ignore the required null check for simplicity
  ;; %mem is a noalias malloc result that is only loaded/stored locally, so
  ;; LICM may treat it as thread-local and promote the loop's load/store to
  ;; a register (the ADDR_PROMOTED load and exit-block stores above).
  %mem = call dereferenceable(16) noalias ptr @malloc(i64 16)
  br label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  br label %for.header

for.header:
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %old = load i32, ptr %mem, align 4
  ; deliberately unanalyzable branch: the guard is an atomic load of an
  ; external global, so the early exit cannot be removed or predicted
  %guard = load atomic ptr, ptr @p monotonic, align 8
  %exitcmp = icmp eq ptr %guard, null
  br i1 %exitcmp, label %for.body, label %early-exit

early-exit:
  ; this exit can be taken before the body's store ever runs; the store
  ; promotion inserts on this path (see CHECK early-exit) is only legal
  ; because the memory is provably thread-local
  ret ptr null

for.body:
  %new = add i32 %old, 1
  store i32 %new, ptr %mem, align 4
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.header, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
  %split = phi ptr [ %mem, %for.body ]
  ret ptr null
}

; Stack allocations can also be thread-local
define ptr @test2(i32 %n) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[MEM:%.*]] = alloca i8, i32 16, align 1
; CHECK-NEXT:    br label [[FOR_BODY_LR_PH:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[ADDR_PROMOTED:%.*]] = load i32, ptr [[MEM]], align 4
; CHECK-NEXT:    br label [[FOR_HEADER:%.*]]
; CHECK:       for.header:
; CHECK-NEXT:    [[NEW1:%.*]] = phi i32 [ [[ADDR_PROMOTED]], [[FOR_BODY_LR_PH]] ], [ [[NEW:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT:    [[I_02:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[GUARD:%.*]] = load atomic ptr, ptr @p monotonic, align 8
; CHECK-NEXT:    [[EXITCMP:%.*]] = icmp eq ptr [[GUARD]], null
; CHECK-NEXT:    br i1 [[EXITCMP]], label [[FOR_BODY]], label [[EARLY_EXIT:%.*]]
; CHECK:       early-exit:
; CHECK-NEXT:    [[NEW1_LCSSA:%.*]] = phi i32 [ [[NEW1]], [[FOR_HEADER]] ]
; CHECK-NEXT:    store i32 [[NEW1_LCSSA]], ptr [[MEM]], align 4
; CHECK-NEXT:    ret ptr null
; CHECK:       for.body:
; CHECK-NEXT:    [[NEW]] = add i32 [[NEW1]], 1
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_02]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[N:%.*]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_HEADER]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    [[NEW_LCSSA:%.*]] = phi i32 [ [[NEW]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[SPLIT:%.*]] = phi ptr [ [[MEM]], [[FOR_BODY]] ]
; CHECK-NEXT:    store i32 [[NEW_LCSSA]], ptr [[MEM]], align 4
; CHECK-NEXT:    ret ptr null
;
entry:
  ;; same structure as @test, but the memory comes from an alloca instead of
  ;; malloc; stack memory is likewise thread-local, so promotion still fires
  ;; (ADDR_PROMOTED load plus stores sunk onto both exits, per the CHECKs).
  %mem = alloca i8, i32 16
  br label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  br label %for.header

for.header:
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %old = load i32, ptr %mem, align 4
  ; deliberately unanalyzable branch: atomic load of an external global
  %guard = load atomic ptr, ptr @p monotonic, align 8
  %exitcmp = icmp eq ptr %guard, null
  br i1 %exitcmp, label %for.body, label %early-exit

early-exit:
  ; exit reachable before any store in the body; promotion inserts a store
  ; here, which is legal only because the alloca is thread-local
  ret ptr null

for.body:
  %new = add i32 %old, 1
  store i32 %new, ptr %mem, align 4
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.header, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
  %split = phi ptr [ %mem, %for.body ]
  ret ptr null
}

declare noalias ptr @custom_malloc(i64)

; Custom allocation function marked via noalias.
define ptr @test_custom_malloc(i32 %n) {
; CHECK-LABEL: @test_custom_malloc(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[MEM:%.*]] = call noalias dereferenceable(16) ptr @custom_malloc(i64 16)
; CHECK-NEXT:    br label [[FOR_BODY_LR_PH:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[ADDR_PROMOTED:%.*]] = load i32, ptr [[MEM]], align 4
; CHECK-NEXT:    br label [[FOR_HEADER:%.*]]
; CHECK:       for.header:
; CHECK-NEXT:    [[NEW1:%.*]] = phi i32 [ [[ADDR_PROMOTED]], [[FOR_BODY_LR_PH]] ], [ [[NEW:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT:    [[I_02:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[GUARD:%.*]] = load volatile ptr, ptr @p, align 8
; CHECK-NEXT:    [[EXITCMP:%.*]] = icmp eq ptr [[GUARD]], null
; CHECK-NEXT:    br i1 [[EXITCMP]], label [[FOR_BODY]], label [[EARLY_EXIT:%.*]]
; CHECK:       early-exit:
; CHECK-NEXT:    [[NEW1_LCSSA:%.*]] = phi i32 [ [[NEW1]], [[FOR_HEADER]] ]
; CHECK-NEXT:    store i32 [[NEW1_LCSSA]], ptr [[MEM]], align 4
; CHECK-NEXT:    ret ptr null
; CHECK:       for.body:
; CHECK-NEXT:    [[NEW]] = add i32 [[NEW1]], 1
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_02]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[N:%.*]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_HEADER]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    [[NEW_LCSSA:%.*]] = phi i32 [ [[NEW]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[SPLIT:%.*]] = phi ptr [ [[MEM]], [[FOR_BODY]] ]
; CHECK-NEXT:    store i32 [[NEW_LCSSA]], ptr [[MEM]], align 4
; CHECK-NEXT:    ret ptr null
;
entry:
  ;; the allocator here is @custom_malloc, declared with a noalias return;
  ;; the CHECKs show promotion still fires (ADDR_PROMOTED + sunk stores),
  ;; so recognition does not depend on the function being named "malloc"
  %mem = call dereferenceable(16) noalias ptr @custom_malloc(i64 16)
  br label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  br label %for.header

for.header:
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %old = load i32, ptr %mem, align 4
  ; deliberately unanalyzable branch: volatile load of an external global
  %guard = load volatile ptr, ptr @p
  %exitcmp = icmp eq ptr %guard, null
  br i1 %exitcmp, label %for.body, label %early-exit

early-exit:
  ; promotion inserts a store on this pre-store exit path (see CHECKs)
  ret ptr null

for.body:
  %new = add i32 %old, 1
  store i32 %new, ptr %mem, align 4
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.header, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
  %split = phi ptr [ %mem, %for.body ]
  ret ptr null
}

declare ptr @not_malloc(i64)

; Negative test - not an allocation function.
define ptr @test_neg_not_malloc(i32 %n) {
; CHECK-LABEL: @test_neg_not_malloc(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[MEM:%.*]] = call dereferenceable(16) ptr @not_malloc(i64 16)
; CHECK-NEXT:    br label [[FOR_BODY_LR_PH:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    br label [[FOR_HEADER:%.*]]
; CHECK:       for.header:
; CHECK-NEXT:    [[I_02:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT:    [[OLD:%.*]] = load i32, ptr [[MEM]], align 4
; CHECK-NEXT:    [[GUARD:%.*]] = load volatile ptr, ptr @p, align 8
; CHECK-NEXT:    [[EXITCMP:%.*]] = icmp eq ptr [[GUARD]], null
; CHECK-NEXT:    br i1 [[EXITCMP]], label [[FOR_BODY]], label [[EARLY_EXIT:%.*]]
; CHECK:       early-exit:
; CHECK-NEXT:    ret ptr null
; CHECK:       for.body:
; CHECK-NEXT:    [[NEW:%.*]] = add i32 [[OLD]], 1
; CHECK-NEXT:    store i32 [[NEW]], ptr [[MEM]], align 4
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_02]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[N:%.*]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_HEADER]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    [[SPLIT:%.*]] = phi ptr [ [[MEM]], [[FOR_BODY]] ]
; CHECK-NEXT:    ret ptr null
;
entry:
  ;; ignore the required null check for simplicity
  ;; @not_malloc has no noalias return, so the memory cannot be proven
  ;; thread-local; the CHECKs confirm the load and store stay inside the
  ;; loop (no ADDR_PROMOTED load, no stores inserted on the exits)
  %mem = call dereferenceable(16) ptr @not_malloc(i64 16)
  br label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  br label %for.header

for.header:
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %old = load i32, ptr %mem, align 4
  ; deliberately unanalyzable branch: volatile load of an external global
  %guard = load volatile ptr, ptr @p
  %exitcmp = icmp eq ptr %guard, null
  br i1 %exitcmp, label %for.body, label %early-exit

early-exit:
  ; inserting a store here would be visible to other threads that may
  ; alias %mem, so promotion must not (and does not) happen
  ret ptr null

for.body:
  %new = add i32 %old, 1
  store i32 %new, ptr %mem, align 4
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.header, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
  %split = phi ptr [ %mem, %for.body ]
  ret ptr null
}

; Negative test - the load can't be speculated into the loop header, since
; the branch may control the alignment of the access.
define ptr @test_neg2(i32 %n) {
; CHECK-LABEL: @test_neg2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[MEM:%.*]] = call noalias dereferenceable(16) ptr @malloc(i64 16)
; CHECK-NEXT:    br label [[FOR_BODY_LR_PH:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    br label [[FOR_HEADER:%.*]]
; CHECK:       for.header:
; CHECK-NEXT:    [[I_02:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT:    [[GUARD:%.*]] = load volatile ptr, ptr @p, align 8
; CHECK-NEXT:    [[EXITCMP:%.*]] = icmp eq ptr [[GUARD]], null
; CHECK-NEXT:    br i1 [[EXITCMP]], label [[FOR_BODY]], label [[EARLY_EXIT:%.*]]
; CHECK:       early-exit:
; CHECK-NEXT:    ret ptr null
; CHECK:       for.body:
; CHECK-NEXT:    [[OLD:%.*]] = load i32, ptr [[MEM]], align 4
; CHECK-NEXT:    [[NEW:%.*]] = add i32 [[OLD]], 1
; CHECK-NEXT:    store i32 [[NEW]], ptr [[MEM]], align 4
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_02]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[N:%.*]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_HEADER]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    [[SPLIT:%.*]] = phi ptr [ [[MEM]], [[FOR_BODY]] ]
; CHECK-NEXT:    ret ptr null
;
entry:
  ;; ignore the required null check for simplicity
  ;; allocation is identical to @test, but here the load of %mem sits only
  ;; in for.body, guarded by the branch; promoting it would require
  ;; speculating the load into the header, which the CHECKs confirm does
  ;; not happen (load and store remain in for.body)
  %mem = call dereferenceable(16) noalias ptr @malloc(i64 16)
  br label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  br label %for.header

for.header:
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  ; deliberately unanalyzable branch: volatile load of an external global;
  ; the memory access below executes only when this branch stays in the loop
  %guard = load volatile ptr, ptr @p
  %exitcmp = icmp eq ptr %guard, null
  br i1 %exitcmp, label %for.body, label %early-exit

early-exit:
  ret ptr null

for.body:
  %old = load i32, ptr %mem, align 4
  %new = add i32 %old, 1
  store i32 %new, ptr %mem, align 4
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.header, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
  %split = phi ptr [ %mem, %for.body ]
  ret ptr null
}
