//===--- AccessSummaryAnalysis.cpp - PIL Access Summary Analysis ----------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pil-access-summary-analysis"

#include "polarphp/pil/lang/InstructionUtils.h"
#include "polarphp/pil/lang/PILArgument.h"
#include "polarphp/pil/optimizer/analysis/AccessSummaryAnalysis.h"
#include "polarphp/pil/optimizer/analysis/FunctionOrder.h"
#include "polarphp/pil/optimizer/passmgr/PassManager.h"
#include "polarphp/pil/lang/DebugUtils.h"

using namespace polar;

void AccessSummaryAnalysis::processFunction(FunctionInfo *info,
                                            FunctionOrder &order) {
   // Does the summary need to be recomputed?
   if (order.prepareForVisiting(info))
      return;

   // Compute function summary on a per-argument basis.
   // The function summary is loop-invariant, so fetch it once up front
   // instead of on every iteration.
   FunctionSummary &functionSummary = info->getSummary();
   unsigned index = 0;
   for (PILArgument *arg : info->getFunction()->getArguments()) {
      ArgumentSummary &argSummary =
         functionSummary.getAccessForArgument(index);
      ++index;

      auto *functionArg = cast<PILFunctionArgument>(arg);
      // Only summarize @inout_aliasable arguments.
      PILArgumentConvention convention =
         functionArg->getArgumentConvention().Value;
      if (convention != PILArgumentConvention::Indirect_InoutAliasable)
         continue;

      processArgument(info, functionArg, argSummary, order);
   }
}

/// Track uses of the arguments, recording in the summary any accesses
/// started by a begin_access and any flows of the arguments to other
/// functions.
void AccessSummaryAnalysis::processArgument(FunctionInfo *info,
                                            PILFunctionArgument *argument,
                                            ArgumentSummary &summary,
                                            FunctionOrder &order) {
   unsigned argumentIndex = argument->getIndex();

   // Use a worklist to track argument uses to be processed.
   llvm::SmallVector<Operand *, 32> worklist;

   // Start by adding the immediate uses of the argument to the worklist.
   worklist.append(argument->use_begin(), argument->use_end());

   // Iterate to follow uses of the arguments.
   while (!worklist.empty()) {
      Operand *operand = worklist.pop_back_val();
      PILInstruction *user = operand->getUser();

      switch (user->getKind()) {
         case PILInstructionKind::BeginAccessInst: {
            auto *BAI = cast<BeginAccessInst>(user);
            // Accesses with unsafe enforcement opt out of static checking and
            // are not recorded in the summary.
            if (BAI->getEnforcement() != PILAccessEnforcement::Unsafe) {
               const IndexTrieNode *subPath = findSubPathAccessed(BAI);
               summary.mergeWith(BAI->getAccessKind(), BAI->getLoc(), subPath);
               // We don't add the users of the begin_access to the worklist because
               // even if these users eventually begin an access to the address
               // or a projection from it, that access can't begin more exclusive
               // access than this access -- otherwise it will be diagnosed
               // elsewhere.
            }
            break;
         }
         case PILInstructionKind::EndUnpairedAccessInst:
            // Don't diagnose unpaired access statically.
            assert(cast<EndUnpairedAccessInst>(user)->getEnforcement() ==
                   PILAccessEnforcement::Dynamic);
            break;
         case PILInstructionKind::StructElementAddrInst:
         case PILInstructionKind::TupleElementAddrInst: {
            // Eventually we'll summarize individual struct elements separately.
            // For now an access to a part of the struct is treated as an access
            // to the whole struct.
            auto inst = cast<SingleValueInstruction>(user);
            worklist.append(inst->use_begin(), inst->use_end());
            break;
         }
         case PILInstructionKind::DebugValueAddrInst:
         case PILInstructionKind::AddressToPointerInst:
            // Ignore these uses, they don't affect formal accesses.
            break;
         case PILInstructionKind::PartialApplyInst:
            // The argument flows into a noescape closure; record the flow so
            // the closure's accesses propagate back to this argument.
            processPartialApply(info, argumentIndex, cast<PartialApplyInst>(user),
                                operand, order);
            break;
         case PILInstructionKind::ApplyInst:
            processFullApply(info, argumentIndex, cast<ApplyInst>(user), operand,
                             order);
            break;
         case PILInstructionKind::TryApplyInst:
            processFullApply(info, argumentIndex, cast<TryApplyInst>(user), operand,
                             order);
            break;
         default:
            // FIXME: These likely represent scenarios in which we're not generating
            // begin access markers. Ignore these for now. But we really should
            // add PIL verification to ensure all loads and stores have associated
            // access markers. Once PIL verification is implemented, enable the
            // following assert to verify that the cases handled above are
            // comprehensive, which guarantees that exclusivity enforcement is
            // complete.
            //   assert(false && "Unrecognized argument use");
            break;
      }
   }
}

#ifndef NDEBUG
/// Sanity check to make sure that a noescape partial apply is
/// only ultimately used by an apply, a try_apply or as an argument (but not
/// the called function) in a partial_apply.
///
/// FIXME: This needs to be checked in the PILVerifier.
static bool hasExpectedUsesOfNoEscapePartialApply(Operand *partialApplyUse) {
   PILInstruction *user = partialApplyUse->getUser();

   // It is fine to call the partial apply
   switch (user->getKind()) {
      case PILInstructionKind::ApplyInst:
      case PILInstructionKind::TryApplyInst:
         return true;
         // partial_apply [stack] is terminated by a dealloc_stack.
      case PILInstructionKind::DeallocStackInst:
         return true;

      // Conversions are transparent for this check: recurse into all of the
      // conversion's uses.
      case PILInstructionKind::ConvertFunctionInst:
         return llvm::all_of(cast<ConvertFunctionInst>(user)->getUses(),
                             hasExpectedUsesOfNoEscapePartialApply);

      case PILInstructionKind::ConvertEscapeToNoEscapeInst:
         return llvm::all_of(cast<ConvertEscapeToNoEscapeInst>(user)->getUses(),
                             hasExpectedUsesOfNoEscapePartialApply);

      // Passing the closure as an argument to another partial_apply is fine;
      // using it as the callee of a partial_apply is not.
      case PILInstructionKind::PartialApplyInst:
         return partialApplyUse->get() != cast<PartialApplyInst>(user)->getCallee();

         // Look through begin_borrow.
      case PILInstructionKind::BeginBorrowInst:
         return llvm::all_of(cast<BeginBorrowInst>(user)->getUses(),
                             hasExpectedUsesOfNoEscapePartialApply);

         // Look through mark_dependence.
      case PILInstructionKind::MarkDependenceInst:
         return llvm::all_of(cast<MarkDependenceInst>(user)->getUses(),
                             hasExpectedUsesOfNoEscapePartialApply);

      case PILInstructionKind::CopyBlockWithoutEscapingInst:
         return partialApplyUse->getOperandNumber() ==
                CopyBlockWithoutEscapingInst::Closure;

         // A copy_value that is only used by the store to a block storage is fine.
         // It is part of the pattern we emit for verifying that a noescape closure
         // passed to objc has not escaped.
         //  %4 = convert_escape_to_noescape [not_guaranteed] %3 :
         //    $@callee_guaranteed () -> () to $@noescape @callee_guaranteed () -> ()
         //  %5 = function_ref @withoutEscapingThunk
         //  %6 = partial_apply [callee_guaranteed] %5(%4) :
         //    $@convention(thin) (@noescape @callee_guaranteed () -> ()) -> ()
         //  %7 = mark_dependence %6 : $@callee_guaranteed () -> () on %4 :
         //    $@noescape @callee_guaranteed () -> ()
         //  %8 = copy_value %7 : $@callee_guaranteed () -> ()
         //  %9 = alloc_stack $@block_storage @callee_guaranteed () -> ()
         //  %10 = project_block_storage %9 :
         //    $*@block_storage @callee_guaranteed () -> ()
         //  store %8 to [init] %10 : $*@callee_guaranteed () -> ()
         //  %13 = init_block_storage_header %9 :
         //    $*@block_storage @callee_guaranteed () -> (),
         //    invoke %12
         //  %14 = copy_block_without_escaping %13 : $() -> () withoutEscaping %7
      case PILInstructionKind::CopyValueInst:
         return isa<StoreInst>(getSingleNonDebugUser(cast<CopyValueInst>(user)));

         // End borrow is always ok.
      case PILInstructionKind::EndBorrowInst:
         return true;

      case PILInstructionKind::StoreInst:
      case PILInstructionKind::DestroyValueInst:
         // @block_storage is passed by storing it to the stack. We know this is
         // still nonescaping simply because our original argument convention is
         // @inout_aliasable. In this PIL, both store and destroy_value are users
         // of %closure:
         //
         // %closure = partial_apply %f1(%arg)
         //   : $@convention(thin) (@inout_aliasable T) -> ()
         // %storage = alloc_stack $@block_storage @callee_owned () -> ()
         // %block_addr = project_block_storage %storage
         //   : $*@block_storage @callee_owned () -> ()
         // store %closure to [init] %block_addr : $*@callee_owned () -> ()
         // %block = init_block_storage_header %storage
         //     : $*@block_storage @callee_owned () -> (),
         //   invoke %f2 : $@convention(c)
         //     (@inout_aliasable @block_storage @callee_owned () -> ()) -> (),
         //   type $@convention(block) () -> ()
         // %copy = copy_block %block : $@convention(block) () -> ()
         // destroy_value %storage : $@callee_owned () -> ()
         return true;
      default:
         // Any other use is unexpected for a noescape partial_apply.
         return false;
   }
}
#endif

void AccessSummaryAnalysis::processPartialApply(FunctionInfo *callerInfo,
                                                unsigned callerArgumentIndex,
                                                PartialApplyInst *apply,
                                                Operand *applyArgumentOperand,
                                                FunctionOrder &order) {
   PILFunction *calleeFunction = apply->getCalleeFunction();
   assert(calleeFunction && !calleeFunction->empty() &&
          "Missing definition of noescape closure?");

   // Make sure the partial_apply is not calling the result of another
   // partial_apply.
   assert(isa<FunctionRefBaseInst>(apply->getCallee())
          && "Noescape partial apply of non-functionref?");

   // Debug-only sanity check that the closure is used only in the ways
   // expected of a noescape partial_apply.
   assert(llvm::all_of(apply->getUses(),
                       hasExpectedUsesOfNoEscapePartialApply) &&
          "noescape partial_apply has unexpected use!");

   // The argument index in the called function.
   ApplySite site(apply);
   unsigned calleeArgumentIndex = site.getCalleeArgIndex(*applyArgumentOperand);

   // Record the caller->callee argument flow and summarize the callee.
   processCall(callerInfo, callerArgumentIndex, calleeFunction,
               calleeArgumentIndex, order);
}

void AccessSummaryAnalysis::processFullApply(FunctionInfo *callerInfo,
                                             unsigned callerArgumentIndex,
                                             FullApplySite apply,
                                             Operand *argumentOperand,
                                             FunctionOrder &order) {
   // Operand 0 is the callee, so argument operands start at 1; the callee's
   // argument index is therefore the operand number minus one.
   unsigned operandNumber = argumentOperand->getOperandNumber();
   assert(operandNumber > 0 && "Summarizing apply for non-argument?");

   unsigned calleeArgumentIndex = operandNumber - 1;
   PILFunction *callee = apply.getCalleeFunction();
   // We can't apply a summary for function whose body we can't see.
   // Since user-provided closures are always in the same module as their callee
   // This likely indicates a missing begin_access before an open-coded
   // call.
   if (!callee || callee->empty())
      return;

   processCall(callerInfo, callerArgumentIndex, callee, calleeArgumentIndex,
               order);
}

void AccessSummaryAnalysis::processCall(FunctionInfo *callerInfo,
                                        unsigned callerArgumentIndex,
                                        PILFunction *callee,
                                        unsigned argumentIndex,
                                        FunctionOrder &order) {
   // Record the caller-argument -> callee-argument edge so that the
   // interprocedural analysis can iterate this flow to a fixpoint.
   FunctionInfo *calleeSummaryInfo = getFunctionInfo(callee);
   ArgumentFlow flow = {callerArgumentIndex, argumentIndex, calleeSummaryInfo};
   callerInfo->recordFlow(flow);

   // Summarize the callee lazily on first visit, and register it in the
   // bottom-up scheduling order.
   if (!calleeSummaryInfo->isVisited()) {
      processFunction(calleeSummaryInfo, order);
      order.tryToSchedule(calleeSummaryInfo);
   }

   // Immediately propagate whatever the callee summary currently says.
   propagateFromCalleeToCaller(callerInfo, flow);
}

bool AccessSummaryAnalysis::ArgumentSummary::mergeWith(
   PILAccessKind otherKind, PILLocation otherLoc,
   const IndexTrieNode *otherSubPath) {
   // Try to record a brand-new sub-access for this subpath.
   auto insertion =
      SubAccesses.try_emplace(otherSubPath, otherKind, otherLoc, otherSubPath);
   if (insertion.second) {
      // A fresh entry was created, which always changes the summary.
      return true;
   }

   // An entry for otherSubPath already existed; fold the new access into it
   // and report whether the stored summary actually changed.
   return insertion.first->second.mergeWith(otherKind, otherLoc, otherSubPath);
}

bool AccessSummaryAnalysis::ArgumentSummary::mergeWith(
   const ArgumentSummary &other) {
   bool changed = false;

   const SubAccessMap &otherAccesses = other.SubAccesses;
   for (auto it = otherAccesses.begin(), e = otherAccesses.end(); it != e;
        ++it) {
      const SubAccessSummary &otherSubAccess = it->getSecond();
      if (mergeWith(otherSubAccess.getAccessKind(), otherSubAccess.getAccessLoc(),
                    otherSubAccess.getSubPath())) {
         changed = true;
      }
   }

   return changed;
}

bool AccessSummaryAnalysis::SubAccessSummary::mergeWith(
   PILAccessKind otherKind, PILLocation otherLoc,
   const IndexTrieNode *otherSubPath) {
   assert(otherSubPath == this->SubPath);
   // Lattice rule: a modification-like access subsumes a read access (or no
   // access). Only a Read -> non-Read transition changes the stored summary;
   // everything else leaves it untouched.
   if (Kind != PILAccessKind::Read || otherKind == PILAccessKind::Read)
      return false;

   Kind = otherKind;
   AccessLoc = otherLoc;
   return true;
}

bool AccessSummaryAnalysis::SubAccessSummary::mergeWith(
   const SubAccessSummary &other) {
   // We don't currently support merging accesses for different sub paths.
   assert(SubPath == other.SubPath);
   // Delegate to the kind/location merge; returns true if this summary changed.
   return mergeWith(other.Kind, other.AccessLoc, SubPath);
}

void AccessSummaryAnalysis::recompute(FunctionInfo *initial) {
   // Each recomputation gets a fresh update ID so stale per-function state
   // from earlier recomputations can be detected.
   allocNewUpdateID();

   FunctionOrder order(getCurrentUpdateID());

   // Summarize the function and its callees.
   processFunction(initial, order);

   // Build the bottom-up order.
   order.tryToSchedule(initial);
   order.finishScheduling();

   // Iterate the interprocedural analysis to a fixed point.
   bool needAnotherIteration;
   do {
      needAnotherIteration = false;
      for (FunctionInfo *calleeInfo : order) {
         for (const auto &callerEntry : calleeInfo->getCallers()) {
            assert(callerEntry.isValid());
            // Only propagate from callees whose summary was refreshed during
            // this update; other entries cannot change the caller.
            if (!order.wasRecomputedWithCurrentUpdateID(calleeInfo))
               continue;

            FunctionInfo *callerInfo = callerEntry.Caller;

            // Propagate from callee to caller.
            for (const auto &argumentFlow : callerInfo->getArgumentFlows()) {
               if (argumentFlow.CalleeFunctionInfo != calleeInfo)
                  continue;

               bool changed = propagateFromCalleeToCaller(callerInfo, argumentFlow);
               // If the caller changed but was scheduled before (or at) the
               // callee in the bottom-up order, another pass is required to
               // push the new information further.
               if (changed && !callerInfo->isScheduledAfter(calleeInfo)) {
                  needAnotherIteration = true;
               }
            }
         }
      }
   } while (needAnotherIteration);
}

std::string AccessSummaryAnalysis::SubAccessSummary::getDescription(
   PILType BaseType, PILModule &M, TypeExpansionContext context) const {
   // Render the projection path (e.g. ".x.y") followed by the access kind.
   std::string buffer;
   llvm::raw_string_ostream stream(buffer);

   stream << AccessSummaryAnalysis::getSubPathDescription(BaseType, SubPath, M,
                                                          context);

   // Separate path and kind with a space, except at the root where the path
   // description is empty.
   if (!SubPath->isRoot())
      stream << " ";
   stream << getPILAccessKindName(getAccessKind());
   return stream.str();
}

void AccessSummaryAnalysis::ArgumentSummary::getSortedSubAccesses(
   SmallVectorImpl<SubAccessSummary> &storage) const {
   // Copy all sub-accesses into 'storage'. Reserve up front so the loop
   // performs at most one growth allocation.
   storage.reserve(storage.size() + SubAccesses.size());
   for (auto it = SubAccesses.begin(), e = SubAccesses.end(); it != e; ++it) {
      storage.push_back(it->getSecond());
   }

   // Sort by subpath so the output order is deterministic regardless of the
   // map's iteration order.
   const auto &compare = [](const SubAccessSummary &lhs,
                            const SubAccessSummary &rhs) {
      return compareSubPaths(lhs.getSubPath(), rhs.getSubPath());
   };
   std::sort(storage.begin(), storage.end(), compare);

   assert(storage.size() == SubAccesses.size());
}

std::string AccessSummaryAnalysis::ArgumentSummary::getDescription(
   PILType BaseType, PILModule &M, TypeExpansionContext context) const {
   // Print every sub-access, sorted by subpath, as a bracketed
   // comma-separated list.
   SmallVector<AccessSummaryAnalysis::SubAccessSummary, 8> sorted;
   sorted.reserve(SubAccesses.size());
   getSortedSubAccesses(sorted);

   std::string buffer;
   llvm::raw_string_ostream stream(buffer);
   stream << "[";
   bool isFirst = true;
   for (const auto &subAccess : sorted) {
      if (!isFirst)
         stream << ", ";
      isFirst = false;
      stream << subAccess.getDescription(BaseType, M, context);
   }
   stream << "]";

   return stream.str();
}

bool AccessSummaryAnalysis::propagateFromCalleeToCaller(
   FunctionInfo *callerInfo, ArgumentFlow flow) {
   // For a given flow from a caller's argument to a callee's argument, merge
   // the callee argument's summary into the caller argument's summary.
   // Returns true when the caller's summary changed.
   FunctionInfo *calleeInfo = flow.CalleeFunctionInfo;
   const auto &calleeArgument =
      calleeInfo->getSummary().getAccessForArgument(flow.CalleeArgumentIndex);
   auto &callerArgument =
      callerInfo->getSummary().getAccessForArgument(flow.CallerArgumentIndex);

   return callerArgument.mergeWith(calleeArgument);
}

AccessSummaryAnalysis::FunctionInfo *
AccessSummaryAnalysis::getFunctionInfo(PILFunction *F) {
   // Look up — or lazily create — the cached per-function info record.
   FunctionInfo *&entry = FunctionInfos[F];
   if (entry == nullptr)
      entry = new (Allocator.Allocate()) FunctionInfo(F);
   return entry;
}

const AccessSummaryAnalysis::FunctionSummary &
AccessSummaryAnalysis::getOrCreateSummary(PILFunction *fn) {
   // Return the cached summary if it is still valid; otherwise recompute it
   // (and its callees') before returning.
   FunctionInfo *info = getFunctionInfo(fn);
   if (info->isValid())
      return info->getSummary();

   recompute(info);
   return info->getSummary();
}

/// Drop all cached analysis state. (The original declaration repeated the
/// class name via the injected-class-name; the redundant qualifier is
/// removed here — the meaning is identical.)
void AccessSummaryAnalysis::invalidate() {
   // Discard every per-function summary and reclaim their storage.
   FunctionInfos.clear();
   Allocator.DestroyAll();
   // Start a fresh subpath trie; IndexTrieNode pointers previously handed
   // out by findSubPathAccessed must not be used after this point.
   SubPathTrie.reset(new IndexTrieNode());
}

/// Drop the cached summary for \p F only. The invalidation kind \p K is
/// currently unused: any invalidation discards the function's summary.
void AccessSummaryAnalysis::invalidate(PILFunction *F, InvalidationKind K) {
   FunctionInfos.erase(F);
}

/// Factory entry point for the pass manager. The module \p M is unused here;
/// the analysis builds per-function state lazily on demand.
PILAnalysis *polar::createAccessSummaryAnalysis(PILModule *M) {
   return new AccessSummaryAnalysis();
}

/// If the instruction is a field or tuple projection and it has a single
/// user return a pair of the single user and the projection index.
/// Otherwise, return a pair with the component nullptr and the second
/// unspecified.
static std::pair<SingleValueInstruction *, unsigned>
getSingleAddressProjectionUser(SingleValueInstruction *I) {
   SingleValueInstruction *SingleUser = nullptr;
   unsigned ProjectionIndex = 0;

   for (Operand *Use : I->getUses()) {
      PILInstruction *User = Use->getUser();
      // The end_access paired with a begin_access does not count as a
      // projection user.
      if (isa<BeginAccessInst>(I) && isa<EndAccessInst>(User))
         continue;

      // Ignore sanitizer instrumentation when looking for a single projection
      // user. This ensures that we're able to find a single projection subpath
      // even when sanitization is enabled.
      if (isSanitizerInstrumentation(User))
         continue;

      // We have more than a single user so bail.
      if (SingleUser)
         return std::make_pair(nullptr, 0);

      switch (User->getKind()) {
         // Only struct and tuple address projections qualify; record the
         // projected field/element index alongside the user.
         case PILInstructionKind::StructElementAddrInst: {
            auto inst = cast<StructElementAddrInst>(User);
            ProjectionIndex = inst->getFieldNo();
            SingleUser = inst;
            break;
         }
         case PILInstructionKind::TupleElementAddrInst: {
            auto inst = cast<TupleElementAddrInst>(User);
            ProjectionIndex = inst->getFieldNo();
            SingleUser = inst;
            break;
         }
         default:
            // Any non-projection user disqualifies I from having a single
            // projection user.
            return std::make_pair(nullptr, 0);
      }
   }

   return std::make_pair(SingleUser, ProjectionIndex);
}

const IndexTrieNode *
AccessSummaryAnalysis::findSubPathAccessed(BeginAccessInst *BAI) {
   // Walk the chain of single-user struct/tuple address projections rooted
   // at BAI, descending one trie level per projection index. The resulting
   // trie node identifies the subpath of the storage that is accessed.
   IndexTrieNode *subPath = getSubPathTrieRoot();

   SingleValueInstruction *current = BAI;
   for (;;) {
      auto projection = getSingleAddressProjectionUser(current);
      if (projection.first == nullptr)
         break;

      subPath = subPath->getChild(projection.second);
      current = projection.first;
   }

   return subPath;
}

/// Returns a string representation of the SubPath
/// suitable for use in diagnostic text. Only supports the Projections
/// that stored-property relaxation supports: struct stored properties
/// and tuple elements.
std::string AccessSummaryAnalysis::getSubPathDescription(
   PILType baseType, const IndexTrieNode *subPath, PILModule &M,
   TypeExpansionContext context) {
   // Walk the trie to the root to collect the sequence (in reverse order).
   llvm::SmallVector<unsigned, 4> reversedIndices;
   const IndexTrieNode *I = subPath;
   while (!I->isRoot()) {
      reversedIndices.push_back(I->getIndex());
      I = I->getParent();
   }

   std::string sbuf;
   llvm::raw_string_ostream os(sbuf);

   // Emit ".name" for each struct stored property and ".label"/".index" for
   // each tuple element, outermost projection first.
   PILType containingType = baseType;
   for (unsigned index : llvm::reverse(reversedIndices)) {
      os << ".";

      if (StructDecl *D = containingType.getStructOrBoundGenericStruct()) {
         VarDecl *var = D->getStoredProperties()[index];
         os << var->getBaseName();
         containingType = containingType.getFieldType(var, M, context);
         continue;
      }

      if (auto tupleTy = containingType.getAs<TupleType>()) {
         // Prefer the element's label when it has one; otherwise fall back
         // to its positional index.
         Identifier elementName = tupleTy->getElement(index).getName();
         if (elementName.empty())
            os << index;
         else
            os << elementName;
         containingType = containingType.getTupleElementType(index);
         continue;
      }

      llvm_unreachable("Unexpected type in projection SubPath!");
   }

   return os.str();
}

static unsigned subPathLength(const IndexTrieNode *subPath) {
   unsigned length = 0;

   const IndexTrieNode *iter = subPath;
   while (iter) {
      ++length;
      iter = iter->getParent();
   }

   return length;
}

bool AccessSummaryAnalysis::compareSubPaths(const IndexTrieNode *lhs,
                                            const IndexTrieNode *rhs) {
   // Order subpaths primarily by depth: shorter paths sort first.
   unsigned lhsLength = subPathLength(lhs);
   unsigned rhsLength = subPathLength(rhs);
   if (lhsLength != rhsLength)
      return lhsLength < rhsLength;

   // Equal depth: compare node indices walking from the leaves toward the
   // root; the first differing index decides the order.
   for (; lhs; lhs = lhs->getParent(), rhs = rhs->getParent()) {
      if (lhs->getIndex() != rhs->getIndex())
         return lhs->getIndex() < rhs->getIndex();
   }

   assert(!rhs && "Equal paths with different lengths?");
   // The two paths are equal, so neither orders before the other.
   return false;
}

void AccessSummaryAnalysis::FunctionSummary::print(raw_ostream &os,
                                                   PILFunction *fn) const {
   // Print the summary as a parenthesized, comma-separated list with one
   // entry per function argument.
   unsigned argCount = getArgumentCount();
   os << "(";

   // The module is the same for every argument; look it up once instead of
   // on every loop iteration.
   PILModule &m = fn->getModule();
   for (unsigned i = 0; i < argCount; ++i) {
      if (i > 0) {
         os << ",  ";
      }
      PILArgument *arg = fn->getArgument(i);
      os << getAccessForArgument(i).getDescription(arg->getType(), m,
                                                   TypeExpansionContext(*fn));
   }

   os << ")";
}
