Dataset schema:

| Column | Type | Values / range |
| --- | --- | --- |
| `repo_name` | string (class) | 6 distinct values |
| `pr_number` | int64 | 512 to 78.9k |
| `pr_title` | string | 3 to 144 chars |
| `pr_description` | string | 0 to 30.3k chars |
| `author` | string | 2 to 21 chars |
| `date_created` | unknown | |
| `date_merged` | unknown | |
| `previous_commit` | string | 40 chars (commit SHA) |
| `pr_commit` | string | 40 chars (commit SHA) |
| `query` | string | 17 to 30.4k chars |
| `filepath` | string | 9 to 210 chars |
| `before_content` | string | 0 to 112M chars |
| `after_content` | string | 0 to 112M chars |
| `label` | int64 | -1 to 1 |
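Assuming this dump corresponds to a dataset hosted on the Hugging Face Hub, it can be consumed with the `datasets` library. The sketch below is minimal: the repo id `org/pr-code-search`, the split name, and the reading of `label` (1 marks a file relevant to the query, -1 a negative) are placeholders and assumptions, not facts recorded in the dump.

```python
# Minimal loading sketch. The repo id "org/pr-code-search" and split name are
# placeholders (the dump does not name the dataset), and the label semantics
# (1 = relevant/edited file, -1 = negative) are an assumption.
from datasets import load_dataset

ds = load_dataset("org/pr-code-search", split="train")

row = ds[0]
print(row["repo_name"], row["pr_number"], row["filepath"])

# Keep only the assumed-positive (query, file) pairs.
positives = ds.filter(lambda r: r["label"] == 1)
print(len(positives))
```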
Example row:

- **repo_name:** dotnet/runtime
- **pr_number:** 65889
- **pr_title:** [Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
- **pr_description:** Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
- **author:** simonrozsival
- **date_created:** "2022-02-25T12:07:00Z"
- **date_merged:** "2022-03-09T14:24:14Z"
- **previous_commit:** e32f3b61cd41e6a97ebe8f512ff673b63ff40640
- **pr_commit:** cbcc616cf386b88e49f97f74182ffff241528179
- **query:** [Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
- **filepath:** ./src/coreclr/jit/lsra.h
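Read as a retrieval pair, the record's `query` concatenates the PR title and description, while the two content columns snapshot `filepath` at `previous_commit` and `pr_commit`. A hypothetical shaping of one row follows; the output keys and the role of each field are inferred from the schema, not documented by the dataset:

```python
# Hypothetical per-row shaping for retrieval-style training. The output keys
# and the reading of `label` are illustrative assumptions.
def to_example(row: dict) -> dict:
    return {
        "query": row["query"],              # PR title + description
        "document": row["before_content"],  # file body at previous_commit
        "revised": row["after_content"],    # file body at pr_commit
        "relevance": row["label"],          # assumed: 1 = edited by the PR
    }
```

For this record, `before_content` holds the pre-PR body of ./src/coreclr/jit/lsra.h, stored with its newlines flattened: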
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*****************************************************************************/ #ifndef _LSRA_H_ #define _LSRA_H_ #include "arraylist.h" #include "smallhash.h" // Minor and forward-reference types class Interval; class RefPosition; class LinearScan; class RegRecord; template <class T> class ArrayStack; // LsraLocation tracks the linearized order of the nodes. // Each node is assigned two LsraLocations - one for all the uses and all but the last // def, and a second location for the last def (if any) typedef unsigned int LsraLocation; const unsigned int MinLocation = 0; const unsigned int MaxLocation = UINT_MAX; // max number of registers an operation could require internally (in addition to uses and defs) const unsigned int MaxInternalRegisters = 8; const unsigned int RegisterTypeCount = 2; /***************************************************************************** * Register types *****************************************************************************/ typedef var_types RegisterType; #define IntRegisterType TYP_INT #define FloatRegisterType TYP_FLOAT //------------------------------------------------------------------------ // regType: Return the RegisterType to use for a given type // // Arguments: // type - the type of interest // template <class T> RegisterType regType(T type) { return varTypeUsesFloatReg(TypeGet(type)) ? FloatRegisterType : IntRegisterType; } //------------------------------------------------------------------------ // useFloatReg: Check if the given var_type should be allocated to a FloatRegisterType // inline bool useFloatReg(var_types type) { return (regType(type) == FloatRegisterType); } //------------------------------------------------------------------------ // registerTypesEquivalent: Check to see if two RegisterTypes are equivalent // inline bool registerTypesEquivalent(RegisterType a, RegisterType b) { return varTypeIsIntegralOrI(a) == varTypeIsIntegralOrI(b); } //------------------------------------------------------------------------ // calleeSaveRegs: Get the set of callee-save registers of the given RegisterType // inline regMaskTP calleeSaveRegs(RegisterType rt) { return varTypeIsIntegralOrI(rt) ? RBM_INT_CALLEE_SAVED : RBM_FLT_CALLEE_SAVED; } //------------------------------------------------------------------------ // callerSaveRegs: Get the set of caller-save registers of the given RegisterType // inline regMaskTP callerSaveRegs(RegisterType rt) { return varTypeIsIntegralOrI(rt) ? RBM_INT_CALLEE_TRASH : RBM_FLT_CALLEE_TRASH; } //------------------------------------------------------------------------ // RefInfo: Captures the necessary information for a definition that is "in-flight" // during `buildIntervals` (i.e. a tree-node definition has been encountered, // but not its use). This includes the RefPosition and its associated // GenTree node. // struct RefInfo { RefPosition* ref; GenTree* treeNode; RefInfo(RefPosition* r, GenTree* t) : ref(r), treeNode(t) { } // default constructor for data structures RefInfo() { } }; //------------------------------------------------------------------------ // RefInfoListNode: used to store a single `RefInfo` value for a // node during `buildIntervals`. // // This is the node type for `RefInfoList` below. 
// class RefInfoListNode final : public RefInfo { friend class RefInfoList; friend class RefInfoListNodePool; RefInfoListNode* m_next; // The next node in the list public: RefInfoListNode(RefPosition* r, GenTree* t) : RefInfo(r, t) { } //------------------------------------------------------------------------ // RefInfoListNode::Next: Returns the next node in the list. RefInfoListNode* Next() const { return m_next; } }; //------------------------------------------------------------------------ // RefInfoList: used to store a list of `RefInfo` values for a // node during `buildIntervals`. // // This list of 'RefInfoListNode's contains the source nodes consumed by // a node, and is created by 'BuildNode'. // class RefInfoList final { friend class RefInfoListNodePool; RefInfoListNode* m_head; // The head of the list RefInfoListNode* m_tail; // The tail of the list public: RefInfoList() : m_head(nullptr), m_tail(nullptr) { } RefInfoList(RefInfoListNode* node) : m_head(node), m_tail(node) { assert(m_head->m_next == nullptr); } //------------------------------------------------------------------------ // RefInfoList::IsEmpty: Returns true if the list is empty. // bool IsEmpty() const { return m_head == nullptr; } //------------------------------------------------------------------------ // RefInfoList::Begin: Returns the first node in the list. // RefInfoListNode* Begin() const { return m_head; } //------------------------------------------------------------------------ // RefInfoList::End: Returns the position after the last node in the // list. The returned value is suitable for use as // a sentinel for iteration. // RefInfoListNode* End() const { return nullptr; } //------------------------------------------------------------------------ // RefInfoList::Last: Returns the last node in the list. // RefInfoListNode* Last() const { return m_tail; } //------------------------------------------------------------------------ // RefInfoList::Append: Appends a node to the list. // // Arguments: // node - The node to append. Must not be part of an existing list. // void Append(RefInfoListNode* node) { assert(node->m_next == nullptr); if (m_tail == nullptr) { assert(m_head == nullptr); m_head = node; } else { m_tail->m_next = node; } m_tail = node; } //------------------------------------------------------------------------ // RefInfoList::Append: Appends another list to this list. // // Arguments: // other - The list to append. // void Append(RefInfoList other) { if (m_tail == nullptr) { assert(m_head == nullptr); m_head = other.m_head; } else { m_tail->m_next = other.m_head; } m_tail = other.m_tail; } //------------------------------------------------------------------------ // RefInfoList::Prepend: Prepends a node to the list. // // Arguments: // node - The node to prepend. Must not be part of an existing list. // void Prepend(RefInfoListNode* node) { assert(node->m_next == nullptr); if (m_head == nullptr) { assert(m_tail == nullptr); m_tail = node; } else { node->m_next = m_head; } m_head = node; } //------------------------------------------------------------------------ // RefInfoList::Add: Adds a node to the list. // // Arguments: // node - The node to add. Must not be part of an existing list. 
// prepend - True if it should be prepended (otherwise is appended) // void Add(RefInfoListNode* node, bool prepend) { if (prepend) { Prepend(node); } else { Append(node); } } //------------------------------------------------------------------------ // removeListNode - retrieve the RefInfo for the given node // // Notes: // The BuildNode methods use this helper to retrieve the RefInfo for child nodes // from the useList being constructed. // RefInfoListNode* removeListNode(RefInfoListNode* listNode, RefInfoListNode* prevListNode) { RefInfoListNode* nextNode = listNode->Next(); if (prevListNode == nullptr) { m_head = nextNode; } else { prevListNode->m_next = nextNode; } if (nextNode == nullptr) { m_tail = prevListNode; } listNode->m_next = nullptr; return listNode; } // removeListNode - remove the RefInfoListNode for the given GenTree node from the defList RefInfoListNode* removeListNode(GenTree* node); // Same as above but takes a multiRegIdx to support multi-reg nodes. RefInfoListNode* removeListNode(GenTree* node, unsigned multiRegIdx); //------------------------------------------------------------------------ // GetRefPosition - retrieve the RefPosition for the given node // // Notes: // The Build methods use this helper to retrieve the RefPosition for child nodes // from the useList being constructed. Note that, if the user knows the order of the operands, // it is expected that they should just retrieve them directly. RefPosition* GetRefPosition(GenTree* node) { for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next()) { if (listNode->treeNode == node) { return listNode->ref; } } assert(!"GetRefPosition didn't find the node"); unreached(); } //------------------------------------------------------------------------ // RefInfoList::GetSecond: Gets the second node in the list. // // Arguments: // (DEBUG ONLY) treeNode - The GenTree* we expect to be in the second node. // RefInfoListNode* GetSecond(INDEBUG(GenTree* treeNode)) { noway_assert((Begin() != nullptr) && (Begin()->Next() != nullptr)); RefInfoListNode* second = Begin()->Next(); assert(second->treeNode == treeNode); return second; } #ifdef DEBUG // Count - return the number of nodes in the list (DEBUG only) int Count() { int count = 0; for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next()) { count++; } return count; } #endif // DEBUG }; //------------------------------------------------------------------------ // RefInfoListNodePool: manages a pool of `RefInfoListNode` // values to decrease overall memory usage // during `buildIntervals`. // // `buildIntervals` involves creating a list of RefInfo items per // node that either directly produces a set of registers or that is a // contained node with register-producing sources. However, these lists // are short-lived: they are destroyed once the use of the corresponding // node is processed. As such, there is typically only a small number of // `RefInfoListNode` values in use at any given time. Pooling these // values avoids otherwise frequent allocations. 
class RefInfoListNodePool final { RefInfoListNode* m_freeList; Compiler* m_compiler; static const unsigned defaultPreallocation = 8; public: RefInfoListNodePool(Compiler* compiler, unsigned preallocate = defaultPreallocation); RefInfoListNode* GetNode(RefPosition* r, GenTree* t); void ReturnNode(RefInfoListNode* listNode); }; #if TRACK_LSRA_STATS enum LsraStat { #define LSRA_STAT_DEF(enum_name, enum_str) enum_name, #include "lsra_stats.h" #undef LSRA_STAT_DEF #define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) STAT_##enum_name, #include "lsra_score.h" #undef REG_SEL_DEF COUNT }; #endif // TRACK_LSRA_STATS struct LsraBlockInfo { // bbNum of the predecessor to use for the register location of live-in variables. // 0 for fgFirstBB. unsigned int predBBNum; weight_t weight; bool hasCriticalInEdge : 1; bool hasCriticalOutEdge : 1; bool hasEHBoundaryIn : 1; bool hasEHBoundaryOut : 1; bool hasEHPred : 1; #if TRACK_LSRA_STATS // Per block maintained LSRA statistics. unsigned stats[LsraStat::COUNT]; #endif // TRACK_LSRA_STATS }; enum RegisterScore { #define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) enum_name = value, #include "lsra_score.h" #undef REG_SEL_DEF NONE = 0 }; // This is sort of a bit mask // The low order 2 bits will be 1 for defs, and 2 for uses enum RefType : unsigned char { #define DEF_REFTYPE(memberName, memberValue, shortName) memberName = memberValue, #include "lsra_reftypes.h" #undef DEF_REFTYPE }; // position in a block (for resolution) enum BlockStartOrEnd { BlockPositionStart = 0, BlockPositionEnd = 1, PositionCount = 2 }; inline bool RefTypeIsUse(RefType refType) { return ((refType & RefTypeUse) == RefTypeUse); } inline bool RefTypeIsDef(RefType refType) { return ((refType & RefTypeDef) == RefTypeDef); } typedef regNumberSmall* VarToRegMap; typedef jitstd::list<Interval> IntervalList; typedef jitstd::list<RefPosition> RefPositionList; typedef jitstd::list<RefPosition>::iterator RefPositionIterator; typedef jitstd::list<RefPosition>::reverse_iterator RefPositionReverseIterator; class Referenceable { public: Referenceable() { firstRefPosition = nullptr; recentRefPosition = nullptr; lastRefPosition = nullptr; } // A linked list of RefPositions. These are only traversed in the forward // direction, and are not moved, so they don't need to be doubly linked // (see RefPosition). RefPosition* firstRefPosition; RefPosition* recentRefPosition; RefPosition* lastRefPosition; // Get the position of the next reference which is at or greater than // the current location (relies upon recentRefPosition being updated // during traversal). 
RefPosition* getNextRefPosition(); LsraLocation getNextRefLocation(); }; class RegRecord : public Referenceable { public: RegRecord() { assignedInterval = nullptr; previousInterval = nullptr; regNum = REG_NA; isCalleeSave = false; registerType = IntRegisterType; } void init(regNumber reg) { #ifdef TARGET_ARM64 // The Zero register, or the SP if ((reg == REG_ZR) || (reg == REG_SP)) { // IsGeneralRegister returns false for REG_ZR and REG_SP regNum = reg; registerType = IntRegisterType; } else #endif if (emitter::isFloatReg(reg)) { registerType = FloatRegisterType; } else { // The constructor defaults to IntRegisterType assert(emitter::isGeneralRegister(reg) && registerType == IntRegisterType); } regNum = reg; isCalleeSave = ((RBM_CALLEE_SAVED & genRegMask(reg)) != 0); } #ifdef DEBUG // print out representation void dump(); // concise representation for embedding void tinyDump(); #endif // DEBUG // DATA // interval to which this register is currently allocated. // If the interval is inactive (isActive == false) then it is not currently live, // and the register can be unassigned (i.e. setting assignedInterval to nullptr) // without spilling the register. Interval* assignedInterval; // Interval to which this register was previously allocated, and which was unassigned // because it was inactive. This register will be reassigned to this Interval when // assignedInterval becomes inactive. Interval* previousInterval; regNumber regNum; bool isCalleeSave; RegisterType registerType; unsigned char regOrder; }; inline bool leafInRange(GenTree* leaf, int lower, int upper) { if (!leaf->IsIntCnsFitsInI32()) { return false; } if (leaf->AsIntCon()->gtIconVal < lower) { return false; } if (leaf->AsIntCon()->gtIconVal > upper) { return false; } return true; } inline bool leafInRange(GenTree* leaf, int lower, int upper, int multiple) { if (!leafInRange(leaf, lower, upper)) { return false; } if (leaf->AsIntCon()->gtIconVal % multiple) { return false; } return true; } inline bool leafAddInRange(GenTree* leaf, int lower, int upper, int multiple = 1) { if (leaf->OperGet() != GT_ADD) { return false; } return leafInRange(leaf->gtGetOp2(), lower, upper, multiple); } inline bool isCandidateVar(const LclVarDsc* varDsc) { return varDsc->lvLRACandidate; } /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX LinearScan XX XX XX XX This is the container for the Linear Scan data structures and methods. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // OPTION 1: The algorithm as described in "Optimized Interval Splitting in a // Linear Scan Register Allocator". It is driven by iterating over the Interval // lists. In this case, we need multiple IntervalLists, and Intervals will be // moved between them so they must be easily updated. // OPTION 2: The algorithm is driven by iterating over the RefPositions. In this // case, we only need a single IntervalList, and it won't be updated. // The RefPosition must refer to its Interval, and we need to be able to traverse // to the next RefPosition in code order // THIS IS THE OPTION CURRENTLY BEING PURSUED class LinearScan : public LinearScanInterface { friend class RefPosition; friend class Interval; friend class Lowering; public: // This could use further abstraction. From Compiler we need the tree, // the flowgraph and the allocator. 
LinearScan(Compiler* theCompiler); // This is the main driver virtual void doLinearScan(); static bool isSingleRegister(regMaskTP regMask) { return (genExactlyOneBit(regMask)); } // Initialize the block traversal for LSRA. // This resets the bbVisitedSet, and on the first invocation sets the blockSequence array, // which determines the order in which blocks will be allocated (currently called during Lowering). BasicBlock* startBlockSequence(); // Move to the next block in sequence, updating the current block information. BasicBlock* moveToNextBlock(); // Get the next block to be scheduled without changing the current block, // but updating the blockSequence during the first iteration if it is not fully computed. BasicBlock* getNextBlock(); // This is called during code generation to update the location of variables virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb); // This does the dataflow analysis and builds the intervals void buildIntervals(); // This is where the actual assignment is done void allocateRegisters(); // This is the resolution phase, where cross-block mismatches are fixed up void resolveRegisters(); void writeRegisters(RefPosition* currentRefPosition, GenTree* tree); // Insert a copy in the case where a tree node value must be moved to a different // register at the point of use, or it is reloaded to a different register // than the one it was spilled from void insertCopyOrReload(BasicBlock* block, GenTree* tree, unsigned multiRegIdx, RefPosition* refPosition); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE void makeUpperVectorInterval(unsigned varIndex); Interval* getUpperVectorInterval(unsigned varIndex); // Save the upper half of a vector that lives in a callee-save register at the point of a call. void insertUpperVectorSave(GenTree* tree, RefPosition* refPosition, Interval* upperVectorInterval, BasicBlock* block); // Restore the upper half of a vector that's been partially spilled prior to a use in 'tree'. void insertUpperVectorRestore(GenTree* tree, RefPosition* refPosition, Interval* upperVectorInterval, BasicBlock* block); #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE // resolve along one block-block edge enum ResolveType { ResolveSplit, ResolveJoin, ResolveCritical, ResolveSharedCritical, ResolveTypeCount }; #ifdef DEBUG static const char* resolveTypeName[ResolveTypeCount]; #endif enum WhereToInsert { InsertAtTop, InsertAtBottom }; #ifdef TARGET_ARM void addResolutionForDouble(BasicBlock* block, GenTree* insertionPoint, Interval** sourceIntervals, regNumberSmall* location, regNumber toReg, regNumber fromReg, ResolveType resolveType); #endif void addResolution( BasicBlock* block, GenTree* insertionPoint, Interval* interval, regNumber outReg, regNumber inReg); void handleOutgoingCriticalEdges(BasicBlock* block); void resolveEdge(BasicBlock* fromBlock, BasicBlock* toBlock, ResolveType resolveType, VARSET_VALARG_TP liveSet); void resolveEdges(); // Keep track of how many temp locations we'll need for spill void initMaxSpill(); void updateMaxSpill(RefPosition* refPosition); void recordMaxSpill(); // max simultaneous spill locations used of every type unsigned int maxSpill[TYP_COUNT]; unsigned int currentSpill[TYP_COUNT]; bool needFloatTmpForFPCall; bool needDoubleTmpForFPCall; #ifdef DEBUG private: //------------------------------------------------------------------------ // Should we stress lsra? This uses the COMPlus_JitStressRegs variable. // // The mask bits are currently divided into fields in which each non-zero value // is a distinct stress option (e.g. 
0x3 is not a combination of 0x1 and 0x2). // However, subject to possible constraints (to be determined), the different // fields can be combined (e.g. 0x7 is a combination of 0x3 and 0x4). // Note that the field values are declared in a public enum, but the actual bits are // only accessed via accessors. unsigned lsraStressMask; // This controls the registers available for allocation enum LsraStressLimitRegs{LSRA_LIMIT_NONE = 0, LSRA_LIMIT_CALLEE = 0x1, LSRA_LIMIT_CALLER = 0x2, LSRA_LIMIT_SMALL_SET = 0x3, LSRA_LIMIT_MASK = 0x3}; // When LSRA_LIMIT_SMALL_SET is specified, it is desirable to select a "mixed" set of caller- and callee-save // registers, so as to get different coverage than limiting to callee or caller. // At least for x86 and AMD64, and potentially other architectures that will support SIMD, // we need a minimum of 5 fp regs in order to support the InitN intrinsic for Vector4. // Hence the "SmallFPSet" has 5 elements. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI // On System V the RDI and RSI are not callee saved. Use R12 and R13 as callee saved registers. static const regMaskTP LsraLimitSmallIntSet = (RBM_EAX | RBM_ECX | RBM_EBX | RBM_ETW_FRAMED_EBP | RBM_R12 | RBM_R13); #else // !UNIX_AMD64_ABI // On Windows Amd64 use the RDI and RSI as callee saved registers. static const regMaskTP LsraLimitSmallIntSet = (RBM_EAX | RBM_ECX | RBM_EBX | RBM_ETW_FRAMED_EBP | RBM_ESI | RBM_EDI); #endif // !UNIX_AMD64_ABI static const regMaskTP LsraLimitSmallFPSet = (RBM_XMM0 | RBM_XMM1 | RBM_XMM2 | RBM_XMM6 | RBM_XMM7); #elif defined(TARGET_ARM) // On ARM, we may need two registers to set up the target register for a virtual call, so we need // to have at least the maximum number of arg registers, plus 2. static const regMaskTP LsraLimitSmallIntSet = (RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5); static const regMaskTP LsraLimitSmallFPSet = (RBM_F0 | RBM_F1 | RBM_F2 | RBM_F16 | RBM_F17); #elif defined(TARGET_ARM64) static const regMaskTP LsraLimitSmallIntSet = (RBM_R0 | RBM_R1 | RBM_R2 | RBM_R19 | RBM_R20); static const regMaskTP LsraLimitSmallFPSet = (RBM_V0 | RBM_V1 | RBM_V2 | RBM_V8 | RBM_V9); #elif defined(TARGET_X86) static const regMaskTP LsraLimitSmallIntSet = (RBM_EAX | RBM_ECX | RBM_EDI); static const regMaskTP LsraLimitSmallFPSet = (RBM_XMM0 | RBM_XMM1 | RBM_XMM2 | RBM_XMM6 | RBM_XMM7); #else #error Unsupported or unset target architecture #endif // target LsraStressLimitRegs getStressLimitRegs() { return (LsraStressLimitRegs)(lsraStressMask & LSRA_LIMIT_MASK); } regMaskTP getConstrainedRegMask(regMaskTP regMaskActual, regMaskTP regMaskConstrain, unsigned minRegCount); regMaskTP stressLimitRegs(RefPosition* refPosition, regMaskTP mask); // This controls the heuristics used to select registers // These can be combined. 
enum LsraSelect{LSRA_SELECT_DEFAULT = 0, LSRA_SELECT_REVERSE_HEURISTICS = 0x04, LSRA_SELECT_REVERSE_CALLER_CALLEE = 0x08, LSRA_SELECT_NEAREST = 0x10, LSRA_SELECT_MASK = 0x1c}; LsraSelect getSelectionHeuristics() { return (LsraSelect)(lsraStressMask & LSRA_SELECT_MASK); } bool doReverseSelect() { return ((lsraStressMask & LSRA_SELECT_REVERSE_HEURISTICS) != 0); } bool doReverseCallerCallee() { return ((lsraStressMask & LSRA_SELECT_REVERSE_CALLER_CALLEE) != 0); } bool doSelectNearest() { return ((lsraStressMask & LSRA_SELECT_NEAREST) != 0); } // This controls the order in which basic blocks are visited during allocation enum LsraTraversalOrder{LSRA_TRAVERSE_LAYOUT = 0x20, LSRA_TRAVERSE_PRED_FIRST = 0x40, LSRA_TRAVERSE_RANDOM = 0x60, // NYI LSRA_TRAVERSE_DEFAULT = LSRA_TRAVERSE_PRED_FIRST, LSRA_TRAVERSE_MASK = 0x60}; LsraTraversalOrder getLsraTraversalOrder() { if ((lsraStressMask & LSRA_TRAVERSE_MASK) == 0) { return LSRA_TRAVERSE_DEFAULT; } return (LsraTraversalOrder)(lsraStressMask & LSRA_TRAVERSE_MASK); } bool isTraversalLayoutOrder() { return getLsraTraversalOrder() == LSRA_TRAVERSE_LAYOUT; } bool isTraversalPredFirstOrder() { return getLsraTraversalOrder() == LSRA_TRAVERSE_PRED_FIRST; } // This controls whether lifetimes should be extended to the entire method. // Note that this has no effect under MinOpts enum LsraExtendLifetimes{LSRA_DONT_EXTEND = 0, LSRA_EXTEND_LIFETIMES = 0x80, LSRA_EXTEND_LIFETIMES_MASK = 0x80}; LsraExtendLifetimes getLsraExtendLifeTimes() { return (LsraExtendLifetimes)(lsraStressMask & LSRA_EXTEND_LIFETIMES_MASK); } bool extendLifetimes() { return getLsraExtendLifeTimes() == LSRA_EXTEND_LIFETIMES; } // This controls whether variables locations should be set to the previous block in layout order // (LSRA_BLOCK_BOUNDARY_LAYOUT), or to that of the highest-weight predecessor (LSRA_BLOCK_BOUNDARY_PRED - // the default), or rotated (LSRA_BLOCK_BOUNDARY_ROTATE). enum LsraBlockBoundaryLocations{LSRA_BLOCK_BOUNDARY_PRED = 0, LSRA_BLOCK_BOUNDARY_LAYOUT = 0x100, LSRA_BLOCK_BOUNDARY_ROTATE = 0x200, LSRA_BLOCK_BOUNDARY_MASK = 0x300}; LsraBlockBoundaryLocations getLsraBlockBoundaryLocations() { return (LsraBlockBoundaryLocations)(lsraStressMask & LSRA_BLOCK_BOUNDARY_MASK); } regNumber rotateBlockStartLocation(Interval* interval, regNumber targetReg, regMaskTP availableRegs); // This controls whether we always insert a GT_RELOAD instruction after a spill // Note that this can be combined with LSRA_SPILL_ALWAYS (or not) enum LsraReload{LSRA_NO_RELOAD_IF_SAME = 0, LSRA_ALWAYS_INSERT_RELOAD = 0x400, LSRA_RELOAD_MASK = 0x400}; LsraReload getLsraReload() { return (LsraReload)(lsraStressMask & LSRA_RELOAD_MASK); } bool alwaysInsertReload() { return getLsraReload() == LSRA_ALWAYS_INSERT_RELOAD; } // This controls whether we spill everywhere enum LsraSpill{LSRA_DONT_SPILL_ALWAYS = 0, LSRA_SPILL_ALWAYS = 0x800, LSRA_SPILL_MASK = 0x800}; LsraSpill getLsraSpill() { return (LsraSpill)(lsraStressMask & LSRA_SPILL_MASK); } bool spillAlways() { return getLsraSpill() == LSRA_SPILL_ALWAYS; } // This controls whether RefPositions that lower/codegen indicated as reg optional be // allocated a reg at all. 
enum LsraRegOptionalControl{LSRA_REG_OPTIONAL_DEFAULT = 0, LSRA_REG_OPTIONAL_NO_ALLOC = 0x1000, LSRA_REG_OPTIONAL_MASK = 0x1000}; LsraRegOptionalControl getLsraRegOptionalControl() { return (LsraRegOptionalControl)(lsraStressMask & LSRA_REG_OPTIONAL_MASK); } bool regOptionalNoAlloc() { return getLsraRegOptionalControl() == LSRA_REG_OPTIONAL_NO_ALLOC; } bool candidatesAreStressLimited() { return ((lsraStressMask & (LSRA_LIMIT_MASK | LSRA_SELECT_MASK)) != 0); } // Dump support void dumpDefList(); void lsraDumpIntervals(const char* msg); void dumpRefPositions(const char* msg); void dumpVarRefPositions(const char* msg); // Checking code static bool IsLsraAdded(GenTree* node) { return ((node->gtDebugFlags & GTF_DEBUG_NODE_LSRA_ADDED) != 0); } static void SetLsraAdded(GenTree* node) { node->gtDebugFlags |= GTF_DEBUG_NODE_LSRA_ADDED; } static bool IsResolutionMove(GenTree* node); static bool IsResolutionNode(LIR::Range& containingRange, GenTree* node); void verifyFinalAllocation(); void verifyResolutionMove(GenTree* resolutionNode, LsraLocation currentLocation); #else // !DEBUG bool doSelectNearest() { return false; } bool extendLifetimes() { return false; } bool spillAlways() { return false; } // In a retail build we support only the default traversal order bool isTraversalLayoutOrder() { return false; } bool isTraversalPredFirstOrder() { return true; } bool getLsraExtendLifeTimes() { return false; } static void SetLsraAdded(GenTree* node) { // do nothing; checked only under #DEBUG } bool candidatesAreStressLimited() { return false; } #endif // !DEBUG public: // Used by Lowering when considering whether to split Longs, as well as by identifyCandidates(). bool isRegCandidate(LclVarDsc* varDsc); bool isContainableMemoryOp(GenTree* node); private: // Determine which locals are candidates for allocation void identifyCandidates(); // determine which locals are used in EH constructs we don't want to deal with void identifyCandidatesExceptionDataflow(); void buildPhysRegRecords(); #ifdef DEBUG void checkLastUses(BasicBlock* block); int ComputeOperandDstCount(GenTree* operand); int ComputeAvailableSrcCount(GenTree* node); #endif // DEBUG void setFrameType(); // Update allocations at start/end of block void unassignIntervalBlockStart(RegRecord* regRecord, VarToRegMap inVarToRegMap); void processBlockEndAllocation(BasicBlock* current); // Record variable locations at start/end of block void processBlockStartLocations(BasicBlock* current); void processBlockEndLocations(BasicBlock* current); #ifdef TARGET_ARM bool isSecondHalfReg(RegRecord* regRec, Interval* interval); RegRecord* getSecondHalfRegRec(RegRecord* regRec); RegRecord* findAnotherHalfRegRec(RegRecord* regRec); regNumber findAnotherHalfRegNum(regNumber regNum); bool canSpillDoubleReg(RegRecord* physRegRecord, LsraLocation refLocation); void unassignDoublePhysReg(RegRecord* doubleRegRecord); #endif void updateAssignedInterval(RegRecord* reg, Interval* interval, RegisterType regType); void updatePreviousInterval(RegRecord* reg, Interval* interval, RegisterType regType); bool canRestorePreviousInterval(RegRecord* regRec, Interval* assignedInterval); bool isAssignedToInterval(Interval* interval, RegRecord* regRec); bool isRefPositionActive(RefPosition* refPosition, LsraLocation refLocation); bool canSpillReg(RegRecord* physRegRecord, LsraLocation refLocation); weight_t getSpillWeight(RegRecord* physRegRecord); // insert refpositions representing prolog zero-inits which will be added later void insertZeroInitRefPositions(); // add physreg refpositions 
for a tree node, based on calling convention and instruction selection predictions void addRefsForPhysRegMask(regMaskTP mask, LsraLocation currentLoc, RefType refType, bool isLastUse); void resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefPosition); void buildRefPositionsForNode(GenTree* tree, LsraLocation loc); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE void buildUpperVectorSaveRefPositions(GenTree* tree, LsraLocation currentLoc, regMaskTP fpCalleeKillSet); void buildUpperVectorRestoreRefPosition(Interval* lclVarInterval, LsraLocation currentLoc, GenTree* node, bool isUse); #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if defined(UNIX_AMD64_ABI) // For AMD64 on SystemV machines. This method // is called as replacement for raUpdateRegStateForArg // that is used on Windows. On System V systems a struct can be passed // partially using registers from the 2 register files. void unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc); #endif // defined(UNIX_AMD64_ABI) // Update reg state for an incoming register argument void updateRegStateForArg(LclVarDsc* argDsc); inline bool isCandidateLocalRef(GenTree* tree) { if (tree->IsLocal()) { const LclVarDsc* varDsc = compiler->lvaGetDesc(tree->AsLclVarCommon()); return isCandidateVar(varDsc); } return false; } // Helpers for getKillSetForNode(). regMaskTP getKillSetForStoreInd(GenTreeStoreInd* tree); regMaskTP getKillSetForShiftRotate(GenTreeOp* tree); regMaskTP getKillSetForMul(GenTreeOp* tree); regMaskTP getKillSetForCall(GenTreeCall* call); regMaskTP getKillSetForModDiv(GenTreeOp* tree); regMaskTP getKillSetForBlockStore(GenTreeBlk* blkNode); regMaskTP getKillSetForReturn(); regMaskTP getKillSetForProfilerHook(); #ifdef FEATURE_HW_INTRINSICS regMaskTP getKillSetForHWIntrinsic(GenTreeHWIntrinsic* node); #endif // FEATURE_HW_INTRINSICS // Return the registers killed by the given tree node. // This is used only for an assert, and for stress, so it is only defined under DEBUG. // Otherwise, the Build methods should obtain the killMask from the appropriate method above. #ifdef DEBUG regMaskTP getKillSetForNode(GenTree* tree); #endif // Given some tree node add refpositions for all the registers this node kills bool buildKillPositionsForNode(GenTree* tree, LsraLocation currentLoc, regMaskTP killMask); regMaskTP allRegs(RegisterType rt); regMaskTP allByteRegs(); regMaskTP allSIMDRegs(); regMaskTP internalFloatRegCandidates(); void makeRegisterInactive(RegRecord* physRegRecord); void freeRegister(RegRecord* physRegRecord); void freeRegisters(regMaskTP regsToFree); // Get the type that this tree defines. var_types getDefType(GenTree* tree) { var_types type = tree->TypeGet(); if (type == TYP_STRUCT) { assert(tree->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR)); GenTreeLclVar* lclVar = tree->AsLclVar(); LclVarDsc* varDsc = compiler->lvaGetDesc(lclVar); type = varDsc->GetRegisterType(lclVar); } assert(type != TYP_UNDEF && type != TYP_STRUCT); return type; } // Managing internal registers during the BuildNode process. 
RefPosition* defineNewInternalTemp(GenTree* tree, RegisterType regType, regMaskTP candidates); RefPosition* buildInternalIntRegisterDefForNode(GenTree* tree, regMaskTP internalCands = RBM_NONE); RefPosition* buildInternalFloatRegisterDefForNode(GenTree* tree, regMaskTP internalCands = RBM_NONE); void buildInternalRegisterUses(); void writeLocalReg(GenTreeLclVar* lclNode, unsigned varNum, regNumber reg); void resolveLocalRef(BasicBlock* block, GenTreeLclVar* treeNode, RefPosition* currentRefPosition); void insertMove(BasicBlock* block, GenTree* insertionPoint, unsigned lclNum, regNumber inReg, regNumber outReg); void insertSwap( BasicBlock* block, GenTree* insertionPoint, unsigned lclNum1, regNumber reg1, unsigned lclNum2, regNumber reg2); private: Interval* newInterval(RegisterType regType); Interval* getIntervalForLocalVar(unsigned varIndex) { assert(varIndex < compiler->lvaTrackedCount); assert(localVarIntervals[varIndex] != nullptr); return localVarIntervals[varIndex]; } Interval* getIntervalForLocalVarNode(GenTreeLclVarCommon* tree) { const LclVarDsc* varDsc = compiler->lvaGetDesc(tree); assert(varDsc->lvTracked); return getIntervalForLocalVar(varDsc->lvVarIndex); } RegRecord* getRegisterRecord(regNumber regNum); RefPosition* newRefPositionRaw(LsraLocation nodeLocation, GenTree* treeNode, RefType refType); RefPosition* newRefPosition(Interval* theInterval, LsraLocation theLocation, RefType theRefType, GenTree* theTreeNode, regMaskTP mask, unsigned multiRegIdx = 0); // This creates a RefTypeUse at currentLoc. It sets the treeNode to nullptr if it is not a // lclVar interval. RefPosition* newUseRefPosition(Interval* theInterval, GenTree* theTreeNode, regMaskTP mask, unsigned multiRegIdx = 0); RefPosition* newRefPosition( regNumber reg, LsraLocation theLocation, RefType theRefType, GenTree* theTreeNode, regMaskTP mask); void applyCalleeSaveHeuristics(RefPosition* rp); void checkConflictingDefUse(RefPosition* rp); void associateRefPosWithInterval(RefPosition* rp); weight_t getWeight(RefPosition* refPos); /***************************************************************************** * Register management ****************************************************************************/ RegisterType getRegisterType(Interval* currentInterval, RefPosition* refPosition); #ifdef DEBUG const char* getScoreName(RegisterScore score); #endif regNumber allocateReg(Interval* current, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)); regNumber assignCopyReg(RefPosition* refPosition); bool isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPosition); bool isSpillCandidate(Interval* current, RefPosition* refPosition, RegRecord* physRegRecord); void checkAndAssignInterval(RegRecord* regRec, Interval* interval); void assignPhysReg(RegRecord* regRec, Interval* interval); void assignPhysReg(regNumber reg, Interval* interval) { assignPhysReg(getRegisterRecord(reg), interval); } bool isAssigned(RegRecord* regRec ARM_ARG(RegisterType newRegType)); void checkAndClearInterval(RegRecord* regRec, RefPosition* spillRefPosition); void unassignPhysReg(RegRecord* regRec ARM_ARG(RegisterType newRegType)); void unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPosition); void unassignPhysRegNoSpill(RegRecord* reg); void unassignPhysReg(regNumber reg) { unassignPhysReg(getRegisterRecord(reg), nullptr); } void setIntervalAsSpilled(Interval* interval); void setIntervalAsSplit(Interval* interval); void spillInterval(Interval* interval, RefPosition* fromRefPosition DEBUGARG(RefPosition* 
toRefPosition)); void spillGCRefs(RefPosition* killRefPosition); /***************************************************************************** * Register selection ****************************************************************************/ regMaskTP getFreeCandidates(regMaskTP candidates, var_types regType) { regMaskTP result = candidates & m_AvailableRegs; #ifdef TARGET_ARM // For TYP_DOUBLE on ARM, we can only use register for which the odd half is // also available. if (regType == TYP_DOUBLE) { result &= (m_AvailableRegs >> 1); } #endif // TARGET_ARM return result; } #ifdef DEBUG class RegisterSelection; // For lsra ordering experimentation typedef void (LinearScan::RegisterSelection::*HeuristicFn)(); typedef JitHashTable<RegisterScore, JitSmallPrimitiveKeyFuncs<RegisterScore>, HeuristicFn> ScoreMappingTable; #define REGSELECT_HEURISTIC_COUNT 17 #endif class RegisterSelection { public: RegisterSelection(LinearScan* linearScan); // Perform register selection and update currentInterval or refPosition FORCEINLINE regMaskTP select(Interval* currentInterval, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)); // If the register is from unassigned set such that it was not already // assigned to the current interval FORCEINLINE bool foundUnassignedReg() { assert(found && isSingleRegister(foundRegBit)); bool isUnassignedReg = ((foundRegBit & unassignedSet) != RBM_NONE); return isUnassignedReg && !isAlreadyAssigned(); } // Did register selector decide to spill this interval FORCEINLINE bool isSpilling() { return (foundRegBit & freeCandidates) == RBM_NONE; } // Is the value one of the constant that is already in a register FORCEINLINE bool isMatchingConstant() { assert(found && isSingleRegister(foundRegBit)); return (matchingConstants & foundRegBit) != RBM_NONE; } // Did we apply CONST_AVAILABLE heuristics FORCEINLINE bool isConstAvailable() { return (score & CONST_AVAILABLE) != 0; } private: #ifdef DEBUG RegisterScore RegSelectionOrder[REGSELECT_HEURISTIC_COUNT] = {NONE}; ScoreMappingTable* mappingTable = nullptr; #endif LinearScan* linearScan = nullptr; int score = 0; Interval* currentInterval = nullptr; RefPosition* refPosition = nullptr; RegisterType regType = RegisterType::TYP_UNKNOWN; LsraLocation currentLocation = MinLocation; RefPosition* nextRefPos = nullptr; regMaskTP candidates; regMaskTP preferences = RBM_NONE; Interval* relatedInterval = nullptr; regMaskTP relatedPreferences = RBM_NONE; LsraLocation rangeEndLocation; LsraLocation relatedLastLocation; bool preferCalleeSave = false; RefPosition* rangeEndRefPosition; RefPosition* lastRefPosition; regMaskTP callerCalleePrefs = RBM_NONE; LsraLocation lastLocation; RegRecord* prevRegRec = nullptr; regMaskTP prevRegBit = RBM_NONE; // These are used in the post-selection updates, and must be set for any selection. regMaskTP freeCandidates; regMaskTP matchingConstants; regMaskTP unassignedSet; regMaskTP foundRegBit; // Compute the sets for COVERS, OWN_PREFERENCE, COVERS_RELATED, COVERS_FULL and UNASSIGNED together, // as they all require similar computation. 
regMaskTP coversSet; regMaskTP preferenceSet; regMaskTP coversRelatedSet; regMaskTP coversFullSet; bool coversSetsCalculated = false; bool found = false; bool skipAllocation = false; regNumber foundReg = REG_NA; // If the selected register is already assigned to the current interval FORCEINLINE bool isAlreadyAssigned() { assert(found && isSingleRegister(candidates)); return (prevRegBit & preferences) == foundRegBit; } bool applySelection(int selectionScore, regMaskTP selectionCandidates); bool applySingleRegSelection(int selectionScore, regMaskTP selectionCandidate); FORCEINLINE void calculateCoversSets(); FORCEINLINE void reset(Interval* interval, RefPosition* refPosition); #define REG_SEL_DEF(stat, value, shortname, orderSeqId) FORCEINLINE void try_##stat(); #include "lsra_score.h" #undef REG_SEL_DEF }; RegisterSelection* regSelector; /***************************************************************************** * For Resolution phase ****************************************************************************/ // TODO-Throughput: Consider refactoring this so that we keep a map from regs to vars for better scaling unsigned int regMapCount; // When we split edges, we create new blocks, and instead of expanding the VarToRegMaps, we // rely on the property that the "in" map is the same as the "from" block of the edge, and the // "out" map is the same as the "to" block of the edge (by construction). // So, for any block whose bbNum is greater than bbNumMaxBeforeResolution, we use the // splitBBNumToTargetBBNumMap. // TODO-Throughput: We may want to look into the cost/benefit tradeoff of doing this vs. expanding // the arrays. unsigned bbNumMaxBeforeResolution; struct SplitEdgeInfo { unsigned fromBBNum; unsigned toBBNum; }; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, SplitEdgeInfo> SplitBBNumToTargetBBNumMap; SplitBBNumToTargetBBNumMap* splitBBNumToTargetBBNumMap; SplitBBNumToTargetBBNumMap* getSplitBBNumToTargetBBNumMap() { if (splitBBNumToTargetBBNumMap == nullptr) { splitBBNumToTargetBBNumMap = new (getAllocator(compiler)) SplitBBNumToTargetBBNumMap(getAllocator(compiler)); } return splitBBNumToTargetBBNumMap; } SplitEdgeInfo getSplitEdgeInfo(unsigned int bbNum); void initVarRegMaps(); void setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg); void setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg); VarToRegMap getInVarToRegMap(unsigned int bbNum); VarToRegMap getOutVarToRegMap(unsigned int bbNum); void setVarReg(VarToRegMap map, unsigned int trackedVarIndex, regNumber reg); regNumber getVarReg(VarToRegMap map, unsigned int trackedVarIndex); // Initialize the incoming VarToRegMap to the given map values (generally a predecessor of // the block) VarToRegMap setInVarToRegMap(unsigned int bbNum, VarToRegMap srcVarToRegMap); regNumber getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock, var_types type); #ifdef DEBUG void dumpVarToRegMap(VarToRegMap map); void dumpInVarToRegMap(BasicBlock* block); void dumpOutVarToRegMap(BasicBlock* block); // There are three points at which a tuple-style dump is produced, and each // differs slightly: // - In LSRA_DUMP_PRE, it does a simple dump of each node, with indications of what // tree nodes are consumed. 
// - In LSRA_DUMP_REFPOS, which is after the intervals are built, but before // register allocation, each node is dumped, along with all of the RefPositions, // The Intervals are identified as Lnnn for lclVar intervals, Innn for other // intervals, and Tnnn for internal temps. // - In LSRA_DUMP_POST, which is after register allocation, the registers are // shown. enum LsraTupleDumpMode{LSRA_DUMP_PRE, LSRA_DUMP_REFPOS, LSRA_DUMP_POST}; void lsraGetOperandString(GenTree* tree, LsraTupleDumpMode mode, char* operandString, unsigned operandStringLength); void lsraDispNode(GenTree* tree, LsraTupleDumpMode mode, bool hasDest); void DumpOperandDefs( GenTree* operand, bool& first, LsraTupleDumpMode mode, char* operandString, const unsigned operandStringLength); void TupleStyleDump(LsraTupleDumpMode mode); LsraLocation maxNodeLocation; // Width of various fields - used to create a streamlined dump during allocation that shows the // state of all the registers in columns. int regColumnWidth; int regTableIndent; const char* columnSeparator; const char* line; const char* leftBox; const char* middleBox; const char* rightBox; static const int MAX_FORMAT_CHARS = 12; char intervalNameFormat[MAX_FORMAT_CHARS]; char regNameFormat[MAX_FORMAT_CHARS]; char shortRefPositionFormat[MAX_FORMAT_CHARS]; char emptyRefPositionFormat[MAX_FORMAT_CHARS]; char indentFormat[MAX_FORMAT_CHARS]; static const int MAX_LEGEND_FORMAT_CHARS = 25; char bbRefPosFormat[MAX_LEGEND_FORMAT_CHARS]; char legendFormat[MAX_LEGEND_FORMAT_CHARS]; // How many rows have we printed since last printing a "title row"? static const int MAX_ROWS_BETWEEN_TITLES = 50; int rowCountSinceLastTitle; // Current mask of registers being printed in the dump. regMaskTP lastDumpedRegisters; regMaskTP registersToDump; int lastUsedRegNumIndex; bool shouldDumpReg(regNumber regNum) { return (registersToDump & genRegMask(regNum)) != 0; } void dumpRegRecordHeader(); void dumpRegRecordTitle(); void dumpRegRecordTitleIfNeeded(); void dumpRegRecordTitleLines(); void dumpRegRecords(); void dumpNewBlock(BasicBlock* currentBlock, LsraLocation location); // An abbreviated RefPosition dump for printing with column-based register state void dumpRefPositionShort(RefPosition* refPosition, BasicBlock* currentBlock); // Print the number of spaces occupied by a dumpRefPositionShort() void dumpEmptyRefPosition(); // A dump of Referent, in exactly regColumnWidth characters void dumpIntervalName(Interval* interval); // Events during the allocation phase that cause some dump output enum LsraDumpEvent{ // Conflicting def/use LSRA_EVENT_DEFUSE_CONFLICT, LSRA_EVENT_DEFUSE_FIXED_DELAY_USE, LSRA_EVENT_DEFUSE_CASE1, LSRA_EVENT_DEFUSE_CASE2, LSRA_EVENT_DEFUSE_CASE3, LSRA_EVENT_DEFUSE_CASE4, LSRA_EVENT_DEFUSE_CASE5, LSRA_EVENT_DEFUSE_CASE6, // Spilling LSRA_EVENT_SPILL, LSRA_EVENT_SPILL_EXTENDED_LIFETIME, LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL, LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL, LSRA_EVENT_DONE_KILL_GC_REFS, LSRA_EVENT_NO_GC_KILLS, // Block boundaries LSRA_EVENT_START_BB, LSRA_EVENT_END_BB, // Miscellaneous LSRA_EVENT_FREE_REGS, LSRA_EVENT_UPPER_VECTOR_SAVE, LSRA_EVENT_UPPER_VECTOR_RESTORE, // Characteristics of the current RefPosition LSRA_EVENT_INCREMENT_RANGE_END, // ??? 
LSRA_EVENT_LAST_USE, LSRA_EVENT_LAST_USE_DELAYED, LSRA_EVENT_NEEDS_NEW_REG, // Allocation decisions LSRA_EVENT_FIXED_REG, LSRA_EVENT_EXP_USE, LSRA_EVENT_ZERO_REF, LSRA_EVENT_NO_ENTRY_REG_ALLOCATED, LSRA_EVENT_KEPT_ALLOCATION, LSRA_EVENT_COPY_REG, LSRA_EVENT_MOVE_REG, LSRA_EVENT_ALLOC_REG, LSRA_EVENT_NO_REG_ALLOCATED, LSRA_EVENT_RELOAD, LSRA_EVENT_SPECIAL_PUTARG, LSRA_EVENT_REUSE_REG, }; void dumpLsraAllocationEvent(LsraDumpEvent event, Interval* interval = nullptr, regNumber reg = REG_NA, BasicBlock* currentBlock = nullptr, RegisterScore registerScore = NONE); void validateIntervals(); #endif // DEBUG #if TRACK_LSRA_STATS unsigned regCandidateVarCount; void updateLsraStat(LsraStat stat, unsigned currentBBNum); void dumpLsraStats(FILE* file); LsraStat getLsraStatFromScore(RegisterScore registerScore); LsraStat firstRegSelStat = STAT_FREE; public: virtual void dumpLsraStatsCsv(FILE* file); virtual void dumpLsraStatsSummary(FILE* file); static const char* getStatName(unsigned stat); #define INTRACK_STATS(x) x #define INTRACK_STATS_IF(condition, work) \ if (condition) \ { \ work; \ } #else // !TRACK_LSRA_STATS #define INTRACK_STATS(x) #define INTRACK_STATS_IF(condition, work) #endif // !TRACK_LSRA_STATS private: Compiler* compiler; CompAllocator getAllocator(Compiler* comp) { return comp->getAllocator(CMK_LSRA); } #ifdef DEBUG // This is used for dumping RefPosition* activeRefPosition; #endif // DEBUG IntervalList intervals; RegRecord physRegs[REG_COUNT]; // Map from tracked variable index to Interval*. Interval** localVarIntervals; // Set of blocks that have been visited. BlockSet bbVisitedSet; void markBlockVisited(BasicBlock* block) { BlockSetOps::AddElemD(compiler, bbVisitedSet, block->bbNum); } void clearVisitedBlocks() { BlockSetOps::ClearD(compiler, bbVisitedSet); } bool isBlockVisited(BasicBlock* block) { return BlockSetOps::IsMember(compiler, bbVisitedSet, block->bbNum); } #if DOUBLE_ALIGN bool doDoubleAlign; #endif // A map from bbNum to the block information used during register allocation. LsraBlockInfo* blockInfo; BasicBlock* findPredBlockForLiveIn(BasicBlock* block, BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated)); // The order in which the blocks will be allocated. // This is an array of BasicBlock*, in the order in which they should be traversed. BasicBlock** blockSequence; // The verifiedAllBBs flag indicates whether we have verified that all BBs have been // included in the blockSequence above, during setBlockSequence(). bool verifiedAllBBs; void setBlockSequence(); int compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights); BasicBlockList* blockSequenceWorkList; bool blockSequencingDone; #ifdef DEBUG // LSRA must not change number of blocks and blockEpoch that it initializes at start. unsigned blockEpoch; #endif // DEBUG void addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block, BlockSet& predSet); void removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode); BasicBlock* getNextCandidateFromWorkList(); // Indicates whether the allocation pass has been completed. bool allocationPassComplete; // The bbNum of the block being currently allocated or resolved. unsigned int curBBNum; // The current location LsraLocation currentLoc; // The first location in a cold or funclet block. LsraLocation firstColdLoc; // The ordinal of the block we're on (i.e. this is the curBBSeqNum-th block we've allocated). unsigned int curBBSeqNum; // The number of blocks that we've sequenced. 
unsigned int bbSeqCount; // The Location of the start of the current block. LsraLocation curBBStartLocation; // True if the method contains any critical edges. bool hasCriticalEdges; // True if there are any register candidate lclVars available for allocation. bool enregisterLocalVars; virtual bool willEnregisterLocalVars() const { return enregisterLocalVars; } // Ordered list of RefPositions RefPositionList refPositions; // Per-block variable location mappings: an array indexed by block number that yields a // pointer to an array of regNumber, one per variable. VarToRegMap* inVarToRegMaps; VarToRegMap* outVarToRegMaps; // A temporary VarToRegMap used during the resolution of critical edges. VarToRegMap sharedCriticalVarToRegMap; PhasedVar<regMaskTP> availableIntRegs; PhasedVar<regMaskTP> availableFloatRegs; PhasedVar<regMaskTP> availableDoubleRegs; // The set of all register candidates. Note that this may be a subset of tracked vars. VARSET_TP registerCandidateVars; // Current set of live register candidate vars, used during building of RefPositions to determine // whether to preference to callee-save. VARSET_TP currentLiveVars; // Set of variables that may require resolution across an edge. // This is first constructed during interval building, to contain all the lclVars that are live at BB edges. // Then, any lclVar that is always in the same register is removed from the set. VARSET_TP resolutionCandidateVars; // This set contains all the lclVars that are ever spilled or split. VARSET_TP splitOrSpilledVars; // Set of floating point variables to consider for callee-save registers. VARSET_TP fpCalleeSaveCandidateVars; // Set of variables exposed on EH flow edges. VARSET_TP exceptVars; // Set of variables exposed on finally edges. These must be zero-init if they are refs or if compInitMem is true. VARSET_TP finallyVars; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if defined(TARGET_AMD64) static const var_types LargeVectorSaveType = TYP_SIMD16; #elif defined(TARGET_ARM64) static const var_types LargeVectorSaveType = TYP_DOUBLE; #endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64) // Set of large vector (TYP_SIMD32 on AVX) variables. VARSET_TP largeVectorVars; // Set of large vector (TYP_SIMD32 on AVX) variables to consider for callee-save registers. 
VARSET_TP largeVectorCalleeSaveCandidateVars; #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE //----------------------------------------------------------------------- // Register status //----------------------------------------------------------------------- regMaskTP m_AvailableRegs; regNumber getRegForType(regNumber reg, var_types regType) { #ifdef TARGET_ARM if ((regType == TYP_DOUBLE) && !genIsValidDoubleReg(reg)) { reg = REG_PREV(reg); } #endif // TARGET_ARM return reg; } regMaskTP getRegMask(regNumber reg, var_types regType) { reg = getRegForType(reg, regType); regMaskTP regMask = genRegMask(reg); #ifdef TARGET_ARM if (regType == TYP_DOUBLE) { assert(genIsValidDoubleReg(reg)); regMask |= (regMask << 1); } #endif // TARGET_ARM return regMask; } void resetAvailableRegs() { m_AvailableRegs = (availableIntRegs | availableFloatRegs); m_RegistersWithConstants = RBM_NONE; } bool isRegAvailable(regNumber reg, var_types regType) { regMaskTP regMask = getRegMask(reg, regType); return (m_AvailableRegs & regMask) == regMask; } void setRegsInUse(regMaskTP regMask) { m_AvailableRegs &= ~regMask; } void setRegInUse(regNumber reg, var_types regType) { regMaskTP regMask = getRegMask(reg, regType); setRegsInUse(regMask); } void makeRegsAvailable(regMaskTP regMask) { m_AvailableRegs |= regMask; } void makeRegAvailable(regNumber reg, var_types regType) { regMaskTP regMask = getRegMask(reg, regType); makeRegsAvailable(regMask); } void clearNextIntervalRef(regNumber reg, var_types regType); void updateNextIntervalRef(regNumber reg, Interval* interval); void clearSpillCost(regNumber reg, var_types regType); void updateSpillCost(regNumber reg, Interval* interval); regMaskTP m_RegistersWithConstants; void clearConstantReg(regNumber reg, var_types regType) { m_RegistersWithConstants &= ~getRegMask(reg, regType); } void setConstantReg(regNumber reg, var_types regType) { m_RegistersWithConstants |= getRegMask(reg, regType); } bool isRegConstant(regNumber reg, var_types regType) { reg = getRegForType(reg, regType); regMaskTP regMask = getRegMask(reg, regType); return (m_RegistersWithConstants & regMask) == regMask; } regMaskTP getMatchingConstants(regMaskTP mask, Interval* currentInterval, RefPosition* refPosition); regMaskTP fixedRegs; LsraLocation nextFixedRef[REG_COUNT]; void updateNextFixedRef(RegRecord* regRecord, RefPosition* nextRefPosition); LsraLocation getNextFixedRef(regNumber regNum, var_types regType) { LsraLocation loc = nextFixedRef[regNum]; #ifdef TARGET_ARM if (regType == TYP_DOUBLE) { loc = Min(loc, nextFixedRef[regNum + 1]); } #endif return loc; } LsraLocation nextIntervalRef[REG_COUNT]; LsraLocation getNextIntervalRef(regNumber regNum, var_types regType) { LsraLocation loc = nextIntervalRef[regNum]; #ifdef TARGET_ARM if (regType == TYP_DOUBLE) { loc = Min(loc, nextIntervalRef[regNum + 1]); } #endif return loc; } weight_t spillCost[REG_COUNT]; regMaskTP regsBusyUntilKill; regMaskTP regsInUseThisLocation; regMaskTP regsInUseNextLocation; bool isRegBusy(regNumber reg, var_types regType) { regMaskTP regMask = getRegMask(reg, regType); return (regsBusyUntilKill & regMask) != RBM_NONE; } void setRegBusyUntilKill(regNumber reg, var_types regType) { regsBusyUntilKill |= getRegMask(reg, regType); } void clearRegBusyUntilKill(regNumber reg) { regsBusyUntilKill &= ~genRegMask(reg); } bool isRegInUse(regNumber reg, var_types regType) { regMaskTP regMask = getRegMask(reg, regType); return (regsInUseThisLocation & regMask) != RBM_NONE; } void resetRegState() { resetAvailableRegs(); regsBusyUntilKill = 
RBM_NONE; } bool conflictingFixedRegReference(regNumber regNum, RefPosition* refPosition); // This method should not be used and is here to retain old behavior. // It should be replaced by isRegAvailable(). // See comment in allocateReg(); bool isFree(RegRecord* regRecord); //----------------------------------------------------------------------- // Build methods //----------------------------------------------------------------------- // The listNodePool is used to maintain the RefInfo for nodes that are "in flight" // i.e. whose consuming node has not yet been handled. RefInfoListNodePool listNodePool; // When Def RefPositions are built for a node, their RefInfoListNode // (GenTree* to RefPosition* mapping) is placed in the defList. // As the consuming node is handled, it removes the RefInfoListNode from the // defList, use the interval associated with the corresponding Def RefPosition and // use it to build the Use RefPosition. RefInfoList defList; // As we build uses, we may want to preference the next definition (i.e. the register produced // by the current node) to the same register as one of its uses. This is done by setting // 'tgtPrefUse' to that RefPosition. RefPosition* tgtPrefUse = nullptr; RefPosition* tgtPrefUse2 = nullptr; // The following keep track of information about internal (temporary register) intervals // during the building of a single node. static const int MaxInternalCount = 5; RefPosition* internalDefs[MaxInternalCount]; int internalCount = 0; bool setInternalRegsDelayFree; // When a RefTypeUse is marked as 'delayRegFree', we also want to mark the RefTypeDef // in the next Location as 'hasInterferingUses'. This is accomplished by setting this // 'pendingDelayFree' to true as they are created, and clearing it as a new node is // handled in 'BuildNode'. bool pendingDelayFree; // This method clears the "build state" before starting to handle a new node. void clearBuildState() { tgtPrefUse = nullptr; tgtPrefUse2 = nullptr; internalCount = 0; setInternalRegsDelayFree = false; pendingDelayFree = false; } bool isCandidateMultiRegLclVar(GenTreeLclVar* lclNode); bool checkContainedOrCandidateLclVar(GenTreeLclVar* lclNode); RefPosition* BuildUse(GenTree* operand, regMaskTP candidates = RBM_NONE, int multiRegIdx = 0); void setDelayFree(RefPosition* use); int BuildBinaryUses(GenTreeOp* node, regMaskTP candidates = RBM_NONE); #ifdef TARGET_XARCH int BuildRMWUses(GenTree* node, GenTree* op1, GenTree* op2, regMaskTP candidates = RBM_NONE); #endif // !TARGET_XARCH // This is the main entry point for building the RefPositions for a node. // These methods return the number of sources. 
int BuildNode(GenTree* tree); void getTgtPrefOperands(GenTree* tree, GenTree* op1, GenTree* op2, bool* prefOp1, bool* prefOp2); bool supportsSpecialPutArg(); int BuildSimple(GenTree* tree); int BuildOperandUses(GenTree* node, regMaskTP candidates = RBM_NONE); int BuildDelayFreeUses(GenTree* node, GenTree* rmwNode = nullptr, regMaskTP candidates = RBM_NONE); int BuildIndirUses(GenTreeIndir* indirTree, regMaskTP candidates = RBM_NONE); int BuildAddrUses(GenTree* addr, regMaskTP candidates = RBM_NONE); void HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs); RefPosition* BuildDef(GenTree* tree, regMaskTP dstCandidates = RBM_NONE, int multiRegIdx = 0); void BuildDefs(GenTree* tree, int dstCount, regMaskTP dstCandidates = RBM_NONE); void BuildDefsWithKills(GenTree* tree, int dstCount, regMaskTP dstCandidates, regMaskTP killMask); int BuildReturn(GenTree* tree); #ifdef TARGET_XARCH // This method, unlike the others, returns the number of sources, since it may be called when // 'tree' is contained. int BuildShiftRotate(GenTree* tree); #endif // TARGET_XARCH #ifdef TARGET_ARM int BuildShiftLongCarry(GenTree* tree); #endif int BuildPutArgReg(GenTreeUnOp* node); int BuildCall(GenTreeCall* call); int BuildCmp(GenTree* tree); int BuildBlockStore(GenTreeBlk* blkNode); int BuildModDiv(GenTree* tree); int BuildIntrinsic(GenTree* tree); void BuildStoreLocDef(GenTreeLclVarCommon* storeLoc, LclVarDsc* varDsc, RefPosition* singleUseRef, int index); int BuildMultiRegStoreLoc(GenTreeLclVar* storeLoc); int BuildStoreLoc(GenTreeLclVarCommon* tree); int BuildIndir(GenTreeIndir* indirTree); int BuildGCWriteBarrier(GenTree* tree); int BuildCast(GenTreeCast* cast); #if defined(TARGET_XARCH) // returns true if the tree can use the read-modify-write memory instruction form bool isRMWRegOper(GenTree* tree); int BuildMul(GenTree* tree); void SetContainsAVXFlags(unsigned sizeOfSIMDVector = 0); #endif // defined(TARGET_XARCH) #if defined(TARGET_X86) // Move the last use bit, if any, from 'fromTree' to 'toTree'; 'fromTree' must be contained. void CheckAndMoveRMWLastUse(GenTree* fromTree, GenTree* toTree) { // If 'fromTree' is not a last-use lclVar, there's nothing to do. if ((fromTree == nullptr) || !fromTree->OperIs(GT_LCL_VAR) || ((fromTree->gtFlags & GTF_VAR_DEATH) == 0)) { return; } // If 'fromTree' was a lclVar, it must be contained and 'toTree' must match. if (!fromTree->isContained() || (toTree == nullptr) || !toTree->OperIs(GT_LCL_VAR) || (fromTree->AsLclVarCommon()->GetLclNum() != toTree->AsLclVarCommon()->GetLclNum())) { assert(!"Unmatched RMW indirections"); return; } // This is probably not necessary, but keeps things consistent. fromTree->gtFlags &= ~GTF_VAR_DEATH; toTree->gtFlags |= GTF_VAR_DEATH; } #endif // TARGET_X86 #ifdef FEATURE_SIMD int BuildSIMD(GenTreeSIMD* tree); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS int BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree, int* pDstCount); #endif // FEATURE_HW_INTRINSICS int BuildPutArgStk(GenTreePutArgStk* argNode); #if FEATURE_ARG_SPLIT int BuildPutArgSplit(GenTreePutArgSplit* tree); #endif // FEATURE_ARG_SPLIT int BuildLclHeap(GenTree* tree); }; /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Interval XX XX XX XX This is the fundamental data structure for linear scan register XX XX allocation. It represents the live range(s) for a variable or temp. 
XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

class Interval : public Referenceable
{
public:
    Interval(RegisterType registerType, regMaskTP registerPreferences)
        : registerPreferences(registerPreferences)
        , relatedInterval(nullptr)
        , assignedReg(nullptr)
        , varNum(0)
        , physReg(REG_COUNT)
        , registerType(registerType)
        , isActive(false)
        , isLocalVar(false)
        , isSplit(false)
        , isSpilled(false)
        , isInternal(false)
        , isStructField(false)
        , isPromotedStruct(false)
        , hasConflictingDefUse(false)
        , hasInterferingUses(false)
        , isSpecialPutArg(false)
        , preferCalleeSave(false)
        , isConstant(false)
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
        , isUpperVector(false)
        , isPartiallySpilled(false)
#endif
        , isWriteThru(false)
        , isSingleDef(false)
#ifdef DEBUG
        , intervalIndex(0)
#endif
    {
    }

#ifdef DEBUG
    // print out representation
    void dump();
    // concise representation for embedding
    void tinyDump();
    // extremely concise representation
    void microDump();
#endif // DEBUG

    void setLocalNumber(Compiler* compiler, unsigned lclNum, LinearScan* l);

    // Fixed registers for which this Interval has a preference
    regMaskTP registerPreferences;

    // The relatedInterval is the interval to which this interval
    // is currently preferenced (e.g. because they are related by a copy)
    Interval* relatedInterval;

    // The assignedReg is the RegRecord for the register to which this interval
    // has been assigned at some point - if the interval is active, this is the
    // register it currently occupies.
    RegRecord* assignedReg;

    unsigned int varNum; // This is the "variable number": the index into the lvaTable array

    // The register to which it is currently assigned.
    regNumber physReg;

    RegisterType registerType;

    // Is this Interval currently in a register and live?
    bool isActive;

    bool isLocalVar : 1;
    // Indicates whether this interval has been assigned to different registers
    bool isSplit : 1;
    // Indicates whether this interval is ever spilled
    bool isSpilled : 1;
    // indicates an interval representing the internal requirements for
    // generating code for a node (temp registers internal to the node)
    // Note that this interval may live beyond a node in the GT_ARR_LENREF/GT_IND
    // case (though never lives beyond a stmt)
    bool isInternal : 1;
    // true if this is a LocalVar for a struct field
    bool isStructField : 1;
    // true iff this is a GT_LDOBJ for a fully promoted (PROMOTION_TYPE_INDEPENDENT) struct
    bool isPromotedStruct : 1;
    // true if this is an SDSU interval for which the def and use have conflicting register
    // requirements
    bool hasConflictingDefUse : 1;
    // true if this interval's defining node has "delayRegFree" uses, either due to it being an RMW instruction,
    // OR because it requires an internal register that differs from the target.
    bool hasInterferingUses : 1;

    // True if this interval is defined by a putArg, whose source is a non-last-use lclVar.
    // During allocation, this flag will be cleared if the source is not already in the required register.
    // Otherwise, we will leave the register allocated to the lclVar, but mark the RegRecord as
    // isBusyUntilKill, so that it won't be reused if the lclVar goes dead before the call.
    bool isSpecialPutArg : 1;

    // True if this interval interferes with a call.
    bool preferCalleeSave : 1;

    // True if this interval is defined by a constant node that may be reused and/or may be
    // able to reuse a constant that's already in a register.
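    // For example (an illustrative scenario, not a guarantee): if two nodes each
    // define the integer constant 0, the second definition may be satisfied by the
    // register already holding 0 (tracked in m_RegistersWithConstants) rather than
    // re-materializing it; whether reuse actually happens is decided during allocation.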
bool isConstant : 1; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // True if this is a special interval for saving the upper half of a large vector. bool isUpperVector : 1; // This is a convenience method to avoid ifdef's everywhere this is used. bool IsUpperVector() const { return isUpperVector; } // True if this interval has been partially spilled bool isPartiallySpilled : 1; #else bool IsUpperVector() const { return false; } #endif // True if this interval is associated with a lclVar that is written to memory at each definition. bool isWriteThru : 1; // True if this interval has a single definition. bool isSingleDef : 1; #ifdef DEBUG unsigned int intervalIndex; #endif // DEBUG LclVarDsc* getLocalVar(Compiler* comp) { assert(isLocalVar); return comp->lvaGetDesc(this->varNum); } // Get the local tracked variable "index" (lvVarIndex), used in bitmasks. unsigned getVarIndex(Compiler* comp) { LclVarDsc* varDsc = getLocalVar(comp); assert(varDsc->lvTracked); // If this isn't true, we shouldn't be calling this function! return varDsc->lvVarIndex; } bool isAssignedTo(regNumber regNum) { // This uses regMasks to handle the case where a double actually occupies two registers // TODO-Throughput: This could/should be done more cheaply. return (physReg != REG_NA && (genRegMask(physReg, registerType) & genRegMask(regNum)) != RBM_NONE); } // Assign the related interval. void assignRelatedInterval(Interval* newRelatedInterval) { #ifdef DEBUG if (VERBOSE) { printf("Assigning related "); newRelatedInterval->microDump(); printf(" to "); this->microDump(); printf("\n"); } #endif // DEBUG relatedInterval = newRelatedInterval; } // Assign the related interval, but only if it isn't already assigned. bool assignRelatedIntervalIfUnassigned(Interval* newRelatedInterval) { if (relatedInterval == nullptr) { assignRelatedInterval(newRelatedInterval); return true; } else { #ifdef DEBUG if (VERBOSE) { printf("Interval "); this->microDump(); printf(" already has a related interval\n"); } #endif // DEBUG return false; } } // Get the current preferences for this Interval. // Note that when we have an assigned register we don't necessarily update the // registerPreferences to that register, as there may be multiple, possibly disjoint, // definitions. This method will return the current assigned register if any, or // the 'registerPreferences' otherwise. // regMaskTP getCurrentPreferences() { return (assignedReg == nullptr) ? registerPreferences : genRegMask(assignedReg->regNum); } void mergeRegisterPreferences(regMaskTP preferences) { // We require registerPreferences to have been initialized. assert(registerPreferences != RBM_NONE); // It is invalid to update with empty preferences assert(preferences != RBM_NONE); regMaskTP commonPreferences = (registerPreferences & preferences); if (commonPreferences != RBM_NONE) { registerPreferences = commonPreferences; return; } // There are no preferences in common. // Preferences need to reflect both cases where a var must occupy a specific register, // as well as cases where a var is live when a register is killed. // In the former case, we would like to record all such registers, however we don't // really want to use any registers that will interfere. // To approximate this, we never "or" together multi-reg sets, which are generally kill sets. if (!genMaxOneBit(preferences)) { // The new preference value is a multi-reg set, so it's probably a kill. // Keep the new value. 
registerPreferences = preferences;
            return;
        }

        if (!genMaxOneBit(registerPreferences))
        {
            // The old preference value is a multi-reg set.
            // Keep the existing preference set, as it probably reflects one or more kills.
            // It may have been a union of multiple individual registers, but we can't
            // distinguish that case without extra cost.
            return;
        }

        // If we reach here, we have two disjoint single-reg sets.
        // Keep only the callee-save preferences, if not empty.
        // Otherwise, take the union of the preferences.

        regMaskTP newPreferences = registerPreferences | preferences;

        if (preferCalleeSave)
        {
            regMaskTP calleeSaveMask = (calleeSaveRegs(this->registerType) & (newPreferences));
            if (calleeSaveMask != RBM_NONE)
            {
                newPreferences = calleeSaveMask;
            }
        }
        registerPreferences = newPreferences;
    }

    // Update the registerPreferences on the interval.
    // If there are conflicting requirements on this interval, set the preferences to
    // the union of them. That way maybe we'll get at least one of them.
    // An exception is made in the case where either the existing or the new
    // preferences are all callee-save, in which case we "prefer" the callee-save set.
    void updateRegisterPreferences(regMaskTP preferences)
    {
        // If this interval is preferenced, that interval may have already been assigned a
        // register, and we want to include that in the preferences.
        if ((relatedInterval != nullptr) && !relatedInterval->isActive)
        {
            mergeRegisterPreferences(relatedInterval->getCurrentPreferences());
        }

        // Now merge the new preferences.
        mergeRegisterPreferences(preferences);
    }
};

class RefPosition
{
public:
    // A RefPosition refers to either an Interval or a RegRecord. 'referent' points to one
    // of these types. If it refers to a RegRecord, then 'isPhysRegRef()' is true. If it
    // refers to an Interval, then 'isPhysRegRef()' is false.
    // referent can never be null.

    Referenceable* referent;

    // nextRefPosition is the next in code order.
    // Note that in either case there is no need for these to be doubly linked, as they
    // are only traversed in the forward direction, and are not moved.
    RefPosition* nextRefPosition;

    // The remaining fields are common to both options
    GenTree*     treeNode;
    unsigned int bbNum;

    LsraLocation nodeLocation;

    // Prior to the allocation pass, registerAssignment captures the valid registers
    // for this RefPosition.
    // After the allocation pass, this contains the actual assignment
    regMaskTP registerAssignment;

    RefType refType;

    // NOTE: C++ only packs bitfields if the base type is the same. So make all the base
    // NOTE: types of the logically "bool" types that follow 'unsigned char', so they match
    // NOTE: RefType that precedes this, and multiRegIdx can also match.

    // Indicates whether this ref position is to be allocated a reg only if profitable. Currently these are the
    // ref positions that lower/codegen has indicated as reg optional and that are considered a contained memory
    // operand if no reg is allocated.
    unsigned char regOptional : 1;

    // Used by RefTypeDef/Use positions of a multi-reg call node.
    // Indicates the position of the register that this ref position refers to.
    // The max bits needed is based on the max value of MAX_RET_REG_COUNT
    // across all targets, and that happens to be 4, on Arm. Hence index value
    // would be 0..MAX_RET_REG_COUNT-1.
    unsigned char multiRegIdx : 2;

    // Last Use - this may be true for multiple RefPositions in the same Interval
    unsigned char lastUse : 1;

    // Spill and Copy info
    //   reload indicates that the value was spilled, and must be reloaded here.
//   spillAfter indicates that the value is spilled here, so a spill must be added.
    //   singleDefSpill indicates that it is associated with a single-def var and that,
    //   if it is decided to get spilled, it will be spilled at the firstRefPosition def. That
    //   way, the value on the stack will always be up-to-date and no more spills or
    //   resolutions (from reg to stack) will be needed for such a single-def var.
    //   copyReg indicates that the value needs to be copied to a specific register,
    //   but that it will also retain its current assigned register.
    //   moveReg indicates that the value needs to be moved to a different register,
    //   and that this will be its new assigned register.
    // A RefPosition may have any flag individually or the following combinations:
    //  - reload and spillAfter (i.e. it remains in memory), but not in combination with copyReg or moveReg
    //    (reload cannot exist with copyReg or moveReg; it should be reloaded into the appropriate reg)
    //  - spillAfter and copyReg (i.e. it must be copied to a new reg for use, but is then spilled)
    //  - spillAfter and moveReg (i.e. it must be both spilled and moved)
    //  NOTE: a moveReg involves an explicit move, and would usually not be needed for a fixed Reg if it is going
    //  to be spilled, because the code generator will do the move to the fixed register, and doesn't need to
    //  record the new register location as the new "home" location of the lclVar. However, if there is a conflicting
    //  use at the same location (e.g. lclVar V1 is in rdx and needs to be in rcx, but V2 needs to be in rdx), then
    //  we need an explicit move.
    //  - copyReg and moveReg must not exist with each other.

    unsigned char reload : 1;
    unsigned char spillAfter : 1;
    unsigned char singleDefSpill : 1;
    unsigned char writeThru : 1; // true if this var is defined in a register and also spilled. spillAfter must NOT be
                                 // set.

    unsigned char copyReg : 1;
    unsigned char moveReg : 1; // true if this var is moved to a new register

    unsigned char isPhysRegRef : 1; // true if 'referent' points to a RegRecord, false if it points to an Interval
    unsigned char isFixedRegRef : 1;
    unsigned char isLocalDefUse : 1;

    // delayRegFree indicates that the register should not be freed right away, but instead wait
    // until the next Location after it would normally be freed. This is used for the case of
    // non-commutative binary operators, where op2 must not be assigned the same register as
    // the target. We do this by not freeing it until after the target has been defined.
    // Another option would be to actually change the Location of the op2 use until the same
    // Location as the def, but then it could potentially reuse a register that has been freed
    // from the other source(s), e.g. if it's a lastUse or spilled.
    unsigned char delayRegFree : 1;

    // outOfOrder is marked on a (non-def) RefPosition that doesn't follow a definition of the
    // register currently assigned to the Interval. This happens when we use the assigned
    // register from a predecessor that is not the most recently allocated BasicBlock.
    unsigned char outOfOrder : 1;

#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
    // If upper vector save/restore can be avoided.
    unsigned char skipSaveRestore : 1;
#endif

#ifdef DEBUG
    // Minimum number of registers that need to be ensured while
    // constraining candidates for this ref position under
    // LSRA stress.
    unsigned minRegCandidateCount;

    // The unique RefPosition number, equal to its index in the
    // refPositions list. Only used for debugging dumps.
    unsigned rpNum;
#endif // DEBUG

    RefPosition(unsigned int bbNum, LsraLocation nodeLocation, GenTree* treeNode, RefType refType)
        : referent(nullptr)
        , nextRefPosition(nullptr)
        , treeNode(treeNode)
        , bbNum(bbNum)
        , nodeLocation(nodeLocation)
        , registerAssignment(RBM_NONE)
        , refType(refType)
        , multiRegIdx(0)
        , lastUse(false)
        , reload(false)
        , spillAfter(false)
        , singleDefSpill(false)
        , writeThru(false)
        , copyReg(false)
        , moveReg(false)
        , isPhysRegRef(false)
        , isFixedRegRef(false)
        , isLocalDefUse(false)
        , delayRegFree(false)
        , outOfOrder(false)
#ifdef DEBUG
        , minRegCandidateCount(1)
        , rpNum(0)
#endif
    {
    }

    Interval* getInterval()
    {
        assert(!isPhysRegRef);
        return (Interval*)referent;
    }
    void setInterval(Interval* i)
    {
        referent     = i;
        isPhysRegRef = false;
    }

    RegRecord* getReg()
    {
        assert(isPhysRegRef);
        return (RegRecord*)referent;
    }
    void setReg(RegRecord* r)
    {
        referent           = r;
        isPhysRegRef       = true;
        registerAssignment = genRegMask(r->regNum);
    }

    regNumber assignedReg()
    {
        if (registerAssignment == RBM_NONE)
        {
            return REG_NA;
        }

        return genRegNumFromMask(registerAssignment);
    }

    // Returns true if it is a reference on a gentree node.
    bool IsActualRef()
    {
        switch (refType)
        {
            case RefTypeDef:
            case RefTypeUse:
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
            case RefTypeUpperVectorSave:
            case RefTypeUpperVectorRestore:
#endif
                return true;

            // These must always be marked RegOptional.
            case RefTypeExpUse:
            case RefTypeParamDef:
            case RefTypeDummyDef:
            case RefTypeZeroInit:
                assert(RegOptional());
                return false;

            default:
                return false;
        }
    }

    bool IsPhysRegRef()
    {
        return ((refType == RefTypeFixedReg) || (refType == RefTypeKill));
    }

    void setRegOptional(bool val)
    {
        regOptional = val;
    }

    // Returns true if this ref position is to be allocated
    // a reg only if it is profitable.
    bool RegOptional()
    {
        // TODO-CQ: Right now if a ref position is marked as
        // copyreg or movereg, then it is not treated as
        // 'allocate if profitable'. This is an implementation
        // limitation that needs to be addressed.
        return regOptional && !copyReg && !moveReg;
    }

    void setMultiRegIdx(unsigned idx)
    {
        multiRegIdx = idx;
        assert(multiRegIdx == idx);
    }

    unsigned getMultiRegIdx()
    {
        return multiRegIdx;
    }

    LsraLocation getRefEndLocation()
    {
        return delayRegFree ? nodeLocation + 1 : nodeLocation;
    }

    RefPosition* getRangeEndRef()
    {
        if (lastUse || nextRefPosition == nullptr || spillAfter)
        {
            return this;
        }
        // It would seem to make sense to only return 'nextRefPosition' if it is a lastUse,
        // and otherwise return 'lastRefPosition', but that tends to excessively lengthen
        // the range for heuristic purposes.
        // TODO-CQ: Look into how this might be improved.
return nextRefPosition; } LsraLocation getRangeEndLocation() { return getRangeEndRef()->getRefEndLocation(); } bool isIntervalRef() { return (!IsPhysRegRef() && (referent != nullptr)); } // isFixedRefOfRegMask indicates that the RefPosition has a fixed assignment to the register // specified by the given mask bool isFixedRefOfRegMask(regMaskTP regMask) { assert(genMaxOneBit(regMask)); return (registerAssignment == regMask); } // isFixedRefOfReg indicates that the RefPosition has a fixed assignment to the given register bool isFixedRefOfReg(regNumber regNum) { return (isFixedRefOfRegMask(genRegMask(regNum))); } #ifdef DEBUG // operator= copies everything except 'rpNum', which must remain unique RefPosition& operator=(const RefPosition& rp) { unsigned rpNumSave = rpNum; memcpy(this, &rp, sizeof(rp)); rpNum = rpNumSave; return *this; } void dump(LinearScan* linearScan); #endif // DEBUG }; #ifdef DEBUG void dumpRegMask(regMaskTP regs); #endif // DEBUG /*****************************************************************************/ #endif //_LSRA_H_ /*****************************************************************************/
//
class RefInfoListNode final : public RefInfo
{
    friend class RefInfoList;
    friend class RefInfoListNodePool;

    RefInfoListNode* m_next; // The next node in the list

public:
    RefInfoListNode(RefPosition* r, GenTree* t) : RefInfo(r, t)
    {
    }

    //------------------------------------------------------------------------
    // RefInfoListNode::Next: Returns the next node in the list.
    RefInfoListNode* Next() const
    {
        return m_next;
    }
};

//------------------------------------------------------------------------
// RefInfoList: used to store a list of `RefInfo` values for a
//              node during `buildIntervals`.
//
// This list of 'RefInfoListNode's contains the source nodes consumed by
// a node, and is created by 'BuildNode'.
//
class RefInfoList final
{
    friend class RefInfoListNodePool;

    RefInfoListNode* m_head; // The head of the list
    RefInfoListNode* m_tail; // The tail of the list

public:
    RefInfoList() : m_head(nullptr), m_tail(nullptr)
    {
    }

    RefInfoList(RefInfoListNode* node) : m_head(node), m_tail(node)
    {
        assert(m_head->m_next == nullptr);
    }

    //------------------------------------------------------------------------
    // RefInfoList::IsEmpty: Returns true if the list is empty.
    //
    bool IsEmpty() const
    {
        return m_head == nullptr;
    }

    //------------------------------------------------------------------------
    // RefInfoList::Begin: Returns the first node in the list.
    //
    RefInfoListNode* Begin() const
    {
        return m_head;
    }

    //------------------------------------------------------------------------
    // RefInfoList::End: Returns the position after the last node in the
    //                   list. The returned value is suitable for use as
    //                   a sentinel for iteration.
    //
    RefInfoListNode* End() const
    {
        return nullptr;
    }

    //------------------------------------------------------------------------
    // RefInfoList::Last: Returns the last node in the list.
    //
    RefInfoListNode* Last() const
    {
        return m_tail;
    }

    //------------------------------------------------------------------------
    // RefInfoList::Append: Appends a node to the list.
    //
    // Arguments:
    //    node - The node to append. Must not be part of an existing list.
    //
    void Append(RefInfoListNode* node)
    {
        assert(node->m_next == nullptr);

        if (m_tail == nullptr)
        {
            assert(m_head == nullptr);
            m_head = node;
        }
        else
        {
            m_tail->m_next = node;
        }

        m_tail = node;
    }
    //------------------------------------------------------------------------
    // RefInfoList::Append: Appends another list to this list.
    //
    // Arguments:
    //    other - The list to append.
    //
    void Append(RefInfoList other)
    {
        if (m_tail == nullptr)
        {
            assert(m_head == nullptr);
            m_head = other.m_head;
        }
        else
        {
            m_tail->m_next = other.m_head;
        }

        m_tail = other.m_tail;
    }

    //------------------------------------------------------------------------
    // RefInfoList::Prepend: Prepends a node to the list.
    //
    // Arguments:
    //    node - The node to prepend. Must not be part of an existing list.
    //
    void Prepend(RefInfoListNode* node)
    {
        assert(node->m_next == nullptr);

        if (m_head == nullptr)
        {
            assert(m_tail == nullptr);
            m_tail = node;
        }
        else
        {
            node->m_next = m_head;
        }

        m_head = node;
    }

    //------------------------------------------------------------------------
    // RefInfoList::Add: Adds a node to the list.
    //
    // Arguments:
    //    node - The node to add. Must not be part of an existing list.
    //    prepend - True if it should be prepended (otherwise it is appended)
    //
    void Add(RefInfoListNode* node, bool prepend)
    {
        if (prepend)
        {
            Prepend(node);
        }
        else
        {
            Append(node);
        }
    }

    //------------------------------------------------------------------------
    // removeListNode - remove the given node from the list and return it
    //
    // Notes:
    //     The BuildNode methods use this helper to retrieve the RefInfo for child nodes
    //     from the useList being constructed.
    //
    RefInfoListNode* removeListNode(RefInfoListNode* listNode, RefInfoListNode* prevListNode)
    {
        RefInfoListNode* nextNode = listNode->Next();
        if (prevListNode == nullptr)
        {
            m_head = nextNode;
        }
        else
        {
            prevListNode->m_next = nextNode;
        }
        if (nextNode == nullptr)
        {
            m_tail = prevListNode;
        }
        listNode->m_next = nullptr;
        return listNode;
    }

    // removeListNode - remove the RefInfoListNode for the given GenTree node from the defList
    RefInfoListNode* removeListNode(GenTree* node);
    // Same as above but takes a multiRegIdx to support multi-reg nodes.
    RefInfoListNode* removeListNode(GenTree* node, unsigned multiRegIdx);

    //------------------------------------------------------------------------
    // GetRefPosition - retrieve the RefPosition for the given node
    //
    // Notes:
    //     The Build methods use this helper to retrieve the RefPosition for child nodes
    //     from the useList being constructed. Note that, if the user knows the order of the operands,
    //     it is expected that they should just retrieve them directly.
    //
    RefPosition* GetRefPosition(GenTree* node)
    {
        for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next())
        {
            if (listNode->treeNode == node)
            {
                return listNode->ref;
            }
        }
        assert(!"GetRefPosition didn't find the node");
        unreached();
    }

    //------------------------------------------------------------------------
    // RefInfoList::GetSecond: Gets the second node in the list.
    //
    // Arguments:
    //    (DEBUG ONLY) treeNode - The GenTree* we expect to be in the second node.
    //
    RefInfoListNode* GetSecond(INDEBUG(GenTree* treeNode))
    {
        noway_assert((Begin() != nullptr) && (Begin()->Next() != nullptr));
        RefInfoListNode* second = Begin()->Next();
        assert(second->treeNode == treeNode);
        return second;
    }

#ifdef DEBUG
    // Count - return the number of nodes in the list (DEBUG only)
    int Count()
    {
        int count = 0;
        for (RefInfoListNode *listNode = Begin(), *end = End(); listNode != end; listNode = listNode->Next())
        {
            count++;
        }
        return count;
    }
#endif // DEBUG
};

//------------------------------------------------------------------------
// RefInfoListNodePool: manages a pool of `RefInfoListNode`
//                      values to decrease overall memory usage
//                      during `buildIntervals`.
//
// `buildIntervals` involves creating a list of RefInfo items per
// node that either directly produces a set of registers or that is a
// contained node with register-producing sources. However, these lists
// are short-lived: they are destroyed once the use of the corresponding
// node is processed. As such, there is typically only a small number of
// `RefInfoListNode` values in use at any given time. Pooling these
// values avoids otherwise frequent allocations.
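//
// For illustration only -- a hypothetical usage sketch of the pool's def/use cycle,
// using the GetNode/ReturnNode methods declared below and the LinearScan members
// 'listNodePool' and 'defList' ('defRefPosition' here is an illustrative name):
//
//   RefInfoListNode* node = listNodePool.GetNode(defRefPosition, tree); // may reuse a freed node
//   defList.Append(node);
//   // ... later, when the consuming node is built ...
//   RefInfoListNode* use = defList.removeListNode(tree);
//   listNodePool.ReturnNode(use); // returned to the free list for reuse
//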
class RefInfoListNodePool final
{
    RefInfoListNode*      m_freeList;
    Compiler*             m_compiler;
    static const unsigned defaultPreallocation = 8;

public:
    RefInfoListNodePool(Compiler* compiler, unsigned preallocate = defaultPreallocation);
    RefInfoListNode* GetNode(RefPosition* r, GenTree* t);
    void ReturnNode(RefInfoListNode* listNode);
};

#if TRACK_LSRA_STATS
enum LsraStat
{
#define LSRA_STAT_DEF(enum_name, enum_str) enum_name,
#include "lsra_stats.h"
#undef LSRA_STAT_DEF
#define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) STAT_##enum_name,
#include "lsra_score.h"
#undef REG_SEL_DEF
    COUNT
};
#endif // TRACK_LSRA_STATS

struct LsraBlockInfo
{
    // bbNum of the predecessor to use for the register location of live-in variables.
    // 0 for fgFirstBB.
    unsigned int predBBNum;
    weight_t     weight;
    bool         hasCriticalInEdge : 1;
    bool         hasCriticalOutEdge : 1;
    bool         hasEHBoundaryIn : 1;
    bool         hasEHBoundaryOut : 1;
    bool         hasEHPred : 1;

#if TRACK_LSRA_STATS
    // Per block maintained LSRA statistics.
    unsigned stats[LsraStat::COUNT];
#endif // TRACK_LSRA_STATS
};

enum RegisterScore
{
#define REG_SEL_DEF(enum_name, value, short_str, orderSeqId) enum_name = value,
#include "lsra_score.h"
#undef REG_SEL_DEF
    NONE = 0
};

// This is sort of a bit mask:
// the low order 2 bits will be 1 for defs, and 2 for uses
enum RefType : unsigned char
{
#define DEF_REFTYPE(memberName, memberValue, shortName) memberName = memberValue,
#include "lsra_reftypes.h"
#undef DEF_REFTYPE
};

// position in a block (for resolution)
enum BlockStartOrEnd
{
    BlockPositionStart = 0,
    BlockPositionEnd   = 1,
    PositionCount      = 2
};

inline bool RefTypeIsUse(RefType refType)
{
    return ((refType & RefTypeUse) == RefTypeUse);
}

inline bool RefTypeIsDef(RefType refType)
{
    return ((refType & RefTypeDef) == RefTypeDef);
}

typedef regNumberSmall* VarToRegMap;

typedef jitstd::list<Interval>                      IntervalList;
typedef jitstd::list<RefPosition>                   RefPositionList;
typedef jitstd::list<RefPosition>::iterator         RefPositionIterator;
typedef jitstd::list<RefPosition>::reverse_iterator RefPositionReverseIterator;

class Referenceable
{
public:
    Referenceable()
    {
        firstRefPosition  = nullptr;
        recentRefPosition = nullptr;
        lastRefPosition   = nullptr;
    }

    // A linked list of RefPositions. These are only traversed in the forward
    // direction, and are not moved, so they don't need to be doubly linked
    // (see RefPosition).

    RefPosition* firstRefPosition;
    RefPosition* recentRefPosition;
    RefPosition* lastRefPosition;

    // Get the position of the next reference which is at or greater than
    // the current location (relies upon recentRefPosition being updated
    // during traversal).
RefPosition* getNextRefPosition(); LsraLocation getNextRefLocation(); }; class RegRecord : public Referenceable { public: RegRecord() { assignedInterval = nullptr; previousInterval = nullptr; regNum = REG_NA; isCalleeSave = false; registerType = IntRegisterType; } void init(regNumber reg) { #ifdef TARGET_ARM64 // The Zero register, or the SP if ((reg == REG_ZR) || (reg == REG_SP)) { // IsGeneralRegister returns false for REG_ZR and REG_SP regNum = reg; registerType = IntRegisterType; } else #endif if (emitter::isFloatReg(reg)) { registerType = FloatRegisterType; } else { // The constructor defaults to IntRegisterType assert(emitter::isGeneralRegister(reg) && registerType == IntRegisterType); } regNum = reg; isCalleeSave = ((RBM_CALLEE_SAVED & genRegMask(reg)) != 0); } #ifdef DEBUG // print out representation void dump(); // concise representation for embedding void tinyDump(); #endif // DEBUG // DATA // interval to which this register is currently allocated. // If the interval is inactive (isActive == false) then it is not currently live, // and the register can be unassigned (i.e. setting assignedInterval to nullptr) // without spilling the register. Interval* assignedInterval; // Interval to which this register was previously allocated, and which was unassigned // because it was inactive. This register will be reassigned to this Interval when // assignedInterval becomes inactive. Interval* previousInterval; regNumber regNum; bool isCalleeSave; RegisterType registerType; unsigned char regOrder; }; inline bool leafInRange(GenTree* leaf, int lower, int upper) { if (!leaf->IsIntCnsFitsInI32()) { return false; } if (leaf->AsIntCon()->gtIconVal < lower) { return false; } if (leaf->AsIntCon()->gtIconVal > upper) { return false; } return true; } inline bool leafInRange(GenTree* leaf, int lower, int upper, int multiple) { if (!leafInRange(leaf, lower, upper)) { return false; } if (leaf->AsIntCon()->gtIconVal % multiple) { return false; } return true; } inline bool leafAddInRange(GenTree* leaf, int lower, int upper, int multiple = 1) { if (leaf->OperGet() != GT_ADD) { return false; } return leafInRange(leaf->gtGetOp2(), lower, upper, multiple); } inline bool isCandidateVar(const LclVarDsc* varDsc) { return varDsc->lvLRACandidate; } /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX LinearScan XX XX XX XX This is the container for the Linear Scan data structures and methods. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // OPTION 1: The algorithm as described in "Optimized Interval Splitting in a // Linear Scan Register Allocator". It is driven by iterating over the Interval // lists. In this case, we need multiple IntervalLists, and Intervals will be // moved between them so they must be easily updated. // OPTION 2: The algorithm is driven by iterating over the RefPositions. In this // case, we only need a single IntervalList, and it won't be updated. // The RefPosition must refer to its Interval, and we need to be able to traverse // to the next RefPosition in code order // THIS IS THE OPTION CURRENTLY BEING PURSUED class LinearScan : public LinearScanInterface { friend class RefPosition; friend class Interval; friend class Lowering; public: // This could use further abstraction. From Compiler we need the tree, // the flowgraph and the allocator. 
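    // For orientation (a summary of the phase order, not code): doLinearScan()
    // below drives the three main phases in sequence --
    //
    //   buildIntervals();     // dataflow analysis; builds Intervals and RefPositions
    //   allocateRegisters();  // walks RefPositions in order and assigns registers
    //   resolveRegisters();   // fixes up cross-block location mismatches
    //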
LinearScan(Compiler* theCompiler); // This is the main driver virtual void doLinearScan(); static bool isSingleRegister(regMaskTP regMask) { return (genExactlyOneBit(regMask)); } // Initialize the block traversal for LSRA. // This resets the bbVisitedSet, and on the first invocation sets the blockSequence array, // which determines the order in which blocks will be allocated (currently called during Lowering). BasicBlock* startBlockSequence(); // Move to the next block in sequence, updating the current block information. BasicBlock* moveToNextBlock(); // Get the next block to be scheduled without changing the current block, // but updating the blockSequence during the first iteration if it is not fully computed. BasicBlock* getNextBlock(); // This is called during code generation to update the location of variables virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb); // This does the dataflow analysis and builds the intervals void buildIntervals(); // This is where the actual assignment is done void allocateRegisters(); // This is the resolution phase, where cross-block mismatches are fixed up void resolveRegisters(); void writeRegisters(RefPosition* currentRefPosition, GenTree* tree); // Insert a copy in the case where a tree node value must be moved to a different // register at the point of use, or it is reloaded to a different register // than the one it was spilled from void insertCopyOrReload(BasicBlock* block, GenTree* tree, unsigned multiRegIdx, RefPosition* refPosition); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE void makeUpperVectorInterval(unsigned varIndex); Interval* getUpperVectorInterval(unsigned varIndex); // Save the upper half of a vector that lives in a callee-save register at the point of a call. void insertUpperVectorSave(GenTree* tree, RefPosition* refPosition, Interval* upperVectorInterval, BasicBlock* block); // Restore the upper half of a vector that's been partially spilled prior to a use in 'tree'. void insertUpperVectorRestore(GenTree* tree, RefPosition* refPosition, Interval* upperVectorInterval, BasicBlock* block); #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE // resolve along one block-block edge enum ResolveType { ResolveSplit, ResolveJoin, ResolveCritical, ResolveSharedCritical, ResolveTypeCount }; #ifdef DEBUG static const char* resolveTypeName[ResolveTypeCount]; #endif enum WhereToInsert { InsertAtTop, InsertAtBottom }; #ifdef TARGET_ARM void addResolutionForDouble(BasicBlock* block, GenTree* insertionPoint, Interval** sourceIntervals, regNumberSmall* location, regNumber toReg, regNumber fromReg, ResolveType resolveType); #endif void addResolution( BasicBlock* block, GenTree* insertionPoint, Interval* interval, regNumber outReg, regNumber inReg); void handleOutgoingCriticalEdges(BasicBlock* block); void resolveEdge(BasicBlock* fromBlock, BasicBlock* toBlock, ResolveType resolveType, VARSET_VALARG_TP liveSet); void resolveEdges(); // Keep track of how many temp locations we'll need for spill void initMaxSpill(); void updateMaxSpill(RefPosition* refPosition); void recordMaxSpill(); // max simultaneous spill locations used of every type unsigned int maxSpill[TYP_COUNT]; unsigned int currentSpill[TYP_COUNT]; bool needFloatTmpForFPCall; bool needDoubleTmpForFPCall; #ifdef DEBUG private: //------------------------------------------------------------------------ // Should we stress lsra? This uses the COMPlus_JitStressRegs variable. // // The mask bits are currently divided into fields in which each non-zero value // is a distinct stress option (e.g. 
0x3 is not a combination of 0x1 and 0x2).
    // However, subject to possible constraints (to be determined), the different
    // fields can be combined (e.g. 0x7 is a combination of 0x3 and 0x4).
    // Note that the field values are declared in a public enum, but the actual bits are
    // only accessed via accessors.

    unsigned lsraStressMask;

    // This controls the registers available for allocation
    enum LsraStressLimitRegs{LSRA_LIMIT_NONE = 0, LSRA_LIMIT_CALLEE = 0x1, LSRA_LIMIT_CALLER = 0x2,
                             LSRA_LIMIT_SMALL_SET = 0x3, LSRA_LIMIT_MASK = 0x3};

    // When LSRA_LIMIT_SMALL_SET is specified, it is desirable to select a "mixed" set of caller- and callee-save
    // registers, so as to get different coverage than limiting to callee or caller.
    // At least for x86 and AMD64, and potentially other architectures that will support SIMD,
    // we need a minimum of 5 fp regs in order to support the InitN intrinsic for Vector4.
    // Hence the "SmallFPSet" has 5 elements.
    CLANG_FORMAT_COMMENT_ANCHOR;

#if defined(TARGET_AMD64)
#ifdef UNIX_AMD64_ABI
    // On System V the RDI and RSI are not callee saved. Use R12 and R13 as callee saved registers.
    static const regMaskTP LsraLimitSmallIntSet =
        (RBM_EAX | RBM_ECX | RBM_EBX | RBM_ETW_FRAMED_EBP | RBM_R12 | RBM_R13);
#else  // !UNIX_AMD64_ABI
    // On Windows Amd64 use the RDI and RSI as callee saved registers.
    static const regMaskTP LsraLimitSmallIntSet =
        (RBM_EAX | RBM_ECX | RBM_EBX | RBM_ETW_FRAMED_EBP | RBM_ESI | RBM_EDI);
#endif // !UNIX_AMD64_ABI
    static const regMaskTP LsraLimitSmallFPSet = (RBM_XMM0 | RBM_XMM1 | RBM_XMM2 | RBM_XMM6 | RBM_XMM7);
#elif defined(TARGET_ARM)
    // On ARM, we may need two registers to set up the target register for a virtual call, so we need
    // to have at least the maximum number of arg registers, plus 2.
    static const regMaskTP LsraLimitSmallIntSet = (RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5);
    static const regMaskTP LsraLimitSmallFPSet  = (RBM_F0 | RBM_F1 | RBM_F2 | RBM_F16 | RBM_F17);
#elif defined(TARGET_ARM64)
    static const regMaskTP LsraLimitSmallIntSet = (RBM_R0 | RBM_R1 | RBM_R2 | RBM_R19 | RBM_R20);
    static const regMaskTP LsraLimitSmallFPSet  = (RBM_V0 | RBM_V1 | RBM_V2 | RBM_V8 | RBM_V9);
#elif defined(TARGET_X86)
    static const regMaskTP LsraLimitSmallIntSet = (RBM_EAX | RBM_ECX | RBM_EDI);
    static const regMaskTP LsraLimitSmallFPSet  = (RBM_XMM0 | RBM_XMM1 | RBM_XMM2 | RBM_XMM6 | RBM_XMM7);
#else
#error Unsupported or unset target architecture
#endif // target

    LsraStressLimitRegs getStressLimitRegs()
    {
        return (LsraStressLimitRegs)(lsraStressMask & LSRA_LIMIT_MASK);
    }

    regMaskTP getConstrainedRegMask(regMaskTP regMaskActual, regMaskTP regMaskConstrain, unsigned minRegCount);
    regMaskTP stressLimitRegs(RefPosition* refPosition, regMaskTP mask);

    // This controls the heuristics used to select registers
    // These can be combined.
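    // For example (values from the enum below): COMPlus_JitStressRegs=0x14 combines
    // LSRA_SELECT_REVERSE_HEURISTICS (0x04) with LSRA_SELECT_NEAREST (0x10), both of
    // which lie within LSRA_SELECT_MASK (0x1c).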
    enum LsraSelect{LSRA_SELECT_DEFAULT = 0, LSRA_SELECT_REVERSE_HEURISTICS = 0x04,
                    LSRA_SELECT_REVERSE_CALLER_CALLEE = 0x08, LSRA_SELECT_NEAREST = 0x10, LSRA_SELECT_MASK = 0x1c};
    LsraSelect getSelectionHeuristics()
    {
        return (LsraSelect)(lsraStressMask & LSRA_SELECT_MASK);
    }
    bool doReverseSelect()
    {
        return ((lsraStressMask & LSRA_SELECT_REVERSE_HEURISTICS) != 0);
    }
    bool doReverseCallerCallee()
    {
        return ((lsraStressMask & LSRA_SELECT_REVERSE_CALLER_CALLEE) != 0);
    }
    bool doSelectNearest()
    {
        return ((lsraStressMask & LSRA_SELECT_NEAREST) != 0);
    }

    // This controls the order in which basic blocks are visited during allocation
    enum LsraTraversalOrder{LSRA_TRAVERSE_LAYOUT = 0x20, LSRA_TRAVERSE_PRED_FIRST = 0x40,
                            LSRA_TRAVERSE_RANDOM = 0x60, // NYI
                            LSRA_TRAVERSE_DEFAULT = LSRA_TRAVERSE_PRED_FIRST, LSRA_TRAVERSE_MASK = 0x60};
    LsraTraversalOrder getLsraTraversalOrder()
    {
        if ((lsraStressMask & LSRA_TRAVERSE_MASK) == 0)
        {
            return LSRA_TRAVERSE_DEFAULT;
        }
        return (LsraTraversalOrder)(lsraStressMask & LSRA_TRAVERSE_MASK);
    }
    bool isTraversalLayoutOrder()
    {
        return getLsraTraversalOrder() == LSRA_TRAVERSE_LAYOUT;
    }
    bool isTraversalPredFirstOrder()
    {
        return getLsraTraversalOrder() == LSRA_TRAVERSE_PRED_FIRST;
    }

    // This controls whether lifetimes should be extended to the entire method.
    // Note that this has no effect under MinOpts
    enum LsraExtendLifetimes{LSRA_DONT_EXTEND = 0, LSRA_EXTEND_LIFETIMES = 0x80, LSRA_EXTEND_LIFETIMES_MASK = 0x80};
    LsraExtendLifetimes getLsraExtendLifeTimes()
    {
        return (LsraExtendLifetimes)(lsraStressMask & LSRA_EXTEND_LIFETIMES_MASK);
    }
    bool extendLifetimes()
    {
        return getLsraExtendLifeTimes() == LSRA_EXTEND_LIFETIMES;
    }

    // This controls whether variable locations should be set to the previous block in layout order
    // (LSRA_BLOCK_BOUNDARY_LAYOUT), or to that of the highest-weight predecessor (LSRA_BLOCK_BOUNDARY_PRED -
    // the default), or rotated (LSRA_BLOCK_BOUNDARY_ROTATE).
    enum LsraBlockBoundaryLocations{LSRA_BLOCK_BOUNDARY_PRED = 0, LSRA_BLOCK_BOUNDARY_LAYOUT = 0x100,
                                    LSRA_BLOCK_BOUNDARY_ROTATE = 0x200, LSRA_BLOCK_BOUNDARY_MASK = 0x300};
    LsraBlockBoundaryLocations getLsraBlockBoundaryLocations()
    {
        return (LsraBlockBoundaryLocations)(lsraStressMask & LSRA_BLOCK_BOUNDARY_MASK);
    }
    regNumber rotateBlockStartLocation(Interval* interval, regNumber targetReg, regMaskTP availableRegs);

    // This controls whether we always insert a GT_RELOAD instruction after a spill
    // Note that this can be combined with LSRA_SPILL_ALWAYS (or not)
    enum LsraReload{LSRA_NO_RELOAD_IF_SAME = 0, LSRA_ALWAYS_INSERT_RELOAD = 0x400, LSRA_RELOAD_MASK = 0x400};
    LsraReload getLsraReload()
    {
        return (LsraReload)(lsraStressMask & LSRA_RELOAD_MASK);
    }
    bool alwaysInsertReload()
    {
        return getLsraReload() == LSRA_ALWAYS_INSERT_RELOAD;
    }

    // This controls whether we spill everywhere
    enum LsraSpill{LSRA_DONT_SPILL_ALWAYS = 0, LSRA_SPILL_ALWAYS = 0x800, LSRA_SPILL_MASK = 0x800};
    LsraSpill getLsraSpill()
    {
        return (LsraSpill)(lsraStressMask & LSRA_SPILL_MASK);
    }
    bool spillAlways()
    {
        return getLsraSpill() == LSRA_SPILL_ALWAYS;
    }

    // This controls whether RefPositions that lower/codegen indicated as reg optional should be
    // allocated a reg at all.
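    // For example (values from the enum below): with COMPlus_JitStressRegs=0x1000
    // (LSRA_REG_OPTIONAL_NO_ALLOC), a RefPosition marked regOptional is never given
    // a register, forcing it to be treated as a contained memory operand.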
enum LsraRegOptionalControl{LSRA_REG_OPTIONAL_DEFAULT = 0, LSRA_REG_OPTIONAL_NO_ALLOC = 0x1000, LSRA_REG_OPTIONAL_MASK = 0x1000}; LsraRegOptionalControl getLsraRegOptionalControl() { return (LsraRegOptionalControl)(lsraStressMask & LSRA_REG_OPTIONAL_MASK); } bool regOptionalNoAlloc() { return getLsraRegOptionalControl() == LSRA_REG_OPTIONAL_NO_ALLOC; } bool candidatesAreStressLimited() { return ((lsraStressMask & (LSRA_LIMIT_MASK | LSRA_SELECT_MASK)) != 0); } // Dump support void dumpDefList(); void lsraDumpIntervals(const char* msg); void dumpRefPositions(const char* msg); void dumpVarRefPositions(const char* msg); // Checking code static bool IsLsraAdded(GenTree* node) { return ((node->gtDebugFlags & GTF_DEBUG_NODE_LSRA_ADDED) != 0); } static void SetLsraAdded(GenTree* node) { node->gtDebugFlags |= GTF_DEBUG_NODE_LSRA_ADDED; } static bool IsResolutionMove(GenTree* node); static bool IsResolutionNode(LIR::Range& containingRange, GenTree* node); void verifyFinalAllocation(); void verifyResolutionMove(GenTree* resolutionNode, LsraLocation currentLocation); #else // !DEBUG bool doSelectNearest() { return false; } bool extendLifetimes() { return false; } bool spillAlways() { return false; } // In a retail build we support only the default traversal order bool isTraversalLayoutOrder() { return false; } bool isTraversalPredFirstOrder() { return true; } bool getLsraExtendLifeTimes() { return false; } static void SetLsraAdded(GenTree* node) { // do nothing; checked only under #DEBUG } bool candidatesAreStressLimited() { return false; } #endif // !DEBUG public: // Used by Lowering when considering whether to split Longs, as well as by identifyCandidates(). bool isRegCandidate(LclVarDsc* varDsc); bool isContainableMemoryOp(GenTree* node); private: // Determine which locals are candidates for allocation void identifyCandidates(); // determine which locals are used in EH constructs we don't want to deal with void identifyCandidatesExceptionDataflow(); void buildPhysRegRecords(); #ifdef DEBUG void checkLastUses(BasicBlock* block); int ComputeOperandDstCount(GenTree* operand); int ComputeAvailableSrcCount(GenTree* node); #endif // DEBUG void setFrameType(); // Update allocations at start/end of block void unassignIntervalBlockStart(RegRecord* regRecord, VarToRegMap inVarToRegMap); void processBlockEndAllocation(BasicBlock* current); // Record variable locations at start/end of block void processBlockStartLocations(BasicBlock* current); void processBlockEndLocations(BasicBlock* current); #ifdef TARGET_ARM bool isSecondHalfReg(RegRecord* regRec, Interval* interval); RegRecord* getSecondHalfRegRec(RegRecord* regRec); RegRecord* findAnotherHalfRegRec(RegRecord* regRec); regNumber findAnotherHalfRegNum(regNumber regNum); bool canSpillDoubleReg(RegRecord* physRegRecord, LsraLocation refLocation); void unassignDoublePhysReg(RegRecord* doubleRegRecord); #endif void updateAssignedInterval(RegRecord* reg, Interval* interval, RegisterType regType); void updatePreviousInterval(RegRecord* reg, Interval* interval, RegisterType regType); bool canRestorePreviousInterval(RegRecord* regRec, Interval* assignedInterval); bool isAssignedToInterval(Interval* interval, RegRecord* regRec); bool isRefPositionActive(RefPosition* refPosition, LsraLocation refLocation); bool canSpillReg(RegRecord* physRegRecord, LsraLocation refLocation); weight_t getSpillWeight(RegRecord* physRegRecord); // insert refpositions representing prolog zero-inits which will be added later void insertZeroInitRefPositions(); // add physreg refpositions 
for a tree node, based on calling convention and instruction selection predictions void addRefsForPhysRegMask(regMaskTP mask, LsraLocation currentLoc, RefType refType, bool isLastUse); void resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefPosition); void buildRefPositionsForNode(GenTree* tree, LsraLocation loc); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE void buildUpperVectorSaveRefPositions(GenTree* tree, LsraLocation currentLoc, regMaskTP fpCalleeKillSet); void buildUpperVectorRestoreRefPosition(Interval* lclVarInterval, LsraLocation currentLoc, GenTree* node, bool isUse); #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if defined(UNIX_AMD64_ABI) // For AMD64 on SystemV machines. This method // is called as replacement for raUpdateRegStateForArg // that is used on Windows. On System V systems a struct can be passed // partially using registers from the 2 register files. void unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc); #endif // defined(UNIX_AMD64_ABI) // Update reg state for an incoming register argument void updateRegStateForArg(LclVarDsc* argDsc); inline bool isCandidateLocalRef(GenTree* tree) { if (tree->IsLocal()) { const LclVarDsc* varDsc = compiler->lvaGetDesc(tree->AsLclVarCommon()); return isCandidateVar(varDsc); } return false; } // Helpers for getKillSetForNode(). regMaskTP getKillSetForStoreInd(GenTreeStoreInd* tree); regMaskTP getKillSetForShiftRotate(GenTreeOp* tree); regMaskTP getKillSetForMul(GenTreeOp* tree); regMaskTP getKillSetForCall(GenTreeCall* call); regMaskTP getKillSetForModDiv(GenTreeOp* tree); regMaskTP getKillSetForBlockStore(GenTreeBlk* blkNode); regMaskTP getKillSetForReturn(); regMaskTP getKillSetForProfilerHook(); #ifdef FEATURE_HW_INTRINSICS regMaskTP getKillSetForHWIntrinsic(GenTreeHWIntrinsic* node); #endif // FEATURE_HW_INTRINSICS // Return the registers killed by the given tree node. // This is used only for an assert, and for stress, so it is only defined under DEBUG. // Otherwise, the Build methods should obtain the killMask from the appropriate method above. #ifdef DEBUG regMaskTP getKillSetForNode(GenTree* tree); #endif // Given some tree node add refpositions for all the registers this node kills bool buildKillPositionsForNode(GenTree* tree, LsraLocation currentLoc, regMaskTP killMask); regMaskTP allRegs(RegisterType rt); regMaskTP allByteRegs(); regMaskTP allSIMDRegs(); regMaskTP internalFloatRegCandidates(); void makeRegisterInactive(RegRecord* physRegRecord); void freeRegister(RegRecord* physRegRecord); void freeRegisters(regMaskTP regsToFree); // Get the type that this tree defines. var_types getDefType(GenTree* tree) { var_types type = tree->TypeGet(); if (type == TYP_STRUCT) { assert(tree->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR)); GenTreeLclVar* lclVar = tree->AsLclVar(); LclVarDsc* varDsc = compiler->lvaGetDesc(lclVar); type = varDsc->GetRegisterType(lclVar); } assert(type != TYP_UNDEF && type != TYP_STRUCT); return type; } // Managing internal registers during the BuildNode process. 
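    // For illustration only -- a hypothetical sketch of the usual pattern, using
    // the helpers declared below: a Build method that needs a scratch register does
    //
    //   buildInternalIntRegisterDefForNode(tree); // define the internal temp
    //   // ... BuildUse calls for the node's operands ...
    //   buildInternalRegisterUses();              // place the internal uses
    //
    // so that the temp's lifetime overlaps the uses of the node's operands.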
RefPosition* defineNewInternalTemp(GenTree* tree, RegisterType regType, regMaskTP candidates); RefPosition* buildInternalIntRegisterDefForNode(GenTree* tree, regMaskTP internalCands = RBM_NONE); RefPosition* buildInternalFloatRegisterDefForNode(GenTree* tree, regMaskTP internalCands = RBM_NONE); void buildInternalRegisterUses(); void writeLocalReg(GenTreeLclVar* lclNode, unsigned varNum, regNumber reg); void resolveLocalRef(BasicBlock* block, GenTreeLclVar* treeNode, RefPosition* currentRefPosition); void insertMove(BasicBlock* block, GenTree* insertionPoint, unsigned lclNum, regNumber inReg, regNumber outReg); void insertSwap( BasicBlock* block, GenTree* insertionPoint, unsigned lclNum1, regNumber reg1, unsigned lclNum2, regNumber reg2); private: Interval* newInterval(RegisterType regType); Interval* getIntervalForLocalVar(unsigned varIndex) { assert(varIndex < compiler->lvaTrackedCount); assert(localVarIntervals[varIndex] != nullptr); return localVarIntervals[varIndex]; } Interval* getIntervalForLocalVarNode(GenTreeLclVarCommon* tree) { const LclVarDsc* varDsc = compiler->lvaGetDesc(tree); assert(varDsc->lvTracked); return getIntervalForLocalVar(varDsc->lvVarIndex); } RegRecord* getRegisterRecord(regNumber regNum); RefPosition* newRefPositionRaw(LsraLocation nodeLocation, GenTree* treeNode, RefType refType); RefPosition* newRefPosition(Interval* theInterval, LsraLocation theLocation, RefType theRefType, GenTree* theTreeNode, regMaskTP mask, unsigned multiRegIdx = 0); // This creates a RefTypeUse at currentLoc. It sets the treeNode to nullptr if it is not a // lclVar interval. RefPosition* newUseRefPosition(Interval* theInterval, GenTree* theTreeNode, regMaskTP mask, unsigned multiRegIdx = 0); RefPosition* newRefPosition( regNumber reg, LsraLocation theLocation, RefType theRefType, GenTree* theTreeNode, regMaskTP mask); void applyCalleeSaveHeuristics(RefPosition* rp); void checkConflictingDefUse(RefPosition* rp); void associateRefPosWithInterval(RefPosition* rp); weight_t getWeight(RefPosition* refPos); /***************************************************************************** * Register management ****************************************************************************/ RegisterType getRegisterType(Interval* currentInterval, RefPosition* refPosition); #ifdef DEBUG const char* getScoreName(RegisterScore score); #endif regNumber allocateReg(Interval* current, RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore)); regNumber assignCopyReg(RefPosition* refPosition); bool isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPosition); bool isSpillCandidate(Interval* current, RefPosition* refPosition, RegRecord* physRegRecord); void checkAndAssignInterval(RegRecord* regRec, Interval* interval); void assignPhysReg(RegRecord* regRec, Interval* interval); void assignPhysReg(regNumber reg, Interval* interval) { assignPhysReg(getRegisterRecord(reg), interval); } bool isAssigned(RegRecord* regRec ARM_ARG(RegisterType newRegType)); void checkAndClearInterval(RegRecord* regRec, RefPosition* spillRefPosition); void unassignPhysReg(RegRecord* regRec ARM_ARG(RegisterType newRegType)); void unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPosition); void unassignPhysRegNoSpill(RegRecord* reg); void unassignPhysReg(regNumber reg) { unassignPhysReg(getRegisterRecord(reg), nullptr); } void setIntervalAsSpilled(Interval* interval); void setIntervalAsSplit(Interval* interval); void spillInterval(Interval* interval, RefPosition* fromRefPosition DEBUGARG(RefPosition* 
toRefPosition));
    void spillGCRefs(RefPosition* killRefPosition);

    /*****************************************************************************
     * Register selection
     ****************************************************************************/
    regMaskTP getFreeCandidates(regMaskTP candidates, var_types regType)
    {
        regMaskTP result = candidates & m_AvailableRegs;
#ifdef TARGET_ARM
        // For TYP_DOUBLE on ARM, we can only use a register for which the odd half is
        // also available.
        if (regType == TYP_DOUBLE)
        {
            result &= (m_AvailableRegs >> 1);
        }
#endif // TARGET_ARM
        return result;
    }

#ifdef DEBUG
    class RegisterSelection;
    // For lsra ordering experimentation
    typedef void (LinearScan::RegisterSelection::*HeuristicFn)();
    typedef JitHashTable<RegisterScore, JitSmallPrimitiveKeyFuncs<RegisterScore>, HeuristicFn> ScoreMappingTable;
#define REGSELECT_HEURISTIC_COUNT 17
#endif

    class RegisterSelection
    {
    public:
        RegisterSelection(LinearScan* linearScan);

        // Perform register selection and update currentInterval or refPosition
        FORCEINLINE regMaskTP select(Interval* currentInterval,
                                     RefPosition* refPosition DEBUG_ARG(RegisterScore* registerScore));

        // If the register is from the unassigned set such that it was not already
        // assigned to the current interval
        FORCEINLINE bool foundUnassignedReg()
        {
            assert(found && isSingleRegister(foundRegBit));
            bool isUnassignedReg = ((foundRegBit & unassignedSet) != RBM_NONE);
            return isUnassignedReg && !isAlreadyAssigned();
        }

        // Did the register selector decide to spill this interval
        FORCEINLINE bool isSpilling()
        {
            return (foundRegBit & freeCandidates) == RBM_NONE;
        }

        // Is the value one of the constants that is already in a register
        FORCEINLINE bool isMatchingConstant()
        {
            assert(found && isSingleRegister(foundRegBit));
            return (matchingConstants & foundRegBit) != RBM_NONE;
        }

        // Did we apply the CONST_AVAILABLE heuristics
        FORCEINLINE bool isConstAvailable()
        {
            return (score & CONST_AVAILABLE) != 0;
        }

    private:
#ifdef DEBUG
        RegisterScore      RegSelectionOrder[REGSELECT_HEURISTIC_COUNT] = {NONE};
        ScoreMappingTable* mappingTable                                 = nullptr;
#endif
        LinearScan*  linearScan      = nullptr;
        int          score           = 0;
        Interval*    currentInterval = nullptr;
        RefPosition* refPosition     = nullptr;

        RegisterType regType         = RegisterType::TYP_UNKNOWN;
        LsraLocation currentLocation = MinLocation;
        RefPosition* nextRefPos      = nullptr;

        regMaskTP candidates;
        regMaskTP preferences     = RBM_NONE;
        Interval* relatedInterval = nullptr;

        regMaskTP    relatedPreferences = RBM_NONE;
        LsraLocation rangeEndLocation;
        LsraLocation relatedLastLocation;
        bool         preferCalleeSave = false;
        RefPosition* rangeEndRefPosition;
        RefPosition* lastRefPosition;
        regMaskTP    callerCalleePrefs = RBM_NONE;
        LsraLocation lastLocation;
        RegRecord*   prevRegRec = nullptr;

        regMaskTP prevRegBit = RBM_NONE;

        // These are used in the post-selection updates, and must be set for any selection.
        regMaskTP freeCandidates;
        regMaskTP matchingConstants;
        regMaskTP unassignedSet;
        regMaskTP foundRegBit;

        // Compute the sets for COVERS, OWN_PREFERENCE, COVERS_RELATED, COVERS_FULL and UNASSIGNED together,
        // as they all require similar computation.
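        // For intuition (a sketch of the idea, not the precise definition): the COVERS
        // set holds the free candidates that remain available across the remaining
        // range of currentInterval, so heuristics such as try_COVERS() can prefer
        // registers that cover the whole lifetime.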
regMaskTP coversSet; regMaskTP preferenceSet; regMaskTP coversRelatedSet; regMaskTP coversFullSet; bool coversSetsCalculated = false; bool found = false; bool skipAllocation = false; regNumber foundReg = REG_NA; // Returns true if the selected register is already assigned to the current interval. FORCEINLINE bool isAlreadyAssigned() { assert(found && isSingleRegister(candidates)); return (prevRegBit & preferences) == foundRegBit; } bool applySelection(int selectionScore, regMaskTP selectionCandidates); bool applySingleRegSelection(int selectionScore, regMaskTP selectionCandidate); FORCEINLINE void calculateCoversSets(); FORCEINLINE void reset(Interval* interval, RefPosition* refPosition); #define REG_SEL_DEF(stat, value, shortname, orderSeqId) FORCEINLINE void try_##stat(); #include "lsra_score.h" #undef REG_SEL_DEF }; RegisterSelection* regSelector; /***************************************************************************** * For Resolution phase ****************************************************************************/ // TODO-Throughput: Consider refactoring this so that we keep a map from regs to vars for better scaling unsigned int regMapCount; // When we split edges, we create new blocks, and instead of expanding the VarToRegMaps, we // rely on the property that the "in" map is the same as the "from" block of the edge, and the // "out" map is the same as the "to" block of the edge (by construction). // So, for any block whose bbNum is greater than bbNumMaxBeforeResolution, we use the // splitBBNumToTargetBBNumMap. // TODO-Throughput: We may want to look into the cost/benefit tradeoff of doing this vs. expanding // the arrays. unsigned bbNumMaxBeforeResolution; struct SplitEdgeInfo { unsigned fromBBNum; unsigned toBBNum; }; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, SplitEdgeInfo> SplitBBNumToTargetBBNumMap; SplitBBNumToTargetBBNumMap* splitBBNumToTargetBBNumMap; SplitBBNumToTargetBBNumMap* getSplitBBNumToTargetBBNumMap() { if (splitBBNumToTargetBBNumMap == nullptr) { splitBBNumToTargetBBNumMap = new (getAllocator(compiler)) SplitBBNumToTargetBBNumMap(getAllocator(compiler)); } return splitBBNumToTargetBBNumMap; } SplitEdgeInfo getSplitEdgeInfo(unsigned int bbNum); void initVarRegMaps(); void setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg); void setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg); VarToRegMap getInVarToRegMap(unsigned int bbNum); VarToRegMap getOutVarToRegMap(unsigned int bbNum); void setVarReg(VarToRegMap map, unsigned int trackedVarIndex, regNumber reg); regNumber getVarReg(VarToRegMap map, unsigned int trackedVarIndex); // Initialize the incoming VarToRegMap to the given map values (generally a predecessor of // the block) VarToRegMap setInVarToRegMap(unsigned int bbNum, VarToRegMap srcVarToRegMap); regNumber getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock, var_types type); #ifdef DEBUG void dumpVarToRegMap(VarToRegMap map); void dumpInVarToRegMap(BasicBlock* block); void dumpOutVarToRegMap(BasicBlock* block); // There are three points at which a tuple-style dump is produced, and each // differs slightly: // - In LSRA_DUMP_PRE, it does a simple dump of each node, with indications of what // tree nodes are consumed.
// - In LSRA_DUMP_REFPOS, which is after the intervals are built, but before // register allocation, each node is dumped, along with all of the RefPositions. // The Intervals are identified as Lnnn for lclVar intervals, Innn for other // intervals, and Tnnn for internal temps. // - In LSRA_DUMP_POST, which is after register allocation, the registers are // shown. enum LsraTupleDumpMode{LSRA_DUMP_PRE, LSRA_DUMP_REFPOS, LSRA_DUMP_POST}; void lsraGetOperandString(GenTree* tree, LsraTupleDumpMode mode, char* operandString, unsigned operandStringLength); void lsraDispNode(GenTree* tree, LsraTupleDumpMode mode, bool hasDest); void DumpOperandDefs( GenTree* operand, bool& first, LsraTupleDumpMode mode, char* operandString, const unsigned operandStringLength); void TupleStyleDump(LsraTupleDumpMode mode); LsraLocation maxNodeLocation; // Width of various fields - used to create a streamlined dump during allocation that shows the // state of all the registers in columns. int regColumnWidth; int regTableIndent; const char* columnSeparator; const char* line; const char* leftBox; const char* middleBox; const char* rightBox; static const int MAX_FORMAT_CHARS = 12; char intervalNameFormat[MAX_FORMAT_CHARS]; char regNameFormat[MAX_FORMAT_CHARS]; char shortRefPositionFormat[MAX_FORMAT_CHARS]; char emptyRefPositionFormat[MAX_FORMAT_CHARS]; char indentFormat[MAX_FORMAT_CHARS]; static const int MAX_LEGEND_FORMAT_CHARS = 25; char bbRefPosFormat[MAX_LEGEND_FORMAT_CHARS]; char legendFormat[MAX_LEGEND_FORMAT_CHARS]; // How many rows have we printed since last printing a "title row"? static const int MAX_ROWS_BETWEEN_TITLES = 50; int rowCountSinceLastTitle; // Current mask of registers being printed in the dump. regMaskTP lastDumpedRegisters; regMaskTP registersToDump; int lastUsedRegNumIndex; bool shouldDumpReg(regNumber regNum) { return (registersToDump & genRegMask(regNum)) != 0; } void dumpRegRecordHeader(); void dumpRegRecordTitle(); void dumpRegRecordTitleIfNeeded(); void dumpRegRecordTitleLines(); void dumpRegRecords(); void dumpNewBlock(BasicBlock* currentBlock, LsraLocation location); // An abbreviated RefPosition dump for printing with column-based register state void dumpRefPositionShort(RefPosition* refPosition, BasicBlock* currentBlock); // Print the number of spaces occupied by a dumpRefPositionShort() void dumpEmptyRefPosition(); // A dump of Referent, in exactly regColumnWidth characters void dumpIntervalName(Interval* interval); // Events during the allocation phase that cause some dump output enum LsraDumpEvent{ // Conflicting def/use LSRA_EVENT_DEFUSE_CONFLICT, LSRA_EVENT_DEFUSE_FIXED_DELAY_USE, LSRA_EVENT_DEFUSE_CASE1, LSRA_EVENT_DEFUSE_CASE2, LSRA_EVENT_DEFUSE_CASE3, LSRA_EVENT_DEFUSE_CASE4, LSRA_EVENT_DEFUSE_CASE5, LSRA_EVENT_DEFUSE_CASE6, // Spilling LSRA_EVENT_SPILL, LSRA_EVENT_SPILL_EXTENDED_LIFETIME, LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL, LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL, LSRA_EVENT_DONE_KILL_GC_REFS, LSRA_EVENT_NO_GC_KILLS, // Block boundaries LSRA_EVENT_START_BB, LSRA_EVENT_END_BB, // Miscellaneous LSRA_EVENT_FREE_REGS, LSRA_EVENT_UPPER_VECTOR_SAVE, LSRA_EVENT_UPPER_VECTOR_RESTORE, // Characteristics of the current RefPosition LSRA_EVENT_INCREMENT_RANGE_END, // ???
LSRA_EVENT_LAST_USE, LSRA_EVENT_LAST_USE_DELAYED, LSRA_EVENT_NEEDS_NEW_REG, // Allocation decisions LSRA_EVENT_FIXED_REG, LSRA_EVENT_EXP_USE, LSRA_EVENT_ZERO_REF, LSRA_EVENT_NO_ENTRY_REG_ALLOCATED, LSRA_EVENT_KEPT_ALLOCATION, LSRA_EVENT_COPY_REG, LSRA_EVENT_MOVE_REG, LSRA_EVENT_ALLOC_REG, LSRA_EVENT_NO_REG_ALLOCATED, LSRA_EVENT_RELOAD, LSRA_EVENT_SPECIAL_PUTARG, LSRA_EVENT_REUSE_REG, }; void dumpLsraAllocationEvent(LsraDumpEvent event, Interval* interval = nullptr, regNumber reg = REG_NA, BasicBlock* currentBlock = nullptr, RegisterScore registerScore = NONE); void validateIntervals(); #endif // DEBUG #if TRACK_LSRA_STATS unsigned regCandidateVarCount; void updateLsraStat(LsraStat stat, unsigned currentBBNum); void dumpLsraStats(FILE* file); LsraStat getLsraStatFromScore(RegisterScore registerScore); LsraStat firstRegSelStat = STAT_FREE; public: virtual void dumpLsraStatsCsv(FILE* file); virtual void dumpLsraStatsSummary(FILE* file); static const char* getStatName(unsigned stat); #define INTRACK_STATS(x) x #define INTRACK_STATS_IF(condition, work) \ if (condition) \ { \ work; \ } #else // !TRACK_LSRA_STATS #define INTRACK_STATS(x) #define INTRACK_STATS_IF(condition, work) #endif // !TRACK_LSRA_STATS private: Compiler* compiler; CompAllocator getAllocator(Compiler* comp) { return comp->getAllocator(CMK_LSRA); } #ifdef DEBUG // This is used for dumping RefPosition* activeRefPosition; #endif // DEBUG IntervalList intervals; RegRecord physRegs[REG_COUNT]; // Map from tracked variable index to Interval*. Interval** localVarIntervals; // Set of blocks that have been visited. BlockSet bbVisitedSet; void markBlockVisited(BasicBlock* block) { BlockSetOps::AddElemD(compiler, bbVisitedSet, block->bbNum); } void clearVisitedBlocks() { BlockSetOps::ClearD(compiler, bbVisitedSet); } bool isBlockVisited(BasicBlock* block) { return BlockSetOps::IsMember(compiler, bbVisitedSet, block->bbNum); } #if DOUBLE_ALIGN bool doDoubleAlign; #endif // A map from bbNum to the block information used during register allocation. LsraBlockInfo* blockInfo; BasicBlock* findPredBlockForLiveIn(BasicBlock* block, BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated)); // The order in which the blocks will be allocated. // This is an array of BasicBlock*, in the order in which they should be traversed. BasicBlock** blockSequence; // The verifiedAllBBs flag indicates whether we have verified that all BBs have been // included in the blockSequence above, during setBlockSequence(). bool verifiedAllBBs; void setBlockSequence(); int compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights); BasicBlockList* blockSequenceWorkList; bool blockSequencingDone; #ifdef DEBUG // LSRA must not change the number of blocks and blockEpoch that it initializes at start. unsigned blockEpoch; #endif // DEBUG void addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block, BlockSet& predSet); void removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode); BasicBlock* getNextCandidateFromWorkList(); // Indicates whether the allocation pass has been completed. bool allocationPassComplete; // The bbNum of the block being currently allocated or resolved. unsigned int curBBNum; // The current location. LsraLocation currentLoc; // The first location in a cold or funclet block. LsraLocation firstColdLoc; // The ordinal of the block we're on (i.e. this is the curBBSeqNum-th block we've allocated). unsigned int curBBSeqNum; // The number of blocks that we've sequenced.
unsigned int bbSeqCount; // The Location of the start of the current block. LsraLocation curBBStartLocation; // True if the method contains any critical edges. bool hasCriticalEdges; // True if there are any register candidate lclVars available for allocation. bool enregisterLocalVars; virtual bool willEnregisterLocalVars() const { return enregisterLocalVars; } // Ordered list of RefPositions RefPositionList refPositions; // Per-block variable location mappings: an array indexed by block number that yields a // pointer to an array of regNumber, one per variable. VarToRegMap* inVarToRegMaps; VarToRegMap* outVarToRegMaps; // A temporary VarToRegMap used during the resolution of critical edges. VarToRegMap sharedCriticalVarToRegMap; PhasedVar<regMaskTP> availableIntRegs; PhasedVar<regMaskTP> availableFloatRegs; PhasedVar<regMaskTP> availableDoubleRegs; // The set of all register candidates. Note that this may be a subset of tracked vars. VARSET_TP registerCandidateVars; // Current set of live register candidate vars, used during building of RefPositions to determine // whether to preference callee-save registers. VARSET_TP currentLiveVars; // Set of variables that may require resolution across an edge. // This is first constructed during interval building, to contain all the lclVars that are live at BB edges. // Then, any lclVar that is always in the same register is removed from the set. VARSET_TP resolutionCandidateVars; // This set contains all the lclVars that are ever spilled or split. VARSET_TP splitOrSpilledVars; // Set of floating point variables to consider for callee-save registers. VARSET_TP fpCalleeSaveCandidateVars; // Set of variables exposed on EH flow edges. VARSET_TP exceptVars; // Set of variables exposed on finally edges. These must be zero-init if they are refs or if compInitMem is true. VARSET_TP finallyVars; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if defined(TARGET_AMD64) static const var_types LargeVectorSaveType = TYP_SIMD16; #elif defined(TARGET_ARM64) static const var_types LargeVectorSaveType = TYP_DOUBLE; #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) // Set of large vector (TYP_SIMD32 on AVX) variables. VARSET_TP largeVectorVars; // Set of large vector (TYP_SIMD32 on AVX) variables to consider for callee-save registers.
VARSET_TP largeVectorCalleeSaveCandidateVars; #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE //----------------------------------------------------------------------- // Register status //----------------------------------------------------------------------- regMaskTP m_AvailableRegs; regNumber getRegForType(regNumber reg, var_types regType) { #ifdef TARGET_ARM if ((regType == TYP_DOUBLE) && !genIsValidDoubleReg(reg)) { reg = REG_PREV(reg); } #endif // TARGET_ARM return reg; } regMaskTP getRegMask(regNumber reg, var_types regType) { reg = getRegForType(reg, regType); regMaskTP regMask = genRegMask(reg); #ifdef TARGET_ARM if (regType == TYP_DOUBLE) { assert(genIsValidDoubleReg(reg)); regMask |= (regMask << 1); } #endif // TARGET_ARM return regMask; } void resetAvailableRegs() { m_AvailableRegs = (availableIntRegs | availableFloatRegs); m_RegistersWithConstants = RBM_NONE; } bool isRegAvailable(regNumber reg, var_types regType) { regMaskTP regMask = getRegMask(reg, regType); return (m_AvailableRegs & regMask) == regMask; } void setRegsInUse(regMaskTP regMask) { m_AvailableRegs &= ~regMask; } void setRegInUse(regNumber reg, var_types regType) { regMaskTP regMask = getRegMask(reg, regType); setRegsInUse(regMask); } void makeRegsAvailable(regMaskTP regMask) { m_AvailableRegs |= regMask; } void makeRegAvailable(regNumber reg, var_types regType) { regMaskTP regMask = getRegMask(reg, regType); makeRegsAvailable(regMask); } void clearNextIntervalRef(regNumber reg, var_types regType); void updateNextIntervalRef(regNumber reg, Interval* interval); void clearSpillCost(regNumber reg, var_types regType); void updateSpillCost(regNumber reg, Interval* interval); regMaskTP m_RegistersWithConstants; void clearConstantReg(regNumber reg, var_types regType) { m_RegistersWithConstants &= ~getRegMask(reg, regType); } void setConstantReg(regNumber reg, var_types regType) { m_RegistersWithConstants |= getRegMask(reg, regType); } bool isRegConstant(regNumber reg, var_types regType) { reg = getRegForType(reg, regType); regMaskTP regMask = getRegMask(reg, regType); return (m_RegistersWithConstants & regMask) == regMask; } regMaskTP getMatchingConstants(regMaskTP mask, Interval* currentInterval, RefPosition* refPosition); regMaskTP fixedRegs; LsraLocation nextFixedRef[REG_COUNT]; void updateNextFixedRef(RegRecord* regRecord, RefPosition* nextRefPosition); LsraLocation getNextFixedRef(regNumber regNum, var_types regType) { LsraLocation loc = nextFixedRef[regNum]; #ifdef TARGET_ARM if (regType == TYP_DOUBLE) { loc = Min(loc, nextFixedRef[regNum + 1]); } #endif return loc; } LsraLocation nextIntervalRef[REG_COUNT]; LsraLocation getNextIntervalRef(regNumber regNum, var_types regType) { LsraLocation loc = nextIntervalRef[regNum]; #ifdef TARGET_ARM if (regType == TYP_DOUBLE) { loc = Min(loc, nextIntervalRef[regNum + 1]); } #endif return loc; } weight_t spillCost[REG_COUNT]; regMaskTP regsBusyUntilKill; regMaskTP regsInUseThisLocation; regMaskTP regsInUseNextLocation; bool isRegBusy(regNumber reg, var_types regType) { regMaskTP regMask = getRegMask(reg, regType); return (regsBusyUntilKill & regMask) != RBM_NONE; } void setRegBusyUntilKill(regNumber reg, var_types regType) { regsBusyUntilKill |= getRegMask(reg, regType); } void clearRegBusyUntilKill(regNumber reg) { regsBusyUntilKill &= ~genRegMask(reg); } bool isRegInUse(regNumber reg, var_types regType) { regMaskTP regMask = getRegMask(reg, regType); return (regsInUseThisLocation & regMask) != RBM_NONE; } void resetRegState() { resetAvailableRegs(); regsBusyUntilKill = 
RBM_NONE; } bool conflictingFixedRegReference(regNumber regNum, RefPosition* refPosition); // This method should not be used and is here to retain old behavior. // It should be replaced by isRegAvailable(). // See the comment in allocateReg(). bool isFree(RegRecord* regRecord); //----------------------------------------------------------------------- // Build methods //----------------------------------------------------------------------- // The listNodePool is used to maintain the RefInfo for nodes that are "in flight" // i.e. whose consuming node has not yet been handled. RefInfoListNodePool listNodePool; // When Def RefPositions are built for a node, their RefInfoListNode // (GenTree* to RefPosition* mapping) is placed in the defList. // As the consuming node is handled, it removes the RefInfoListNode from the // defList and uses the interval associated with the corresponding Def RefPosition // to build the Use RefPosition. RefInfoList defList; // As we build uses, we may want to preference the next definition (i.e. the register produced // by the current node) to the same register as one of its uses. This is done by setting // 'tgtPrefUse' to that RefPosition. RefPosition* tgtPrefUse = nullptr; RefPosition* tgtPrefUse2 = nullptr; // The following keep track of information about internal (temporary register) intervals // during the building of a single node. static const int MaxInternalCount = 5; RefPosition* internalDefs[MaxInternalCount]; int internalCount = 0; bool setInternalRegsDelayFree; // When a RefTypeUse is marked as 'delayRegFree', we also want to mark the RefTypeDef // in the next Location as 'hasInterferingUses'. This is accomplished by setting this // 'pendingDelayFree' to true as they are created, and clearing it as a new node is // handled in 'BuildNode'. bool pendingDelayFree; // This method clears the "build state" before starting to handle a new node. void clearBuildState() { tgtPrefUse = nullptr; tgtPrefUse2 = nullptr; internalCount = 0; setInternalRegsDelayFree = false; pendingDelayFree = false; } bool isCandidateMultiRegLclVar(GenTreeLclVar* lclNode); bool checkContainedOrCandidateLclVar(GenTreeLclVar* lclNode); RefPosition* BuildUse(GenTree* operand, regMaskTP candidates = RBM_NONE, int multiRegIdx = 0); void setDelayFree(RefPosition* use); int BuildBinaryUses(GenTreeOp* node, regMaskTP candidates = RBM_NONE); #ifdef TARGET_XARCH int BuildRMWUses(GenTree* node, GenTree* op1, GenTree* op2, regMaskTP candidates = RBM_NONE); #endif // TARGET_XARCH // This is the main entry point for building the RefPositions for a node. // These methods return the number of sources.
int BuildNode(GenTree* tree); void getTgtPrefOperands(GenTree* tree, GenTree* op1, GenTree* op2, bool* prefOp1, bool* prefOp2); bool supportsSpecialPutArg(); int BuildSimple(GenTree* tree); int BuildOperandUses(GenTree* node, regMaskTP candidates = RBM_NONE); int BuildDelayFreeUses(GenTree* node, GenTree* rmwNode = nullptr, regMaskTP candidates = RBM_NONE); int BuildIndirUses(GenTreeIndir* indirTree, regMaskTP candidates = RBM_NONE); int BuildAddrUses(GenTree* addr, regMaskTP candidates = RBM_NONE); void HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs); RefPosition* BuildDef(GenTree* tree, regMaskTP dstCandidates = RBM_NONE, int multiRegIdx = 0); void BuildDefs(GenTree* tree, int dstCount, regMaskTP dstCandidates = RBM_NONE); void BuildDefsWithKills(GenTree* tree, int dstCount, regMaskTP dstCandidates, regMaskTP killMask); int BuildReturn(GenTree* tree); #ifdef TARGET_XARCH // This method, unlike the others, returns the number of sources, since it may be called when // 'tree' is contained. int BuildShiftRotate(GenTree* tree); #endif // TARGET_XARCH #ifdef TARGET_ARM int BuildShiftLongCarry(GenTree* tree); #endif int BuildPutArgReg(GenTreeUnOp* node); int BuildCall(GenTreeCall* call); int BuildCmp(GenTree* tree); int BuildBlockStore(GenTreeBlk* blkNode); int BuildModDiv(GenTree* tree); int BuildIntrinsic(GenTree* tree); void BuildStoreLocDef(GenTreeLclVarCommon* storeLoc, LclVarDsc* varDsc, RefPosition* singleUseRef, int index); int BuildMultiRegStoreLoc(GenTreeLclVar* storeLoc); int BuildStoreLoc(GenTreeLclVarCommon* tree); int BuildIndir(GenTreeIndir* indirTree); int BuildGCWriteBarrier(GenTree* tree); int BuildCast(GenTreeCast* cast); #if defined(TARGET_XARCH) // returns true if the tree can use the read-modify-write memory instruction form bool isRMWRegOper(GenTree* tree); int BuildMul(GenTree* tree); void SetContainsAVXFlags(unsigned sizeOfSIMDVector = 0); #endif // defined(TARGET_XARCH) #if defined(TARGET_X86) // Move the last use bit, if any, from 'fromTree' to 'toTree'; 'fromTree' must be contained. void CheckAndMoveRMWLastUse(GenTree* fromTree, GenTree* toTree) { // If 'fromTree' is not a last-use lclVar, there's nothing to do. if ((fromTree == nullptr) || !fromTree->OperIs(GT_LCL_VAR) || ((fromTree->gtFlags & GTF_VAR_DEATH) == 0)) { return; } // If 'fromTree' was a lclVar, it must be contained and 'toTree' must match. if (!fromTree->isContained() || (toTree == nullptr) || !toTree->OperIs(GT_LCL_VAR) || (fromTree->AsLclVarCommon()->GetLclNum() != toTree->AsLclVarCommon()->GetLclNum())) { assert(!"Unmatched RMW indirections"); return; } // This is probably not necessary, but keeps things consistent. fromTree->gtFlags &= ~GTF_VAR_DEATH; toTree->gtFlags |= GTF_VAR_DEATH; } #endif // TARGET_X86 #ifdef FEATURE_SIMD int BuildSIMD(GenTreeSIMD* tree); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS int BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree, int* pDstCount); #endif // FEATURE_HW_INTRINSICS int BuildPutArgStk(GenTreePutArgStk* argNode); #if FEATURE_ARG_SPLIT int BuildPutArgSplit(GenTreePutArgSplit* tree); #endif // FEATURE_ARG_SPLIT int BuildLclHeap(GenTree* tree); }; /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Interval XX XX XX XX This is the fundamental data structure for linear scan register XX XX allocation. It represents the live range(s) for a variable or temp. 
XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ class Interval : public Referenceable { public: Interval(RegisterType registerType, regMaskTP registerPreferences) : registerPreferences(registerPreferences) , relatedInterval(nullptr) , assignedReg(nullptr) , varNum(0) , physReg(REG_COUNT) , registerType(registerType) , isActive(false) , isLocalVar(false) , isSplit(false) , isSpilled(false) , isInternal(false) , isStructField(false) , isPromotedStruct(false) , hasConflictingDefUse(false) , hasInterferingUses(false) , isSpecialPutArg(false) , preferCalleeSave(false) , isConstant(false) #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE , isUpperVector(false) , isPartiallySpilled(false) #endif , isWriteThru(false) , isSingleDef(false) #ifdef DEBUG , intervalIndex(0) #endif { } #ifdef DEBUG // print out representation void dump(); // concise representation for embedding void tinyDump(); // extremely concise representation void microDump(); #endif // DEBUG void setLocalNumber(Compiler* compiler, unsigned lclNum, LinearScan* l); // Fixed registers for which this Interval has a preference regMaskTP registerPreferences; // The relatedInterval, if non-null, is the interval to which this interval // is currently preferenced (e.g. because they are related by a copy) Interval* relatedInterval; // The assignedReg is the RegRecord for the register to which this interval // has been assigned at some point - if the interval is active, this is the // register it currently occupies. RegRecord* assignedReg; unsigned int varNum; // This is the "variable number": the index into the lvaTable array // The register to which it is currently assigned. regNumber physReg; RegisterType registerType; // Is this Interval currently in a register and live? bool isActive; bool isLocalVar : 1; // Indicates whether this interval has been assigned to different registers bool isSplit : 1; // Indicates whether this interval is ever spilled bool isSpilled : 1; // indicates an interval representing the internal requirements for // generating code for a node (temp registers internal to the node) // Note that this interval may live beyond a node in the GT_ARR_LENREF/GT_IND // case (though never lives beyond a stmt) bool isInternal : 1; // true if this is a LocalVar for a struct field bool isStructField : 1; // true iff this is a GT_LDOBJ for a fully promoted (PROMOTION_TYPE_INDEPENDENT) struct bool isPromotedStruct : 1; // true if this is an SDSU interval for which the def and use have conflicting register // requirements bool hasConflictingDefUse : 1; // true if this interval's defining node has "delayRegFree" uses, either due to it being an RMW instruction, // OR because it requires an internal register that differs from the target. bool hasInterferingUses : 1; // True if this interval is defined by a putArg, whose source is a non-last-use lclVar. // During allocation, this flag will be cleared if the source is not already in the required register. // Otherwise, we will leave the register allocated to the lclVar, but mark the RegRecord as // isBusyUntilKill, so that it won't be reused if the lclVar goes dead before the call. bool isSpecialPutArg : 1; // True if this interval interferes with a call. bool preferCalleeSave : 1; // True if this interval is defined by a constant node that may be reused and/or may be // able to reuse a constant that's already in a register.
bool isConstant : 1; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // True if this is a special interval for saving the upper half of a large vector. bool isUpperVector : 1; // This is a convenience method to avoid ifdef's everywhere this is used. bool IsUpperVector() const { return isUpperVector; } // True if this interval has been partially spilled bool isPartiallySpilled : 1; #else bool IsUpperVector() const { return false; } #endif // True if this interval is associated with a lclVar that is written to memory at each definition. bool isWriteThru : 1; // True if this interval has a single definition. bool isSingleDef : 1; #ifdef DEBUG unsigned int intervalIndex; #endif // DEBUG LclVarDsc* getLocalVar(Compiler* comp) { assert(isLocalVar); return comp->lvaGetDesc(this->varNum); } // Get the local tracked variable "index" (lvVarIndex), used in bitmasks. unsigned getVarIndex(Compiler* comp) { LclVarDsc* varDsc = getLocalVar(comp); assert(varDsc->lvTracked); // If this isn't true, we shouldn't be calling this function! return varDsc->lvVarIndex; } bool isAssignedTo(regNumber regNum) { // This uses regMasks to handle the case where a double actually occupies two registers // TODO-Throughput: This could/should be done more cheaply. return (physReg != REG_NA && (genRegMask(physReg, registerType) & genRegMask(regNum)) != RBM_NONE); } // Assign the related interval. void assignRelatedInterval(Interval* newRelatedInterval) { #ifdef DEBUG if (VERBOSE) { printf("Assigning related "); newRelatedInterval->microDump(); printf(" to "); this->microDump(); printf("\n"); } #endif // DEBUG relatedInterval = newRelatedInterval; } // Assign the related interval, but only if it isn't already assigned. bool assignRelatedIntervalIfUnassigned(Interval* newRelatedInterval) { if (relatedInterval == nullptr) { assignRelatedInterval(newRelatedInterval); return true; } else { #ifdef DEBUG if (VERBOSE) { printf("Interval "); this->microDump(); printf(" already has a related interval\n"); } #endif // DEBUG return false; } } // Get the current preferences for this Interval. // Note that when we have an assigned register we don't necessarily update the // registerPreferences to that register, as there may be multiple, possibly disjoint, // definitions. This method will return the current assigned register if any, or // the 'registerPreferences' otherwise. // regMaskTP getCurrentPreferences() { return (assignedReg == nullptr) ? registerPreferences : genRegMask(assignedReg->regNum); } void mergeRegisterPreferences(regMaskTP preferences) { // We require registerPreferences to have been initialized. assert(registerPreferences != RBM_NONE); // It is invalid to update with empty preferences assert(preferences != RBM_NONE); regMaskTP commonPreferences = (registerPreferences & preferences); if (commonPreferences != RBM_NONE) { registerPreferences = commonPreferences; return; } // There are no preferences in common. // Preferences need to reflect both cases where a var must occupy a specific register, // as well as cases where a var is live when a register is killed. // In the former case, we would like to record all such registers, however we don't // really want to use any registers that will interfere. // To approximate this, we never "or" together multi-reg sets, which are generally kill sets. if (!genMaxOneBit(preferences)) { // The new preference value is a multi-reg set, so it's probably a kill. // Keep the new value. 
registerPreferences = preferences; return; } if (!genMaxOneBit(registerPreferences)) { // The old preference value is a multi-reg set. // Keep the existing preference set, as it probably reflects one or more kills. // It may have been a union of multiple individual registers, but we can't // distinguish that case without extra cost. return; } // If we reach here, we have two disjoint single-reg sets. // Keep only the callee-save preferences, if not empty. // Otherwise, take the union of the preferences. regMaskTP newPreferences = registerPreferences | preferences; if (preferCalleeSave) { regMaskTP calleeSaveMask = (calleeSaveRegs(this->registerType) & (newPreferences)); if (calleeSaveMask != RBM_NONE) { newPreferences = calleeSaveMask; } } registerPreferences = newPreferences; } // Update the registerPreferences on the interval. // If there are conflicting requirements on this interval, set the preferences to // the union of them. That way maybe we'll get at least one of them. // An exception is made in the case where one of the existing or new // preference sets is all callee-save, in which case we "prefer" the callee-save void updateRegisterPreferences(regMaskTP preferences) { // If this interval is preferenced to another interval, that interval may have already been assigned a // register, and we want to include that in the preferences. if ((relatedInterval != nullptr) && !relatedInterval->isActive) { mergeRegisterPreferences(relatedInterval->getCurrentPreferences()); } // Now merge the new preferences. mergeRegisterPreferences(preferences); } }; class RefPosition { public: // A RefPosition refers to either an Interval or a RegRecord. 'referent' points to one // of these types. If it refers to a RegRecord, then 'isPhysRegRef()' is true. If it // refers to an Interval, then 'isPhysRegRef()' is false. // referent can never be null. Referenceable* referent; // nextRefPosition is the next in code order. // Note that in either case there is no need for these to be doubly linked, as they // are only traversed in the forward direction, and are not moved. RefPosition* nextRefPosition; // The remaining fields are common to both options GenTree* treeNode; unsigned int bbNum; LsraLocation nodeLocation; // Prior to the allocation pass, registerAssignment captures the valid registers // for this RefPosition. // After the allocation pass, this contains the actual assignment regMaskTP registerAssignment; RefType refType; // NOTE: C++ only packs bitfields if the base type is the same. So make all the base // NOTE: types of the logically "bool" types that follow 'unsigned char', so they match // NOTE: RefType that precedes this, and multiRegIdx can also match. // Indicates whether this ref position is to be allocated a reg only if profitable. Currently these are the // ref positions that lower/codegen has indicated as reg optional; such a position is considered a contained // memory operand if no reg is allocated. unsigned char regOptional : 1; // Used by RefTypeDef/Use positions of a multi-reg call node. // Indicates the position of the register that this ref position refers to. // The max bits needed is based on the max value of MAX_RET_REG_COUNT // across all targets, and that happens to be 4, on Arm. Hence the index value // would be 0..MAX_RET_REG_COUNT-1. unsigned char multiRegIdx : 2; // Last Use - this may be true for multiple RefPositions in the same Interval unsigned char lastUse : 1; // Spill and Copy info // reload indicates that the value was spilled, and must be reloaded here.
// spillAfter indicates that the value is spilled here, so a spill must be added. // singleDefSpill indicates that it is associated with a single-def var and if it // is decided to get spilled, it will be spilled at firstRefPosition def. That // way, the value on the stack will always be up-to-date and no more spills or // resolutions (from reg to stack) will be needed for such a single-def var. // copyReg indicates that the value needs to be copied to a specific register, // but that it will also retain its current assigned register. // moveReg indicates that the value needs to be moved to a different register, // and that this will be its new assigned register. // A RefPosition may have any flag individually or the following combinations: // - reload and spillAfter (i.e. it remains in memory), but not in combination with copyReg or moveReg // (reload cannot exist with copyReg or moveReg; it should be reloaded into the appropriate reg) // - spillAfter and copyReg (i.e. it must be copied to a new reg for use, but is then spilled) // - spillAfter and moveReg (i.e. it must be both spilled and moved) // NOTE: a moveReg involves an explicit move, and would usually not be needed for a fixed Reg if it is going // to be spilled, because the code generator will do the move to the fixed register, and doesn't need to // record the new register location as the new "home" location of the lclVar. However, if there is a conflicting // use at the same location (e.g. lclVar V1 is in rdx and needs to be in rcx, but V2 needs to be in rdx), then // we need an explicit move. // - copyReg and moveReg must not exist with each other. unsigned char reload : 1; unsigned char spillAfter : 1; unsigned char singleDefSpill : 1; unsigned char writeThru : 1; // true if this var is defined in a register and also spilled. spillAfter must NOT be // set. unsigned char copyReg : 1; unsigned char moveReg : 1; // true if this var is moved to a new register unsigned char isPhysRegRef : 1; // true if 'referent' points to a RegRecord, false if it points to an Interval unsigned char isFixedRegRef : 1; unsigned char isLocalDefUse : 1; // delayRegFree indicates that the register should not be freed right away, but instead wait // until the next Location after it would normally be freed. This is used for the case of // non-commutative binary operators, where op2 must not be assigned the same register as // the target. We do this by not freeing it until after the target has been defined. // Another option would be to actually change the Location of the op2 use to the same // Location as the def, but then it could potentially reuse a register that has been freed // from the other source(s), e.g. if it's a lastUse or spilled. unsigned char delayRegFree : 1; // outOfOrder is marked on a (non-def) RefPosition that doesn't follow a definition of the // register currently assigned to the Interval. This happens when we use the assigned // register from a predecessor that is not the most recently allocated BasicBlock. unsigned char outOfOrder : 1; #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE // If upper vector save/restore can be avoided. unsigned char skipSaveRestore : 1; #endif #ifdef DEBUG // Minimum number of registers that need to be ensured while // constraining candidates for this ref position under // LSRA stress. unsigned minRegCandidateCount; // The unique RefPosition number, equal to its index in the // refPositions list. Only used for debugging dumps.
unsigned rpNum; #endif // DEBUG RefPosition(unsigned int bbNum, LsraLocation nodeLocation, GenTree* treeNode, RefType refType) : referent(nullptr) , nextRefPosition(nullptr) , treeNode(treeNode) , bbNum(bbNum) , nodeLocation(nodeLocation) , registerAssignment(RBM_NONE) , refType(refType) , multiRegIdx(0) , lastUse(false) , reload(false) , spillAfter(false) , singleDefSpill(false) , writeThru(false) , copyReg(false) , moveReg(false) , isPhysRegRef(false) , isFixedRegRef(false) , isLocalDefUse(false) , delayRegFree(false) , outOfOrder(false) #ifdef DEBUG , minRegCandidateCount(1) , rpNum(0) #endif { } Interval* getInterval() { assert(!isPhysRegRef); return (Interval*)referent; } void setInterval(Interval* i) { referent = i; isPhysRegRef = false; } RegRecord* getReg() { assert(isPhysRegRef); return (RegRecord*)referent; } void setReg(RegRecord* r) { referent = r; isPhysRegRef = true; registerAssignment = genRegMask(r->regNum); } regNumber assignedReg() { if (registerAssignment == RBM_NONE) { return REG_NA; } return genRegNumFromMask(registerAssignment); } // Returns true if it is a reference on a GenTree node. bool IsActualRef() { switch (refType) { case RefTypeDef: case RefTypeUse: #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE case RefTypeUpperVectorSave: case RefTypeUpperVectorRestore: #endif return true; // These must always be marked RegOptional. case RefTypeExpUse: case RefTypeParamDef: case RefTypeDummyDef: case RefTypeZeroInit: assert(RegOptional()); return false; default: return false; } } bool IsPhysRegRef() { return ((refType == RefTypeFixedReg) || (refType == RefTypeKill)); } void setRegOptional(bool val) { regOptional = val; } // Returns true if this ref position is to be allocated // a reg only if it is profitable. bool RegOptional() { // TODO-CQ: Right now if a ref position is marked as // copyreg or movereg, then it is not treated as // 'allocate if profitable'. This is an implementation // limitation that needs to be addressed. return regOptional && !copyReg && !moveReg; } void setMultiRegIdx(unsigned idx) { multiRegIdx = idx; assert(multiRegIdx == idx); } unsigned getMultiRegIdx() { return multiRegIdx; } LsraLocation getRefEndLocation() { return delayRegFree ? nodeLocation + 1 : nodeLocation; } RefPosition* getRangeEndRef() { if (lastUse || nextRefPosition == nullptr || spillAfter) { return this; } // It would seem to make sense to only return 'nextRefPosition' if it is a lastUse, // and otherwise return 'lastRefPosition', but that tends to excessively lengthen // the range for heuristic purposes. // TODO-CQ: Look into how this might be improved.
return nextRefPosition; } LsraLocation getRangeEndLocation() { return getRangeEndRef()->getRefEndLocation(); } bool isIntervalRef() { return (!IsPhysRegRef() && (referent != nullptr)); } // isFixedRefOfRegMask indicates that the RefPosition has a fixed assignment to the register // specified by the given mask bool isFixedRefOfRegMask(regMaskTP regMask) { assert(genMaxOneBit(regMask)); return (registerAssignment == regMask); } // isFixedRefOfReg indicates that the RefPosition has a fixed assignment to the given register bool isFixedRefOfReg(regNumber regNum) { return (isFixedRefOfRegMask(genRegMask(regNum))); } #ifdef DEBUG // operator= copies everything except 'rpNum', which must remain unique RefPosition& operator=(const RefPosition& rp) { unsigned rpNumSave = rpNum; memcpy(this, &rp, sizeof(rp)); rpNum = rpNumSave; return *this; } void dump(LinearScan* linearScan); #endif // DEBUG }; #ifdef DEBUG void dumpRegMask(regMaskTP regs); #endif // DEBUG /*****************************************************************************/ #endif //_LSRA_H_ /*****************************************************************************/
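// ----------------------------------------------------------------------------
// Illustrative sketch (not part of lsra.h): the even/odd register pairing that
// getFreeCandidates() and getRegMask() rely on for TYP_DOUBLE on ARM can be
// checked in isolation. All masks and register indices below are made up.
// ----------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

int main()
{
    // Hypothetical float-register file: bit i set <=> single-precision reg si is free.
    const uint64_t available = 0b0111; // s0, s1, s2 free; s3 busy

    // A double starting at even register r occupies bits r and r+1,
    // mirroring: regMask |= (regMask << 1);
    auto doubleMask = [](int evenReg) {
        return (uint64_t{1} << evenReg) | (uint64_t{1} << (evenReg + 1));
    };

    // getFreeCandidates() keeps only candidates whose odd half is also free,
    // mirroring: result &= (m_AvailableRegs >> 1). Only even-register bits are
    // meaningful here, since double candidates are always even-numbered registers.
    uint64_t doubleCandidates = available & (available >> 1);

    assert((doubleCandidates & (uint64_t{1} << 0)) != 0); // d0 = s0/s1 is allocatable
    assert((doubleCandidates & (uint64_t{1} << 2)) == 0); // d1 = s2/s3 is not: s3 is busy
    assert((available & doubleMask(0)) == doubleMask(0)); // both halves of d0 are free
    return 0;
}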
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/coreclr/vm/stubmgr.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // StubMgr.h // // // The stub manager exists so that the debugger can accurately step through // the myriad stubs & wrappers which exist in the EE, without imposing undue // overhead on the stubs themselves. // // Each type of stub (except those which the debugger can treat as atomic operations) // needs to have a stub manager to represent it. The stub manager is responsible for // (a) identifying the stub as such, and // (b) tracing into the stub & reporting what the stub will call. This // report can consist of // (i) a managed code address // (ii) an unmanaged code address // (iii) another stub address // (iv) a "frame patch" address - that is, an address in the stub, // which the debugger can patch. When the patch is hit, the debugger // will query the topmost frame to trace itself. (Thus this is // a way of deferring the trace logic to the frame which the stub // will push.) // // The set of stub managers is extensible, but should be kept to a reasonable number // as they are currently linearly searched & queried for each stub. // // // IMPORTANT IMPLEMENTATION NOTE: Due to code versioning, tracing through a jitted code // call is a speculative exercise. A trace could predict that calling method Foo would run // jitted code at address 0x1234; however, afterwards code versioning redirects Foo to call // an alternate jitted code body at address 0x5678. To handle this, stub managers should // either: // a) stop tracing at offset zero of the newly called jitted code. The debugger knows // to treat offset 0 in jitted code as potentially being any jitted code instance // b) trace all the way through the jitted method such that regardless of which jitted // code instance gets called the trace will still end at the predicted location. // // If we wanted to be more rigorous about this we should probably have different trace // results for intra-jitted and inter-jitted traces but given the relative // stability of this part of the code I haven't attacked that problem right now. It does // work as-is. // #ifndef __stubmgr_h__ #define __stubmgr_h__ #include "simplerwlock.hpp" #include "lockedrangelist.h" // When 'TraceStub' returns, it gives the address of where the 'target' is for a stub. // TraceType indicates what this 'target' is enum TraceType { TRACE_ENTRY_STUB, // Stub goes to an unmanaged entry stub TRACE_STUB, // Stub goes to another stub TRACE_UNMANAGED, // Stub goes to unmanaged code TRACE_MANAGED, // Stub goes to Jitted code TRACE_UNJITTED_METHOD, // Is the prestub, since there is no code, the address will actually be a MethodDesc* TRACE_FRAME_PUSH, // Don't know where stub goes, stop at address, and then ask the frame that is on the stack TRACE_MGR_PUSH, // Don't know where stub goes, stop at address then call TraceManager() below to find out TRACE_OTHER // We are going somewhere you can't step into (eg. ee helper function) }; class StubManager; class SString; class DebuggerRCThread; enum StubCodeBlockKind : int; // A TraceDestination describes where code is going to call. This can be used by the Debugger's Step-In functionality // to skip through stubs and place a patch directly at a call's target. // TDs are supplied by the stub managers.
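//
// For example (an illustrative chain, not a specific scenario from this file):
// stepping into a not-yet-jitted method might first yield TRACE_STUB for a
// precode, then TRACE_UNJITTED_METHOD carrying the MethodDesc*, and finally
// TRACE_MANAGED once the JIT produces code. StubManager::FollowTrace() below
// iterates TraceStub() through exactly such TRACE_STUB hops.
//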
class TraceDestination { public: friend class DebuggerRCThread; TraceDestination() { } #ifdef _DEBUG // Get a string representation of this TraceDestination // Uses the supplied buffer to store the memory (or may return a string literal). // This will also print the TD's arguments. const WCHAR * DbgToString(SString &buffer); #endif // Initialize for unmanaged code. // The addr is in unmanaged code. Used for Step-in from managed to native. void InitForUnmanaged(PCODE addr) { this->type = TRACE_UNMANAGED; this->address = addr; this->stubManager = NULL; } // The addr is inside jitted code (eg, there's a JitManaged that will claim it) void InitForManaged(PCODE addr) { this->type = TRACE_MANAGED; this->address = addr; this->stubManager = NULL; } // Initialize for an unmanaged entry stub. void InitForUnmanagedStub(PCODE addr) { this->type = TRACE_ENTRY_STUB; this->address = addr; this->stubManager = NULL; } // Initialize for a stub. void InitForStub(PCODE addr) { this->type = TRACE_STUB; this->address = addr; this->stubManager = NULL; } // Init for a managed unjitted method. // This will place an IL patch that will get bound when the debugger gets a Jit complete // notification for this method. // If pDesc is a wrapper methoddesc, we will unwrap it. void InitForUnjittedMethod(MethodDesc * pDesc); // Place a patch at the given addr, and then when it's hit, // call pStubManager->TraceManager() to get the next TraceDestination. void InitForManagerPush(PCODE addr, StubManager * pStubManager) { this->type = TRACE_MGR_PUSH; this->address = addr; this->stubManager = pStubManager; } // Place a patch at the given addr, and then when it's hit // call GetThread()->GetFrame()->TraceFrame() to get the next TraceDestination. // This address must be safe to run a callstack at. void InitForFramePush(PCODE addr) { this->type = TRACE_FRAME_PUSH; this->address = addr; this->stubManager = NULL; } // Nobody recognized the target address. We will not be able to step-in to it. // This is ok if the target just calls into mscorwks (such as an Fcall) because // there's no managed code to step in to, and we don't support debugging the CLR // itself, so there's no native code to step into either. void InitForOther(PCODE addr) { this->type = TRACE_OTHER; this->address = addr; this->stubManager = NULL; } // Accessors TraceType GetTraceType() { return type; } PCODE GetAddress() { LIMITED_METHOD_CONTRACT; _ASSERTE(type != TRACE_UNJITTED_METHOD); return address; } MethodDesc* GetMethodDesc() { LIMITED_METHOD_CONTRACT; _ASSERTE(type == TRACE_UNJITTED_METHOD); return pDesc; } StubManager * GetStubManager() { return stubManager; } // Expose this b/c DebuggerPatchTable::AddPatchForAddress() needs it. // Ideally we'd get rid of this. void Bad_SetTraceType(TraceType t) { this->type = t; } private: TraceType type; // The kind of code the stub is going to PCODE address; // Where the stub is going StubManager *stubManager; // The manager that claims this stub MethodDesc *pDesc; }; // For logging #ifdef LOGGING void LogTraceDestination(const char * szHint, PCODE stubAddr, TraceDestination * pTrace); #define LOG_TRACE_DESTINATION(_tracedestination, stubAddr, _stHint) LogTraceDestination(_stHint, stubAddr, _tracedestination) #else #define LOG_TRACE_DESTINATION(_tracedestination, stubAddr, _stHint) #endif typedef VPTR(class StubManager) PTR_StubManager; class StubManager { friend class StubManagerIterator; VPTR_BASE_VTABLE_CLASS(StubManager) public: // Startup and shutdown the global stubmanager service. 
static void InitializeStubManagers(); static void TerminateStubManagers(); // Does any stub manager recognize this EIP? static BOOL IsStub(PCODE stubAddress) { WRAPPER_NO_CONTRACT; return FindStubManager(stubAddress) != NULL; } // Find the stub manager for a given code address static PTR_StubManager FindStubManager(PCODE stubAddress); // Look for stubAddress; if found, return TRUE and set 'trace' to the stub's target static BOOL TraceStub(PCODE stubAddress, TraceDestination *trace); // If 'trace' indicates TRACE_STUB, keep calling TraceStub on 'trace' until you get out of all stubs. // Returns true if successful static BOOL FollowTrace(TraceDestination *trace); #ifdef DACCESS_COMPILE static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); #endif static void AddStubManager(StubManager *mgr); // NOTE: Very important when using this. It is not thread safe, except in this very // limited scenario: the thread must have the runtime suspended. static void UnlinkStubManager(StubManager *mgr); #ifndef DACCESS_COMPILE StubManager(); virtual ~StubManager(); #endif #ifdef _DEBUG // Debug helper to help identify stub-managers. Make it pure to force stub managers to implement it. virtual const char * DbgGetName() = 0; #endif // Only stub managers that return 'TRACE_MGR_PUSH' as a trace type need to implement this function. // Fills in 'trace' (the target), and 'pRetAddr' (the method that called the stub) (this is needed // as a 'fall back' so that the debugger can at least stop when the stub returns.) virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr) { LIMITED_METHOD_CONTRACT; _ASSERTE(!"Default impl of TraceManager should never be called!"); return FALSE; } // The worker for IsStub. This calls CheckIsStub_Internal, but wraps it w/ // a try-catch. BOOL CheckIsStub_Worker(PCODE stubStartAddress); #ifdef _DEBUG public: //----------------------------------------------------------------------------- // Debugging StubManager bugs is very painful. You need to figure out // how you got to where you are and which stub-manager is at fault. // To help with this, we track a rolling log so that we can give very // informative asserts. This log is not thread-safe, but we really only expect // a single stub-manager usage at a time. // // A stub manager for a step-in operation may be used across // both the helper thread and then the managed thread doing the step-in. // These threads will coordinate to have exclusive access (the helper will only access // when stopped; the managed thread will only access when running). // // It's also possible (but rare) for a single thread to have multiple step-in operations. // Since that's so rare, no present need to expand our logging to support it. //----------------------------------------------------------------------------- static bool IsStubLoggingEnabled(); // Call to reset the log. This is used at the start of a new step-operation. static void DbgBeginLog(TADDR addrCallInstruction, TADDR addrCallTarget); static void DbgFinishLog(); // Log an arbitrary string. This is a nop if it's outside the Begin/Finish window. // We could consider making each log entry type-safe (and thus avoid the string operations). static void DbgWriteLog(const CHAR *format, ...); // Get the log as a string. static void DbgGetLog(SString * pStringOut); protected: // Implement log as a SString. static SString * s_pDbgStubManagerLog; static CrstStatic s_DbgLogCrst; #endif protected: // Each stub manager implements this. // This may throw, AV, etc depending on the implementation. 
This should not // be called directly unless you know exactly what you're doing. virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress) = 0; // The worker for TraceStub virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) = 0; #ifdef _DEBUG_IMPL static BOOL IsSingleOwner(PCODE stubAddress, StubManager * pOwner); #endif #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); public: // This is used by DAC to provide more information on who owns a stub. virtual LPCWSTR GetStubManagerName(PCODE addr) = 0; #endif private: SPTR_DECL(StubManager, g_pFirstManager); PTR_StubManager m_pNextManager; static CrstStatic s_StubManagerListCrst; }; //----------------------------------------------------------- // Stub manager for the prestub. Although there is just one, it has // unique behavior so it gets its own stub manager. //----------------------------------------------------------- class ThePreStubManager : public StubManager { VPTR_VTABLE_CLASS(ThePreStubManager, StubManager) public: #ifndef DACCESS_COMPILE ThePreStubManager() { LIMITED_METHOD_CONTRACT; } #endif #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "ThePreStubManager"; } #endif virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifndef DACCESS_COMPILE static void Init(void); #endif #ifdef DACCESS_COMPILE protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("ThePreStub"); } #endif }; // ------------------------------------------------------- // Stub manager classes for method desc prestubs & normal // frame-pushing, StubLinker created stubs // ------------------------------------------------------- typedef VPTR(class PrecodeStubManager) PTR_PrecodeStubManager; class PrecodeStubManager : public StubManager { VPTR_VTABLE_CLASS(PrecodeStubManager, StubManager) public: SPTR_DECL(PrecodeStubManager, g_pManager); #ifdef _DEBUG // Debug helper to help identify stub-managers. virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "PrecodeStubManager"; } #endif static void Init(); #ifndef DACCESS_COMPILE PrecodeStubManager() {LIMITED_METHOD_CONTRACT;} ~PrecodeStubManager() {WRAPPER_NO_CONTRACT;} #endif public: virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifndef DACCESS_COMPILE virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr); #endif #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("MethodDescPrestub"); } #endif }; // Note that this stub was written by a debugger guy, and thus when he refers to 'multicast' // stub, he really means multi or single cast stub. This was done b/c the same stub // services both types of stub. // Note from the debugger guy: the way to understand what this manager does is to // first grok EmitMulticastInvoke for the platform you're working on (right now, just x86). // Then return here, and understand that (for x86) the only way we know which method // we're going to invoke next is by inspecting EDI when we've got the debuggee stopped // in the stub, and so our trace frame will either (FRAME_PUSH) put a breakpoint // in the stub, or (if we hit the BP) examine EDI, etc, & figure out where we're going next. 
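//
// In outline (a sketch only, not the actual implementation), a TraceManager for
// such a stub reads the relevant register out of the stopped thread's context
// and classifies the target, e.g. on x86:
//
//   *pRetAddr   = (BYTE*)StubManagerHelpers::GetReturnAddress(pContext);
//   BYTE* pbDel = (BYTE*)pContext->Edi; // hypothetical: the delegate object
//   return DelegateInvokeStubManager::TraceDelegateObject(pbDel, trace);
//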
typedef VPTR(class StubLinkStubManager) PTR_StubLinkStubManager; class StubLinkStubManager : public StubManager { VPTR_VTABLE_CLASS(StubLinkStubManager, StubManager) public: #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "StubLinkStubManager"; } #endif SPTR_DECL(StubLinkStubManager, g_pManager); static void Init(); #ifndef DACCESS_COMPILE StubLinkStubManager() : StubManager(), m_rangeList() {LIMITED_METHOD_CONTRACT;} ~StubLinkStubManager() {WRAPPER_NO_CONTRACT;} #endif protected: LockedRangeList m_rangeList; public: // Get dac-ized pointer to rangelist. PTR_RangeList GetRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(StubLinkStubManager, this, m_rangeList); return PTR_RangeList(addr); } virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifndef DACCESS_COMPILE virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr); #endif #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("StubLinkStub"); } #endif } ; // Stub manager for thunks. typedef VPTR(class ThunkHeapStubManager) PTR_ThunkHeapStubManager; class ThunkHeapStubManager : public StubManager { VPTR_VTABLE_CLASS(ThunkHeapStubManager, StubManager) public: SPTR_DECL(ThunkHeapStubManager, g_pManager); static void Init(); #ifndef DACCESS_COMPILE ThunkHeapStubManager() : StubManager(), m_rangeList() { LIMITED_METHOD_CONTRACT; } ~ThunkHeapStubManager() {WRAPPER_NO_CONTRACT;} #endif #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "ThunkHeapStubManager"; } #endif protected: LockedRangeList m_rangeList; public: // Get dac-ized pointer to rangelist. PTR_RangeList GetRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(ThunkHeapStubManager, this, m_rangeList); return PTR_RangeList(addr); } virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); private: virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("ThunkHeapStub"); } #endif }; // // Stub manager for jump stubs created by ExecutionManager::jumpStub() // These are currently used only on the 64-bit targets IA64 and AMD64 // typedef VPTR(class JumpStubStubManager) PTR_JumpStubStubManager; class JumpStubStubManager : public StubManager { VPTR_VTABLE_CLASS(JumpStubStubManager, StubManager) public: SPTR_DECL(JumpStubStubManager, g_pManager); static void Init(); #ifndef DACCESS_COMPILE JumpStubStubManager() {LIMITED_METHOD_CONTRACT;} ~JumpStubStubManager() {WRAPPER_NO_CONTRACT;} #endif #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "JumpStubStubManager"; } #endif virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("JumpStub"); } #endif }; // // Stub manager for code sections. It forwards the query to the more appropriate // stub manager, or handles the query itself. 
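//
// Usage sketch (hypothetical address value):
//
//   PCODE addr = /* some code address the debugger stopped at */;
//   StubCodeBlockKind kind = RangeSectionStubManager::GetStubKind(addr);
//   // 'kind' determines which manager's logic should continue the trace.
//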
// typedef VPTR(class RangeSectionStubManager) PTR_RangeSectionStubManager; class RangeSectionStubManager : public StubManager { VPTR_VTABLE_CLASS(RangeSectionStubManager, StubManager) public: SPTR_DECL(RangeSectionStubManager, g_pManager); static void Init(); #ifndef DACCESS_COMPILE RangeSectionStubManager() {LIMITED_METHOD_CONTRACT;} ~RangeSectionStubManager() {WRAPPER_NO_CONTRACT;} #endif static StubCodeBlockKind GetStubKind(PCODE stubStartAddress); static PCODE GetMethodThunkTarget(PCODE stubStartAddress); public: #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "RangeSectionStubManager"; } #endif virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); private: virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifndef DACCESS_COMPILE virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr); #endif #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr); #endif }; // // This is the stub manager for IL stubs. // typedef VPTR(class ILStubManager) PTR_ILStubManager; #ifdef FEATURE_COMINTEROP struct ComPlusCallInfo; #endif // FEATURE_COMINTEROP class ILStubManager : public StubManager { VPTR_VTABLE_CLASS(ILStubManager, StubManager) public: static void Init(); #ifndef DACCESS_COMPILE ILStubManager() : StubManager() {WRAPPER_NO_CONTRACT;} ~ILStubManager() { CONTRACTL { NOTHROW; GC_NOTRIGGER; CAN_TAKE_LOCK; // StubManager::UnlinkStubManager uses a crst } CONTRACTL_END; } #endif public: #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "ILStubManager"; } #endif virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); private: virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifndef DACCESS_COMPILE virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr); #endif #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("ILStub"); } #endif }; // This is used to recognize // GenericComPlusCallStub() // VarargPInvokeStub() // GenericPInvokeCalliHelper() typedef VPTR(class InteropDispatchStubManager) PTR_InteropDispatchStubManager; class InteropDispatchStubManager : public StubManager { VPTR_VTABLE_CLASS(InteropDispatchStubManager, StubManager) public: static void Init(); #ifndef DACCESS_COMPILE InteropDispatchStubManager() : StubManager() {WRAPPER_NO_CONTRACT;} ~InteropDispatchStubManager() {WRAPPER_NO_CONTRACT;} #endif #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "InteropDispatchStubManager"; } #endif virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); private: virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifndef DACCESS_COMPILE virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr); #endif #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("InteropDispatchStub"); } #endif }; // // Since we don't generate delegate invoke stubs at runtime on WIN64, we // can't use the StubLinkStubManager for these stubs. Instead, we create // an additional DelegateInvokeStubManager instead. 
// typedef VPTR(class DelegateInvokeStubManager) PTR_DelegateInvokeStubManager; class DelegateInvokeStubManager : public StubManager { VPTR_VTABLE_CLASS(DelegateInvokeStubManager, StubManager) public: SPTR_DECL(DelegateInvokeStubManager, g_pManager); static void Init(); #if !defined(DACCESS_COMPILE) DelegateInvokeStubManager() : StubManager(), m_rangeList() {LIMITED_METHOD_CONTRACT;} ~DelegateInvokeStubManager() {WRAPPER_NO_CONTRACT;} #endif // DACCESS_COMPILE BOOL AddStub(Stub* pStub); void RemoveStub(Stub* pStub); #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "DelegateInvokeStubManager"; } #endif virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); #if !defined(DACCESS_COMPILE) virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr); static BOOL TraceDelegateObject(BYTE *orDel, TraceDestination *trace); #endif // DACCESS_COMPILE private: virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); protected: LockedRangeList m_rangeList; public: // Get dac-ized pointer to rangelist. PTR_RangeList GetRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(DelegateInvokeStubManager, this, m_rangeList); return PTR_RangeList(addr); } #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("DelegateInvokeStub"); } #endif }; #if defined(TARGET_X86) && !defined(UNIX_X86_ABI) //--------------------------------------------------------------------------------------- // // This is the stub manager to help the managed debugger step into a tail call. // It helps the debugger trace through JIT_TailCall(). // typedef VPTR(class TailCallStubManager) PTR_TailCallStubManager; class TailCallStubManager : public StubManager { VPTR_VTABLE_CLASS(TailCallStubManager, StubManager) public: static void Init(); #if !defined(DACCESS_COMPILE) TailCallStubManager() : StubManager() {WRAPPER_NO_CONTRACT;} ~TailCallStubManager() {WRAPPER_NO_CONTRACT;} virtual BOOL TraceManager(Thread * pThread, TraceDestination * pTrace, T_CONTEXT * pContext, BYTE ** ppRetAddr); static bool IsTailCallJitHelper(PCODE code); #endif // DACCESS_COMPILE #if defined(_DEBUG) virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "TailCallStubManager"; } #endif // _DEBUG virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); private: virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination * pTrace); #if defined(DACCESS_COMPILE) virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) {LIMITED_METHOD_CONTRACT; return W("TailCallStub");} #endif // !DACCESS_COMPILE }; #else // TARGET_X86 && UNIX_X86_ABI class TailCallStubManager { public: static void Init() { } static bool IsTailCallJitHelper(PCODE code) { return false; } }; #endif // TARGET_X86 && UNIX_X86_ABI // // Helpers for common value locations in stubs to make stub managers more portable // class StubManagerHelpers { public: static PCODE GetReturnAddress(T_CONTEXT * pContext) { #if defined(TARGET_X86) return *dac_cast<PTR_PCODE>(pContext->Esp); #elif defined(TARGET_AMD64) return *dac_cast<PTR_PCODE>(pContext->Rsp); #elif defined(TARGET_ARM) return pContext->Lr; #elif defined(TARGET_ARM64) return pContext->Lr; #else PORTABILITY_ASSERT("StubManagerHelpers::GetReturnAddress"); return NULL; #endif } static PTR_Object GetThisPtr(T_CONTEXT * pContext) { #if 
defined(TARGET_X86) return dac_cast<PTR_Object>(pContext->Ecx); #elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI return dac_cast<PTR_Object>(pContext->Rdi); #else return dac_cast<PTR_Object>(pContext->Rcx); #endif #elif defined(TARGET_ARM) return dac_cast<PTR_Object>((TADDR)pContext->R0); #elif defined(TARGET_ARM64) return dac_cast<PTR_Object>(pContext->X0); #else PORTABILITY_ASSERT("StubManagerHelpers::GetThisPtr"); return NULL; #endif } static PCODE GetTailCallTarget(T_CONTEXT * pContext) { #if defined(TARGET_X86) return pContext->Eax; #elif defined(TARGET_AMD64) return pContext->Rax; #elif defined(TARGET_ARM) return pContext->R12; #elif defined(TARGET_ARM64) return pContext->X12; #else PORTABILITY_ASSERT("StubManagerHelpers::GetTailCallTarget"); return NULL; #endif } static TADDR GetHiddenArg(T_CONTEXT * pContext) { #if defined(TARGET_X86) return pContext->Eax; #elif defined(TARGET_AMD64) return pContext->R10; #elif defined(TARGET_ARM) return pContext->R12; #elif defined(TARGET_ARM64) return pContext->X12; #else PORTABILITY_ASSERT("StubManagerHelpers::GetHiddenArg"); return NULL; #endif } static PCODE GetRetAddrFromMulticastILStubFrame(T_CONTEXT * pContext) { /* Following is the callstack corresponding to context received by ILStubManager::TraceManager. This function returns the return address (user code address) where control should return after all delegates in multicast delegate have been executed. StubHelpers::MulticastDebuggerTraceHelper IL_STUB_MulticastDelegate_Invoke UserCode which invokes multicast delegate <--- */ #if defined(TARGET_X86) return *((PCODE *)pContext->Ebp + 1); #elif defined(TARGET_AMD64) T_CONTEXT context(*pContext); Thread::VirtualUnwindCallFrame(&context); Thread::VirtualUnwindCallFrame(&context); return context.Rip; #elif defined(TARGET_ARM) return *((PCODE *)((TADDR)pContext->R11) + 1); #elif defined(TARGET_ARM64) return *((PCODE *)pContext->Fp + 1); #else PORTABILITY_ASSERT("StubManagerHelpers::GetRetAddrFromMulticastILStubFrame"); return NULL; #endif } static TADDR GetSecondArg(T_CONTEXT * pContext) { #if defined(TARGET_X86) return pContext->Edx; #elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI return pContext->Rsi; #else return pContext->Rdx; #endif #elif defined(TARGET_ARM) return pContext->R1; #elif defined(TARGET_ARM64) return pContext->X1; #else PORTABILITY_ASSERT("StubManagerHelpers::GetSecondArg"); return NULL; #endif } }; #endif // !__stubmgr_h__
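// A minimal sketch of driving the tracing API declared in this header from a
// hypothetical debugger-side helper: ask the stub managers where a code
// address leads, then follow through any chain of stubs. GetUltimateTarget is
// invented for illustration; TraceStub, FollowTrace, GetTraceType and
// TRACE_OTHER come from the header.
static BOOL GetUltimateTarget(PCODE addr, TraceDestination *trace)
{
    if (!StubManager::TraceStub(addr, trace))
        return FALSE;                    // no stub manager recognized the address
    if (!StubManager::FollowTrace(trace))
        return FALSE;                    // the trace dead-ended inside a stub
    // The trace now names managed/unmanaged code or an unjitted MethodDesc.
    return trace->GetTraceType() != TRACE_OTHER;
}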
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // StubMgr.h // // // The stub manager exists so that the debugger can accurately step through // the myriad stubs & wrappers which exist in the EE, without imposing undue // overhead on the stubs themselves. // // Each type of stub (except those which the debugger can treat as atomic operations) // needs to have a stub manager to represent it. The stub manager is responsible for // (a) identifying the stub as such, and // (b) tracing into the stub & reporting what the stub will call. This // report can consist of // (i) a managed code address // (ii) an unmanaged code address // (iii) another stub address // (iv) a "frame patch" address - that is, an address in the stub, // which the debugger can patch. When the patch is hit, the debugger // will query the topmost frame to trace itself. (Thus this is // a way of deferring the trace logic to the frame which the stub // will push.) // // The set of stub managers is extensible, but should be kept to a reasonable number // as they are currently linearly searched & queried for each stub. // // // IMPORTANT IMPLEMENTATION NOTE: Due to code versioning, tracing through a jitted code // call is a speculative exercise. A trace could predict that calling method Foo would run // jitted code at address 0x1234, however afterwards code versioning redirects Foo to call // an alternate jitted code body at address 0x5678. To handle this stub managers should // either: // a) stop tracing at offset zero of the newly called jitted code. The debugger knows // to treat offset 0 in jitted code as potentially being any jitted code instance // b) trace all the way through the jitted method such that regardless of which jitted // code instance gets called the trace will still end at the predicted location. // // If we wanted to be more rigorous about this we should probably have different trace // results for intra-jitted and inter-jitted trace results but given the relative // stability of this part of the code I haven't attacked that problem right now. It does // work as-is. // #ifndef __stubmgr_h__ #define __stubmgr_h__ #include "simplerwlock.hpp" #include "lockedrangelist.h" // When 'TraceStub' returns, it gives the address of where the 'target' is for a stub' // TraceType indicates what this 'target' is enum TraceType { TRACE_ENTRY_STUB, // Stub goes to an unmanaged entry stub TRACE_STUB, // Stub goes to another stub TRACE_UNMANAGED, // Stub goes to unmanaged code TRACE_MANAGED, // Stub goes to Jitted code TRACE_UNJITTED_METHOD, // Is the prestub, since there is no code, the address will actually be a MethodDesc* TRACE_FRAME_PUSH, // Don't know where stub goes, stop at address, and then ask the frame that is on the stack TRACE_MGR_PUSH, // Don't know where stub goes, stop at address then call TraceManager() below to find out TRACE_OTHER // We are going somewhere you can't step into (eg. ee helper function) }; class StubManager; class SString; class DebuggerRCThread; enum StubCodeBlockKind : int; // A TraceDestination describes where code is going to call. This can be used by the Debugger's Step-In functionality // to skip through stubs and place a patch directly at a call's target. // TD are supplied by the stubmanagers. 
class TraceDestination { public: friend class DebuggerRCThread; TraceDestination() { } #ifdef _DEBUG // Get a string representation of this TraceDestination // Uses the supplied buffer to store the memory (or may return a string literal). // This will also print the TD's arguments. const WCHAR * DbgToString(SString &buffer); #endif // Initialize for unmanaged code. // The addr is in unmanaged code. Used for Step-in from managed to native. void InitForUnmanaged(PCODE addr) { this->type = TRACE_UNMANAGED; this->address = addr; this->stubManager = NULL; } // The addr is inside jitted code (eg, there's a JitManaged that will claim it) void InitForManaged(PCODE addr) { this->type = TRACE_MANAGED; this->address = addr; this->stubManager = NULL; } // Initialize for an unmanaged entry stub. void InitForUnmanagedStub(PCODE addr) { this->type = TRACE_ENTRY_STUB; this->address = addr; this->stubManager = NULL; } // Initialize for a stub. void InitForStub(PCODE addr) { this->type = TRACE_STUB; this->address = addr; this->stubManager = NULL; } // Init for a managed unjitted method. // This will place an IL patch that will get bound when the debugger gets a Jit complete // notification for this method. // If pDesc is a wrapper methoddesc, we will unwrap it. void InitForUnjittedMethod(MethodDesc * pDesc); // Place a patch at the given addr, and then when it's hit, // call pStubManager->TraceManager() to get the next TraceDestination. void InitForManagerPush(PCODE addr, StubManager * pStubManager) { this->type = TRACE_MGR_PUSH; this->address = addr; this->stubManager = pStubManager; } // Place a patch at the given addr, and then when it's hit // call GetThread()->GetFrame()->TraceFrame() to get the next TraceDestination. // This address must be safe to run a callstack at. void InitForFramePush(PCODE addr) { this->type = TRACE_FRAME_PUSH; this->address = addr; this->stubManager = NULL; } // Nobody recognized the target address. We will not be able to step-in to it. // This is ok if the target just calls into mscorwks (such as an Fcall) because // there's no managed code to step in to, and we don't support debugging the CLR // itself, so there's no native code to step into either. void InitForOther(PCODE addr) { this->type = TRACE_OTHER; this->address = addr; this->stubManager = NULL; } // Accessors TraceType GetTraceType() { return type; } PCODE GetAddress() { LIMITED_METHOD_CONTRACT; _ASSERTE(type != TRACE_UNJITTED_METHOD); return address; } MethodDesc* GetMethodDesc() { LIMITED_METHOD_CONTRACT; _ASSERTE(type == TRACE_UNJITTED_METHOD); return pDesc; } StubManager * GetStubManager() { return stubManager; } // Expose this b/c DebuggerPatchTable::AddPatchForAddress() needs it. // Ideally we'd get rid of this. void Bad_SetTraceType(TraceType t) { this->type = t; } private: TraceType type; // The kind of code the stub is going to PCODE address; // Where the stub is going StubManager *stubManager; // The manager that claims this stub MethodDesc *pDesc; }; // For logging #ifdef LOGGING void LogTraceDestination(const char * szHint, PCODE stubAddr, TraceDestination * pTrace); #define LOG_TRACE_DESTINATION(_tracedestination, stubAddr, _stHint) LogTraceDestination(_stHint, stubAddr, _tracedestination) #else #define LOG_TRACE_DESTINATION(_tracedestination, stubAddr, _stHint) #endif typedef VPTR(class StubManager) PTR_StubManager; class StubManager { friend class StubManagerIterator; VPTR_BASE_VTABLE_CLASS(StubManager) public: // Startup and shutdown the global stubmanager service. 
static void InitializeStubManagers(); static void TerminateStubManagers(); // Does any sub manager recognise this EIP? static BOOL IsStub(PCODE stubAddress) { WRAPPER_NO_CONTRACT; return FindStubManager(stubAddress) != NULL; } // Find stub manager for given code address static PTR_StubManager FindStubManager(PCODE stubAddress); // Look for stubAddress, if found return TRUE, and set 'trace' to static BOOL TraceStub(PCODE stubAddress, TraceDestination *trace); // if 'trace' indicates TRACE_STUB, keep calling TraceStub on 'trace', until you get out of all stubs // returns true if successful static BOOL FollowTrace(TraceDestination *trace); #ifdef DACCESS_COMPILE static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); #endif static void AddStubManager(StubManager *mgr); // NOTE: Very important when using this. It is not thread safe, except in this very // limited scenario: the thread must have the runtime suspended. static void UnlinkStubManager(StubManager *mgr); #ifndef DACCESS_COMPILE StubManager(); virtual ~StubManager(); #endif #ifdef _DEBUG // Debug helper to help identify stub-managers. Make it pure to force stub managers to implement it. virtual const char * DbgGetName() = 0; #endif // Only Stubmanagers that return 'TRACE_MGR_PUSH' as a trace type need to implement this function // Fills in 'trace' (the target), and 'pRetAddr' (the method that called the stub) (this is needed // as a 'fall back' so that the debugger can at least stop when the stub returns. virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr) { LIMITED_METHOD_CONTRACT; _ASSERTE(!"Default impl of TraceManager should never be called!"); return FALSE; } // The worker for IsStub. This calls CheckIsStub_Internal, but wraps it w/ // a try-catch. BOOL CheckIsStub_Worker(PCODE stubStartAddress); #ifdef _DEBUG public: //----------------------------------------------------------------------------- // Debugging Stubmanager bugs is very painful. You need to figure out // how you go to where you got and which stub-manager is at fault. // To help with this, we track a rolling log so that we can give very // informative asserts. this log is not thread-safe, but we really only expect // a single stub-manager usage at a time. // // A stub manager for a step-in operation may be used across // both the helper thread and then the managed thread doing the step-in. // These threads will coordinate to have exclusive access (helper will only access // when stopped; and managed thread will only access when running). // // It's also possible (but rare) for a single thread to have multiple step-in operations. // Since that's so rare, no present need to expand our logging to support it. //----------------------------------------------------------------------------- static bool IsStubLoggingEnabled(); // Call to reset the log. This is used at the start of a new step-operation. static void DbgBeginLog(TADDR addrCallInstruction, TADDR addrCallTarget); static void DbgFinishLog(); // Log arbitrary string. This is a nop if it's outside the Begin/Finish window. // We could consider making each log entry type-safe (and thus avoid the string operations). static void DbgWriteLog(const CHAR *format, ...); // Get the log as a string. static void DbgGetLog(SString * pStringOut); protected: // Implement log as a SString. static SString * s_pDbgStubManagerLog; static CrstStatic s_DbgLogCrst; #endif protected: // Each stubmanaged implements this. // This may throw, AV, etc depending on the implementation. 
This should not // be called directly unless you know exactly what you're doing. virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress) = 0; // The worker for TraceStub virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) = 0; #ifdef _DEBUG_IMPL static BOOL IsSingleOwner(PCODE stubAddress, StubManager * pOwner); #endif #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); public: // This is used by DAC to provide more information on who owns a stub. virtual LPCWSTR GetStubManagerName(PCODE addr) = 0; #endif private: SPTR_DECL(StubManager, g_pFirstManager); PTR_StubManager m_pNextManager; static CrstStatic s_StubManagerListCrst; }; //----------------------------------------------------------- // Stub manager for the prestub. Although there is just one, it has // unique behavior so it gets its own stub manager. //----------------------------------------------------------- class ThePreStubManager : public StubManager { VPTR_VTABLE_CLASS(ThePreStubManager, StubManager) public: #ifndef DACCESS_COMPILE ThePreStubManager() { LIMITED_METHOD_CONTRACT; } #endif #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "ThePreStubManager"; } #endif virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifndef DACCESS_COMPILE static void Init(void); #endif #ifdef DACCESS_COMPILE protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("ThePreStub"); } #endif }; // ------------------------------------------------------- // Stub manager classes for method desc prestubs & normal // frame-pushing, StubLinker created stubs // ------------------------------------------------------- typedef VPTR(class PrecodeStubManager) PTR_PrecodeStubManager; class PrecodeStubManager : public StubManager { VPTR_VTABLE_CLASS(PrecodeStubManager, StubManager) public: SPTR_DECL(PrecodeStubManager, g_pManager); #ifdef _DEBUG // Debug helper to help identify stub-managers. virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "PrecodeStubManager"; } #endif static void Init(); #ifndef DACCESS_COMPILE PrecodeStubManager() {LIMITED_METHOD_CONTRACT;} ~PrecodeStubManager() {WRAPPER_NO_CONTRACT;} #endif public: virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifndef DACCESS_COMPILE virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr); #endif #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("MethodDescPrestub"); } #endif }; // Note that this stub was written by a debugger guy, and thus when he refers to 'multicast' // stub, he really means multi or single cast stub. This was done b/c the same stub // services both types of stub. // Note from the debugger guy: the way to understand what this manager does is to // first grok EmitMulticastInvoke for the platform you're working on (right now, just x86). // Then return here, and understand that (for x86) the only way we know which method // we're going to invoke next is by inspecting EDI when we've got the debuggee stopped // in the stub, and so our trace frame will either (FRAME_PUSH) put a breakpoint // in the stub, or (if we hit the BP) examine EDI, etc, & figure out where we're going next. 
typedef VPTR(class StubLinkStubManager) PTR_StubLinkStubManager; class StubLinkStubManager : public StubManager { VPTR_VTABLE_CLASS(StubLinkStubManager, StubManager) public: #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "StubLinkStubManager"; } #endif SPTR_DECL(StubLinkStubManager, g_pManager); static void Init(); #ifndef DACCESS_COMPILE StubLinkStubManager() : StubManager(), m_rangeList() {LIMITED_METHOD_CONTRACT;} ~StubLinkStubManager() {WRAPPER_NO_CONTRACT;} #endif protected: LockedRangeList m_rangeList; public: // Get dac-ized pointer to rangelist. PTR_RangeList GetRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(StubLinkStubManager, this, m_rangeList); return PTR_RangeList(addr); } virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifndef DACCESS_COMPILE virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr); #endif #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("StubLinkStub"); } #endif } ; // Stub manager for thunks. typedef VPTR(class ThunkHeapStubManager) PTR_ThunkHeapStubManager; class ThunkHeapStubManager : public StubManager { VPTR_VTABLE_CLASS(ThunkHeapStubManager, StubManager) public: SPTR_DECL(ThunkHeapStubManager, g_pManager); static void Init(); #ifndef DACCESS_COMPILE ThunkHeapStubManager() : StubManager(), m_rangeList() { LIMITED_METHOD_CONTRACT; } ~ThunkHeapStubManager() {WRAPPER_NO_CONTRACT;} #endif #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "ThunkHeapStubManager"; } #endif protected: LockedRangeList m_rangeList; public: // Get dac-ized pointer to rangelist. PTR_RangeList GetRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(ThunkHeapStubManager, this, m_rangeList); return PTR_RangeList(addr); } virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); private: virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("ThunkHeapStub"); } #endif }; // // Stub manager for jump stubs created by ExecutionManager::jumpStub() // These are currently used only on the 64-bit targets IA64 and AMD64 // typedef VPTR(class JumpStubStubManager) PTR_JumpStubStubManager; class JumpStubStubManager : public StubManager { VPTR_VTABLE_CLASS(JumpStubStubManager, StubManager) public: SPTR_DECL(JumpStubStubManager, g_pManager); static void Init(); #ifndef DACCESS_COMPILE JumpStubStubManager() {LIMITED_METHOD_CONTRACT;} ~JumpStubStubManager() {WRAPPER_NO_CONTRACT;} #endif #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "JumpStubStubManager"; } #endif virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("JumpStub"); } #endif }; // // Stub manager for code sections. It forwards the query to the more appropriate // stub manager, or handles the query itself. 
// typedef VPTR(class RangeSectionStubManager) PTR_RangeSectionStubManager; class RangeSectionStubManager : public StubManager { VPTR_VTABLE_CLASS(RangeSectionStubManager, StubManager) public: SPTR_DECL(RangeSectionStubManager, g_pManager); static void Init(); #ifndef DACCESS_COMPILE RangeSectionStubManager() {LIMITED_METHOD_CONTRACT;} ~RangeSectionStubManager() {WRAPPER_NO_CONTRACT;} #endif static StubCodeBlockKind GetStubKind(PCODE stubStartAddress); static PCODE GetMethodThunkTarget(PCODE stubStartAddress); public: #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "RangeSectionStubManager"; } #endif virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); private: virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifndef DACCESS_COMPILE virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr); #endif #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr); #endif }; // // This is the stub manager for IL stubs. // typedef VPTR(class ILStubManager) PTR_ILStubManager; #ifdef FEATURE_COMINTEROP struct ComPlusCallInfo; #endif // FEATURE_COMINTEROP class ILStubManager : public StubManager { VPTR_VTABLE_CLASS(ILStubManager, StubManager) public: static void Init(); #ifndef DACCESS_COMPILE ILStubManager() : StubManager() {WRAPPER_NO_CONTRACT;} ~ILStubManager() { CONTRACTL { NOTHROW; GC_NOTRIGGER; CAN_TAKE_LOCK; // StubManager::UnlinkStubManager uses a crst } CONTRACTL_END; } #endif public: #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "ILStubManager"; } #endif virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); private: virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifndef DACCESS_COMPILE virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr); #endif #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("ILStub"); } #endif }; // This is used to recognize // GenericComPlusCallStub() // VarargPInvokeStub() // GenericPInvokeCalliHelper() typedef VPTR(class InteropDispatchStubManager) PTR_InteropDispatchStubManager; class InteropDispatchStubManager : public StubManager { VPTR_VTABLE_CLASS(InteropDispatchStubManager, StubManager) public: static void Init(); #ifndef DACCESS_COMPILE InteropDispatchStubManager() : StubManager() {WRAPPER_NO_CONTRACT;} ~InteropDispatchStubManager() {WRAPPER_NO_CONTRACT;} #endif #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "InteropDispatchStubManager"; } #endif virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); private: virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); #ifndef DACCESS_COMPILE virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr); #endif #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("InteropDispatchStub"); } #endif }; // // Since we don't generate delegate invoke stubs at runtime on WIN64, we // can't use the StubLinkStubManager for these stubs. Instead, we create // an additional DelegateInvokeStubManager instead. 
// typedef VPTR(class DelegateInvokeStubManager) PTR_DelegateInvokeStubManager; class DelegateInvokeStubManager : public StubManager { VPTR_VTABLE_CLASS(DelegateInvokeStubManager, StubManager) public: SPTR_DECL(DelegateInvokeStubManager, g_pManager); static void Init(); #if !defined(DACCESS_COMPILE) DelegateInvokeStubManager() : StubManager(), m_rangeList() {LIMITED_METHOD_CONTRACT;} ~DelegateInvokeStubManager() {WRAPPER_NO_CONTRACT;} #endif // DACCESS_COMPILE BOOL AddStub(Stub* pStub); void RemoveStub(Stub* pStub); #ifdef _DEBUG virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "DelegateInvokeStubManager"; } #endif virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); #if !defined(DACCESS_COMPILE) virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, T_CONTEXT *pContext, BYTE **pRetAddr); static BOOL TraceDelegateObject(BYTE *orDel, TraceDestination *trace); #endif // DACCESS_COMPILE private: virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace); protected: LockedRangeList m_rangeList; public: // Get dac-ized pointer to rangelist. PTR_RangeList GetRangeList() { SUPPORTS_DAC; TADDR addr = PTR_HOST_MEMBER_TADDR(DelegateInvokeStubManager, this, m_rangeList); return PTR_RangeList(addr); } #ifdef DACCESS_COMPILE virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) { LIMITED_METHOD_CONTRACT; return W("DelegateInvokeStub"); } #endif }; #if defined(TARGET_X86) && !defined(UNIX_X86_ABI) //--------------------------------------------------------------------------------------- // // This is the stub manager to help the managed debugger step into a tail call. // It helps the debugger trace through JIT_TailCall(). // typedef VPTR(class TailCallStubManager) PTR_TailCallStubManager; class TailCallStubManager : public StubManager { VPTR_VTABLE_CLASS(TailCallStubManager, StubManager) public: static void Init(); #if !defined(DACCESS_COMPILE) TailCallStubManager() : StubManager() {WRAPPER_NO_CONTRACT;} ~TailCallStubManager() {WRAPPER_NO_CONTRACT;} virtual BOOL TraceManager(Thread * pThread, TraceDestination * pTrace, T_CONTEXT * pContext, BYTE ** ppRetAddr); static bool IsTailCallJitHelper(PCODE code); #endif // DACCESS_COMPILE #if defined(_DEBUG) virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "TailCallStubManager"; } #endif // _DEBUG virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); private: virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination * pTrace); #if defined(DACCESS_COMPILE) virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags); protected: virtual LPCWSTR GetStubManagerName(PCODE addr) {LIMITED_METHOD_CONTRACT; return W("TailCallStub");} #endif // !DACCESS_COMPILE }; #else // TARGET_X86 && UNIX_X86_ABI class TailCallStubManager { public: static void Init() { } static bool IsTailCallJitHelper(PCODE code) { return false; } }; #endif // TARGET_X86 && UNIX_X86_ABI // // Helpers for common value locations in stubs to make stub managers more portable // class StubManagerHelpers { public: static PCODE GetReturnAddress(T_CONTEXT * pContext) { #if defined(TARGET_X86) return *dac_cast<PTR_PCODE>(pContext->Esp); #elif defined(TARGET_AMD64) return *dac_cast<PTR_PCODE>(pContext->Rsp); #elif defined(TARGET_ARM) return pContext->Lr; #elif defined(TARGET_ARM64) return pContext->Lr; #else PORTABILITY_ASSERT("StubManagerHelpers::GetReturnAddress"); return NULL; #endif } static PTR_Object GetThisPtr(T_CONTEXT * pContext) { #if 
defined(TARGET_X86) return dac_cast<PTR_Object>(pContext->Ecx); #elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI return dac_cast<PTR_Object>(pContext->Rdi); #else return dac_cast<PTR_Object>(pContext->Rcx); #endif #elif defined(TARGET_ARM) return dac_cast<PTR_Object>((TADDR)pContext->R0); #elif defined(TARGET_ARM64) return dac_cast<PTR_Object>(pContext->X0); #else PORTABILITY_ASSERT("StubManagerHelpers::GetThisPtr"); return NULL; #endif } static PCODE GetTailCallTarget(T_CONTEXT * pContext) { #if defined(TARGET_X86) return pContext->Eax; #elif defined(TARGET_AMD64) return pContext->Rax; #elif defined(TARGET_ARM) return pContext->R12; #elif defined(TARGET_ARM64) return pContext->X12; #else PORTABILITY_ASSERT("StubManagerHelpers::GetTailCallTarget"); return NULL; #endif } static TADDR GetHiddenArg(T_CONTEXT * pContext) { #if defined(TARGET_X86) return pContext->Eax; #elif defined(TARGET_AMD64) return pContext->R10; #elif defined(TARGET_ARM) return pContext->R12; #elif defined(TARGET_ARM64) return pContext->X12; #else PORTABILITY_ASSERT("StubManagerHelpers::GetHiddenArg"); return NULL; #endif } static PCODE GetRetAddrFromMulticastILStubFrame(T_CONTEXT * pContext) { /* Following is the callstack corresponding to context received by ILStubManager::TraceManager. This function returns the return address (user code address) where control should return after all delegates in multicast delegate have been executed. StubHelpers::MulticastDebuggerTraceHelper IL_STUB_MulticastDelegate_Invoke UserCode which invokes multicast delegate <--- */ #if defined(TARGET_X86) return *((PCODE *)pContext->Ebp + 1); #elif defined(TARGET_AMD64) T_CONTEXT context(*pContext); Thread::VirtualUnwindCallFrame(&context); Thread::VirtualUnwindCallFrame(&context); return context.Rip; #elif defined(TARGET_ARM) return *((PCODE *)((TADDR)pContext->R11) + 1); #elif defined(TARGET_ARM64) return *((PCODE *)pContext->Fp + 1); #else PORTABILITY_ASSERT("StubManagerHelpers::GetRetAddrFromMulticastILStubFrame"); return NULL; #endif } static TADDR GetSecondArg(T_CONTEXT * pContext) { #if defined(TARGET_X86) return pContext->Edx; #elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI return pContext->Rsi; #else return pContext->Rdx; #endif #elif defined(TARGET_ARM) return pContext->R1; #elif defined(TARGET_ARM64) return pContext->X1; #else PORTABILITY_ASSERT("StubManagerHelpers::GetSecondArg"); return NULL; #endif } }; #endif // !__stubmgr_h__
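// A minimal sketch of implementing the stub-manager contract above.
// HypotheticalStubManager, IsInMyStubRegion and ResolveStubTarget are invented
// for illustration; only the virtual signatures and the TraceDestination
// InitFor* calls come from this header (DAC-only members are omitted).
class HypotheticalStubManager : public StubManager
{
public:
#ifdef _DEBUG
    virtual const char * DbgGetName() { return "HypotheticalStubManager"; }
#endif

    // Claim only addresses inside our (hypothetical) stub region.
    virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress)
    {
        return IsInMyStubRegion(stubStartAddress);
    }

    // Tell the debugger where the stub is going so it can plant a patch there.
    virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace)
    {
        PCODE target = ResolveStubTarget(stubStartAddress); // hypothetical decode
        if (target == NULL)
        {
            trace->InitForOther(stubStartAddress); // not steppable
        }
        else
        {
            trace->InitForManaged(target); // stub jumps into jitted code
        }
        return TRUE;
    }
};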
-1
dotnet/runtime
65889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
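One way the new "All"/"Any" comparison variants can be lowered on ARM64 is a per-lane compare followed by an across-lanes reduction. The C sketch below uses standard NEON intrinsics to illustrate the semantics for 32-bit lanes; it is illustrative only, not the PR's actual Mono codegen.

#include <arm_neon.h>
#include <stdbool.h>

/* vcgtq_s32 yields 0xFFFFFFFF or 0 per lane; a horizontal min/max then
   answers "are all lanes set?" / "is any lane set?". */
static bool greater_than_all_i32 (int32x4_t a, int32x4_t b)
{
    uint32x4_t mask = vcgtq_s32 (a, b);
    return vminvq_u32 (mask) != 0; /* every lane non-zero */
}

static bool greater_than_any_i32 (int32x4_t a, int32x4_t b)
{
    uint32x4_t mask = vcgtq_s32 (a, b);
    return vmaxvq_u32 (mask) != 0; /* at least one lane non-zero */
}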
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/native/public/mono/utils/mono-jemalloc.h
/**
 * \file
 *
 * Header for jemalloc registration code
 */

#ifndef __MONO_JEMALLOC_H__
#define __MONO_JEMALLOC_H__

#if defined(MONO_JEMALLOC_ENABLED)

#include <jemalloc/jemalloc.h>

/* Jemalloc can be configured in three ways.
 * 1. You can use it with library loading hacks at run-time
 * 2. You can use it as a global malloc replacement
 * 3. You can use it with a prefix. If you use it with a prefix, you have to explicitly name the malloc function.
 *
 * In order to make this feature able to be toggled at run-time, I chose to use a prefix of mono_je.
 * This mapping is captured below in the header, in the spirit of "no magic constants".
 *
 * The place that configures jemalloc and sets this prefix is in the Makefile in
 * mono/jemalloc/Makefile.am
 */

#define MONO_JEMALLOC_MALLOC mono_jemalloc
#define MONO_JEMALLOC_REALLOC mono_jerealloc
#define MONO_JEMALLOC_FREE mono_jefree
#define MONO_JEMALLOC_CALLOC mono_jecalloc

void mono_init_jemalloc (void);

#endif

#endif
/**
 * \file
 *
 * Header for jemalloc registration code
 */

#ifndef __MONO_JEMALLOC_H__
#define __MONO_JEMALLOC_H__

#if defined(MONO_JEMALLOC_ENABLED)

#include <jemalloc/jemalloc.h>

/* Jemalloc can be configured in three ways.
 * 1. You can use it with library loading hacks at run-time
 * 2. You can use it as a global malloc replacement
 * 3. You can use it with a prefix. If you use it with a prefix, you have to explicitly name the malloc function.
 *
 * In order to make this feature able to be toggled at run-time, I chose to use a prefix of mono_je.
 * This mapping is captured below in the header, in the spirit of "no magic constants".
 *
 * The place that configures jemalloc and sets this prefix is in the Makefile in
 * mono/jemalloc/Makefile.am
 */

#define MONO_JEMALLOC_MALLOC mono_jemalloc
#define MONO_JEMALLOC_REALLOC mono_jerealloc
#define MONO_JEMALLOC_FREE mono_jefree
#define MONO_JEMALLOC_CALLOC mono_jecalloc

void mono_init_jemalloc (void);

#endif

#endif
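/* A small usage sketch, assuming jemalloc was built with the "mono_je"
 * function prefix described above, so MONO_JEMALLOC_MALLOC expands to
 * mono_jemalloc and has malloc's signature. dup_with_jemalloc is
 * hypothetical; only the MONO_JEMALLOC_* names come from the header. */
#include <string.h>

#ifdef MONO_JEMALLOC_ENABLED
static char *
dup_with_jemalloc (const char *s)
{
    size_t len = strlen (s) + 1;
    char *copy = MONO_JEMALLOC_MALLOC (len);
    if (copy)
        memcpy (copy, s, len);
    return copy; /* release with MONO_JEMALLOC_FREE (copy) */
}
#endif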
-1
dotnet/runtime
65889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/coreclr/md/tables/external.h
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: external.h
//
//
// External types used in MetaData\Storage subcomponent classes.
// This file is used for precompiled headers, so it has to be included at the beginning of every .cpp in
// this directory.
//
// ======================================================================================

#pragma once

#include "../external.h"
#include "../export.h"
#include "../inc/recordpool.h"
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: external.h
//
//
// External types used in MetaData\Storage subcomponent classes.
// This file is used for precompiled headers, so it has to be included at the beginning of every .cpp in
// this directory.
//
// ======================================================================================

#pragma once

#include "../external.h"
#include "../export.h"
#include "../inc/recordpool.h"
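// Sketch of the include discipline described above (the file name and
// function below are hypothetical): every .cpp in this directory starts
// with this header so the compiler can substitute the precompiled header.
#include "external.h" // must be the first include in the translation unit

void SomeTableHelper()
{
    // ... code that uses the MetaData types pulled in above ...
}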
-1
dotnet/runtime
65889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/native/eventpipe/ds-ipc-pal-types.h
#ifndef __DIAGNOSTICS_IPC_PAL_TYPES_H__
#define __DIAGNOSTICS_IPC_PAL_TYPES_H__

#ifdef ENABLE_PERFTRACING
#include "ep-ipc-pal-types.h"

#undef DS_IMPL_GETTER_SETTER
#ifdef DS_IMPL_IPC_PAL_GETTER_SETTER
#define DS_IMPL_GETTER_SETTER
#endif
#include "ds-getter-setter.h"

/*
 * Diagnostics IPC PAL Structs.
 */

typedef struct _DiagnosticsIpc DiagnosticsIpc;
typedef struct _DiagnosticsIpcPollHandle DiagnosticsIpcPollHandle;
typedef struct _DiagnosticsIpcStream DiagnosticsIpcStream;

/*
 * Diagnostics IPC PAL Enums.
 */

typedef enum {
    DS_IPC_POLL_EVENTS_NONE = 0x00, // no events
    DS_IPC_POLL_EVENTS_SIGNALED = 0x01, // ready for use
    DS_IPC_POLL_EVENTS_HANGUP = 0x02, // connection remotely closed
    DS_IPC_POLL_EVENTS_ERR = 0x04, // error
    DS_IPC_POLL_EVENTS_UNKNOWN = 0x80 // unknown state
} DiagnosticsIpcPollEvents;

typedef enum {
    DS_IPC_CONNECTION_MODE_CONNECT,
    DS_IPC_CONNECTION_MODE_LISTEN
} DiagnosticsIpcConnectionMode;

#define DS_IPC_MAX_TO_STRING_LEN 128
#define DS_IPC_TIMEOUT_INFINITE (uint32_t)-1
#define DS_IPC_POLL_TIMEOUT_FALLOFF_FACTOR (float)1.25
#define DS_IPC_POLL_TIMEOUT_MIN_MS (uint32_t)10
#define DS_IPC_POLL_TIMEOUT_MAX_MS (uint32_t)500

/*
 * DiagnosticsIpcPollHandle.
 */

// The bookkeeping struct used for polling on server and client structs
#if defined(DS_INLINE_GETTER_SETTER) || defined(DS_IMPL_IPC_PAL_GETTER_SETTER) || defined(DS_IMPL_IPC_PAL_NAMEDPIPE_GETTER_SETTER) || defined(DS_IMPL_IPC_PAL_SOCKET_GETTER_SETTER)
struct _DiagnosticsIpcPollHandle {
#else
struct _DiagnosticsIpcPollHandle_Internal {
#endif
    // Only one of these will be non-null, treat as a union
    DiagnosticsIpc *ipc;
    DiagnosticsIpcStream *stream;

    // contains some set of PollEvents
    // will be set by Poll
    // Any values here are ignored by Poll
    uint8_t events;

    // a cookie assignable by upstream users for additional bookkeeping
    void *user_data;
};

#if !defined(DS_INLINE_GETTER_SETTER) && !defined(DS_IMPL_IPC_PAL_GETTER_SETTER) && !defined(DS_IMPL_IPC_PAL_NAMEDPIPE_GETTER_SETTER) && !defined(DS_IMPL_IPC_PAL_SOCKET_GETTER_SETTER)
struct _DiagnosticsIpcPollHandle {
    uint8_t _internal [sizeof (struct _DiagnosticsIpcPollHandle_Internal)];
};
#endif

DS_DEFINE_GETTER(DiagnosticsIpcPollHandle *, ipc_poll_handle, DiagnosticsIpc *, ipc)
DS_DEFINE_GETTER(DiagnosticsIpcPollHandle *, ipc_poll_handle, DiagnosticsIpcStream *, stream)
DS_DEFINE_GETTER(DiagnosticsIpcPollHandle *, ipc_poll_handle, uint8_t, events)
DS_DEFINE_SETTER(DiagnosticsIpcPollHandle *, ipc_poll_handle, uint8_t, events)
DS_DEFINE_GETTER(DiagnosticsIpcPollHandle *, ipc_poll_handle, void *, user_data)
DS_DEFINE_SETTER(DiagnosticsIpcPollHandle *, ipc_poll_handle, void *, user_data)

typedef void (*ds_ipc_error_callback_func)(
    const ep_char8_t *message,
    uint32_t code);

#endif /* ENABLE_PERFTRACING */
#endif /* __DIAGNOSTICS_IPC_PAL_TYPES_H__ */
#ifndef __DIAGNOSTICS_IPC_PAL_TYPES_H__
#define __DIAGNOSTICS_IPC_PAL_TYPES_H__

#ifdef ENABLE_PERFTRACING
#include "ep-ipc-pal-types.h"

#undef DS_IMPL_GETTER_SETTER
#ifdef DS_IMPL_IPC_PAL_GETTER_SETTER
#define DS_IMPL_GETTER_SETTER
#endif
#include "ds-getter-setter.h"

/*
 * Diagnostics IPC PAL Structs.
 */

typedef struct _DiagnosticsIpc DiagnosticsIpc;
typedef struct _DiagnosticsIpcPollHandle DiagnosticsIpcPollHandle;
typedef struct _DiagnosticsIpcStream DiagnosticsIpcStream;

/*
 * Diagnostics IPC PAL Enums.
 */

typedef enum {
    DS_IPC_POLL_EVENTS_NONE = 0x00, // no events
    DS_IPC_POLL_EVENTS_SIGNALED = 0x01, // ready for use
    DS_IPC_POLL_EVENTS_HANGUP = 0x02, // connection remotely closed
    DS_IPC_POLL_EVENTS_ERR = 0x04, // error
    DS_IPC_POLL_EVENTS_UNKNOWN = 0x80 // unknown state
} DiagnosticsIpcPollEvents;

typedef enum {
    DS_IPC_CONNECTION_MODE_CONNECT,
    DS_IPC_CONNECTION_MODE_LISTEN
} DiagnosticsIpcConnectionMode;

#define DS_IPC_MAX_TO_STRING_LEN 128
#define DS_IPC_TIMEOUT_INFINITE (uint32_t)-1
#define DS_IPC_POLL_TIMEOUT_FALLOFF_FACTOR (float)1.25
#define DS_IPC_POLL_TIMEOUT_MIN_MS (uint32_t)10
#define DS_IPC_POLL_TIMEOUT_MAX_MS (uint32_t)500

/*
 * DiagnosticsIpcPollHandle.
 */

// The bookkeeping struct used for polling on server and client structs
#if defined(DS_INLINE_GETTER_SETTER) || defined(DS_IMPL_IPC_PAL_GETTER_SETTER) || defined(DS_IMPL_IPC_PAL_NAMEDPIPE_GETTER_SETTER) || defined(DS_IMPL_IPC_PAL_SOCKET_GETTER_SETTER)
struct _DiagnosticsIpcPollHandle {
#else
struct _DiagnosticsIpcPollHandle_Internal {
#endif
    // Only one of these will be non-null, treat as a union
    DiagnosticsIpc *ipc;
    DiagnosticsIpcStream *stream;

    // contains some set of PollEvents
    // will be set by Poll
    // Any values here are ignored by Poll
    uint8_t events;

    // a cookie assignable by upstream users for additional bookkeeping
    void *user_data;
};

#if !defined(DS_INLINE_GETTER_SETTER) && !defined(DS_IMPL_IPC_PAL_GETTER_SETTER) && !defined(DS_IMPL_IPC_PAL_NAMEDPIPE_GETTER_SETTER) && !defined(DS_IMPL_IPC_PAL_SOCKET_GETTER_SETTER)
struct _DiagnosticsIpcPollHandle {
    uint8_t _internal [sizeof (struct _DiagnosticsIpcPollHandle_Internal)];
};
#endif

DS_DEFINE_GETTER(DiagnosticsIpcPollHandle *, ipc_poll_handle, DiagnosticsIpc *, ipc)
DS_DEFINE_GETTER(DiagnosticsIpcPollHandle *, ipc_poll_handle, DiagnosticsIpcStream *, stream)
DS_DEFINE_GETTER(DiagnosticsIpcPollHandle *, ipc_poll_handle, uint8_t, events)
DS_DEFINE_SETTER(DiagnosticsIpcPollHandle *, ipc_poll_handle, uint8_t, events)
DS_DEFINE_GETTER(DiagnosticsIpcPollHandle *, ipc_poll_handle, void *, user_data)
DS_DEFINE_SETTER(DiagnosticsIpcPollHandle *, ipc_poll_handle, void *, user_data)

typedef void (*ds_ipc_error_callback_func)(
    const ep_char8_t *message,
    uint32_t code);

#endif /* ENABLE_PERFTRACING */
#endif /* __DIAGNOSTICS_IPC_PAL_TYPES_H__ */
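/* Sketch of consuming the DiagnosticsIpcPollEvents bitmask after a poll
 * returns. process_poll_result is hypothetical; the flag values come from
 * this header, and the accessor name assumes the DS_DEFINE_GETTER macro
 * expands to ds_ipc_poll_handle_get_events per the getter/setter convention. */
static void
process_poll_result (DiagnosticsIpcPollHandle *handle)
{
    uint8_t events = ds_ipc_poll_handle_get_events (handle);

    if (events & DS_IPC_POLL_EVENTS_SIGNALED) {
        /* the ipc or stream held by this handle is ready for use */
    }
    if (events & (DS_IPC_POLL_EVENTS_HANGUP | DS_IPC_POLL_EVENTS_ERR)) {
        /* remote close or error: stop polling this handle and clean up */
    }
}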
-1
dotnet/runtime
65889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/coreclr/pal/src/libunwind/src/ia64/Gtables.c
/* libunwind - a platform-independent unwind library Copyright (c) 2001-2005 Hewlett-Packard Development Company, L.P. Contributed by David Mosberger-Tang <davidm@hpl.hp.com> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <assert.h> #include <stdlib.h> #include <stddef.h> #include "unwind_i.h" #ifdef HAVE_IA64INTRIN_H # include <ia64intrin.h> #endif extern unw_addr_space_t _ULia64_local_addr_space; struct ia64_table_entry { uint64_t start_offset; uint64_t end_offset; uint64_t info_offset; }; #ifdef UNW_LOCAL_ONLY static inline int is_local_addr_space (unw_addr_space_t as) { return 1; } static inline int read_mem (unw_addr_space_t as, unw_word_t addr, unw_word_t *valp, void *arg) { *valp = *(unw_word_t *) addr; return 0; } #else /* !UNW_LOCAL_ONLY */ static inline int is_local_addr_space (unw_addr_space_t as) { return as == unw_local_addr_space; } static inline int read_mem (unw_addr_space_t as, unw_word_t addr, unw_word_t *valp, void *arg) { unw_accessors_t *a = unw_get_accessors_int (as); return (*a->access_mem) (as, addr, valp, 0, arg); } /* Helper macro for reading an ia64_table_entry from remote memory. */ #define remote_read(addr, member) \ (*a->access_mem) (as, (addr) + offsetof (struct ia64_table_entry, \ member), &member, 0, arg) /* Lookup an unwind-table entry in remote memory. Returns 1 if an entry is found, 0 if no entry is found, negative if an error occurred reading remote memory. 
*/ static int remote_lookup (unw_addr_space_t as, unw_word_t table, size_t table_size, unw_word_t rel_ip, struct ia64_table_entry *e, void *arg) { unw_word_t e_addr = 0, start_offset, end_offset, info_offset; unw_accessors_t *a = unw_get_accessors_int (as); unsigned long lo, hi, mid; int ret; /* do a binary search for right entry: */ for (lo = 0, hi = table_size / sizeof (struct ia64_table_entry); lo < hi;) { mid = (lo + hi) / 2; e_addr = table + mid * sizeof (struct ia64_table_entry); if ((ret = remote_read (e_addr, start_offset)) < 0) return ret; if (rel_ip < start_offset) hi = mid; else { if ((ret = remote_read (e_addr, end_offset)) < 0) return ret; if (rel_ip >= end_offset) lo = mid + 1; else break; } } if (rel_ip < start_offset || rel_ip >= end_offset) return 0; e->start_offset = start_offset; e->end_offset = end_offset; if ((ret = remote_read (e_addr, info_offset)) < 0) return ret; e->info_offset = info_offset; return 1; } HIDDEN void tdep_put_unwind_info (unw_addr_space_t as, unw_proc_info_t *pi, void *arg) { if (!pi->unwind_info) return; if (is_local_addr_space (as)) { free (pi->unwind_info); pi->unwind_info = NULL; } } unw_word_t _Uia64_find_dyn_list (unw_addr_space_t as, unw_dyn_info_t *di, void *arg) { unw_word_t hdr_addr, info_addr, hdr, directives, pers, cookie, off; unw_word_t start_offset, end_offset, info_offset, segbase; struct ia64_table_entry *e; size_t table_size; unw_word_t gp = di->gp; int ret; switch (di->format) { case UNW_INFO_FORMAT_DYNAMIC: default: return 0; case UNW_INFO_FORMAT_TABLE: e = (struct ia64_table_entry *) di->u.ti.table_data; table_size = di->u.ti.table_len * sizeof (di->u.ti.table_data[0]); segbase = di->u.ti.segbase; if (table_size < sizeof (struct ia64_table_entry)) return 0; start_offset = e[0].start_offset; end_offset = e[0].end_offset; info_offset = e[0].info_offset; break; case UNW_INFO_FORMAT_REMOTE_TABLE: { unw_accessors_t *a = unw_get_accessors_int (as); unw_word_t e_addr = di->u.rti.table_data; table_size = di->u.rti.table_len * sizeof (unw_word_t); segbase = di->u.rti.segbase; if (table_size < sizeof (struct ia64_table_entry)) return 0; if ( (ret = remote_read (e_addr, start_offset) < 0) || (ret = remote_read (e_addr, end_offset) < 0) || (ret = remote_read (e_addr, info_offset) < 0)) return ret; } break; } if (start_offset != end_offset) /* dyn-list entry cover a zero-length "procedure" and should be first entry (note: technically a binary could contain code below the segment base, but this doesn't happen for normal binaries and certainly doesn't happen when libunwind is a separate shared object. For weird cases, the application may have to provide its own (slower) version of this routine. 
*/ return 0; hdr_addr = info_offset + segbase; info_addr = hdr_addr + 8; /* read the header word: */ if ((ret = read_mem (as, hdr_addr, &hdr, arg)) < 0) return ret; if (IA64_UNW_VER (hdr) != 1 || IA64_UNW_FLAG_EHANDLER (hdr) || IA64_UNW_FLAG_UHANDLER (hdr)) /* dyn-list entry must be version 1 and doesn't have ehandler or uhandler */ return 0; if (IA64_UNW_LENGTH (hdr) != 1) /* dyn-list entry must consist of a single word of NOP directives */ return 0; if ( ((ret = read_mem (as, info_addr, &directives, arg)) < 0) || ((ret = read_mem (as, info_addr + 0x08, &pers, arg)) < 0) || ((ret = read_mem (as, info_addr + 0x10, &cookie, arg)) < 0) || ((ret = read_mem (as, info_addr + 0x18, &off, arg)) < 0)) return 0; if (directives != 0 || pers != 0 || (!as->big_endian && cookie != 0x7473696c2d6e7964ULL) || ( as->big_endian && cookie != 0x64796e2d6c697374ULL)) return 0; /* OK, we ran the gauntlet and found it: */ return off + gp; } #endif /* !UNW_LOCAL_ONLY */ static inline const struct ia64_table_entry * lookup (struct ia64_table_entry *table, size_t table_size, unw_word_t rel_ip) { const struct ia64_table_entry *e = 0; unsigned long lo, hi, mid; /* do a binary search for right entry: */ for (lo = 0, hi = table_size / sizeof (struct ia64_table_entry); lo < hi;) { mid = (lo + hi) / 2; e = table + mid; if (rel_ip < e->start_offset) hi = mid; else if (rel_ip >= e->end_offset) lo = mid + 1; else break; } if (rel_ip < e->start_offset || rel_ip >= e->end_offset) return NULL; return e; } int unw_search_ia64_unwind_table (unw_addr_space_t as, unw_word_t ip, unw_dyn_info_t *di, unw_proc_info_t *pi, int need_unwind_info, void *arg) { unw_word_t addr, hdr_addr, info_addr, info_end_addr, hdr, *wp; const struct ia64_table_entry *e = NULL; unw_word_t handler_offset, segbase = 0; int ret, is_local; #ifndef UNW_LOCAL_ONLY struct ia64_table_entry ent; #endif assert ((di->format == UNW_INFO_FORMAT_TABLE || di->format == UNW_INFO_FORMAT_REMOTE_TABLE) && (ip >= di->start_ip && ip < di->end_ip)); pi->flags = 0; pi->unwind_info = 0; pi->handler = 0; if (likely (di->format == UNW_INFO_FORMAT_TABLE)) { segbase = di->u.ti.segbase; e = lookup ((struct ia64_table_entry *) di->u.ti.table_data, di->u.ti.table_len * sizeof (unw_word_t), ip - segbase); } #ifndef UNW_LOCAL_ONLY else { segbase = di->u.rti.segbase; if ((ret = remote_lookup (as, di->u.rti.table_data, di->u.rti.table_len * sizeof (unw_word_t), ip - segbase, &ent, arg)) < 0) return ret; if (ret) e = &ent; } #endif if (!e) { /* IP is inside this table's range, but there is no explicit unwind info => use default conventions (i.e., this is NOT an error). */ memset (pi, 0, sizeof (*pi)); pi->start_ip = 0; pi->end_ip = 0; pi->gp = di->gp; pi->lsda = 0; return 0; } pi->start_ip = e->start_offset + segbase; pi->end_ip = e->end_offset + segbase; hdr_addr = e->info_offset + segbase; info_addr = hdr_addr + 8; /* Read the header word. Note: the actual unwind-info is always assumed to reside in memory, independent of whether di->format is UNW_INFO_FORMAT_TABLE or UNW_INFO_FORMAT_REMOTE_TABLE. */ if ((ret = read_mem (as, hdr_addr, &hdr, arg)) < 0) return ret; if (IA64_UNW_VER (hdr) != 1) { Debug (1, "Unknown header version %ld (hdr word=0x%lx @ 0x%lx)\n", IA64_UNW_VER (hdr), (unsigned long) hdr, (unsigned long) hdr_addr); return -UNW_EBADVERSION; } info_end_addr = info_addr + 8 * IA64_UNW_LENGTH (hdr); is_local = is_local_addr_space (as); /* If we must have the unwind-info, return it. 
Also, if we are in the local address-space, return the unwind-info because it's so cheap to do so and it may come in handy later on. */ if (need_unwind_info || is_local) { pi->unwind_info_size = 8 * IA64_UNW_LENGTH (hdr); if (is_local) pi->unwind_info = (void *) (uintptr_t) info_addr; else { /* Internalize unwind info. Note: since we're doing this only for non-local address spaces, there is no signal-safety issue and it is OK to use malloc()/free(). */ pi->unwind_info = malloc (8 * IA64_UNW_LENGTH (hdr)); if (!pi->unwind_info) return -UNW_ENOMEM; wp = (unw_word_t *) pi->unwind_info; for (addr = info_addr; addr < info_end_addr; addr += 8, ++wp) { if ((ret = read_mem (as, addr, wp, arg)) < 0) { free (pi->unwind_info); return ret; } } } } if (IA64_UNW_FLAG_EHANDLER (hdr) || IA64_UNW_FLAG_UHANDLER (hdr)) { /* read the personality routine address (address is gp-relative): */ if ((ret = read_mem (as, info_end_addr, &handler_offset, arg)) < 0) return ret; Debug (4, "handler ptr @ offset=%lx, gp=%lx\n", handler_offset, di->gp); if ((read_mem (as, handler_offset + di->gp, &pi->handler, arg)) < 0) return ret; } pi->lsda = info_end_addr + 8; pi->gp = di->gp; pi->format = di->format; return 0; } #ifndef UNW_REMOTE_ONLY # if defined(HAVE_DL_ITERATE_PHDR) # include <link.h> # include <stdlib.h> # if __GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ < 2) \ || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 2 && !defined(DT_CONFIG)) # error You need GLIBC 2.2.4 or later on IA-64 Linux # endif # if defined(HAVE_GETUNWIND) extern unsigned long getunwind (void *buf, size_t len); # else /* HAVE_GETUNWIND */ # include <unistd.h> # include <sys/syscall.h> # ifndef __NR_getunwind # define __NR_getunwind 1215 # endif static unsigned long getunwind (void *buf, size_t len) { return syscall (SYS_getunwind, buf, len); } # endif /* HAVE_GETUNWIND */ static unw_dyn_info_t kernel_table; static int get_kernel_table (unw_dyn_info_t *di) { struct ia64_table_entry *ktab, *etab; size_t size; Debug (16, "getting kernel table"); size = getunwind (NULL, 0); ktab = sos_alloc (size); if (!ktab) { Dprintf (__FILE__".%s: failed to allocate %zu bytes", __FUNCTION__, size); return -UNW_ENOMEM; } getunwind (ktab, size); /* Determine length of kernel's unwind table & relocate its entries. */ for (etab = ktab; etab->start_offset; ++etab) etab->info_offset += (uint64_t) ktab; di->format = UNW_INFO_FORMAT_TABLE; di->gp = 0; di->start_ip = ktab[0].start_offset; di->end_ip = etab[-1].end_offset; di->u.ti.name_ptr = (unw_word_t) "<kernel>"; di->u.ti.segbase = 0; di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t); di->u.ti.table_data = (unw_word_t *) ktab; Debug (16, "found table `%s': [%lx-%lx) segbase=%lx len=%lu\n", (char *) di->u.ti.name_ptr, di->start_ip, di->end_ip, di->u.ti.segbase, di->u.ti.table_len); return 0; } # ifndef UNW_LOCAL_ONLY /* This is exported for the benefit of libunwind-ptrace.a. */ int _Uia64_get_kernel_table (unw_dyn_info_t *di) { int ret; if (!kernel_table.u.ti.table_data) if ((ret = get_kernel_table (&kernel_table)) < 0) return ret; memcpy (di, &kernel_table, sizeof (*di)); return 0; } # endif /* !UNW_LOCAL_ONLY */ static inline unsigned long current_gp (void) { # if defined(__GNUC__) && !defined(__INTEL_COMPILER) register unsigned long gp __asm__("gp"); return gp; # elif HAVE_IA64INTRIN_H return __getReg (_IA64_REG_GP); # else # error Implement me. 
# endif } static int callback (struct dl_phdr_info *info, size_t size, void *ptr) { unw_dyn_info_t *di = ptr; const Elf64_Phdr *phdr, *p_unwind, *p_dynamic, *p_text; long n; Elf64_Addr load_base, segbase = 0; /* Make sure struct dl_phdr_info is at least as big as we need. */ if (size < offsetof (struct dl_phdr_info, dlpi_phnum) + sizeof (info->dlpi_phnum)) return -1; Debug (16, "checking `%s' (load_base=%lx)\n", info->dlpi_name, info->dlpi_addr); phdr = info->dlpi_phdr; load_base = info->dlpi_addr; p_text = NULL; p_unwind = NULL; p_dynamic = NULL; /* See if PC falls into one of the loaded segments. Find the unwind segment at the same time. */ for (n = info->dlpi_phnum; --n >= 0; phdr++) { if (phdr->p_type == PT_LOAD) { Elf64_Addr vaddr = phdr->p_vaddr + load_base; if (di->u.ti.segbase >= vaddr && di->u.ti.segbase < vaddr + phdr->p_memsz) p_text = phdr; } else if (phdr->p_type == PT_IA_64_UNWIND) p_unwind = phdr; else if (phdr->p_type == PT_DYNAMIC) p_dynamic = phdr; } if (!p_text || !p_unwind) return 0; if (likely (p_unwind->p_vaddr >= p_text->p_vaddr && p_unwind->p_vaddr < p_text->p_vaddr + p_text->p_memsz)) /* normal case: unwind table is inside text segment */ segbase = p_text->p_vaddr + load_base; else { /* Special case: unwind table is in some other segment; this happens for the Linux kernel's gate DSO, for example. */ phdr = info->dlpi_phdr; for (n = info->dlpi_phnum; --n >= 0; phdr++) { if (phdr->p_type == PT_LOAD && p_unwind->p_vaddr >= phdr->p_vaddr && p_unwind->p_vaddr < phdr->p_vaddr + phdr->p_memsz) { segbase = phdr->p_vaddr + load_base; break; } } } if (p_dynamic) { /* For dynamically linked executables and shared libraries, DT_PLTGOT is the gp value for that object. */ Elf64_Dyn *dyn = (Elf64_Dyn *)(p_dynamic->p_vaddr + load_base); for (; dyn->d_tag != DT_NULL; ++dyn) if (dyn->d_tag == DT_PLTGOT) { /* On IA-64, _DYNAMIC is writable and GLIBC has relocated it. */ di->gp = dyn->d_un.d_ptr; break; } } else /* Otherwise this is a static executable with no _DYNAMIC. The gp is constant program-wide. */ di->gp = current_gp(); di->format = UNW_INFO_FORMAT_TABLE; di->start_ip = p_text->p_vaddr + load_base; di->end_ip = p_text->p_vaddr + load_base + p_text->p_memsz; di->u.ti.name_ptr = (unw_word_t) info->dlpi_name; di->u.ti.table_data = (void *) (p_unwind->p_vaddr + load_base); di->u.ti.table_len = p_unwind->p_memsz / sizeof (unw_word_t); di->u.ti.segbase = segbase; Debug (16, "found table `%s': segbase=%lx, len=%lu, gp=%lx, " "table_data=%p\n", (char *) di->u.ti.name_ptr, di->u.ti.segbase, di->u.ti.table_len, di->gp, di->u.ti.table_data); return 1; } # ifdef HAVE_DL_PHDR_REMOVALS_COUNTER static inline int validate_cache (unw_addr_space_t as) { /* Note: we don't need to serialize here with respect to dl_iterate_phdr() because if somebody were to remove an object that is required to complete the unwind on whose behalf we're validating the cache here, we'd be hosed anyhow. What we're guarding against here is the case where library FOO gets mapped, unwind info for FOO gets cached, FOO gets unmapped, BAR gets mapped in the place where FOO was and then we unwind across a function in FOO. Since no thread can execute in BAR before FOO has been removed, we are guaranteed that dl_phdr_removals_counter() would have been incremented before we get here.
*/ unsigned long long removals = dl_phdr_removals_counter (); if (removals == as->shared_object_removals) return 1; as->shared_object_removals = removals; unw_flush_cache (as, 0, 0); return -1; } # else /* !HAVE_DL_PHDR_REMOVALS_COUNTER */ /* Check whether any phdrs have been removed since we last flushed the cache. If so we flush the cache and return -1, if not, we do nothing and return 1. */ static int check_callback (struct dl_phdr_info *info, size_t size, void *ptr) { # ifdef HAVE_STRUCT_DL_PHDR_INFO_DLPI_SUBS unw_addr_space_t as = ptr; if (size < offsetof (struct dl_phdr_info, dlpi_subs) + sizeof (info->dlpi_subs)) /* It would be safer to flush the cache here, but that would disable caching for older libc's which would be incompatible with the behavior of older versions of libunwind so we return 1 instead and hope nobody runs into stale cache info... */ return 1; if (info->dlpi_subs == as->shared_object_removals) return 1; as->shared_object_removals = info->dlpi_subs; unw_flush_cache (as, 0, 0); return -1; /* indicate that there were removals */ # else return 1; # endif } static inline int validate_cache (unw_addr_space_t as) { intrmask_t saved_mask; int ret; SIGPROCMASK (SIG_SETMASK, &unwi_full_mask, &saved_mask); ret = dl_iterate_phdr (check_callback, as); SIGPROCMASK (SIG_SETMASK, &saved_mask, NULL); return ret; } # endif /* HAVE_DL_PHDR_REMOVALS_COUNTER */ # elif defined(HAVE_DLMODINFO) /* Support for HP-UX-style dlmodinfo() */ # include <dlfcn.h> static inline int validate_cache (unw_addr_space_t as) { return 1; } # endif /* !HAVE_DLMODINFO */ HIDDEN int tdep_find_proc_info (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, int need_unwind_info, void *arg) { # if defined(HAVE_DL_ITERATE_PHDR) unw_dyn_info_t di, *dip = &di; intrmask_t saved_mask; int ret; di.u.ti.segbase = ip; /* this is cheap... 
*/ SIGPROCMASK (SIG_SETMASK, &unwi_full_mask, &saved_mask); ret = dl_iterate_phdr (callback, &di); SIGPROCMASK (SIG_SETMASK, &saved_mask, NULL); if (ret <= 0) { if (!kernel_table.u.ti.table_data) { if ((ret = get_kernel_table (&kernel_table)) < 0) return ret; } if (ip < kernel_table.start_ip || ip >= kernel_table.end_ip) return -UNW_ENOINFO; dip = &kernel_table; } # elif defined(HAVE_DLMODINFO) # define UNWIND_TBL_32BIT 0x8000000000000000 struct load_module_desc lmd; unw_dyn_info_t di, *dip = &di; struct unwind_header { uint64_t header_version; uint64_t start_offset; uint64_t end_offset; } *uhdr; if (!dlmodinfo (ip, &lmd, sizeof (lmd), NULL, 0, 0)) return -UNW_ENOINFO; di.format = UNW_INFO_FORMAT_TABLE; di.start_ip = lmd.text_base; di.end_ip = lmd.text_base + lmd.text_size; di.gp = lmd.linkage_ptr; di.u.ti.name_ptr = 0; /* no obvious table-name available */ di.u.ti.segbase = lmd.text_base; uhdr = (struct unwind_header *) lmd.unwind_base; if ((uhdr->header_version & ~UNWIND_TBL_32BIT) != 1 && (uhdr->header_version & ~UNWIND_TBL_32BIT) != 2) { Debug (1, "encountered unknown unwind header version %ld\n", (long) (uhdr->header_version & ~UNWIND_TBL_32BIT)); return -UNW_EBADVERSION; } if (uhdr->header_version & UNWIND_TBL_32BIT) { Debug (1, "32-bit unwind tables are not supported yet\n"); return -UNW_EINVAL; } di.u.ti.table_data = (unw_word_t *) (di.u.ti.segbase + uhdr->start_offset); di.u.ti.table_len = ((uhdr->end_offset - uhdr->start_offset) / sizeof (unw_word_t)); Debug (16, "found table `%s': segbase=%lx, len=%lu, gp=%lx, " "table_data=%p\n", (char *) di.u.ti.name_ptr, di.u.ti.segbase, di.u.ti.table_len, di.gp, di.u.ti.table_data); # endif /* now search the table: */ return tdep_search_unwind_table (as, ip, dip, pi, need_unwind_info, arg); } /* Returns 1 if the cache is up-to-date or -1 if the cache contained stale data and had to be flushed. */ HIDDEN int ia64_local_validate_cache (unw_addr_space_t as, void *arg) { return validate_cache (as); } #endif /* !UNW_REMOTE_ONLY */
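Aside: the dyn-list check in _Uia64_find_dyn_list above compares the cookie word against two magic constants, which are simply the eight ASCII bytes of "dyn-list" as seen from a little-endian and a big-endian target. A tiny standalone C sketch (illustrative only, not libunwind code) that makes the little-endian constant visible as text:

/* Hedged illustration: prints "dyn-list" on a little-endian host. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main (void)
{
  uint64_t cookie = 0x7473696c2d6e7964ULL; /* LE constant from the check above */
  char buf[9] = { 0 };
  memcpy (buf, &cookie, sizeof (cookie));  /* reinterpret the word as raw bytes */
  printf ("%s\n", buf);                    /* "dyn-list" when the host is LE */
  return 0;
}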
/* libunwind - a platform-independent unwind library Copyright (c) 2001-2005 Hewlett-Packard Development Company, L.P. Contributed by David Mosberger-Tang <davidm@hpl.hp.com> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <assert.h> #include <stdlib.h> #include <stddef.h> #include "unwind_i.h" #ifdef HAVE_IA64INTRIN_H # include <ia64intrin.h> #endif extern unw_addr_space_t _ULia64_local_addr_space; struct ia64_table_entry { uint64_t start_offset; uint64_t end_offset; uint64_t info_offset; }; #ifdef UNW_LOCAL_ONLY static inline int is_local_addr_space (unw_addr_space_t as) { return 1; } static inline int read_mem (unw_addr_space_t as, unw_word_t addr, unw_word_t *valp, void *arg) { *valp = *(unw_word_t *) addr; return 0; } #else /* !UNW_LOCAL_ONLY */ static inline int is_local_addr_space (unw_addr_space_t as) { return as == unw_local_addr_space; } static inline int read_mem (unw_addr_space_t as, unw_word_t addr, unw_word_t *valp, void *arg) { unw_accessors_t *a = unw_get_accessors_int (as); return (*a->access_mem) (as, addr, valp, 0, arg); } /* Helper macro for reading an ia64_table_entry from remote memory. */ #define remote_read(addr, member) \ (*a->access_mem) (as, (addr) + offsetof (struct ia64_table_entry, \ member), &member, 0, arg) /* Lookup an unwind-table entry in remote memory. Returns 1 if an entry is found, 0 if no entry is found, negative if an error occurred reading remote memory. 
*/ static int remote_lookup (unw_addr_space_t as, unw_word_t table, size_t table_size, unw_word_t rel_ip, struct ia64_table_entry *e, void *arg) { unw_word_t e_addr = 0, start_offset, end_offset, info_offset; unw_accessors_t *a = unw_get_accessors_int (as); unsigned long lo, hi, mid; int ret; /* do a binary search for right entry: */ for (lo = 0, hi = table_size / sizeof (struct ia64_table_entry); lo < hi;) { mid = (lo + hi) / 2; e_addr = table + mid * sizeof (struct ia64_table_entry); if ((ret = remote_read (e_addr, start_offset)) < 0) return ret; if (rel_ip < start_offset) hi = mid; else { if ((ret = remote_read (e_addr, end_offset)) < 0) return ret; if (rel_ip >= end_offset) lo = mid + 1; else break; } } if (rel_ip < start_offset || rel_ip >= end_offset) return 0; e->start_offset = start_offset; e->end_offset = end_offset; if ((ret = remote_read (e_addr, info_offset)) < 0) return ret; e->info_offset = info_offset; return 1; } HIDDEN void tdep_put_unwind_info (unw_addr_space_t as, unw_proc_info_t *pi, void *arg) { if (!pi->unwind_info) return; if (is_local_addr_space (as)) { free (pi->unwind_info); pi->unwind_info = NULL; } } unw_word_t _Uia64_find_dyn_list (unw_addr_space_t as, unw_dyn_info_t *di, void *arg) { unw_word_t hdr_addr, info_addr, hdr, directives, pers, cookie, off; unw_word_t start_offset, end_offset, info_offset, segbase; struct ia64_table_entry *e; size_t table_size; unw_word_t gp = di->gp; int ret; switch (di->format) { case UNW_INFO_FORMAT_DYNAMIC: default: return 0; case UNW_INFO_FORMAT_TABLE: e = (struct ia64_table_entry *) di->u.ti.table_data; table_size = di->u.ti.table_len * sizeof (di->u.ti.table_data[0]); segbase = di->u.ti.segbase; if (table_size < sizeof (struct ia64_table_entry)) return 0; start_offset = e[0].start_offset; end_offset = e[0].end_offset; info_offset = e[0].info_offset; break; case UNW_INFO_FORMAT_REMOTE_TABLE: { unw_accessors_t *a = unw_get_accessors_int (as); unw_word_t e_addr = di->u.rti.table_data; table_size = di->u.rti.table_len * sizeof (unw_word_t); segbase = di->u.rti.segbase; if (table_size < sizeof (struct ia64_table_entry)) return 0; if ( ((ret = remote_read (e_addr, start_offset)) < 0) || ((ret = remote_read (e_addr, end_offset)) < 0) || ((ret = remote_read (e_addr, info_offset)) < 0)) return ret; } break; } if (start_offset != end_offset) /* dyn-list entry covers a zero-length "procedure" and should be the first entry (note: technically a binary could contain code below the segment base, but this doesn't happen for normal binaries and certainly doesn't happen when libunwind is a separate shared object.) For weird cases, the application may have to provide its own (slower) version of this routine.
*/ return 0; hdr_addr = info_offset + segbase; info_addr = hdr_addr + 8; /* read the header word: */ if ((ret = read_mem (as, hdr_addr, &hdr, arg)) < 0) return ret; if (IA64_UNW_VER (hdr) != 1 || IA64_UNW_FLAG_EHANDLER (hdr) || IA64_UNW_FLAG_UHANDLER (hdr)) /* dyn-list entry must be version 1 and doesn't have ehandler or uhandler */ return 0; if (IA64_UNW_LENGTH (hdr) != 1) /* dyn-list entry must consist of a single word of NOP directives */ return 0; if ( ((ret = read_mem (as, info_addr, &directives, arg)) < 0) || ((ret = read_mem (as, info_addr + 0x08, &pers, arg)) < 0) || ((ret = read_mem (as, info_addr + 0x10, &cookie, arg)) < 0) || ((ret = read_mem (as, info_addr + 0x18, &off, arg)) < 0)) return 0; if (directives != 0 || pers != 0 || (!as->big_endian && cookie != 0x7473696c2d6e7964ULL) || ( as->big_endian && cookie != 0x64796e2d6c697374ULL)) return 0; /* OK, we ran the gauntlet and found it: */ return off + gp; } #endif /* !UNW_LOCAL_ONLY */ static inline const struct ia64_table_entry * lookup (struct ia64_table_entry *table, size_t table_size, unw_word_t rel_ip) { const struct ia64_table_entry *e = 0; unsigned long lo, hi, mid; /* do a binary search for right entry: */ for (lo = 0, hi = table_size / sizeof (struct ia64_table_entry); lo < hi;) { mid = (lo + hi) / 2; e = table + mid; if (rel_ip < e->start_offset) hi = mid; else if (rel_ip >= e->end_offset) lo = mid + 1; else break; } if (rel_ip < e->start_offset || rel_ip >= e->end_offset) return NULL; return e; } int unw_search_ia64_unwind_table (unw_addr_space_t as, unw_word_t ip, unw_dyn_info_t *di, unw_proc_info_t *pi, int need_unwind_info, void *arg) { unw_word_t addr, hdr_addr, info_addr, info_end_addr, hdr, *wp; const struct ia64_table_entry *e = NULL; unw_word_t handler_offset, segbase = 0; int ret, is_local; #ifndef UNW_LOCAL_ONLY struct ia64_table_entry ent; #endif assert ((di->format == UNW_INFO_FORMAT_TABLE || di->format == UNW_INFO_FORMAT_REMOTE_TABLE) && (ip >= di->start_ip && ip < di->end_ip)); pi->flags = 0; pi->unwind_info = 0; pi->handler = 0; if (likely (di->format == UNW_INFO_FORMAT_TABLE)) { segbase = di->u.ti.segbase; e = lookup ((struct ia64_table_entry *) di->u.ti.table_data, di->u.ti.table_len * sizeof (unw_word_t), ip - segbase); } #ifndef UNW_LOCAL_ONLY else { segbase = di->u.rti.segbase; if ((ret = remote_lookup (as, di->u.rti.table_data, di->u.rti.table_len * sizeof (unw_word_t), ip - segbase, &ent, arg)) < 0) return ret; if (ret) e = &ent; } #endif if (!e) { /* IP is inside this table's range, but there is no explicit unwind info => use default conventions (i.e., this is NOT an error). */ memset (pi, 0, sizeof (*pi)); pi->start_ip = 0; pi->end_ip = 0; pi->gp = di->gp; pi->lsda = 0; return 0; } pi->start_ip = e->start_offset + segbase; pi->end_ip = e->end_offset + segbase; hdr_addr = e->info_offset + segbase; info_addr = hdr_addr + 8; /* Read the header word. Note: the actual unwind-info is always assumed to reside in memory, independent of whether di->format is UNW_INFO_FORMAT_TABLE or UNW_INFO_FORMAT_REMOTE_TABLE. */ if ((ret = read_mem (as, hdr_addr, &hdr, arg)) < 0) return ret; if (IA64_UNW_VER (hdr) != 1) { Debug (1, "Unknown header version %ld (hdr word=0x%lx @ 0x%lx)\n", IA64_UNW_VER (hdr), (unsigned long) hdr, (unsigned long) hdr_addr); return -UNW_EBADVERSION; } info_end_addr = info_addr + 8 * IA64_UNW_LENGTH (hdr); is_local = is_local_addr_space (as); /* If we must have the unwind-info, return it. 
Also, if we are in the local address-space, return the unwind-info because it's so cheap to do so and it may come in handy later on. */ if (need_unwind_info || is_local) { pi->unwind_info_size = 8 * IA64_UNW_LENGTH (hdr); if (is_local) pi->unwind_info = (void *) (uintptr_t) info_addr; else { /* Internalize unwind info. Note: since we're doing this only for non-local address spaces, there is no signal-safety issue and it is OK to use malloc()/free(). */ pi->unwind_info = malloc (8 * IA64_UNW_LENGTH (hdr)); if (!pi->unwind_info) return -UNW_ENOMEM; wp = (unw_word_t *) pi->unwind_info; for (addr = info_addr; addr < info_end_addr; addr += 8, ++wp) { if ((ret = read_mem (as, addr, wp, arg)) < 0) { free (pi->unwind_info); return ret; } } } } if (IA64_UNW_FLAG_EHANDLER (hdr) || IA64_UNW_FLAG_UHANDLER (hdr)) { /* read the personality routine address (address is gp-relative): */ if ((ret = read_mem (as, info_end_addr, &handler_offset, arg)) < 0) return ret; Debug (4, "handler ptr @ offset=%lx, gp=%lx\n", handler_offset, di->gp); if ((ret = read_mem (as, handler_offset + di->gp, &pi->handler, arg)) < 0) return ret; } pi->lsda = info_end_addr + 8; pi->gp = di->gp; pi->format = di->format; return 0; } #ifndef UNW_REMOTE_ONLY # if defined(HAVE_DL_ITERATE_PHDR) # include <link.h> # include <stdlib.h> # if __GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ < 2) \ || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 2 && !defined(DT_CONFIG)) # error You need GLIBC 2.2.4 or later on IA-64 Linux # endif # if defined(HAVE_GETUNWIND) extern unsigned long getunwind (void *buf, size_t len); # else /* HAVE_GETUNWIND */ # include <unistd.h> # include <sys/syscall.h> # ifndef __NR_getunwind # define __NR_getunwind 1215 # endif static unsigned long getunwind (void *buf, size_t len) { return syscall (SYS_getunwind, buf, len); } # endif /* HAVE_GETUNWIND */ static unw_dyn_info_t kernel_table; static int get_kernel_table (unw_dyn_info_t *di) { struct ia64_table_entry *ktab, *etab; size_t size; Debug (16, "getting kernel table\n"); size = getunwind (NULL, 0); ktab = sos_alloc (size); if (!ktab) { Dprintf (__FILE__".%s: failed to allocate %zu bytes", __FUNCTION__, size); return -UNW_ENOMEM; } getunwind (ktab, size); /* Determine length of kernel's unwind table & relocate its entries. */ for (etab = ktab; etab->start_offset; ++etab) etab->info_offset += (uint64_t) ktab; di->format = UNW_INFO_FORMAT_TABLE; di->gp = 0; di->start_ip = ktab[0].start_offset; di->end_ip = etab[-1].end_offset; di->u.ti.name_ptr = (unw_word_t) "<kernel>"; di->u.ti.segbase = 0; di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t); di->u.ti.table_data = (unw_word_t *) ktab; Debug (16, "found table `%s': [%lx-%lx) segbase=%lx len=%lu\n", (char *) di->u.ti.name_ptr, di->start_ip, di->end_ip, di->u.ti.segbase, di->u.ti.table_len); return 0; } # ifndef UNW_LOCAL_ONLY /* This is exported for the benefit of libunwind-ptrace.a. */ int _Uia64_get_kernel_table (unw_dyn_info_t *di) { int ret; if (!kernel_table.u.ti.table_data) if ((ret = get_kernel_table (&kernel_table)) < 0) return ret; memcpy (di, &kernel_table, sizeof (*di)); return 0; } # endif /* !UNW_LOCAL_ONLY */ static inline unsigned long current_gp (void) { # if defined(__GNUC__) && !defined(__INTEL_COMPILER) register unsigned long gp __asm__("gp"); return gp; # elif HAVE_IA64INTRIN_H return __getReg (_IA64_REG_GP); # else # error Implement me.
# endif } static int callback (struct dl_phdr_info *info, size_t size, void *ptr) { unw_dyn_info_t *di = ptr; const Elf64_Phdr *phdr, *p_unwind, *p_dynamic, *p_text; long n; Elf64_Addr load_base, segbase = 0; /* Make sure struct dl_phdr_info is at least as big as we need. */ if (size < offsetof (struct dl_phdr_info, dlpi_phnum) + sizeof (info->dlpi_phnum)) return -1; Debug (16, "checking `%s' (load_base=%lx)\n", info->dlpi_name, info->dlpi_addr); phdr = info->dlpi_phdr; load_base = info->dlpi_addr; p_text = NULL; p_unwind = NULL; p_dynamic = NULL; /* See if PC falls into one of the loaded segments. Find the unwind segment at the same time. */ for (n = info->dlpi_phnum; --n >= 0; phdr++) { if (phdr->p_type == PT_LOAD) { Elf64_Addr vaddr = phdr->p_vaddr + load_base; if (di->u.ti.segbase >= vaddr && di->u.ti.segbase < vaddr + phdr->p_memsz) p_text = phdr; } else if (phdr->p_type == PT_IA_64_UNWIND) p_unwind = phdr; else if (phdr->p_type == PT_DYNAMIC) p_dynamic = phdr; } if (!p_text || !p_unwind) return 0; if (likely (p_unwind->p_vaddr >= p_text->p_vaddr && p_unwind->p_vaddr < p_text->p_vaddr + p_text->p_memsz)) /* normal case: unwind table is inside text segment */ segbase = p_text->p_vaddr + load_base; else { /* Special case: unwind table is in some other segment; this happens for the Linux kernel's gate DSO, for example. */ phdr = info->dlpi_phdr; for (n = info->dlpi_phnum; --n >= 0; phdr++) { if (phdr->p_type == PT_LOAD && p_unwind->p_vaddr >= phdr->p_vaddr && p_unwind->p_vaddr < phdr->p_vaddr + phdr->p_memsz) { segbase = phdr->p_vaddr + load_base; break; } } } if (p_dynamic) { /* For dynamically linked executables and shared libraries, DT_PLTGOT is the gp value for that object. */ Elf64_Dyn *dyn = (Elf64_Dyn *)(p_dynamic->p_vaddr + load_base); for (; dyn->d_tag != DT_NULL; ++dyn) if (dyn->d_tag == DT_PLTGOT) { /* On IA-64, _DYNAMIC is writable and GLIBC has relocated it. */ di->gp = dyn->d_un.d_ptr; break; } } else /* Otherwise this is a static executable with no _DYNAMIC. The gp is constant program-wide. */ di->gp = current_gp(); di->format = UNW_INFO_FORMAT_TABLE; di->start_ip = p_text->p_vaddr + load_base; di->end_ip = p_text->p_vaddr + load_base + p_text->p_memsz; di->u.ti.name_ptr = (unw_word_t) info->dlpi_name; di->u.ti.table_data = (void *) (p_unwind->p_vaddr + load_base); di->u.ti.table_len = p_unwind->p_memsz / sizeof (unw_word_t); di->u.ti.segbase = segbase; Debug (16, "found table `%s': segbase=%lx, len=%lu, gp=%lx, " "table_data=%p\n", (char *) di->u.ti.name_ptr, di->u.ti.segbase, di->u.ti.table_len, di->gp, di->u.ti.table_data); return 1; } # ifdef HAVE_DL_PHDR_REMOVALS_COUNTER static inline int validate_cache (unw_addr_space_t as) { /* Note: we don't need to serialize here with respect to dl_iterate_phdr() because if somebody were to remove an object that is required to complete the unwind on whose behalf we're validating the cache here, we'd be hosed anyhow. What we're guarding against here is the case where library FOO gets mapped, unwind info for FOO gets cached, FOO gets unmapped, BAR gets mapped in the place where FOO was and then we unwind across a function in FOO. Since no thread can execute in BAR before FOO has been removed, we are guaranteed that dl_phdr_removals_counter() would have been incremented before we get here.
*/ unsigned long long removals = dl_phdr_removals_counter (); if (removals == as->shared_object_removals) return 1; as->shared_object_removals = removals; unw_flush_cache (as, 0, 0); return -1; } # else /* !HAVE_DL_PHDR_REMOVALS_COUNTER */ /* Check whether any phdrs have been removed since we last flushed the cache. If so we flush the cache and return -1, if not, we do nothing and return 1. */ static int check_callback (struct dl_phdr_info *info, size_t size, void *ptr) { # ifdef HAVE_STRUCT_DL_PHDR_INFO_DLPI_SUBS unw_addr_space_t as = ptr; if (size < offsetof (struct dl_phdr_info, dlpi_subs) + sizeof (info->dlpi_subs)) /* It would be safer to flush the cache here, but that would disable caching for older libc's which would be incompatible with the behavior of older versions of libunwind so we return 1 instead and hope nobody runs into stale cache info... */ return 1; if (info->dlpi_subs == as->shared_object_removals) return 1; as->shared_object_removals = info->dlpi_subs; unw_flush_cache (as, 0, 0); return -1; /* indicate that there were removals */ # else return 1; # endif } static inline int validate_cache (unw_addr_space_t as) { intrmask_t saved_mask; int ret; SIGPROCMASK (SIG_SETMASK, &unwi_full_mask, &saved_mask); ret = dl_iterate_phdr (check_callback, as); SIGPROCMASK (SIG_SETMASK, &saved_mask, NULL); return ret; } # endif /* HAVE_DL_PHDR_REMOVALS_COUNTER */ # elif defined(HAVE_DLMODINFO) /* Support for HP-UX-style dlmodinfo() */ # include <dlfcn.h> static inline int validate_cache (unw_addr_space_t as) { return 1; } # endif /* !HAVE_DLMODINFO */ HIDDEN int tdep_find_proc_info (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, int need_unwind_info, void *arg) { # if defined(HAVE_DL_ITERATE_PHDR) unw_dyn_info_t di, *dip = &di; intrmask_t saved_mask; int ret; di.u.ti.segbase = ip; /* this is cheap... 
*/ SIGPROCMASK (SIG_SETMASK, &unwi_full_mask, &saved_mask); ret = dl_iterate_phdr (callback, &di); SIGPROCMASK (SIG_SETMASK, &saved_mask, NULL); if (ret <= 0) { if (!kernel_table.u.ti.table_data) { if ((ret = get_kernel_table (&kernel_table)) < 0) return ret; } if (ip < kernel_table.start_ip || ip >= kernel_table.end_ip) return -UNW_ENOINFO; dip = &kernel_table; } # elif defined(HAVE_DLMODINFO) # define UNWIND_TBL_32BIT 0x8000000000000000 struct load_module_desc lmd; unw_dyn_info_t di, *dip = &di; struct unwind_header { uint64_t header_version; uint64_t start_offset; uint64_t end_offset; } *uhdr; if (!dlmodinfo (ip, &lmd, sizeof (lmd), NULL, 0, 0)) return -UNW_ENOINFO; di.format = UNW_INFO_FORMAT_TABLE; di.start_ip = lmd.text_base; di.end_ip = lmd.text_base + lmd.text_size; di.gp = lmd.linkage_ptr; di.u.ti.name_ptr = 0; /* no obvious table-name available */ di.u.ti.segbase = lmd.text_base; uhdr = (struct unwind_header *) lmd.unwind_base; if ((uhdr->header_version & ~UNWIND_TBL_32BIT) != 1 && (uhdr->header_version & ~UNWIND_TBL_32BIT) != 2) { Debug (1, "encountered unknown unwind header version %ld\n", (long) (uhdr->header_version & ~UNWIND_TBL_32BIT)); return -UNW_EBADVERSION; } if (uhdr->header_version & UNWIND_TBL_32BIT) { Debug (1, "32-bit unwind tables are not supported yet\n"); return -UNW_EINVAL; } di.u.ti.table_data = (unw_word_t *) (di.u.ti.segbase + uhdr->start_offset); di.u.ti.table_len = ((uhdr->end_offset - uhdr->start_offset) / sizeof (unw_word_t)); Debug (16, "found table `%s': segbase=%lx, len=%lu, gp=%lx, " "table_data=%p\n", (char *) di.u.ti.name_ptr, di.u.ti.segbase, di.u.ti.table_len, di.gp, di.u.ti.table_data); # endif /* now search the table: */ return tdep_search_unwind_table (as, ip, dip, pi, need_unwind_info, arg); } /* Returns 1 if the cache is up-to-date or -1 if the cache contained stale data and had to be flushed. */ HIDDEN int ia64_local_validate_cache (unw_addr_space_t as, void *arg) { return validate_cache (as); } #endif /* !UNW_REMOTE_ONLY */
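Aside: lookup() above is a textbook binary search over sorted, non-overlapping half-open [start_offset, end_offset) ranges. The same idea in a self-contained C sketch (hypothetical range type, not the ia64_table_entry layout):

#include <stddef.h>

struct range { unsigned long start, end; }; /* half-open [start, end) */

/* Returns the entry containing ip, or NULL. Assumes the table is sorted by
   start and the ranges do not overlap, as IA-64 unwind tables guarantee. */
static const struct range *
find_range (const struct range *tab, size_t n, unsigned long ip)
{
  size_t lo = 0, hi = n;
  while (lo < hi)
    {
      size_t mid = (lo + hi) / 2;
      if (ip < tab[mid].start)
        hi = mid;                /* ip is left of this entry */
      else if (ip >= tab[mid].end)
        lo = mid + 1;            /* ip is right of this entry */
      else
        return &tab[mid];        /* ip falls inside [start, end) */
    }
  return NULL;                   /* no entry covers ip */
}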
-1
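Aside: callback() in the file above is driven by dl_iterate_phdr(3), which walks every loaded ELF object and stops as soon as the callback returns non-zero. A minimal sketch of that pattern, locating the PT_LOAD segment containing a given address (Linux-specific; the probe type is illustrative, not libunwind's):

#define _GNU_SOURCE
#include <link.h>
#include <stddef.h>

struct probe { ElfW(Addr) addr; const char *name; };

static int
probe_cb (struct dl_phdr_info *info, size_t size, void *data)
{
  struct probe *p = data;
  for (int i = 0; i < info->dlpi_phnum; i++)
    {
      const ElfW(Phdr) *ph = &info->dlpi_phdr[i];
      ElfW(Addr) lo = info->dlpi_addr + ph->p_vaddr; /* relocated segment base */
      if (ph->p_type == PT_LOAD && p->addr >= lo && p->addr < lo + ph->p_memsz)
        {
          p->name = info->dlpi_name;
          return 1; /* non-zero stops the walk, as in callback() above */
        }
    }
  return 0;         /* keep iterating over the remaining objects */
}

/* Usage: struct probe p = { some_addr, NULL }; dl_iterate_phdr (probe_cb, &p); */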
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/mono/dlls/mscordbi/cordb.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: CORDB.H // #ifndef __MONO_DEBUGGER_CORDB_H__ #define __MONO_DEBUGGER_CORDB_H__ #include "cor.h" #include "cordebug.h" #include "corhdr.h" #include "xcordebug.h" #include <mono/component/debugger-protocol.h> #include <mono/utils/mono-publib.h> #include "arraylist.h" #include "utsem.h" #include "ex.h" #include "log.h" #ifdef HOST_WIN32 #include <windows.h> #include <ws2tcpip.h> #define DIR_SEPARATOR '\\' #else #define DIR_SEPARATOR '/' #endif #define return_if_nok(error) \ do \ { \ if (!is_ok((error))) \ return S_FALSE; \ } while (0) static UTSemReadWrite* m_pSemReadWrite; #define dbg_lock() m_pSemReadWrite->LockRead(); #define dbg_unlock() m_pSemReadWrite->UnlockRead(); #ifdef _DEBUG #define LOGGING #endif #ifdef TARGET_AMD64 #define POS_RSP 0x98 #else #define POS_RSP 0 //TODO fix for other platforms #endif #define CreateProcess CreateProcessW class Socket; class Cordb; class CordbProcess; class CordbAppDomain; class CordbAssembly; class CordbModule; class CordbCode; class CordbThread; class CordbFunction; class CordbStepper; class RegMeta; class CordbRegisterSet; class CordbClass; class CordbNativeFrame; class CordbAppDomainEnum; class CordbTypeEnum; class CordbBlockingObjectEnum; class CordbFunctionBreakpoint; class CordbEval; class CordbType; class CordbStackWalk; enum CordbTypeKind { CordbTypeKindSimpleType, CordbTypeKindClassType, CordbTypeKindArrayType, CordbTypeKindTotal }; class ReceivedReplyPacket { int error; int error_2; int id; MdbgProtBuffer* buf; public: ReceivedReplyPacket(int error, int error_2, int id, MdbgProtBuffer* buf); ~ReceivedReplyPacket(); MdbgProtBuffer* Buffer() { return buf; } int Error() { return error; } int Error2() { return error_2; } int Id() { return id; } }; class Connection { Socket* m_socket; CordbProcess* m_pProcess; Cordb* m_pCordb; ArrayList* m_pReceiveReplies; // TODO use hashmap ArrayList* m_pReceivedPacketsToProcess; void ProcessPacketInternal(MdbgProtBuffer* recvbuf); void ProcessPacketFromQueue(); void EnableEvent(MdbgProtEventKind eventKind); void SendPacket(MdbgProtBuffer& sendbuf); int ProcessPacket(bool is_answer = false); public: CordbProcess* GetProcess() const { return m_pProcess; } Cordb* GetCordb() const { return m_pCordb; } Connection(CordbProcess* proc, Cordb* cordb); ~Connection(); void LoopSendReceive(); void CloseConnection(); void StartConnection(); void TransportHandshake(); void Receive(); int SendEvent(int cmd_set, int cmd, MdbgProtBuffer* sendbuf); ReceivedReplyPacket* GetReplyWithError(int cmdId); CordbAppDomain* GetCurrentAppDomain(); }; class CordbBaseMono { protected: Connection* conn; ULONG m_cRef; // Ref count. 
public: CordbBaseMono(Connection* conn); virtual ~CordbBaseMono(); void SetConnection(Connection* conn); ULONG BaseAddRef(void); ULONG BaseRelease(void); ULONG InternalAddRef(void); ULONG InternalRelease(void); virtual const char* GetClassName() { return "CordbBaseMono"; } }; class Cordb : public ICorDebug, public ICorDebugRemote, public CordbBaseMono { ICorDebugManagedCallback* m_pCallback; CordbProcess* m_pProcess; DWORD m_nPID; public: DWORD PID() { return m_nPID; } ICorDebugManagedCallback* GetCallback() const { return m_pCallback; } Cordb(DWORD pid); ULONG STDMETHODCALLTYPE AddRef(void) { return (BaseAddRef()); } ULONG STDMETHODCALLTYPE Release(void) { return (BaseRelease()); } const char* GetClassName() { return "Cordb"; } ~Cordb(); HRESULT STDMETHODCALLTYPE Initialize(void); HRESULT STDMETHODCALLTYPE Terminate(void); HRESULT STDMETHODCALLTYPE SetManagedHandler(ICorDebugManagedCallback* pCallback); HRESULT STDMETHODCALLTYPE SetUnmanagedHandler(ICorDebugUnmanagedCallback* pCallback); HRESULT STDMETHODCALLTYPE CreateProcess(LPCWSTR lpApplicationName, LPWSTR lpCommandLine, LPSECURITY_ATTRIBUTES lpProcessAttributes, LPSECURITY_ATTRIBUTES lpThreadAttributes, BOOL bInheritHandles, DWORD dwCreationFlags, PVOID lpEnvironment, LPCWSTR lpCurrentDirectory, LPSTARTUPINFOW lpStartupInfo, LPPROCESS_INFORMATION lpProcessInformation, CorDebugCreateProcessFlags debuggingFlags, ICorDebugProcess** ppProcess); HRESULT STDMETHODCALLTYPE DebugActiveProcess(DWORD id, BOOL win32Attach, ICorDebugProcess** ppProcess); HRESULT STDMETHODCALLTYPE EnumerateProcesses(ICorDebugProcessEnum** ppProcess); HRESULT STDMETHODCALLTYPE GetProcess(DWORD dwProcessId, ICorDebugProcess** ppProcess); HRESULT STDMETHODCALLTYPE CanLaunchOrAttach(DWORD dwProcessId, BOOL win32DebuggingEnabled); HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, _COM_Outptr_ void __RPC_FAR* __RPC_FAR* ppvObject); HRESULT STDMETHODCALLTYPE CreateProcessEx(ICorDebugRemoteTarget* pRemoteTarget, LPCWSTR lpApplicationName, _In_ LPWSTR lpCommandLine, LPSECURITY_ATTRIBUTES lpProcessAttributes, LPSECURITY_ATTRIBUTES lpThreadAttributes, BOOL bInheritHandles, DWORD dwCreationFlags, PVOID lpEnvironment, LPCWSTR lpCurrentDirectory, LPSTARTUPINFOW lpStartupInfo, LPPROCESS_INFORMATION lpProcessInformation, CorDebugCreateProcessFlags debuggingFlags, ICorDebugProcess** ppProcess); HRESULT STDMETHODCALLTYPE DebugActiveProcessEx(ICorDebugRemoteTarget* pRemoteTarget, DWORD dwProcessId, BOOL fWin32Attach, ICorDebugProcess** ppProcess); }; #define CHECK_ERROR_RETURN_FALSE(localbuf) \ do \ { \ if (localbuf->Error() > 0 || localbuf->Error2() > 0) \ {\ if (localbuf->Buffer()->end > localbuf->Buffer()->p) {\ char *error_msg = m_dbgprot_decode_string(localbuf->Buffer()->p, &localbuf->Buffer()->p, localbuf->Buffer()->end); \ LOG((LF_CORDB, LL_INFO100000, "ERROR RECEIVED - %s\n", error_msg)); \ free(error_msg); \ }\ else {\ LOG((LF_CORDB, LL_INFO100000, "ERROR RECEIVED - %d - %d\n", localbuf->Error(), localbuf->Error2())); \ }\ EX_THROW(HRException, (E_FAIL)); \ } \ } while (0) #endif
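Aside: BaseAddRef/BaseRelease above carry the usual COM-style reference count behind AddRef/Release. A hedged sketch of the pattern in plain C with C11 atomics (not the mscordbi implementation, which keeps a ULONG m_cRef inside CordbBaseMono):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct refcounted { atomic_ulong c_ref; /* ...payload fields... */ } refcounted;

static unsigned long obj_addref (refcounted *o)
{
  return atomic_fetch_add (&o->c_ref, 1) + 1;   /* returns the new count */
}

static unsigned long obj_release (refcounted *o)
{
  unsigned long n = atomic_fetch_sub (&o->c_ref, 1) - 1;
  if (n == 0)
    free (o);                                   /* last reference dropped */
  return n;
}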
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: CORDB.H // #ifndef __MONO_DEBUGGER_CORDB_H__ #define __MONO_DEBUGGER_CORDB_H__ #include "cor.h" #include "cordebug.h" #include "corhdr.h" #include "xcordebug.h" #include <mono/component/debugger-protocol.h> #include <mono/utils/mono-publib.h> #include "arraylist.h" #include "utsem.h" #include "ex.h" #include "log.h" #ifdef HOST_WIN32 #include <windows.h> #include <ws2tcpip.h> #define DIR_SEPARATOR '\\' #else #define DIR_SEPARATOR '/' #endif #define return_if_nok(error) \ do \ { \ if (!is_ok((error))) \ return S_FALSE; \ } while (0) static UTSemReadWrite* m_pSemReadWrite; #define dbg_lock() m_pSemReadWrite->LockRead(); #define dbg_unlock() m_pSemReadWrite->UnlockRead(); #ifdef _DEBUG #define LOGGING #endif #ifdef TARGET_AMD64 #define POS_RSP 0x98 #else #define POS_RSP 0 //TODO fix for other platforms #endif #define CreateProcess CreateProcessW class Socket; class Cordb; class CordbProcess; class CordbAppDomain; class CordbAssembly; class CordbModule; class CordbCode; class CordbThread; class CordbFunction; class CordbStepper; class RegMeta; class CordbRegisterSet; class CordbClass; class CordbNativeFrame; class CordbAppDomainEnum; class CordbTypeEnum; class CordbBlockingObjectEnum; class CordbFunctionBreakpoint; class CordbEval; class CordbType; class CordbStackWalk; enum CordbTypeKind { CordbTypeKindSimpleType, CordbTypeKindClassType, CordbTypeKindArrayType, CordbTypeKindTotal }; class ReceivedReplyPacket { int error; int error_2; int id; MdbgProtBuffer* buf; public: ReceivedReplyPacket(int error, int error_2, int id, MdbgProtBuffer* buf); ~ReceivedReplyPacket(); MdbgProtBuffer* Buffer() { return buf; } int Error() { return error; } int Error2() { return error_2; } int Id() { return id; } }; class Connection { Socket* m_socket; CordbProcess* m_pProcess; Cordb* m_pCordb; ArrayList* m_pReceiveReplies; // TODO use hashmap ArrayList* m_pReceivedPacketsToProcess; void ProcessPacketInternal(MdbgProtBuffer* recvbuf); void ProcessPacketFromQueue(); void EnableEvent(MdbgProtEventKind eventKind); void SendPacket(MdbgProtBuffer& sendbuf); int ProcessPacket(bool is_answer = false); public: CordbProcess* GetProcess() const { return m_pProcess; } Cordb* GetCordb() const { return m_pCordb; } Connection(CordbProcess* proc, Cordb* cordb); ~Connection(); void LoopSendReceive(); void CloseConnection(); void StartConnection(); void TransportHandshake(); void Receive(); int SendEvent(int cmd_set, int cmd, MdbgProtBuffer* sendbuf); ReceivedReplyPacket* GetReplyWithError(int cmdId); CordbAppDomain* GetCurrentAppDomain(); }; class CordbBaseMono { protected: Connection* conn; ULONG m_cRef; // Ref count. 
public: CordbBaseMono(Connection* conn); virtual ~CordbBaseMono(); void SetConnection(Connection* conn); ULONG BaseAddRef(void); ULONG BaseRelease(void); ULONG InternalAddRef(void); ULONG InternalRelease(void); virtual const char* GetClassName() { return "CordbBaseMono"; } }; class Cordb : public ICorDebug, public ICorDebugRemote, public CordbBaseMono { ICorDebugManagedCallback* m_pCallback; CordbProcess* m_pProcess; DWORD m_nPID; public: DWORD PID() { return m_nPID; } ICorDebugManagedCallback* GetCallback() const { return m_pCallback; } Cordb(DWORD pid); ULONG STDMETHODCALLTYPE AddRef(void) { return (BaseAddRef()); } ULONG STDMETHODCALLTYPE Release(void) { return (BaseRelease()); } const char* GetClassName() { return "Cordb"; } ~Cordb(); HRESULT STDMETHODCALLTYPE Initialize(void); HRESULT STDMETHODCALLTYPE Terminate(void); HRESULT STDMETHODCALLTYPE SetManagedHandler(ICorDebugManagedCallback* pCallback); HRESULT STDMETHODCALLTYPE SetUnmanagedHandler(ICorDebugUnmanagedCallback* pCallback); HRESULT STDMETHODCALLTYPE CreateProcess(LPCWSTR lpApplicationName, LPWSTR lpCommandLine, LPSECURITY_ATTRIBUTES lpProcessAttributes, LPSECURITY_ATTRIBUTES lpThreadAttributes, BOOL bInheritHandles, DWORD dwCreationFlags, PVOID lpEnvironment, LPCWSTR lpCurrentDirectory, LPSTARTUPINFOW lpStartupInfo, LPPROCESS_INFORMATION lpProcessInformation, CorDebugCreateProcessFlags debuggingFlags, ICorDebugProcess** ppProcess); HRESULT STDMETHODCALLTYPE DebugActiveProcess(DWORD id, BOOL win32Attach, ICorDebugProcess** ppProcess); HRESULT STDMETHODCALLTYPE EnumerateProcesses(ICorDebugProcessEnum** ppProcess); HRESULT STDMETHODCALLTYPE GetProcess(DWORD dwProcessId, ICorDebugProcess** ppProcess); HRESULT STDMETHODCALLTYPE CanLaunchOrAttach(DWORD dwProcessId, BOOL win32DebuggingEnabled); HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, _COM_Outptr_ void __RPC_FAR* __RPC_FAR* ppvObject); HRESULT STDMETHODCALLTYPE CreateProcessEx(ICorDebugRemoteTarget* pRemoteTarget, LPCWSTR lpApplicationName, _In_ LPWSTR lpCommandLine, LPSECURITY_ATTRIBUTES lpProcessAttributes, LPSECURITY_ATTRIBUTES lpThreadAttributes, BOOL bInheritHandles, DWORD dwCreationFlags, PVOID lpEnvironment, LPCWSTR lpCurrentDirectory, LPSTARTUPINFOW lpStartupInfo, LPPROCESS_INFORMATION lpProcessInformation, CorDebugCreateProcessFlags debuggingFlags, ICorDebugProcess** ppProcess); HRESULT STDMETHODCALLTYPE DebugActiveProcessEx(ICorDebugRemoteTarget* pRemoteTarget, DWORD dwProcessId, BOOL fWin32Attach, ICorDebugProcess** ppProcess); }; #define CHECK_ERROR_RETURN_FALSE(localbuf) \ do \ { \ if (localbuf->Error() > 0 || localbuf->Error2() > 0) \ {\ if (localbuf->Buffer()->end > localbuf->Buffer()->p) {\ char *error_msg = m_dbgprot_decode_string(localbuf->Buffer()->p, &localbuf->Buffer()->p, localbuf->Buffer()->end); \ LOG((LF_CORDB, LL_INFO100000, "ERROR RECEIVED - %s\n", error_msg)); \ free(error_msg); \ }\ else {\ LOG((LF_CORDB, LL_INFO100000, "ERROR RECEIVED - %d - %d\n", localbuf->Error(), localbuf->Error2())); \ }\ EX_THROW(HRException, (E_FAIL)); \ } \ } while (0) #endif
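Aside: return_if_nok and CHECK_ERROR_RETURN_FALSE above both use the do { ... } while (0) idiom so that a multi-statement macro behaves as exactly one statement. A minimal C sketch (hypothetical status type standing in for HRESULT) of why the wrapper matters:

#include <stdio.h>

typedef int status_t;   /* hypothetical stand-in for HRESULT */
#define STATUS_OK 0

#define RETURN_IF_FAILED(s) \
    do \
    { \
        if ((s) != STATUS_OK) \
            return (s); \
    } while (0)

static status_t step (int fail) { return fail ? -1 : STATUS_OK; }

static status_t run (void)
{
    /* Because the macro expands to a single statement, it nests safely in an
       unbraced if/else with no dangling-else or stray-semicolon surprises: */
    if (1)
        RETURN_IF_FAILED (step (0));
    else
        printf ("never reached\n");
    return STATUS_OK;
}

int main (void) { return run (); }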
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/native/public/mono/metadata/details/mono-config-functions.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // This file does not have ifdef guards; it is meant to be included multiple times with different definitions of MONO_API_FUNCTION #ifndef MONO_API_FUNCTION #error "MONO_API_FUNCTION(ret,name,args) macro not defined before including function declaration header" #endif MONO_API_FUNCTION(const char *, mono_config_get_os, (void)) MONO_API_FUNCTION(const char *, mono_config_get_cpu, (void)) MONO_API_FUNCTION(const char *, mono_config_get_wordsize, (void)) MONO_API_FUNCTION(const char*, mono_get_config_dir, (void)) MONO_API_FUNCTION(void, mono_set_config_dir, (const char *dir)) MONO_API_FUNCTION(const char *, mono_get_machine_config, (void)) MONO_API_FUNCTION(void, mono_config_cleanup, (void)) MONO_API_FUNCTION(void, mono_config_parse, (const char *filename)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY void, mono_config_for_assembly, (MonoImage *assembly)) MONO_API_FUNCTION(void, mono_config_parse_memory, (const char *buffer)) MONO_API_FUNCTION(const char*, mono_config_string_for_assembly_file, (const char *filename)) MONO_API_FUNCTION(void, mono_config_set_server_mode, (mono_bool server_mode)) MONO_API_FUNCTION(mono_bool, mono_config_is_server_mode, (void))
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // This file does not have ifdef guards; it is meant to be included multiple times with different definitions of MONO_API_FUNCTION #ifndef MONO_API_FUNCTION #error "MONO_API_FUNCTION(ret,name,args) macro not defined before including function declaration header" #endif MONO_API_FUNCTION(const char *, mono_config_get_os, (void)) MONO_API_FUNCTION(const char *, mono_config_get_cpu, (void)) MONO_API_FUNCTION(const char *, mono_config_get_wordsize, (void)) MONO_API_FUNCTION(const char*, mono_get_config_dir, (void)) MONO_API_FUNCTION(void, mono_set_config_dir, (const char *dir)) MONO_API_FUNCTION(const char *, mono_get_machine_config, (void)) MONO_API_FUNCTION(void, mono_config_cleanup, (void)) MONO_API_FUNCTION(void, mono_config_parse, (const char *filename)) MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY void, mono_config_for_assembly, (MonoImage *assembly)) MONO_API_FUNCTION(void, mono_config_parse_memory, (const char *buffer)) MONO_API_FUNCTION(const char*, mono_config_string_for_assembly_file, (const char *filename)) MONO_API_FUNCTION(void, mono_config_set_server_mode, (mono_bool server_mode)) MONO_API_FUNCTION(mono_bool, mono_config_is_server_mode, (void))
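Aside: the header above is an X-macro list — it deliberately has no include guard, and each includer defines MONO_API_FUNCTION to whatever expansion it needs before including it. A hedged sketch of the two most common expansions, written against a hypothetical two-entry header "demo-functions.h" in the same style (the real mono header additionally needs mono's types and attribute macros in scope):

/* demo-functions.h (hypothetical):
     MONO_API_FUNCTION(int,  demo_add, (int a, int b))
     MONO_API_FUNCTION(void, demo_log, (const char *msg))  */

/* Expansion 1: generate prototypes. */
#define MONO_API_FUNCTION(ret, name, args) ret name args;
#include "demo-functions.h"
#undef MONO_API_FUNCTION

/* Expansion 2: generate a name table over the same list (e.g. for dlsym lookups). */
static const char *const demo_names[] = {
#define MONO_API_FUNCTION(ret, name, args) #name,
#include "demo-functions.h"
#undef MONO_API_FUNCTION
};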
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/coreclr/inc/ecmakey.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once // The byte values of the ECMA pseudo public key and its token. const BYTE g_rbNeutralPublicKey[] = { 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0 }; const BYTE g_rbNeutralPublicKeyToken[] = { 0xb7, 0x7a, 0x5c, 0x56, 0x19, 0x34, 0xe0, 0x89 };
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once // The byte values of the ECMA pseudo public key and its token. const BYTE g_rbNeutralPublicKey[] = { 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0 }; const BYTE g_rbNeutralPublicKeyToken[] = { 0xb7, 0x7a, 0x5c, 0x56, 0x19, 0x34, 0xe0, 0x89 };
-1
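Aside: a strong-name public key token is, in general, the last 8 bytes of the SHA-1 hash of the public key blob, stored in reverse order; the 16-byte ECMA pseudo key above is recognized specially by the runtime and mapped straight to the b77a5c561934e089 token rather than being hashed. A sketch of the general rule only, assuming OpenSSL's SHA1() is available (deprecated in OpenSSL 3 but still present):

#include <openssl/sha.h>
#include <stdio.h>

/* Prints the token for an arbitrary key blob: last 8 SHA-1 bytes, reversed.
   Do not expect this to reproduce the neutral token from the pseudo key,
   which is special-cased rather than hashed. */
static void
print_token (const unsigned char *blob, size_t len)
{
  unsigned char digest[SHA_DIGEST_LENGTH];
  SHA1 (blob, len, digest);
  for (int i = 0; i < 8; i++)
    printf ("%02x", digest[SHA_DIGEST_LENGTH - 1 - i]);
  printf ("\n");
}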
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/mono/mono/eglib/glib.h
#ifndef __GLIB_H #define __GLIB_H // Ask stdint.h and inttypes.h for the full C99 features for CentOS 6 g++ 4.4, Android, etc. // See for example: // $HOME/android-toolchain/toolchains/armeabi-v7a-clang/sysroot/usr/include/inttypes.h // $HOME/android-toolchain/toolchains/armeabi-v7a-clang/sysroot/usr/include/stdint.h #ifdef __cplusplus #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #ifndef __STDC_CONSTANT_MACROS #define __STDC_CONSTANT_MACROS #endif #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #endif // __cplusplus #include <stdarg.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <stddef.h> #include <ctype.h> #include <limits.h> #include <stdint.h> #include <inttypes.h> #include <eglib-config.h> #include <minipal/utils.h> #include <time.h> // - Pointers should only be converted to or from pointer-sized integers. // - Any size integer can be converted to any other size integer. // - Therefore a pointer-sized integer is the intermediary between // a pointer and any integer type. #define GPOINTER_TO_INT(ptr) ((gint)(gssize)(ptr)) #define GPOINTER_TO_UINT(ptr) ((guint)(gsize)(ptr)) #define GINT_TO_POINTER(v) ((gpointer)(gssize)(v)) #define GUINT_TO_POINTER(v) ((gpointer)(gsize)(v)) #ifndef EGLIB_NO_REMAP #include <eglib-remap.h> #endif #ifdef G_HAVE_ALLOCA_H #include <alloca.h> #endif #ifdef WIN32 /* For alloca */ #include <malloc.h> #endif #ifdef G_HAVE_UNISTD_H #include <unistd.h> #endif #ifndef offsetof # define offsetof(s_name,n_name) (size_t)(char *)&(((s_name*)0)->n_name) #endif #ifdef __cplusplus #define G_BEGIN_DECLS extern "C" { #define G_END_DECLS } #define G_EXTERN_C extern "C" #else #define G_BEGIN_DECLS /* nothing */ #define G_END_DECLS /* nothing */ #define G_EXTERN_C /* nothing */ #endif #ifdef __cplusplus #define g_cast monoeg_g_cast // in case not inlined (see eglib-remap.h) // g_cast converts void* to T*. // e.g. #define malloc(x) (g_cast (malloc (x))) // FIXME It used to do more. Rename? struct g_cast { private: void * const x; public: explicit g_cast (void volatile *y) : x((void*)y) { } // Lack of rvalue constructor inhibits ternary operator. // Either don't use ternary, or cast each side. // sa = (salen <= 128) ? g_alloca (salen) : g_malloc (salen); // w32socket.c:1045:24: error: call to deleted constructor of 'monoeg_g_cast' //g_cast (g_cast&& y) : x(y.x) { } g_cast (g_cast&&) = delete; g_cast () = delete; g_cast (const g_cast&) = delete; template <typename TTo> operator TTo* () const { return (TTo*)x; } }; #else // FIXME? Parens are omitted to preserve prior meaning. #define g_cast(x) x #endif // G++4.4 breaks opeq below without this. #if defined (__GNUC__) || defined (__clang__) #define G_MAY_ALIAS __attribute__((__may_alias__)) #else #define G_MAY_ALIAS /* nothing */ #endif #ifdef __cplusplus // Provide for bit operations on enums, but not all integer operations. // This alleviates a fair number of casts in porting C to C++. // Forward declare template with no generic implementation. template <size_t> struct g_size_to_int; // Template specializations.
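// Each specialization below maps a byte size to the signed integer type of
// that size, so the G_ENUM_FUNCTIONS machinery can recover a usable
// underlying type for any enum from sizeof alone, e.g.
//   typedef g_size_to_int<sizeof (SomeEnum)>::type type; // int32_t for a 4-byte enum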
template <> struct g_size_to_int<1> { typedef int8_t type; }; template <> struct g_size_to_int<2> { typedef int16_t type; }; template <> struct g_size_to_int<4> { typedef int32_t type; }; template <> struct g_size_to_int<8> { typedef int64_t type; }; // g++4.4 does not accept: //template <typename T> //using g_size_to_int_t = typename g_size_to_int <sizeof (T)>::type; #define g_size_to_int_t(x) g_size_to_int <sizeof (x)>::type #define G_ENUM_BINOP(Enum, op, opeq) \ inline Enum \ operator op (Enum a, Enum b) \ { \ typedef g_size_to_int_t (Enum) type; \ return static_cast<Enum>(static_cast<type>(a) op b); \ } \ \ inline Enum& \ operator opeq (Enum& a, Enum b) \ { \ typedef g_size_to_int_t (Enum) G_MAY_ALIAS type; \ return (Enum&)((type&)a opeq b); \ } \ #define G_ENUM_FUNCTIONS(Enum) \ extern "C++" { /* in case within extern "C" */ \ inline Enum \ operator~ (Enum a) \ { \ typedef g_size_to_int_t (Enum) type; \ return static_cast<Enum>(~static_cast<type>(a)); \ } \ \ G_ENUM_BINOP (Enum, |, |=) \ G_ENUM_BINOP (Enum, &, &=) \ G_ENUM_BINOP (Enum, ^, ^=) \ \ } /* extern "C++" */ #else #define G_ENUM_FUNCTIONS(Enum) /* nothing */ #endif G_BEGIN_DECLS /* * Basic data types */ typedef int gint; typedef unsigned int guint; typedef short gshort; typedef unsigned short gushort; typedef long glong; typedef unsigned long gulong; typedef void * gpointer; typedef const void * gconstpointer; typedef char gchar; typedef unsigned char guchar; /* Types defined in terms of the stdint.h */ typedef int8_t gint8; typedef uint8_t guint8; typedef int16_t gint16; typedef uint16_t guint16; typedef int32_t gint32; typedef uint32_t guint32; typedef int64_t gint64; typedef uint64_t guint64; typedef float gfloat; typedef double gdouble; typedef int32_t gboolean; #if defined (HOST_WIN32) || defined (_WIN32) G_END_DECLS #include <wchar.h> typedef wchar_t gunichar2; G_BEGIN_DECLS #else typedef guint16 gunichar2; #endif typedef guint32 gunichar; /* * Macros */ #define G_N_ELEMENTS(s) ARRAY_SIZE(s) #define FALSE 0 #define TRUE 1 #define G_MINSHORT SHRT_MIN #define G_MAXSHORT SHRT_MAX #define G_MAXUSHORT USHRT_MAX #define G_MAXINT INT_MAX #define G_MININT INT_MIN #define G_MAXINT8 INT8_MAX #define G_MAXUINT8 UINT8_MAX #define G_MININT8 INT8_MIN #define G_MAXINT16 INT16_MAX #define G_MAXUINT16 UINT16_MAX #define G_MININT16 INT16_MIN #define G_MAXINT32 INT32_MAX #define G_MAXUINT32 UINT32_MAX #define G_MININT32 INT32_MIN #define G_MININT64 INT64_MIN #define G_MAXINT64 INT64_MAX #define G_MAXUINT64 UINT64_MAX #define G_LITTLE_ENDIAN 1234 #define G_BIG_ENDIAN 4321 #define G_STMT_START do #define G_STMT_END while (0) #define G_USEC_PER_SEC 1000000 #ifndef ABS #define ABS(a) ((a) > 0 ? 
(a) : -(a)) #endif #define ALIGN_TO(val,align) ((((gssize)val) + (gssize)((align) - 1)) & (~((gssize)(align - 1)))) #define ALIGN_DOWN_TO(val,align) (((gssize)val) & (~((gssize)(align - 1)))) #define ALIGN_PTR_TO(ptr,align) (gpointer)((((gssize)(ptr)) + (gssize)(align - 1)) & (~((gssize)(align - 1)))) #define G_STRUCT_OFFSET(p_type,field) offsetof(p_type,field) #define EGLIB_STRINGIFY(x) #x #define EGLIB_TOSTRING(x) EGLIB_STRINGIFY(x) #define G_STRLOC __FILE__ ":" EGLIB_TOSTRING(__LINE__) ":" #define G_CONST_RETURN const #define G_GUINT64_FORMAT PRIu64 #define G_GINT64_FORMAT PRIi64 #define G_GUINT32_FORMAT PRIu32 #define G_GINT32_FORMAT PRIi32 #ifdef __GNUC__ #define G_ATTR_FORMAT_PRINTF(fmt_pos,arg_pos) __attribute__((__format__(__printf__,fmt_pos,arg_pos))) #else #define G_ATTR_FORMAT_PRINTF(fmt_pos,arg_pos) #endif /* * Allocation */ G_EXTERN_C // Used by MonoPosixHelper or MonoSupportW, at least. void g_free (void *ptr); G_EXTERN_C // Used by MonoPosixHelper or MonoSupportW, at least. gpointer g_realloc (gpointer obj, gsize size); G_EXTERN_C // Used by MonoPosixHelper or MonoSupportW, at least. gpointer g_malloc (gsize x); G_EXTERN_C // Used by MonoPosixHelper or MonoSupportW, at least. gpointer g_malloc0 (gsize x); G_EXTERN_C // Used by profilers, at least. gpointer g_calloc (gsize n, gsize x); gpointer g_try_malloc (gsize x); gpointer g_try_realloc (gpointer obj, gsize size); #define g_new(type,size) ((type *) g_malloc (sizeof (type) * (size))) #define g_new0(type,size) ((type *) g_malloc0 (sizeof (type)* (size))) #define g_newa(type,size) ((type *) alloca (sizeof (type) * (size))) #define g_newa0(type,size) ((type *) memset (alloca (sizeof (type) * (size)), 0, sizeof (type) * (size))) #define g_memmove(dest,src,len) memmove (dest, src, len) #define g_renew(struct_type, mem, n_structs) ((struct_type*)g_realloc (mem, sizeof (struct_type) * n_structs)) #define g_alloca(size) (g_cast (alloca (size))) G_EXTERN_C // Used by libtest, at least. gpointer g_memdup (gconstpointer mem, guint byte_size); static inline gchar *g_strdup (const gchar *str) { if (str) { return (gchar*) g_memdup (str, (guint)strlen (str) + 1); } return NULL; } gchar **g_strdupv (gchar **str_array); typedef struct { gpointer (*malloc) (gsize n_bytes); gpointer (*realloc) (gpointer mem, gsize n_bytes); void (*free) (gpointer mem); gpointer (*calloc) (gsize n_blocks, gsize n_block_bytes); } GMemVTable; void g_mem_set_vtable (GMemVTable* vtable); void g_mem_get_vtable (GMemVTable* vtable); struct _GMemChunk { guint alloc_size; }; typedef struct _GMemChunk GMemChunk; /* * Misc. 
*/ gboolean g_hasenv(const gchar *variable); gchar * g_getenv(const gchar *variable); G_EXTERN_C // sdks/wasm/driver.c is C and uses this gboolean g_setenv(const gchar *variable, const gchar *value, gboolean overwrite); gchar* g_win32_getlocale(void); /* * Precondition macros */ #define g_warn_if_fail(x) G_STMT_START { if (!(x)) { g_warning ("%s:%d: assertion '%s' failed\n", __FILE__, __LINE__, #x); } } G_STMT_END #define g_return_if_fail(x) G_STMT_START { if (!(x)) { g_critical ("%s:%d: assertion '%s' failed\n", __FILE__, __LINE__, #x); return; } } G_STMT_END #define g_return_val_if_fail(x,e) G_STMT_START { if (!(x)) { g_critical ("%s:%d: assertion '%s' failed\n", __FILE__, __LINE__, #x); return (e); } } G_STMT_END /* * Errors */ typedef struct { /* In the real glib, this is a GQuark, but we don't use/need that */ gpointer domain; gint code; gchar *message; } GError; void g_clear_error (GError **gerror); void g_error_free (GError *gerror); GError *g_error_new (gpointer domain, gint code, const char *format, ...); void g_set_error (GError **err, gpointer domain, gint code, const gchar *format, ...); void g_propagate_error (GError **dest, GError *src); /* * Strings utility */ G_EXTERN_C // Used by libtest, at least. gchar *g_strdup_printf (const gchar *format, ...) G_ATTR_FORMAT_PRINTF(1, 2); gchar *g_strdup_vprintf (const gchar *format, va_list args); gchar *g_strndup (const gchar *str, gsize n); const gchar *g_strerror (gint errnum); void g_strfreev (gchar **str_array); gchar *g_strconcat (const gchar *first, ...); gchar **g_strsplit (const gchar *string, const gchar *delimiter, gint max_tokens); gchar **g_strsplit_set (const gchar *string, const gchar *delimiter, gint max_tokens); gchar *g_strreverse (gchar *str); gboolean g_str_has_prefix (const gchar *str, const gchar *prefix); gboolean g_str_has_suffix (const gchar *str, const gchar *suffix); guint g_strv_length (gchar **str_array); gchar *g_strjoin (const gchar *separator, ...); gchar *g_strjoinv (const gchar *separator, gchar **str_array); gchar *g_strchug (gchar *str); gchar *g_strchomp (gchar *str); gchar *g_strnfill (gsize length, gchar fill_char); gsize g_strnlen (const char*, gsize); char *g_str_from_file_region (int fd, guint64 offset, gsize size); void g_strdelimit (char *string, char delimiter, char new_delimiter); gint g_printf (gchar const *format, ...) G_ATTR_FORMAT_PRINTF(1, 2); gint g_fprintf (FILE *file, gchar const *format, ...) G_ATTR_FORMAT_PRINTF(2, 3); gint g_sprintf (gchar *string, gchar const *format, ...) G_ATTR_FORMAT_PRINTF(2, 3); gint g_snprintf (gchar *string, gulong n, gchar const *format, ...)
G_ATTR_FORMAT_PRINTF(3, 4); gint g_vasprintf (gchar **ret, const gchar *fmt, va_list ap); #define g_vprintf vprintf #define g_vfprintf vfprintf #define g_vsprintf vsprintf #define g_vsnprintf vsnprintf gsize g_strlcpy (gchar *dest, const gchar *src, gsize dest_size); gchar *g_stpcpy (gchar *dest, const char *src); gchar g_ascii_tolower (gchar c); gchar g_ascii_toupper (gchar c); gchar *g_ascii_strdown (const gchar *str, gssize len); void g_ascii_strdown_no_alloc (char* dst, const char* src, gsize len); gchar *g_ascii_strup (const gchar *str, gssize len); gint g_ascii_strncasecmp (const gchar *s1, const gchar *s2, gsize n); gint g_ascii_strcasecmp (const gchar *s1, const gchar *s2); gint g_ascii_xdigit_value (gchar c); #define g_ascii_isspace(c) (isspace (c) != 0) #define g_ascii_isalpha(c) (isalpha (c) != 0) #define g_ascii_isprint(c) (isprint (c) != 0) #define g_ascii_isxdigit(c) (isxdigit (c) != 0) gboolean g_utf16_ascii_equal (const gunichar2 *utf16, size_t ulen, const char *ascii, size_t alen); gboolean g_utf16_asciiz_equal (const gunichar2 *utf16, const char *ascii); static inline gboolean g_ascii_equal (const char *s1, gsize len1, const char *s2, gsize len2) { return len1 == len2 && (s1 == s2 || memcmp (s1, s2, len1) == 0); } static inline gboolean g_asciiz_equal (const char *s1, const char *s2) { return s1 == s2 || strcmp (s1, s2) == 0; } static inline gboolean g_ascii_equal_caseinsensitive (const char *s1, gsize len1, const char *s2, gsize len2) { return len1 == len2 && (s1 == s2 || g_ascii_strncasecmp (s1, s2, len1) == 0); } static inline gboolean g_asciiz_equal_caseinsensitive (const char *s1, const char *s2) { return s1 == s2 || g_ascii_strcasecmp (s1, s2) == 0; } /* FIXME: g_strcasecmp supports utf8 unicode stuff */ #ifdef _MSC_VER #define g_strcasecmp _stricmp #define g_strncasecmp _strnicmp #define g_strstrip(a) g_strchug (g_strchomp (a)) #else #define g_strcasecmp strcasecmp #define g_ascii_strtoull strtoull #define g_strncasecmp strncasecmp #define g_strstrip(a) g_strchug (g_strchomp (a)) #endif #define g_ascii_strdup strdup /* * String type */ typedef struct { char *str; gsize len; gsize allocated_len; } GString; GString *g_string_new (const gchar *init); GString *g_string_new_len (const gchar *init, gssize len); GString *g_string_sized_new (gsize default_size); gchar *g_string_free (GString *string, gboolean free_segment); GString *g_string_append (GString *string, const gchar *val); void g_string_printf (GString *string, const gchar *format, ...) G_ATTR_FORMAT_PRINTF(2, 3); void g_string_append_printf (GString *string, const gchar *format, ...) 
typedef void     (*GFunc)            (gpointer data, gpointer user_data);
typedef gint     (*GCompareFunc)     (gconstpointer a, gconstpointer b);
typedef gint     (*GCompareDataFunc) (gconstpointer a, gconstpointer b, gpointer user_data);
typedef void     (*GHFunc)           (gpointer key, gpointer value, gpointer user_data);
typedef gboolean (*GHRFunc)          (gpointer key, gpointer value, gpointer user_data);
typedef void     (*GDestroyNotify)   (gpointer data);
typedef guint    (*GHashFunc)        (gconstpointer key);
typedef gboolean (*GEqualFunc)       (gconstpointer a, gconstpointer b);
typedef void     (*GFreeFunc)        (gpointer data);

/*
 * Lists
 */
typedef struct _GSList GSList;
struct _GSList {
	gpointer data;
	GSList *next;
};

GSList  *g_slist_alloc         (void);
GSList  *g_slist_append        (GSList *list, gpointer data);
GSList  *g_slist_prepend       (GSList *list, gpointer data);
void     g_slist_free          (GSList *list);
void     g_slist_free_1        (GSList *list);
GSList  *g_slist_copy          (GSList *list);
GSList  *g_slist_concat        (GSList *list1, GSList *list2);
void     g_slist_foreach       (GSList *list, GFunc func, gpointer user_data);
GSList  *g_slist_last          (GSList *list);
GSList  *g_slist_find          (GSList *list, gconstpointer data);
GSList  *g_slist_find_custom   (GSList *list, gconstpointer data, GCompareFunc func);
GSList  *g_slist_remove        (GSList *list, gconstpointer data);
GSList  *g_slist_remove_all    (GSList *list, gconstpointer data);
GSList  *g_slist_reverse       (GSList *list);
guint    g_slist_length        (GSList *list);
GSList  *g_slist_remove_link   (GSList *list, GSList *link);
GSList  *g_slist_delete_link   (GSList *list, GSList *link);
GSList  *g_slist_insert_sorted (GSList *list, gpointer data, GCompareFunc func);
GSList  *g_slist_insert_before (GSList *list, GSList *sibling, gpointer data);
GSList  *g_slist_sort          (GSList *list, GCompareFunc func);
gint     g_slist_index         (GSList *list, gconstpointer data);
GSList  *g_slist_nth           (GSList *list, guint n);
gpointer g_slist_nth_data      (GSList *list, guint n);

#define g_slist_next(slist) ((slist) ? (((GSList *) (slist))->next) : NULL)
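/*
 * Illustrative sketch (not part of the API): list operations return the
 * (possibly new) head, so always reassign. `use_item` is hypothetical.
 *
 *   GSList *list = NULL;
 *   list = g_slist_prepend (list, item1);   // item1 becomes the head
 *   list = g_slist_append (list, item2);    // O(n), walks to the tail
 *   for (GSList *l = list; l; l = l->next)
 *       use_item (l->data);
 *   g_slist_free (list);                    // frees the links, not the data
 */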
typedef struct _GList GList;
struct _GList {
	gpointer data;
	GList *next;
	GList *prev;
};

#define g_list_next(list)     ((list) ? (((GList *) (list))->next) : NULL)
#define g_list_previous(list) ((list) ? (((GList *) (list))->prev) : NULL)

GList   *g_list_alloc         (void);
GList   *g_list_append        (GList *list, gpointer data);
GList   *g_list_prepend       (GList *list, gpointer data);
void     g_list_free          (GList *list);
void     g_list_free_1        (GList *list);
GList   *g_list_copy          (GList *list);
guint    g_list_length        (GList *list);
gint     g_list_index         (GList *list, gconstpointer data);
GList   *g_list_nth           (GList *list, guint n);
gpointer g_list_nth_data      (GList *list, guint n);
GList   *g_list_last          (GList *list);
GList   *g_list_concat        (GList *list1, GList *list2);
void     g_list_foreach       (GList *list, GFunc func, gpointer user_data);
GList   *g_list_first         (GList *list);
GList   *g_list_find          (GList *list, gconstpointer data);
GList   *g_list_find_custom   (GList *list, gconstpointer data, GCompareFunc func);
GList   *g_list_remove        (GList *list, gconstpointer data);
GList   *g_list_remove_all    (GList *list, gconstpointer data);
GList   *g_list_reverse       (GList *list);
GList   *g_list_remove_link   (GList *list, GList *link);
GList   *g_list_delete_link   (GList *list, GList *link);
GList   *g_list_insert_sorted (GList *list, gpointer data, GCompareFunc func);
GList   *g_list_insert_before (GList *list, GList *sibling, gpointer data);
GList   *g_list_sort          (GList *list, GCompareFunc func);

/*
 * Hashtables
 */
typedef struct _GHashTable GHashTable;
typedef struct _GHashTableIter GHashTableIter;

/* Private, but needed for stack allocation */
struct _GHashTableIter {
	gpointer dummy [8];
};

G_EXTERN_C // Used by MonoPosixHelper or MonoSupportW, at least.
GHashTable *g_hash_table_new             (GHashFunc hash_func, GEqualFunc key_equal_func);
GHashTable *g_hash_table_new_full        (GHashFunc hash_func, GEqualFunc key_equal_func,
                                          GDestroyNotify key_destroy_func, GDestroyNotify value_destroy_func);
G_EXTERN_C // Used by MonoPosixHelper or MonoSupportW, at least.
void        g_hash_table_insert_replace  (GHashTable *hash, gpointer key, gpointer value, gboolean replace);
guint       g_hash_table_size            (GHashTable *hash);
GList      *g_hash_table_get_keys        (GHashTable *hash);
GList      *g_hash_table_get_values      (GHashTable *hash);
gboolean    g_hash_table_contains        (GHashTable *hash, gconstpointer key);
G_EXTERN_C // Used by MonoPosixHelper or MonoSupportW, at least.
gpointer    g_hash_table_lookup          (GHashTable *hash, gconstpointer key);
gboolean    g_hash_table_lookup_extended (GHashTable *hash, gconstpointer key, gpointer *orig_key, gpointer *value);
G_EXTERN_C // Used by MonoPosixHelper or MonoSupportW, at least.
void        g_hash_table_foreach         (GHashTable *hash, GHFunc func, gpointer user_data);
gpointer    g_hash_table_find            (GHashTable *hash, GHRFunc predicate, gpointer user_data);
G_EXTERN_C // Used by MonoPosixHelper or MonoSupportW, at least.
gboolean    g_hash_table_remove          (GHashTable *hash, gconstpointer key);
gboolean    g_hash_table_steal           (GHashTable *hash, gconstpointer key);
void        g_hash_table_remove_all      (GHashTable *hash);
guint       g_hash_table_foreach_remove  (GHashTable *hash, GHRFunc func, gpointer user_data);
guint       g_hash_table_foreach_steal   (GHashTable *hash, GHRFunc func, gpointer user_data);
G_EXTERN_C // Used by MonoPosixHelper or MonoSupportW, at least.
void        g_hash_table_destroy         (GHashTable *hash);
void        g_hash_table_print_stats     (GHashTable *table);

void        g_hash_table_iter_init       (GHashTableIter *iter, GHashTable *hash_table);
gboolean    g_hash_table_iter_next       (GHashTableIter *iter, gpointer *key, gpointer *value);

guint       g_spaced_primes_closest      (guint x);

#define g_hash_table_insert(h,k,v)  g_hash_table_insert_replace ((h),(k),(v),FALSE)
#define g_hash_table_replace(h,k,v) g_hash_table_insert_replace ((h),(k),(v),TRUE)
#define g_hash_table_add(h,k)       g_hash_table_insert_replace ((h),(k),(k),TRUE)

G_EXTERN_C // Used by MonoPosixHelper or MonoSupportW, at least.
gboolean g_direct_equal (gconstpointer v1, gconstpointer v2);
G_EXTERN_C // Used by MonoPosixHelper or MonoSupportW, at least.
guint    g_direct_hash  (gconstpointer v1);
gboolean g_int_equal    (gconstpointer v1, gconstpointer v2);
guint    g_int_hash     (gconstpointer v1);
gboolean g_str_equal    (gconstpointer v1, gconstpointer v2);
guint    g_str_hash     (gconstpointer v1);
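/*
 * Illustrative sketch (not part of the API): a string-keyed table that owns
 * both its keys and its values.
 *
 *   GHashTable *t = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_free);
 *   g_hash_table_insert (t, g_strdup ("answer"), g_strdup ("42"));
 *   const char *v = (const char *) g_hash_table_lookup (t, "answer");
 *
 *   GHashTableIter iter;
 *   gpointer key, value;
 *   g_hash_table_iter_init (&iter, t);
 *   while (g_hash_table_iter_next (&iter, &key, &value))
 *       g_print ("%s=%s\n", (char *) key, (char *) value);
 *
 *   g_hash_table_destroy (t); // runs the destroy notifies on keys and values
 */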
/*
 * ByteArray
 */
typedef struct _GByteArray GByteArray;
struct _GByteArray {
	guint8 *data;
	gint len;
};

GByteArray *g_byte_array_new      (void);
GByteArray *g_byte_array_append   (GByteArray *array, const guint8 *data, guint len);
guint8     *g_byte_array_free     (GByteArray *array, gboolean free_segment);
void        g_byte_array_set_size (GByteArray *array, gint length);

/*
 * Array
 */
typedef struct _GArray GArray;
struct _GArray {
	gchar *data;
	gint len;
};

GArray *g_array_new               (gboolean zero_terminated, gboolean clear_, guint element_size);
GArray *g_array_sized_new         (gboolean zero_terminated, gboolean clear_, guint element_size, guint reserved_size);
gchar  *g_array_free              (GArray *array, gboolean free_segment);
GArray *g_array_append_vals       (GArray *array, gconstpointer data, guint len);
GArray *g_array_insert_vals       (GArray *array, guint index_, gconstpointer data, guint len);
GArray *g_array_remove_index      (GArray *array, guint index_);
GArray *g_array_remove_index_fast (GArray *array, guint index_);
void    g_array_set_size          (GArray *array, gint length);

#define g_array_append_val(a,v)   (g_array_append_vals((a),&(v),1))
#define g_array_insert_val(a,i,v) (g_array_insert_vals((a),(i),&(v),1))
#define g_array_index(a,t,i)      *(t*)(((a)->data) + sizeof(t) * (i)) //FIXME previous missing parens

/*
 * Pointer Array
 */
typedef struct _GPtrArray GPtrArray;
struct _GPtrArray {
	gpointer *pdata;
	guint len;
};

GPtrArray *g_ptr_array_new               (void);
GPtrArray *g_ptr_array_sized_new         (guint reserved_size);
void       g_ptr_array_add               (GPtrArray *array, gpointer data);
gboolean   g_ptr_array_remove            (GPtrArray *array, gpointer data);
gpointer   g_ptr_array_remove_index      (GPtrArray *array, guint index);
gboolean   g_ptr_array_remove_fast       (GPtrArray *array, gpointer data);
gpointer   g_ptr_array_remove_index_fast (GPtrArray *array, guint index);
void       g_ptr_array_sort              (GPtrArray *array, GCompareFunc compare_func);
void       g_ptr_array_set_size          (GPtrArray *array, gint length);
gpointer  *g_ptr_array_free              (GPtrArray *array, gboolean free_seg);
void       g_ptr_array_foreach           (GPtrArray *array, GFunc func, gpointer user_data);
guint      g_ptr_array_capacity          (GPtrArray *array);
gboolean   g_ptr_array_find              (GPtrArray *array, gconstpointer needle, guint *index);
#define g_ptr_array_index(array,index) (array)->pdata[(index)] //FIXME previous missing parens

/*
 * Queues
 */
typedef struct {
	GList *head;
	GList *tail;
	guint length;
} GQueue;

gpointer g_queue_pop_head  (GQueue *queue);
void     g_queue_push_head (GQueue *queue, gpointer data);
void     g_queue_push_tail (GQueue *queue, gpointer data);
gboolean g_queue_is_empty  (GQueue *queue);
GQueue  *g_queue_new       (void);
void     g_queue_free      (GQueue *queue);
void     g_queue_foreach   (GQueue *queue, GFunc func, gpointer user_data);
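/*
 * Illustrative sketch (not part of the API): g_array_index expands to an
 * lvalue, so it works for both reads and writes.
 *
 *   GArray *ints = g_array_new (FALSE, TRUE, sizeof (gint));
 *   gint v = 7;
 *   g_array_append_val (ints, v);
 *   g_array_index (ints, gint, 0) = 8;   // write through the macro
 *   g_array_free (ints, TRUE);           // TRUE also frees the element data
 */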
/*
 * Messages
 */
#ifndef G_LOG_DOMAIN
#define G_LOG_DOMAIN ((gchar*) 0)
#endif

typedef enum {
	G_LOG_FLAG_RECURSION = 1 << 0,
	G_LOG_FLAG_FATAL     = 1 << 1,
	G_LOG_LEVEL_ERROR    = 1 << 2,
	G_LOG_LEVEL_CRITICAL = 1 << 3,
	G_LOG_LEVEL_WARNING  = 1 << 4,
	G_LOG_LEVEL_MESSAGE  = 1 << 5,
	G_LOG_LEVEL_INFO     = 1 << 6,
	G_LOG_LEVEL_DEBUG    = 1 << 7,
	G_LOG_LEVEL_MASK     = ~(G_LOG_FLAG_RECURSION | G_LOG_FLAG_FATAL)
} GLogLevelFlags;

G_ENUM_FUNCTIONS (GLogLevelFlags)

gint           g_printv               (const gchar *format, va_list args);
void           g_print                (const gchar *format, ...);
void           g_printerr             (const gchar *format, ...);
GLogLevelFlags g_log_set_always_fatal (GLogLevelFlags fatal_mask);
GLogLevelFlags g_log_set_fatal_mask   (const gchar *log_domain, GLogLevelFlags fatal_mask);
void           g_logv                 (const gchar *log_domain, GLogLevelFlags log_level, const gchar *format, va_list args);
G_EXTERN_C // Used by MonoPosixHelper or MonoSupportW, at least.
void           g_log                  (const gchar *log_domain, GLogLevelFlags log_level, const gchar *format, ...);
void           g_log_disabled         (const gchar *log_domain, GLogLevelFlags log_level, const char *file, int line);
G_EXTERN_C // Used by MonoPosixHelper or MonoSupportW, at least.
void           g_assertion_message    (const gchar *format, ...) G_GNUC_NORETURN;
void           mono_assertion_message_disabled    (const char *file, int line) G_GNUC_NORETURN;
void           mono_assertion_message             (const char *file, int line, const char *condition) G_GNUC_NORETURN;
void           mono_assertion_message_unreachable (const char *file, int line) G_GNUC_NORETURN;
const char    *g_get_assertion_message (void);

#ifndef DISABLE_ASSERT_MESSAGES
/* The for (;;) tells the compiler that g_error () doesn't return, avoiding warnings */
#define g_error(...)    do { g_log (G_LOG_DOMAIN, G_LOG_LEVEL_ERROR, __VA_ARGS__); for (;;); } while (0)
#define g_critical(...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_CRITICAL, __VA_ARGS__)
#define g_warning(...)  g_log (G_LOG_DOMAIN, G_LOG_LEVEL_WARNING, __VA_ARGS__)
#define g_message(...)  g_log (G_LOG_DOMAIN, G_LOG_LEVEL_MESSAGE, __VA_ARGS__)
#define g_debug(...)    g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, __VA_ARGS__)
#else
#define g_error(...)    do { g_log_disabled (G_LOG_DOMAIN, G_LOG_LEVEL_ERROR, __FILE__, __LINE__); for (;;); } while (0)
#define g_critical(...) g_log_disabled (G_LOG_DOMAIN, G_LOG_LEVEL_CRITICAL, __FILE__, __LINE__)
#define g_warning(...)  g_log_disabled (G_LOG_DOMAIN, G_LOG_LEVEL_WARNING, __FILE__, __LINE__)
#define g_message(...)  g_log_disabled (G_LOG_DOMAIN, G_LOG_LEVEL_MESSAGE, __FILE__, __LINE__)
#define g_debug(...)    g_log_disabled (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, __FILE__, __LINE__)
#endif

typedef void (*GLogFunc)   (const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data);
typedef void (*GPrintFunc) (const gchar *string);
typedef void (*GAbortFunc) (void);

void       g_assertion_disable_global (GAbortFunc func);
void       g_assert_abort             (void);
void       g_log_default_handler      (const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer unused_data);
GLogFunc   g_log_set_default_handler  (GLogFunc log_func, gpointer user_data);
GPrintFunc g_set_print_handler        (GPrintFunc func);
GPrintFunc g_set_printerr_handler     (GPrintFunc func);

/*
 * Conversions
 */
gpointer g_convert_error_quark (void);

#ifndef MAX
#define MAX(a,b) (((a)>(b)) ? (a) : (b))
#endif

#ifndef MIN
#define MIN(a,b) (((a)<(b)) ? (a) : (b))
#endif

#ifndef CLAMP
#define CLAMP(a,low,high) (((a) < (low)) ? (low) : (((a) > (high)) ? (high) : (a)))
#endif
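/*
 * Illustrative sketch (not part of the API): routing eglib logging through a
 * custom handler. `my_handler` is a hypothetical callback.
 *
 *   static void
 *   my_handler (const gchar *domain, GLogLevelFlags level, const gchar *msg, gpointer user_data)
 *   {
 *       fprintf (stderr, "[%s] %s\n", domain ? domain : "app", msg);
 *   }
 *
 *   g_log_set_default_handler (my_handler, NULL);
 *   g_warning ("disk %d%% full", 95); // expands to g_log (G_LOG_DOMAIN, G_LOG_LEVEL_WARNING, ...)
 */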
#if defined(__GNUC__) && (__GNUC__ > 2)
#define G_LIKELY(expr)   (__builtin_expect ((expr) != 0, 1))
#define G_UNLIKELY(expr) (__builtin_expect ((expr) != 0, 0))
#else
#define G_LIKELY(x) (x)
#define G_UNLIKELY(x) (x)
#endif

#if defined(_MSC_VER)
#define eg_unreachable() __assume(0)
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 5)))
#define eg_unreachable() __builtin_unreachable()
#else
#define eg_unreachable()
#endif

/* g_assert is a boolean expression; the precise value is not preserved, just true or false. */
#ifdef DISABLE_ASSERT_MESSAGES
// This is smaller than the equivalent mono_assertion_message (..."disabled");
#define g_assert(x) (G_LIKELY((x)) ? 1 : (mono_assertion_message_disabled (__FILE__, __LINE__), 0))
#else
#define g_assert(x) (G_LIKELY((x)) ? 1 : (mono_assertion_message (__FILE__, __LINE__, #x), 0))
#endif

#ifdef __cplusplus
#define g_static_assert(x) static_assert (x, "")
#else
#define g_static_assert(x) g_assert (x)
#endif

#define g_assert_not_reached() G_STMT_START { mono_assertion_message_unreachable (__FILE__, __LINE__); eg_unreachable(); } G_STMT_END

/* f is format -- like printf and scanf
 * Where you might have said:
 *	if (!(expr))
 *		g_error("%s invalid bar:%d", __func__, bar)
 *
 * You can say:
 *	g_assertf(expr, "bar:%d", bar);
 *
 * The usual assertion text of file/line/expr/newline are builtin, and __func__.
 *
 * g_assertf is a boolean expression -- the precise value is not preserved, just true or false.
 *
 * Other than expr, the parameters are not evaluated unless expr is false.
 *
 * format must be a string literal, in order to be concatenated.
 * If this is too restrictive, g_error remains.
 */
#ifdef DISABLE_ASSERT_MESSAGES
#define g_assertf(x, format, ...) (G_LIKELY((x)) ? 1 : (mono_assertion_message_disabled (__FILE__, __LINE__), 0))
#elif defined(_MSC_VER) && (_MSC_VER < 1910)
#define g_assertf(x, format, ...) (G_LIKELY((x)) ? 1 : (g_assertion_message ("* Assertion at %s:%d, condition `%s' not met, function:%s, " format "\n", __FILE__, __LINE__, #x, __func__, __VA_ARGS__), 0))
#else
#define g_assertf(x, format, ...) (G_LIKELY((x)) ? 1 : (g_assertion_message ("* Assertion at %s:%d, condition `%s' not met, function:%s, " format "\n", __FILE__, __LINE__, #x, __func__, ##__VA_ARGS__), 0))
#endif

/*
 * Unicode conversion
 */
#define G_CONVERT_ERROR g_convert_error_quark()

typedef enum {
	G_CONVERT_ERROR_NO_CONVERSION,
	G_CONVERT_ERROR_ILLEGAL_SEQUENCE,
	G_CONVERT_ERROR_FAILED,
	G_CONVERT_ERROR_PARTIAL_INPUT,
	G_CONVERT_ERROR_BAD_URI,
	G_CONVERT_ERROR_NOT_ABSOLUTE_PATH,
	G_CONVERT_ERROR_NO_MEMORY
} GConvertError;

gint       g_unichar_to_utf8 (gunichar c, gchar *outbuf);
gunichar  *g_utf8_to_ucs4_fast (const gchar *str, glong len, glong *items_written);
gunichar  *g_utf8_to_ucs4 (const gchar *str, glong len, glong *items_read, glong *items_written, GError **err);
G_EXTERN_C // Used by libtest, at least.
gunichar2 *g_utf8_to_utf16 (const gchar *str, glong len, glong *items_read, glong *items_written, GError **err);
gunichar2 *eg_utf8_to_utf16_with_nuls (const gchar *str, glong len, glong *items_read, glong *items_written, GError **err);
gunichar2 *eg_wtf8_to_utf16 (const gchar *str, glong len, glong *items_read, glong *items_written, GError **err);
G_EXTERN_C // Used by libtest, at least.
gchar     *g_utf16_to_utf8 (const gunichar2 *str, glong len, glong *items_read, glong *items_written, GError **err);
gunichar  *g_utf16_to_ucs4 (const gunichar2 *str, glong len, glong *items_read, glong *items_written, GError **err);
gchar     *g_ucs4_to_utf8  (const gunichar *str, glong len, glong *items_read, glong *items_written, GError **err);
gunichar2 *g_ucs4_to_utf16 (const gunichar *str, glong len, glong *items_read, glong *items_written, GError **err);
size_t     g_utf16_len (const gunichar2 *);

#define u8to16(str) g_utf8_to_utf16(str, (glong)strlen(str), NULL, NULL, NULL)

#ifdef G_OS_WIN32
#define u16to8(str) g_utf16_to_utf8((gunichar2 *) (str), (glong)wcslen((wchar_t *) (str)), NULL, NULL, NULL)
#else
#define u16to8(str) g_utf16_to_utf8(str, (glong)strlen(str), NULL, NULL, NULL)
#endif

typedef gpointer (*GCustomAllocator) (gsize req_size, gpointer custom_alloc_data);

typedef struct {
	gpointer buffer;
	gsize buffer_size;
	gsize req_buffer_size;
} GFixedBufferCustomAllocatorData;

gpointer g_fixed_buffer_custom_allocator (gsize req_size, gpointer custom_alloc_data);

gunichar2 *g_utf8_to_utf16_custom_alloc (const gchar *str, glong len, glong *items_read, glong *items_written, GCustomAllocator custom_alloc_func, gpointer custom_alloc_data, GError **err);
gchar     *g_utf16_to_utf8_custom_alloc (const gunichar2 *str, glong len, glong *items_read, glong *items_written, GCustomAllocator custom_alloc_func, gpointer custom_alloc_data, GError **err);
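/*
 * Illustrative sketch (not part of the API): round-tripping UTF-8 through
 * UTF-16. Both conversion results are heap-allocated and must be g_free'd.
 *
 *   GError *error = NULL;
 *   const gchar *s = "h\xc3\xa9llo";
 *   gunichar2 *wide = g_utf8_to_utf16 (s, (glong)strlen (s), NULL, NULL, &error);
 *   if (wide) {
 *       gchar *narrow = g_utf16_to_utf8 (wide, (glong)g_utf16_len (wide), NULL, NULL, &error);
 *       g_free (narrow);
 *       g_free (wide);
 *   } else {
 *       g_error_free (error);
 *   }
 */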
/*
 * Path
 */
gchar   *g_build_path           (const gchar *separator, const gchar *first_element, ...);
#define  g_build_filename(x, ...) g_build_path(G_DIR_SEPARATOR_S, x, __VA_ARGS__)
gchar   *g_path_get_dirname     (const gchar *filename);
gchar   *g_path_get_basename    (const char *filename);
gchar   *g_find_program_in_path (const gchar *program);
gchar   *g_get_current_dir      (void);
gboolean g_path_is_absolute     (const char *filename);

const gchar *g_get_home_dir  (void);
const gchar *g_get_tmp_dir   (void);
const gchar *g_get_user_name (void);
gchar       *g_get_prgname   (void);
void         g_set_prgname   (const gchar *prgname);

gboolean g_ensure_directory_exists (const gchar *filename);
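/*
 * Illustrative sketch (not part of the API): composing and decomposing paths.
 * All returned strings are newly allocated.
 *
 *   gchar *path = g_build_filename (g_get_tmp_dir (), "mono", "cache.bin");
 *   gchar *dir  = g_path_get_dirname (path);   // ".../mono"
 *   gchar *base = g_path_get_basename (path);  // "cache.bin"
 *   g_free (base);
 *   g_free (dir);
 *   g_free (path);
 */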
#ifndef G_OS_WIN32 // Spawn could be implemented but is not.

int eg_getdtablesize (void);

#if !defined (HAVE_FORK) || !defined (HAVE_EXECVE)

#define HAVE_G_SPAWN 0

#else

#define HAVE_G_SPAWN 1

/*
 * Spawn
 */
typedef enum {
	G_SPAWN_LEAVE_DESCRIPTORS_OPEN = 1,
	G_SPAWN_DO_NOT_REAP_CHILD      = 1 << 1,
	G_SPAWN_SEARCH_PATH            = 1 << 2,
	G_SPAWN_STDOUT_TO_DEV_NULL     = 1 << 3,
	G_SPAWN_STDERR_TO_DEV_NULL     = 1 << 4,
	G_SPAWN_CHILD_INHERITS_STDIN   = 1 << 5,
	G_SPAWN_FILE_AND_ARGV_ZERO     = 1 << 6
} GSpawnFlags;

typedef void (*GSpawnChildSetupFunc) (gpointer user_data);

gboolean g_spawn_async_with_pipes (const gchar *working_directory, gchar **argv, gchar **envp, GSpawnFlags flags,
                                   GSpawnChildSetupFunc child_setup, gpointer user_data, GPid *child_pid,
                                   gint *standard_input, gint *standard_output, gint *standard_error, GError **gerror);

#endif
#endif

/*
 * Timer
 */
typedef struct _GTimer GTimer;

GTimer *g_timer_new     (void);
void    g_timer_destroy (GTimer *timer);
gdouble g_timer_elapsed (GTimer *timer, gulong *microseconds);
void    g_timer_stop    (GTimer *timer);
void    g_timer_start   (GTimer *timer);

/*
 * Date and time
 */
typedef struct {
	glong tv_sec;
	glong tv_usec;
} GTimeVal;

void g_get_current_time (GTimeVal *result);
void g_usleep (gulong microseconds);

/*
 * File
 */
gpointer g_file_error_quark (void);

#define G_FILE_ERROR g_file_error_quark ()

typedef enum {
	G_FILE_ERROR_EXIST,
	G_FILE_ERROR_ISDIR,
	G_FILE_ERROR_ACCES,
	G_FILE_ERROR_NAMETOOLONG,
	G_FILE_ERROR_NOENT,
	G_FILE_ERROR_NOTDIR,
	G_FILE_ERROR_NXIO,
	G_FILE_ERROR_NODEV,
	G_FILE_ERROR_ROFS,
	G_FILE_ERROR_TXTBSY,
	G_FILE_ERROR_FAULT,
	G_FILE_ERROR_LOOP,
	G_FILE_ERROR_NOSPC,
	G_FILE_ERROR_NOMEM,
	G_FILE_ERROR_MFILE,
	G_FILE_ERROR_NFILE,
	G_FILE_ERROR_BADF,
	G_FILE_ERROR_INVAL,
	G_FILE_ERROR_PIPE,
	G_FILE_ERROR_AGAIN,
	G_FILE_ERROR_INTR,
	G_FILE_ERROR_IO,
	G_FILE_ERROR_PERM,
	G_FILE_ERROR_NOSYS,
	G_FILE_ERROR_FAILED
} GFileError;

typedef enum {
	G_FILE_TEST_IS_REGULAR    = 1 << 0,
	G_FILE_TEST_IS_SYMLINK    = 1 << 1,
	G_FILE_TEST_IS_DIR        = 1 << 2,
	G_FILE_TEST_IS_EXECUTABLE = 1 << 3,
	G_FILE_TEST_EXISTS        = 1 << 4
} GFileTest;
G_ENUM_FUNCTIONS (GFileTest)

gboolean   g_file_set_contents     (const gchar *filename, const gchar *contents, gssize length, GError **gerror);
gboolean   g_file_get_contents     (const gchar *filename, gchar **contents, gsize *length, GError **gerror);
GFileError g_file_error_from_errno (gint err_no);
gint       g_file_open_tmp         (const gchar *tmpl, gchar **name_used, GError **gerror);
gboolean   g_file_test             (const gchar *filename, GFileTest test);

#ifdef G_OS_WIN32
#define g_open _open
#else
#define g_open open
#endif
#define g_rename rename
#define g_stat stat
#ifdef G_OS_WIN32
#define g_access _access
#else
#define g_access access
#endif
#ifdef G_OS_WIN32
#define g_mktemp _mktemp
#else
#define g_mktemp mktemp
#endif
#ifdef G_OS_WIN32
#define g_unlink _unlink
#else
#define g_unlink unlink
#endif
#ifdef G_OS_WIN32
#define g_write _write
#else
#define g_write write
#endif
#ifdef G_OS_WIN32
#define g_read _read
#else
#define g_read read
#endif
#define g_fopen fopen
#define g_lstat lstat
#define g_rmdir rmdir
#define g_mkstemp mkstemp
#define g_ascii_isdigit isdigit
#define g_ascii_strtod strtod
#define g_ascii_isalnum isalnum

gchar *g_mkdtemp (gchar *tmpl);
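/*
 * Illustrative sketch (not part of the API): reading a whole file at once.
 *
 *   gchar *contents = NULL;
 *   gsize length = 0;
 *   GError *error = NULL;
 *   if (g_file_test ("/etc/hostname", G_FILE_TEST_EXISTS) &&
 *       g_file_get_contents ("/etc/hostname", &contents, &length, &error)) {
 *       g_print ("%.*s", (int) length, contents);
 *       g_free (contents);
 *   } else if (error) {
 *       g_error_free (error);
 *   }
 */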
/*
 * Low-level write-based printing functions
 */
static inline int
g_async_safe_fgets (char *str, int num, int handle, gboolean *newline)
{
	memset (str, 0, num);
	// Make sure we don't overwrite the last index so that we are
	// guaranteed to be NULL-terminated
	int without_padding = num - 1;
	int i = 0;
	while (i < without_padding && g_read (handle, &str [i], sizeof(char))) {
		if (str [i] == '\n') {
			str [i] = '\0';
			*newline = TRUE;
		}
		if (!isprint (str [i]))
			str [i] = '\0';
		if (str [i] == '\0')
			break;
		i++;
	}

	return i;
}

static inline gint
g_async_safe_vfprintf (int handle, gchar const *format, va_list args)
{
	char print_buff [1024];
	print_buff [0] = '\0';
	g_vsnprintf (print_buff, sizeof(print_buff), format, args);
	int ret = g_write (handle, print_buff, (guint32) strlen (print_buff));

	return ret;
}

static inline gint
g_async_safe_fprintf (int handle, gchar const *format, ...)
{
	va_list args;
	va_start (args, format);
	int ret = g_async_safe_vfprintf (handle, format, args);
	va_end (args);
	return ret;
}

static inline gint
g_async_safe_vprintf (gchar const *format, va_list args)
{
	return g_async_safe_vfprintf (1, format, args);
}

static inline gint
g_async_safe_printf (gchar const *format, ...)
{
	va_list args;
	va_start (args, format);
	int ret = g_async_safe_vfprintf (1, format, args);
	va_end (args);
	return ret;
}

/*
 * Directory
 */
typedef struct _GDir GDir;
GDir        *g_dir_open      (const gchar *path, guint flags, GError **gerror);
const gchar *g_dir_read_name (GDir *dir);
void         g_dir_rewind    (GDir *dir);
void         g_dir_close     (GDir *dir);

int g_mkdir_with_parents (const gchar *pathname, int mode);
#define g_mkdir mkdir

/*
 * Unicode manipulation
 */
extern const guchar g_utf8_jump_table[256];

gboolean g_utf8_validate           (const gchar *str, gssize max_len, const gchar **end);
gunichar g_utf8_get_char_validated (const gchar *str, gssize max_len);

#define g_utf8_next_char(p) ((p) + g_utf8_jump_table[(guchar)(*p)])
gunichar g_utf8_get_char          (const gchar *src);
glong    g_utf8_strlen            (const gchar *str, gssize max);
gchar   *g_utf8_offset_to_pointer (const gchar *str, glong offset);
glong    g_utf8_pointer_to_offset (const gchar *str, const gchar *pos);

/*
 * priorities
 */
#define G_PRIORITY_DEFAULT 0
#define G_PRIORITY_DEFAULT_IDLE 200

#define GUINT16_SWAP_LE_BE_CONSTANT(x) ((((guint16) x) >> 8) | ((((guint16) x) << 8)))
#define GUINT16_SWAP_LE_BE(x) ((guint16) (((guint16) x) >> 8) | ((((guint16)(x)) & 0xff) << 8))
#define GUINT32_SWAP_LE_BE(x) ((guint32) \
			       ( (((guint32) (x)) << 24)| \
				 ((((guint32) (x)) & 0xff0000) >> 8) | \
				 ((((guint32) (x)) & 0xff00) << 8) | \
				 (((guint32) (x)) >> 24)) )
#define GUINT64_SWAP_LE_BE(x) ((guint64) (((guint64)(GUINT32_SWAP_LE_BE(((guint64)x) & 0xffffffff))) << 32) | \
				GUINT32_SWAP_LE_BE(((guint64)x) >> 32))

#if G_BYTE_ORDER == G_LITTLE_ENDIAN
#  define GUINT64_FROM_BE(x) GUINT64_SWAP_LE_BE(x)
#  define GUINT32_FROM_BE(x) GUINT32_SWAP_LE_BE(x)
#  define GUINT16_FROM_BE(x) GUINT16_SWAP_LE_BE(x)
#  define GUINT_FROM_BE(x)   GUINT32_SWAP_LE_BE(x)
#  define GUINT64_FROM_LE(x) (x)
#  define GUINT32_FROM_LE(x) (x)
#  define GUINT16_FROM_LE(x) (x)
#  define GUINT_FROM_LE(x)   (x)
#  define GUINT64_TO_BE(x)   GUINT64_SWAP_LE_BE(x)
#  define GUINT32_TO_BE(x)   GUINT32_SWAP_LE_BE(x)
#  define GUINT16_TO_BE(x)   GUINT16_SWAP_LE_BE(x)
#  define GUINT_TO_BE(x)     GUINT32_SWAP_LE_BE(x)
#  define GUINT64_TO_LE(x)   (x)
#  define GUINT32_TO_LE(x)   (x)
#  define GUINT16_TO_LE(x)   (x)
#  define GUINT_TO_LE(x)     (x)
#else
#  define GUINT64_FROM_BE(x) (x)
#  define GUINT32_FROM_BE(x) (x)
#  define GUINT16_FROM_BE(x) (x)
#  define GUINT_FROM_BE(x)   (x)
#  define GUINT64_FROM_LE(x) GUINT64_SWAP_LE_BE(x)
#  define GUINT32_FROM_LE(x) GUINT32_SWAP_LE_BE(x)
#  define GUINT16_FROM_LE(x) GUINT16_SWAP_LE_BE(x)
#  define GUINT_FROM_LE(x)   GUINT32_SWAP_LE_BE(x)
#  define GUINT64_TO_BE(x)   (x)
#  define GUINT32_TO_BE(x)   (x)
#  define GUINT16_TO_BE(x)   (x)
#  define GUINT_TO_BE(x)     (x)
#  define GUINT64_TO_LE(x)   GUINT64_SWAP_LE_BE(x)
#  define GUINT32_TO_LE(x)   GUINT32_SWAP_LE_BE(x)
#  define GUINT16_TO_LE(x)   GUINT16_SWAP_LE_BE(x)
#  define GUINT_TO_LE(x)     GUINT32_SWAP_LE_BE(x)
#endif

#define GINT64_FROM_BE(x) (GUINT64_TO_BE (x))
#define GINT32_FROM_BE(x) (GUINT32_TO_BE (x))
#define GINT16_FROM_BE(x) (GUINT16_TO_BE (x))
#define GINT64_FROM_LE(x) (GUINT64_TO_LE (x))
#define GINT32_FROM_LE(x) (GUINT32_TO_LE (x))
#define GINT16_FROM_LE(x) (GUINT16_TO_LE (x))
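/*
 * Illustrative sketch (not part of the API): reading a big-endian field from
 * a wire-format buffer portably. `buffer` is hypothetical.
 *
 *   guint32 be;
 *   memcpy (&be, buffer, sizeof (be));
 *   guint32 host = GUINT32_FROM_BE (be); // no-op on big-endian hosts
 */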
#define _EGLIB_MAJOR  2
#define _EGLIB_MIDDLE 4
#define _EGLIB_MINOR  0

#define GLIB_CHECK_VERSION(a,b,c) ((a < _EGLIB_MAJOR) || (a == _EGLIB_MAJOR && (b < _EGLIB_MIDDLE || (b == _EGLIB_MIDDLE && c <= _EGLIB_MINOR))))

#define G_HAVE_API_SUPPORT(x) (x)
#define G_UNSUPPORTED_API "%s:%d: '%s' not supported.", __FILE__, __LINE__
#define g_unsupported_api(name) G_STMT_START { g_debug (G_UNSUPPORTED_API, name); } G_STMT_END

#if _WIN32
// g_free the result
// No MAX_PATH limit.
gboolean mono_get_module_filename (gpointer mod, gunichar2 **pstr, guint32 *plength);

// g_free the result
// No MAX_PATH limit.
gboolean mono_get_module_filename_ex (gpointer process, gpointer mod, gunichar2 **pstr, guint32 *plength);

// g_free the result
// No MAX_PATH limit.
gboolean mono_get_module_basename (gpointer process, gpointer mod, gunichar2 **pstr, guint32 *plength);

// g_free the result
// No MAX_PATH limit.
gboolean mono_get_current_directory (gunichar2 **pstr, guint32 *plength);
#endif

G_END_DECLS // FIXME: There is more extern C than there should be.

static inline void
mono_qsort (void* base, size_t num, size_t size, int (*compare)(const void*, const void*))
{
	g_assert (compare);
	g_assert (size);
	if (num < 2 || !size || !base)
		return;
	qsort (base, num, size, compare);
}

#define MONO_DECL_CALLBACK(prefix, ret, name, sig) ret (*name) sig;
#define MONO_INIT_CALLBACK(prefix, ret, name, sig) prefix ## _ ## name,

// For each allocator; i.e. returning gpointer that needs to be cast.
// Macros do not recurse, so naming function and macro the same is ok.
// However these are also already macros.
#undef g_malloc
#undef g_realloc
#undef g_malloc0
#undef g_calloc
#undef g_try_malloc
#undef g_try_realloc
#undef g_memdup
#define g_malloc(x) (g_cast (monoeg_malloc (x)))
#define g_realloc(obj, size) (g_cast (monoeg_realloc ((obj), (size))))
#define g_malloc0(x) (g_cast (monoeg_malloc0 (x)))
#define g_calloc(x, y) (g_cast (monoeg_g_calloc ((x), (y))))
#define g_try_malloc(x) (g_cast (monoeg_try_malloc (x)))
#define g_try_realloc(obj, size) (g_cast (monoeg_try_realloc ((obj), (size))))
#define g_memdup(mem, size) (g_cast (monoeg_g_memdup ((mem), (size))))

/*
 * Clock Nanosleep
 */
#ifdef HAVE_CLOCK_NANOSLEEP
gint g_clock_nanosleep (clockid_t clockid, gint flags, const struct timespec *request, struct timespec *remain);
#endif

#endif // __GLIB_H
*newline = TRUE; } if (!isprint (str [i])) str [i] = '\0'; if (str [i] == '\0') break; i++; } return i; } static inline gint g_async_safe_vfprintf (int handle, gchar const *format, va_list args) { char print_buff [1024]; print_buff [0] = '\0'; g_vsnprintf (print_buff, sizeof(print_buff), format, args); int ret = g_write (handle, print_buff, (guint32) strlen (print_buff)); return ret; } static inline gint g_async_safe_fprintf (int handle, gchar const *format, ...) { va_list args; va_start (args, format); int ret = g_async_safe_vfprintf (handle, format, args); va_end (args); return ret; } static inline gint g_async_safe_vprintf (gchar const *format, va_list args) { return g_async_safe_vfprintf (1, format, args); } static inline gint g_async_safe_printf (gchar const *format, ...) { va_list args; va_start (args, format); int ret = g_async_safe_vfprintf (1, format, args); va_end (args); return ret; } /* * Directory */ typedef struct _GDir GDir; GDir *g_dir_open (const gchar *path, guint flags, GError **gerror); const gchar *g_dir_read_name (GDir *dir); void g_dir_rewind (GDir *dir); void g_dir_close (GDir *dir); int g_mkdir_with_parents (const gchar *pathname, int mode); #define g_mkdir mkdir /* * Unicode manipulation */ extern const guchar g_utf8_jump_table[256]; gboolean g_utf8_validate (const gchar *str, gssize max_len, const gchar **end); gunichar g_utf8_get_char_validated (const gchar *str, gssize max_len); #define g_utf8_next_char(p) ((p) + g_utf8_jump_table[(guchar)(*p)]) gunichar g_utf8_get_char (const gchar *src); glong g_utf8_strlen (const gchar *str, gssize max); gchar *g_utf8_offset_to_pointer (const gchar *str, glong offset); glong g_utf8_pointer_to_offset (const gchar *str, const gchar *pos); /* * priorities */ #define G_PRIORITY_DEFAULT 0 #define G_PRIORITY_DEFAULT_IDLE 200 #define GUINT16_SWAP_LE_BE_CONSTANT(x) ((((guint16) x) >> 8) | ((((guint16) x) << 8))) #define GUINT16_SWAP_LE_BE(x) ((guint16) (((guint16) x) >> 8) | ((((guint16)(x)) & 0xff) << 8)) #define GUINT32_SWAP_LE_BE(x) ((guint32) \ ( (((guint32) (x)) << 24)| \ ((((guint32) (x)) & 0xff0000) >> 8) | \ ((((guint32) (x)) & 0xff00) << 8) | \ (((guint32) (x)) >> 24)) ) #define GUINT64_SWAP_LE_BE(x) ((guint64) (((guint64)(GUINT32_SWAP_LE_BE(((guint64)x) & 0xffffffff))) << 32) | \ GUINT32_SWAP_LE_BE(((guint64)x) >> 32)) #if G_BYTE_ORDER == G_LITTLE_ENDIAN # define GUINT64_FROM_BE(x) GUINT64_SWAP_LE_BE(x) # define GUINT32_FROM_BE(x) GUINT32_SWAP_LE_BE(x) # define GUINT16_FROM_BE(x) GUINT16_SWAP_LE_BE(x) # define GUINT_FROM_BE(x) GUINT32_SWAP_LE_BE(x) # define GUINT64_FROM_LE(x) (x) # define GUINT32_FROM_LE(x) (x) # define GUINT16_FROM_LE(x) (x) # define GUINT_FROM_LE(x) (x) # define GUINT64_TO_BE(x) GUINT64_SWAP_LE_BE(x) # define GUINT32_TO_BE(x) GUINT32_SWAP_LE_BE(x) # define GUINT16_TO_BE(x) GUINT16_SWAP_LE_BE(x) # define GUINT_TO_BE(x) GUINT32_SWAP_LE_BE(x) # define GUINT64_TO_LE(x) (x) # define GUINT32_TO_LE(x) (x) # define GUINT16_TO_LE(x) (x) # define GUINT_TO_LE(x) (x) #else # define GUINT64_FROM_BE(x) (x) # define GUINT32_FROM_BE(x) (x) # define GUINT16_FROM_BE(x) (x) # define GUINT_FROM_BE(x) (x) # define GUINT64_FROM_LE(x) GUINT64_SWAP_LE_BE(x) # define GUINT32_FROM_LE(x) GUINT32_SWAP_LE_BE(x) # define GUINT16_FROM_LE(x) GUINT16_SWAP_LE_BE(x) # define GUINT_FROM_LE(x) GUINT32_SWAP_LE_BE(x) # define GUINT64_TO_BE(x) (x) # define GUINT32_TO_BE(x) (x) # define GUINT16_TO_BE(x) (x) # define GUINT_TO_BE(x) (x) # define GUINT64_TO_LE(x) GUINT64_SWAP_LE_BE(x) # define GUINT32_TO_LE(x) GUINT32_SWAP_LE_BE(x) # define 
GUINT16_TO_LE(x) GUINT16_SWAP_LE_BE(x) # define GUINT_TO_LE(x) GUINT32_SWAP_LE_BE(x) #endif #define GINT64_FROM_BE(x) (GUINT64_TO_BE (x)) #define GINT32_FROM_BE(x) (GUINT32_TO_BE (x)) #define GINT16_FROM_BE(x) (GUINT16_TO_BE (x)) #define GINT64_FROM_LE(x) (GUINT64_TO_LE (x)) #define GINT32_FROM_LE(x) (GUINT32_TO_LE (x)) #define GINT16_FROM_LE(x) (GUINT16_TO_LE (x)) #define _EGLIB_MAJOR 2 #define _EGLIB_MIDDLE 4 #define _EGLIB_MINOR 0 #define GLIB_CHECK_VERSION(a,b,c) ((a < _EGLIB_MAJOR) || (a == _EGLIB_MAJOR && (b < _EGLIB_MIDDLE || (b == _EGLIB_MIDDLE && c <= _EGLIB_MINOR)))) #define G_HAVE_API_SUPPORT(x) (x) #define G_UNSUPPORTED_API "%s:%d: '%s' not supported.", __FILE__, __LINE__ #define g_unsupported_api(name) G_STMT_START { g_debug (G_UNSUPPORTED_API, name); } G_STMT_END #if _WIN32 // g_free the result // No MAX_PATH limit. gboolean mono_get_module_filename (gpointer mod, gunichar2 **pstr, guint32 *plength); // g_free the result // No MAX_PATH limit. gboolean mono_get_module_filename_ex (gpointer process, gpointer mod, gunichar2 **pstr, guint32 *plength); // g_free the result // No MAX_PATH limit. gboolean mono_get_module_basename (gpointer process, gpointer mod, gunichar2 **pstr, guint32 *plength); // g_free the result // No MAX_PATH limit. gboolean mono_get_current_directory (gunichar2 **pstr, guint32 *plength); #endif G_END_DECLS // FIXME: There is more extern C than there should be. static inline void mono_qsort (void* base, size_t num, size_t size, int (*compare)(const void*, const void*)) { g_assert (compare); g_assert (size); if (num < 2 || !size || !base) return; qsort (base, num, size, compare); } #define MONO_DECL_CALLBACK(prefix, ret, name, sig) ret (*name) sig; #define MONO_INIT_CALLBACK(prefix, ret, name, sig) prefix ## _ ## name, // For each allocator; i.e. returning gpointer that needs to be cast. // Macros do not recurse, so naming function and macro the same is ok. // However these are also already macros. #undef g_malloc #undef g_realloc #undef g_malloc0 #undef g_calloc #undef g_try_malloc #undef g_try_realloc #undef g_memdup #define g_malloc(x) (g_cast (monoeg_malloc (x))) #define g_realloc(obj, size) (g_cast (monoeg_realloc ((obj), (size)))) #define g_malloc0(x) (g_cast (monoeg_malloc0 (x))) #define g_calloc(x, y) (g_cast (monoeg_g_calloc ((x), (y)))) #define g_try_malloc(x) (g_cast (monoeg_try_malloc (x))) #define g_try_realloc(obj, size) (g_cast (monoeg_try_realloc ((obj), (size)))) #define g_memdup(mem, size) (g_cast (monoeg_g_memdup ((mem), (size)))) /* * Clock Nanosleep */ #ifdef HAVE_CLOCK_NANOSLEEP gint g_clock_nanosleep (clockid_t clockid, gint flags, const struct timespec *request, struct timespec *remain); #endif #endif // __GLIB_H
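The byte-order macros above lend themselves to a quick sanity check with g_assertf. Below is a minimal sketch, illustrative only and assuming a little-endian build (on big-endian hosts the TO_BE conversions become identities); it uses nothing beyond macros declared in this header.

#include <glib.h>

/* Illustrative only -- not part of the header. */
static void
byte_order_example (void)
{
	guint32 v = 0x11223344;

	/* 16-bit swap: 0x1234 -> 0x3412 */
	g_assertf (GUINT16_SWAP_LE_BE (0x1234) == 0x3412, "got %x", GUINT16_SWAP_LE_BE (0x1234));
	/* On a little-endian host, TO_BE byte-swaps ... */
	g_assertf (GUINT32_TO_BE (v) == 0x44332211, "got %x", GUINT32_TO_BE (v));
	/* ... and FROM_LE is the identity. */
	g_assertf (GUINT32_FROM_LE (v) == v, "got %x", GUINT32_FROM_LE (v));
}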
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/coreclr/gc/gcevent_serializers.h
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#ifndef __GCEVENT_SERIALIZERS_H__
#define __GCEVENT_SERIALIZERS_H__

/*
 * gcevent_serializers.h - Serialization traits and plumbing for
 * serializing dynamic events.
 *
 * Dynamic events are events that can be fired by the GC without prior
 * knowledge of the EE. In order to accomplish this, the GC sends raw
 * bytes to the EE using the `IGCToCLR::FireDynamicEvent` callback, which
 * the EE will then fire as its own event.
 *
 * In order to keep the friction of adding new dynamic events low, this
 * file defines a simple ETW-style binary serialization format that
 * is efficient and easy to both serialize and deserialize.
 *
 * ## Serializing Types
 *
 * This file makes use of `EventSerializationTraits` to serialize
 * types. A type can opt-in to serialization using the mechanisms
 * in this file by specializing the `EventSerializationTraits` template,
 * providing implementations of `Serialize` and `SerializedSize`.
 *
 * If you attempt to serialize a type that does not implement this trait,
 * you will receive an error message like this:
 *
 *   bool gc_event::EventSerializationTraits<Head>::Serialize(const T&,uint8_t **)': attempting to reference a deleted function
 *   with
 *   [
 *     Head=<your type you tried to serialize>,
 *     T=<your type you tried to serialize>
 *   ]
 *
 * If you get this message, you will need to specialize `EventSerializationTraits`
 * for the type you want to serialize.
 */

#ifdef _MSC_VER
#define ByteSwap32 _byteswap_ulong
#define ByteSwap64 _byteswap_uint64
#else
#define ByteSwap32 __builtin_bswap32
#define ByteSwap64 __builtin_bswap64
#endif // _MSC_VER

namespace gc_event
{

/*
 * `EventSerializationTraits` is a trait implemented by types that
 * can be serialized to the payload of a dynamic event.
 */
template<class T>
struct EventSerializationTraits
{
    /*
     * Serializes the value `value` to the buffer `buffer`, incrementing
     * the buffer double-pointer to point to the next byte to be written.
     *
     * It is the responsibility of the caller to ensure that the buffer is
     * large enough to accommodate the serialized form of T.
     */
    static void Serialize(const T& value, uint8_t** buffer) = delete;

    /*
     * Returns the size of the value `value` if it were to be serialized.
     */
    static size_t SerializedSize(const T& value) = delete;
};

/*
 * EventSerializationTraits implementation for uint32_t. Other integral types
 * can follow this pattern.
 *
 * The convention here is that integral types are always serialized as
 * little-endian.
 */
template<>
struct EventSerializationTraits<uint32_t>
{
    static void Serialize(const uint32_t& value, uint8_t** buffer)
    {
#if defined(BIGENDIAN)
        **((uint32_t**)buffer) = ByteSwap32(value);
#else
        **((uint32_t**)buffer) = value;
#endif // BIGENDIAN
        *buffer += sizeof(uint32_t);
    }

    static size_t SerializedSize(const uint32_t& value)
    {
        return sizeof(uint32_t);
    }
};

/*
 * Helper routines for serializing lists of arguments.
 */

/*
 * Given a list of arguments, returns the total size of
 * the buffer required to fully serialize the list of arguments.
 */
template<class Head>
size_t SerializedSize(Head head)
{
    return EventSerializationTraits<Head>::SerializedSize(head);
}

template<class Head, class... Tail>
size_t SerializedSize(Head head, Tail... tail)
{
    return EventSerializationTraits<Head>::SerializedSize(head) + SerializedSize(tail...);
}

/*
 * Given a list of arguments and a list of actual parameters, serialize
 * the arguments into the buffer that's given to us.
 */
template<class Head>
void Serialize(uint8_t** buf, Head head)
{
    EventSerializationTraits<Head>::Serialize(head, buf);
}

template<class Head, class... Tail>
void Serialize(uint8_t** buf, Head head, Tail... tail)
{
    EventSerializationTraits<Head>::Serialize(head, buf);
    Serialize(buf, tail...);
}

} // namespace gc_event

#endif // __GCEVENT_SERIALIZERS_H__
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#ifndef __GCEVENT_SERIALIZERS_H__
#define __GCEVENT_SERIALIZERS_H__

/*
 * gcevent_serializers.h - Serialization traits and plumbing for
 * serializing dynamic events.
 *
 * Dynamic events are events that can be fired by the GC without prior
 * knowledge of the EE. In order to accomplish this, the GC sends raw
 * bytes to the EE using the `IGCToCLR::FireDynamicEvent` callback, which
 * the EE will then fire as its own event.
 *
 * In order to keep the friction of adding new dynamic events low, this
 * file defines a simple ETW-style binary serialization format that
 * is efficient and easy to both serialize and deserialize.
 *
 * ## Serializing Types
 *
 * This file makes use of `EventSerializationTraits` to serialize
 * types. A type can opt-in to serialization using the mechanisms
 * in this file by specializing the `EventSerializationTraits` template,
 * providing implementations of `Serialize` and `SerializedSize`.
 *
 * If you attempt to serialize a type that does not implement this trait,
 * you will receive an error message like this:
 *
 *   bool gc_event::EventSerializationTraits<Head>::Serialize(const T&,uint8_t **)': attempting to reference a deleted function
 *   with
 *   [
 *     Head=<your type you tried to serialize>,
 *     T=<your type you tried to serialize>
 *   ]
 *
 * If you get this message, you will need to specialize `EventSerializationTraits`
 * for the type you want to serialize.
 */

#ifdef _MSC_VER
#define ByteSwap32 _byteswap_ulong
#define ByteSwap64 _byteswap_uint64
#else
#define ByteSwap32 __builtin_bswap32
#define ByteSwap64 __builtin_bswap64
#endif // _MSC_VER

namespace gc_event
{

/*
 * `EventSerializationTraits` is a trait implemented by types that
 * can be serialized to the payload of a dynamic event.
 */
template<class T>
struct EventSerializationTraits
{
    /*
     * Serializes the value `value` to the buffer `buffer`, incrementing
     * the buffer double-pointer to point to the next byte to be written.
     *
     * It is the responsibility of the caller to ensure that the buffer is
     * large enough to accommodate the serialized form of T.
     */
    static void Serialize(const T& value, uint8_t** buffer) = delete;

    /*
     * Returns the size of the value `value` if it were to be serialized.
     */
    static size_t SerializedSize(const T& value) = delete;
};

/*
 * EventSerializationTraits implementation for uint32_t. Other integral types
 * can follow this pattern.
 *
 * The convention here is that integral types are always serialized as
 * little-endian.
 */
template<>
struct EventSerializationTraits<uint32_t>
{
    static void Serialize(const uint32_t& value, uint8_t** buffer)
    {
#if defined(BIGENDIAN)
        **((uint32_t**)buffer) = ByteSwap32(value);
#else
        **((uint32_t**)buffer) = value;
#endif // BIGENDIAN
        *buffer += sizeof(uint32_t);
    }

    static size_t SerializedSize(const uint32_t& value)
    {
        return sizeof(uint32_t);
    }
};

/*
 * Helper routines for serializing lists of arguments.
 */

/*
 * Given a list of arguments, returns the total size of
 * the buffer required to fully serialize the list of arguments.
 */
template<class Head>
size_t SerializedSize(Head head)
{
    return EventSerializationTraits<Head>::SerializedSize(head);
}

template<class Head, class... Tail>
size_t SerializedSize(Head head, Tail... tail)
{
    return EventSerializationTraits<Head>::SerializedSize(head) + SerializedSize(tail...);
}

/*
 * Given a list of arguments and a list of actual parameters, serialize
 * the arguments into the buffer that's given to us.
 */
template<class Head>
void Serialize(uint8_t** buf, Head head)
{
    EventSerializationTraits<Head>::Serialize(head, buf);
}

template<class Head, class... Tail>
void Serialize(uint8_t** buf, Head head, Tail... tail)
{
    EventSerializationTraits<Head>::Serialize(head, buf);
    Serialize(buf, tail...);
}

} // namespace gc_event

#endif // __GCEVENT_SERIALIZERS_H__
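The uint32_t specialization doubles as a template for new payload types: the file's own comment notes that "other integral types can follow this pattern." Below is a hedged sketch of what a uint64_t specialization could look like; it is hypothetical (not present in the file) and reuses the ByteSwap64 macro defined above.

namespace gc_event
{

// Hypothetical uint64_t trait, mirroring the uint32_t implementation:
// values are little-endian on the wire, so big-endian hosts byte-swap first.
template<>
struct EventSerializationTraits<uint64_t>
{
    static void Serialize(const uint64_t& value, uint8_t** buffer)
    {
#if defined(BIGENDIAN)
        **((uint64_t**)buffer) = ByteSwap64(value);
#else
        **((uint64_t**)buffer) = value;
#endif // BIGENDIAN
        *buffer += sizeof(uint64_t);
    }

    static size_t SerializedSize(const uint64_t& value)
    {
        return sizeof(uint64_t);
    }
};

} // namespace gc_event

With both traits in place the variadic helpers compose naturally: for a uint32_t count paired with a uint64_t timestamp, gc_event::SerializedSize(count, timestamp) yields 12, and gc_event::Serialize(&cursor, count, timestamp) writes the two values back-to-back at the cursor.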
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/mono/mono/sgen/gc-internal-agnostic.h
/** * \file * Mono-agnostic GC interface. * * Copyright (C) 2015 Xamarin Inc * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_METADATA_GCINTERNALAGNOSTIC_H__ #define __MONO_METADATA_GCINTERNALAGNOSTIC_H__ #include <config.h> #include <glib.h> #include <stdio.h> #include "mono/utils/ward.h" #include "mono/utils/mono-compiler.h" #include "mono/utils/parse.h" #include "mono/utils/memfuncs.h" #ifdef HAVE_SGEN_GC #include "mono/sgen/sgen-conf.h" #endif /* h indicates whether to hide or just tag. * (-!!h ^ p) is used instead of (h ? ~p : p) to avoid multiple mentions of p. */ #define MONO_GC_HIDE_POINTER(p,t,h) ((gpointer)(((-(size_t)!!(h) ^ (size_t)(p)) & ~(size_t)3) | ((t) & (size_t)3))) #define MONO_GC_REVEAL_POINTER(p,h) ((gpointer)((-(size_t)!!(h) ^ (size_t)(p)) & ~(size_t)3)) #define MONO_GC_POINTER_TAG(p) ((size_t)(p) & (size_t)3) #define MONO_GC_HANDLE_OCCUPIED_MASK (1) #define MONO_GC_HANDLE_VALID_MASK (2) #define MONO_GC_HANDLE_TAG_MASK (MONO_GC_HANDLE_OCCUPIED_MASK | MONO_GC_HANDLE_VALID_MASK) #define MONO_GC_HANDLE_METADATA_POINTER(p,h) (MONO_GC_HIDE_POINTER ((p), MONO_GC_HANDLE_OCCUPIED_MASK, (h))) #define MONO_GC_HANDLE_OBJECT_POINTER(p,h) (MONO_GC_HIDE_POINTER ((p), MONO_GC_HANDLE_OCCUPIED_MASK | MONO_GC_HANDLE_VALID_MASK, (h))) #define MONO_GC_HANDLE_OCCUPIED(slot) ((size_t)(slot) & MONO_GC_HANDLE_OCCUPIED_MASK) #define MONO_GC_HANDLE_VALID(slot) ((size_t)(slot) & MONO_GC_HANDLE_VALID_MASK) #define MONO_GC_HANDLE_TAG(slot) ((size_t)(slot) & MONO_GC_HANDLE_TAG_MASK) #define MONO_GC_HANDLE_IS_OBJECT_POINTER(slot) (MONO_GC_HANDLE_TAG (slot) == (MONO_GC_HANDLE_OCCUPIED_MASK | MONO_GC_HANDLE_VALID_MASK)) #define MONO_GC_HANDLE_IS_METADATA_POINTER(slot) (MONO_GC_HANDLE_TAG (slot) == MONO_GC_HANDLE_OCCUPIED_MASK) /* These should match System.Runtime.InteropServices.GCHandleType */ typedef enum { HANDLE_TYPE_MIN = 0, HANDLE_WEAK = HANDLE_TYPE_MIN, HANDLE_WEAK_TRACK, HANDLE_NORMAL, HANDLE_PINNED, HANDLE_WEAK_FIELDS, HANDLE_TYPE_MAX } GCHandleType; #define GC_HANDLE_TYPE_IS_WEAK(x) ((x) <= HANDLE_WEAK_TRACK) #define MONO_GC_HANDLE_TYPE_SHIFT (3) #define MONO_GC_HANDLE_TYPE_MASK ((1 << MONO_GC_HANDLE_TYPE_SHIFT) - 1) #define MONO_GC_HANDLE_TYPE(x) ((GCHandleType)(((x) & MONO_GC_HANDLE_TYPE_MASK) - 1)) #define MONO_GC_HANDLE_SLOT(x) ((x) >> MONO_GC_HANDLE_TYPE_SHIFT) #define MONO_GC_HANDLE_TYPE_IS_WEAK(x) ((x) <= HANDLE_WEAK_TRACK) #define MONO_GC_HANDLE(slot, type) (((slot) << MONO_GC_HANDLE_TYPE_SHIFT) | (((type) & MONO_GC_HANDLE_TYPE_MASK) + 1)) typedef struct { gint32 minor_gc_count; gint32 major_gc_count; gint64 minor_gc_time; gint64 major_gc_time; gint64 major_gc_time_concurrent; } GCStats; extern GCStats mono_gc_stats; #ifdef HAVE_SGEN_GC typedef SgenDescriptor MonoGCDescriptor; #define MONO_GC_DESCRIPTOR_NULL SGEN_DESCRIPTOR_NULL #else typedef void* MonoGCDescriptor; #define MONO_GC_DESCRIPTOR_NULL NULL #endif gboolean mono_gc_parse_environment_string_extract_number (const char *str, size_t *out); MonoGCDescriptor mono_gc_make_descr_for_object (gsize *bitmap, int numbits, size_t obj_size) MONO_PERMIT (need (sgen_lock_gc)); MonoGCDescriptor mono_gc_make_descr_for_array (int vector, gsize *elem_bitmap, int numbits, size_t elem_size) MONO_PERMIT (need (sgen_lock_gc)); /* simple interface for data structures needed in the runtime */ MonoGCDescriptor mono_gc_make_descr_from_bitmap (gsize *bitmap, int numbits) MONO_PERMIT (need (sgen_lock_gc)); /* Return a root descriptor for a vector with repeating refs bitmap */ 
MonoGCDescriptor mono_gc_make_vector_descr (void); /* Return a root descriptor for a root with all refs */ MONO_COMPONENT_API MonoGCDescriptor mono_gc_make_root_descr_all_refs (int numbits) MONO_PERMIT (need (sgen_lock_gc)); /* Return the bitmap encoded by a descriptor */ gsize* mono_gc_get_bitmap_for_descr (MonoGCDescriptor descr, int *numbits); /* These functions must be used when it's possible that either destination is not word aligned or size is not a multiple of word size. */ void mono_gc_bzero_atomic (void *dest, size_t size); void mono_gc_bzero_aligned (void *dest, size_t size); MONO_COMPONENT_API void mono_gc_memmove_atomic (void *dest, const void *src, size_t size); void mono_gc_memmove_aligned (void *dest, const void *src, size_t size); FILE *mono_gc_get_logfile (void); /* equivalent to options set via MONO_GC_PARAMS */ void mono_gc_params_set (const char* options); /* equivalent to options set via MONO_GC_DEBUG */ void mono_gc_debug_set (const char* options); #endif
/** * \file * Mono-agnostic GC interface. * * Copyright (C) 2015 Xamarin Inc * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_METADATA_GCINTERNALAGNOSTIC_H__ #define __MONO_METADATA_GCINTERNALAGNOSTIC_H__ #include <config.h> #include <glib.h> #include <stdio.h> #include "mono/utils/ward.h" #include "mono/utils/mono-compiler.h" #include "mono/utils/parse.h" #include "mono/utils/memfuncs.h" #ifdef HAVE_SGEN_GC #include "mono/sgen/sgen-conf.h" #endif /* h indicates whether to hide or just tag. * (-!!h ^ p) is used instead of (h ? ~p : p) to avoid multiple mentions of p. */ #define MONO_GC_HIDE_POINTER(p,t,h) ((gpointer)(((-(size_t)!!(h) ^ (size_t)(p)) & ~(size_t)3) | ((t) & (size_t)3))) #define MONO_GC_REVEAL_POINTER(p,h) ((gpointer)((-(size_t)!!(h) ^ (size_t)(p)) & ~(size_t)3)) #define MONO_GC_POINTER_TAG(p) ((size_t)(p) & (size_t)3) #define MONO_GC_HANDLE_OCCUPIED_MASK (1) #define MONO_GC_HANDLE_VALID_MASK (2) #define MONO_GC_HANDLE_TAG_MASK (MONO_GC_HANDLE_OCCUPIED_MASK | MONO_GC_HANDLE_VALID_MASK) #define MONO_GC_HANDLE_METADATA_POINTER(p,h) (MONO_GC_HIDE_POINTER ((p), MONO_GC_HANDLE_OCCUPIED_MASK, (h))) #define MONO_GC_HANDLE_OBJECT_POINTER(p,h) (MONO_GC_HIDE_POINTER ((p), MONO_GC_HANDLE_OCCUPIED_MASK | MONO_GC_HANDLE_VALID_MASK, (h))) #define MONO_GC_HANDLE_OCCUPIED(slot) ((size_t)(slot) & MONO_GC_HANDLE_OCCUPIED_MASK) #define MONO_GC_HANDLE_VALID(slot) ((size_t)(slot) & MONO_GC_HANDLE_VALID_MASK) #define MONO_GC_HANDLE_TAG(slot) ((size_t)(slot) & MONO_GC_HANDLE_TAG_MASK) #define MONO_GC_HANDLE_IS_OBJECT_POINTER(slot) (MONO_GC_HANDLE_TAG (slot) == (MONO_GC_HANDLE_OCCUPIED_MASK | MONO_GC_HANDLE_VALID_MASK)) #define MONO_GC_HANDLE_IS_METADATA_POINTER(slot) (MONO_GC_HANDLE_TAG (slot) == MONO_GC_HANDLE_OCCUPIED_MASK) /* These should match System.Runtime.InteropServices.GCHandleType */ typedef enum { HANDLE_TYPE_MIN = 0, HANDLE_WEAK = HANDLE_TYPE_MIN, HANDLE_WEAK_TRACK, HANDLE_NORMAL, HANDLE_PINNED, HANDLE_WEAK_FIELDS, HANDLE_TYPE_MAX } GCHandleType; #define GC_HANDLE_TYPE_IS_WEAK(x) ((x) <= HANDLE_WEAK_TRACK) #define MONO_GC_HANDLE_TYPE_SHIFT (3) #define MONO_GC_HANDLE_TYPE_MASK ((1 << MONO_GC_HANDLE_TYPE_SHIFT) - 1) #define MONO_GC_HANDLE_TYPE(x) ((GCHandleType)(((x) & MONO_GC_HANDLE_TYPE_MASK) - 1)) #define MONO_GC_HANDLE_SLOT(x) ((x) >> MONO_GC_HANDLE_TYPE_SHIFT) #define MONO_GC_HANDLE_TYPE_IS_WEAK(x) ((x) <= HANDLE_WEAK_TRACK) #define MONO_GC_HANDLE(slot, type) (((slot) << MONO_GC_HANDLE_TYPE_SHIFT) | (((type) & MONO_GC_HANDLE_TYPE_MASK) + 1)) typedef struct { gint32 minor_gc_count; gint32 major_gc_count; gint64 minor_gc_time; gint64 major_gc_time; gint64 major_gc_time_concurrent; } GCStats; extern GCStats mono_gc_stats; #ifdef HAVE_SGEN_GC typedef SgenDescriptor MonoGCDescriptor; #define MONO_GC_DESCRIPTOR_NULL SGEN_DESCRIPTOR_NULL #else typedef void* MonoGCDescriptor; #define MONO_GC_DESCRIPTOR_NULL NULL #endif gboolean mono_gc_parse_environment_string_extract_number (const char *str, size_t *out); MonoGCDescriptor mono_gc_make_descr_for_object (gsize *bitmap, int numbits, size_t obj_size) MONO_PERMIT (need (sgen_lock_gc)); MonoGCDescriptor mono_gc_make_descr_for_array (int vector, gsize *elem_bitmap, int numbits, size_t elem_size) MONO_PERMIT (need (sgen_lock_gc)); /* simple interface for data structures needed in the runtime */ MonoGCDescriptor mono_gc_make_descr_from_bitmap (gsize *bitmap, int numbits) MONO_PERMIT (need (sgen_lock_gc)); /* Return a root descriptor for a vector with repeating refs bitmap */ 
MonoGCDescriptor mono_gc_make_vector_descr (void); /* Return a root descriptor for a root with all refs */ MONO_COMPONENT_API MonoGCDescriptor mono_gc_make_root_descr_all_refs (int numbits) MONO_PERMIT (need (sgen_lock_gc)); /* Return the bitmap encoded by a descriptor */ gsize* mono_gc_get_bitmap_for_descr (MonoGCDescriptor descr, int *numbits); /* These functions must be used when it's possible that either destination is not word aligned or size is not a multiple of word size. */ void mono_gc_bzero_atomic (void *dest, size_t size); void mono_gc_bzero_aligned (void *dest, size_t size); MONO_COMPONENT_API void mono_gc_memmove_atomic (void *dest, const void *src, size_t size); void mono_gc_memmove_aligned (void *dest, const void *src, size_t size); FILE *mono_gc_get_logfile (void); /* equivalent to options set via MONO_GC_PARAMS */ void mono_gc_params_set (const char* options); /* equivalent to options set via MONO_GC_DEBUG */ void mono_gc_debug_set (const char* options); #endif
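The hide/tag macros are easiest to understand by round-tripping a value through them. A minimal, illustrative sketch follows; the only assumption is that the pointer's low two bits are clear (the macros reserve them for the tag), which holds for any object at least 4-byte aligned.

/* Illustrative only -- not part of the header. */
static void
handle_tagging_example (void)
{
	static int obj;	/* assumed at least 4-byte aligned, so the low 2 bits are free */
	gpointer p = &obj;

	/* Hide the pointer (XOR with all-ones) and tag it as a live object. */
	gpointer slot = MONO_GC_HANDLE_OBJECT_POINTER (p, TRUE);
	g_assert (MONO_GC_HANDLE_OCCUPIED (slot));
	g_assert (MONO_GC_HANDLE_IS_OBJECT_POINTER (slot));
	g_assert (MONO_GC_REVEAL_POINTER (slot, TRUE) == p);	/* round-trips */

	/* A handle packs a slot index with a biased GCHandleType in the low bits. */
	guint32 h = MONO_GC_HANDLE (5, HANDLE_PINNED);
	g_assert (MONO_GC_HANDLE_SLOT (h) == 5);
	g_assert (MONO_GC_HANDLE_TYPE (h) == HANDLE_PINNED);
}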
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/coreclr/pal/inc/rt/winnt.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // // =========================================================================== // File: winnt.h // // =========================================================================== // dummy winnt.h for PAL #include "palrt.h"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // // =========================================================================== // File: winnt.h // // =========================================================================== // dummy winnt.h for PAL #include "palrt.h"
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/coreclr/pal/src/libunwind/src/mips/Ginit_local.c
/* libunwind - a platform-independent unwind library Copyright (C) 2008 CodeSourcery This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind_i.h" #include "init.h" #ifdef UNW_REMOTE_ONLY int unw_init_local (unw_cursor_t *cursor, ucontext_t *uc) { return -UNW_EINVAL; } #else /* !UNW_REMOTE_ONLY */ static int unw_init_local_common(unw_cursor_t *cursor, ucontext_t *uc, unsigned use_prev_instr) { struct cursor *c = (struct cursor *) cursor; if (!atomic_load(&tdep_init_done)) tdep_init (); Debug (1, "(cursor=%p)\n", c); c->dwarf.as = unw_local_addr_space; c->dwarf.as_arg = uc; return common_init (c, use_prev_instr); } int unw_init_local(unw_cursor_t *cursor, ucontext_t *uc) { return unw_init_local_common(cursor, uc, 1); } int unw_init_local2 (unw_cursor_t *cursor, ucontext_t *uc, int flag) { if (!flag) { return unw_init_local_common(cursor, uc, 1); } else if (flag == UNW_INIT_SIGNAL_FRAME) { return unw_init_local_common(cursor, uc, 0); } else { return -UNW_EINVAL; } } #endif /* !UNW_REMOTE_ONLY */
/* libunwind - a platform-independent unwind library Copyright (C) 2008 CodeSourcery This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind_i.h" #include "init.h" #ifdef UNW_REMOTE_ONLY int unw_init_local (unw_cursor_t *cursor, ucontext_t *uc) { return -UNW_EINVAL; } #else /* !UNW_REMOTE_ONLY */ static int unw_init_local_common(unw_cursor_t *cursor, ucontext_t *uc, unsigned use_prev_instr) { struct cursor *c = (struct cursor *) cursor; if (!atomic_load(&tdep_init_done)) tdep_init (); Debug (1, "(cursor=%p)\n", c); c->dwarf.as = unw_local_addr_space; c->dwarf.as_arg = uc; return common_init (c, use_prev_instr); } int unw_init_local(unw_cursor_t *cursor, ucontext_t *uc) { return unw_init_local_common(cursor, uc, 1); } int unw_init_local2 (unw_cursor_t *cursor, ucontext_t *uc, int flag) { if (!flag) { return unw_init_local_common(cursor, uc, 1); } else if (flag == UNW_INIT_SIGNAL_FRAME) { return unw_init_local_common(cursor, uc, 0); } else { return -UNW_EINVAL; } } #endif /* !UNW_REMOTE_ONLY */
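For context, unw_init_local is the entry point a client calls before walking its own stack. The usage sketch below is built only from the standard libunwind API (it is not specific to the MIPS port): capture the current context, seed a cursor, then step until unw_step stops returning a positive value.

#include <libunwind.h>
#include <stdio.h>

/* Print the instruction pointer of every frame above the caller. */
static void
show_backtrace (void)
{
  unw_context_t uc;
  unw_cursor_t cursor;
  unw_word_t ip;

  unw_getcontext (&uc);            /* snapshot the calling thread's registers */
  unw_init_local (&cursor, &uc);   /* the function defined above */
  while (unw_step (&cursor) > 0)   /* > 0: more frames; 0: done; < 0: error */
    {
      unw_get_reg (&cursor, UNW_REG_IP, &ip);
      printf ("ip = 0x%lx\n", (unsigned long) ip);
    }
}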
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/coreclr/pal/src/libunwind/src/hppa/Gstep.c
/* libunwind - a platform-independent unwind library Copyright (C) 2003-2004 Hewlett-Packard Co Contributed by David Mosberger This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind_i.h" #include "offsets.h" int unw_step (unw_cursor_t *cursor) { struct cursor *c = (struct cursor *) cursor; int ret, i; Debug (1, "(cursor=%p, ip=0x%08x)\n", c, (unsigned) c->dwarf.ip); /* Try DWARF-based unwinding... */ ret = dwarf_step (&c->dwarf); if (ret < 0 && ret != -UNW_ENOINFO) { Debug (2, "returning %d\n", ret); return ret; } if (unlikely (ret < 0)) { /* DWARF failed, let's see if we can follow the frame-chain or skip over the signal trampoline. */ Debug (13, "dwarf_step() failed (ret=%d), trying fallback\n", ret); if (unw_is_signal_frame (cursor)) { #ifdef __linux__ /* Assume that the trampoline is at the beginning of the sigframe. */ unw_word_t ip, sc_addr = c->dwarf.ip + LINUX_RT_SIGFRAME_UC_OFF; dwarf_loc_t iaoq_loc = DWARF_LOC (sc_addr + LINUX_SC_IAOQ_OFF, 0); c->sigcontext_format = HPPA_SCF_LINUX_RT_SIGFRAME; c->sigcontext_addr = sc_addr; if ((ret = dwarf_get (&c->dwarf, iaoq_loc, &ip)) < 0) { Debug (2, "failed to read IAOQ[1] (ret=%d)\n", ret); return ret; } c->dwarf.ip = ip & ~0x3; /* mask out the privilege level */ for (i = 0; i < 32; ++i) { c->dwarf.loc[UNW_HPPA_GR + i] = DWARF_LOC (sc_addr + LINUX_SC_GR_OFF + 4*i, 0); c->dwarf.loc[UNW_HPPA_FR + i] = DWARF_LOC (sc_addr + LINUX_SC_FR_OFF + 4*i, 0); } if ((ret = dwarf_get (&c->dwarf, c->dwarf.loc[UNW_HPPA_SP], &c->dwarf.cfa)) < 0) { Debug (2, "failed to read SP (ret=%d)\n", ret); return ret; } #else # error Implement me! #endif } else c->dwarf.ip = 0; } ret = (c->dwarf.ip == 0) ? 0 : 1; Debug (2, "returning %d\n", ret); return ret; }
/* libunwind - a platform-independent unwind library Copyright (C) 2003-2004 Hewlett-Packard Co Contributed by David Mosberger This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "unwind_i.h" #include "offsets.h" int unw_step (unw_cursor_t *cursor) { struct cursor *c = (struct cursor *) cursor; int ret, i; Debug (1, "(cursor=%p, ip=0x%08x)\n", c, (unsigned) c->dwarf.ip); /* Try DWARF-based unwinding... */ ret = dwarf_step (&c->dwarf); if (ret < 0 && ret != -UNW_ENOINFO) { Debug (2, "returning %d\n", ret); return ret; } if (unlikely (ret < 0)) { /* DWARF failed, let's see if we can follow the frame-chain or skip over the signal trampoline. */ Debug (13, "dwarf_step() failed (ret=%d), trying fallback\n", ret); if (unw_is_signal_frame (cursor)) { #ifdef __linux__ /* Assume that the trampoline is at the beginning of the sigframe. */ unw_word_t ip, sc_addr = c->dwarf.ip + LINUX_RT_SIGFRAME_UC_OFF; dwarf_loc_t iaoq_loc = DWARF_LOC (sc_addr + LINUX_SC_IAOQ_OFF, 0); c->sigcontext_format = HPPA_SCF_LINUX_RT_SIGFRAME; c->sigcontext_addr = sc_addr; if ((ret = dwarf_get (&c->dwarf, iaoq_loc, &ip)) < 0) { Debug (2, "failed to read IAOQ[1] (ret=%d)\n", ret); return ret; } c->dwarf.ip = ip & ~0x3; /* mask out the privilege level */ for (i = 0; i < 32; ++i) { c->dwarf.loc[UNW_HPPA_GR + i] = DWARF_LOC (sc_addr + LINUX_SC_GR_OFF + 4*i, 0); c->dwarf.loc[UNW_HPPA_FR + i] = DWARF_LOC (sc_addr + LINUX_SC_FR_OFF + 4*i, 0); } if ((ret = dwarf_get (&c->dwarf, c->dwarf.loc[UNW_HPPA_SP], &c->dwarf.cfa)) < 0) { Debug (2, "failed to read SP (ret=%d)\n", ret); return ret; } #else # error Implement me! #endif } else c->dwarf.ip = 0; } ret = (c->dwarf.ip == 0) ? 0 : 1; Debug (2, "returning %d\n", ret); return ret; }
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/coreclr/vm/callingconvention.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // // Provides an abstraction over platform specific calling conventions (specifically, the calling convention // utilized by the JIT on that platform). The caller enumerates each argument of a signature in turn, and is // provided with information mapping that argument into registers and/or stack locations. // #ifndef __CALLING_CONVENTION_INCLUDED #define __CALLING_CONVENTION_INCLUDED BOOL IsRetBuffPassedAsFirstArg(); // Describes how a single argument is laid out in registers and/or stack locations when given as an input to a // managed method as part of a larger signature. // // Locations are split into floating point registers, general registers and stack offsets. Registers are // obviously architecture dependent but are represented as a zero-based index into the usual sequence in which // such registers are allocated for input on the platform in question. For instance: // X86: 0 == ecx, 1 == edx // ARM: 0 == r0, 1 == r1, 2 == r2 etc. // // Stack locations are represented as offsets from the stack pointer (at the point of the call). The offset is // given as an index of a pointer sized slot. Similarly the size of data on the stack is given in slot-sized // units. For instance, given an index of 2 and a size of 3: // X86: argument starts at [ESP + 8] and is 12 bytes long // AMD64: argument starts at [RSP + 16] and is 24 bytes long // // The structure is flexible enough to describe an argument that is split over several (consecutive) registers // and possibly on to the stack as well. struct ArgLocDesc { int m_idxFloatReg; // First floating point register used (or -1) int m_cFloatReg; // Count of floating point registers used (or 0) int m_idxGenReg; // First general register used (or -1) int m_cGenReg; // Count of general registers used (or 0) int m_byteStackIndex; // Stack offset in bytes (or -1) int m_byteStackSize; // Stack size in bytes #if defined(UNIX_AMD64_ABI) EEClass* m_eeClass; // For structs passed in register, it points to the EEClass of the struct #endif // UNIX_AMD64_ABI #ifdef FEATURE_HFA static unsigned getHFAFieldSize(CorInfoHFAElemType hfaType) { switch (hfaType) { case CORINFO_HFA_ELEM_FLOAT: return 4; case CORINFO_HFA_ELEM_DOUBLE: return 8; case CORINFO_HFA_ELEM_VECTOR64: return 8; case CORINFO_HFA_ELEM_VECTOR128: return 16; default: _ASSERTE(!"Invalid HFA Type"); return 0; } } #endif #if defined(TARGET_ARM64) unsigned m_hfaFieldSize; // Size of HFA field in bytes. void setHFAFieldSize(CorInfoHFAElemType hfaType) { m_hfaFieldSize = getHFAFieldSize(hfaType); } #endif // defined(TARGET_ARM64) #if defined(TARGET_ARM) BOOL m_fRequires64BitAlignment; // True if the argument should always be aligned (in registers or on the stack #endif ArgLocDesc() { Init(); } // Initialize to represent a non-placed argument (no register or stack slots referenced). void Init() { m_idxFloatReg = -1; m_cFloatReg = 0; m_idxGenReg = -1; m_cGenReg = 0; m_byteStackIndex = -1; m_byteStackSize = 0; #if defined(TARGET_ARM) m_fRequires64BitAlignment = FALSE; #endif #if defined(TARGET_ARM64) m_hfaFieldSize = 0; #endif // defined(TARGET_ARM64) #if defined(UNIX_AMD64_ABI) m_eeClass = NULL; #endif } }; // // TransitionBlock is layout of stack frame of method call, saved argument registers and saved callee saved registers. Even though not // all fields are used all the time, we use uniform form for simplicity. 
// struct TransitionBlock { #if defined(TARGET_X86) ArgumentRegisters m_argumentRegisters; CalleeSavedRegisters m_calleeSavedRegisters; TADDR m_ReturnAddress; #elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI ArgumentRegisters m_argumentRegisters; #endif CalleeSavedRegisters m_calleeSavedRegisters; TADDR m_ReturnAddress; #elif defined(TARGET_ARM) union { CalleeSavedRegisters m_calleeSavedRegisters; // alias saved link register as m_ReturnAddress struct { INT32 r4, r5, r6, r7, r8, r9, r10; INT32 r11; TADDR m_ReturnAddress; }; }; ArgumentRegisters m_argumentRegisters; #elif defined(TARGET_ARM64) union { CalleeSavedRegisters m_calleeSavedRegisters; struct { INT64 x29; // frame pointer TADDR m_ReturnAddress; INT64 x19, x20, x21, x22, x23, x24, x25, x26, x27, x28; }; }; TADDR padding; // Keep size of TransitionBlock as multiple of 16-byte. Simplifies code in PROLOG_WITH_TRANSITION_BLOCK INT64 m_x8RetBuffReg; ArgumentRegisters m_argumentRegisters; #else PORTABILITY_ASSERT("TransitionBlock"); #endif // The transition block should define everything pushed by callee. The code assumes in number of places that // end of the transition block is caller's stack pointer. static int GetOffsetOfReturnAddress() { LIMITED_METHOD_CONTRACT; return offsetof(TransitionBlock, m_ReturnAddress); } #ifdef TARGET_ARM64 static int GetOffsetOfRetBuffArgReg() { LIMITED_METHOD_CONTRACT; return offsetof(TransitionBlock, m_x8RetBuffReg); } static int GetOffsetOfFirstGCRefMapSlot() { return GetOffsetOfRetBuffArgReg(); } #else static int GetOffsetOfFirstGCRefMapSlot() { return GetOffsetOfArgumentRegisters(); } #endif static BYTE GetOffsetOfArgs() { LIMITED_METHOD_CONTRACT; // Offset of the stack args (which are after the TransitionBlock) return sizeof(TransitionBlock); } static int GetOffsetOfArgumentRegisters() { LIMITED_METHOD_CONTRACT; int offs; #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) offs = sizeof(TransitionBlock); #else offs = offsetof(TransitionBlock, m_argumentRegisters); #endif return offs; } static BOOL IsStackArgumentOffset(int offset) { LIMITED_METHOD_CONTRACT; #if defined(UNIX_AMD64_ABI) return offset >= (int)sizeof(TransitionBlock); #else int ofsArgRegs = GetOffsetOfArgumentRegisters(); return offset >= (int) (ofsArgRegs + ARGUMENTREGISTERS_SIZE); #endif } static BOOL IsArgumentRegisterOffset(int offset) { LIMITED_METHOD_CONTRACT; int ofsArgRegs = GetOffsetOfArgumentRegisters(); return offset >= ofsArgRegs && offset < (int) (ofsArgRegs + ARGUMENTREGISTERS_SIZE); } static UINT GetArgumentIndexFromOffset(int offset) { LIMITED_METHOD_CONTRACT; #if defined(UNIX_AMD64_ABI) _ASSERTE(offset != TransitionBlock::StructInRegsOffset); #endif offset -= GetOffsetOfArgumentRegisters(); _ASSERTE((offset % TARGET_POINTER_SIZE) == 0); return offset / TARGET_POINTER_SIZE; } static UINT GetStackArgumentIndexFromOffset(int offset) { LIMITED_METHOD_CONTRACT; return (offset - TransitionBlock::GetOffsetOfArgs()) / TARGET_POINTER_SIZE; } static UINT GetStackArgumentByteIndexFromOffset(int offset) { LIMITED_METHOD_CONTRACT; return (offset - TransitionBlock::GetOffsetOfArgs()); } #ifdef CALLDESCR_FPARGREGS static BOOL IsFloatArgumentRegisterOffset(int offset) { LIMITED_METHOD_CONTRACT; #if defined(UNIX_AMD64_ABI) return (offset != TransitionBlock::StructInRegsOffset) && (offset < 0); #else return offset < 0; #endif } // Check if an argument has floating point register, that means that it is // either a floating point argument or a struct passed in registers that // has a floating point member. 
static BOOL HasFloatRegister(int offset, ArgLocDesc* argLocDescForStructInRegs) { LIMITED_METHOD_CONTRACT; #if defined(UNIX_AMD64_ABI) if (offset == TransitionBlock::StructInRegsOffset) { return argLocDescForStructInRegs->m_cFloatReg > 0; } #endif return offset < 0; } static int GetOffsetOfFloatArgumentRegisters() { LIMITED_METHOD_CONTRACT; return -GetNegSpaceSize(); } #endif // CALLDESCR_FPARGREGS static int GetOffsetOfCalleeSavedRegisters() { LIMITED_METHOD_CONTRACT; return offsetof(TransitionBlock, m_calleeSavedRegisters); } static int GetNegSpaceSize() { LIMITED_METHOD_CONTRACT; int negSpaceSize = 0; #ifdef CALLDESCR_FPARGREGS negSpaceSize += sizeof(FloatArgumentRegisters); #endif #ifdef TARGET_ARM negSpaceSize += TARGET_POINTER_SIZE; // padding to make FloatArgumentRegisters address 8-byte aligned #endif return negSpaceSize; } static const int InvalidOffset = -1; #if defined(UNIX_AMD64_ABI) // Special offset value to represent struct passed in registers. Such a struct can span both // general purpose and floating point registers, so it can have two different offsets. static const int StructInRegsOffset = -2; #endif }; //----------------------------------------------------------------------- // ArgIterator is helper for dealing with calling conventions. // It is tightly coupled with TransitionBlock. It uses offsets into // TransitionBlock to represent argument locations for efficiency // reasons. Alternatively, it can also return ArgLocDesc for less // performance critical code. // // The ARGITERATOR_BASE argument of the template is provider of the parsed // method signature. Typically, the arg iterator works on top of MetaSig. // Reflection invoke uses alternative implementation to save signature parsing // time because of it has the parsed signature available. //----------------------------------------------------------------------- template<class ARGITERATOR_BASE> class ArgIteratorTemplate : public ARGITERATOR_BASE { public: //------------------------------------------------------------ // Constructor //------------------------------------------------------------ ArgIteratorTemplate() { WRAPPER_NO_CONTRACT; m_dwFlags = 0; } UINT SizeOfArgStack() { WRAPPER_NO_CONTRACT; if (!(m_dwFlags & SIZE_OF_ARG_STACK_COMPUTED)) ForceSigWalk(); _ASSERTE((m_dwFlags & SIZE_OF_ARG_STACK_COMPUTED) != 0); _ASSERTE((m_nSizeOfArgStack % TARGET_POINTER_SIZE) == 0); return m_nSizeOfArgStack; } // For use with ArgIterator. This function computes the amount of additional // memory required above the TransitionBlock. The parameter offsets // returned by ArgIteratorTemplate::GetNextOffset are relative to a // FramedMethodFrame, and may be in either of these regions. UINT SizeOfFrameArgumentArray() { WRAPPER_NO_CONTRACT; UINT size = SizeOfArgStack(); #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // The argument registers are not included in the stack size on AMD64 size += ARGUMENTREGISTERS_SIZE; #endif _ASSERTE((size % TARGET_POINTER_SIZE) == 0); return size; } //------------------------------------------------------------------------ #ifdef TARGET_X86 UINT CbStackPop() { WRAPPER_NO_CONTRACT; if (this->IsVarArg()) return 0; else return SizeOfArgStack(); } #endif // Is there a hidden parameter for the return parameter? 
// BOOL HasRetBuffArg() { WRAPPER_NO_CONTRACT; if (!(m_dwFlags & RETURN_FLAGS_COMPUTED)) ComputeReturnFlags(); return (m_dwFlags & RETURN_HAS_RET_BUFFER); } UINT GetFPReturnSize() { WRAPPER_NO_CONTRACT; if (!(m_dwFlags & RETURN_FLAGS_COMPUTED)) ComputeReturnFlags(); return m_dwFlags >> RETURN_FP_SIZE_SHIFT; } #ifdef TARGET_X86 //========================================================================= // Indicates whether an argument is to be put in a register using the // default IL calling convention. This should be called on each parameter // in the order it appears in the call signature. For a non-static method, // this function should also be called once for the "this" argument, prior // to calling it for the "real" arguments. Pass in a typ of ELEMENT_TYPE_CLASS. // // *pNumRegistersUsed: [in,out]: keeps track of the number of argument // registers assigned previously. The caller should // initialize this variable to 0 - then each call // will update it. // // typ: the signature type //========================================================================= static BOOL IsArgumentInRegister(int * pNumRegistersUsed, CorElementType typ, TypeHandle hnd) { LIMITED_METHOD_CONTRACT; if ( (*pNumRegistersUsed) < NUM_ARGUMENT_REGISTERS) { if (typ == ELEMENT_TYPE_VALUETYPE) { // The JIT enables passing trivial pointer sized structs in registers. MethodTable* pMT = hnd.GetMethodTable(); while (typ == ELEMENT_TYPE_VALUETYPE && pMT->GetNumInstanceFields() == 1 && (!pMT->HasLayout() || pMT->GetNumInstanceFieldBytes() == 4 )) // Don't do the optimization if we're getting specified anything but the trivial layout. { FieldDesc * pFD = pMT->GetApproxFieldDescListRaw(); CorElementType type = pFD->GetFieldType(); bool exitLoop = false; switch (type) { case ELEMENT_TYPE_VALUETYPE: { //@todo: Is it more apropos to call LookupApproxFieldTypeHandle() here? TypeHandle fldHnd = pFD->GetApproxFieldTypeHandleThrowing(); CONSISTENCY_CHECK(!fldHnd.IsNull()); pMT = fldHnd.GetMethodTable(); FALLTHROUGH; } case ELEMENT_TYPE_PTR: case ELEMENT_TYPE_I: case ELEMENT_TYPE_U: case ELEMENT_TYPE_I4: case ELEMENT_TYPE_U4: { typ = type; break; } default: exitLoop = true; break; } if (exitLoop) { break; } } } if (gElementTypeInfo[typ].m_enregister) { (*pNumRegistersUsed)++; return(TRUE); } } return(FALSE); } #endif // TARGET_X86 #if defined(ENREGISTERED_PARAMTYPE_MAXSIZE) // Note that this overload does not handle varargs static BOOL IsArgPassedByRef(TypeHandle th) { LIMITED_METHOD_CONTRACT; _ASSERTE(!th.IsNull()); // This method only works for valuetypes. It includes true value types, // primitives, enums and TypedReference. _ASSERTE(th.IsValueType()); size_t size = th.GetSize(); #ifdef TARGET_AMD64 return IsArgPassedByRef(size); #elif defined(TARGET_ARM64) // Composites greater than 16 bytes are passed by reference return ((size > ENREGISTERED_PARAMTYPE_MAXSIZE) && !th.IsHFA()); #else PORTABILITY_ASSERT("ArgIteratorTemplate::IsArgPassedByRef"); return FALSE; #endif } #ifdef TARGET_AMD64 // This overload should only be used in AMD64-specific code only. static BOOL IsArgPassedByRef(size_t size) { LIMITED_METHOD_CONTRACT; #ifdef UNIX_AMD64_ABI // No arguments are passed by reference on AMD64 on Unix return FALSE; #else // If the size is bigger than ENREGISTERED_PARAM_TYPE_MAXSIZE, or if the size is NOT a power of 2, then // the argument is passed by reference. return (size > ENREGISTERED_PARAMTYPE_MAXSIZE) || ((size & (size-1)) != 0); #endif } #endif // TARGET_AMD64 // This overload should be used for varargs only. 
static BOOL IsVarArgPassedByRef(size_t size) { LIMITED_METHOD_CONTRACT; #ifdef TARGET_AMD64 #ifdef UNIX_AMD64_ABI PORTABILITY_ASSERT("ArgIteratorTemplate::IsVarArgPassedByRef"); return FALSE; #else // UNIX_AMD64_ABI return IsArgPassedByRef(size); #endif // UNIX_AMD64_ABI #else return (size > ENREGISTERED_PARAMTYPE_MAXSIZE); #endif } BOOL IsArgPassedByRef() { LIMITED_METHOD_CONTRACT; #ifdef TARGET_AMD64 return IsArgPassedByRef(m_argSize); #elif defined(TARGET_ARM64) if (m_argType == ELEMENT_TYPE_VALUETYPE) { _ASSERTE(!m_argTypeHandle.IsNull()); return ((m_argSize > ENREGISTERED_PARAMTYPE_MAXSIZE) && (!m_argTypeHandle.IsHFA() || this->IsVarArg())); } return FALSE; #else PORTABILITY_ASSERT("ArgIteratorTemplate::IsArgPassedByRef"); return FALSE; #endif } #endif // ENREGISTERED_PARAMTYPE_MAXSIZE //------------------------------------------------------------ // Return the offsets of the special arguments //------------------------------------------------------------ static int GetThisOffset(); int GetRetBuffArgOffset(); int GetVASigCookieOffset(); int GetParamTypeArgOffset(); //------------------------------------------------------------ // Each time this is called, this returns a byte offset of the next // argument from the TransitionBlock* pointer. // // Returns TransitionBlock::InvalidOffset once you've hit the end // of the list. //------------------------------------------------------------ int GetNextOffset(); CorElementType GetArgType(TypeHandle *pTypeHandle = NULL) { LIMITED_METHOD_CONTRACT; if (pTypeHandle != NULL) { *pTypeHandle = m_argTypeHandle; } return m_argType; } int GetArgSize() { LIMITED_METHOD_CONTRACT; return m_argSize; } void ForceSigWalk(); #ifndef TARGET_X86 // Accessors for built in argument descriptions of the special implicit parameters not mentioned directly // in signatures (this pointer and the like). Whether or not these can be used successfully before all the // explicit arguments have been scanned is platform dependent. void GetThisLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetThisOffset(), pLoc); } void GetParamTypeLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetParamTypeArgOffset(), pLoc); } void GetVASigCookieLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetVASigCookieOffset(), pLoc); } #ifndef CALLDESCR_RETBUFFARGREG void GetRetBuffArgLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetRetBuffArgOffset(), pLoc); } #endif #endif // !TARGET_X86 ArgLocDesc* GetArgLocDescForStructInRegs() { #if defined(UNIX_AMD64_ABI) || defined (TARGET_ARM64) return m_hasArgLocDescForStructInRegs ? &m_argLocDescForStructInRegs : NULL; #else return NULL; #endif } #ifdef TARGET_X86 // Get layout information for the argument that the ArgIterator is currently visiting. void GetArgLoc(int argOffset, ArgLocDesc *pLoc) { LIMITED_METHOD_CONTRACT; pLoc->Init(); if (!TransitionBlock::IsStackArgumentOffset(argOffset)) { pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset); _ASSERTE(GetArgSize() <= TARGET_POINTER_SIZE); pLoc->m_cGenReg = 1; } else { pLoc->m_byteStackSize = GetArgSize(); pLoc->m_byteStackIndex = TransitionBlock::GetStackArgumentByteIndexFromOffset(argOffset); } } #endif #ifdef TARGET_ARM // Get layout information for the argument that the ArgIterator is currently visiting. 
void GetArgLoc(int argOffset, ArgLocDesc *pLoc) { LIMITED_METHOD_CONTRACT; pLoc->Init(); pLoc->m_fRequires64BitAlignment = m_fRequires64BitAlignment; const int byteArgSize = GetArgSize(); if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset)) { const int floatRegOfsInBytes = argOffset - TransitionBlock::GetOffsetOfFloatArgumentRegisters(); _ASSERTE((floatRegOfsInBytes % FLOAT_REGISTER_SIZE) == 0); pLoc->m_idxFloatReg = floatRegOfsInBytes / FLOAT_REGISTER_SIZE; pLoc->m_cFloatReg = ALIGN_UP(byteArgSize, FLOAT_REGISTER_SIZE) / FLOAT_REGISTER_SIZE; return; } if (!TransitionBlock::IsStackArgumentOffset(argOffset)) { pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset); if (byteArgSize <= (4 - pLoc->m_idxGenReg) * TARGET_POINTER_SIZE) { pLoc->m_cGenReg = ALIGN_UP(byteArgSize, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; } else { pLoc->m_cGenReg = 4 - pLoc->m_idxGenReg; pLoc->m_byteStackIndex = 0; pLoc->m_byteStackSize = StackElemSize(byteArgSize) - pLoc->m_cGenReg * TARGET_POINTER_SIZE; } } else { pLoc->m_byteStackIndex = TransitionBlock::GetStackArgumentByteIndexFromOffset(argOffset); pLoc->m_byteStackSize = StackElemSize(byteArgSize); } } #endif // TARGET_ARM #ifdef TARGET_ARM64 // Get layout information for the argument that the ArgIterator is currently visiting. void GetArgLoc(int argOffset, ArgLocDesc *pLoc) { LIMITED_METHOD_CONTRACT; pLoc->Init(); if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset)) { const int floatRegOfsInBytes = argOffset - TransitionBlock::GetOffsetOfFloatArgumentRegisters(); _ASSERTE((floatRegOfsInBytes % FLOAT_REGISTER_SIZE) == 0); pLoc->m_idxFloatReg = floatRegOfsInBytes / FLOAT_REGISTER_SIZE; if (!m_argTypeHandle.IsNull() && m_argTypeHandle.IsHFA()) { CorInfoHFAElemType type = m_argTypeHandle.GetHFAType(); pLoc->setHFAFieldSize(type); pLoc->m_cFloatReg = GetArgSize() / pLoc->m_hfaFieldSize; } else { pLoc->m_cFloatReg = 1; } return; } unsigned byteArgSize = GetArgSize(); // On ARM64 some composites are implicitly passed by reference. if (IsArgPassedByRef()) { byteArgSize = TARGET_POINTER_SIZE; } // Sanity check to make sure no caller is trying to get an ArgLocDesc that // describes the return buffer reg field that's in the TransitionBlock. _ASSERTE(argOffset != TransitionBlock::GetOffsetOfRetBuffArgReg()); if (!TransitionBlock::IsStackArgumentOffset(argOffset)) { pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset); pLoc->m_cGenReg = ALIGN_UP(byteArgSize, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;; } else { pLoc->m_byteStackIndex = TransitionBlock::GetStackArgumentByteIndexFromOffset(argOffset); const bool isValueType = (m_argType == ELEMENT_TYPE_VALUETYPE); const bool isFloatHfa = (isValueType && !m_argTypeHandle.IsNull() && m_argTypeHandle.IsHFA()); if (isFloatHfa) { CorInfoHFAElemType type = m_argTypeHandle.GetHFAType(); pLoc->setHFAFieldSize(type); } pLoc->m_byteStackSize = StackElemSize(byteArgSize, isValueType, isFloatHfa); } } #endif // TARGET_ARM64 #if defined(TARGET_AMD64) // Get layout information for the argument that the ArgIterator is currently visiting. void GetArgLoc(int argOffset, ArgLocDesc* pLoc) { LIMITED_METHOD_CONTRACT; #if defined(UNIX_AMD64_ABI) if (m_hasArgLocDescForStructInRegs) { *pLoc = m_argLocDescForStructInRegs; return; } if (argOffset == TransitionBlock::StructInRegsOffset) { // We always already have argLocDesc for structs passed in registers, we // compute it in the GetNextOffset for those since it is always needed. 
_ASSERTE(false); return; } #endif // UNIX_AMD64_ABI pLoc->Init(); #if defined(UNIX_AMD64_ABI) if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset)) { const int floatRegOfsInBytes = argOffset - TransitionBlock::GetOffsetOfFloatArgumentRegisters(); _ASSERTE((floatRegOfsInBytes % FLOAT_REGISTER_SIZE) == 0); pLoc->m_idxFloatReg = floatRegOfsInBytes / FLOAT_REGISTER_SIZE; pLoc->m_cFloatReg = 1; } else #endif // UNIX_AMD64_ABI if (!TransitionBlock::IsStackArgumentOffset(argOffset)) { #if !defined(UNIX_AMD64_ABI) // On Windows x64, we re-use the location in the transition block for both the integer and floating point registers if ((m_argType == ELEMENT_TYPE_R4) || (m_argType == ELEMENT_TYPE_R8)) { pLoc->m_idxFloatReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset); pLoc->m_cFloatReg = 1; } else #endif { pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset); pLoc->m_cGenReg = 1; } } else { pLoc->m_byteStackIndex = TransitionBlock::GetStackArgumentByteIndexFromOffset(argOffset); int argSizeInBytes; if (IsArgPassedByRef()) argSizeInBytes = TARGET_POINTER_SIZE; else argSizeInBytes = GetArgSize(); pLoc->m_byteStackSize = StackElemSize(argSizeInBytes); } } #endif // TARGET_AMD64 protected: DWORD m_dwFlags; // Cached flags int m_nSizeOfArgStack; // Cached value of SizeOfArgStack DWORD m_argNum; // Cached information about last argument CorElementType m_argType; int m_argSize; TypeHandle m_argTypeHandle; #if (defined(TARGET_AMD64) && defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) ArgLocDesc m_argLocDescForStructInRegs; bool m_hasArgLocDescForStructInRegs; #endif // (TARGET_AMD64 && UNIX_AMD64_ABI) || TARGET_ARM64 int m_ofsStack; // Current position of the stack iterator, in bytes #ifdef TARGET_X86 int m_numRegistersUsed; #ifdef FEATURE_INTERPRETER bool m_fUnmanagedCallConv; #endif #endif #ifdef UNIX_AMD64_ABI int m_idxGenReg; // Next general register to be assigned a value int m_idxFPReg; // Next floating point register to be assigned a value bool m_fArgInRegisters; // Indicates that the current argument is stored in registers #endif #ifdef TARGET_ARM int m_idxGenReg; // Next general register to be assigned a value WORD m_wFPRegs; // Bitmask of available floating point argument registers (s0-s15/d0-d7) bool m_fRequires64BitAlignment; // Cached info about the current arg #endif #ifdef TARGET_ARM64 int m_idxGenReg; // Next general register to be assigned a value int m_idxFPReg; // Next FP register to be assigned a value #endif enum { ITERATION_STARTED = 0x0001, // Started iterating over arguments SIZE_OF_ARG_STACK_COMPUTED = 0x0002, RETURN_FLAGS_COMPUTED = 0x0004, RETURN_HAS_RET_BUFFER = 0x0008, // Cached value of HasRetBuffArg #ifdef TARGET_X86 PARAM_TYPE_REGISTER_MASK = 0x0030, PARAM_TYPE_REGISTER_STACK = 0x0010, PARAM_TYPE_REGISTER_ECX = 0x0020, PARAM_TYPE_REGISTER_EDX = 0x0030, #endif METHOD_INVOKE_NEEDS_ACTIVATION = 0x0040, // Flag used by ArgIteratorForMethodInvoke RETURN_FP_SIZE_SHIFT = 8, // The rest of the flags is cached value of GetFPReturnSize }; void ComputeReturnFlags(); #ifndef TARGET_X86 void GetSimpleLoc(int offset, ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; #ifdef CALLDESCR_RETBUFFARGREG // Codepaths where this could happen have been removed. If this occurs, something // has been missed and this needs another look. 
_ASSERTE(offset != TransitionBlock::GetOffsetOfRetBuffArgReg()); #endif pLoc->Init(); pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(offset); pLoc->m_cGenReg = 1; } #endif }; template<class ARGITERATOR_BASE> int ArgIteratorTemplate<ARGITERATOR_BASE>::GetThisOffset() { WRAPPER_NO_CONTRACT; // This pointer is in the first argument register by default int ret = TransitionBlock::GetOffsetOfArgumentRegisters(); #ifdef TARGET_X86 // x86 is special as always ret += offsetof(ArgumentRegisters, ECX); #endif return ret; } template<class ARGITERATOR_BASE> int ArgIteratorTemplate<ARGITERATOR_BASE>::GetRetBuffArgOffset() { WRAPPER_NO_CONTRACT; _ASSERTE(this->HasRetBuffArg()); // RetBuf arg is in the second argument register by default int ret = TransitionBlock::GetOffsetOfArgumentRegisters(); #if TARGET_X86 // x86 is special as always ret += this->HasThis() ? offsetof(ArgumentRegisters, EDX) : offsetof(ArgumentRegisters, ECX); #elif TARGET_ARM64 ret = TransitionBlock::GetOffsetOfRetBuffArgReg(); #else if (this->HasThis()) ret += TARGET_POINTER_SIZE; #endif return ret; } template<class ARGITERATOR_BASE> int ArgIteratorTemplate<ARGITERATOR_BASE>::GetVASigCookieOffset() { WRAPPER_NO_CONTRACT; _ASSERTE(this->IsVarArg()); #if defined(TARGET_X86) // x86 is special as always return sizeof(TransitionBlock); #else // VaSig cookie is after this and retbuf arguments by default. int ret = TransitionBlock::GetOffsetOfArgumentRegisters(); if (this->HasThis()) { ret += TARGET_POINTER_SIZE; } if (this->HasRetBuffArg() && IsRetBuffPassedAsFirstArg()) { ret += TARGET_POINTER_SIZE; } return ret; #endif } //----------------------------------------------------------- // Get the extra param offset for shared generic code //----------------------------------------------------------- template<class ARGITERATOR_BASE> int ArgIteratorTemplate<ARGITERATOR_BASE>::GetParamTypeArgOffset() { CONTRACTL { INSTANCE_CHECK; if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS; if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS; if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); } MODE_ANY; } CONTRACTL_END _ASSERTE(this->HasParamType()); #ifdef TARGET_X86 // x86 is special as always if (!(m_dwFlags & SIZE_OF_ARG_STACK_COMPUTED)) ForceSigWalk(); switch (m_dwFlags & PARAM_TYPE_REGISTER_MASK) { case PARAM_TYPE_REGISTER_ECX: return TransitionBlock::GetOffsetOfArgumentRegisters() + offsetof(ArgumentRegisters, ECX); case PARAM_TYPE_REGISTER_EDX: return TransitionBlock::GetOffsetOfArgumentRegisters() + offsetof(ArgumentRegisters, EDX); default: break; } // The param type arg is last stack argument otherwise return sizeof(TransitionBlock); #else // The hidden arg is after this and retbuf arguments by default. int ret = TransitionBlock::GetOffsetOfArgumentRegisters(); if (this->HasThis()) { ret += TARGET_POINTER_SIZE; } if (this->HasRetBuffArg() && IsRetBuffPassedAsFirstArg()) { ret += TARGET_POINTER_SIZE; } return ret; #endif } // To avoid corner case bugs, limit maximum size of the arguments with sufficient margin #define MAX_ARG_SIZE 0xFFFFFF //------------------------------------------------------------ // Each time this is called, this returns a byte offset of the next // argument from the Frame* pointer. This offset can be positive *or* negative. // // Returns TransitionBlock::InvalidOffset once you've hit the end of the list. 
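//
// A typical caller drives the iterator in a loop (an illustrative sketch, not a
// normative pattern; "sig" stands for some MetaSig the caller already holds):
//
//     ArgIterator argit(&sig);
//     int ofs;
//     while ((ofs = argit.GetNextOffset()) != TransitionBlock::InvalidOffset)
//     {
//         ArgLocDesc loc;
//         argit.GetArgLoc(ofs, &loc);
//         // ... inspect loc.m_idxGenReg / loc.m_idxFloatReg / loc.m_byteStackIndex ...
//     }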
//------------------------------------------------------------ template<class ARGITERATOR_BASE> int ArgIteratorTemplate<ARGITERATOR_BASE>::GetNextOffset() { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; if (!(m_dwFlags & ITERATION_STARTED)) { int numRegistersUsed = 0; if (this->HasThis()) numRegistersUsed++; if (this->HasRetBuffArg() && IsRetBuffPassedAsFirstArg()) numRegistersUsed++; _ASSERTE(!this->IsVarArg() || !this->HasParamType()); #ifndef TARGET_X86 if (this->IsVarArg() || this->HasParamType()) { numRegistersUsed++; } #endif #ifdef TARGET_X86 if (this->IsVarArg()) { numRegistersUsed = NUM_ARGUMENT_REGISTERS; // Nothing else gets passed in registers for varargs } #ifdef FEATURE_INTERPRETER BYTE callconv = CallConv(); switch (callconv) { case IMAGE_CEE_CS_CALLCONV_C: case IMAGE_CEE_CS_CALLCONV_STDCALL: m_numRegistersUsed = NUM_ARGUMENT_REGISTERS; m_ofsStack = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *); m_fUnmanagedCallConv = true; break; case IMAGE_CEE_CS_CALLCONV_THISCALL: case IMAGE_CEE_CS_CALLCONV_FASTCALL: _ASSERTE_MSG(false, "Unsupported calling convention."); default: m_fUnmanagedCallConv = false; m_numRegistersUsed = numRegistersUsed; m_ofsStack = TransitionBlock::GetOffsetOfArgs() + SizeOfArgStack(); break; } #else m_numRegistersUsed = numRegistersUsed; m_ofsStack = TransitionBlock::GetOffsetOfArgs() + SizeOfArgStack(); #endif #elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI m_idxGenReg = numRegistersUsed; m_ofsStack = 0; m_idxFPReg = 0; #else m_ofsStack = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *); #endif #elif defined(TARGET_ARM) m_idxGenReg = numRegistersUsed; m_ofsStack = 0; m_wFPRegs = 0; #elif defined(TARGET_ARM64) m_idxGenReg = numRegistersUsed; m_ofsStack = 0; m_idxFPReg = 0; #else PORTABILITY_ASSERT("ArgIteratorTemplate::GetNextOffset"); #endif m_argNum = 0; m_dwFlags |= ITERATION_STARTED; } // We're done going through the args for this MetaSig if (m_argNum == this->NumFixedArgs()) return TransitionBlock::InvalidOffset; TypeHandle thValueType; CorElementType argType = this->GetNextArgumentType(m_argNum++, &thValueType); int argSize = MetaSig::GetElemSize(argType, thValueType); m_argType = argType; m_argSize = argSize; m_argTypeHandle = thValueType; #if defined(UNIX_AMD64_ABI) || defined (TARGET_ARM64) m_hasArgLocDescForStructInRegs = false; #endif #ifdef TARGET_X86 #ifdef FEATURE_INTERPRETER if (m_fUnmanagedCallConv) { int argOfs = m_ofsStack; m_ofsStack += StackElemSize(argSize); return argOfs; } #endif if (IsArgumentInRegister(&m_numRegistersUsed, argType, thValueType)) { return TransitionBlock::GetOffsetOfArgumentRegisters() + (NUM_ARGUMENT_REGISTERS - m_numRegistersUsed) * sizeof(void *); } m_ofsStack -= StackElemSize(argSize); _ASSERTE(m_ofsStack >= TransitionBlock::GetOffsetOfArgs()); return m_ofsStack; #elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI m_fArgInRegisters = true; int cFPRegs = 0; int cGenRegs = 0; int cbArg = StackElemSize(argSize); switch (argType) { case ELEMENT_TYPE_R4: // 32-bit floating point argument. cFPRegs = 1; break; case ELEMENT_TYPE_R8: // 64-bit floating point argument. 
        cFPRegs = 1;
        break;

    case ELEMENT_TYPE_VALUETYPE:
    {
        MethodTable *pMT = m_argTypeHandle.GetMethodTable();
        if (this->IsRegPassedStruct(pMT))
        {
            EEClass* eeClass = pMT->GetClass();
            cGenRegs = 0;
            for (int i = 0; i < eeClass->GetNumberEightBytes(); i++)
            {
                switch (eeClass->GetEightByteClassification(i))
                {
                    case SystemVClassificationTypeInteger:
                    case SystemVClassificationTypeIntegerReference:
                    case SystemVClassificationTypeIntegerByRef:
                        cGenRegs++;
                        break;
                    case SystemVClassificationTypeSSE:
                        cFPRegs++;
                        break;
                    default:
                        _ASSERTE(false);
                        break;
                }
            }

            // Check if we have enough registers available for the struct passing
            if ((cFPRegs + m_idxFPReg <= NUM_FLOAT_ARGUMENT_REGISTERS) && (cGenRegs + m_idxGenReg) <= NUM_ARGUMENT_REGISTERS)
            {
                m_argLocDescForStructInRegs.Init();
                m_argLocDescForStructInRegs.m_cGenReg = cGenRegs;
                m_argLocDescForStructInRegs.m_cFloatReg = cFPRegs;
                m_argLocDescForStructInRegs.m_idxGenReg = m_idxGenReg;
                m_argLocDescForStructInRegs.m_idxFloatReg = m_idxFPReg;
                m_argLocDescForStructInRegs.m_eeClass = eeClass;

                m_hasArgLocDescForStructInRegs = true;

                m_idxGenReg += cGenRegs;
                m_idxFPReg += cFPRegs;

                return TransitionBlock::StructInRegsOffset;
            }
        }

        // Set the register counts to indicate that this argument will not be passed in registers
        cFPRegs = 0;
        cGenRegs = 0;
        break;
    }

    default:
        cGenRegs = cbArg / 8; // GP reg size
        break;
    }

    if ((cFPRegs > 0) && (cFPRegs + m_idxFPReg <= NUM_FLOAT_ARGUMENT_REGISTERS))
    {
        int argOfs = TransitionBlock::GetOffsetOfFloatArgumentRegisters() + m_idxFPReg * 16;
        m_idxFPReg += cFPRegs;
        return argOfs;
    }
    else if ((cGenRegs > 0) && (m_idxGenReg + cGenRegs <= NUM_ARGUMENT_REGISTERS))
    {
        int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 8;
        m_idxGenReg += cGenRegs;
        return argOfs;
    }

    m_fArgInRegisters = false;

    int argOfs = TransitionBlock::GetOffsetOfArgs() + m_ofsStack;
    m_ofsStack += cbArg;
    return argOfs;
#else
    // Each argument takes exactly one slot on AMD64 on Windows
    int argOfs = m_ofsStack;
    m_ofsStack += sizeof(void *);
    return argOfs;
#endif
#elif defined(TARGET_ARM)
    // First look at the underlying type of the argument to determine some basic properties:
    //  1) The size of the argument in bytes (rounded up to the stack slot size of 4 if necessary).
    //  2) Whether the argument represents a floating point primitive (ELEMENT_TYPE_R4 or ELEMENT_TYPE_R8).
    //  3) Whether the argument requires 64-bit alignment (anything that contains an Int64/UInt64).

    bool fFloatingPoint = false;
    bool fRequiresAlign64Bit = false;

    switch (argType)
    {
    case ELEMENT_TYPE_I8:
    case ELEMENT_TYPE_U8:
        // 64-bit integers require 64-bit alignment on ARM.
        fRequiresAlign64Bit = true;
        break;

    case ELEMENT_TYPE_R4:
        // 32-bit floating point argument.
        fFloatingPoint = true;
        break;

    case ELEMENT_TYPE_R8:
        // 64-bit floating point argument.
        fFloatingPoint = true;
        fRequiresAlign64Bit = true;
        break;

    case ELEMENT_TYPE_VALUETYPE:
    {
        // Value type case: extract the alignment requirement, note that this has to handle
        // the interop "native value types".
        fRequiresAlign64Bit = thValueType.RequiresAlign8();

#ifdef FEATURE_HFA
        // Handle HFAs: packed structures of 1-4 floats or doubles that are passed in FP argument
        // registers if possible.
        if (thValueType.IsHFA())
        {
            fFloatingPoint = true;
        }
#endif
        break;
    }

    default:
        // The default is 4-byte arguments (or arguments promoted to 4 bytes), which are non-FP
        // and don't require any 64-bit alignment.
        break;
    }

    // Now attempt to place the argument into some combination of floating point or general registers and
    // the stack.
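    // As an illustrative trace of the VFP bitmask scheme below: after a single float lands in s0
    // (m_wFPRegs == 0x0001), a double computes wAllocMask == 0x3 and cShift == 2; the first aligned
    // free pair is s2/s3 (i.e. d1), so the mask becomes 0x000D and the returned offset is 8 bytes
    // into the float argument register area.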
    // Save the alignment requirement
    m_fRequires64BitAlignment = fRequiresAlign64Bit;

    int cbArg = StackElemSize(argSize);
    _ASSERTE((cbArg % TARGET_POINTER_SIZE) == 0);

    // Ignore floating point argument placement in registers if we're dealing with a vararg function (the ABI
    // specifies this so that vararg processing on the callee side is simplified).
#ifndef ARM_SOFTFP
    if (fFloatingPoint && !this->IsVarArg())
    {
        // Handle floating point (primitive) arguments.

        // First determine whether we can place the argument in VFP registers. There are 16 32-bit
        // and 8 64-bit argument registers that share the same register space (e.g. D0 overlaps S0 and
        // S1). The ABI specifies that VFP values will be passed in the lowest sequence of registers that
        // haven't been used yet and have the required alignment. So the sequence (float, double, float)
        // would be mapped to (S0, D1, S1) or (S0, S2/S3, S1).
        //
        // We use a 16-bit bitmap to record which registers have been used so far.
        //
        // So that we can use the same basic loop for each argument type (float, double or HFA struct), we
        // set up the following input parameters based on the size and alignment requirements of the arguments:
        //   wAllocMask : bitmask of the number of 32-bit registers we need (1 for 1, 3 for 2, 7 for 3 etc.)
        //   cSteps     : number of loop iterations it'll take to search the 16 registers
        //   cShift     : how many bits to shift the allocation mask on each attempt

        WORD wAllocMask = (1 << (cbArg / 4)) - 1;
        WORD cSteps = (WORD)(fRequiresAlign64Bit ? 9 - (cbArg / 8) : 17 - (cbArg / 4));
        WORD cShift = fRequiresAlign64Bit ? 2 : 1;

        // Look through the availability bitmask for a free register or register pair.
        for (WORD i = 0; i < cSteps; i++)
        {
            if ((m_wFPRegs & wAllocMask) == 0)
            {
                // We found one, mark the register or registers as used.
                m_wFPRegs |= wAllocMask;

                // Indicate the registers used to the caller and return.
                return TransitionBlock::GetOffsetOfFloatArgumentRegisters() + (i * cShift * 4);
            }
            wAllocMask <<= cShift;
        }

        // The FP argument is going to live on the stack. Once this happens the ABI demands we mark all FP
        // registers as unavailable.
        m_wFPRegs = 0xffff;

        // Doubles or HFAs containing doubles need the stack aligned appropriately.
        if (fRequiresAlign64Bit)
        {
            m_ofsStack = (int)ALIGN_UP(m_ofsStack, TARGET_POINTER_SIZE * 2);
        }

        // Indicate the stack location of the argument to the caller.
        int argOfs = TransitionBlock::GetOffsetOfArgs() + m_ofsStack;

        // Record the stack usage.
        m_ofsStack += cbArg;

        return argOfs;
    }
#endif // ARM_SOFTFP

    //
    // Handle the non-floating point case.
    //

    if (m_idxGenReg < 4)
    {
        if (fRequiresAlign64Bit)
        {
            // The argument requires 64-bit alignment. Align the next general argument register if
            // we have any left. See step C.3 in the algorithm in the ABI spec.
            m_idxGenReg = (int)ALIGN_UP(m_idxGenReg, 2);
        }

        int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 4;

        int cRemainingRegs = 4 - m_idxGenReg;
        if (cbArg <= cRemainingRegs * TARGET_POINTER_SIZE)
        {
            // Mark the registers just allocated as used.
            m_idxGenReg += ALIGN_UP(cbArg, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
            return argOfs;
        }

        // The ABI supports splitting a non-FP argument across registers and the stack. But this is
        // disabled if the FP arguments already overflowed onto the stack (i.e. the stack index is not
        // zero). The following code marks the general argument registers as exhausted if this condition
        // holds. See step C.5 in the algorithm in the ABI spec.
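        // For example (illustrative): an 8-byte, 4-byte-aligned struct arriving with only r3 free is
        // split as 4 bytes in r3 plus 4 bytes at the base of the stack argument area; if FP arguments
        // have already spilled to the stack, the whole struct is instead placed on the stack.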
        m_idxGenReg = 4;

        if (m_ofsStack == 0)
        {
            m_ofsStack += cbArg - cRemainingRegs * TARGET_POINTER_SIZE;
            return argOfs;
        }
    }

    if (fRequiresAlign64Bit)
    {
        // The argument requires 64-bit alignment. If it is going to be passed on the stack, align
        // the next stack slot. See step C.6 in the algorithm in the ABI spec.
        m_ofsStack = (int)ALIGN_UP(m_ofsStack, TARGET_POINTER_SIZE * 2);
    }

    int argOfs = TransitionBlock::GetOffsetOfArgs() + m_ofsStack;

    // Advance the stack pointer over the argument just placed.
    m_ofsStack += cbArg;

    return argOfs;
#elif defined(TARGET_ARM64)

    int cFPRegs = 0;

    switch (argType)
    {
    case ELEMENT_TYPE_R4:
        // 32-bit floating point argument.
        cFPRegs = 1;
        break;

    case ELEMENT_TYPE_R8:
        // 64-bit floating point argument.
        cFPRegs = 1;
        break;

    case ELEMENT_TYPE_VALUETYPE:
    {
        // Handle HFAs: packed structures of 1-4 floats, doubles, or short vectors
        // that are passed in FP argument registers if possible.
        if (thValueType.IsHFA())
        {
            CorInfoHFAElemType type = thValueType.GetHFAType();

            m_argLocDescForStructInRegs.Init();
            m_argLocDescForStructInRegs.m_idxFloatReg = m_idxFPReg;
            m_argLocDescForStructInRegs.setHFAFieldSize(type);
            cFPRegs = argSize / m_argLocDescForStructInRegs.m_hfaFieldSize;
            m_argLocDescForStructInRegs.m_cFloatReg = cFPRegs;

            // Check if we have enough registers available for the HFA passing
            if ((cFPRegs + m_idxFPReg) <= 8)
            {
                m_hasArgLocDescForStructInRegs = true;
            }
        }
        else
        {
            // Composites greater than 16 bytes are passed by reference.
            if (argSize > ENREGISTERED_PARAMTYPE_MAXSIZE)
            {
                argSize = sizeof(TADDR);
            }
        }

        break;
    }

    default:
        break;
    }

    const bool isValueType = (argType == ELEMENT_TYPE_VALUETYPE);
    const bool isFloatHfa = thValueType.IsFloatHfa();
    const int cbArg = StackElemSize(argSize, isValueType, isFloatHfa);

    if (cFPRegs > 0 && !this->IsVarArg())
    {
        if (cFPRegs + m_idxFPReg <= 8)
        {
            // Each floating point register in the argument area is 16 bytes.
            int argOfs = TransitionBlock::GetOffsetOfFloatArgumentRegisters() + m_idxFPReg * 16;
            m_idxFPReg += cFPRegs;
            return argOfs;
        }
        else
        {
            m_idxFPReg = 8;
        }
    }
    else
    {
#if !defined(OSX_ARM64_ABI)
        _ASSERTE((cbArg % TARGET_POINTER_SIZE) == 0);
#endif
        const int regSlots = ALIGN_UP(cbArg, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
        // Only x0-x7 are valid argument registers (x8 is always the return buffer)
        if (m_idxGenReg + regSlots <= 8)
        {
            // The entirety of the arg fits in the register slots.
            int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 8;
            m_idxGenReg += regSlots;
            return argOfs;
        }
        else
        {
#ifdef _WIN32
            if (this->IsVarArg() && m_idxGenReg < 8)
            {
                // Address the Windows ARM64 varargs case where an arg is split between regs and stack.
                // This can happen in the varargs case because the first 64 bytes of the stack are loaded
                // into x0-x7, and any remaining stack arguments are placed normally.
                int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 8;

                // Increase m_ofsStack to account for the space used for the remainder of the arg after
                // registers are filled.
                m_ofsStack += cbArg + (m_idxGenReg - 8) * TARGET_POINTER_SIZE;

                // We used up the remaining reg slots.
                m_idxGenReg = 8;

                return argOfs;
            }
            else
#endif
            {
                // Don't use reg slots for this. It will be passed purely on the stack arg space.
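                // (Unlike 32-bit ARM, AArch64 never splits a non-vararg argument between registers
                // and stack: e.g. a 16-byte struct arriving with only x7 free goes entirely to the
                // stack, and x7 is left unused for the remainder of the signature.)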
m_idxGenReg = 8; } } } #ifdef OSX_ARM64_ABI int alignment; if (!isValueType) { _ASSERTE((cbArg & (cbArg - 1)) == 0); alignment = cbArg; } else if (isFloatHfa) { alignment = 4; } else { alignment = 8; } m_ofsStack = (int)ALIGN_UP(m_ofsStack, alignment); #endif // OSX_ARM64_ABI int argOfs = TransitionBlock::GetOffsetOfArgs() + m_ofsStack; m_ofsStack += cbArg; return argOfs; #else PORTABILITY_ASSERT("ArgIteratorTemplate::GetNextOffset"); return TransitionBlock::InvalidOffset; #endif } template<class ARGITERATOR_BASE> void ArgIteratorTemplate<ARGITERATOR_BASE>::ComputeReturnFlags() { CONTRACTL { INSTANCE_CHECK; if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS; if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS; if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); } MODE_ANY; } CONTRACTL_END TypeHandle thValueType; CorElementType type = this->GetReturnType(&thValueType); DWORD flags = RETURN_FLAGS_COMPUTED; switch (type) { case ELEMENT_TYPE_TYPEDBYREF: #ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE if (sizeof(TypedByRef) > ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE) flags |= RETURN_HAS_RET_BUFFER; #else flags |= RETURN_HAS_RET_BUFFER; #endif break; case ELEMENT_TYPE_R4: #ifndef ARM_SOFTFP flags |= sizeof(float) << RETURN_FP_SIZE_SHIFT; #endif break; case ELEMENT_TYPE_R8: #ifndef ARM_SOFTFP flags |= sizeof(double) << RETURN_FP_SIZE_SHIFT; #endif break; case ELEMENT_TYPE_VALUETYPE: #ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE { _ASSERTE(!thValueType.IsNull()); #if defined(UNIX_AMD64_ABI) MethodTable *pMT = thValueType.AsMethodTable(); if (pMT->IsRegPassedStruct()) { EEClass* eeClass = pMT->GetClass(); if (eeClass->GetNumberEightBytes() == 1) { // Structs occupying just one eightbyte are treated as int / double if (eeClass->GetEightByteClassification(0) == SystemVClassificationTypeSSE) { flags |= sizeof(double) << RETURN_FP_SIZE_SHIFT; } } else { // Size of the struct is 16 bytes flags |= (16 << RETURN_FP_SIZE_SHIFT); // The lowest two bits of the size encode the order of the int and SSE fields if (eeClass->GetEightByteClassification(0) == SystemVClassificationTypeSSE) { flags |= (1 << RETURN_FP_SIZE_SHIFT); } if (eeClass->GetEightByteClassification(1) == SystemVClassificationTypeSSE) { flags |= (2 << RETURN_FP_SIZE_SHIFT); } } break; } #else // UNIX_AMD64_ABI #ifdef FEATURE_HFA if (thValueType.IsHFA() && !this->IsVarArg()) { CorInfoHFAElemType hfaType = thValueType.GetHFAType(); int hfaFieldSize = ArgLocDesc::getHFAFieldSize(hfaType); flags |= ((4 * hfaFieldSize) << RETURN_FP_SIZE_SHIFT); break; } #endif size_t size = thValueType.GetSize(); #if defined(TARGET_X86) || defined(TARGET_AMD64) // Return value types of size which are not powers of 2 using a RetBuffArg if ((size & (size-1)) != 0) { flags |= RETURN_HAS_RET_BUFFER; break; } #endif // defined(TARGET_X86) || defined(TARGET_AMD64) if (size <= ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE) break; #endif // UNIX_AMD64_ABI } #endif // ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE // Value types are returned using return buffer by default flags |= RETURN_HAS_RET_BUFFER; break; default: break; } m_dwFlags |= flags; } template<class ARGITERATOR_BASE> void ArgIteratorTemplate<ARGITERATOR_BASE>::ForceSigWalk() { CONTRACTL { INSTANCE_CHECK; if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS; if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS; if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); } MODE_ANY; } CONTRACTL_END // This can be only used before 
the actual argument iteration started _ASSERTE((m_dwFlags & ITERATION_STARTED) == 0); #ifdef TARGET_X86 // // x86 is special as always // int numRegistersUsed = 0; int nSizeOfArgStack = 0; if (this->HasThis()) numRegistersUsed++; if (this->HasRetBuffArg() && IsRetBuffPassedAsFirstArg()) numRegistersUsed++; if (this->IsVarArg()) { nSizeOfArgStack += sizeof(void *); numRegistersUsed = NUM_ARGUMENT_REGISTERS; // Nothing else gets passed in registers for varargs } #ifdef FEATURE_INTERPRETER BYTE callconv = CallConv(); switch (callconv) { case IMAGE_CEE_CS_CALLCONV_C: case IMAGE_CEE_CS_CALLCONV_STDCALL: numRegistersUsed = NUM_ARGUMENT_REGISTERS; nSizeOfArgStack = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *); break; case IMAGE_CEE_CS_CALLCONV_THISCALL: case IMAGE_CEE_CS_CALLCONV_FASTCALL: _ASSERTE_MSG(false, "Unsupported calling convention."); default: break; } #endif // FEATURE_INTERPRETER DWORD nArgs = this->NumFixedArgs(); for (DWORD i = 0; i < nArgs; i++) { TypeHandle thValueType; CorElementType type = this->GetNextArgumentType(i, &thValueType); if (!IsArgumentInRegister(&numRegistersUsed, type, thValueType)) { int structSize = MetaSig::GetElemSize(type, thValueType); nSizeOfArgStack += StackElemSize(structSize); #ifndef DACCESS_COMPILE if (nSizeOfArgStack > MAX_ARG_SIZE) { #ifdef _DEBUG // We should not ever throw exception in the "FORBIDGC_LOADER_USE_ENABLED" mode. // The contract violation is required to workaround bug in the static contract analyzer. _ASSERTE(!FORBIDGC_LOADER_USE_ENABLED()); CONTRACT_VIOLATION(ThrowsViolation); #endif COMPlusThrow(kNotSupportedException); } #endif } } if (this->HasParamType()) { DWORD paramTypeFlags = 0; if (numRegistersUsed < NUM_ARGUMENT_REGISTERS) { numRegistersUsed++; paramTypeFlags = (numRegistersUsed == 1) ? PARAM_TYPE_REGISTER_ECX : PARAM_TYPE_REGISTER_EDX; } else { nSizeOfArgStack += sizeof(void *); paramTypeFlags = PARAM_TYPE_REGISTER_STACK; } m_dwFlags |= paramTypeFlags; } #else // TARGET_X86 int maxOffset = TransitionBlock::GetOffsetOfArgs(); int ofs; while (TransitionBlock::InvalidOffset != (ofs = GetNextOffset())) { int stackElemSize; #ifdef TARGET_AMD64 #ifdef UNIX_AMD64_ABI if (m_fArgInRegisters) { // Arguments passed in registers don't consume any stack continue; } stackElemSize = StackElemSize(GetArgSize()); #else // UNIX_AMD64_ABI // All stack arguments take just one stack slot on AMD64 because of arguments bigger // than a stack slot are passed by reference. stackElemSize = TARGET_POINTER_SIZE; #endif // UNIX_AMD64_ABI #else // TARGET_AMD64 TypeHandle thValueType; const CorElementType argType = GetArgType(&thValueType); const bool isValueType = (argType == ELEMENT_TYPE_VALUETYPE); stackElemSize = StackElemSize(GetArgSize(), isValueType, thValueType.IsFloatHfa()); #if defined(ENREGISTERED_PARAMTYPE_MAXSIZE) if (IsArgPassedByRef()) stackElemSize = TARGET_POINTER_SIZE; #endif #endif // TARGET_AMD64 int endOfs = ofs + stackElemSize; if (endOfs > maxOffset) { #if !defined(DACCESS_COMPILE) if (endOfs > MAX_ARG_SIZE) { #ifdef _DEBUG // We should not ever throw exception in the "FORBIDGC_LOADER_USE_ENABLED" mode. // The contract violation is required to workaround bug in the static contract analyzer. 
_ASSERTE(!FORBIDGC_LOADER_USE_ENABLED()); CONTRACT_VIOLATION(ThrowsViolation); #endif COMPlusThrow(kNotSupportedException); } #endif maxOffset = endOfs; } } // Clear the iterator started flag m_dwFlags &= ~ITERATION_STARTED; int nSizeOfArgStack = maxOffset - TransitionBlock::GetOffsetOfArgs(); #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) nSizeOfArgStack = (nSizeOfArgStack > (int)sizeof(ArgumentRegisters)) ? (nSizeOfArgStack - sizeof(ArgumentRegisters)) : 0; #endif #endif // TARGET_X86 // arg stack size is rounded to the pointer size on all platforms. nSizeOfArgStack = (int)ALIGN_UP(nSizeOfArgStack, TARGET_POINTER_SIZE); // Cache the result m_nSizeOfArgStack = nSizeOfArgStack; m_dwFlags |= SIZE_OF_ARG_STACK_COMPUTED; this->Reset(); } class ArgIteratorBase { protected: MetaSig * m_pSig; FORCEINLINE CorElementType GetReturnType(TypeHandle * pthValueType) { WRAPPER_NO_CONTRACT; #ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE return m_pSig->GetReturnTypeNormalized(pthValueType); #else return m_pSig->GetReturnTypeNormalized(); #endif } FORCEINLINE CorElementType GetNextArgumentType(DWORD iArg, TypeHandle * pthValueType) { WRAPPER_NO_CONTRACT; _ASSERTE(iArg == m_pSig->GetArgNum()); CorElementType et = m_pSig->PeekArgNormalized(pthValueType); m_pSig->SkipArg(); return et; } FORCEINLINE void Reset() { WRAPPER_NO_CONTRACT; m_pSig->Reset(); } FORCEINLINE BOOL IsRegPassedStruct(MethodTable* pMT) { return pMT->IsRegPassedStruct(); } public: BOOL HasThis() { LIMITED_METHOD_CONTRACT; return m_pSig->HasThis(); } BOOL HasParamType() { LIMITED_METHOD_CONTRACT; return m_pSig->GetCallingConventionInfo() & CORINFO_CALLCONV_PARAMTYPE; } BOOL IsVarArg() { LIMITED_METHOD_CONTRACT; return m_pSig->IsVarArg() || m_pSig->IsTreatAsVarArg(); } DWORD NumFixedArgs() { LIMITED_METHOD_CONTRACT; return m_pSig->NumFixedArgs(); } #ifdef FEATURE_INTERPRETER BYTE CallConv() { return m_pSig->GetCallingConvention(); } #endif // FEATURE_INTERPRETER // // The following is used by the profiler to dig into the iterator for // discovering if the method has a This pointer or a return buffer. // Do not use this to re-initialize the signature, use the exposed Init() // method in this class. // MetaSig *GetSig(void) { return m_pSig; } }; class ArgIterator : public ArgIteratorTemplate<ArgIteratorBase> { public: ArgIterator(MetaSig * pSig) { m_pSig = pSig; } // This API returns true if we are returning a structure in registers instead of using a byref return buffer BOOL HasNonStandardByvalReturn() { WRAPPER_NO_CONTRACT; #ifdef ENREGISTERED_RETURNTYPE_MAXSIZE CorElementType type = m_pSig->GetReturnTypeNormalized(); return (type == ELEMENT_TYPE_VALUETYPE || type == ELEMENT_TYPE_TYPEDBYREF) && !HasRetBuffArg(); #else return FALSE; #endif } BOOL HasValueTypeReturn() { WRAPPER_NO_CONTRACT; TypeHandle thValueType; CorElementType type = m_pSig->GetReturnTypeNormalized(&thValueType); // Enums are normalized to their underlying type when passing to and from functions. // This occurs in both managed and native calling conventions. 
        return type == ELEMENT_TYPE_VALUETYPE && !thValueType.IsEnum();
    }
};

// Convenience helper
inline BOOL HasRetBuffArg(MetaSig * pSig)
{
    WRAPPER_NO_CONTRACT;
    ArgIterator argit(pSig);
    return argit.HasRetBuffArg();
}

#ifdef UNIX_X86_ABI
// For UNIX_X86_ABI and unmanaged functions, we always need a RetBuf if the return type is a VALUETYPE
inline BOOL HasRetBuffArgUnmanagedFixup(MetaSig * pSig)
{
    WRAPPER_NO_CONTRACT;
    // We cannot just use pSig->GetReturnType() here since it will return ELEMENT_TYPE_VALUETYPE for enums
    CorElementType type = pSig->GetRetTypeHandleThrowing().GetVerifierCorElementType();
    return type == ELEMENT_TYPE_VALUETYPE;
}
#endif

inline BOOL IsRetBuffPassedAsFirstArg()
{
    WRAPPER_NO_CONTRACT;
#ifndef TARGET_ARM64
    return TRUE;
#else
    return FALSE;
#endif
}

#endif // __CALLING_CONVENTION_INCLUDED
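
// Illustrative note (a sketch, not part of this header's contract; "pMD" stands for some
// MethodDesc the caller already holds): the helpers above compose with ArgIterator like so:
//
//     MetaSig sig(pMD);
//     if (HasRetBuffArg(&sig))
//     {
//         ArgIterator argit(&sig);
//         int retBufOfs = argit.GetRetBuffArgOffset();
//         // retBufOfs locates the hidden return-buffer pointer relative to the TransitionBlock
//         // (a dedicated x8 slot on ARM64, an ordinary argument register elsewhere).
//     }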
//------------------------------------------------------------ template<class ARGITERATOR_BASE> int ArgIteratorTemplate<ARGITERATOR_BASE>::GetNextOffset() { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; if (!(m_dwFlags & ITERATION_STARTED)) { int numRegistersUsed = 0; if (this->HasThis()) numRegistersUsed++; if (this->HasRetBuffArg() && IsRetBuffPassedAsFirstArg()) numRegistersUsed++; _ASSERTE(!this->IsVarArg() || !this->HasParamType()); #ifndef TARGET_X86 if (this->IsVarArg() || this->HasParamType()) { numRegistersUsed++; } #endif #ifdef TARGET_X86 if (this->IsVarArg()) { numRegistersUsed = NUM_ARGUMENT_REGISTERS; // Nothing else gets passed in registers for varargs } #ifdef FEATURE_INTERPRETER BYTE callconv = CallConv(); switch (callconv) { case IMAGE_CEE_CS_CALLCONV_C: case IMAGE_CEE_CS_CALLCONV_STDCALL: m_numRegistersUsed = NUM_ARGUMENT_REGISTERS; m_ofsStack = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *); m_fUnmanagedCallConv = true; break; case IMAGE_CEE_CS_CALLCONV_THISCALL: case IMAGE_CEE_CS_CALLCONV_FASTCALL: _ASSERTE_MSG(false, "Unsupported calling convention."); default: m_fUnmanagedCallConv = false; m_numRegistersUsed = numRegistersUsed; m_ofsStack = TransitionBlock::GetOffsetOfArgs() + SizeOfArgStack(); break; } #else m_numRegistersUsed = numRegistersUsed; m_ofsStack = TransitionBlock::GetOffsetOfArgs() + SizeOfArgStack(); #endif #elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI m_idxGenReg = numRegistersUsed; m_ofsStack = 0; m_idxFPReg = 0; #else m_ofsStack = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *); #endif #elif defined(TARGET_ARM) m_idxGenReg = numRegistersUsed; m_ofsStack = 0; m_wFPRegs = 0; #elif defined(TARGET_ARM64) m_idxGenReg = numRegistersUsed; m_ofsStack = 0; m_idxFPReg = 0; #else PORTABILITY_ASSERT("ArgIteratorTemplate::GetNextOffset"); #endif m_argNum = 0; m_dwFlags |= ITERATION_STARTED; } // We're done going through the args for this MetaSig if (m_argNum == this->NumFixedArgs()) return TransitionBlock::InvalidOffset; TypeHandle thValueType; CorElementType argType = this->GetNextArgumentType(m_argNum++, &thValueType); int argSize = MetaSig::GetElemSize(argType, thValueType); m_argType = argType; m_argSize = argSize; m_argTypeHandle = thValueType; #if defined(UNIX_AMD64_ABI) || defined (TARGET_ARM64) m_hasArgLocDescForStructInRegs = false; #endif #ifdef TARGET_X86 #ifdef FEATURE_INTERPRETER if (m_fUnmanagedCallConv) { int argOfs = m_ofsStack; m_ofsStack += StackElemSize(argSize); return argOfs; } #endif if (IsArgumentInRegister(&m_numRegistersUsed, argType, thValueType)) { return TransitionBlock::GetOffsetOfArgumentRegisters() + (NUM_ARGUMENT_REGISTERS - m_numRegistersUsed) * sizeof(void *); } m_ofsStack -= StackElemSize(argSize); _ASSERTE(m_ofsStack >= TransitionBlock::GetOffsetOfArgs()); return m_ofsStack; #elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI m_fArgInRegisters = true; int cFPRegs = 0; int cGenRegs = 0; int cbArg = StackElemSize(argSize); switch (argType) { case ELEMENT_TYPE_R4: // 32-bit floating point argument. cFPRegs = 1; break; case ELEMENT_TYPE_R8: // 64-bit floating point argument. 
cFPRegs = 1; break; case ELEMENT_TYPE_VALUETYPE: { MethodTable *pMT = m_argTypeHandle.GetMethodTable(); if (this->IsRegPassedStruct(pMT)) { EEClass* eeClass = pMT->GetClass(); cGenRegs = 0; for (int i = 0; i < eeClass->GetNumberEightBytes(); i++) { switch (eeClass->GetEightByteClassification(i)) { case SystemVClassificationTypeInteger: case SystemVClassificationTypeIntegerReference: case SystemVClassificationTypeIntegerByRef: cGenRegs++; break; case SystemVClassificationTypeSSE: cFPRegs++; break; default: _ASSERTE(false); break; } } // Check if we have enough registers available for the struct passing if ((cFPRegs + m_idxFPReg <= NUM_FLOAT_ARGUMENT_REGISTERS) && (cGenRegs + m_idxGenReg) <= NUM_ARGUMENT_REGISTERS) { m_argLocDescForStructInRegs.Init(); m_argLocDescForStructInRegs.m_cGenReg = cGenRegs; m_argLocDescForStructInRegs.m_cFloatReg = cFPRegs; m_argLocDescForStructInRegs.m_idxGenReg = m_idxGenReg; m_argLocDescForStructInRegs.m_idxFloatReg = m_idxFPReg; m_argLocDescForStructInRegs.m_eeClass = eeClass; m_hasArgLocDescForStructInRegs = true; m_idxGenReg += cGenRegs; m_idxFPReg += cFPRegs; return TransitionBlock::StructInRegsOffset; } } // Set the register counts to indicate that this argument will not be passed in registers cFPRegs = 0; cGenRegs = 0; break; } default: cGenRegs = cbArg / 8; // GP reg size break; } if ((cFPRegs > 0) && (cFPRegs + m_idxFPReg <= NUM_FLOAT_ARGUMENT_REGISTERS)) { int argOfs = TransitionBlock::GetOffsetOfFloatArgumentRegisters() + m_idxFPReg * 16; m_idxFPReg += cFPRegs; return argOfs; } else if ((cGenRegs > 0) && (m_idxGenReg + cGenRegs <= NUM_ARGUMENT_REGISTERS)) { int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 8; m_idxGenReg += cGenRegs; return argOfs; } m_fArgInRegisters = false; int argOfs = TransitionBlock::GetOffsetOfArgs() + m_ofsStack; m_ofsStack += cbArg; return argOfs; #else // Each argument takes exactly one slot on AMD64 on Windows int argOfs = m_ofsStack; m_ofsStack += sizeof(void *); return argOfs; #endif #elif defined(TARGET_ARM) // First look at the underlying type of the argument to determine some basic properties: // 1) The size of the argument in bytes (rounded up to the stack slot size of 4 if necessary). // 2) Whether the argument represents a floating point primitive (ELEMENT_TYPE_R4 or ELEMENT_TYPE_R8). // 3) Whether the argument requires 64-bit alignment (anything that contains an Int64/UInt64). bool fFloatingPoint = false; bool fRequiresAlign64Bit = false; switch (argType) { case ELEMENT_TYPE_I8: case ELEMENT_TYPE_U8: // 64-bit integers require 64-bit alignment on ARM. fRequiresAlign64Bit = true; break; case ELEMENT_TYPE_R4: // 32-bit floating point argument. fFloatingPoint = true; break; case ELEMENT_TYPE_R8: // 64-bit floating point argument. fFloatingPoint = true; fRequiresAlign64Bit = true; break; case ELEMENT_TYPE_VALUETYPE: { // Value type case: extract the alignment requirement, note that this has to handle // the interop "native value types". fRequiresAlign64Bit = thValueType.RequiresAlign8(); #ifdef FEATURE_HFA // Handle HFAs: packed structures of 1-4 floats or doubles that are passed in FP argument // registers if possible. if (thValueType.IsHFA()) { fFloatingPoint = true; } #endif break; } default: // The default is 4-byte arguments (or promoted to 4 bytes), non-FP, requiring no // 64-bit alignment. break; } // Now attempt to place the argument into some combination of floating point or general registers and // the stack.
// Save the alignment requirement m_fRequires64BitAlignment = fRequiresAlign64Bit; int cbArg = StackElemSize(argSize); _ASSERTE((cbArg % TARGET_POINTER_SIZE) == 0); // Ignore floating point argument placement in registers if we're dealing with a vararg function (the ABI // specifies this so that vararg processing on the callee side is simplified). #ifndef ARM_SOFTFP if (fFloatingPoint && !this->IsVarArg()) { // Handle floating point (primitive) arguments. // First determine whether we can place the argument in VFP registers. There are 16 32-bit // and 8 64-bit argument registers that share the same register space (e.g. D0 overlaps S0 and // S1). The ABI specifies that VFP values will be passed in the lowest sequence of registers that // haven't been used yet and have the required alignment. So the sequence (float, double, float) // would be mapped to (S0, D1, S1) or (S0, S2/S3, S1). // // We use a 16-bit bitmap to record which registers have been used so far. // // So we can use the same basic loop for each argument type (float, double or HFA struct) we set up // the following input parameters based on the size and alignment requirements of the arguments: // wAllocMask : bitmask of the number of 32-bit registers we need (1 for 1, 3 for 2, 7 for 3 etc.) // cSteps : number of loop iterations it'll take to search the 16 registers // cShift : how many bits to shift the allocation mask on each attempt WORD wAllocMask = (1 << (cbArg / 4)) - 1; WORD cSteps = (WORD)(fRequiresAlign64Bit ? 9 - (cbArg / 8) : 17 - (cbArg / 4)); WORD cShift = fRequiresAlign64Bit ? 2 : 1; // Look through the availability bitmask for a free register or register pair. for (WORD i = 0; i < cSteps; i++) { if ((m_wFPRegs & wAllocMask) == 0) { // We found one, mark the register or registers as used. m_wFPRegs |= wAllocMask; // Indicate the registers used to the caller and return. return TransitionBlock::GetOffsetOfFloatArgumentRegisters() + (i * cShift * 4); } wAllocMask <<= cShift; } // The FP argument is going to live on the stack. Once this happens the ABI demands we mark all FP // registers as unavailable. m_wFPRegs = 0xffff; // Doubles or HFAs containing doubles need the stack aligned appropriately. if (fRequiresAlign64Bit) { m_ofsStack = (int)ALIGN_UP(m_ofsStack, TARGET_POINTER_SIZE * 2); } // Indicate the stack location of the argument to the caller. int argOfs = TransitionBlock::GetOffsetOfArgs() + m_ofsStack; // Record the stack usage. m_ofsStack += cbArg; return argOfs; } #endif // ARM_SOFTFP // // Handle the non-floating point case. // if (m_idxGenReg < 4) { if (fRequiresAlign64Bit) { // The argument requires 64-bit alignment. Align either the next general argument register if // we have any left. See step C.3 in the algorithm in the ABI spec. m_idxGenReg = (int)ALIGN_UP(m_idxGenReg, 2); } int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 4; int cRemainingRegs = 4 - m_idxGenReg; if (cbArg <= cRemainingRegs * TARGET_POINTER_SIZE) { // Mark the registers just allocated as used. m_idxGenReg += ALIGN_UP(cbArg, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; return argOfs; } // The ABI supports splitting a non-FP argument across registers and the stack. But this is // disabled if the FP arguments already overflowed onto the stack (i.e. the stack index is not // zero). The following code marks the general argument registers as exhausted if this condition // holds. See steps C.5 in the algorithm in the ABI spec. 
m_idxGenReg = 4; if (m_ofsStack == 0) { m_ofsStack += cbArg - cRemainingRegs * TARGET_POINTER_SIZE; return argOfs; } } if (fRequiresAlign64Bit) { // The argument requires 64-bit alignment. If it is going to be passed on the stack, align // the next stack slot. See step C.6 in the algorithm in the ABI spec. m_ofsStack = (int)ALIGN_UP(m_ofsStack, TARGET_POINTER_SIZE * 2); } int argOfs = TransitionBlock::GetOffsetOfArgs() + m_ofsStack; // Advance the stack pointer over the argument just placed. m_ofsStack += cbArg; return argOfs; #elif defined(TARGET_ARM64) int cFPRegs = 0; switch (argType) { case ELEMENT_TYPE_R4: // 32-bit floating point argument. cFPRegs = 1; break; case ELEMENT_TYPE_R8: // 64-bit floating point argument. cFPRegs = 1; break; case ELEMENT_TYPE_VALUETYPE: { // Handle HFAs: packed structures of 1-4 floats, doubles, or short vectors // that are passed in FP argument registers if possible. if (thValueType.IsHFA()) { CorInfoHFAElemType type = thValueType.GetHFAType(); m_argLocDescForStructInRegs.Init(); m_argLocDescForStructInRegs.m_idxFloatReg = m_idxFPReg; m_argLocDescForStructInRegs.setHFAFieldSize(type); cFPRegs = argSize/m_argLocDescForStructInRegs.m_hfaFieldSize; m_argLocDescForStructInRegs.m_cFloatReg = cFPRegs; // Check if we have enough registers available for the HFA passing if ((cFPRegs + m_idxFPReg) <= 8) { m_hasArgLocDescForStructInRegs = true; } } else { // Composite greater than 16bytes should be passed by reference if (argSize > ENREGISTERED_PARAMTYPE_MAXSIZE) { argSize = sizeof(TADDR); } } break; } default: break; } const bool isValueType = (argType == ELEMENT_TYPE_VALUETYPE); const bool isFloatHfa = thValueType.IsFloatHfa(); const int cbArg = StackElemSize(argSize, isValueType, isFloatHfa); if (cFPRegs>0 && !this->IsVarArg()) { if (cFPRegs + m_idxFPReg <= 8) { // Each floating point register in the argument area is 16 bytes. int argOfs = TransitionBlock::GetOffsetOfFloatArgumentRegisters() + m_idxFPReg * 16; m_idxFPReg += cFPRegs; return argOfs; } else { m_idxFPReg = 8; } } else { #if !defined(OSX_ARM64_ABI) _ASSERTE((cbArg% TARGET_POINTER_SIZE) == 0); #endif const int regSlots = ALIGN_UP(cbArg, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; // Only x0-x7 are valid argument registers (x8 is always the return buffer) if (m_idxGenReg + regSlots <= 8) { // The entirety of the arg fits in the register slots. int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 8; m_idxGenReg += regSlots; return argOfs; } else { #ifdef _WIN32 if (this->IsVarArg() && m_idxGenReg < 8) { // Address the Windows ARM64 varargs case where an arg is split between regs and stack. // This can happen in the varargs case because the first 64 bytes of the stack are loaded // into x0-x7, and any remaining stack arguments are placed normally. int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 8; // Increase m_ofsStack to account for the space used for the remainder of the arg after // registers are filled. m_ofsStack += cbArg + (m_idxGenReg - 8) * TARGET_POINTER_SIZE; // We used up the remaining reg slots. m_idxGenReg = 8; return argOfs; } else #endif { // Don't use reg slots for this. It will be passed purely on the stack arg space. 
m_idxGenReg = 8; } } } #ifdef OSX_ARM64_ABI int alignment; if (!isValueType) { _ASSERTE((cbArg & (cbArg - 1)) == 0); alignment = cbArg; } else if (isFloatHfa) { alignment = 4; } else { alignment = 8; } m_ofsStack = (int)ALIGN_UP(m_ofsStack, alignment); #endif // OSX_ARM64_ABI int argOfs = TransitionBlock::GetOffsetOfArgs() + m_ofsStack; m_ofsStack += cbArg; return argOfs; #else PORTABILITY_ASSERT("ArgIteratorTemplate::GetNextOffset"); return TransitionBlock::InvalidOffset; #endif } template<class ARGITERATOR_BASE> void ArgIteratorTemplate<ARGITERATOR_BASE>::ComputeReturnFlags() { CONTRACTL { INSTANCE_CHECK; if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS; if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS; if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); } MODE_ANY; } CONTRACTL_END TypeHandle thValueType; CorElementType type = this->GetReturnType(&thValueType); DWORD flags = RETURN_FLAGS_COMPUTED; switch (type) { case ELEMENT_TYPE_TYPEDBYREF: #ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE if (sizeof(TypedByRef) > ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE) flags |= RETURN_HAS_RET_BUFFER; #else flags |= RETURN_HAS_RET_BUFFER; #endif break; case ELEMENT_TYPE_R4: #ifndef ARM_SOFTFP flags |= sizeof(float) << RETURN_FP_SIZE_SHIFT; #endif break; case ELEMENT_TYPE_R8: #ifndef ARM_SOFTFP flags |= sizeof(double) << RETURN_FP_SIZE_SHIFT; #endif break; case ELEMENT_TYPE_VALUETYPE: #ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE { _ASSERTE(!thValueType.IsNull()); #if defined(UNIX_AMD64_ABI) MethodTable *pMT = thValueType.AsMethodTable(); if (pMT->IsRegPassedStruct()) { EEClass* eeClass = pMT->GetClass(); if (eeClass->GetNumberEightBytes() == 1) { // Structs occupying just one eightbyte are treated as int / double if (eeClass->GetEightByteClassification(0) == SystemVClassificationTypeSSE) { flags |= sizeof(double) << RETURN_FP_SIZE_SHIFT; } } else { // Size of the struct is 16 bytes flags |= (16 << RETURN_FP_SIZE_SHIFT); // The lowest two bits of the size encode the order of the int and SSE fields if (eeClass->GetEightByteClassification(0) == SystemVClassificationTypeSSE) { flags |= (1 << RETURN_FP_SIZE_SHIFT); } if (eeClass->GetEightByteClassification(1) == SystemVClassificationTypeSSE) { flags |= (2 << RETURN_FP_SIZE_SHIFT); } } break; } #else // UNIX_AMD64_ABI #ifdef FEATURE_HFA if (thValueType.IsHFA() && !this->IsVarArg()) { CorInfoHFAElemType hfaType = thValueType.GetHFAType(); int hfaFieldSize = ArgLocDesc::getHFAFieldSize(hfaType); flags |= ((4 * hfaFieldSize) << RETURN_FP_SIZE_SHIFT); break; } #endif size_t size = thValueType.GetSize(); #if defined(TARGET_X86) || defined(TARGET_AMD64) // Return value types whose size is not a power of 2 use a RetBuffArg if ((size & (size-1)) != 0) { flags |= RETURN_HAS_RET_BUFFER; break; } #endif // defined(TARGET_X86) || defined(TARGET_AMD64) if (size <= ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE) break; #endif // UNIX_AMD64_ABI } #endif // ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE // Value types are returned using a return buffer by default flags |= RETURN_HAS_RET_BUFFER; break; default: break; } m_dwFlags |= flags; } template<class ARGITERATOR_BASE> void ArgIteratorTemplate<ARGITERATOR_BASE>::ForceSigWalk() { CONTRACTL { INSTANCE_CHECK; if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS; if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS; if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); } MODE_ANY; } CONTRACTL_END // This can only be used before
the actual argument iteration has started _ASSERTE((m_dwFlags & ITERATION_STARTED) == 0); #ifdef TARGET_X86 // // x86 is special as always // int numRegistersUsed = 0; int nSizeOfArgStack = 0; if (this->HasThis()) numRegistersUsed++; if (this->HasRetBuffArg() && IsRetBuffPassedAsFirstArg()) numRegistersUsed++; if (this->IsVarArg()) { nSizeOfArgStack += sizeof(void *); numRegistersUsed = NUM_ARGUMENT_REGISTERS; // Nothing else gets passed in registers for varargs } #ifdef FEATURE_INTERPRETER BYTE callconv = CallConv(); switch (callconv) { case IMAGE_CEE_CS_CALLCONV_C: case IMAGE_CEE_CS_CALLCONV_STDCALL: numRegistersUsed = NUM_ARGUMENT_REGISTERS; nSizeOfArgStack = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *); break; case IMAGE_CEE_CS_CALLCONV_THISCALL: case IMAGE_CEE_CS_CALLCONV_FASTCALL: _ASSERTE_MSG(false, "Unsupported calling convention."); default: break; } #endif // FEATURE_INTERPRETER DWORD nArgs = this->NumFixedArgs(); for (DWORD i = 0; i < nArgs; i++) { TypeHandle thValueType; CorElementType type = this->GetNextArgumentType(i, &thValueType); if (!IsArgumentInRegister(&numRegistersUsed, type, thValueType)) { int structSize = MetaSig::GetElemSize(type, thValueType); nSizeOfArgStack += StackElemSize(structSize); #ifndef DACCESS_COMPILE if (nSizeOfArgStack > MAX_ARG_SIZE) { #ifdef _DEBUG // We should never throw an exception in the "FORBIDGC_LOADER_USE_ENABLED" mode. // The contract violation is required to work around a bug in the static contract analyzer. _ASSERTE(!FORBIDGC_LOADER_USE_ENABLED()); CONTRACT_VIOLATION(ThrowsViolation); #endif COMPlusThrow(kNotSupportedException); } #endif } } if (this->HasParamType()) { DWORD paramTypeFlags = 0; if (numRegistersUsed < NUM_ARGUMENT_REGISTERS) { numRegistersUsed++; paramTypeFlags = (numRegistersUsed == 1) ? PARAM_TYPE_REGISTER_ECX : PARAM_TYPE_REGISTER_EDX; } else { nSizeOfArgStack += sizeof(void *); paramTypeFlags = PARAM_TYPE_REGISTER_STACK; } m_dwFlags |= paramTypeFlags; } #else // TARGET_X86 int maxOffset = TransitionBlock::GetOffsetOfArgs(); int ofs; while (TransitionBlock::InvalidOffset != (ofs = GetNextOffset())) { int stackElemSize; #ifdef TARGET_AMD64 #ifdef UNIX_AMD64_ABI if (m_fArgInRegisters) { // Arguments passed in registers don't consume any stack continue; } stackElemSize = StackElemSize(GetArgSize()); #else // UNIX_AMD64_ABI // All stack arguments take just one stack slot on AMD64 because arguments bigger // than a stack slot are passed by reference. stackElemSize = TARGET_POINTER_SIZE; #endif // UNIX_AMD64_ABI #else // TARGET_AMD64 TypeHandle thValueType; const CorElementType argType = GetArgType(&thValueType); const bool isValueType = (argType == ELEMENT_TYPE_VALUETYPE); stackElemSize = StackElemSize(GetArgSize(), isValueType, thValueType.IsFloatHfa()); #if defined(ENREGISTERED_PARAMTYPE_MAXSIZE) if (IsArgPassedByRef()) stackElemSize = TARGET_POINTER_SIZE; #endif #endif // TARGET_AMD64 int endOfs = ofs + stackElemSize; if (endOfs > maxOffset) { #if !defined(DACCESS_COMPILE) if (endOfs > MAX_ARG_SIZE) { #ifdef _DEBUG // We should never throw an exception in the "FORBIDGC_LOADER_USE_ENABLED" mode. // The contract violation is required to work around a bug in the static contract analyzer.
_ASSERTE(!FORBIDGC_LOADER_USE_ENABLED()); CONTRACT_VIOLATION(ThrowsViolation); #endif COMPlusThrow(kNotSupportedException); } #endif maxOffset = endOfs; } } // Clear the iterator started flag m_dwFlags &= ~ITERATION_STARTED; int nSizeOfArgStack = maxOffset - TransitionBlock::GetOffsetOfArgs(); #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) nSizeOfArgStack = (nSizeOfArgStack > (int)sizeof(ArgumentRegisters)) ? (nSizeOfArgStack - sizeof(ArgumentRegisters)) : 0; #endif #endif // TARGET_X86 // arg stack size is rounded to the pointer size on all platforms. nSizeOfArgStack = (int)ALIGN_UP(nSizeOfArgStack, TARGET_POINTER_SIZE); // Cache the result m_nSizeOfArgStack = nSizeOfArgStack; m_dwFlags |= SIZE_OF_ARG_STACK_COMPUTED; this->Reset(); } class ArgIteratorBase { protected: MetaSig * m_pSig; FORCEINLINE CorElementType GetReturnType(TypeHandle * pthValueType) { WRAPPER_NO_CONTRACT; #ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE return m_pSig->GetReturnTypeNormalized(pthValueType); #else return m_pSig->GetReturnTypeNormalized(); #endif } FORCEINLINE CorElementType GetNextArgumentType(DWORD iArg, TypeHandle * pthValueType) { WRAPPER_NO_CONTRACT; _ASSERTE(iArg == m_pSig->GetArgNum()); CorElementType et = m_pSig->PeekArgNormalized(pthValueType); m_pSig->SkipArg(); return et; } FORCEINLINE void Reset() { WRAPPER_NO_CONTRACT; m_pSig->Reset(); } FORCEINLINE BOOL IsRegPassedStruct(MethodTable* pMT) { return pMT->IsRegPassedStruct(); } public: BOOL HasThis() { LIMITED_METHOD_CONTRACT; return m_pSig->HasThis(); } BOOL HasParamType() { LIMITED_METHOD_CONTRACT; return m_pSig->GetCallingConventionInfo() & CORINFO_CALLCONV_PARAMTYPE; } BOOL IsVarArg() { LIMITED_METHOD_CONTRACT; return m_pSig->IsVarArg() || m_pSig->IsTreatAsVarArg(); } DWORD NumFixedArgs() { LIMITED_METHOD_CONTRACT; return m_pSig->NumFixedArgs(); } #ifdef FEATURE_INTERPRETER BYTE CallConv() { return m_pSig->GetCallingConvention(); } #endif // FEATURE_INTERPRETER // // The following is used by the profiler to dig into the iterator for // discovering if the method has a This pointer or a return buffer. // Do not use this to re-initialize the signature, use the exposed Init() // method in this class. // MetaSig *GetSig(void) { return m_pSig; } }; class ArgIterator : public ArgIteratorTemplate<ArgIteratorBase> { public: ArgIterator(MetaSig * pSig) { m_pSig = pSig; } // This API returns true if we are returning a structure in registers instead of using a byref return buffer BOOL HasNonStandardByvalReturn() { WRAPPER_NO_CONTRACT; #ifdef ENREGISTERED_RETURNTYPE_MAXSIZE CorElementType type = m_pSig->GetReturnTypeNormalized(); return (type == ELEMENT_TYPE_VALUETYPE || type == ELEMENT_TYPE_TYPEDBYREF) && !HasRetBuffArg(); #else return FALSE; #endif } BOOL HasValueTypeReturn() { WRAPPER_NO_CONTRACT; TypeHandle thValueType; CorElementType type = m_pSig->GetReturnTypeNormalized(&thValueType); // Enums are normalized to their underlying type when passing to and from functions. // This occurs in both managed and native calling conventions. 
return type == ELEMENT_TYPE_VALUETYPE && !thValueType.IsEnum(); } }; // Convenience helper inline BOOL HasRetBuffArg(MetaSig * pSig) { WRAPPER_NO_CONTRACT; ArgIterator argit(pSig); return argit.HasRetBuffArg(); } #ifdef UNIX_X86_ABI // For UNIX_X86_ABI and unmanaged functions, we always need a RetBuf if the return type is VALUETYPE inline BOOL HasRetBuffArgUnmanagedFixup(MetaSig * pSig) { WRAPPER_NO_CONTRACT; // We cannot just use pSig->GetReturnType() here since it will return ELEMENT_TYPE_VALUETYPE for enums CorElementType type = pSig->GetRetTypeHandleThrowing().GetVerifierCorElementType(); return type == ELEMENT_TYPE_VALUETYPE; } #endif inline BOOL IsRetBuffPassedAsFirstArg() { WRAPPER_NO_CONTRACT; #ifndef TARGET_ARM64 return TRUE; #else return FALSE; #endif } #endif // __CALLING_CONVENTION_INCLUDED
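// --- Editor's note: a minimal usage sketch, not part of the original header. ---
// The iterator above is typically driven the same way ForceSigWalk drives it:
// GetNextOffset() yields the byte offset of each fixed argument relative to the
// TransitionBlock until it reports TransitionBlock::InvalidOffset, and
// GetArgLoc() decodes an offset into its register/stack assignment. "pSig" is
// assumed to be a valid MetaSig* for the method of interest.
//
//   ArgIterator argit(pSig);
//   int ofs;
//   while ((ofs = argit.GetNextOffset()) != TransitionBlock::InvalidOffset)
//   {
//       ArgLocDesc loc;
//       argit.GetArgLoc(ofs, &loc); // register indices / stack bytes for this argument
//   }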
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/coreclr/nativeaot/libunwind/src/config.h
//===----------------------------- config.h -------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // // // Defines macros used within libunwind project. // //===----------------------------------------------------------------------===// #ifndef LIBUNWIND_CONFIG_H #define LIBUNWIND_CONFIG_H #include <assert.h> #include <stdio.h> #include <stdint.h> #include <stdlib.h> // Define static_assert() unless already defined by compiler. #ifndef __has_feature #define __has_feature(__x) 0 #endif #if !(__has_feature(cxx_static_assert)) && !defined(static_assert) #define static_assert(__b, __m) \ extern int compile_time_assert_failed[ ( __b ) ? 1 : -1 ] \ __attribute__( ( unused ) ); #endif // Platform specific configuration defines. #ifdef __APPLE__ #if defined(FOR_DYLD) #define _LIBUNWIND_SUPPORT_COMPACT_UNWIND #else #define _LIBUNWIND_SUPPORT_COMPACT_UNWIND #define _LIBUNWIND_SUPPORT_DWARF_UNWIND 1 #endif #elif defined(_WIN32) #ifdef __SEH__ #define _LIBUNWIND_SUPPORT_SEH_UNWIND 1 #else #define _LIBUNWIND_SUPPORT_DWARF_UNWIND 1 #endif #else #if defined(__ARM_DWARF_EH__) || !defined(__arm__) #define _LIBUNWIND_SUPPORT_DWARF_UNWIND 1 #define _LIBUNWIND_SUPPORT_DWARF_INDEX 1 #endif #endif #if defined(_LIBUNWIND_DISABLE_VISIBILITY_ANNOTATIONS) #define _LIBUNWIND_EXPORT #define _LIBUNWIND_HIDDEN #else #if !defined(__ELF__) && !defined(__MACH__) #define _LIBUNWIND_EXPORT __declspec(dllexport) #define _LIBUNWIND_HIDDEN #else #define _LIBUNWIND_EXPORT __attribute__((visibility("default"))) #define _LIBUNWIND_HIDDEN __attribute__((visibility("hidden"))) #endif #endif #define STR(a) #a #define XSTR(a) STR(a) #define SYMBOL_NAME(name) XSTR(__USER_LABEL_PREFIX__) #name #if defined(__APPLE__) #define _LIBUNWIND_WEAK_ALIAS(name, aliasname) \ __asm__(".globl " SYMBOL_NAME(aliasname)); \ __asm__(SYMBOL_NAME(aliasname) " = " SYMBOL_NAME(name)); \ extern "C" _LIBUNWIND_EXPORT __typeof(name) aliasname \ __attribute__((weak_import)); #elif defined(__ELF__) #define _LIBUNWIND_WEAK_ALIAS(name, aliasname) \ extern "C" _LIBUNWIND_EXPORT __typeof(name) aliasname \ __attribute__((weak, alias(#name))); #elif defined(_WIN32) #if defined(__MINGW32__) #define _LIBUNWIND_WEAK_ALIAS(name, aliasname) \ extern "C" _LIBUNWIND_EXPORT __typeof(name) aliasname \ __attribute__((alias(#name))); #else #define _LIBUNWIND_WEAK_ALIAS(name, aliasname) \ __pragma(comment(linker, "/alternatename:" SYMBOL_NAME(aliasname) "=" \ SYMBOL_NAME(name))) \ extern "C" _LIBUNWIND_EXPORT __typeof(name) aliasname; #endif #else #error Unsupported target #endif #if (defined(__APPLE__) && defined(__arm__)) || defined(__USING_SJLJ_EXCEPTIONS__) #define _LIBUNWIND_BUILD_SJLJ_APIS #endif #if defined(__i386__) || defined(__x86_64__) || defined(__ppc__) || defined(__ppc64__) || defined(__powerpc64__) #define _LIBUNWIND_SUPPORT_FRAME_APIS #endif #if defined(__i386__) || defined(__x86_64__) || \ defined(__ppc__) || defined(__ppc64__) || defined(__powerpc64__) || \ (!defined(__APPLE__) && defined(__arm__)) || \ (defined(__arm64__) || defined(__aarch64__)) || \ defined(__mips__) #if !defined(_LIBUNWIND_BUILD_SJLJ_APIS) #define _LIBUNWIND_BUILD_ZERO_COST_APIS #endif #endif #if defined(__powerpc64__) && defined(_ARCH_PWR8) #define PPC64_HAS_VMX #endif #if defined(NDEBUG) && defined(_LIBUNWIND_IS_BAREMETAL) #define _LIBUNWIND_ABORT(msg) \ do { \ abort(); \ } while (0) #else #define 
_LIBUNWIND_ABORT(msg) \ do { \ fprintf(stderr, "libunwind: %s %s:%d - %s\n", __func__, __FILE__, \ __LINE__, msg); \ fflush(stderr); \ abort(); \ } while (0) #endif #if defined(NDEBUG) && defined(_LIBUNWIND_IS_BAREMETAL) #define _LIBUNWIND_LOG0(msg) #define _LIBUNWIND_LOG(msg, ...) #else #define _LIBUNWIND_LOG0(msg) \ fprintf(stderr, "libunwind: " msg "\n") #define _LIBUNWIND_LOG(msg, ...) \ fprintf(stderr, "libunwind: " msg "\n", __VA_ARGS__) #endif #if defined(NDEBUG) #define _LIBUNWIND_LOG_IF_FALSE(x) x #else #define _LIBUNWIND_LOG_IF_FALSE(x) \ do { \ bool _ret = x; \ if (!_ret) \ _LIBUNWIND_LOG("" #x " failed in %s", __FUNCTION__); \ } while (0) #endif // Macros that define away in non-Debug builds #ifdef NDEBUG #define _LIBUNWIND_DEBUG_LOG(msg, ...) #define _LIBUNWIND_TRACE_API(msg, ...) #define _LIBUNWIND_TRACING_UNWINDING (0) #define _LIBUNWIND_TRACING_DWARF (0) #define _LIBUNWIND_TRACE_UNWINDING(msg, ...) #define _LIBUNWIND_TRACE_DWARF(...) #else #ifdef __cplusplus extern "C" { #endif extern bool logAPIs(); extern bool logUnwinding(); extern bool logDWARF(); #ifdef __cplusplus } #endif #define _LIBUNWIND_DEBUG_LOG(msg, ...) _LIBUNWIND_LOG(msg, __VA_ARGS__) #define _LIBUNWIND_TRACE_API(msg, ...) \ do { \ if (logAPIs()) \ _LIBUNWIND_LOG(msg, __VA_ARGS__); \ } while (0) #define _LIBUNWIND_TRACING_UNWINDING logUnwinding() #define _LIBUNWIND_TRACING_DWARF logDWARF() #define _LIBUNWIND_TRACE_UNWINDING(msg, ...) \ do { \ if (logUnwinding()) \ _LIBUNWIND_LOG(msg, __VA_ARGS__); \ } while (0) #define _LIBUNWIND_TRACE_DWARF(...) \ do { \ if (logDWARF()) \ fprintf(stderr, __VA_ARGS__); \ } while (0) #endif #ifdef __cplusplus // Used to fit UnwindCursor and Registers_xxx types against unw_context_t / // unw_cursor_t sized memory blocks. #if defined(_LIBUNWIND_IS_NATIVE_ONLY) # define COMP_OP == #else # define COMP_OP <= #endif template <typename _Type, typename _Mem> struct check_fit { template <typename T> struct blk_count { static const size_t count = (sizeof(T) + sizeof(uint64_t) - 1) / sizeof(uint64_t); }; static const bool does_fit = (blk_count<_Type>::count COMP_OP blk_count<_Mem>::count); }; #undef COMP_OP #endif // __cplusplus #endif // LIBUNWIND_CONFIG_H
//===----------------------------- config.h -------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // // // Defines macros used within libunwind project. // //===----------------------------------------------------------------------===// #ifndef LIBUNWIND_CONFIG_H #define LIBUNWIND_CONFIG_H #include <assert.h> #include <stdio.h> #include <stdint.h> #include <stdlib.h> // Define static_assert() unless already defined by compiler. #ifndef __has_feature #define __has_feature(__x) 0 #endif #if !(__has_feature(cxx_static_assert)) && !defined(static_assert) #define static_assert(__b, __m) \ extern int compile_time_assert_failed[ ( __b ) ? 1 : -1 ] \ __attribute__( ( unused ) ); #endif // Platform specific configuration defines. #ifdef __APPLE__ #if defined(FOR_DYLD) #define _LIBUNWIND_SUPPORT_COMPACT_UNWIND #else #define _LIBUNWIND_SUPPORT_COMPACT_UNWIND #define _LIBUNWIND_SUPPORT_DWARF_UNWIND 1 #endif #elif defined(_WIN32) #ifdef __SEH__ #define _LIBUNWIND_SUPPORT_SEH_UNWIND 1 #else #define _LIBUNWIND_SUPPORT_DWARF_UNWIND 1 #endif #else #if defined(__ARM_DWARF_EH__) || !defined(__arm__) #define _LIBUNWIND_SUPPORT_DWARF_UNWIND 1 #define _LIBUNWIND_SUPPORT_DWARF_INDEX 1 #endif #endif #if defined(_LIBUNWIND_DISABLE_VISIBILITY_ANNOTATIONS) #define _LIBUNWIND_EXPORT #define _LIBUNWIND_HIDDEN #else #if !defined(__ELF__) && !defined(__MACH__) #define _LIBUNWIND_EXPORT __declspec(dllexport) #define _LIBUNWIND_HIDDEN #else #define _LIBUNWIND_EXPORT __attribute__((visibility("default"))) #define _LIBUNWIND_HIDDEN __attribute__((visibility("hidden"))) #endif #endif #define STR(a) #a #define XSTR(a) STR(a) #define SYMBOL_NAME(name) XSTR(__USER_LABEL_PREFIX__) #name #if defined(__APPLE__) #define _LIBUNWIND_WEAK_ALIAS(name, aliasname) \ __asm__(".globl " SYMBOL_NAME(aliasname)); \ __asm__(SYMBOL_NAME(aliasname) " = " SYMBOL_NAME(name)); \ extern "C" _LIBUNWIND_EXPORT __typeof(name) aliasname \ __attribute__((weak_import)); #elif defined(__ELF__) #define _LIBUNWIND_WEAK_ALIAS(name, aliasname) \ extern "C" _LIBUNWIND_EXPORT __typeof(name) aliasname \ __attribute__((weak, alias(#name))); #elif defined(_WIN32) #if defined(__MINGW32__) #define _LIBUNWIND_WEAK_ALIAS(name, aliasname) \ extern "C" _LIBUNWIND_EXPORT __typeof(name) aliasname \ __attribute__((alias(#name))); #else #define _LIBUNWIND_WEAK_ALIAS(name, aliasname) \ __pragma(comment(linker, "/alternatename:" SYMBOL_NAME(aliasname) "=" \ SYMBOL_NAME(name))) \ extern "C" _LIBUNWIND_EXPORT __typeof(name) aliasname; #endif #else #error Unsupported target #endif #if (defined(__APPLE__) && defined(__arm__)) || defined(__USING_SJLJ_EXCEPTIONS__) #define _LIBUNWIND_BUILD_SJLJ_APIS #endif #if defined(__i386__) || defined(__x86_64__) || defined(__ppc__) || defined(__ppc64__) || defined(__powerpc64__) #define _LIBUNWIND_SUPPORT_FRAME_APIS #endif #if defined(__i386__) || defined(__x86_64__) || \ defined(__ppc__) || defined(__ppc64__) || defined(__powerpc64__) || \ (!defined(__APPLE__) && defined(__arm__)) || \ (defined(__arm64__) || defined(__aarch64__)) || \ defined(__mips__) #if !defined(_LIBUNWIND_BUILD_SJLJ_APIS) #define _LIBUNWIND_BUILD_ZERO_COST_APIS #endif #endif #if defined(__powerpc64__) && defined(_ARCH_PWR8) #define PPC64_HAS_VMX #endif #if defined(NDEBUG) && defined(_LIBUNWIND_IS_BAREMETAL) #define _LIBUNWIND_ABORT(msg) \ do { \ abort(); \ } while (0) #else #define 
_LIBUNWIND_ABORT(msg) \ do { \ fprintf(stderr, "libunwind: %s %s:%d - %s\n", __func__, __FILE__, \ __LINE__, msg); \ fflush(stderr); \ abort(); \ } while (0) #endif #if defined(NDEBUG) && defined(_LIBUNWIND_IS_BAREMETAL) #define _LIBUNWIND_LOG0(msg) #define _LIBUNWIND_LOG(msg, ...) #else #define _LIBUNWIND_LOG0(msg) \ fprintf(stderr, "libunwind: " msg "\n") #define _LIBUNWIND_LOG(msg, ...) \ fprintf(stderr, "libunwind: " msg "\n", __VA_ARGS__) #endif #if defined(NDEBUG) #define _LIBUNWIND_LOG_IF_FALSE(x) x #else #define _LIBUNWIND_LOG_IF_FALSE(x) \ do { \ bool _ret = x; \ if (!_ret) \ _LIBUNWIND_LOG("" #x " failed in %s", __FUNCTION__); \ } while (0) #endif // Macros that define away in non-Debug builds #ifdef NDEBUG #define _LIBUNWIND_DEBUG_LOG(msg, ...) #define _LIBUNWIND_TRACE_API(msg, ...) #define _LIBUNWIND_TRACING_UNWINDING (0) #define _LIBUNWIND_TRACING_DWARF (0) #define _LIBUNWIND_TRACE_UNWINDING(msg, ...) #define _LIBUNWIND_TRACE_DWARF(...) #else #ifdef __cplusplus extern "C" { #endif extern bool logAPIs(); extern bool logUnwinding(); extern bool logDWARF(); #ifdef __cplusplus } #endif #define _LIBUNWIND_DEBUG_LOG(msg, ...) _LIBUNWIND_LOG(msg, __VA_ARGS__) #define _LIBUNWIND_TRACE_API(msg, ...) \ do { \ if (logAPIs()) \ _LIBUNWIND_LOG(msg, __VA_ARGS__); \ } while (0) #define _LIBUNWIND_TRACING_UNWINDING logUnwinding() #define _LIBUNWIND_TRACING_DWARF logDWARF() #define _LIBUNWIND_TRACE_UNWINDING(msg, ...) \ do { \ if (logUnwinding()) \ _LIBUNWIND_LOG(msg, __VA_ARGS__); \ } while (0) #define _LIBUNWIND_TRACE_DWARF(...) \ do { \ if (logDWARF()) \ fprintf(stderr, __VA_ARGS__); \ } while (0) #endif #ifdef __cplusplus // Used to fit UnwindCursor and Registers_xxx types against unw_context_t / // unw_cursor_t sized memory blocks. #if defined(_LIBUNWIND_IS_NATIVE_ONLY) # define COMP_OP == #else # define COMP_OP <= #endif template <typename _Type, typename _Mem> struct check_fit { template <typename T> struct blk_count { static const size_t count = (sizeof(T) + sizeof(uint64_t) - 1) / sizeof(uint64_t); }; static const bool does_fit = (blk_count<_Type>::count COMP_OP blk_count<_Mem>::count); }; #undef COMP_OP #endif // __cplusplus #endif // LIBUNWIND_CONFIG_H
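// --- Editor's note: an illustrative sketch, not part of the original header. ---
// check_fit is meant to be consumed through static_assert, so that the opaque
// public blocks (unw_context_t / unw_cursor_t) are verified at compile time to
// be large enough for the library-internal types they back. The cursor type
// name below is hypothetical:
//
//   static_assert(check_fit<UnwindCursorImpl, unw_cursor_t>::does_fit,
//                 "UnwindCursorImpl must fit within a unw_cursor_t");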
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/coreclr/pal/src/libunwind/include/tdep-s390x/libunwind_i.h
/* libunwind - a platform-independent unwind library Copyright (C) 2002-2005 Hewlett-Packard Co Contributed by David Mosberger-Tang <davidm@hpl.hp.com> Modified for x86_64 by Max Asbock <masbock@us.ibm.com> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef S390X_LIBUNWIND_I_H #define S390X_LIBUNWIND_I_H /* Target-dependent definitions that are internal to libunwind but need to be shared with target-independent code. */ #include <stdlib.h> #include <libunwind.h> #include <stdatomic.h> #include "elf64.h" #include "mempool.h" #include "dwarf.h" struct unw_addr_space { struct unw_accessors acc; unw_caching_policy_t caching_policy; _Atomic uint32_t cache_generation; unw_word_t dyn_generation; /* see dyn-common.h */ unw_word_t dyn_info_list_addr; /* (cached) dyn_info_list_addr */ struct dwarf_rs_cache global_cache; struct unw_debug_frame_list *debug_frames; }; struct cursor { struct dwarf_cursor dwarf; /* must be first */ /* Format of sigcontext structure and address at which it is stored: */ enum { S390X_SCF_NONE = 0, /* no signal frame encountered */ S390X_SCF_LINUX_SIGFRAME = 1, /* Linux struct sigcontext */ S390X_SCF_LINUX_RT_SIGFRAME = 2, /* Linux ucontext_t */ } sigcontext_format; unw_word_t sigcontext_addr; unw_word_t sigcontext_sp; unw_word_t sigcontext_pc; int validate; ucontext_t *uc; }; static inline ucontext_t * dwarf_get_uc(const struct dwarf_cursor *cursor) { const struct cursor *c = (struct cursor *) cursor->as_arg; return c->uc; } #define DWARF_GET_LOC(l) ((l).val) # define DWARF_LOC_TYPE_MEM (0 << 0) # define DWARF_LOC_TYPE_FP (1 << 0) # define DWARF_LOC_TYPE_REG (1 << 1) # define DWARF_LOC_TYPE_VAL (1 << 2) # define DWARF_IS_REG_LOC(l) (((l).type & DWARF_LOC_TYPE_REG) != 0) # define DWARF_IS_FP_LOC(l) (((l).type & DWARF_LOC_TYPE_FP) != 0) # define DWARF_IS_MEM_LOC(l) ((l).type == DWARF_LOC_TYPE_MEM) # define DWARF_IS_VAL_LOC(l) (((l).type & DWARF_LOC_TYPE_VAL) != 0) # define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r), .type = (t) }) # define DWARF_VAL_LOC(c,v) DWARF_LOC ((v), DWARF_LOC_TYPE_VAL) # define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), DWARF_LOC_TYPE_MEM) #ifdef UNW_LOCAL_ONLY # define DWARF_NULL_LOC DWARF_LOC (0, 0) # define DWARF_IS_NULL_LOC(l) (DWARF_GET_LOC (l) == 0) # define DWARF_REG_LOC(c,r) (DWARF_LOC((unw_word_t) \ tdep_uc_addr(dwarf_get_uc(c), (r)), 0)) # define DWARF_FPREG_LOC(c,r) (DWARF_LOC((unw_word_t) \ tdep_uc_addr(dwarf_get_uc(c), (r)), 0)) #else /* !UNW_LOCAL_ONLY */ # define DWARF_NULL_LOC DWARF_LOC (0, 0) # define DWARF_IS_NULL_LOC(l) \ ({ dwarf_loc_t _l = 
(l); _l.val == 0 && _l.type == 0; }) # define DWARF_REG_LOC(c,r) DWARF_LOC((r), DWARF_LOC_TYPE_REG) # define DWARF_FPREG_LOC(c,r) DWARF_LOC((r), (DWARF_LOC_TYPE_REG \ | DWARF_LOC_TYPE_FP)) #endif /* !UNW_LOCAL_ONLY */ static inline int dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) { assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t)); if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; if (DWARF_IS_FP_LOC (loc)) return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); /* FPRs may be saved in GPRs */ if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), (unw_word_t*)val, 0, c->as_arg); if (DWARF_IS_MEM_LOC (loc)) return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), (unw_word_t*)val, 0, c->as_arg); assert(DWARF_IS_VAL_LOC (loc)); *val = *(unw_fpreg_t*) DWARF_GET_LOC (loc); return 0; } static inline int dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) { assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t)); assert(!DWARF_IS_VAL_LOC (loc)); if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; if (DWARF_IS_FP_LOC (loc)) return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); /* FPRs may be saved in GPRs */ if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), (unw_word_t*) &val, 1, c->as_arg); assert(DWARF_IS_MEM_LOC (loc)); return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), (unw_word_t*) &val, 1, c->as_arg); } static inline int dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) { assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t)); if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; /* GPRs may be saved in FPRs */ if (DWARF_IS_FP_LOC (loc)) return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), (unw_fpreg_t*)val, 0, c->as_arg); if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); if (DWARF_IS_MEM_LOC (loc)) return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); assert(DWARF_IS_VAL_LOC (loc)); *val = DWARF_GET_LOC (loc); return 0; } static inline int dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) { assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t)); assert(!DWARF_IS_VAL_LOC (loc)); if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; /* GPRs may be saved in FPRs */ if (DWARF_IS_FP_LOC (loc)) return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), (unw_fpreg_t*) &val, 1, c->as_arg); if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); assert(DWARF_IS_MEM_LOC (loc)); return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); } #define tdep_getcontext_trace unw_getcontext #define tdep_init_done UNW_OBJ(init_done) #define tdep_init_mem_validate UNW_OBJ(init_mem_validate) #define tdep_init UNW_OBJ(init) /* Platforms that support UNW_INFO_FORMAT_TABLE need to define tdep_search_unwind_table. 
*/ #define tdep_search_unwind_table dwarf_search_unwind_table #define tdep_find_unwind_table dwarf_find_unwind_table #define tdep_get_elf_image UNW_ARCH_OBJ(get_elf_image) #define tdep_get_exe_image_path UNW_ARCH_OBJ(get_exe_image_path) #define tdep_access_reg UNW_OBJ(access_reg) #define tdep_access_fpreg UNW_OBJ(access_fpreg) #define tdep_fetch_frame(c,ip,n) do {} while(0) #define tdep_cache_frame(c) 0 #define tdep_reuse_frame(c,rs) do {} while(0) #define tdep_stash_frame(cs,rs) do {} while(0) #define tdep_trace(cur,addr,n) (-UNW_ENOINFO) #define tdep_uc_addr UNW_OBJ(uc_addr) #ifdef UNW_LOCAL_ONLY # define tdep_find_proc_info(c,ip,n) \ dwarf_find_proc_info((c)->as, (ip), &(c)->pi, (n), \ (c)->as_arg) # define tdep_put_unwind_info(as,pi,arg) \ dwarf_put_unwind_info((as), (pi), (arg)) #else # define tdep_find_proc_info(c,ip,n) \ (*(c)->as->acc.find_proc_info)((c)->as, (ip), &(c)->pi, (n), \ (c)->as_arg) # define tdep_put_unwind_info(as,pi,arg) \ (*(as)->acc.put_unwind_info)((as), (pi), (arg)) #endif #define tdep_get_as(c) ((c)->dwarf.as) #define tdep_get_as_arg(c) ((c)->dwarf.as_arg) #define tdep_get_ip(c) ((c)->dwarf.ip) #define tdep_big_endian(as) 1 extern atomic_bool tdep_init_done; extern void tdep_init (void); extern void tdep_init_mem_validate (void); extern int tdep_search_unwind_table (unw_addr_space_t as, unw_word_t ip, unw_dyn_info_t *di, unw_proc_info_t *pi, int need_unwind_info, void *arg); extern void *tdep_uc_addr (unw_tdep_context_t *uc, int reg); extern int tdep_get_elf_image (struct elf_image *ei, pid_t pid, unw_word_t ip, unsigned long *segbase, unsigned long *mapoff, char *path, size_t pathlen); extern void tdep_get_exe_image_path (char *path); extern int tdep_access_reg (struct cursor *c, unw_regnum_t reg, unw_word_t *valp, int write); extern int tdep_access_fpreg (struct cursor *c, unw_regnum_t reg, unw_fpreg_t *valp, int write); #endif /* S390X_LIBUNWIND_I_H */
/* libunwind - a platform-independent unwind library Copyright (C) 2002-2005 Hewlett-Packard Co Contributed by David Mosberger-Tang <davidm@hpl.hp.com> Modified for x86_64 by Max Asbock <masbock@us.ibm.com> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef S390X_LIBUNWIND_I_H #define S390X_LIBUNWIND_I_H /* Target-dependent definitions that are internal to libunwind but need to be shared with target-independent code. */ #include <stdlib.h> #include <libunwind.h> #include <stdatomic.h> #include "elf64.h" #include "mempool.h" #include "dwarf.h" struct unw_addr_space { struct unw_accessors acc; unw_caching_policy_t caching_policy; _Atomic uint32_t cache_generation; unw_word_t dyn_generation; /* see dyn-common.h */ unw_word_t dyn_info_list_addr; /* (cached) dyn_info_list_addr */ struct dwarf_rs_cache global_cache; struct unw_debug_frame_list *debug_frames; }; struct cursor { struct dwarf_cursor dwarf; /* must be first */ /* Format of sigcontext structure and address at which it is stored: */ enum { S390X_SCF_NONE = 0, /* no signal frame encountered */ S390X_SCF_LINUX_SIGFRAME = 1, /* Linux struct sigcontext */ S390X_SCF_LINUX_RT_SIGFRAME = 2, /* Linux ucontext_t */ } sigcontext_format; unw_word_t sigcontext_addr; unw_word_t sigcontext_sp; unw_word_t sigcontext_pc; int validate; ucontext_t *uc; }; static inline ucontext_t * dwarf_get_uc(const struct dwarf_cursor *cursor) { const struct cursor *c = (struct cursor *) cursor->as_arg; return c->uc; } #define DWARF_GET_LOC(l) ((l).val) # define DWARF_LOC_TYPE_MEM (0 << 0) # define DWARF_LOC_TYPE_FP (1 << 0) # define DWARF_LOC_TYPE_REG (1 << 1) # define DWARF_LOC_TYPE_VAL (1 << 2) # define DWARF_IS_REG_LOC(l) (((l).type & DWARF_LOC_TYPE_REG) != 0) # define DWARF_IS_FP_LOC(l) (((l).type & DWARF_LOC_TYPE_FP) != 0) # define DWARF_IS_MEM_LOC(l) ((l).type == DWARF_LOC_TYPE_MEM) # define DWARF_IS_VAL_LOC(l) (((l).type & DWARF_LOC_TYPE_VAL) != 0) # define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r), .type = (t) }) # define DWARF_VAL_LOC(c,v) DWARF_LOC ((v), DWARF_LOC_TYPE_VAL) # define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), DWARF_LOC_TYPE_MEM) #ifdef UNW_LOCAL_ONLY # define DWARF_NULL_LOC DWARF_LOC (0, 0) # define DWARF_IS_NULL_LOC(l) (DWARF_GET_LOC (l) == 0) # define DWARF_REG_LOC(c,r) (DWARF_LOC((unw_word_t) \ tdep_uc_addr(dwarf_get_uc(c), (r)), 0)) # define DWARF_FPREG_LOC(c,r) (DWARF_LOC((unw_word_t) \ tdep_uc_addr(dwarf_get_uc(c), (r)), 0)) #else /* !UNW_LOCAL_ONLY */ # define DWARF_NULL_LOC DWARF_LOC (0, 0) # define DWARF_IS_NULL_LOC(l) \ ({ dwarf_loc_t _l = 
(l); _l.val == 0 && _l.type == 0; }) # define DWARF_REG_LOC(c,r) DWARF_LOC((r), DWARF_LOC_TYPE_REG) # define DWARF_FPREG_LOC(c,r) DWARF_LOC((r), (DWARF_LOC_TYPE_REG \ | DWARF_LOC_TYPE_FP)) #endif /* !UNW_LOCAL_ONLY */ static inline int dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val) { assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t)); if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; if (DWARF_IS_FP_LOC (loc)) return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); /* FPRs may be saved in GPRs */ if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), (unw_word_t*)val, 0, c->as_arg); if (DWARF_IS_MEM_LOC (loc)) return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), (unw_word_t*)val, 0, c->as_arg); assert(DWARF_IS_VAL_LOC (loc)); *val = *(unw_fpreg_t*) DWARF_GET_LOC (loc); return 0; } static inline int dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val) { assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t)); assert(!DWARF_IS_VAL_LOC (loc)); if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; if (DWARF_IS_FP_LOC (loc)) return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); /* FPRs may be saved in GPRs */ if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), (unw_word_t*) &val, 1, c->as_arg); assert(DWARF_IS_MEM_LOC (loc)); return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), (unw_word_t*) &val, 1, c->as_arg); } static inline int dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val) { assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t)); if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; /* GPRs may be saved in FPRs */ if (DWARF_IS_FP_LOC (loc)) return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), (unw_fpreg_t*)val, 0, c->as_arg); if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); if (DWARF_IS_MEM_LOC (loc)) return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val, 0, c->as_arg); assert(DWARF_IS_VAL_LOC (loc)); *val = DWARF_GET_LOC (loc); return 0; } static inline int dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val) { assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t)); assert(!DWARF_IS_VAL_LOC (loc)); if (DWARF_IS_NULL_LOC (loc)) return -UNW_EBADREG; /* GPRs may be saved in FPRs */ if (DWARF_IS_FP_LOC (loc)) return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), (unw_fpreg_t*) &val, 1, c->as_arg); if (DWARF_IS_REG_LOC (loc)) return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); assert(DWARF_IS_MEM_LOC (loc)); return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val, 1, c->as_arg); } #define tdep_getcontext_trace unw_getcontext #define tdep_init_done UNW_OBJ(init_done) #define tdep_init_mem_validate UNW_OBJ(init_mem_validate) #define tdep_init UNW_OBJ(init) /* Platforms that support UNW_INFO_FORMAT_TABLE need to define tdep_search_unwind_table. 
*/ #define tdep_search_unwind_table dwarf_search_unwind_table #define tdep_find_unwind_table dwarf_find_unwind_table #define tdep_get_elf_image UNW_ARCH_OBJ(get_elf_image) #define tdep_get_exe_image_path UNW_ARCH_OBJ(get_exe_image_path) #define tdep_access_reg UNW_OBJ(access_reg) #define tdep_access_fpreg UNW_OBJ(access_fpreg) #define tdep_fetch_frame(c,ip,n) do {} while(0) #define tdep_cache_frame(c) 0 #define tdep_reuse_frame(c,rs) do {} while(0) #define tdep_stash_frame(cs,rs) do {} while(0) #define tdep_trace(cur,addr,n) (-UNW_ENOINFO) #define tdep_uc_addr UNW_OBJ(uc_addr) #ifdef UNW_LOCAL_ONLY # define tdep_find_proc_info(c,ip,n) \ dwarf_find_proc_info((c)->as, (ip), &(c)->pi, (n), \ (c)->as_arg) # define tdep_put_unwind_info(as,pi,arg) \ dwarf_put_unwind_info((as), (pi), (arg)) #else # define tdep_find_proc_info(c,ip,n) \ (*(c)->as->acc.find_proc_info)((c)->as, (ip), &(c)->pi, (n), \ (c)->as_arg) # define tdep_put_unwind_info(as,pi,arg) \ (*(as)->acc.put_unwind_info)((as), (pi), (arg)) #endif #define tdep_get_as(c) ((c)->dwarf.as) #define tdep_get_as_arg(c) ((c)->dwarf.as_arg) #define tdep_get_ip(c) ((c)->dwarf.ip) #define tdep_big_endian(as) 1 extern atomic_bool tdep_init_done; extern void tdep_init (void); extern void tdep_init_mem_validate (void); extern int tdep_search_unwind_table (unw_addr_space_t as, unw_word_t ip, unw_dyn_info_t *di, unw_proc_info_t *pi, int need_unwind_info, void *arg); extern void *tdep_uc_addr (unw_tdep_context_t *uc, int reg); extern int tdep_get_elf_image (struct elf_image *ei, pid_t pid, unw_word_t ip, unsigned long *segbase, unsigned long *mapoff, char *path, size_t pathlen); extern void tdep_get_exe_image_path (char *path); extern int tdep_access_reg (struct cursor *c, unw_regnum_t reg, unw_word_t *valp, int write); extern int tdep_access_fpreg (struct cursor *c, unw_regnum_t reg, unw_fpreg_t *valp, int write); #endif /* S390X_LIBUNWIND_I_H */
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
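For orientation only, and not part of the PR record: the helpers listed above are the managed System.Runtime.Intrinsics entry points whose Mono code paths this PR accelerates. A minimal sketch of their observable semantics follows, assuming a .NET 7 or later runtime where these Vector128 APIs are available; the lane values are illustrative.

// Hedged sketch, not code from this PR: lane-wise "All"/"Any" comparison helpers.
using System;
using System.Runtime.Intrinsics;

class VectorComparisonDemo
{
    static void Main()
    {
        Vector128<int> left  = Vector128.Create(5, 6, 7, 8);
        Vector128<int> right = Vector128.Create(1, 2, 3, 8);

        // True only if every lane of 'left' compares greater than the matching lane of 'right'.
        Console.WriteLine(Vector128.GreaterThanAll(left, right));        // False: 8 > 8 fails

        // True if at least one lane satisfies the comparison.
        Console.WriteLine(Vector128.GreaterThanAny(left, right));        // True

        // The OrEqual variants also accept equal lanes.
        Console.WriteLine(Vector128.GreaterThanOrEqualAll(left, right)); // True

        // The LessThan* family mirrors the same pattern in the other direction.
        Console.WriteLine(Vector128.LessThanAny(right, left));           // True
    }
}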
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/coreclr/pal/src/libunwind/src/x86/Lregs.c
#define UNW_LOCAL_ONLY
#include <libunwind.h>

#if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY)
#include "Gregs.c"
#endif
#define UNW_LOCAL_ONLY
#include <libunwind.h>

#if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY)
#include "Gregs.c"
#endif
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/tests/JIT/Methodical/eh/rethrow/samerethrowtwice_d.csproj
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
  </PropertyGroup>
  <PropertyGroup>
    <DebugType>Full</DebugType>
    <Optimize>False</Optimize>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="samerethrowtwice.cs" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\..\..\common\eh_common.csproj" />
  </ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
  </PropertyGroup>
  <PropertyGroup>
    <DebugType>Full</DebugType>
    <Optimize>False</Optimize>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="samerethrowtwice.cs" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\..\..\common\eh_common.csproj" />
  </ItemGroup>
</Project>
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/libraries/System.Diagnostics.DiagnosticSource/src/System/Diagnostics/Metrics/Meter.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Threading; using System.Diagnostics; namespace System.Diagnostics.Metrics { /// <summary> /// Meter is the class responsible for creating and tracking the Instruments. /// </summary> #if ALLOW_PARTIALLY_TRUSTED_CALLERS [System.Security.SecuritySafeCriticalAttribute] #endif public class Meter : IDisposable { private static readonly List<Meter> s_allMeters = new List<Meter>(); private List<Instrument> _instruments = new List<Instrument>(); internal bool Disposed { get; private set; } /// <summary> /// Initializes a new instance of the Meter using the meter name. /// </summary> /// <param name="name">The Meter name.</param> public Meter(string name) : this (name, null) {} /// <summary> /// Initializes a new instance of the Meter using the meter name and version. /// </summary> /// <param name="name">The Meter name.</param> /// <param name="version">The optional Meter version.</param> public Meter(string name!!, string? version) { Name = name; Version = version; lock (Instrument.SyncObject) { s_allMeters.Add(this); } // Ensure the metrics EventSource has been created in case we need to log this meter GC.KeepAlive(MetricsEventSource.Log); } /// <summary> /// Returns the Meter name. /// </summary> public string Name { get; } /// <summary> /// Returns the Meter Version. /// </summary> public string? Version { get; } /// <summary> /// Create a metrics Counter object. /// </summary> /// <param name="name">The instrument name. cannot be null.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Counter is an Instrument which supports non-negative increments. /// Example uses for Counter: count the number of bytes received, count the number of requests completed, count the number of accounts created, count the number of checkpoints run, and count the number of HTTP 5xx errors. /// </remarks> public Counter<T> CreateCounter<T>(string name, string? unit = null, string? description = null) where T : struct => new Counter<T>(this, name, unit, description); /// <summary> /// Histogram is an Instrument which can be used to report arbitrary values that are likely to be statistically meaningful. It is intended for statistics such as histograms, summaries, and percentile. /// </summary> /// <param name="name">The instrument name. cannot be null.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Example uses for Histogram: the request duration and the size of the response payload. /// </remarks> public Histogram<T> CreateHistogram<T>(string name, string? unit = null, string? description = null) where T : struct => new Histogram<T>(this, name, unit, description); /// <summary> /// Create a metrics UpDownCounter object. /// </summary> /// <param name="name">The instrument name. Cannot be null.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// UpDownCounter is an Instrument which supports reporting positive or negative metric values. /// Example uses for UpDownCounter: reporting the change in active requests or queue size. 
/// </remarks> public UpDownCounter<T> CreateUpDownCounter<T>(string name, string? unit = null, string? description = null) where T : struct => new UpDownCounter<T>(this, name, unit, description); /// <summary> /// Create an ObservableUpDownCounter object. ObservableUpDownCounter is an Instrument which reports increasing or decreasing value(s) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. Cannot be null.</param> /// <param name="observeValue">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" />.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Example uses for ObservableUpDownCounter: the process heap size or the approximate number of items in a lock-free circular buffer. /// </remarks> public ObservableUpDownCounter<T> CreateObservableUpDownCounter<T>(string name, Func<T> observeValue, string? unit = null, string? description = null) where T : struct => new ObservableUpDownCounter<T>(this, name, observeValue, unit, description); /// <summary> /// Create an ObservableUpDownCounter object. ObservableUpDownCounter is an Instrument which reports increasing or decreasing value(s) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. Cannot be null.</param> /// <param name="observeValue">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" /></param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Example uses for ObservableUpDownCounter: the process heap size or the approximate number of items in a lock-free circular buffer. /// </remarks> public ObservableUpDownCounter<T> CreateObservableUpDownCounter<T>(string name, Func<Measurement<T>> observeValue, string? unit = null, string? description = null) where T : struct => new ObservableUpDownCounter<T>(this, name, observeValue, unit, description); /// <summary> /// Create an ObservableUpDownCounter object. ObservableUpDownCounter is an Instrument which reports increasing or decreasing value(s) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. Cannot be null.</param> /// <param name="observeValues">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" />.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Example uses for ObservableUpDownCounter: the process heap size or the approximate number of items in a lock-free circular buffer. /// </remarks> public ObservableUpDownCounter<T> CreateObservableUpDownCounter<T>(string name, Func<IEnumerable<Measurement<T>>> observeValues, string? unit = null, string? description = null) where T : struct => new ObservableUpDownCounter<T>(this, name, observeValues, unit, description); /// <summary> /// ObservableCounter is an Instrument which reports monotonically increasing value(s) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. 
cannot be null.</param> /// <param name="observeValue">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" />.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Example uses for ObservableCounter: The number of page faults for each process. /// </remarks> public ObservableCounter<T> CreateObservableCounter<T>(string name, Func<T> observeValue, string? unit = null, string? description = null) where T : struct => new ObservableCounter<T>(this, name, observeValue, unit, description); /// <summary> /// ObservableCounter is an Instrument which reports monotonically increasing value(s) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. cannot be null.</param> /// <param name="observeValue">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" /></param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Example uses for ObservableCounter: The number of page faults for each process. /// </remarks> public ObservableCounter<T> CreateObservableCounter<T>(string name, Func<Measurement<T>> observeValue, string? unit = null, string? description = null) where T : struct => new ObservableCounter<T>(this, name, observeValue, unit, description); /// <summary> /// ObservableCounter is an Instrument which reports monotonically increasing value(s) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. cannot be null.</param> /// <param name="observeValues">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" />.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Example uses for ObservableCounter: The number of page faults for each process. /// </remarks> public ObservableCounter<T> CreateObservableCounter<T>(string name, Func<IEnumerable<Measurement<T>>> observeValues, string? unit = null, string? description = null) where T : struct => new ObservableCounter<T>(this, name, observeValues, unit, description); /// <summary> /// ObservableGauge is an asynchronous Instrument which reports non-additive value(s) (e.g. the room temperature - it makes no sense to report the temperature value from multiple rooms and sum them up) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. cannot be null.</param> /// <param name="observeValue">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" />.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> public ObservableGauge<T> CreateObservableGauge<T>(string name, Func<T> observeValue, string? unit = null, string? 
description = null) where T : struct => new ObservableGauge<T>(this, name, observeValue, unit, description); /// <summary> /// ObservableGauge is an asynchronous Instrument which reports non-additive value(s) (e.g. the room temperature - it makes no sense to report the temperature value from multiple rooms and sum them up) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. cannot be null.</param> /// <param name="observeValue">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" />.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> public ObservableGauge<T> CreateObservableGauge<T>(string name, Func<Measurement<T>> observeValue, string? unit = null, string? description = null) where T : struct => new ObservableGauge<T>(this, name, observeValue, unit, description); /// <summary> /// ObservableGauge is an asynchronous Instrument which reports non-additive value(s) (e.g. the room temperature - it makes no sense to report the temperature value from multiple rooms and sum them up) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. cannot be null.</param> /// <param name="observeValues">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" />.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> public ObservableGauge<T> CreateObservableGauge<T>(string name, Func<IEnumerable<Measurement<T>>> observeValues, string? unit = null, string? description = null) where T : struct => new ObservableGauge<T>(this, name, observeValues, unit, description); /// <summary> /// Dispose the Meter which will disable all instruments created by this meter. /// </summary> public void Dispose() { List<Instrument>? instruments = null; lock (Instrument.SyncObject) { if (Disposed) { return; } Disposed = true; s_allMeters.Remove(this); instruments = _instruments; _instruments = new List<Instrument>(); } if (instruments is not null) { foreach (Instrument instrument in instruments) { instrument.NotifyForUnpublishedInstrument(); } } } // AddInstrument will be called when publishing the instrument (i.e. calling Instrument.Publish()). internal bool AddInstrument(Instrument instrument) { if (!_instruments.Contains(instrument)) { _instruments.Add(instrument); return true; } return false; } // Called from MeterListener.Start internal static List<Instrument>? GetPublishedInstruments() { List<Instrument>? instruments = null; if (s_allMeters.Count > 0) { instruments = new List<Instrument>(); foreach (Meter meter in s_allMeters) { foreach (Instrument instrument in meter._instruments) { instruments.Add(instrument); } } } return instruments; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Threading; using System.Diagnostics; namespace System.Diagnostics.Metrics { /// <summary> /// Meter is the class responsible for creating and tracking the Instruments. /// </summary> #if ALLOW_PARTIALLY_TRUSTED_CALLERS [System.Security.SecuritySafeCriticalAttribute] #endif public class Meter : IDisposable { private static readonly List<Meter> s_allMeters = new List<Meter>(); private List<Instrument> _instruments = new List<Instrument>(); internal bool Disposed { get; private set; } /// <summary> /// Initializes a new instance of the Meter using the meter name. /// </summary> /// <param name="name">The Meter name.</param> public Meter(string name) : this (name, null) {} /// <summary> /// Initializes a new instance of the Meter using the meter name and version. /// </summary> /// <param name="name">The Meter name.</param> /// <param name="version">The optional Meter version.</param> public Meter(string name!!, string? version) { Name = name; Version = version; lock (Instrument.SyncObject) { s_allMeters.Add(this); } // Ensure the metrics EventSource has been created in case we need to log this meter GC.KeepAlive(MetricsEventSource.Log); } /// <summary> /// Returns the Meter name. /// </summary> public string Name { get; } /// <summary> /// Returns the Meter Version. /// </summary> public string? Version { get; } /// <summary> /// Create a metrics Counter object. /// </summary> /// <param name="name">The instrument name. cannot be null.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Counter is an Instrument which supports non-negative increments. /// Example uses for Counter: count the number of bytes received, count the number of requests completed, count the number of accounts created, count the number of checkpoints run, and count the number of HTTP 5xx errors. /// </remarks> public Counter<T> CreateCounter<T>(string name, string? unit = null, string? description = null) where T : struct => new Counter<T>(this, name, unit, description); /// <summary> /// Histogram is an Instrument which can be used to report arbitrary values that are likely to be statistically meaningful. It is intended for statistics such as histograms, summaries, and percentile. /// </summary> /// <param name="name">The instrument name. cannot be null.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Example uses for Histogram: the request duration and the size of the response payload. /// </remarks> public Histogram<T> CreateHistogram<T>(string name, string? unit = null, string? description = null) where T : struct => new Histogram<T>(this, name, unit, description); /// <summary> /// Create a metrics UpDownCounter object. /// </summary> /// <param name="name">The instrument name. Cannot be null.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// UpDownCounter is an Instrument which supports reporting positive or negative metric values. /// Example uses for UpDownCounter: reporting the change in active requests or queue size. 
/// </remarks> public UpDownCounter<T> CreateUpDownCounter<T>(string name, string? unit = null, string? description = null) where T : struct => new UpDownCounter<T>(this, name, unit, description); /// <summary> /// Create an ObservableUpDownCounter object. ObservableUpDownCounter is an Instrument which reports increasing or decreasing value(s) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. Cannot be null.</param> /// <param name="observeValue">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" />.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Example uses for ObservableUpDownCounter: the process heap size or the approximate number of items in a lock-free circular buffer. /// </remarks> public ObservableUpDownCounter<T> CreateObservableUpDownCounter<T>(string name, Func<T> observeValue, string? unit = null, string? description = null) where T : struct => new ObservableUpDownCounter<T>(this, name, observeValue, unit, description); /// <summary> /// Create an ObservableUpDownCounter object. ObservableUpDownCounter is an Instrument which reports increasing or decreasing value(s) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. Cannot be null.</param> /// <param name="observeValue">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" /></param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Example uses for ObservableUpDownCounter: the process heap size or the approximate number of items in a lock-free circular buffer. /// </remarks> public ObservableUpDownCounter<T> CreateObservableUpDownCounter<T>(string name, Func<Measurement<T>> observeValue, string? unit = null, string? description = null) where T : struct => new ObservableUpDownCounter<T>(this, name, observeValue, unit, description); /// <summary> /// Create an ObservableUpDownCounter object. ObservableUpDownCounter is an Instrument which reports increasing or decreasing value(s) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. Cannot be null.</param> /// <param name="observeValues">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" />.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Example uses for ObservableUpDownCounter: the process heap size or the approximate number of items in a lock-free circular buffer. /// </remarks> public ObservableUpDownCounter<T> CreateObservableUpDownCounter<T>(string name, Func<IEnumerable<Measurement<T>>> observeValues, string? unit = null, string? description = null) where T : struct => new ObservableUpDownCounter<T>(this, name, observeValues, unit, description); /// <summary> /// ObservableCounter is an Instrument which reports monotonically increasing value(s) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. 
cannot be null.</param> /// <param name="observeValue">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" />.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Example uses for ObservableCounter: The number of page faults for each process. /// </remarks> public ObservableCounter<T> CreateObservableCounter<T>(string name, Func<T> observeValue, string? unit = null, string? description = null) where T : struct => new ObservableCounter<T>(this, name, observeValue, unit, description); /// <summary> /// ObservableCounter is an Instrument which reports monotonically increasing value(s) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. cannot be null.</param> /// <param name="observeValue">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" /></param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Example uses for ObservableCounter: The number of page faults for each process. /// </remarks> public ObservableCounter<T> CreateObservableCounter<T>(string name, Func<Measurement<T>> observeValue, string? unit = null, string? description = null) where T : struct => new ObservableCounter<T>(this, name, observeValue, unit, description); /// <summary> /// ObservableCounter is an Instrument which reports monotonically increasing value(s) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. cannot be null.</param> /// <param name="observeValues">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" />.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> /// <remarks> /// Example uses for ObservableCounter: The number of page faults for each process. /// </remarks> public ObservableCounter<T> CreateObservableCounter<T>(string name, Func<IEnumerable<Measurement<T>>> observeValues, string? unit = null, string? description = null) where T : struct => new ObservableCounter<T>(this, name, observeValues, unit, description); /// <summary> /// ObservableGauge is an asynchronous Instrument which reports non-additive value(s) (e.g. the room temperature - it makes no sense to report the temperature value from multiple rooms and sum them up) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. cannot be null.</param> /// <param name="observeValue">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" />.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> public ObservableGauge<T> CreateObservableGauge<T>(string name, Func<T> observeValue, string? unit = null, string? 
description = null) where T : struct => new ObservableGauge<T>(this, name, observeValue, unit, description); /// <summary> /// ObservableGauge is an asynchronous Instrument which reports non-additive value(s) (e.g. the room temperature - it makes no sense to report the temperature value from multiple rooms and sum them up) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. cannot be null.</param> /// <param name="observeValue">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" />.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> public ObservableGauge<T> CreateObservableGauge<T>(string name, Func<Measurement<T>> observeValue, string? unit = null, string? description = null) where T : struct => new ObservableGauge<T>(this, name, observeValue, unit, description); /// <summary> /// ObservableGauge is an asynchronous Instrument which reports non-additive value(s) (e.g. the room temperature - it makes no sense to report the temperature value from multiple rooms and sum them up) when the instrument is being observed. /// </summary> /// <param name="name">The instrument name. cannot be null.</param> /// <param name="observeValues">The callback to call to get the measurements when the <see cref="ObservableCounter{t}.Observe" /> is called by <see cref="MeterListener.RecordObservableInstruments" />.</param> /// <param name="unit">Optional instrument unit of measurements.</param> /// <param name="description">Optional instrument description.</param> public ObservableGauge<T> CreateObservableGauge<T>(string name, Func<IEnumerable<Measurement<T>>> observeValues, string? unit = null, string? description = null) where T : struct => new ObservableGauge<T>(this, name, observeValues, unit, description); /// <summary> /// Dispose the Meter which will disable all instruments created by this meter. /// </summary> public void Dispose() { List<Instrument>? instruments = null; lock (Instrument.SyncObject) { if (Disposed) { return; } Disposed = true; s_allMeters.Remove(this); instruments = _instruments; _instruments = new List<Instrument>(); } if (instruments is not null) { foreach (Instrument instrument in instruments) { instrument.NotifyForUnpublishedInstrument(); } } } // AddInstrument will be called when publishing the instrument (i.e. calling Instrument.Publish()). internal bool AddInstrument(Instrument instrument) { if (!_instruments.Contains(instrument)) { _instruments.Add(instrument); return true; } return false; } // Called from MeterListener.Start internal static List<Instrument>? GetPublishedInstruments() { List<Instrument>? instruments = null; if (s_allMeters.Count > 0) { instruments = new List<Instrument>(); foreach (Meter meter in s_allMeters) { foreach (Instrument instrument in meter._instruments) { instruments.Add(instrument); } } } return instruments; } } }
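A short consumer-side sketch of the Meter API defined above may help; the meter and instrument names below ("MyCompany.Sample", "requests-total", and so on) are illustrative assumptions, not names from the source.

using System;
using System.Diagnostics.Metrics;

class MeterDemo
{
    static void Main()
    {
        // Meter implements IDisposable; disposing it disables its instruments.
        using var meter = new Meter("MyCompany.Sample", "1.0.0");

        // Counter: non-negative increments pushed by the caller.
        Counter<long> requests = meter.CreateCounter<long>("requests-total", unit: "requests");
        requests.Add(1);

        // Histogram: arbitrary values intended for statistical aggregation.
        Histogram<double> latency = meter.CreateHistogram<double>("request-latency", unit: "ms");
        latency.Record(12.5);

        // ObservableGauge: pulled by a MeterListener instead of pushed by the app.
        meter.CreateObservableGauge("queue-depth", () => 42);
    }
}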
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/tests/GC/Scenarios/GCSimulator/GCSimulator_82.csproj
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
    <GCStressIncompatible>true</GCStressIncompatible>
    <CLRTestExecutionArguments>-t 3 -tp 0 -dz 17 -sdz 8500 -dc 10000 -sdc 5000 -lt 3 -f -dp 0.4 -dw 0.4</CLRTestExecutionArguments>
    <IsGCSimulatorTest>true</IsGCSimulatorTest>
    <CLRTestProjectToRun>GCSimulator.csproj</CLRTestProjectToRun>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="GCSimulator.cs" />
    <Compile Include="lifetimefx.cs" />
  </ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
    <GCStressIncompatible>true</GCStressIncompatible>
    <CLRTestExecutionArguments>-t 3 -tp 0 -dz 17 -sdz 8500 -dc 10000 -sdc 5000 -lt 3 -f -dp 0.4 -dw 0.4</CLRTestExecutionArguments>
    <IsGCSimulatorTest>true</IsGCSimulatorTest>
    <CLRTestProjectToRun>GCSimulator.csproj</CLRTestProjectToRun>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="GCSimulator.cs" />
    <Compile Include="lifetimefx.cs" />
  </ItemGroup>
</Project>
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/mono/System.Private.CoreLib/src/System/Diagnostics/StackTrace.Mono.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; namespace System.Diagnostics { // Need our own stackframe class since the shared version has its own fields [StructLayout(LayoutKind.Sequential)] internal sealed class MonoStackFrame { #region Keep in sync with object-internals.h internal int ilOffset; internal int nativeOffset; // Unused internal long methodAddress; // Unused internal uint methodIndex; internal MethodBase? methodBase; internal string? fileName; internal int lineNumber; internal int columnNumber; // Unused internal string? internalMethodName; #endregion internal bool isLastFrameFromForeignException; } public partial class StackTrace { [MethodImplAttribute(MethodImplOptions.InternalCall)] internal static extern MonoStackFrame[] get_trace(Exception e, int skipFrames, bool needFileInfo); [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:RequiresUnreferencedCode", Justification = "StackFrame.GetMethod is getting compared to null but nothing else on it is touched.")] [MethodImplAttribute(MethodImplOptions.NoInlining)] private void InitializeForCurrentThread(int skipFrames, bool needFileInfo) { skipFrames += 2; // Current method + parent ctor StackFrame sf; var frames = new List<StackFrame>(); while (skipFrames >= 0) { sf = new StackFrame(skipFrames, needFileInfo); if (sf.GetMethod() == null) { break; } frames.Add(sf); skipFrames++; } _stackFrames = frames.ToArray(); _numOfFrames = _stackFrames.Length; } private void InitializeForException(Exception e, int skipFrames, bool needFileInfo) { MonoStackFrame[] frames = get_trace(e, skipFrames, needFileInfo); _numOfFrames = frames.Length; int foreignFrames; MonoStackFrame[]? foreignExceptions = e.foreignExceptionsFrames; if (foreignExceptions != null) { foreignFrames = foreignExceptions.Length; _numOfFrames += foreignFrames; _stackFrames = new StackFrame[_numOfFrames]; for (int i = 0; i < foreignExceptions.Length; ++i) { _stackFrames[i] = new StackFrame(foreignExceptions[i], needFileInfo); } } else { _stackFrames = new StackFrame[_numOfFrames]; foreignFrames = 0; } for (int i = 0; i < frames.Length; ++i) { _stackFrames[foreignFrames + i] = new StackFrame(frames[i], needFileInfo); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; namespace System.Diagnostics { // Need our own stackframe class since the shared version has its own fields [StructLayout(LayoutKind.Sequential)] internal sealed class MonoStackFrame { #region Keep in sync with object-internals.h internal int ilOffset; internal int nativeOffset; // Unused internal long methodAddress; // Unused internal uint methodIndex; internal MethodBase? methodBase; internal string? fileName; internal int lineNumber; internal int columnNumber; // Unused internal string? internalMethodName; #endregion internal bool isLastFrameFromForeignException; } public partial class StackTrace { [MethodImplAttribute(MethodImplOptions.InternalCall)] internal static extern MonoStackFrame[] get_trace(Exception e, int skipFrames, bool needFileInfo); [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:RequiresUnreferencedCode", Justification = "StackFrame.GetMethod is getting compared to null but nothing else on it is touched.")] [MethodImplAttribute(MethodImplOptions.NoInlining)] private void InitializeForCurrentThread(int skipFrames, bool needFileInfo) { skipFrames += 2; // Current method + parent ctor StackFrame sf; var frames = new List<StackFrame>(); while (skipFrames >= 0) { sf = new StackFrame(skipFrames, needFileInfo); if (sf.GetMethod() == null) { break; } frames.Add(sf); skipFrames++; } _stackFrames = frames.ToArray(); _numOfFrames = _stackFrames.Length; } private void InitializeForException(Exception e, int skipFrames, bool needFileInfo) { MonoStackFrame[] frames = get_trace(e, skipFrames, needFileInfo); _numOfFrames = frames.Length; int foreignFrames; MonoStackFrame[]? foreignExceptions = e.foreignExceptionsFrames; if (foreignExceptions != null) { foreignFrames = foreignExceptions.Length; _numOfFrames += foreignFrames; _stackFrames = new StackFrame[_numOfFrames]; for (int i = 0; i < foreignExceptions.Length; ++i) { _stackFrames[i] = new StackFrame(foreignExceptions[i], needFileInfo); } } else { _stackFrames = new StackFrame[_numOfFrames]; foreignFrames = 0; } for (int i = 0; i < frames.Length; ++i) { _stackFrames[foreignFrames + i] = new StackFrame(frames[i], needFileInfo); } } } }
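As a consumer-side illustration of the plumbing above (an assumption-labeled sketch, not part of the runtime file): the public StackTrace constructors route into InitializeForCurrentThread or InitializeForException, and requesting file info is what populates the MonoStackFrame.fileName/lineNumber fields, symbols permitting.

using System;
using System.Diagnostics;

class StackTraceDemo
{
    static void Main()
    {
        try
        {
            throw new InvalidOperationException("demo");
        }
        catch (Exception ex)
        {
            // fNeedFileInfo: true asks for file/line data (debug symbols permitting).
            var trace = new StackTrace(ex, fNeedFileInfo: true);
            Console.WriteLine(trace.FrameCount);
            Console.WriteLine(trace.GetFrame(0)?.GetMethod()?.Name);
        }
    }
}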
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/tests/Loader/classloader/generics/regressions/188892/test188892.csproj
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
    <OutputType>Exe</OutputType>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="test188892.cs" />
  </ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
    <OutputType>Exe</OutputType>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="test188892.cs" />
  </ItemGroup>
</Project>
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/libraries/Common/src/Interop/Windows/Advapi32/Interop.CryptSignHash.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;
using System.Runtime.InteropServices;
using System.Security.Cryptography;

internal static partial class Interop
{
    internal static partial class Advapi32
    {
        internal enum KeySpec : int
        {
            AT_KEYEXCHANGE = 1,
            AT_SIGNATURE = 2,
        }

        [Flags]
        internal enum CryptSignAndVerifyHashFlags : int
        {
            None = 0x00000000,
            CRYPT_NOHASHOID = 0x00000001,
            CRYPT_TYPE2_FORMAT = 0x00000002, // Not supported
            CRYPT_X931_FORMAT = 0x00000004, // Not supported
        }

        [GeneratedDllImport(Libraries.Advapi32, EntryPoint = "CryptSignHashW", SetLastError = true, StringMarshalling = StringMarshalling.Utf16)]
        [return: MarshalAs(UnmanagedType.Bool)]
        public static partial bool CryptSignHash(
            SafeHashHandle hHash,
            KeySpec dwKeySpec,
            string? szDescription,
            CryptSignAndVerifyHashFlags dwFlags,
            byte[]? pbSignature,
            ref int pdwSigLen);

        [GeneratedDllImport(Libraries.Advapi32, EntryPoint = "CryptVerifySignatureW", SetLastError = true, StringMarshalling = StringMarshalling.Utf16)]
        [return: MarshalAs(UnmanagedType.Bool)]
        public static partial bool CryptVerifySignature(
            SafeHashHandle hHash,
            byte[] pbSignature,
            int dwSigLen,
            SafeCapiKeyHandle hPubKey,
            string? szDescription,
            CryptSignAndVerifyHashFlags dwFlags);
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;
using System.Runtime.InteropServices;
using System.Security.Cryptography;

internal static partial class Interop
{
    internal static partial class Advapi32
    {
        internal enum KeySpec : int
        {
            AT_KEYEXCHANGE = 1,
            AT_SIGNATURE = 2,
        }

        [Flags]
        internal enum CryptSignAndVerifyHashFlags : int
        {
            None = 0x00000000,
            CRYPT_NOHASHOID = 0x00000001,
            CRYPT_TYPE2_FORMAT = 0x00000002, // Not supported
            CRYPT_X931_FORMAT = 0x00000004, // Not supported
        }

        [GeneratedDllImport(Libraries.Advapi32, EntryPoint = "CryptSignHashW", SetLastError = true, StringMarshalling = StringMarshalling.Utf16)]
        [return: MarshalAs(UnmanagedType.Bool)]
        public static partial bool CryptSignHash(
            SafeHashHandle hHash,
            KeySpec dwKeySpec,
            string? szDescription,
            CryptSignAndVerifyHashFlags dwFlags,
            byte[]? pbSignature,
            ref int pdwSigLen);

        [GeneratedDllImport(Libraries.Advapi32, EntryPoint = "CryptVerifySignatureW", SetLastError = true, StringMarshalling = StringMarshalling.Utf16)]
        [return: MarshalAs(UnmanagedType.Bool)]
        public static partial bool CryptVerifySignature(
            SafeHashHandle hHash,
            byte[] pbSignature,
            int dwSigLen,
            SafeCapiKeyHandle hPubKey,
            string? szDescription,
            CryptSignAndVerifyHashFlags dwFlags);
    }
}
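The (byte[]? pbSignature, ref int pdwSigLen) shape above follows the classic CryptoAPI "measure, then fill" convention: pass a null buffer first to learn the required size, then call again with an allocated buffer. A hedged sketch of that calling pattern follows; FakeSignHash is a hypothetical stand-in so the sketch runs without a real CSP handle, and the 256-byte size is an assumption.

using System;

class SignHashPatternDemo
{
    // Hypothetical stand-in mirroring the tail arguments of Interop.Advapi32.CryptSignHash.
    static bool FakeSignHash(byte[]? signature, ref int length)
    {
        const int required = 256; // assumed RSA-2048 signature size
        if (signature is null) { length = required; return true; } // size query
        if (length < required) return false;                       // buffer too small
        Array.Fill(signature, (byte)0xAB, 0, required);            // "sign"
        length = required;
        return true;
    }

    static void Main()
    {
        int len = 0;
        FakeSignHash(null, ref len);     // first call: query the required length
        byte[] sig = new byte[len];
        FakeSignHash(sig, ref len);      // second call: fill the buffer
        Console.WriteLine($"signature bytes: {len}");
    }
}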
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/libraries/System.Speech/Directory.Build.props
<Project>
  <Import Project="..\Directory.Build.props" />
  <PropertyGroup>
    <!-- this assembly is inbox in desktop, do not version it unless you plan on shipping a new desktop version out of band. Instead add API to a different assembly. -->
    <AssemblyVersion>4.0.0.0</AssemblyVersion>
    <StrongNameKeyId>MicrosoftShared</StrongNameKeyId>
    <SupportedOSPlatforms>windows</SupportedOSPlatforms>
  </PropertyGroup>
</Project>
<Project>
  <Import Project="..\Directory.Build.props" />
  <PropertyGroup>
    <!-- this assembly is inbox in desktop, do not version it unless you plan on shipping a new desktop version out of band. Instead add API to a different assembly. -->
    <AssemblyVersion>4.0.0.0</AssemblyVersion>
    <StrongNameKeyId>MicrosoftShared</StrongNameKeyId>
    <SupportedOSPlatforms>windows</SupportedOSPlatforms>
  </PropertyGroup>
</Project>
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/coreclr/utilcode/executableallocator.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "pedecoder.h" #include "executableallocator.h" #if USE_LAZY_PREFERRED_RANGE // Preferred region to allocate the code in. BYTE * ExecutableAllocator::g_lazyPreferredRangeStart; // Next address to try to allocate for code in the preferred region. BYTE * ExecutableAllocator::g_lazyPreferredRangeHint; #endif // USE_LAZY_PREFERRED_RANGE BYTE * ExecutableAllocator::g_preferredRangeMin; BYTE * ExecutableAllocator::g_preferredRangeMax; bool ExecutableAllocator::g_isWXorXEnabled = false; ExecutableAllocator::FatalErrorHandler ExecutableAllocator::g_fatalErrorHandler = NULL; ExecutableAllocator* ExecutableAllocator::g_instance = NULL; bool ExecutableAllocator::IsDoubleMappingEnabled() { LIMITED_METHOD_CONTRACT; #if defined(HOST_OSX) && defined(HOST_ARM64) return false; #else return g_isWXorXEnabled; #endif } bool ExecutableAllocator::IsWXORXEnabled() { LIMITED_METHOD_CONTRACT; #if defined(HOST_OSX) && defined(HOST_ARM64) return true; #else return g_isWXorXEnabled; #endif } extern SYSTEM_INFO g_SystemInfo; size_t ExecutableAllocator::Granularity() { LIMITED_METHOD_CONTRACT; return g_SystemInfo.dwAllocationGranularity; } void ExecutableAllocator::InitLazyPreferredRange(size_t base, size_t size, int randomPageOffset) { #if USE_LAZY_PREFERRED_RANGE #ifdef _DEBUG // If GetForceRelocs is enabled we don't constrain the pMinAddr if (PEDecoder::GetForceRelocs()) return; #endif // // If we are using USE_LAZY_PREFERRED_RANGE then we try to allocate memory close // to coreclr.dll. This avoids having to create jump stubs for calls to // helpers and R2R images loaded close to coreclr.dll. // SIZE_T reach = 0x7FFF0000u; // We will choose the preferred code region based on the address of coreclr.dll. The JIT helpers // in coreclr.dll are the most heavily called functions. g_preferredRangeMin = (base + size > reach) ? (BYTE *)(base + size - reach) : (BYTE *)0; g_preferredRangeMax = (base + reach > base) ? (BYTE *)(base + reach) : (BYTE *)-1; BYTE * pStart; if (base > UINT32_MAX) { // Try to occupy the space as far as possible to minimize collisions with other ASLR assigned // addresses. Do not start at g_codeMinAddr exactly so that we can also reach common native images // that can be placed at higher addresses than coreclr.dll. pStart = g_preferredRangeMin + (g_preferredRangeMax - g_preferredRangeMin) / 8; } else { // clr.dll missed the base address? // Try to occupy the space right after it. pStart = (BYTE *)(base + size); } // Randomize the address space pStart += GetOsPageSize() * randomPageOffset; g_lazyPreferredRangeStart = pStart; g_lazyPreferredRangeHint = pStart; #endif } void ExecutableAllocator::InitPreferredRange() { #ifdef TARGET_UNIX void *start, *end; PAL_GetExecutableMemoryAllocatorPreferredRange(&start, &end); g_preferredRangeMin = (BYTE *)start; g_preferredRangeMax = (BYTE *)end; #endif } void ExecutableAllocator::ResetLazyPreferredRangeHint() { LIMITED_METHOD_CONTRACT; #if USE_LAZY_PREFERRED_RANGE g_lazyPreferredRangeHint = g_lazyPreferredRangeStart; #endif } // Returns TRUE if p is is located in the memory area where we prefer to put // executable code and static fields. This area is typically close to the // coreclr library. 
bool ExecutableAllocator::IsPreferredExecutableRange(void * p) { LIMITED_METHOD_CONTRACT; return g_preferredRangeMin <= (BYTE *)p && (BYTE *)p < g_preferredRangeMax; } ExecutableAllocator* ExecutableAllocator::Instance() { LIMITED_METHOD_CONTRACT; return g_instance; } ExecutableAllocator::~ExecutableAllocator() { if (IsDoubleMappingEnabled()) { VMToOSInterface::DestroyDoubleMemoryMapper(m_doubleMemoryMapperHandle); } } HRESULT ExecutableAllocator::StaticInitialize(FatalErrorHandler fatalErrorHandler) { LIMITED_METHOD_CONTRACT; g_fatalErrorHandler = fatalErrorHandler; g_isWXorXEnabled = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableWriteXorExecute) != 0; g_instance = new (nothrow) ExecutableAllocator(); if (g_instance == NULL) { return E_OUTOFMEMORY; } if (!g_instance->Initialize()) { return E_FAIL; } return S_OK; } bool ExecutableAllocator::Initialize() { LIMITED_METHOD_CONTRACT; if (IsDoubleMappingEnabled()) { if (!VMToOSInterface::CreateDoubleMemoryMapper(&m_doubleMemoryMapperHandle, &m_maxExecutableCodeSize)) { return false; } m_CriticalSection = ClrCreateCriticalSection(CrstExecutableAllocatorLock,CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)); } return true; } //#define ENABLE_CACHED_MAPPINGS void ExecutableAllocator::UpdateCachedMapping(BlockRW* pBlock) { LIMITED_METHOD_CONTRACT; #ifdef ENABLE_CACHED_MAPPINGS if (m_cachedMapping == NULL) { m_cachedMapping = pBlock; pBlock->refCount++; } else if (m_cachedMapping != pBlock) { void* unmapAddress = NULL; size_t unmapSize; if (!RemoveRWBlock(m_cachedMapping->baseRW, &unmapAddress, &unmapSize)) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("The RW block to unmap was not found")); } if (unmapAddress && !VMToOSInterface::ReleaseRWMapping(unmapAddress, unmapSize)) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Releasing the RW mapping failed")); } m_cachedMapping = pBlock; pBlock->refCount++; } #endif // ENABLE_CACHED_MAPPINGS } void* ExecutableAllocator::FindRWBlock(void* baseRX, size_t size) { LIMITED_METHOD_CONTRACT; for (BlockRW* pBlock = m_pFirstBlockRW; pBlock != NULL; pBlock = pBlock->next) { if (pBlock->baseRX <= baseRX && ((size_t)baseRX + size) <= ((size_t)pBlock->baseRX + pBlock->size)) { pBlock->refCount++; UpdateCachedMapping(pBlock); return (BYTE*)pBlock->baseRW + ((size_t)baseRX - (size_t)pBlock->baseRX); } } return NULL; } bool ExecutableAllocator::AddRWBlock(void* baseRW, void* baseRX, size_t size) { LIMITED_METHOD_CONTRACT; for (BlockRW* pBlock = m_pFirstBlockRW; pBlock != NULL; pBlock = pBlock->next) { if (pBlock->baseRX <= baseRX && ((size_t)baseRX + size) <= ((size_t)pBlock->baseRX + pBlock->size)) { break; } } // The new "nothrow" below failure is handled as fail fast since it is not recoverable PERMANENT_CONTRACT_VIOLATION(FaultViolation, ReasonContractInfrastructure); BlockRW* pBlockRW = new (nothrow) BlockRW(); if (pBlockRW == NULL) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("The RW block metadata cannot be allocated")); return false; } pBlockRW->baseRW = baseRW; pBlockRW->baseRX = baseRX; pBlockRW->size = size; pBlockRW->next = m_pFirstBlockRW; pBlockRW->refCount = 1; m_pFirstBlockRW = pBlockRW; UpdateCachedMapping(pBlockRW); return true; } bool ExecutableAllocator::RemoveRWBlock(void* pRW, void** pUnmapAddress, size_t* pUnmapSize) { LIMITED_METHOD_CONTRACT; BlockRW* pPrevBlockRW = NULL; for (BlockRW* pBlockRW = m_pFirstBlockRW; pBlockRW != NULL; pBlockRW = pBlockRW->next) { if (pBlockRW->baseRW <= pRW && (size_t)pRW < ((size_t)pBlockRW->baseRW + pBlockRW->size)) { // found pBlockRW->refCount--; 
if (pBlockRW->refCount != 0) { *pUnmapAddress = NULL; return true; } if (pPrevBlockRW == NULL) { m_pFirstBlockRW = pBlockRW->next; } else { pPrevBlockRW->next = pBlockRW->next; } *pUnmapAddress = pBlockRW->baseRW; *pUnmapSize = pBlockRW->size; delete pBlockRW; return true; } pPrevBlockRW = pBlockRW; } return false; } bool ExecutableAllocator::AllocateOffset(size_t* pOffset, size_t size) { LIMITED_METHOD_CONTRACT; size_t offset = m_freeOffset; size_t newFreeOffset = offset + size; if (newFreeOffset > m_maxExecutableCodeSize) { return false; } m_freeOffset = newFreeOffset; *pOffset = offset; return true; } void ExecutableAllocator::AddRXBlock(BlockRX* pBlock) { LIMITED_METHOD_CONTRACT; pBlock->next = m_pFirstBlockRX; m_pFirstBlockRX = pBlock; } void* ExecutableAllocator::Commit(void* pStart, size_t size, bool isExecutable) { LIMITED_METHOD_CONTRACT; if (IsDoubleMappingEnabled()) { return VMToOSInterface::CommitDoubleMappedMemory(pStart, size, isExecutable); } else { return ClrVirtualAlloc(pStart, size, MEM_COMMIT, isExecutable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE); } } void ExecutableAllocator::Release(void* pRX) { LIMITED_METHOD_CONTRACT; if (IsDoubleMappingEnabled()) { CRITSEC_Holder csh(m_CriticalSection); // Locate the RX block corresponding to the pRX and remove it from the linked list BlockRX* pBlock; BlockRX* pPrevBlock = NULL; for (pBlock = m_pFirstBlockRX; pBlock != NULL; pBlock = pBlock->next) { if (pRX == pBlock->baseRX) { if (pPrevBlock == NULL) { m_pFirstBlockRX = pBlock->next; } else { pPrevBlock->next = pBlock->next; } break; } pPrevBlock = pBlock; } if (pBlock != NULL) { VMToOSInterface::ReleaseDoubleMappedMemory(m_doubleMemoryMapperHandle, pRX, pBlock->offset, pBlock->size); // Put the released block into the free block list pBlock->baseRX = NULL; pBlock->next = m_pFirstFreeBlockRX; m_pFirstFreeBlockRX = pBlock; } else { // The block was not found, which should never happen. g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("The RX block to release was not found")); } } else { ClrVirtualFree(pRX, 0, MEM_RELEASE); } } // Find a free block with the closest size >= the requested size. // Returns NULL if no such block exists. ExecutableAllocator::BlockRX* ExecutableAllocator::FindBestFreeBlock(size_t size) { LIMITED_METHOD_CONTRACT; BlockRX* pPrevBlock = NULL; BlockRX* pPrevBestBlock = NULL; BlockRX* pBestBlock = NULL; BlockRX* pBlock = m_pFirstFreeBlockRX; while (pBlock != NULL) { if (pBlock->size >= size) { if (pBestBlock != NULL) { if (pBlock->size < pBestBlock->size) { pPrevBestBlock = pPrevBlock; pBestBlock = pBlock; } } else { pPrevBestBlock = pPrevBlock; pBestBlock = pBlock; } } pPrevBlock = pBlock; pBlock = pBlock->next; } if (pBestBlock != NULL) { if (pPrevBestBlock != NULL) { pPrevBestBlock->next = pBestBlock->next; } else { m_pFirstFreeBlockRX = pBestBlock->next; } pBestBlock->next = NULL; } return pBestBlock; } // Allocate a new block of executable memory and the related descriptor structure. // First try to get it from the free blocks and if there is no suitable free block, // allocate a new one. ExecutableAllocator::BlockRX* ExecutableAllocator::AllocateBlock(size_t size, bool* pIsFreeBlock) { LIMITED_METHOD_CONTRACT; size_t offset; BlockRX* block = FindBestFreeBlock(size); *pIsFreeBlock = (block != NULL); if (block == NULL) { if (!AllocateOffset(&offset, size)) { return NULL; } block = new (nothrow) BlockRX(); if (block == NULL) { return NULL; } block->offset = offset; block->size = size; } return block; } // Backout a previously allocated block. 
The block is added to the free blocks list and // reused for later allocation requests. void ExecutableAllocator::BackoutBlock(BlockRX* pBlock, bool isFreeBlock) { LIMITED_METHOD_CONTRACT; if (!isFreeBlock) { m_freeOffset -= pBlock->size; delete pBlock; } else { pBlock->next = m_pFirstFreeBlockRX; m_pFirstFreeBlockRX = pBlock; } } // Reserve executable memory within the specified virtual address space range. If it is not possible to // reserve memory in that range, the method returns NULL and nothing is allocated. void* ExecutableAllocator::ReserveWithinRange(size_t size, const void* loAddress, const void* hiAddress) { LIMITED_METHOD_CONTRACT; _ASSERTE((size & (Granularity() - 1)) == 0); if (IsDoubleMappingEnabled()) { CRITSEC_Holder csh(m_CriticalSection); bool isFreeBlock; BlockRX* block = AllocateBlock(size, &isFreeBlock); if (block == NULL) { return NULL; } void *result = VMToOSInterface::ReserveDoubleMappedMemory(m_doubleMemoryMapperHandle, block->offset, size, loAddress, hiAddress); if (result != NULL) { block->baseRX = result; AddRXBlock(block); } else { BackoutBlock(block, isFreeBlock); } return result; } else { DWORD allocationType = MEM_RESERVE; #ifdef HOST_UNIX // Tell PAL to use the executable memory allocator to satisfy this request for virtual memory. // This will allow us to place JIT'ed code close to the coreclr library // and thus improve performance by avoiding jump stubs in managed code. allocationType |= MEM_RESERVE_EXECUTABLE; #endif return ClrVirtualAllocWithinRange((const BYTE*)loAddress, (const BYTE*)hiAddress, size, allocationType, PAGE_NOACCESS); } } // Reserve executable memory. On Windows it tries to use the allocation hints to // allocate memory close to the previously allocated executable memory and loaded // executable files. void* ExecutableAllocator::Reserve(size_t size) { LIMITED_METHOD_CONTRACT; _ASSERTE((size & (Granularity() - 1)) == 0); BYTE *result = NULL; #if USE_LAZY_PREFERRED_RANGE // // If we are using the UPPER_ADDRESS space (on Win64) // then for any heap that will contain executable code // we will place it in the upper address space // // This enables us to avoid having to use JumpStubs // to reach the code for our ngen-ed images on x64, // since they are also placed in the UPPER_ADDRESS space. // BYTE * pHint = g_lazyPreferredRangeHint; if (size <= (SIZE_T)(g_preferredRangeMax - g_preferredRangeMin) && pHint != NULL) { // Try to allocate in the preferred region after the hint result = (BYTE*)ReserveWithinRange(size, pHint, g_preferredRangeMax); if (result != NULL) { g_lazyPreferredRangeHint = result + size; } else { // Try to allocate in the preferred region before the hint result = (BYTE*)ReserveWithinRange(size, g_preferredRangeMin, pHint + size); if (result != NULL) { g_lazyPreferredRangeHint = result + size; } g_lazyPreferredRangeHint = NULL; } } // Fall through to #endif // USE_LAZY_PREFERRED_RANGE if (result == NULL) { if (IsDoubleMappingEnabled()) { CRITSEC_Holder csh(m_CriticalSection); bool isFreeBlock; BlockRX* block = AllocateBlock(size, &isFreeBlock); if (block == NULL) { return NULL; } result = (BYTE*)VMToOSInterface::ReserveDoubleMappedMemory(m_doubleMemoryMapperHandle, block->offset, size, 0, 0); if (result != NULL) { block->baseRX = result; AddRXBlock(block); } else { BackoutBlock(block, isFreeBlock); } } else { DWORD allocationType = MEM_RESERVE; #ifdef HOST_UNIX // Tell PAL to use the executable memory allocator to satisfy this request for virtual memory. 
// This will allow us to place JIT'ed code close to the coreclr library // and thus improve performance by avoiding jump stubs in managed code. allocationType |= MEM_RESERVE_EXECUTABLE; #endif result = (BYTE*)ClrVirtualAlloc(NULL, size, allocationType, PAGE_NOACCESS); } } return result; } // Reserve a block of executable memory at the specified virtual address. If it is not // possible, the method returns NULL. void* ExecutableAllocator::ReserveAt(void* baseAddressRX, size_t size) { LIMITED_METHOD_CONTRACT; _ASSERTE((size & (Granularity() - 1)) == 0); if (IsDoubleMappingEnabled()) { CRITSEC_Holder csh(m_CriticalSection); bool isFreeBlock; BlockRX* block = AllocateBlock(size, &isFreeBlock); if (block == NULL) { return NULL; } void* result = VMToOSInterface::ReserveDoubleMappedMemory(m_doubleMemoryMapperHandle, block->offset, size, baseAddressRX, baseAddressRX); if (result != NULL) { block->baseRX = result; AddRXBlock(block); } else { BackoutBlock(block, isFreeBlock); } return result; } else { return VirtualAlloc(baseAddressRX, size, MEM_RESERVE, PAGE_NOACCESS); } } // Map an executable memory block as writeable. If there is already a mapping // covering the specified block, return that mapping instead of creating a new one. // Return starting address of the writeable mapping. void* ExecutableAllocator::MapRW(void* pRX, size_t size) { LIMITED_METHOD_CONTRACT; if (!IsDoubleMappingEnabled()) { return pRX; } CRITSEC_Holder csh(m_CriticalSection); void* result = FindRWBlock(pRX, size); if (result != NULL) { return result; } for (BlockRX* pBlock = m_pFirstBlockRX; pBlock != NULL; pBlock = pBlock->next) { if (pRX >= pBlock->baseRX && ((size_t)pRX + size) <= ((size_t)pBlock->baseRX + pBlock->size)) { // Offset of the RX address in the originally allocated block size_t offset = (size_t)pRX - (size_t)pBlock->baseRX; // Offset of the RX address that will start the newly mapped block size_t mapOffset = ALIGN_DOWN(offset, Granularity()); // Size of the block we will map size_t mapSize = ALIGN_UP(offset - mapOffset + size, Granularity()); void* pRW = VMToOSInterface::GetRWMapping(m_doubleMemoryMapperHandle, (BYTE*)pBlock->baseRX + mapOffset, pBlock->offset + mapOffset, mapSize); if (pRW == NULL) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Failed to create RW mapping for RX memory")); } AddRWBlock(pRW, (BYTE*)pBlock->baseRX + mapOffset, mapSize); return (void*)((size_t)pRW + (offset - mapOffset)); } else if (pRX >= pBlock->baseRX && pRX < (void*)((size_t)pBlock->baseRX + pBlock->size)) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Attempting to RW map a block that crosses the end of the allocated RX range")); } else if (pRX < pBlock->baseRX && (void*)((size_t)pRX + size) > pBlock->baseRX) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Attempting to map a block that crosses the beginning of the allocated range")); } } // The executable memory block was not found, so we cannot provide the writeable mapping. g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("The RX block to map as RW was not found")); return NULL; } // Unmap writeable mapping at the specified address. The address must be an address // returned by the MapRW method. 
void ExecutableAllocator::UnmapRW(void* pRW) { LIMITED_METHOD_CONTRACT; if (!IsDoubleMappingEnabled()) { return; } CRITSEC_Holder csh(m_CriticalSection); _ASSERTE(pRW != NULL); void* unmapAddress = NULL; size_t unmapSize; if (!RemoveRWBlock(pRW, &unmapAddress, &unmapSize)) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("The RW block to unmap was not found")); } if (unmapAddress && !VMToOSInterface::ReleaseRWMapping(unmapAddress, unmapSize)) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Releasing the RW mapping failed")); } }
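For readers skimming the allocator code above: FindBestFreeBlock is a plain best-fit scan over a singly linked free list — keep the smallest block that still satisfies the request, then unlink it. The sketch below restates that idea in C#; the FreeBlock type and the List<T> storage are hypothetical stand-ins for the runtime's intrusive BlockRX list, so treat it as an illustration under those assumptions, not as runtime code.

// Toy best-fit scan mirroring the shape of FindBestFreeBlock (simplified;
// FreeBlock and the List<T> storage are hypothetical stand-ins).
using System;
using System.Collections.Generic;

class FreeBlock
{
    public ulong Size;
    public FreeBlock(ulong size) => Size = size;
}

class BestFitDemo
{
    // Returns the smallest block whose size fits, removing it from the list,
    // or null when nothing fits (the caller then carves out a fresh offset).
    static FreeBlock? TakeBestFit(List<FreeBlock> freeList, ulong size)
    {
        int bestIndex = -1;
        for (int i = 0; i < freeList.Count; i++)
        {
            if (freeList[i].Size >= size &&
                (bestIndex < 0 || freeList[i].Size < freeList[bestIndex].Size))
            {
                bestIndex = i;
            }
        }
        if (bestIndex < 0)
            return null;
        FreeBlock best = freeList[bestIndex];
        freeList.RemoveAt(bestIndex);
        return best;
    }

    static void Main()
    {
        var freeList = new List<FreeBlock> { new(0x20000), new(0x8000), new(0x30000) };
        FreeBlock? hit = TakeBestFit(freeList, 0x10000);
        Console.WriteLine($"best fit: 0x{hit?.Size:X}"); // prints "best fit: 0x20000"
    }
}

Best-fit costs a full walk of the list on every allocation, but it minimizes the leftover slack in the reused block, which suits this allocator: the free list holds only previously released RX blocks, so it stays short.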
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "pedecoder.h" #include "executableallocator.h" #if USE_LAZY_PREFERRED_RANGE // Preferred region to allocate the code in. BYTE * ExecutableAllocator::g_lazyPreferredRangeStart; // Next address to try to allocate for code in the preferred region. BYTE * ExecutableAllocator::g_lazyPreferredRangeHint; #endif // USE_LAZY_PREFERRED_RANGE BYTE * ExecutableAllocator::g_preferredRangeMin; BYTE * ExecutableAllocator::g_preferredRangeMax; bool ExecutableAllocator::g_isWXorXEnabled = false; ExecutableAllocator::FatalErrorHandler ExecutableAllocator::g_fatalErrorHandler = NULL; ExecutableAllocator* ExecutableAllocator::g_instance = NULL; bool ExecutableAllocator::IsDoubleMappingEnabled() { LIMITED_METHOD_CONTRACT; #if defined(HOST_OSX) && defined(HOST_ARM64) return false; #else return g_isWXorXEnabled; #endif } bool ExecutableAllocator::IsWXORXEnabled() { LIMITED_METHOD_CONTRACT; #if defined(HOST_OSX) && defined(HOST_ARM64) return true; #else return g_isWXorXEnabled; #endif } extern SYSTEM_INFO g_SystemInfo; size_t ExecutableAllocator::Granularity() { LIMITED_METHOD_CONTRACT; return g_SystemInfo.dwAllocationGranularity; } void ExecutableAllocator::InitLazyPreferredRange(size_t base, size_t size, int randomPageOffset) { #if USE_LAZY_PREFERRED_RANGE #ifdef _DEBUG // If GetForceRelocs is enabled we don't constrain the pMinAddr if (PEDecoder::GetForceRelocs()) return; #endif // // If we are using USE_LAZY_PREFERRED_RANGE then we try to allocate memory close // to coreclr.dll. This avoids having to create jump stubs for calls to // helpers and R2R images loaded close to coreclr.dll. // SIZE_T reach = 0x7FFF0000u; // We will choose the preferred code region based on the address of coreclr.dll. The JIT helpers // in coreclr.dll are the most heavily called functions. g_preferredRangeMin = (base + size > reach) ? (BYTE *)(base + size - reach) : (BYTE *)0; g_preferredRangeMax = (base + reach > base) ? (BYTE *)(base + reach) : (BYTE *)-1; BYTE * pStart; if (base > UINT32_MAX) { // Try to occupy the space as far as possible to minimize collisions with other ASLR assigned // addresses. Do not start at g_codeMinAddr exactly so that we can also reach common native images // that can be placed at higher addresses than coreclr.dll. pStart = g_preferredRangeMin + (g_preferredRangeMax - g_preferredRangeMin) / 8; } else { // clr.dll missed the base address? // Try to occupy the space right after it. pStart = (BYTE *)(base + size); } // Randomize the address space pStart += GetOsPageSize() * randomPageOffset; g_lazyPreferredRangeStart = pStart; g_lazyPreferredRangeHint = pStart; #endif } void ExecutableAllocator::InitPreferredRange() { #ifdef TARGET_UNIX void *start, *end; PAL_GetExecutableMemoryAllocatorPreferredRange(&start, &end); g_preferredRangeMin = (BYTE *)start; g_preferredRangeMax = (BYTE *)end; #endif } void ExecutableAllocator::ResetLazyPreferredRangeHint() { LIMITED_METHOD_CONTRACT; #if USE_LAZY_PREFERRED_RANGE g_lazyPreferredRangeHint = g_lazyPreferredRangeStart; #endif } // Returns TRUE if p is located in the memory area where we prefer to put // executable code and static fields. This area is typically close to the // coreclr library.
bool ExecutableAllocator::IsPreferredExecutableRange(void * p) { LIMITED_METHOD_CONTRACT; return g_preferredRangeMin <= (BYTE *)p && (BYTE *)p < g_preferredRangeMax; } ExecutableAllocator* ExecutableAllocator::Instance() { LIMITED_METHOD_CONTRACT; return g_instance; } ExecutableAllocator::~ExecutableAllocator() { if (IsDoubleMappingEnabled()) { VMToOSInterface::DestroyDoubleMemoryMapper(m_doubleMemoryMapperHandle); } } HRESULT ExecutableAllocator::StaticInitialize(FatalErrorHandler fatalErrorHandler) { LIMITED_METHOD_CONTRACT; g_fatalErrorHandler = fatalErrorHandler; g_isWXorXEnabled = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_EnableWriteXorExecute) != 0; g_instance = new (nothrow) ExecutableAllocator(); if (g_instance == NULL) { return E_OUTOFMEMORY; } if (!g_instance->Initialize()) { return E_FAIL; } return S_OK; } bool ExecutableAllocator::Initialize() { LIMITED_METHOD_CONTRACT; if (IsDoubleMappingEnabled()) { if (!VMToOSInterface::CreateDoubleMemoryMapper(&m_doubleMemoryMapperHandle, &m_maxExecutableCodeSize)) { return false; } m_CriticalSection = ClrCreateCriticalSection(CrstExecutableAllocatorLock,CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)); } return true; } //#define ENABLE_CACHED_MAPPINGS void ExecutableAllocator::UpdateCachedMapping(BlockRW* pBlock) { LIMITED_METHOD_CONTRACT; #ifdef ENABLE_CACHED_MAPPINGS if (m_cachedMapping == NULL) { m_cachedMapping = pBlock; pBlock->refCount++; } else if (m_cachedMapping != pBlock) { void* unmapAddress = NULL; size_t unmapSize; if (!RemoveRWBlock(m_cachedMapping->baseRW, &unmapAddress, &unmapSize)) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("The RW block to unmap was not found")); } if (unmapAddress && !VMToOSInterface::ReleaseRWMapping(unmapAddress, unmapSize)) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Releasing the RW mapping failed")); } m_cachedMapping = pBlock; pBlock->refCount++; } #endif // ENABLE_CACHED_MAPPINGS } void* ExecutableAllocator::FindRWBlock(void* baseRX, size_t size) { LIMITED_METHOD_CONTRACT; for (BlockRW* pBlock = m_pFirstBlockRW; pBlock != NULL; pBlock = pBlock->next) { if (pBlock->baseRX <= baseRX && ((size_t)baseRX + size) <= ((size_t)pBlock->baseRX + pBlock->size)) { pBlock->refCount++; UpdateCachedMapping(pBlock); return (BYTE*)pBlock->baseRW + ((size_t)baseRX - (size_t)pBlock->baseRX); } } return NULL; } bool ExecutableAllocator::AddRWBlock(void* baseRW, void* baseRX, size_t size) { LIMITED_METHOD_CONTRACT; for (BlockRW* pBlock = m_pFirstBlockRW; pBlock != NULL; pBlock = pBlock->next) { if (pBlock->baseRX <= baseRX && ((size_t)baseRX + size) <= ((size_t)pBlock->baseRX + pBlock->size)) { break; } } // The new "nothrow" below failure is handled as fail fast since it is not recoverable PERMANENT_CONTRACT_VIOLATION(FaultViolation, ReasonContractInfrastructure); BlockRW* pBlockRW = new (nothrow) BlockRW(); if (pBlockRW == NULL) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("The RW block metadata cannot be allocated")); return false; } pBlockRW->baseRW = baseRW; pBlockRW->baseRX = baseRX; pBlockRW->size = size; pBlockRW->next = m_pFirstBlockRW; pBlockRW->refCount = 1; m_pFirstBlockRW = pBlockRW; UpdateCachedMapping(pBlockRW); return true; } bool ExecutableAllocator::RemoveRWBlock(void* pRW, void** pUnmapAddress, size_t* pUnmapSize) { LIMITED_METHOD_CONTRACT; BlockRW* pPrevBlockRW = NULL; for (BlockRW* pBlockRW = m_pFirstBlockRW; pBlockRW != NULL; pBlockRW = pBlockRW->next) { if (pBlockRW->baseRW <= pRW && (size_t)pRW < ((size_t)pBlockRW->baseRW + pBlockRW->size)) { // found pBlockRW->refCount--; 
if (pBlockRW->refCount != 0) { *pUnmapAddress = NULL; return true; } if (pPrevBlockRW == NULL) { m_pFirstBlockRW = pBlockRW->next; } else { pPrevBlockRW->next = pBlockRW->next; } *pUnmapAddress = pBlockRW->baseRW; *pUnmapSize = pBlockRW->size; delete pBlockRW; return true; } pPrevBlockRW = pBlockRW; } return false; } bool ExecutableAllocator::AllocateOffset(size_t* pOffset, size_t size) { LIMITED_METHOD_CONTRACT; size_t offset = m_freeOffset; size_t newFreeOffset = offset + size; if (newFreeOffset > m_maxExecutableCodeSize) { return false; } m_freeOffset = newFreeOffset; *pOffset = offset; return true; } void ExecutableAllocator::AddRXBlock(BlockRX* pBlock) { LIMITED_METHOD_CONTRACT; pBlock->next = m_pFirstBlockRX; m_pFirstBlockRX = pBlock; } void* ExecutableAllocator::Commit(void* pStart, size_t size, bool isExecutable) { LIMITED_METHOD_CONTRACT; if (IsDoubleMappingEnabled()) { return VMToOSInterface::CommitDoubleMappedMemory(pStart, size, isExecutable); } else { return ClrVirtualAlloc(pStart, size, MEM_COMMIT, isExecutable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE); } } void ExecutableAllocator::Release(void* pRX) { LIMITED_METHOD_CONTRACT; if (IsDoubleMappingEnabled()) { CRITSEC_Holder csh(m_CriticalSection); // Locate the RX block corresponding to the pRX and remove it from the linked list BlockRX* pBlock; BlockRX* pPrevBlock = NULL; for (pBlock = m_pFirstBlockRX; pBlock != NULL; pBlock = pBlock->next) { if (pRX == pBlock->baseRX) { if (pPrevBlock == NULL) { m_pFirstBlockRX = pBlock->next; } else { pPrevBlock->next = pBlock->next; } break; } pPrevBlock = pBlock; } if (pBlock != NULL) { VMToOSInterface::ReleaseDoubleMappedMemory(m_doubleMemoryMapperHandle, pRX, pBlock->offset, pBlock->size); // Put the released block into the free block list pBlock->baseRX = NULL; pBlock->next = m_pFirstFreeBlockRX; m_pFirstFreeBlockRX = pBlock; } else { // The block was not found, which should never happen. g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("The RX block to release was not found")); } } else { ClrVirtualFree(pRX, 0, MEM_RELEASE); } } // Find a free block with the closest size >= the requested size. // Returns NULL if no such block exists. ExecutableAllocator::BlockRX* ExecutableAllocator::FindBestFreeBlock(size_t size) { LIMITED_METHOD_CONTRACT; BlockRX* pPrevBlock = NULL; BlockRX* pPrevBestBlock = NULL; BlockRX* pBestBlock = NULL; BlockRX* pBlock = m_pFirstFreeBlockRX; while (pBlock != NULL) { if (pBlock->size >= size) { if (pBestBlock != NULL) { if (pBlock->size < pBestBlock->size) { pPrevBestBlock = pPrevBlock; pBestBlock = pBlock; } } else { pPrevBestBlock = pPrevBlock; pBestBlock = pBlock; } } pPrevBlock = pBlock; pBlock = pBlock->next; } if (pBestBlock != NULL) { if (pPrevBestBlock != NULL) { pPrevBestBlock->next = pBestBlock->next; } else { m_pFirstFreeBlockRX = pBestBlock->next; } pBestBlock->next = NULL; } return pBestBlock; } // Allocate a new block of executable memory and the related descriptor structure. // First try to get it from the free blocks and if there is no suitable free block, // allocate a new one. ExecutableAllocator::BlockRX* ExecutableAllocator::AllocateBlock(size_t size, bool* pIsFreeBlock) { LIMITED_METHOD_CONTRACT; size_t offset; BlockRX* block = FindBestFreeBlock(size); *pIsFreeBlock = (block != NULL); if (block == NULL) { if (!AllocateOffset(&offset, size)) { return NULL; } block = new (nothrow) BlockRX(); if (block == NULL) { return NULL; } block->offset = offset; block->size = size; } return block; } // Backout a previously allocated block. 
The block is added to the free blocks list and // reused for later allocation requests. void ExecutableAllocator::BackoutBlock(BlockRX* pBlock, bool isFreeBlock) { LIMITED_METHOD_CONTRACT; if (!isFreeBlock) { m_freeOffset -= pBlock->size; delete pBlock; } else { pBlock->next = m_pFirstFreeBlockRX; m_pFirstFreeBlockRX = pBlock; } } // Reserve executable memory within the specified virtual address space range. If it is not possible to // reserve memory in that range, the method returns NULL and nothing is allocated. void* ExecutableAllocator::ReserveWithinRange(size_t size, const void* loAddress, const void* hiAddress) { LIMITED_METHOD_CONTRACT; _ASSERTE((size & (Granularity() - 1)) == 0); if (IsDoubleMappingEnabled()) { CRITSEC_Holder csh(m_CriticalSection); bool isFreeBlock; BlockRX* block = AllocateBlock(size, &isFreeBlock); if (block == NULL) { return NULL; } void *result = VMToOSInterface::ReserveDoubleMappedMemory(m_doubleMemoryMapperHandle, block->offset, size, loAddress, hiAddress); if (result != NULL) { block->baseRX = result; AddRXBlock(block); } else { BackoutBlock(block, isFreeBlock); } return result; } else { DWORD allocationType = MEM_RESERVE; #ifdef HOST_UNIX // Tell PAL to use the executable memory allocator to satisfy this request for virtual memory. // This will allow us to place JIT'ed code close to the coreclr library // and thus improve performance by avoiding jump stubs in managed code. allocationType |= MEM_RESERVE_EXECUTABLE; #endif return ClrVirtualAllocWithinRange((const BYTE*)loAddress, (const BYTE*)hiAddress, size, allocationType, PAGE_NOACCESS); } } // Reserve executable memory. On Windows it tries to use the allocation hints to // allocate memory close to the previously allocated executable memory and loaded // executable files. void* ExecutableAllocator::Reserve(size_t size) { LIMITED_METHOD_CONTRACT; _ASSERTE((size & (Granularity() - 1)) == 0); BYTE *result = NULL; #if USE_LAZY_PREFERRED_RANGE // // If we are using the UPPER_ADDRESS space (on Win64) // then for any heap that will contain executable code // we will place it in the upper address space // // This enables us to avoid having to use JumpStubs // to reach the code for our ngen-ed images on x64, // since they are also placed in the UPPER_ADDRESS space. // BYTE * pHint = g_lazyPreferredRangeHint; if (size <= (SIZE_T)(g_preferredRangeMax - g_preferredRangeMin) && pHint != NULL) { // Try to allocate in the preferred region after the hint result = (BYTE*)ReserveWithinRange(size, pHint, g_preferredRangeMax); if (result != NULL) { g_lazyPreferredRangeHint = result + size; } else { // Try to allocate in the preferred region before the hint result = (BYTE*)ReserveWithinRange(size, g_preferredRangeMin, pHint + size); if (result != NULL) { g_lazyPreferredRangeHint = result + size; } g_lazyPreferredRangeHint = NULL; } } // Fall through to #endif // USE_LAZY_PREFERRED_RANGE if (result == NULL) { if (IsDoubleMappingEnabled()) { CRITSEC_Holder csh(m_CriticalSection); bool isFreeBlock; BlockRX* block = AllocateBlock(size, &isFreeBlock); if (block == NULL) { return NULL; } result = (BYTE*)VMToOSInterface::ReserveDoubleMappedMemory(m_doubleMemoryMapperHandle, block->offset, size, 0, 0); if (result != NULL) { block->baseRX = result; AddRXBlock(block); } else { BackoutBlock(block, isFreeBlock); } } else { DWORD allocationType = MEM_RESERVE; #ifdef HOST_UNIX // Tell PAL to use the executable memory allocator to satisfy this request for virtual memory. 
// This will allow us to place JIT'ed code close to the coreclr library // and thus improve performance by avoiding jump stubs in managed code. allocationType |= MEM_RESERVE_EXECUTABLE; #endif result = (BYTE*)ClrVirtualAlloc(NULL, size, allocationType, PAGE_NOACCESS); } } return result; } // Reserve a block of executable memory at the specified virtual address. If it is not // possible, the method returns NULL. void* ExecutableAllocator::ReserveAt(void* baseAddressRX, size_t size) { LIMITED_METHOD_CONTRACT; _ASSERTE((size & (Granularity() - 1)) == 0); if (IsDoubleMappingEnabled()) { CRITSEC_Holder csh(m_CriticalSection); bool isFreeBlock; BlockRX* block = AllocateBlock(size, &isFreeBlock); if (block == NULL) { return NULL; } void* result = VMToOSInterface::ReserveDoubleMappedMemory(m_doubleMemoryMapperHandle, block->offset, size, baseAddressRX, baseAddressRX); if (result != NULL) { block->baseRX = result; AddRXBlock(block); } else { BackoutBlock(block, isFreeBlock); } return result; } else { return VirtualAlloc(baseAddressRX, size, MEM_RESERVE, PAGE_NOACCESS); } } // Map an executable memory block as writeable. If there is already a mapping // covering the specified block, return that mapping instead of creating a new one. // Return starting address of the writeable mapping. void* ExecutableAllocator::MapRW(void* pRX, size_t size) { LIMITED_METHOD_CONTRACT; if (!IsDoubleMappingEnabled()) { return pRX; } CRITSEC_Holder csh(m_CriticalSection); void* result = FindRWBlock(pRX, size); if (result != NULL) { return result; } for (BlockRX* pBlock = m_pFirstBlockRX; pBlock != NULL; pBlock = pBlock->next) { if (pRX >= pBlock->baseRX && ((size_t)pRX + size) <= ((size_t)pBlock->baseRX + pBlock->size)) { // Offset of the RX address in the originally allocated block size_t offset = (size_t)pRX - (size_t)pBlock->baseRX; // Offset of the RX address that will start the newly mapped block size_t mapOffset = ALIGN_DOWN(offset, Granularity()); // Size of the block we will map size_t mapSize = ALIGN_UP(offset - mapOffset + size, Granularity()); void* pRW = VMToOSInterface::GetRWMapping(m_doubleMemoryMapperHandle, (BYTE*)pBlock->baseRX + mapOffset, pBlock->offset + mapOffset, mapSize); if (pRW == NULL) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Failed to create RW mapping for RX memory")); } AddRWBlock(pRW, (BYTE*)pBlock->baseRX + mapOffset, mapSize); return (void*)((size_t)pRW + (offset - mapOffset)); } else if (pRX >= pBlock->baseRX && pRX < (void*)((size_t)pBlock->baseRX + pBlock->size)) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Attempting to RW map a block that crosses the end of the allocated RX range")); } else if (pRX < pBlock->baseRX && (void*)((size_t)pRX + size) > pBlock->baseRX) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Attempting to map a block that crosses the beginning of the allocated range")); } } // The executable memory block was not found, so we cannot provide the writeable mapping. g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("The RX block to map as RW was not found")); return NULL; } // Unmap writeable mapping at the specified address. The address must be an address // returned by the MapRW method. 
void ExecutableAllocator::UnmapRW(void* pRW) { LIMITED_METHOD_CONTRACT; if (!IsDoubleMappingEnabled()) { return; } CRITSEC_Holder csh(m_CriticalSection); _ASSERTE(pRW != NULL); void* unmapAddress = NULL; size_t unmapSize; if (!RemoveRWBlock(pRW, &unmapAddress, &unmapSize)) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("The RW block to unmap was not found")); } if (unmapAddress && !VMToOSInterface::ReleaseRWMapping(unmapAddress, unmapSize)) { g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Releasing the RW mapping failed")); } }
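The trickiest part of MapRW above is the alignment arithmetic that widens an arbitrary [pRX, pRX + size) request to allocation-granularity boundaries before requesting the RW view. The C# sketch below reproduces just that arithmetic with made-up example values (64KB granularity, hypothetical addresses); AlignDown/AlignUp stand in for the runtime's ALIGN_DOWN/ALIGN_UP macros.

// Standalone sketch of MapRW's alignment arithmetic; all values here are
// hypothetical examples, not runtime constants.
using System;

class MapRWArithmetic
{
    const ulong Granularity = 0x10000; // assumed 64KB allocation granularity

    static ulong AlignDown(ulong v, ulong a) => v & ~(a - 1);
    static ulong AlignUp(ulong v, ulong a) => (v + a - 1) & ~(a - 1);

    static void Main()
    {
        ulong baseRX = 0x100000; // start of the reserved RX block
        ulong pRX    = 0x112345; // address the caller wants to write through
        ulong size   = 0x100;    // number of bytes to expose as RW

        ulong offset    = pRX - baseRX;                                    // 0x12345
        ulong mapOffset = AlignDown(offset, Granularity);                  // 0x10000
        ulong mapSize   = AlignUp(offset - mapOffset + size, Granularity); // 0x10000

        Console.WriteLine(
            $"map RX [0x{baseRX + mapOffset:X}..0x{baseRX + mapOffset + mapSize:X}) " +
            $"and return RW base + 0x{offset - mapOffset:X}");
    }
}

With these example numbers, a 0x100-byte request at RX offset 0x12345 maps a single 64KB window starting at block offset 0x10000, and the caller's pointer lands 0x2345 bytes into the returned RW view.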
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
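For context, here is a minimal sketch of how the helpers listed above are consumed from C#. It assumes the .NET 7 System.Runtime.Intrinsics.Vector128 surface that this PR targets; the lane values are illustrative.

using System;
using System.Runtime.Intrinsics;

class AllAnyDemo
{
    static void Main()
    {
        Vector128<int> a = Vector128.Create(5, 6, 7, 8);
        Vector128<int> b = Vector128.Create(1, 2, 3, 4);

        // "All" variants are true only if the comparison holds in every lane.
        Console.WriteLine(Vector128.GreaterThanAll(a, b));     // True
        Console.WriteLine(Vector128.LessThanOrEqualAll(b, a)); // True

        // "Any" variants are true if the comparison holds in at least one lane.
        Console.WriteLine(Vector128.GreaterThanAny(b, a)); // False
        Console.WriteLine(Vector128.LessThanAny(a, b));    // False
    }
}

The point of the change is that on Mono these calls lower to a SIMD comparison plus a horizontal reduction instead of a scalar per-lane fallback.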
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/libraries/Common/tests/System/IO/Compression/StreamHelpers.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.IO; using System.Threading.Tasks; public static partial class StreamHelpers { public static async Task<MemoryStream> CreateTempCopyStream(string path) { var bytes = File.ReadAllBytes(path); var ms = new MemoryStream(); await ms.WriteAsync(bytes, 0, bytes.Length); ms.Position = 0; return ms; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.IO; using System.Threading.Tasks; public static partial class StreamHelpers { public static async Task<MemoryStream> CreateTempCopyStream(string path) { var bytes = File.ReadAllBytes(path); var ms = new MemoryStream(); await ms.WriteAsync(bytes, 0, bytes.Length); ms.Position = 0; return ms; } }
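A hypothetical caller of the helper above, for illustration only: the ZipArchive usage and the zipPath argument are assumptions, chosen because this helper lives under the Compression test tree. The helper exists so tests mutate an in-memory copy rather than the checked-in asset.

using System;
using System.IO;
using System.IO.Compression;
using System.Threading.Tasks;

public static class StreamHelpersDemo
{
    public static async Task InspectAsync(string zipPath)
    {
        // Work against an in-memory copy of the file on disk.
        using MemoryStream ms = await StreamHelpers.CreateTempCopyStream(zipPath);
        using var archive = new ZipArchive(ms, ZipArchiveMode.Read);
        foreach (ZipArchiveEntry entry in archive.Entries)
            Console.WriteLine($"{entry.FullName}: {entry.Length} bytes");
    }
}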
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/libraries/System.Private.CoreLib/src/System/Diagnostics/Tracing/TraceLogging/EmptyStruct.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #if ES_BUILD_STANDALONE namespace Microsoft.Diagnostics.Tracing #else namespace System.Diagnostics.Tracing #endif { /// <summary> /// TraceLogging: Empty struct indicating no payload data. /// </summary> internal struct EmptyStruct { } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #if ES_BUILD_STANDALONE namespace Microsoft.Diagnostics.Tracing #else namespace System.Diagnostics.Tracing #endif { /// <summary> /// TraceLogging: Empty struct indicating no payload data. /// </summary> internal struct EmptyStruct { } }
-1
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/Dataflow/ReferenceSource/ValueNode.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Linq; using System.Text; using Mono.Cecil; using FieldDefinition = Mono.Cecil.FieldDefinition; using GenericParameter = Mono.Cecil.GenericParameter; using TypeDefinition = Mono.Cecil.TypeDefinition; namespace Mono.Linker.Dataflow { public enum ValueNodeKind { Invalid, // in case the Kind field is not initialized properly Unknown, // unknown value, has StaticType from context Null, // known value SystemType, // known value - TypeRepresented RuntimeTypeHandle, // known value - TypeRepresented KnownString, // known value - Contents ConstInt, // known value - Int32 AnnotatedString, // string with known annotation MethodParameter, // symbolic placeholder MethodReturn, // symbolic placeholder RuntimeMethodHandle, // known value - MethodRepresented SystemReflectionMethodBase, // known value - MethodRepresented RuntimeTypeHandleForGenericParameter, // symbolic placeholder for generic parameter SystemTypeForGenericParameter, // symbolic placeholder for generic parameter MergePoint, // structural, multiplexer - Values GetTypeFromString, // structural, could be known value - KnownString Array, // structural, could be known value - Array LoadField, // structural, could be known value - InstanceValue } /// <summary> /// A ValueNode represents a value in the IL dataflow analysis. It may not contain complete information as it is a /// best-effort representation. Additionally, as the analysis is linear and does not account for control flow, any /// given ValueNode may represent multiple values simultaneously. (This occurs, for example, at control flow join /// points when both paths yield values on the IL stack or in a local.) /// </summary> public abstract class ValueNode : IEquatable<ValueNode> { public ValueNode () { #if false // Helpful for debugging a cycle that has inadvertently crept into the graph if (this.DetectCycle(new HashSet<ValueNode>())) { throw new Exception("Found a cycle"); } #endif } /// <summary> /// The 'kind' of value node -- this represents the most-derived type and allows us to switch over and do /// equality checks without the cost of casting. Intermediate non-leaf types in the ValueNode hierarchy should /// be abstract. /// </summary> public ValueNodeKind Kind { get; protected set; } /// <summary> /// The IL type of the value, represented as closely as possible, but not always exact. It can be null, for /// example, when the analysis is imprecise or operating on malformed IL. /// </summary> public TypeDefinition? StaticType { get; protected set; } /// <summary> /// Allows the enumeration of the direct children of this node. The ChildCollection struct returned here /// supports 'foreach' without allocation. /// </summary> public ChildCollection Children { get { return new ChildCollection (this); } } /// <summary> /// This property allows you to enumerate all 'unique values' represented by a given ValueNode. The basic idea /// is that there will be no MergePointValues in the returned ValueNodes and all structural operations will be /// applied so that each 'unique value' can be considered on its own without regard to the structure that led to /// it. 
/// </summary> public UniqueValueCollection UniqueValuesInternal { get { return new UniqueValueCollection (this); } } /// <summary> /// This protected method is how nodes implement the UniqueValues property. It is protected because it returns /// an IEnumerable and we want to avoid allocating an enumerator for the exceedingly common case of there being /// only one value in the enumeration. The UniqueValueCollection returned by the UniqueValues property handles /// this detail. /// </summary> protected abstract IEnumerable<ValueNode> EvaluateUniqueValues (); /// <summary> /// RepresentsExactlyOneValue is used by the UniqueValues property to allow us to bypass allocating an /// enumerator to return just one value. If a node returns 'true' from RepresentsExactlyOneValue, it must also /// return that one value from GetSingleUniqueValue. If it always returns 'false', it doesn't need to implement /// GetSingleUniqueValue. /// </summary> protected virtual bool RepresentsExactlyOneValue { get { return false; } } /// <summary> /// GetSingleUniqueValue is called if, and only if, RepresentsExactlyOneValue returns true. It allows us to /// bypass the allocation of an enumerator for the common case of returning exactly one value. /// </summary> protected virtual ValueNode GetSingleUniqueValue () { // Not implemented because RepresentsExactlyOneValue returns false and, therefore, this method should be // unreachable. throw new NotImplementedException (); } protected abstract int NumChildren { get; } protected abstract ValueNode ChildAt (int index); public virtual bool Equals (ValueNode? other) { return other != null && this.Kind == other.Kind && this.StaticType == other.StaticType; } public abstract override int GetHashCode (); /// <summary> /// Each node type must implement this to stringize itself. The expectation is that it is implemented using /// ValueNodeDump.ValueNodeToString(), passing any non-ValueNode properties of interest (e.g. /// SystemTypeValue.TypeRepresented). Properties that are invariant on a particular node type /// should be omitted for clarity. /// </summary> protected abstract string NodeToString (); public override string ToString () { return NodeToString (); } public override bool Equals (object? other) { if (!(other is ValueNode)) return false; return this.Equals ((ValueNode) other); } #region Specialized Collection Nested Types /// <summary> /// ChildCollection struct is used to wrap the operations on a node involving its children. In particular, the /// struct implements a GetEnumerator method that is used to allow "foreach (ValueNode node in myNode.Children)" /// without heap allocations. /// </summary> public struct ChildCollection : IEnumerable<ValueNode> { /// <summary> /// Enumerator for children of a ValueNode. Allows foreach(var child in node.Children) to work without /// allocating a heap-based enumerator. /// </summary> public struct Enumerator : IEnumerator<ValueNode> { int _index; readonly ValueNode _parent; public Enumerator (ValueNode parent) { _parent = parent; _index = -1; } public ValueNode Current { get { return _parent.ChildAt (_index); } } object System.Collections.IEnumerator.Current { get { return Current; } } public bool MoveNext () { _index++; return (_parent != null) ? (_index < _parent.NumChildren) : false; } public void Reset () { _index = -1; } public void Dispose () { } } readonly ValueNode _parentNode; public ChildCollection (ValueNode parentNode) { _parentNode = parentNode; } // Used by C# 'foreach', when strongly typed, to avoid allocation. 
public Enumerator GetEnumerator () { return new Enumerator (_parentNode); } IEnumerator<ValueNode> IEnumerable<ValueNode>.GetEnumerator () { // note the boxing! return new Enumerator (_parentNode); } System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator () { // note the boxing! return new Enumerator (_parentNode); } public int Count { get { return (_parentNode != null) ? _parentNode.NumChildren : 0; } } } /// <summary> /// UniqueValueCollection is used to wrap calls to ValueNode.EvaluateUniqueValues. If a ValueNode represents /// only one value, then foreach(ValueNode value in node.UniqueValues) will not allocate a heap-based enumerator. /// /// This is implemented by having each ValueNode tell us whether or not it represents exactly one value. /// If it does, we fetch it with ValueNode.GetSingleUniqueValue(), otherwise, we fall back to the usual heap- /// based IEnumerable returned by ValueNode.EvaluateUniqueValues. /// </summary> public struct UniqueValueCollection : IEnumerable<ValueNode> { readonly IEnumerable<ValueNode>? _multiValueEnumerable; readonly ValueNode? _treeNode; public UniqueValueCollection (ValueNode node) { if (node.RepresentsExactlyOneValue) { _multiValueEnumerable = null; _treeNode = node; } else { _multiValueEnumerable = node.EvaluateUniqueValues (); _treeNode = null; } } public Enumerator GetEnumerator () { return new Enumerator (_treeNode, _multiValueEnumerable); } IEnumerator<ValueNode> IEnumerable<ValueNode>.GetEnumerator () { if (_multiValueEnumerable != null) { return _multiValueEnumerable.GetEnumerator (); } // note the boxing! return GetEnumerator (); } System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator () { if (_multiValueEnumerable != null) { return _multiValueEnumerable.GetEnumerator (); } // note the boxing! return GetEnumerator (); } public struct Enumerator : IEnumerator<ValueNode> { readonly IEnumerator<ValueNode>? _multiValueEnumerator; readonly ValueNode? _singleValueNode; int _index; public Enumerator (ValueNode? treeNode, IEnumerable<ValueNode>? multiValueEnumerable) { Debug.Assert (treeNode != null || multiValueEnumerable != null); _singleValueNode = treeNode?.GetSingleUniqueValue (); _multiValueEnumerator = multiValueEnumerable?.GetEnumerator (); _index = -1; } public void Reset () { if (_multiValueEnumerator != null) { _multiValueEnumerator.Reset (); return; } _index = -1; } public bool MoveNext () { if (_multiValueEnumerator != null) return _multiValueEnumerator.MoveNext (); _index++; return _index == 0; } public ValueNode Current { get { if (_multiValueEnumerator != null) return _multiValueEnumerator.Current; if (_index == 0) return _singleValueNode!; throw new InvalidOperationException (); } } object System.Collections.IEnumerator.Current { get { return Current; } } public void Dispose () { } } } #endregion } /// <summary> /// LeafValueNode represents a 'leaf' in the expression tree. In other words, the node has no ValueNode children. /// It *may* still have non-ValueNode 'properties' that are interesting. This class serves, primarily, as a way to /// collect up the very common implementation of NumChildren/ChildAt for leaf nodes and the "represents exactly one /// value" optimization. These things aren't on the ValueNode base class because, otherwise, new node types /// deriving from ValueNode may 'forget' to implement these things. So this class allows them to remain abstract in /// ValueNode while still having a common implementation for all the leaf nodes.
/// </summary> public abstract class LeafValueNode : ValueNode { protected override int NumChildren { get { return 0; } } protected override ValueNode ChildAt (int index) { throw new InvalidOperationException (); } protected override bool RepresentsExactlyOneValue { get { return true; } } protected override ValueNode GetSingleUniqueValue () { return this; } protected override IEnumerable<ValueNode> EvaluateUniqueValues () { // Leaf values should not represent more than one value. This method should be unreachable as long as // RepresentsExactlyOneValue returns true. throw new NotImplementedException (); } } // These are extension methods because we want to allow the use of them on null 'this' pointers. internal static class ValueNodeExtensions { /// <summary> /// Returns true if a ValueNode graph contains a cycle /// </summary> /// <param name="node">Node to evaluate</param> /// <param name="seenNodes">Set of nodes previously seen on the current arc. Callers may pass a non-empty set /// to test whether adding that set to this node would create a cycle. Contents will be modified by the walk /// and should not be used by the caller after returning</param> /// <param name="allNodesSeen">Optional. The set of all nodes encountered during a walk after DetectCycle returns</param> /// <returns></returns> public static bool DetectCycle (this ValueNode? node, HashSet<ValueNode> seenNodes, HashSet<ValueNode>? allNodesSeen) { if (node == null) return false; if (seenNodes.Contains (node)) return true; seenNodes.Add (node); if (allNodesSeen != null) { allNodesSeen.Add (node); } bool foundCycle = false; switch (node.Kind) { // // Leaf nodes // case ValueNodeKind.Unknown: case ValueNodeKind.Null: case ValueNodeKind.SystemType: case ValueNodeKind.RuntimeTypeHandle: case ValueNodeKind.KnownString: case ValueNodeKind.AnnotatedString: case ValueNodeKind.ConstInt: case ValueNodeKind.MethodParameter: case ValueNodeKind.MethodReturn: case ValueNodeKind.SystemTypeForGenericParameter: case ValueNodeKind.RuntimeTypeHandleForGenericParameter: case ValueNodeKind.SystemReflectionMethodBase: case ValueNodeKind.RuntimeMethodHandle: case ValueNodeKind.LoadField: break; // // Nodes with children // case ValueNodeKind.MergePoint: foreach (ValueNode val in ((MergePointValue) node).Values) { if (val.DetectCycle (seenNodes, allNodesSeen)) { foundCycle = true; } } break; case ValueNodeKind.GetTypeFromString: GetTypeFromStringValue gtfsv = (GetTypeFromStringValue) node; foundCycle = gtfsv.AssemblyIdentity.DetectCycle (seenNodes, allNodesSeen); foundCycle |= gtfsv.NameString.DetectCycle (seenNodes, allNodesSeen); break; case ValueNodeKind.Array: ArrayValue av = (ArrayValue) node; foundCycle = av.Size.DetectCycle (seenNodes, allNodesSeen); foreach (ValueBasicBlockPair pair in av.IndexValues.Values) { foundCycle |= pair.Value.DetectCycle (seenNodes, allNodesSeen); } break; default: throw new Exception (String.Format ("Unknown node kind: {0}", node.Kind)); } seenNodes.Remove (node); return foundCycle; } public static ValueNode.UniqueValueCollection UniqueValues (this ValueNode? node) { if (node == null) return new ValueNode.UniqueValueCollection (UnknownValue.Instance); return node.UniqueValuesInternal; } public static int? AsConstInt (this ValueNode? node) { if (node is ConstIntValue constInt) return constInt.Value; return null; } } internal static class ValueNodeDump { internal static string ValueNodeToString (ValueNode? 
node, params object[] args) { if (node == null) return "<null>"; StringBuilder sb = new StringBuilder (); sb.Append (node.Kind.ToString ()); sb.Append ("("); if (args != null) { for (int i = 0; i < args.Length; i++) { if (i > 0) sb.Append (","); sb.Append (args[i] == null ? "<null>" : args[i].ToString ()); } } sb.Append (")"); return sb.ToString (); } static string GetIndent (int level) { StringBuilder sb = new StringBuilder (level * 2); for (int i = 0; i < level; i++) sb.Append (" "); return sb.ToString (); } public static void DumpTree (this ValueNode node, System.IO.TextWriter? writer = null, int indentLevel = 0) { if (writer == null) writer = Console.Out; writer.Write (GetIndent (indentLevel)); if (node == null) { writer.WriteLine ("<null>"); return; } writer.WriteLine (node); foreach (ValueNode child in node.Children) { child.DumpTree (writer, indentLevel + 1); } } } /// <summary> /// Represents an unknown value. /// </summary> class UnknownValue : LeafValueNode { private UnknownValue () { Kind = ValueNodeKind.Unknown; StaticType = null; } public static UnknownValue Instance { get; } = new UnknownValue (); public override bool Equals (ValueNode? other) { return base.Equals (other); } public override int GetHashCode () { // All instances of UnknownValue are equivalent, so they all hash to the same hashcode. This one was // chosen for no particular reason at all. return 0x98052; } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this); } } class NullValue : LeafValueNode { private NullValue () { Kind = ValueNodeKind.Null; StaticType = null; } public override bool Equals (ValueNode? other) { return base.Equals (other); } public static NullValue Instance { get; } = new NullValue (); public override int GetHashCode () { // All instances of NullValue are equivalent, so they all hash to the same hashcode. This one was // chosen for no particular reason at all. return 0x90210; } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this); } } /// <summary> /// This is a known System.Type value. TypeRepresented is the 'value' of the System.Type. /// </summary> class SystemTypeValue : LeafValueNode { public SystemTypeValue (TypeDefinition typeRepresented) { Kind = ValueNodeKind.SystemType; // Should be System.Type - but we don't have any use case where tracking it like that would matter StaticType = null; TypeRepresented = typeRepresented; } public TypeDefinition TypeRepresented { get; private set; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; return Equals (this.TypeRepresented, ((SystemTypeValue) other).TypeRepresented); } public override int GetHashCode () { return HashCode.Combine (Kind, TypeRepresented); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, TypeRepresented); } } /// <summary> /// This is the System.RuntimeTypeHandle equivalent to a <see cref="SystemTypeValue"/> node. /// </summary> class RuntimeTypeHandleValue : LeafValueNode { public RuntimeTypeHandleValue (TypeDefinition typeRepresented) { Kind = ValueNodeKind.RuntimeTypeHandle; // Should be System.RuntimeTypeHandle, but we don't have a use case for it like that StaticType = null; TypeRepresented = typeRepresented; } public TypeDefinition TypeRepresented { get; } public override bool Equals (ValueNode? 
other) { if (!base.Equals (other)) return false; return Equals (this.TypeRepresented, ((RuntimeTypeHandleValue) other).TypeRepresented); } public override int GetHashCode () { return HashCode.Combine (Kind, TypeRepresented); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, TypeRepresented); } } /// <summary> /// This is a System.Type value which represents a generic parameter (basically the result of typeof(T)) /// Its actual type is unknown, but it can have annotations. /// </summary> class SystemTypeForGenericParameterValue : LeafValueWithDynamicallyAccessedMemberNode { public SystemTypeForGenericParameterValue (GenericParameter genericParameter, DynamicallyAccessedMemberTypes dynamicallyAccessedMemberTypes) : base (genericParameter) { Kind = ValueNodeKind.SystemTypeForGenericParameter; // Should be System.Type, but we don't have a use case for it StaticType = null; GenericParameter = genericParameter; DynamicallyAccessedMemberTypes = dynamicallyAccessedMemberTypes; } public GenericParameter GenericParameter { get; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; var otherValue = (SystemTypeForGenericParameterValue) other; return this.GenericParameter == otherValue.GenericParameter && this.DynamicallyAccessedMemberTypes == otherValue.DynamicallyAccessedMemberTypes; } public override int GetHashCode () { return HashCode.Combine (Kind, GenericParameter, DynamicallyAccessedMemberTypes); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, GenericParameter, DynamicallyAccessedMemberTypes); } } /// <summary> /// This is the System.RuntimeTypeHandle equivalent to a <see cref="SystemTypeForGenericParameterValue"/> node. /// </summary> class RuntimeTypeHandleForGenericParameterValue : LeafValueNode { public RuntimeTypeHandleForGenericParameterValue (GenericParameter genericParameter) { Kind = ValueNodeKind.RuntimeTypeHandleForGenericParameter; // Should be System.RuntimeTypeHandle, but we don't have a use case for it StaticType = null; GenericParameter = genericParameter; } public GenericParameter GenericParameter { get; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; return Equals (this.GenericParameter, ((RuntimeTypeHandleForGenericParameterValue) other).GenericParameter); } public override int GetHashCode () { return HashCode.Combine (Kind, GenericParameter); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, GenericParameter); } } /// <summary> /// This is the System.RuntimeMethodHandle equivalent to a <see cref="SystemReflectionMethodBaseValue"/> node. /// </summary> class RuntimeMethodHandleValue : LeafValueNode { public RuntimeMethodHandleValue (MethodDefinition methodRepresented) { Kind = ValueNodeKind.RuntimeMethodHandle; // Should be System.RuntimeMethodHandle, but we don't have a use case for it StaticType = null; MethodRepresented = methodRepresented; } public MethodDefinition MethodRepresented { get; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; return Equals (this.MethodRepresented, ((RuntimeMethodHandleValue) other).MethodRepresented); } public override int GetHashCode () { return HashCode.Combine (Kind, MethodRepresented); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, MethodRepresented); } } /// <summary> /// This is a known System.Reflection.MethodBase value.
MethodRepresented is the 'value' of the MethodBase. /// </summary> class SystemReflectionMethodBaseValue : LeafValueNode { public SystemReflectionMethodBaseValue (MethodDefinition methodRepresented) { Kind = ValueNodeKind.SystemReflectionMethodBase; // Should be System.Reflection.MethodBase, but we don't have a use case for it StaticType = null; MethodRepresented = methodRepresented; } public MethodDefinition MethodRepresented { get; private set; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; return Equals (this.MethodRepresented, ((SystemReflectionMethodBaseValue) other).MethodRepresented); } public override int GetHashCode () { return HashCode.Combine (Kind, MethodRepresented); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, MethodRepresented); } } /// <summary> /// A known string - such as the result of a ldstr. /// </summary> class KnownStringValue : LeafValueNode { public KnownStringValue (string contents) { Kind = ValueNodeKind.KnownString; // Should be System.String, but we don't have a use case for it StaticType = null; Contents = contents; } public string Contents { get; private set; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; return this.Contents == ((KnownStringValue) other).Contents; } public override int GetHashCode () { return HashCode.Combine (Kind, Contents); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, "\"" + Contents + "\""); } } /// <summary> /// Base class for all nodes which can have dynamically accessed member annotation. /// </summary> abstract class LeafValueWithDynamicallyAccessedMemberNode : LeafValueNode { public LeafValueWithDynamicallyAccessedMemberNode (IMetadataTokenProvider sourceContext) { SourceContext = sourceContext; } public IMetadataTokenProvider SourceContext { get; private set; } /// <summary> /// The bitfield of dynamically accessed member types the node guarantees /// </summary> public DynamicallyAccessedMemberTypes DynamicallyAccessedMemberTypes { get; protected set; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; var otherValue = (LeafValueWithDynamicallyAccessedMemberNode) other; return SourceContext == otherValue.SourceContext && DynamicallyAccessedMemberTypes == otherValue.DynamicallyAccessedMemberTypes; } } /// <summary> /// A value that came from a method parameter - such as the result of a ldarg. /// </summary> class MethodParameterValue : LeafValueWithDynamicallyAccessedMemberNode { public MethodParameterValue (TypeDefinition? staticType, int parameterIndex, DynamicallyAccessedMemberTypes dynamicallyAccessedMemberTypes, IMetadataTokenProvider sourceContext) : base (sourceContext) { Kind = ValueNodeKind.MethodParameter; StaticType = staticType; ParameterIndex = parameterIndex; DynamicallyAccessedMemberTypes = dynamicallyAccessedMemberTypes; } public int ParameterIndex { get; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; var otherValue = (MethodParameterValue) other; return this.ParameterIndex == otherValue.ParameterIndex; } public override int GetHashCode () { return HashCode.Combine (Kind, ParameterIndex, DynamicallyAccessedMemberTypes); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, ParameterIndex, DynamicallyAccessedMemberTypes); } } /// <summary> /// String with a known annotation. 
/// </summary> class AnnotatedStringValue : LeafValueWithDynamicallyAccessedMemberNode { public AnnotatedStringValue (IMetadataTokenProvider sourceContext, DynamicallyAccessedMemberTypes dynamicallyAccessedMemberTypes) : base (sourceContext) { Kind = ValueNodeKind.AnnotatedString; // Should be System.String, but we don't have a use case for it StaticType = null; DynamicallyAccessedMemberTypes = dynamicallyAccessedMemberTypes; } public override bool Equals (ValueNode? other) { return base.Equals (other); } public override int GetHashCode () { return HashCode.Combine (Kind, DynamicallyAccessedMemberTypes); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, DynamicallyAccessedMemberTypes); } } /// <summary> /// Return value from a method /// </summary> class MethodReturnValue : LeafValueWithDynamicallyAccessedMemberNode { public MethodReturnValue (TypeDefinition? staticType, DynamicallyAccessedMemberTypes dynamicallyAccessedMemberTypes, IMetadataTokenProvider sourceContext) : base (sourceContext) { Kind = ValueNodeKind.MethodReturn; StaticType = staticType; DynamicallyAccessedMemberTypes = dynamicallyAccessedMemberTypes; } public override bool Equals (ValueNode? other) { return base.Equals (other); } public override int GetHashCode () { return HashCode.Combine (Kind, DynamicallyAccessedMemberTypes); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, DynamicallyAccessedMemberTypes); } } /// <summary> /// A merge point commonly occurs due to control flow in a method body. It represents a set of values /// from different paths through the method. It is the reason for EvaluateUniqueValues, which essentially /// provides an enumeration over all the concrete values represented by a given ValueNode after 'erasing' /// the merge point nodes. /// </summary> class MergePointValue : ValueNode { private MergePointValue (ValueNode one, ValueNode two) { Kind = ValueNodeKind.MergePoint; StaticType = null; m_values = new ValueNodeHashSet (); if (one.Kind == ValueNodeKind.MergePoint) { MergePointValue mpvOne = (MergePointValue) one; foreach (ValueNode value in mpvOne.Values) m_values.Add (value); } else m_values.Add (one); if (two.Kind == ValueNodeKind.MergePoint) { MergePointValue mpvTwo = (MergePointValue) two; foreach (ValueNode value in mpvTwo.Values) m_values.Add (value); } else m_values.Add (two); } public MergePointValue () { Kind = ValueNodeKind.MergePoint; m_values = new ValueNodeHashSet (); } public void AddValue (ValueNode node) { // we are mutating our state, so we must invalidate any cached knowledge //InvalidateIsOpen (); if (node.Kind == ValueNodeKind.MergePoint) { foreach (ValueNode value in ((MergePointValue) node).Values) m_values.Add (value); } else m_values.Add (node); #if false if (this.DetectCycle(new HashSet<ValueNode>())) { throw new Exception("Found a cycle"); } #endif } readonly ValueNodeHashSet m_values; public ValueNodeHashSet Values { get { return m_values; } } protected override int NumChildren { get { return Values.Count; } } protected override ValueNode ChildAt (int index) { if (index < NumChildren) return Values.ElementAt (index); throw new InvalidOperationException (); } public static ValueNode? MergeValues (ValueNode? one, ValueNode? 
two) { if (one == null) return two; else if (two == null) return one; else if (one.Equals (two)) return one; else return new MergePointValue (one, two); } protected override IEnumerable<ValueNode> EvaluateUniqueValues () { foreach (ValueNode value in Values) { foreach (ValueNode uniqueValue in value.UniqueValuesInternal) { yield return uniqueValue; } } } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; MergePointValue otherMpv = (MergePointValue) other; if (this.Values.Count != otherMpv.Values.Count) return false; foreach (ValueNode value in this.Values) { if (!otherMpv.Values.Contains (value)) return false; } return true; } public override int GetHashCode () { return HashCode.Combine (Kind, Values); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this); } } delegate TypeDefinition TypeResolver (string assemblyString, string typeString); /// <summary> /// The result of a Type.GetType. /// AssemblyIdentity is the scope in which to resolve if the type name string is not assembly-qualified. /// </summary> #pragma warning disable CA1812 // GetTypeFromStringValue is never instantiated class GetTypeFromStringValue : ValueNode { private readonly TypeResolver _resolver; public GetTypeFromStringValue (TypeResolver resolver, ValueNode assemblyIdentity, ValueNode nameString) { _resolver = resolver; Kind = ValueNodeKind.GetTypeFromString; // Should be System.Type, but we don't have a use case for it StaticType = null; AssemblyIdentity = assemblyIdentity; NameString = nameString; } public ValueNode AssemblyIdentity { get; private set; } public ValueNode NameString { get; private set; } protected override int NumChildren { get { return 2; } } protected override ValueNode ChildAt (int index) { if (index == 0) return AssemblyIdentity; if (index == 1) return NameString; throw new InvalidOperationException (); } protected override IEnumerable<ValueNode> EvaluateUniqueValues () { HashSet<string>? names = null; foreach (ValueNode nameStringValue in NameString.UniqueValuesInternal) { if (nameStringValue.Kind == ValueNodeKind.KnownString) { if (names == null) { names = new HashSet<string> (); } string typeName = ((KnownStringValue) nameStringValue).Contents; names.Add (typeName); } } bool foundAtLeastOne = false; if (names != null) { foreach (ValueNode assemblyValue in AssemblyIdentity.UniqueValuesInternal) { if (assemblyValue.Kind == ValueNodeKind.KnownString) { string assemblyName = ((KnownStringValue) assemblyValue).Contents; foreach (string name in names) { TypeDefinition typeDefinition = _resolver (assemblyName, name); if (typeDefinition != null) { foundAtLeastOne = true; yield return new SystemTypeValue (typeDefinition); } } } } } if (!foundAtLeastOne) yield return UnknownValue.Instance; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; GetTypeFromStringValue otherGtfs = (GetTypeFromStringValue) other; return this.AssemblyIdentity.Equals (otherGtfs.AssemblyIdentity) && this.NameString.Equals (otherGtfs.NameString); } public override int GetHashCode () { return HashCode.Combine (Kind, AssemblyIdentity, NameString); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, NameString); } } /// <summary> /// A representation of a ldfld. Note that we don't have a representation of objects containing fields /// so there isn't much that can be done with this node type yet. 
/// </summary> class LoadFieldValue : LeafValueWithDynamicallyAccessedMemberNode { public LoadFieldValue (TypeDefinition? staticType, FieldDefinition fieldToLoad, DynamicallyAccessedMemberTypes dynamicallyAccessedMemberTypes) : base (fieldToLoad) { Kind = ValueNodeKind.LoadField; StaticType = staticType; Field = fieldToLoad; DynamicallyAccessedMemberTypes = dynamicallyAccessedMemberTypes; } public FieldDefinition Field { get; private set; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; LoadFieldValue otherLfv = (LoadFieldValue) other; return Equals (this.Field, otherLfv.Field); } public override int GetHashCode () { return HashCode.Combine (Kind, Field, DynamicallyAccessedMemberTypes); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, Field, DynamicallyAccessedMemberTypes); } } /// <summary> /// Represents a ldc on an int32. /// </summary> class ConstIntValue : LeafValueNode { public ConstIntValue (int value) { Kind = ValueNodeKind.ConstInt; // Should be System.Int32, but we don't have a usecase for it right now StaticType = null; Value = value; } public int Value { get; private set; } public override int GetHashCode () { return HashCode.Combine (Kind, Value); } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; ConstIntValue otherCiv = (ConstIntValue) other; return Value == otherCiv.Value; } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, Value); } } class ArrayValue : ValueNode { protected override int NumChildren => 1 + IndexValues.Count; /// <summary> /// Constructs an array value of the given size /// </summary> public ArrayValue (ValueNode? size, TypeReference elementType) { Kind = ValueNodeKind.Array; // Should be System.Array (or similar), but we don't have a use case for it StaticType = null; Size = size ?? UnknownValue.Instance; ElementType = elementType; IndexValues = new Dictionary<int, ValueBasicBlockPair> (); } private ArrayValue (ValueNode size, TypeReference elementType, Dictionary<int, ValueBasicBlockPair> indexValues) : this (size, elementType) { IndexValues = indexValues; } public ValueNode Size { get; } public TypeReference ElementType { get; } public Dictionary<int, ValueBasicBlockPair> IndexValues { get; } public override int GetHashCode () { return HashCode.Combine (Kind, Size); } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; ArrayValue otherArr = (ArrayValue) other; bool equals = Size.Equals (otherArr.Size); equals &= IndexValues.Count == otherArr.IndexValues.Count; if (!equals) return false; // If both sets T and O are the same size and "T intersect O" is empty, then T == O. HashSet<KeyValuePair<int, ValueBasicBlockPair>> thisValueSet = new (IndexValues); HashSet<KeyValuePair<int, ValueBasicBlockPair>> otherValueSet = new (otherArr.IndexValues); thisValueSet.ExceptWith (otherValueSet); return thisValueSet.Count == 0; } protected override string NodeToString () { // TODO: Use StringBuilder and remove Linq usage. 
return $"(Array Size:{ValueNodeDump.ValueNodeToString (this, Size)}, Values:({string.Join (',', IndexValues.Select (v => $"({v.Key},{ValueNodeDump.ValueNodeToString (v.Value.Value)})"))})"; } protected override IEnumerable<ValueNode> EvaluateUniqueValues () { foreach (var sizeConst in Size.UniqueValuesInternal) yield return new ArrayValue (sizeConst, ElementType, IndexValues); } protected override ValueNode ChildAt (int index) { if (index == 0) return Size; if (index - 1 <= IndexValues.Count) return IndexValues.Values.ElementAt (index - 1).Value!; throw new InvalidOperationException (); } } #region ValueNode Collections public class ValueNodeList : List<ValueNode?> { public ValueNodeList () { } public ValueNodeList (int capacity) : base (capacity) { } public ValueNodeList (List<ValueNode> other) : base (other) { } public override int GetHashCode () { return HashUtils.CalcHashCodeEnumerable (this); } public override bool Equals (object? other) { if (!(other is ValueNodeList otherList)) return false; if (otherList.Count != Count) return false; for (int i = 0; i < Count; i++) { if (!(otherList[i]?.Equals (this[i]) ?? (this[i] is null))) return false; } return true; } } class ValueNodeHashSet : HashSet<ValueNode> { public override int GetHashCode () { return HashUtils.CalcHashCodeEnumerable (this); } public override bool Equals (object? other) { if (!(other is ValueNodeHashSet otherSet)) return false; if (otherSet.Count != Count) return false; IEnumerator<ValueNode> thisEnumerator = this.GetEnumerator (); IEnumerator<ValueNode> otherEnumerator = otherSet.GetEnumerator (); for (int i = 0; i < Count; i++) { thisEnumerator.MoveNext (); otherEnumerator.MoveNext (); if (!thisEnumerator.Current.Equals (otherEnumerator.Current)) return false; } return true; } } #endregion static class HashUtils { public static int CalcHashCodeEnumerable<T> (IEnumerable<T> list) where T : class? { HashCode hashCode = new HashCode (); foreach (var item in list) hashCode.Add (item); return hashCode.ToHashCode (); } } public struct ValueBasicBlockPair { public ValueNode? Value; public int BasicBlockIndex; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Linq; using System.Text; using Mono.Cecil; using FieldDefinition = Mono.Cecil.FieldDefinition; using GenericParameter = Mono.Cecil.GenericParameter; using TypeDefinition = Mono.Cecil.TypeDefinition; namespace Mono.Linker.Dataflow { public enum ValueNodeKind { Invalid, // in case the Kind field is not initialized properly Unknown, // unknown value, has StaticType from context Null, // known value SystemType, // known value - TypeRepresented RuntimeTypeHandle, // known value - TypeRepresented KnownString, // known value - Contents ConstInt, // known value - Int32 AnnotatedString, // string with known annotation MethodParameter, // symbolic placeholder MethodReturn, // symbolic placeholder RuntimeMethodHandle, // known value - MethodRepresented SystemReflectionMethodBase, // known value - MethodRepresented RuntimeTypeHandleForGenericParameter, // symbolic placeholder for generic parameter SystemTypeForGenericParameter, // symbolic placeholder for generic parameter MergePoint, // structural, multiplexer - Values GetTypeFromString, // structural, could be known value - KnownString Array, // structural, could be known value - Array LoadField, // structural, could be known value - InstanceValue } /// <summary> /// A ValueNode represents a value in the IL dataflow analysis. It may not contain complete information as it is a /// best-effort representation. Additionally, as the analysis is linear and does not account for control flow, any /// given ValueNode may represent multiple values simultaneously. (This occurs, for example, at control flow join /// points when both paths yield values on the IL stack or in a local.) /// </summary> public abstract class ValueNode : IEquatable<ValueNode> { public ValueNode () { #if false // Helpful for debugging a cycle that has inadvertently crept into the graph if (this.DetectCycle(new HashSet<ValueNode>())) { throw new Exception("Found a cycle"); } #endif } /// <summary> /// The 'kind' of value node -- this represents the most-derived type and allows us to switch over and do /// equality checks without the cost of casting. Intermediate non-leaf types in the ValueNode hierarchy should /// be abstract. /// </summary> public ValueNodeKind Kind { get; protected set; } /// <summary> /// The IL type of the value, represented as closely as possible, but not always exact. It can be null, for /// example, when the analysis is imprecise or operating on malformed IL. /// </summary> public TypeDefinition? StaticType { get; protected set; } /// <summary> /// Allows the enumeration of the direct children of this node. The ChildCollection struct returned here /// supports 'foreach' without allocation. /// </summary> public ChildCollection Children { get { return new ChildCollection (this); } } /// <summary> /// This property allows you to enumerate all 'unique values' represented by a given ValueNode. The basic idea /// is that there will be no MergePointValues in the returned ValueNodes and all structural operations will be /// applied so that each 'unique value' can be considered on its own without regard to the structure that led to /// it. 
/// </summary> public UniqueValueCollection UniqueValuesInternal { get { return new UniqueValueCollection (this); } } /// <summary> /// This protected method is how nodes implement the UniqueValues property. It is protected because it returns /// an IEnumerable and we want to avoid allocating an enumerator for the exceedingly common case of there being /// only one value in the enumeration. The UniqueValueCollection returned by the UniqueValues property handles /// this detail. /// </summary> protected abstract IEnumerable<ValueNode> EvaluateUniqueValues (); /// <summary> /// RepresentsExactlyOneValue is used by the UniqueValues property to allow us to bypass allocating an /// enumerator to return just one value. If a node returns 'true' from RepresentsExactlyOneValue, it must also /// return that one value from GetSingleUniqueValue. If it always returns 'false', it doesn't need to implement /// GetSingleUniqueValue. /// </summary> protected virtual bool RepresentsExactlyOneValue { get { return false; } } /// <summary> /// GetSingleUniqueValue is called if, and only if, RepresentsExactlyOneValue returns true. It allows us to /// bypass the allocation of an enumerator for the common case of returning exactly one value. /// </summary> protected virtual ValueNode GetSingleUniqueValue () { // Not implemented because RepresentsExactlyOneValue returns false and, therefore, this method should be // unreachable. throw new NotImplementedException (); } protected abstract int NumChildren { get; } protected abstract ValueNode ChildAt (int index); public virtual bool Equals (ValueNode? other) { return other != null && this.Kind == other.Kind && this.StaticType == other.StaticType; } public abstract override int GetHashCode (); /// <summary> /// Each node type must implement this to stringize itself. The expectation is that it is implemented using /// ValueNodeDump.ValueNodeToString(), passing any non-ValueNode properties of interest (e.g. /// SystemTypeValue.TypeRepresented). Properties that are invariant on a particular node type /// should be omitted for clarity. /// </summary> protected abstract string NodeToString (); public override string ToString () { return NodeToString (); } public override bool Equals (object? other) { if (!(other is ValueNode)) return false; return this.Equals ((ValueNode) other); } #region Specialized Collection Nested Types /// <summary> /// ChildCollection struct is used to wrap the operations on a node involving its children. In particular, the /// struct implements a GetEnumerator method that is used to allow "foreach (ValueNode node in myNode.Children)" /// without heap allocations. /// </summary> public struct ChildCollection : IEnumerable<ValueNode> { /// <summary> /// Enumerator for children of a ValueNode. Allows foreach(var child in node.Children) to work without /// allocating a heap-based enumerator. /// </summary> public struct Enumerator : IEnumerator<ValueNode> { int _index; readonly ValueNode _parent; public Enumerator (ValueNode parent) { _parent = parent; _index = -1; } public ValueNode Current { get { return _parent.ChildAt (_index); } } object System.Collections.IEnumerator.Current { get { return Current; } } public bool MoveNext () { _index++; return (_parent != null) ? (_index < _parent.NumChildren) : false; } public void Reset () { _index = -1; } public void Dispose () { } } readonly ValueNode _parentNode; public ChildCollection (ValueNode parentNode) { _parentNode = parentNode; } // Used by C# 'foreach', when strongly typed, to avoid allocation. 
public Enumerator GetEnumerator () { return new Enumerator (_parentNode); } IEnumerator<ValueNode> IEnumerable<ValueNode>.GetEnumerator () { // note the boxing! return new Enumerator (_parentNode); } System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator () { // note the boxing! return new Enumerator (_parentNode); } public int Count { get { return (_parentNode != null) ? _parentNode.NumChildren : 0; } } } /// <summary> /// UniqueValueCollection is used to wrap calls to ValueNode.EvaluateUniqueValues. If a ValueNode represents /// only one value, then foreach(ValueNode value in node.UniqueValues) will not allocate a heap-based enumerator. /// /// This is implemented by having each ValueNode tell us whether or not it represents exactly one value or not. /// If it does, we fetch it with ValueNode.GetSingleUniqueValue(), otherwise, we fall back to the usual heap- /// based IEnumerable returned by ValueNode.EvaluateUniqueValues. /// </summary> public struct UniqueValueCollection : IEnumerable<ValueNode> { readonly IEnumerable<ValueNode>? _multiValueEnumerable; readonly ValueNode? _treeNode; public UniqueValueCollection (ValueNode node) { if (node.RepresentsExactlyOneValue) { _multiValueEnumerable = null; _treeNode = node; } else { _multiValueEnumerable = node.EvaluateUniqueValues (); _treeNode = null; } } public Enumerator GetEnumerator () { return new Enumerator (_treeNode, _multiValueEnumerable); } IEnumerator<ValueNode> IEnumerable<ValueNode>.GetEnumerator () { if (_multiValueEnumerable != null) { return _multiValueEnumerable.GetEnumerator (); } // note the boxing! return GetEnumerator (); } System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator () { if (_multiValueEnumerable != null) { return _multiValueEnumerable.GetEnumerator (); } // note the boxing! return GetEnumerator (); } public struct Enumerator : IEnumerator<ValueNode> { readonly IEnumerator<ValueNode>? _multiValueEnumerator; readonly ValueNode? _singleValueNode; int _index; public Enumerator (ValueNode? treeNode, IEnumerable<ValueNode>? multiValueEnumerable) { Debug.Assert (treeNode != null || multiValueEnumerable != null); _singleValueNode = treeNode?.GetSingleUniqueValue (); _multiValueEnumerator = multiValueEnumerable?.GetEnumerator (); _index = -1; } public void Reset () { if (_multiValueEnumerator != null) { _multiValueEnumerator.Reset (); return; } _index = -1; } public bool MoveNext () { if (_multiValueEnumerator != null) return _multiValueEnumerator.MoveNext (); _index++; return _index == 0; } public ValueNode Current { get { if (_multiValueEnumerator != null) return _multiValueEnumerator.Current; if (_index == 0) return _singleValueNode!; throw new InvalidOperationException (); } } object System.Collections.IEnumerator.Current { get { return Current; } } public void Dispose () { } } } #endregion } /// <summary> /// LeafValueNode represents a 'leaf' in the expression tree. In other words, the node has no ValueNode children. /// It *may* still have non-ValueNode 'properties' that are interesting. This class serves, primarily, as a way to /// collect up the very common implementation of NumChildren/ChildAt for leaf nodes and the "represents exactly one /// value" optimization. These things aren't on the ValueNode base class because, otherwise, new node types /// deriving from ValueNode may 'forget' to implement these things. So this class allows them to remain abstract in /// ValueNode while still having a common implementation for all the leaf nodes. 
/// </summary> public abstract class LeafValueNode : ValueNode { protected override int NumChildren { get { return 0; } } protected override ValueNode ChildAt (int index) { throw new InvalidOperationException (); } protected override bool RepresentsExactlyOneValue { get { return true; } } protected override ValueNode GetSingleUniqueValue () { return this; } protected override IEnumerable<ValueNode> EvaluateUniqueValues () { // Leaf values should not represent more than one value. This method should be unreachable as long as // RepresentsExactlyOneValue returns true. throw new NotImplementedException (); } } // These are extension methods because we want to allow the use of them on null 'this' pointers. internal static class ValueNodeExtensions { /// <summary> /// Returns true if a ValueNode graph contains a cycle /// </summary> /// <param name="node">Node to evaluate</param> /// <param name="seenNodes">Set of nodes previously seen on the current arc. Callers may pass a non-empty set /// to test whether adding that set to this node would create a cycle. Contents will be modified by the walk /// and should not be used by the caller after returning</param> /// <param name="allNodesSeen">Optional. The set of all nodes encountered during a walk after DetectCycle returns</param> /// <returns></returns> public static bool DetectCycle (this ValueNode? node, HashSet<ValueNode> seenNodes, HashSet<ValueNode>? allNodesSeen) { if (node == null) return false; if (seenNodes.Contains (node)) return true; seenNodes.Add (node); if (allNodesSeen != null) { allNodesSeen.Add (node); } bool foundCycle = false; switch (node.Kind) { // // Leaf nodes // case ValueNodeKind.Unknown: case ValueNodeKind.Null: case ValueNodeKind.SystemType: case ValueNodeKind.RuntimeTypeHandle: case ValueNodeKind.KnownString: case ValueNodeKind.AnnotatedString: case ValueNodeKind.ConstInt: case ValueNodeKind.MethodParameter: case ValueNodeKind.MethodReturn: case ValueNodeKind.SystemTypeForGenericParameter: case ValueNodeKind.RuntimeTypeHandleForGenericParameter: case ValueNodeKind.SystemReflectionMethodBase: case ValueNodeKind.RuntimeMethodHandle: case ValueNodeKind.LoadField: break; // // Nodes with children // case ValueNodeKind.MergePoint: foreach (ValueNode val in ((MergePointValue) node).Values) { if (val.DetectCycle (seenNodes, allNodesSeen)) { foundCycle = true; } } break; case ValueNodeKind.GetTypeFromString: GetTypeFromStringValue gtfsv = (GetTypeFromStringValue) node; foundCycle = gtfsv.AssemblyIdentity.DetectCycle (seenNodes, allNodesSeen); foundCycle |= gtfsv.NameString.DetectCycle (seenNodes, allNodesSeen); break; case ValueNodeKind.Array: ArrayValue av = (ArrayValue) node; foundCycle = av.Size.DetectCycle (seenNodes, allNodesSeen); foreach (ValueBasicBlockPair pair in av.IndexValues.Values) { foundCycle |= pair.Value.DetectCycle (seenNodes, allNodesSeen); } break; default: throw new Exception (String.Format ("Unknown node kind: {0}", node.Kind)); } seenNodes.Remove (node); return foundCycle; } public static ValueNode.UniqueValueCollection UniqueValues (this ValueNode? node) { if (node == null) return new ValueNode.UniqueValueCollection (UnknownValue.Instance); return node.UniqueValuesInternal; } public static int? AsConstInt (this ValueNode? node) { if (node is ConstIntValue constInt) return constInt.Value; return null; } } internal static class ValueNodeDump { internal static string ValueNodeToString (ValueNode? 
node, params object[] args) { if (node == null) return "<null>"; StringBuilder sb = new StringBuilder (); sb.Append (node.Kind.ToString ()); sb.Append ("("); if (args != null) { for (int i = 0; i < args.Length; i++) { if (i > 0) sb.Append (","); sb.Append (args[i] == null ? "<null>" : args[i].ToString ()); } } sb.Append (")"); return sb.ToString (); } static string GetIndent (int level) { StringBuilder sb = new StringBuilder (level * 2); for (int i = 0; i < level; i++) sb.Append (" "); return sb.ToString (); } public static void DumpTree (this ValueNode node, System.IO.TextWriter? writer = null, int indentLevel = 0) { if (writer == null) writer = Console.Out; writer.Write (GetIndent (indentLevel)); if (node == null) { writer.WriteLine ("<null>"); return; } writer.WriteLine (node); foreach (ValueNode child in node.Children) { child.DumpTree (writer, indentLevel + 1); } } } /// <summary> /// Represents an unknown value. /// </summary> class UnknownValue : LeafValueNode { private UnknownValue () { Kind = ValueNodeKind.Unknown; StaticType = null; } public static UnknownValue Instance { get; } = new UnknownValue (); public override bool Equals (ValueNode? other) { return base.Equals (other); } public override int GetHashCode () { // All instances of UnknownValue are equivalent, so they all hash to the same hashcode. This one was // chosen for no particular reason at all. return 0x98052; } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this); } } class NullValue : LeafValueNode { private NullValue () { Kind = ValueNodeKind.Null; StaticType = null; } public override bool Equals (ValueNode? other) { return base.Equals (other); } public static NullValue Instance { get; } = new NullValue (); public override int GetHashCode () { // All instances of NullValue are equivalent, so they all hash to the same hashcode. This one was // chosen for no particular reason at all. return 0x90210; } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this); } } /// <summary> /// This is a known System.Type value. TypeRepresented is the 'value' of the System.Type. /// </summary> class SystemTypeValue : LeafValueNode { public SystemTypeValue (TypeDefinition typeRepresented) { Kind = ValueNodeKind.SystemType; // Should be System.Type - but we don't have any use case where tracking it like that would matter StaticType = null; TypeRepresented = typeRepresented; } public TypeDefinition TypeRepresented { get; private set; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; return Equals (this.TypeRepresented, ((SystemTypeValue) other).TypeRepresented); } public override int GetHashCode () { return HashCode.Combine (Kind, TypeRepresented); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, TypeRepresented); } } /// <summary> /// This is the System.RuntimeTypeHandle equivalent to a <see cref="SystemTypeValue"/> node. /// </summary> class RuntimeTypeHandleValue : LeafValueNode { public RuntimeTypeHandleValue (TypeDefinition typeRepresented) { Kind = ValueNodeKind.RuntimeTypeHandle; // Should be System.RuntimeTypeHandle, but we don't have a use case for it like that StaticType = null; TypeRepresented = typeRepresented; } public TypeDefinition TypeRepresented { get; } public override bool Equals (ValueNode? 
other) { if (!base.Equals (other)) return false; return Equals (this.TypeRepresented, ((RuntimeTypeHandleValue) other).TypeRepresented); } public override int GetHashCode () { return HashCode.Combine (Kind, TypeRepresented); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, TypeRepresented); } } /// <summary> /// This is a System.Type value which represents generic parameter (basically result of typeof(T)) /// Its actual type is unknown, but it can have annotations. /// </summary> class SystemTypeForGenericParameterValue : LeafValueWithDynamicallyAccessedMemberNode { public SystemTypeForGenericParameterValue (GenericParameter genericParameter, DynamicallyAccessedMemberTypes dynamicallyAccessedMemberTypes) : base (genericParameter) { Kind = ValueNodeKind.SystemTypeForGenericParameter; // Should be System.Type, but we don't have a use case for it StaticType = null; GenericParameter = genericParameter; DynamicallyAccessedMemberTypes = dynamicallyAccessedMemberTypes; } public GenericParameter GenericParameter { get; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; var otherValue = (SystemTypeForGenericParameterValue) other; return this.GenericParameter == otherValue.GenericParameter && this.DynamicallyAccessedMemberTypes == otherValue.DynamicallyAccessedMemberTypes; } public override int GetHashCode () { return HashCode.Combine (Kind, GenericParameter, DynamicallyAccessedMemberTypes); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, GenericParameter, DynamicallyAccessedMemberTypes); } } /// <summary> /// This is the System.RuntimeTypeHandle equivalent to a <see cref="SystemTypeForGenericParameterValue"/> node. /// </summary> class RuntimeTypeHandleForGenericParameterValue : LeafValueNode { public RuntimeTypeHandleForGenericParameterValue (GenericParameter genericParameter) { Kind = ValueNodeKind.RuntimeTypeHandleForGenericParameter; // Should be System.RuntimeTypeHandle, but we don't have a use case for it StaticType = null; GenericParameter = genericParameter; } public GenericParameter GenericParameter { get; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; return Equals (this.GenericParameter, ((RuntimeTypeHandleForGenericParameterValue) other).GenericParameter); } public override int GetHashCode () { return HashCode.Combine (Kind, GenericParameter); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, GenericParameter); } } /// <summary> /// This is the System.RuntimeMethodHandle equivalent to a <see cref="SystemReflectionMethodBaseValue"/> node. /// </summary> class RuntimeMethodHandleValue : LeafValueNode { public RuntimeMethodHandleValue (MethodDefinition methodRepresented) { Kind = ValueNodeKind.RuntimeMethodHandle; // Should be System.RuntimeMethodHandle, but we don't have a use case for it StaticType = null; MethodRepresented = methodRepresented; } public MethodDefinition MethodRepresented { get; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; return Equals (this.MethodRepresented, ((RuntimeMethodHandleValue) other).MethodRepresented); } public override int GetHashCode () { return HashCode.Combine (Kind, MethodRepresented); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, MethodRepresented); } } /// <summary> /// This is a known System.Reflection.MethodBase value. 
MethodRepresented is the 'value' of the MethodBase. /// </summary> class SystemReflectionMethodBaseValue : LeafValueNode { public SystemReflectionMethodBaseValue (MethodDefinition methodRepresented) { Kind = ValueNodeKind.SystemReflectionMethodBase; // Should be System.Reflection.MethodBase, but we don't have a use case for it StaticType = null; MethodRepresented = methodRepresented; } public MethodDefinition MethodRepresented { get; private set; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; return Equals (this.MethodRepresented, ((SystemReflectionMethodBaseValue) other).MethodRepresented); } public override int GetHashCode () { return HashCode.Combine (Kind, MethodRepresented); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, MethodRepresented); } } /// <summary> /// A known string - such as the result of a ldstr. /// </summary> class KnownStringValue : LeafValueNode { public KnownStringValue (string contents) { Kind = ValueNodeKind.KnownString; // Should be System.String, but we don't have a use case for it StaticType = null; Contents = contents; } public string Contents { get; private set; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; return this.Contents == ((KnownStringValue) other).Contents; } public override int GetHashCode () { return HashCode.Combine (Kind, Contents); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, "\"" + Contents + "\""); } } /// <summary> /// Base class for all nodes which can have dynamically accessed member annotation. /// </summary> abstract class LeafValueWithDynamicallyAccessedMemberNode : LeafValueNode { public LeafValueWithDynamicallyAccessedMemberNode (IMetadataTokenProvider sourceContext) { SourceContext = sourceContext; } public IMetadataTokenProvider SourceContext { get; private set; } /// <summary> /// The bitfield of dynamically accessed member types the node guarantees /// </summary> public DynamicallyAccessedMemberTypes DynamicallyAccessedMemberTypes { get; protected set; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; var otherValue = (LeafValueWithDynamicallyAccessedMemberNode) other; return SourceContext == otherValue.SourceContext && DynamicallyAccessedMemberTypes == otherValue.DynamicallyAccessedMemberTypes; } } /// <summary> /// A value that came from a method parameter - such as the result of a ldarg. /// </summary> class MethodParameterValue : LeafValueWithDynamicallyAccessedMemberNode { public MethodParameterValue (TypeDefinition? staticType, int parameterIndex, DynamicallyAccessedMemberTypes dynamicallyAccessedMemberTypes, IMetadataTokenProvider sourceContext) : base (sourceContext) { Kind = ValueNodeKind.MethodParameter; StaticType = staticType; ParameterIndex = parameterIndex; DynamicallyAccessedMemberTypes = dynamicallyAccessedMemberTypes; } public int ParameterIndex { get; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; var otherValue = (MethodParameterValue) other; return this.ParameterIndex == otherValue.ParameterIndex; } public override int GetHashCode () { return HashCode.Combine (Kind, ParameterIndex, DynamicallyAccessedMemberTypes); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, ParameterIndex, DynamicallyAccessedMemberTypes); } } /// <summary> /// String with a known annotation. 
/// </summary> class AnnotatedStringValue : LeafValueWithDynamicallyAccessedMemberNode { public AnnotatedStringValue (IMetadataTokenProvider sourceContext, DynamicallyAccessedMemberTypes dynamicallyAccessedMemberTypes) : base (sourceContext) { Kind = ValueNodeKind.AnnotatedString; // Should be System.String, but we don't have a use case for it StaticType = null; DynamicallyAccessedMemberTypes = dynamicallyAccessedMemberTypes; } public override bool Equals (ValueNode? other) { return base.Equals (other); } public override int GetHashCode () { return HashCode.Combine (Kind, DynamicallyAccessedMemberTypes); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, DynamicallyAccessedMemberTypes); } } /// <summary> /// Return value from a method /// </summary> class MethodReturnValue : LeafValueWithDynamicallyAccessedMemberNode { public MethodReturnValue (TypeDefinition? staticType, DynamicallyAccessedMemberTypes dynamicallyAccessedMemberTypes, IMetadataTokenProvider sourceContext) : base (sourceContext) { Kind = ValueNodeKind.MethodReturn; StaticType = staticType; DynamicallyAccessedMemberTypes = dynamicallyAccessedMemberTypes; } public override bool Equals (ValueNode? other) { return base.Equals (other); } public override int GetHashCode () { return HashCode.Combine (Kind, DynamicallyAccessedMemberTypes); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, DynamicallyAccessedMemberTypes); } } /// <summary> /// A merge point commonly occurs due to control flow in a method body. It represents a set of values /// from different paths through the method. It is the reason for EvaluateUniqueValues, which essentially /// provides an enumeration over all the concrete values represented by a given ValueNode after 'erasing' /// the merge point nodes. /// </summary> class MergePointValue : ValueNode { private MergePointValue (ValueNode one, ValueNode two) { Kind = ValueNodeKind.MergePoint; StaticType = null; m_values = new ValueNodeHashSet (); if (one.Kind == ValueNodeKind.MergePoint) { MergePointValue mpvOne = (MergePointValue) one; foreach (ValueNode value in mpvOne.Values) m_values.Add (value); } else m_values.Add (one); if (two.Kind == ValueNodeKind.MergePoint) { MergePointValue mpvTwo = (MergePointValue) two; foreach (ValueNode value in mpvTwo.Values) m_values.Add (value); } else m_values.Add (two); } public MergePointValue () { Kind = ValueNodeKind.MergePoint; m_values = new ValueNodeHashSet (); } public void AddValue (ValueNode node) { // we are mutating our state, so we must invalidate any cached knowledge //InvalidateIsOpen (); if (node.Kind == ValueNodeKind.MergePoint) { foreach (ValueNode value in ((MergePointValue) node).Values) m_values.Add (value); } else m_values.Add (node); #if false if (this.DetectCycle(new HashSet<ValueNode>())) { throw new Exception("Found a cycle"); } #endif } readonly ValueNodeHashSet m_values; public ValueNodeHashSet Values { get { return m_values; } } protected override int NumChildren { get { return Values.Count; } } protected override ValueNode ChildAt (int index) { if (index < NumChildren) return Values.ElementAt (index); throw new InvalidOperationException (); } public static ValueNode? MergeValues (ValueNode? one, ValueNode? 
two) { if (one == null) return two; else if (two == null) return one; else if (one.Equals (two)) return one; else return new MergePointValue (one, two); } protected override IEnumerable<ValueNode> EvaluateUniqueValues () { foreach (ValueNode value in Values) { foreach (ValueNode uniqueValue in value.UniqueValuesInternal) { yield return uniqueValue; } } } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; MergePointValue otherMpv = (MergePointValue) other; if (this.Values.Count != otherMpv.Values.Count) return false; foreach (ValueNode value in this.Values) { if (!otherMpv.Values.Contains (value)) return false; } return true; } public override int GetHashCode () { return HashCode.Combine (Kind, Values); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this); } } delegate TypeDefinition TypeResolver (string assemblyString, string typeString); /// <summary> /// The result of a Type.GetType. /// AssemblyIdentity is the scope in which to resolve if the type name string is not assembly-qualified. /// </summary> #pragma warning disable CA1812 // GetTypeFromStringValue is never instantiated class GetTypeFromStringValue : ValueNode { private readonly TypeResolver _resolver; public GetTypeFromStringValue (TypeResolver resolver, ValueNode assemblyIdentity, ValueNode nameString) { _resolver = resolver; Kind = ValueNodeKind.GetTypeFromString; // Should be System.Type, but we don't have a use case for it StaticType = null; AssemblyIdentity = assemblyIdentity; NameString = nameString; } public ValueNode AssemblyIdentity { get; private set; } public ValueNode NameString { get; private set; } protected override int NumChildren { get { return 2; } } protected override ValueNode ChildAt (int index) { if (index == 0) return AssemblyIdentity; if (index == 1) return NameString; throw new InvalidOperationException (); } protected override IEnumerable<ValueNode> EvaluateUniqueValues () { HashSet<string>? names = null; foreach (ValueNode nameStringValue in NameString.UniqueValuesInternal) { if (nameStringValue.Kind == ValueNodeKind.KnownString) { if (names == null) { names = new HashSet<string> (); } string typeName = ((KnownStringValue) nameStringValue).Contents; names.Add (typeName); } } bool foundAtLeastOne = false; if (names != null) { foreach (ValueNode assemblyValue in AssemblyIdentity.UniqueValuesInternal) { if (assemblyValue.Kind == ValueNodeKind.KnownString) { string assemblyName = ((KnownStringValue) assemblyValue).Contents; foreach (string name in names) { TypeDefinition typeDefinition = _resolver (assemblyName, name); if (typeDefinition != null) { foundAtLeastOne = true; yield return new SystemTypeValue (typeDefinition); } } } } } if (!foundAtLeastOne) yield return UnknownValue.Instance; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; GetTypeFromStringValue otherGtfs = (GetTypeFromStringValue) other; return this.AssemblyIdentity.Equals (otherGtfs.AssemblyIdentity) && this.NameString.Equals (otherGtfs.NameString); } public override int GetHashCode () { return HashCode.Combine (Kind, AssemblyIdentity, NameString); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, NameString); } } /// <summary> /// A representation of a ldfld. Note that we don't have a representation of objects containing fields /// so there isn't much that can be done with this node type yet. 
/// </summary> class LoadFieldValue : LeafValueWithDynamicallyAccessedMemberNode { public LoadFieldValue (TypeDefinition? staticType, FieldDefinition fieldToLoad, DynamicallyAccessedMemberTypes dynamicallyAccessedMemberTypes) : base (fieldToLoad) { Kind = ValueNodeKind.LoadField; StaticType = staticType; Field = fieldToLoad; DynamicallyAccessedMemberTypes = dynamicallyAccessedMemberTypes; } public FieldDefinition Field { get; private set; } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; LoadFieldValue otherLfv = (LoadFieldValue) other; return Equals (this.Field, otherLfv.Field); } public override int GetHashCode () { return HashCode.Combine (Kind, Field, DynamicallyAccessedMemberTypes); } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, Field, DynamicallyAccessedMemberTypes); } } /// <summary> /// Represents a ldc on an int32. /// </summary> class ConstIntValue : LeafValueNode { public ConstIntValue (int value) { Kind = ValueNodeKind.ConstInt; // Should be System.Int32, but we don't have a usecase for it right now StaticType = null; Value = value; } public int Value { get; private set; } public override int GetHashCode () { return HashCode.Combine (Kind, Value); } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; ConstIntValue otherCiv = (ConstIntValue) other; return Value == otherCiv.Value; } protected override string NodeToString () { return ValueNodeDump.ValueNodeToString (this, Value); } } class ArrayValue : ValueNode { protected override int NumChildren => 1 + IndexValues.Count; /// <summary> /// Constructs an array value of the given size /// </summary> public ArrayValue (ValueNode? size, TypeReference elementType) { Kind = ValueNodeKind.Array; // Should be System.Array (or similar), but we don't have a use case for it StaticType = null; Size = size ?? UnknownValue.Instance; ElementType = elementType; IndexValues = new Dictionary<int, ValueBasicBlockPair> (); } private ArrayValue (ValueNode size, TypeReference elementType, Dictionary<int, ValueBasicBlockPair> indexValues) : this (size, elementType) { IndexValues = indexValues; } public ValueNode Size { get; } public TypeReference ElementType { get; } public Dictionary<int, ValueBasicBlockPair> IndexValues { get; } public override int GetHashCode () { return HashCode.Combine (Kind, Size); } public override bool Equals (ValueNode? other) { if (!base.Equals (other)) return false; ArrayValue otherArr = (ArrayValue) other; bool equals = Size.Equals (otherArr.Size); equals &= IndexValues.Count == otherArr.IndexValues.Count; if (!equals) return false; // If both sets T and O are the same size and "T intersect O" is empty, then T == O. HashSet<KeyValuePair<int, ValueBasicBlockPair>> thisValueSet = new (IndexValues); HashSet<KeyValuePair<int, ValueBasicBlockPair>> otherValueSet = new (otherArr.IndexValues); thisValueSet.ExceptWith (otherValueSet); return thisValueSet.Count == 0; } protected override string NodeToString () { // TODO: Use StringBuilder and remove Linq usage. 
return $"(Array Size:{ValueNodeDump.ValueNodeToString (this, Size)}, Values:({string.Join (',', IndexValues.Select (v => $"({v.Key},{ValueNodeDump.ValueNodeToString (v.Value.Value)})"))})"; } protected override IEnumerable<ValueNode> EvaluateUniqueValues () { foreach (var sizeConst in Size.UniqueValuesInternal) yield return new ArrayValue (sizeConst, ElementType, IndexValues); } protected override ValueNode ChildAt (int index) { if (index == 0) return Size; if (index - 1 <= IndexValues.Count) return IndexValues.Values.ElementAt (index - 1).Value!; throw new InvalidOperationException (); } } #region ValueNode Collections public class ValueNodeList : List<ValueNode?> { public ValueNodeList () { } public ValueNodeList (int capacity) : base (capacity) { } public ValueNodeList (List<ValueNode> other) : base (other) { } public override int GetHashCode () { return HashUtils.CalcHashCodeEnumerable (this); } public override bool Equals (object? other) { if (!(other is ValueNodeList otherList)) return false; if (otherList.Count != Count) return false; for (int i = 0; i < Count; i++) { if (!(otherList[i]?.Equals (this[i]) ?? (this[i] is null))) return false; } return true; } } class ValueNodeHashSet : HashSet<ValueNode> { public override int GetHashCode () { return HashUtils.CalcHashCodeEnumerable (this); } public override bool Equals (object? other) { if (!(other is ValueNodeHashSet otherSet)) return false; if (otherSet.Count != Count) return false; IEnumerator<ValueNode> thisEnumerator = this.GetEnumerator (); IEnumerator<ValueNode> otherEnumerator = otherSet.GetEnumerator (); for (int i = 0; i < Count; i++) { thisEnumerator.MoveNext (); otherEnumerator.MoveNext (); if (!thisEnumerator.Current.Equals (otherEnumerator.Current)) return false; } return true; } } #endregion static class HashUtils { public static int CalcHashCodeEnumerable<T> (IEnumerable<T> list) where T : class? { HashCode hashCode = new HashCode (); foreach (var item in list) hashCode.Add (item); return hashCode.ToHashCode (); } } public struct ValueBasicBlockPair { public ValueNode? Value; public int BasicBlockIndex; } }
-1
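The ValueNode sources in the record above describe how MergePointValue flattens nested merge points and how UniqueValues erases the merge structure for consumers. A minimal standalone sketch of that merge-then-enumerate pattern, using simplified stand-in types invented for illustration (Leaf/Merge here are not the linker's internal classes):

using System;
using System.Collections.Generic;
using System.Linq;

// Simplified stand-ins for the linker's ValueNode / MergePointValue types.
abstract class Node
{
    // Mirrors EvaluateUniqueValues: leaves yield themselves, merge points flatten their members.
    public abstract IEnumerable<Node> UniqueValues ();
}

sealed class Leaf : Node
{
    public Leaf (string name) { Name = name; }
    public string Name { get; }
    public override IEnumerable<Node> UniqueValues () { yield return this; }
    public override string ToString () { return Name; }
}

sealed class Merge : Node
{
    readonly HashSet<Node> _values = new HashSet<Node> ();

    public Merge (params Node[] values)
    {
        foreach (Node v in values) {
            // Merging a merge point folds its members in, so merge points
            // never nest -- the same flattening MergePointValue performs.
            if (v is Merge m)
                _values.UnionWith (m._values);
            else
                _values.Add (v);
        }
    }

    public override IEnumerable<Node> UniqueValues () { return _values.SelectMany (v => v.UniqueValues ()); }
}

class Demo
{
    static void Main ()
    {
        // Two control-flow paths left different values behind.
        Node merged = new Merge (new Leaf ("typeof(A)"), new Leaf ("typeof(B)"));

        // Consumers see each concrete value with the merge structure erased.
        foreach (Node value in merged.UniqueValues ())
            Console.WriteLine (value); // typeof(A) then typeof(B)
    }
}

The real implementation additionally special-cases nodes that represent exactly one value so that enumerating UniqueValues allocates no enumerator on that common path.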
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/tests/JIT/Regression/CLR-x86-JIT/V1-M11-Beta1/b42387/b42387.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
-1
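The PR metadata repeated across these records covers the Vector64/128 "All"/"Any" comparison helpers. As a minimal sketch of how the public API being accelerated is consumed (the method names come from the PR description; the sample lane values are invented):

using System;
using System.Runtime.Intrinsics;

class Demo
{
    static void Main ()
    {
        Vector128<int> a = Vector128.Create (5, 6, 7, 8);
        Vector128<int> b = Vector128.Create (1, 2, 9, 4);

        // False: lane 2 of 'a' (7) is not greater than lane 2 of 'b' (9).
        Console.WriteLine (Vector128.GreaterThanAll (a, b));

        // True: at least one lane of 'a' is greater than its counterpart in 'b'.
        Console.WriteLine (Vector128.GreaterThanAny (a, b));

        // True: every lane of 'b' is less than or equal to 9.
        Console.WriteLine (Vector128.LessThanOrEqualAll (b, Vector128.Create (9)));
    }
}

The intent of the change, per the title, is that such calls compile to SIMD compare instructions on Mono rather than falling back to scalar per-lane loops.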
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/libraries/System.Net.Sockets/tests/FunctionalTests/InlineCompletions.Unix.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.IO; using System.Diagnostics; using System.Threading.Tasks; using Xunit; using Xunit.Abstractions; using Microsoft.DotNet.RemoteExecutor; namespace System.Net.Sockets.Tests { public class InlineContinuations { [OuterLoop] [Fact] [PlatformSpecific(TestPlatforms.AnyUnix)] // Inline Socket mode is specific to Unix Socket implementation. public void InlineSocketContinuations() { RemoteInvokeOptions options = new RemoteInvokeOptions(); options.StartInfo.EnvironmentVariables.Add("DOTNET_SYSTEM_NET_SOCKETS_INLINE_COMPLETIONS", "1"); options.TimeOut = (int)TimeSpan.FromMinutes(20).TotalMilliseconds; RemoteExecutor.Invoke(async () => { // Connect/Accept tests await new AcceptEap(null).Accept_ConcurrentAcceptsBeforeConnects_Success(5); await new AcceptEap(null).Accept_ConcurrentAcceptsAfterConnects_Success(5); // Send/Receive tests await new SendReceive_Eap(null).SendRecv_Stream_TCP(IPAddress.Loopback, useMultipleBuffers: false); await new SendReceive_Eap(null).SendRecv_Stream_TCP_MultipleConcurrentReceives(IPAddress.Loopback, useMultipleBuffers: false); await new SendReceive_Eap(null).SendRecv_Stream_TCP_MultipleConcurrentSends(IPAddress.Loopback, useMultipleBuffers: false); await new SendReceive_Eap(null).TcpReceiveSendGetsCanceledByDispose(receiveOrSend: true, ipv6Server: false, dualModeClient: false); await new SendReceive_Eap(null).TcpReceiveSendGetsCanceledByDispose(receiveOrSend: false, ipv6Server: false, dualModeClient: false); }, options).Dispose(); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.IO; using System.Diagnostics; using System.Threading.Tasks; using Xunit; using Xunit.Abstractions; using Microsoft.DotNet.RemoteExecutor; namespace System.Net.Sockets.Tests { public class InlineContinuations { [OuterLoop] [Fact] [PlatformSpecific(TestPlatforms.AnyUnix)] // Inline Socket mode is specific to Unix Socket implementation. public void InlineSocketContinuations() { RemoteInvokeOptions options = new RemoteInvokeOptions(); options.StartInfo.EnvironmentVariables.Add("DOTNET_SYSTEM_NET_SOCKETS_INLINE_COMPLETIONS", "1"); options.TimeOut = (int)TimeSpan.FromMinutes(20).TotalMilliseconds; RemoteExecutor.Invoke(async () => { // Connect/Accept tests await new AcceptEap(null).Accept_ConcurrentAcceptsBeforeConnects_Success(5); await new AcceptEap(null).Accept_ConcurrentAcceptsAfterConnects_Success(5); // Send/Receive tests await new SendReceive_Eap(null).SendRecv_Stream_TCP(IPAddress.Loopback, useMultipleBuffers: false); await new SendReceive_Eap(null).SendRecv_Stream_TCP_MultipleConcurrentReceives(IPAddress.Loopback, useMultipleBuffers: false); await new SendReceive_Eap(null).SendRecv_Stream_TCP_MultipleConcurrentSends(IPAddress.Loopback, useMultipleBuffers: false); await new SendReceive_Eap(null).TcpReceiveSendGetsCanceledByDispose(receiveOrSend: true, ipv6Server: false, dualModeClient: false); await new SendReceive_Eap(null).TcpReceiveSendGetsCanceledByDispose(receiveOrSend: false, ipv6Server: false, dualModeClient: false); }, options).Dispose(); } } }
-1
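The next record's InvariantMode test data drives CompareInfo searches with various CompareOptions values. A minimal sketch of the API shape those yield rows exercise (inputs here are illustrative, not taken from the test data):

using System;
using System.Globalization;

class Demo
{
    static void Main ()
    {
        CompareInfo ci = CultureInfo.InvariantCulture.CompareInfo;

        // Ordinal is case-sensitive: lowercase 'l' does not match "L".
        Console.WriteLine (ci.IndexOf ("Hello", "L", CompareOptions.Ordinal));           // -1
        Console.WriteLine (ci.IndexOf ("Hello", "L", CompareOptions.OrdinalIgnoreCase)); // 2

        // IsPrefix honors the same options.
        Console.WriteLine (ci.IsPrefix ("interesting", "INTER", CompareOptions.IgnoreCase)); // True
    }
}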
dotnet/runtime
65,889
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE
Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
simonrozsival
"2022-02-25T12:07:00Z"
"2022-03-09T14:24:14Z"
e32f3b61cd41e6a97ebe8f512ff673b63ff40640
cbcc616cf386b88e49f97f74182ffff241528179
[Mono] Add SIMD intrinsics for Vector64/128 "All" and "Any" variants of GT/GE/LT/LE. Related to #64072 - `GreaterThanAll` - `GreaterThanAny` - `GreaterThanOrEqualAll` - `GreaterThanOrEqualAny` - `LessThanAll` - `LessThanAny` - `LessThanOrEqualAll` - `LessThanOrEqualAny`
./src/libraries/System.Globalization/tests/Invariant/InvariantMode.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Buffers; using System.Reflection; using System.Buffers.Binary; using System.Collections.Generic; using System.IO; using System.Runtime.InteropServices; using System.Text; using Xunit; namespace System.Globalization.Tests { public class InvariantModeTests { private static bool PredefinedCulturesOnlyIsDisabled { get; } = !PredefinedCulturesOnly(); private static bool PredefinedCulturesOnly() { bool ret; try { ret = (bool) typeof(object).Assembly.GetType("System.Globalization.GlobalizationMode").GetProperty("PredefinedCulturesOnly", BindingFlags.Static | BindingFlags.NonPublic).GetValue(null); } catch { ret = false; } return ret; } public static IEnumerable<object[]> Cultures_TestData() { yield return new object[] { "en-US" }; yield return new object[] { "ja-JP" }; yield return new object[] { "fr-FR" }; yield return new object[] { "tr-TR" }; yield return new object[] { "" }; } private static readonly string[] s_cultureNames = new string[] { "en-US", "ja-JP", "fr-FR", "tr-TR", "" }; public static IEnumerable<object[]> IndexOf_TestData() { // Empty string yield return new object[] { "foo", "", 0, 3, CompareOptions.None, 0 }; yield return new object[] { "", "", 0, 0, CompareOptions.None, 0 }; // OrdinalIgnoreCase yield return new object[] { "Hello", "l", 0, 5, CompareOptions.OrdinalIgnoreCase, 2 }; yield return new object[] { "Hello", "L", 0, 5, CompareOptions.OrdinalIgnoreCase, 2 }; yield return new object[] { "Hello", "h", 0, 5, CompareOptions.OrdinalIgnoreCase, 0 }; yield return new object[] { "Hello\u00D3\u00D4", "\u00F3\u00F4", 0, 7, CompareOptions.OrdinalIgnoreCase, 5 }; yield return new object[] { "Hello\u00D3\u00D4", "\u00F3\u00F5", 0, 7, CompareOptions.OrdinalIgnoreCase, -1 }; yield return new object[] { "Hello\U00010400", "\U00010428", 0, 7, CompareOptions.OrdinalIgnoreCase, 5 }; // Long strings yield return new object[] { new string('b', 100) + new string('a', 5555), "aaaaaaaaaaaaaaa", 0, 5655, CompareOptions.None, 100 }; yield return new object[] { new string('b', 101) + new string('a', 5555), new string('a', 5000), 0, 5656, CompareOptions.None, 101 }; yield return new object[] { new string('a', 5555), new string('a', 5000) + "b", 0, 5555, CompareOptions.None, -1 }; // Hungarian yield return new object[] { "foobardzsdzs", "rddzs", 0, 12, CompareOptions.Ordinal, -1 }; yield return new object[] { "foobardzsdzs", "rddzs", 0, 12, CompareOptions.None, -1 }; yield return new object[] { "foobardzsdzs", "rddzs", 0, 12, CompareOptions.Ordinal, -1 }; // Turkish yield return new object[] { "Hi", "I", 0, 2, CompareOptions.None, -1 }; yield return new object[] { "Hi", "I", 0, 2, CompareOptions.IgnoreCase, 1 }; yield return new object[] { "Hi", "\u0130", 0, 2, CompareOptions.None, -1 }; yield return new object[] { "Hi", "\u0130", 0, 2, CompareOptions.IgnoreCase, -1 }; yield return new object[] { "Hi", "I", 0, 2, CompareOptions.None, -1 }; yield return new object[] { "Hi", "\u0130", 0, 2, CompareOptions.IgnoreCase, -1 }; // Unicode yield return new object[] { "Hi", "\u0130", 0, 2, CompareOptions.None, -1 }; yield return new object[] { "Exhibit \u00C0", "A\u0300", 0, 9, CompareOptions.None, -1 }; yield return new object[] { "Exhibit \u00C0", "A\u0300", 0, 9, CompareOptions.Ordinal, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 0, 9, CompareOptions.None, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 0, 9, 
CompareOptions.Ordinal, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 0, 9, CompareOptions.IgnoreCase, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 0, 9, CompareOptions.OrdinalIgnoreCase, -1 }; yield return new object[] { "FooBar", "Foo\u0400Bar", 0, 6, CompareOptions.Ordinal, -1 }; yield return new object[] { "TestFooBA\u0300R", "FooB\u00C0R", 0, 11, CompareOptions.IgnoreNonSpace, -1 }; // Weightless characters yield return new object[] { "", "\u200d", 0, 0, CompareOptions.None, -1 }; yield return new object[] { "hello", "\u200d", 0, 5, CompareOptions.IgnoreCase, -1 }; // Ignore symbols yield return new object[] { "More Test's", "Tests", 0, 11, CompareOptions.IgnoreSymbols, -1 }; yield return new object[] { "More Test's", "Tests", 0, 11, CompareOptions.None, -1 }; yield return new object[] { "cbabababdbaba", "ab", 0, 13, CompareOptions.None, 2 }; // Ordinal should be case-sensitive yield return new object[] { "a", "a", 0, 1, CompareOptions.Ordinal, 0 }; yield return new object[] { "a", "A", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "abc", "aBc", 0, 3, CompareOptions.Ordinal, -1 }; // Ordinal with numbers and symbols yield return new object[] { "a", "1", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "1", "1", 0, 1, CompareOptions.Ordinal, 0 }; yield return new object[] { "1", "!", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "a", "-", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "-", "-", 0, 1, CompareOptions.Ordinal, 0 }; yield return new object[] { "-", "!", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "!", "!", 0, 1, CompareOptions.Ordinal, 0 }; // Ordinal with unicode yield return new object[] { "\uFF21", "\uFE57", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\uFE57", "\uFF21", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\uFF21", "a\u0400Bc", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\uFE57", "a\u0400Bc", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "a", "a\u0400Bc", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "a\u0400Bc", "a", 0, 4, CompareOptions.Ordinal, 0 }; // Ordinal with I or i yield return new object[] { "I", "i", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "I", "I", 0, 1, CompareOptions.Ordinal, 0 }; yield return new object[] { "i", "I", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "i", "i", 0, 1, CompareOptions.Ordinal, 0 }; yield return new object[] { "I", "\u0130", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\u0130", "I", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "i", "\u0130", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\u0130", "i", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "I", "\u0131", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\u0131", "I", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "i", "\u0131", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\u0131", "i", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\u0130", "\u0130", 0, 1, CompareOptions.Ordinal, 0 }; yield return new object[] { "\u0131", "\u0131", 0, 1, CompareOptions.Ordinal, 0 }; yield return new object[] { "\u0130", "\u0131", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\u0131", "\u0130", 0, 1, CompareOptions.Ordinal, -1 }; // Platform differences yield return new object[] { 
"foobardzsdzs", "rddzs", 0, 12, CompareOptions.None, -1 }; } public static IEnumerable<object[]> LastIndexOf_TestData() { // Empty strings yield return new object[] { "foo", "", 2, 3, CompareOptions.None, 3 }; yield return new object[] { "", "", 0, 0, CompareOptions.None, 0 }; yield return new object[] { "", "a", 0, 0, CompareOptions.None, -1 }; yield return new object[] { "", "", -1, 0, CompareOptions.None, 0 }; yield return new object[] { "", "a", -1, 0, CompareOptions.None, -1 }; yield return new object[] { "", "", 0, -1, CompareOptions.None, 0 }; yield return new object[] { "", "a", 0, -1, CompareOptions.None, -1 }; // Start index = source.Length yield return new object[] { "Hello", "l", 5, 5, CompareOptions.None, 3 }; yield return new object[] { "Hello", "b", 5, 5, CompareOptions.None, -1 }; yield return new object[] { "Hello", "l", 5, 0, CompareOptions.None, -1 }; yield return new object[] { "Hello", "", 5, 5, CompareOptions.None, 5 }; yield return new object[] { "Hello", "", 5, 0, CompareOptions.None, 5 }; // OrdinalIgnoreCase yield return new object[] { "Hello", "l", 4, 5, CompareOptions.OrdinalIgnoreCase, 3 }; yield return new object[] { "Hello", "L", 4, 5, CompareOptions.OrdinalIgnoreCase, 3 }; yield return new object[] { "Hello", "h", 4, 5, CompareOptions.OrdinalIgnoreCase, 0 }; yield return new object[] { "Hello\u00D3\u00D4\u00D3\u00D4", "\u00F3\u00F4", 8, 9, CompareOptions.OrdinalIgnoreCase, 7 }; yield return new object[] { "Hello\u00D3\u00D4\u00D3\u00D4", "\u00F3\u00F5", 8, 9, CompareOptions.OrdinalIgnoreCase, -1 }; yield return new object[] { "Hello\U00010400\U00010400", "\U00010428", 8, 9, CompareOptions.OrdinalIgnoreCase, 7 }; // Long strings yield return new object[] { new string('a', 5555) + new string('b', 100), "aaaaaaaaaaaaaaa", 5654, 5655, CompareOptions.None, 5540 }; yield return new object[] { new string('b', 101) + new string('a', 5555), new string('a', 5000), 5655, 5656, CompareOptions.None, 656 }; yield return new object[] { new string('a', 5555), new string('a', 5000) + "b", 5554, 5555, CompareOptions.None, -1 }; // Hungarian yield return new object[] { "foobardzsdzs", "rddzs", 11, 12, CompareOptions.Ordinal, -1 }; yield return new object[] { "foobardzsdzs", "rddzs", 11, 12, CompareOptions.None, -1 }; yield return new object[] { "foobardzsdzs", "rddzs", 11, 12, CompareOptions.Ordinal, -1 }; // Turkish yield return new object[] { "Hi", "I", 1, 2, CompareOptions.None, -1 }; yield return new object[] { "Hi", "I", 1, 2, CompareOptions.IgnoreCase, 1 }; yield return new object[] { "Hi", "\u0130", 1, 2, CompareOptions.None, -1 }; yield return new object[] { "Hi", "\u0130", 1, 2, CompareOptions.IgnoreCase, -1 }; yield return new object[] { "Hi", "I", 1, 2, CompareOptions.None, -1 }; yield return new object[] { "Hi", "I", 1, 2, CompareOptions.IgnoreCase, 1 }; yield return new object[] { "Hi", "\u0130", 1, 2, CompareOptions.None, -1 }; yield return new object[] { "Hi", "\u0130", 1, 2, CompareOptions.IgnoreCase, -1 }; // Unicode yield return new object[] { "Exhibit \u00C0", "A\u0300", 8, 9, CompareOptions.None, -1 }; yield return new object[] { "Exhibit \u00C0", "A\u0300", 8, 9, CompareOptions.Ordinal, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 8, 9, CompareOptions.None, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 8, 9, CompareOptions.IgnoreCase, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 8, 9, CompareOptions.OrdinalIgnoreCase, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 8, 9, 
CompareOptions.Ordinal, -1 }; yield return new object[] { "FooBar", "Foo\u0400Bar", 5, 6, CompareOptions.Ordinal, -1 }; yield return new object[] { "TestFooBA\u0300R", "FooB\u00C0R", 10, 11, CompareOptions.IgnoreNonSpace, -1 }; // Weightless characters yield return new object[] { "", "\u200d", 0, 0, CompareOptions.None, -1 }; yield return new object[] { "", "\u200d", -1, 0, CompareOptions.None, -1 }; yield return new object[] { "hello", "\u200d", 4, 5, CompareOptions.IgnoreCase, -1 }; // Ignore symbols yield return new object[] { "More Test's", "Tests", 10, 11, CompareOptions.IgnoreSymbols, -1 }; yield return new object[] { "More Test's", "Tests", 10, 11, CompareOptions.None, -1 }; yield return new object[] { "cbabababdbaba", "ab", 12, 13, CompareOptions.None, 10 }; // Platform differences yield return new object[] { "foobardzsdzs", "rddzs", 11, 12, CompareOptions.None, -1 }; } public static IEnumerable<object[]> IsPrefix_TestData() { // Empty strings yield return new object[] { "foo", "", CompareOptions.None, true }; yield return new object[] { "", "", CompareOptions.None, true }; // Early exit for empty values before 'options' is validated yield return new object[] { "hello", "", (CompareOptions)(-1), true }; // Long strings yield return new object[] { new string('a', 5555), "aaaaaaaaaaaaaaa", CompareOptions.None, true }; yield return new object[] { new string('a', 5555), new string('a', 5000), CompareOptions.None, true }; yield return new object[] { new string('a', 5555), new string('a', 5000) + "b", CompareOptions.None, false }; // Hungarian yield return new object[] { "dzsdzsfoobar", "ddzsf", CompareOptions.None, false }; yield return new object[] { "dzsdzsfoobar", "ddzsf", CompareOptions.Ordinal, false }; yield return new object[] { "dzsdzsfoobar", "ddzsf", CompareOptions.Ordinal, false }; // Turkish yield return new object[] { "interesting", "I", CompareOptions.None, false }; yield return new object[] { "interesting", "I", CompareOptions.IgnoreCase, true }; yield return new object[] { "interesting", "\u0130", CompareOptions.None, false }; yield return new object[] { "interesting", "\u0130", CompareOptions.IgnoreCase, false }; // Unicode yield return new object[] { "\u00C0nimal", "A\u0300", CompareOptions.None, false }; yield return new object[] { "\u00C0nimal", "A\u0300", CompareOptions.Ordinal, false }; yield return new object[] { "\u00C0nimal", "a\u0300", CompareOptions.IgnoreCase, false }; yield return new object[] { "\u00C0nimal", "a\u0300", CompareOptions.OrdinalIgnoreCase, false }; yield return new object[] { "FooBar", "Foo\u0400Bar", CompareOptions.Ordinal, false }; yield return new object[] { "FooBA\u0300R", "FooB\u00C0R", CompareOptions.IgnoreNonSpace, false }; yield return new object[] { "\u00D3\u00D4\u00D3\u00D4Hello", "\u00F3\u00F4", CompareOptions.OrdinalIgnoreCase, true }; yield return new object[] { "\u00D3\u00D4Hello\u00D3\u00D4", "\u00F3\u00F5", CompareOptions.OrdinalIgnoreCase, false }; yield return new object[] { "\U00010400\U00010400Hello", "\U00010428", CompareOptions.OrdinalIgnoreCase, true }; // Ignore symbols yield return new object[] { "Test's can be interesting", "Tests", CompareOptions.IgnoreSymbols, false }; yield return new object[] { "Test's can be interesting", "Tests", CompareOptions.None, false }; // Platform differences yield return new object[] { "dzsdzsfoobar", "ddzsf", CompareOptions.None, false }; } public static IEnumerable<object[]> IsSuffix_TestData() { // Empty strings yield return new object[] { "foo", "", CompareOptions.None, true }; yield 
return new object[] { "", "", CompareOptions.None, true }; // Early exit for empty values before 'options' is validated yield return new object[] { "hello", "", (CompareOptions)(-1), true }; // Long strings yield return new object[] { new string('a', 5555), "aaaaaaaaaaaaaaa", CompareOptions.None, true }; yield return new object[] { new string('a', 5555), new string('a', 5000), CompareOptions.None, true }; yield return new object[] { new string('a', 5555), new string('a', 5000) + "b", CompareOptions.None, false }; // Hungarian yield return new object[] { "foobardzsdzs", "rddzs", CompareOptions.Ordinal, false }; yield return new object[] { "foobardzsdzs", "rddzs", CompareOptions.None, false }; // Turkish yield return new object[] { "Hi", "I", CompareOptions.None, false }; yield return new object[] { "Hi", "I", CompareOptions.IgnoreCase, true }; yield return new object[] { "Hi", "\u0130", CompareOptions.None, false }; yield return new object[] { "Hi", "\u0130", CompareOptions.IgnoreCase, false }; // Unicode yield return new object[] { "Exhibit \u00C0", "A\u0300", CompareOptions.None, false }; yield return new object[] { "Exhibit \u00C0", "A\u0300", CompareOptions.Ordinal, false }; yield return new object[] { "Exhibit \u00C0", "a\u0300", CompareOptions.IgnoreCase, false }; yield return new object[] { "Exhibit \u00C0", "a\u0300", CompareOptions.OrdinalIgnoreCase, false }; yield return new object[] { "FooBar", "Foo\u0400Bar", CompareOptions.Ordinal, false }; yield return new object[] { "FooBA\u0300R", "FooB\u00C0R", CompareOptions.IgnoreNonSpace, false }; yield return new object[] { "\u00D3\u00D4\u00D3\u00D4Hello", "\u00F3\u00F4", CompareOptions.OrdinalIgnoreCase, false }; yield return new object[] { "\u00D3\u00D4Hello\u00D3\u00D4", "\u00F3\u00F4", CompareOptions.OrdinalIgnoreCase, true }; yield return new object[] { "\U00010400\U00010400Hello", "\U00010428", CompareOptions.OrdinalIgnoreCase, false }; yield return new object[] { "Hello\U00010400", "\U00010428", CompareOptions.OrdinalIgnoreCase, true }; // Weightless characters yield return new object[] { "", "\u200d", CompareOptions.None, false }; yield return new object[] { "", "\u200d", CompareOptions.IgnoreCase, false }; // Ignore symbols yield return new object[] { "More Test's", "Tests", CompareOptions.IgnoreSymbols, false }; yield return new object[] { "More Test's", "Tests", CompareOptions.None, false }; // Platform differences yield return new object[] { "foobardzsdzs", "rddzs", CompareOptions.None, false }; } public static IEnumerable<object[]> Compare_TestData() { CompareOptions ignoreKanaIgnoreWidthIgnoreCase = CompareOptions.IgnoreKanaType | CompareOptions.IgnoreWidth | CompareOptions.IgnoreCase; yield return new object[] { "\u3042", "\u30A2", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u3042", "\uFF71", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u304D\u3083", "\u30AD\u30E3", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u304D\u3083", "\u30AD\u3083", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u304D \u3083", "\u30AD\u3083", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u3044", "I", ignoreKanaIgnoreWidthIgnoreCase, 1 }; yield return new object[] { "a", "A", ignoreKanaIgnoreWidthIgnoreCase, 0 }; yield return new object[] { "a", "\uFF41", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "ABCDE", "\uFF21\uFF22\uFF23\uFF24\uFF25", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { 
"ABCDE", "\uFF21\uFF22\uFF23D\uFF25", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "ABCDE", "a\uFF22\uFF23D\uFF25", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "ABCDE", "\uFF41\uFF42\uFF23D\uFF25", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u6FA4", "\u6CA2", ignoreKanaIgnoreWidthIgnoreCase, 1 }; yield return new object[] { "\u3070\u3073\u3076\u3079\u307C", "\u30D0\u30D3\u30D6\u30D9\u30DC", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "ABDDE", "D", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "ABCDE", "\uFF43D", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "ABCDE", "c", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u3060", "\u305F", ignoreKanaIgnoreWidthIgnoreCase, 1 }; yield return new object[] { "\u3060", "\uFF80\uFF9E", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u3060", "\u30C0", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u3042", "\u30A1", CompareOptions.None, -1 }; yield return new object[] { "\u304D \u3083", "\u30AD\u3083", CompareOptions.None, -1 }; yield return new object[] { "\u3044", "I", CompareOptions.None, 1 }; yield return new object[] { "a", "A", CompareOptions.None, 1 }; yield return new object[] { "a", "\uFF41", CompareOptions.None, -1 }; yield return new object[] { "", "'", CompareOptions.None, -1 }; yield return new object[] { "\u00D3\u00D4", "\u00F3\u00F4", CompareOptions.OrdinalIgnoreCase, 0 }; yield return new object[] { "\U00010400", "\U00010428", CompareOptions.OrdinalIgnoreCase, 0 }; yield return new object[] { "\u00D3\u00D4", "\u00F3\u00F4", CompareOptions.IgnoreCase, 0 }; yield return new object[] { "\U00010400", "\U00010428", CompareOptions.IgnoreCase, 0 }; yield return new object[] { "\u00D3\u00D4G", "\u00F3\u00F4", CompareOptions.OrdinalIgnoreCase, 1 }; yield return new object[] { "\U00010400G", "\U00010428", CompareOptions.OrdinalIgnoreCase, 1 }; yield return new object[] { "\u00D3\u00D4G", "\u00F3\u00F4", CompareOptions.IgnoreCase, 1 }; yield return new object[] { "\U00010400G", "\U00010428", CompareOptions.IgnoreCase, 1 }; yield return new object[] { "\u00D3\u00D4", "\u00F3\u00F4G", CompareOptions.OrdinalIgnoreCase, -1 }; yield return new object[] { "\U00010400", "\U00010428G", CompareOptions.OrdinalIgnoreCase, -1 }; yield return new object[] { "\u00D3\u00D4", "\u00F3\u00F4G", CompareOptions.IgnoreCase, -1 }; yield return new object[] { "\U00010400", "\U00010428G", CompareOptions.IgnoreCase, -1 }; // Hungarian yield return new object[] { "dzsdzs", "ddzs", CompareOptions.Ordinal, 1 }; yield return new object[] { "dzsdzs", "ddzs", CompareOptions.None, 1 }; // Turkish yield return new object[] { "i", "I", CompareOptions.None, 1 }; yield return new object[] { "i", "I", CompareOptions.IgnoreCase, 0 }; yield return new object[] { "i", "\u0130", CompareOptions.None, -1 }; yield return new object[] { "i", "\u0130", CompareOptions.IgnoreCase, -1 }; yield return new object[] { "\u00C0", "A\u0300", CompareOptions.None, 1 }; yield return new object[] { "\u00C0", "A\u0300", CompareOptions.Ordinal, 1 }; yield return new object[] { "FooBar", "Foo\u0400Bar", CompareOptions.Ordinal, -1 }; yield return new object[] { "FooBA\u0300R", "FooB\u00C0R", CompareOptions.IgnoreNonSpace, -1 }; yield return new object[] { "Test's", "Tests", CompareOptions.IgnoreSymbols, -1 }; yield return new object[] { "Test's", "Tests", CompareOptions.StringSort, -1 }; // Spanish yield return 
new object[] { "llegar", "lugar", CompareOptions.None, -1 }; yield return new object[] { "\u3042", "\u30A1", CompareOptions.IgnoreKanaType | CompareOptions.IgnoreWidth | CompareOptions.IgnoreCase, -1 }; // Surrogates yield return new object[] { "Hello\uFE6A", "Hello\U0001F601", CompareOptions.IgnoreCase, -1 }; yield return new object[] { "Hello\U0001F601", "Hello\uFE6A", CompareOptions.IgnoreCase, 1 }; yield return new object[] { "\uDBFF", "\uD800\uDC00", CompareOptions.IgnoreCase, -1 }; yield return new object[] { "\uD800\uDC00", "\uDBFF", CompareOptions.IgnoreCase, 1 }; yield return new object[] { "abcdefg\uDBFF", "abcdefg\uD800\uDC00", CompareOptions.IgnoreCase, -1 }; } public static IEnumerable<object[]> ToLower_TestData() { yield return new object[] { "", "", true }; yield return new object[] { "A", "a", true }; yield return new object[] { "a", "a", true }; yield return new object[] { "ABC", "abc", true }; yield return new object[] { "abc", "abc", true }; yield return new object[] { "1", "1", true }; yield return new object[] { "123", "123", true }; yield return new object[] { "!", "!", true }; yield return new object[] { "HELLOWOR!LD123", "hellowor!ld123", true }; yield return new object[] { "HelloWor!ld123", "hellowor!ld123", true }; yield return new object[] { "Hello\n\0World\u0009!", "hello\n\0world\t!", true }; yield return new object[] { "THIS IS A LONGER TEST CASE", "this is a longer test case", true }; yield return new object[] { "this Is A LONGER mIXEd casE test case", "this is a longer mixed case test case", true }; yield return new object[] { "THIS \t hAs \t SOMe \t tabs", "this \t has \t some \t tabs", true }; yield return new object[] { "EMBEDDED\0NuLL\0Byte\0", "embedded\0null\0byte\0", true }; // LATIN CAPITAL LETTER O WITH ACUTE, which has a lower case variant. yield return new object[] { "\u00D3", "\u00F3", true }; // SNOWMAN, which does not have a lower case variant. yield return new object[] { "\u2603", "\u2603", true }; // RAINBOW (outside the BMP and does not case) yield return new object[] { "\U0001F308", "\U0001F308", true }; // Surrogate casing yield return new object[] { "\U00010400", "\U00010428", true }; // Unicode defines some codepoints which expand into multiple codepoints // when cased (see SpecialCasing.txt from UNIDATA for some examples). We have never done // these sorts of expansions, since it would cause string lengths to change when cased, // which is non-intuitive. In addition, there are some context sensitive mappings which // we also don't preform. // Greek Capital Letter Sigma (does not to case to U+03C2 with "final sigma" rule). 
yield return new object[] { "\u03A3", "\u03C3", true }; } public static IEnumerable<object[]> ToUpper_TestData() { yield return new object[] { "", "" , true}; yield return new object[] { "a", "A", true }; yield return new object[] { "abc", "ABC", true }; yield return new object[] { "A", "A", true }; yield return new object[] { "ABC", "ABC", true }; yield return new object[] { "1", "1", true }; yield return new object[] { "123", "123", true }; yield return new object[] { "!", "!", true }; yield return new object[] { "HelloWor!ld123", "HELLOWOR!LD123", true }; yield return new object[] { "HELLOWOR!LD123", "HELLOWOR!LD123", true }; yield return new object[] { "Hello\n\0World\u0009!", "HELLO\n\0WORLD\t!", true }; yield return new object[] { "this is a longer test case", "THIS IS A LONGER TEST CASE", true }; yield return new object[] { "this Is A LONGER mIXEd casE test case", "THIS IS A LONGER MIXED CASE TEST CASE", true }; yield return new object[] { "this \t HaS \t somE \t TABS", "THIS \t HAS \t SOME \t TABS", true }; yield return new object[] { "embedded\0NuLL\0Byte\0", "EMBEDDED\0NULL\0BYTE\0", true }; // LATIN SMALL LETTER O WITH ACUTE, mapped to LATIN CAPITAL LETTER O WITH ACUTE. yield return new object[] { "\u00F3", "\u00D3", true }; // SNOWMAN, which does not have an upper case variant. yield return new object[] { "\u2603", "\u2603", true }; // RAINBOW (outside the BMP and does not case) yield return new object[] { "\U0001F308", "\U0001F308", true }; // Surrogate casing yield return new object[] { "\U00010428", "\U00010400", true }; // Unicode defines some codepoints which expand into multiple codepoints // when cased (see SpecialCasing.txt from UNIDATA for some examples). We have never done // these sorts of expansions, since it would cause string lengths to change when cased, // which is non-intuitive. In addition, there are some context sensitive mappings which // we also don't preform. // es-zed does not case to SS when uppercased. yield return new object[] { "\u00DF", "\u00DF", true }; // Ligatures do not expand when cased. yield return new object[] { "\uFB00", "\uFB00", true }; // Precomposed character with no uppercase variant, we don't want to "decompose" this // as part of casing. 
yield return new object[] { "\u0149", "\u0149", true }; yield return new object[] { "\u03C3", "\u03A3", true }; } public static IEnumerable<object[]> GetAscii_TestData() { yield return new object[] { "\u0101", 0, 1, "xn--yda" }; yield return new object[] { "\u0101\u0061\u0041", 0, 3, "xn--aa-cla" }; yield return new object[] { "\u0061\u0101\u0062", 0, 3, "xn--ab-dla" }; yield return new object[] { "\u0061\u0062\u0101", 0, 3, "xn--ab-ela" }; yield return new object[] { "\uD800\uDF00\uD800\uDF01\uD800\uDF02", 0, 6, "xn--097ccd" }; // Surrogate pairs yield return new object[] { "\uD800\uDF00\u0061\uD800\uDF01\u0042\uD800\uDF02", 0, 8, "xn--ab-ic6nfag" }; // Surrogate pairs separated by ASCII yield return new object[] { "\uD800\uDF00\u0101\uD800\uDF01\u305D\uD800\uDF02", 0, 8, "xn--yda263v6b6kfag" }; // Surrogate pairs separated by non-ASCII yield return new object[] { "\uD800\uDF00\u0101\uD800\uDF01\u0061\uD800\uDF02", 0, 8, "xn--a-nha4529qfag" }; // Surrogate pairs separated by ASCII and non-ASCII yield return new object[] { "\u0061\u0062\u0063", 0, 3, "\u0061\u0062\u0063" }; // ASCII only code points yield return new object[] { "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067", 0, 7, "xn--d9juau41awczczp" }; // Non-ASCII only code points yield return new object[] { "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 0, 9, "xn--de-jg4avhby1noc0d" }; // ASCII and non-ASCII code points yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 0, 21, "abc.xn--d9juau41awczczp.xn--de-jg4avhby1noc0d" }; // Fully qualified domain name // Embedded domain name conversion (NLS + only)(Priority 1) // Per the spec [7], "The index and count parameters (when provided) allow the // conversion to be done on a larger string where the domain name is embedded // (such as a URI or IRI). The output string is only the converted FQDN or // label, not the whole input string (if the input string contains more // character than the substring to convert)." // Fully Qualified Domain Name (Label1.Label2.Label3) yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 0, 21, "abc.xn--d9juau41awczczp.xn--de-jg4avhby1noc0d" }; yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 0, 11, "abc.xn--d9juau41awczczp" }; yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 0, 12, "abc.xn--d9juau41awczczp." }; yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 4, 17, "xn--d9juau41awczczp.xn--de-jg4avhby1noc0d" }; yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 4, 7, "xn--d9juau41awczczp" }; yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 4, 8, "xn--d9juau41awczczp." 
}; yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 12, 9, "xn--de-jg4avhby1noc0d" }; } public static IEnumerable<object[]> GetUnicode_TestData() { yield return new object[] { "xn--yda", 0, 7, "\u0101" }; yield return new object[] { "axn--ydab", 1, 7, "\u0101" }; yield return new object[] { "xn--aa-cla", 0, 10, "\u0101\u0061a" }; yield return new object[] { "xn--ab-dla", 0, 10, "\u0061\u0101\u0062" }; yield return new object[] { "xn--ab-ela", 0, 10, "\u0061\u0062\u0101" }; yield return new object[] { "xn--097ccd", 0, 10, "\uD800\uDF00\uD800\uDF01\uD800\uDF02" }; // Surrogate pairs yield return new object[] { "xn--ab-ic6nfag", 0, 14, "\uD800\uDF00\u0061\uD800\uDF01b\uD800\uDF02" }; // Surrogate pairs separated by ASCII yield return new object[] { "xn--yda263v6b6kfag", 0, 18, "\uD800\uDF00\u0101\uD800\uDF01\u305D\uD800\uDF02" }; // Surrogate pairs separated by non-ASCII yield return new object[] { "xn--a-nha4529qfag", 0, 17, "\uD800\uDF00\u0101\uD800\uDF01\u0061\uD800\uDF02" }; // Surrogate pairs separated by ASCII and non-ASCII yield return new object[] { "\u0061\u0062\u0063", 0, 3, "\u0061\u0062\u0063" }; // ASCII only code points yield return new object[] { "xn--d9juau41awczczp", 0, 19, "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067" }; // Non-ASCII only code points yield return new object[] { "xn--de-jg4avhby1noc0d", 0, 21, "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0" }; // ASCII and non-ASCII code points yield return new object[] { "abc.xn--d9juau41awczczp.xn--de-jg4avhby1noc0d", 0, 45, "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0" }; // Fully qualified domain name // Embedded domain name conversion (NLS + only)(Priority 1) // Per the spec [7], "The index and count parameters (when provided) allow the // conversion to be done on a larger string where the domain name is embedded // (such as a URI or IRI). The output string is only the converted FQDN or // label, not the whole input string (if the input string contains more // character than the substring to convert)." // Fully Qualified Domain Name (Label1.Label2.Label3) yield return new object[] { "abc.xn--d9juau41awczczp.xn--de-jg4avhby1noc0d", 0, 45, "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0" }; yield return new object[] { "abc.xn--d9juau41awczczp", 0, 23, "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067" }; yield return new object[] { "abc.xn--d9juau41awczczp.", 0, 24, "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067." }; yield return new object[] { "xn--d9juau41awczczp.xn--de-jg4avhby1noc0d", 0, 41, "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0" }; yield return new object[] { "xn--d9juau41awczczp", 0, 19, "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067" }; yield return new object[] { "xn--d9juau41awczczp.", 0, 20, "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067." 
}; yield return new object[] { "xn--de-jg4avhby1noc0d", 0, 21, "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0" }; } [ConditionalFact(nameof(PredefinedCulturesOnlyIsDisabled))] public static void IcuShouldNotBeLoaded() { Assert.False(PlatformDetection.IsIcuGlobalization); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(Cultures_TestData))] public void TestCultureData(string cultureName) { CultureInfo ci = new CultureInfo(cultureName); // // DateTimeInfo // Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.AbbreviatedDayNames, ci.DateTimeFormat.AbbreviatedDayNames); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.AbbreviatedMonthGenitiveNames, ci.DateTimeFormat.AbbreviatedMonthGenitiveNames); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.AbbreviatedMonthNames, ci.DateTimeFormat.AbbreviatedMonthNames); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.AMDesignator, ci.DateTimeFormat.AMDesignator); Assert.True(ci.DateTimeFormat.Calendar is GregorianCalendar); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.CalendarWeekRule, ci.DateTimeFormat.CalendarWeekRule); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.DateSeparator, ci.DateTimeFormat.DateSeparator); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.DayNames, ci.DateTimeFormat.DayNames); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.FirstDayOfWeek, ci.DateTimeFormat.FirstDayOfWeek); for (DayOfWeek dow = DayOfWeek.Sunday; dow < DayOfWeek.Saturday; dow++) Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetAbbreviatedDayName(dow), ci.DateTimeFormat.GetAbbreviatedDayName(dow)); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetAbbreviatedEraName(1), ci.DateTimeFormat.GetAbbreviatedEraName(1)); for (int i = 1; i <= 12; i++) Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetAbbreviatedMonthName(i), ci.DateTimeFormat.GetAbbreviatedMonthName(i)); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetAllDateTimePatterns(), ci.DateTimeFormat.GetAllDateTimePatterns()); for (DayOfWeek dow = DayOfWeek.Sunday; dow < DayOfWeek.Saturday; dow++) Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetDayName(dow), ci.DateTimeFormat.GetDayName(dow)); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetEra(CultureInfo.InvariantCulture.DateTimeFormat.GetEraName(1)), ci.DateTimeFormat.GetEra(ci.DateTimeFormat.GetEraName(1))); for (int i = 1; i <= 12; i++) Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetMonthName(i), ci.DateTimeFormat.GetMonthName(i)); for (DayOfWeek dow = DayOfWeek.Sunday; dow < DayOfWeek.Saturday; dow++) Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetShortestDayName(dow), ci.DateTimeFormat.GetShortestDayName(dow)); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.LongDatePattern, ci.DateTimeFormat.LongDatePattern); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.LongTimePattern, ci.DateTimeFormat.LongTimePattern); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.MonthDayPattern, ci.DateTimeFormat.MonthDayPattern); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.MonthGenitiveNames, ci.DateTimeFormat.MonthGenitiveNames); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.MonthNames, ci.DateTimeFormat.MonthNames); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.NativeCalendarName, ci.DateTimeFormat.NativeCalendarName); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.PMDesignator, 
ci.DateTimeFormat.PMDesignator); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.RFC1123Pattern, ci.DateTimeFormat.RFC1123Pattern); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.ShortDatePattern, ci.DateTimeFormat.ShortDatePattern); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.ShortestDayNames, ci.DateTimeFormat.ShortestDayNames); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.ShortTimePattern, ci.DateTimeFormat.ShortTimePattern); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.TimeSeparator, ci.DateTimeFormat.TimeSeparator); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.YearMonthPattern, ci.DateTimeFormat.YearMonthPattern); // // Culture data // Assert.True(ci.Calendar is GregorianCalendar); CultureTypes ct = ci.Name == "" ? CultureInfo.InvariantCulture.CultureTypes : CultureInfo.InvariantCulture.CultureTypes | CultureTypes.UserCustomCulture; Assert.Equal(ct, ci.CultureTypes); Assert.Equal(CultureInfo.InvariantCulture.NativeName, ci.DisplayName); Assert.Equal(CultureInfo.InvariantCulture.EnglishName, ci.EnglishName); Assert.Equal(CultureInfo.InvariantCulture.GetConsoleFallbackUICulture(), ci.GetConsoleFallbackUICulture()); Assert.Equal(cultureName, ci.IetfLanguageTag); Assert.Equal(CultureInfo.InvariantCulture.IsNeutralCulture, ci.IsNeutralCulture); Assert.Equal(CultureInfo.InvariantCulture.KeyboardLayoutId, ci.KeyboardLayoutId); Assert.Equal(ci.Name == "" ? 0x7F : 0x1000, ci.LCID); Assert.Equal(cultureName, ci.Name); Assert.Equal(CultureInfo.InvariantCulture.NativeName, ci.NativeName); Assert.Equal(1, ci.OptionalCalendars.Length); Assert.True(ci.OptionalCalendars[0] is GregorianCalendar); Assert.Equal(CultureInfo.InvariantCulture.Parent, ci.Parent); Assert.Equal(CultureInfo.InvariantCulture.ThreeLetterISOLanguageName, ci.ThreeLetterISOLanguageName); Assert.Equal(CultureInfo.InvariantCulture.ThreeLetterWindowsLanguageName, ci.ThreeLetterWindowsLanguageName); Assert.Equal(CultureInfo.InvariantCulture.TwoLetterISOLanguageName, ci.TwoLetterISOLanguageName); Assert.Equal(ci.Name == "" ? 
false : true, ci.UseUserOverride); // // Culture Creations // Assert.Equal(CultureInfo.InvariantCulture, CultureInfo.CurrentCulture); Assert.Equal(CultureInfo.InvariantCulture, CultureInfo.CurrentUICulture); Assert.Equal(CultureInfo.InvariantCulture, CultureInfo.InstalledUICulture); Assert.Equal(CultureInfo.InvariantCulture, CultureInfo.CreateSpecificCulture("en")); Assert.Equal(ci, CultureInfo.GetCultureInfo(cultureName).Clone()); Assert.Equal(ci, CultureInfo.GetCultureInfoByIetfLanguageTag(cultureName)); // // NumberFormatInfo // Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.CurrencyDecimalDigits, ci.NumberFormat.CurrencyDecimalDigits); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.CurrencyDecimalSeparator, ci.NumberFormat.CurrencyDecimalSeparator); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.CurrencyGroupSeparator, ci.NumberFormat.CurrencyGroupSeparator); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.CurrencyGroupSizes, ci.NumberFormat.CurrencyGroupSizes); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.CurrencyNegativePattern, ci.NumberFormat.CurrencyNegativePattern); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.CurrencyPositivePattern, ci.NumberFormat.CurrencyPositivePattern); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.CurrencySymbol, ci.NumberFormat.CurrencySymbol); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.DigitSubstitution, ci.NumberFormat.DigitSubstitution); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NaNSymbol, ci.NumberFormat.NaNSymbol); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NativeDigits, ci.NumberFormat.NativeDigits); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NegativeInfinitySymbol, ci.NumberFormat.NegativeInfinitySymbol); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NegativeSign, ci.NumberFormat.NegativeSign); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NumberDecimalDigits, ci.NumberFormat.NumberDecimalDigits); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NumberDecimalSeparator, ci.NumberFormat.NumberDecimalSeparator); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NumberGroupSeparator, ci.NumberFormat.NumberGroupSeparator); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NumberGroupSizes, ci.NumberFormat.NumberGroupSizes); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NumberNegativePattern, ci.NumberFormat.NumberNegativePattern); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PercentDecimalDigits, ci.NumberFormat.PercentDecimalDigits); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PercentDecimalSeparator, ci.NumberFormat.PercentDecimalSeparator); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PercentGroupSeparator, ci.NumberFormat.PercentGroupSeparator); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PercentGroupSizes, ci.NumberFormat.PercentGroupSizes); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PercentNegativePattern, ci.NumberFormat.PercentNegativePattern); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PercentPositivePattern, ci.NumberFormat.PercentPositivePattern); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PercentSymbol, ci.NumberFormat.PercentSymbol); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PerMilleSymbol, ci.NumberFormat.PerMilleSymbol); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PositiveInfinitySymbol, ci.NumberFormat.PositiveInfinitySymbol); 
Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PositiveSign, ci.NumberFormat.PositiveSign); // // TextInfo // Assert.Equal(CultureInfo.InvariantCulture.TextInfo.ANSICodePage, ci.TextInfo.ANSICodePage); Assert.Equal(cultureName, ci.TextInfo.CultureName); Assert.Equal(CultureInfo.InvariantCulture.TextInfo.EBCDICCodePage, ci.TextInfo.EBCDICCodePage); Assert.Equal(CultureInfo.InvariantCulture.TextInfo.IsRightToLeft, ci.TextInfo.IsRightToLeft); Assert.Equal(ci.Name == "" ? 0x7F : 0x1000, ci.TextInfo.LCID); Assert.Equal(CultureInfo.InvariantCulture.TextInfo.ListSeparator, ci.TextInfo.ListSeparator); Assert.Equal(CultureInfo.InvariantCulture.TextInfo.MacCodePage, ci.TextInfo.MacCodePage); Assert.Equal(CultureInfo.InvariantCulture.TextInfo.OEMCodePage, ci.TextInfo.OEMCodePage); // // CompareInfo // Assert.Equal(ci.Name == "" ? 0x7F : 0x1000, ci.CompareInfo.LCID); Assert.True(cultureName.Equals(ci.CompareInfo.Name, StringComparison.OrdinalIgnoreCase)); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(Cultures_TestData))] public void SetCultureData(string cultureName) { CultureInfo ci = new CultureInfo(cultureName); // // DateTimeInfo // var calendar = new GregorianCalendar(); ci.DateTimeFormat.Calendar = calendar; Assert.Equal(calendar, ci.DateTimeFormat.Calendar); Assert.Throws<ArgumentOutOfRangeException>(() => ci.DateTimeFormat.Calendar = new TaiwanCalendar()); } [ConditionalFact(nameof(PredefinedCulturesOnlyIsDisabled))] public void TestEnum() { Assert.Equal(new CultureInfo[1] { CultureInfo.InvariantCulture }, CultureInfo.GetCultures(CultureTypes.AllCultures)); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(Cultures_TestData))] public void TestSortVersion(string cultureName) { SortVersion version = new SortVersion(0, new Guid(0, 0, 0, 0, 0, 0, 0, (byte)(0x7F >> 24), (byte)((0x7F & 0x00FF0000) >> 16), (byte)((0x7F & 0x0000FF00) >> 8), (byte)(0x7F & 0xFF))); Assert.Equal(version, new CultureInfo(cultureName).CompareInfo.Version); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [InlineData(0, 0)] [InlineData(1, 2)] [InlineData(100_000, 200_000)] [InlineData(0x3FFF_FFFF, 0x7FFF_FFFE)] public void TestGetSortKeyLength_Valid(int inputLength, int expectedSortKeyLength) { using BoundedMemory<char> boundedMemory = BoundedMemory.Allocate<char>(0); // AV if dereferenced boundedMemory.MakeReadonly(); ReadOnlySpan<char> dummySpan = MemoryMarshal.CreateReadOnlySpan(ref MemoryMarshal.GetReference(boundedMemory.Span), inputLength); Assert.Equal(expectedSortKeyLength, CultureInfo.InvariantCulture.CompareInfo.GetSortKeyLength(dummySpan)); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [InlineData(0x4000_0000)] [InlineData(int.MaxValue)] public unsafe void TestGetSortKeyLength_OverlongArgument(int inputLength) { using BoundedMemory<char> boundedMemory = BoundedMemory.Allocate<char>(0); // AV if dereferenced boundedMemory.MakeReadonly(); Assert.Throws<ArgumentException>("source", () => { ReadOnlySpan<char> dummySpan = MemoryMarshal.CreateReadOnlySpan(ref MemoryMarshal.GetReference(boundedMemory.Span), inputLength); CultureInfo.InvariantCulture.CompareInfo.GetSortKeyLength(dummySpan); }); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [InlineData("Hello", CompareOptions.None, "Hello")] [InlineData("Hello", CompareOptions.IgnoreWidth, "Hello")] [InlineData("Hello", CompareOptions.IgnoreCase, "HELLO")] [InlineData("Hello", CompareOptions.IgnoreCase | CompareOptions.IgnoreWidth, "HELLO")] 
[InlineData("Hell\u00F6", CompareOptions.None, "Hell\u00F6")] // U+00F6 = LATIN SMALL LETTER O WITH DIAERESIS [InlineData("Hell\u00F6", CompareOptions.IgnoreCase, "HELL\u00D6")] public unsafe void TestSortKey_FromSpan(string input, CompareOptions options, string expected) { byte[] expectedOutputBytes = GetExpectedInvariantOrdinalSortKey(expected); CompareInfo compareInfo = CultureInfo.InvariantCulture.CompareInfo; // First, validate that too short a buffer throws Assert.Throws<ArgumentException>("destination", () => compareInfo.GetSortKey(input, new byte[expectedOutputBytes.Length - 1], options)); // Next, validate that using a properly-sized buffer succeeds // We'll use BoundedMemory to check for buffer overruns using BoundedMemory<char> boundedInputMemory = BoundedMemory.AllocateFromExistingData<char>(input); boundedInputMemory.MakeReadonly(); ReadOnlySpan<char> boundedInputSpan = boundedInputMemory.Span; using BoundedMemory<byte> boundedOutputMemory = BoundedMemory.Allocate<byte>(expectedOutputBytes.Length); Span<byte> boundedOutputSpan = boundedOutputMemory.Span; Assert.Equal(expectedOutputBytes.Length, compareInfo.GetSortKey(boundedInputSpan, boundedOutputSpan, options)); Assert.Equal(expectedOutputBytes, boundedOutputSpan[0..expectedOutputBytes.Length].ToArray()); // Now try it once more, passing a larger span where the last byte points to unallocated memory. // If GetSortKey attempts to write beyond the number of bytes we expect, the unit test will AV. boundedOutputSpan.Clear(); fixed (byte* pBoundedOutputSpan = boundedOutputSpan) { boundedOutputSpan = new Span<byte>(pBoundedOutputSpan, boundedOutputSpan.Length + 1); // last byte is unallocated memory Assert.Equal(expectedOutputBytes.Length, compareInfo.GetSortKey(boundedInputSpan, boundedOutputSpan, options)); Assert.Equal(expectedOutputBytes, boundedOutputSpan[0..expectedOutputBytes.Length].ToArray()); } } [ConditionalFact(nameof(PredefinedCulturesOnlyIsDisabled))] public void TestSortKey_ZeroWeightCodePoints() { // In the invariant globalization mode, there's no such thing as a zero-weight code point, // so the U+200C ZERO WIDTH NON-JOINER code point contributes to the final sort key value. CompareInfo compareInfo = CultureInfo.InvariantCulture.CompareInfo; SortKey sortKeyForEmptyString = compareInfo.GetSortKey(""); SortKey sortKeyForZeroWidthJoiner = compareInfo.GetSortKey("\u200c"); Assert.NotEqual(0, SortKey.Compare(sortKeyForEmptyString, sortKeyForZeroWidthJoiner)); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [InlineData("", "", 0)] [InlineData("", "not-empty", -1)] [InlineData("not-empty", "", 1)] [InlineData("hello", "hello", 0)] [InlineData("prefix", "prefix-with-more-data", -1)] [InlineData("prefix-with-more-data", "prefix", 1)] [InlineData("e", "\u0115", -1)] // U+0115 = LATIN SMALL LETTER E WITH BREVE, tests endianness handling public void TestSortKey_Compare_And_Equals(string value1, string value2, int expectedSign) { // These tests are in the "invariant" unit test project because we rely on Invariant mode // copying the input data directly into the sort key. 
SortKey sortKey1 = CultureInfo.InvariantCulture.CompareInfo.GetSortKey(value1); SortKey sortKey2 = CultureInfo.InvariantCulture.CompareInfo.GetSortKey(value2); Assert.Equal(expectedSign, Math.Sign(SortKey.Compare(sortKey1, sortKey2))); Assert.Equal(expectedSign == 0, sortKey1.Equals(sortKey2)); } private static StringComparison GetStringComparison(CompareOptions options) { StringComparison sc = (StringComparison) 0; if ((options & CompareOptions.IgnoreCase) != 0) sc |= StringComparison.CurrentCultureIgnoreCase; if ((options & CompareOptions.Ordinal) != 0) sc |= StringComparison.Ordinal; if ((options & CompareOptions.OrdinalIgnoreCase) != 0) sc |= StringComparison.OrdinalIgnoreCase; if (sc == (StringComparison)0) { sc = StringComparison.CurrentCulture; } return sc; } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(IndexOf_TestData))] public void TestIndexOf(string source, string value, int startIndex, int count, CompareOptions options, int result) { foreach (string cul in s_cultureNames) { CompareInfo compareInfo = CultureInfo.GetCultureInfo(cul).CompareInfo; TestCore(compareInfo, source, value, startIndex, count, options, result); } // static test helper method to avoid mutating input args when called in a loop static void TestCore(CompareInfo compareInfo, string source, string value, int startIndex, int count, CompareOptions options, int result) { Assert.Equal(result, compareInfo.IndexOf(source, value, startIndex, count, options)); Assert.Equal(result, source.IndexOf(value, startIndex, count, GetStringComparison(options))); // Span versions - using BoundedMemory to check for buffer overruns using BoundedMemory<char> sourceBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(source.AsSpan(startIndex, count)); sourceBoundedMemory.MakeReadonly(); ReadOnlySpan<char> sourceBoundedSpan = sourceBoundedMemory.Span; using BoundedMemory<char> valueBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(value); valueBoundedMemory.MakeReadonly(); ReadOnlySpan<char> valueBoundedSpan = valueBoundedMemory.Span; int offsetResult = result; if (offsetResult >= 0) { offsetResult -= startIndex; // account for span slicing Assert.True(offsetResult >= 0, "Shouldn't have made an affirmative result go negative."); } Assert.Equal(offsetResult, sourceBoundedSpan.IndexOf(valueBoundedSpan, GetStringComparison(options))); Assert.Equal(offsetResult, compareInfo.IndexOf(sourceBoundedSpan, valueBoundedSpan, options)); Assert.Equal(offsetResult, compareInfo.IndexOf(sourceBoundedSpan, valueBoundedSpan, options, out int matchLength)); if (offsetResult >= 0) { Assert.Equal(valueBoundedSpan.Length, matchLength); // Invariant mode should perform non-linguistic comparisons } else { Assert.Equal(0, matchLength); // not found } } } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(LastIndexOf_TestData))] public void TestLastIndexOf(string source, string value, int startIndex, int count, CompareOptions options, int result) { foreach (string cul in s_cultureNames) { CompareInfo compareInfo = CultureInfo.GetCultureInfo(cul).CompareInfo; TestCore(compareInfo, source, value, startIndex, count, options, result); } // static test helper method to avoid mutating input args when called in a loop static void TestCore(CompareInfo compareInfo, string source, string value, int startIndex, int count, CompareOptions options, int result) { Assert.Equal(result, compareInfo.LastIndexOf(source, value, startIndex, count, options)); Assert.Equal(result, 
source.LastIndexOf(value, startIndex, count, GetStringComparison(options))); // Filter differences between string-based and Span-based LastIndexOf // - Empty value handling - https://github.com/dotnet/runtime/issues/13382 // - Negative count if (value.Length == 0 || count < 0) return; if (startIndex == source.Length) { startIndex--; if (count > 0) count--; } int leftStartIndex = (startIndex - count + 1); // Span versions - using BoundedMemory to check for buffer overruns using BoundedMemory<char> sourceBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(source.AsSpan(leftStartIndex, count)); sourceBoundedMemory.MakeReadonly(); ReadOnlySpan<char> sourceBoundedSpan = sourceBoundedMemory.Span; using BoundedMemory<char> valueBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(value); valueBoundedMemory.MakeReadonly(); ReadOnlySpan<char> valueBoundedSpan = valueBoundedMemory.Span; if (result >= 0) { result -= leftStartIndex; // account for span slicing Assert.True(result >= 0, "Shouldn't have made an affirmative result go negative."); } Assert.Equal(result, sourceBoundedSpan.LastIndexOf(valueBoundedSpan, GetStringComparison(options))); Assert.Equal(result, compareInfo.LastIndexOf(sourceBoundedSpan, valueBoundedSpan, options)); Assert.Equal(result, compareInfo.LastIndexOf(sourceBoundedSpan, valueBoundedSpan, options, out int matchLength)); if (result >= 0) { Assert.Equal(valueBoundedSpan.Length, matchLength); // Invariant mode should perform non-linguistic comparisons } else { Assert.Equal(0, matchLength); // not found } } } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(IsPrefix_TestData))] public void TestIsPrefix(string source, string value, CompareOptions options, bool result) { foreach (string cul in s_cultureNames) { CompareInfo compareInfo = CultureInfo.GetCultureInfo(cul).CompareInfo; Assert.Equal(result, compareInfo.IsPrefix(source, value, options)); Assert.Equal(result, source.StartsWith(value, GetStringComparison(options))); // Span versions - using BoundedMemory to check for buffer overruns using BoundedMemory<char> sourceBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(source); sourceBoundedMemory.MakeReadonly(); ReadOnlySpan<char> sourceBoundedSpan = sourceBoundedMemory.Span; using BoundedMemory<char> valueBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(value); valueBoundedMemory.MakeReadonly(); ReadOnlySpan<char> valueBoundedSpan = valueBoundedMemory.Span; Assert.Equal(result, sourceBoundedSpan.StartsWith(valueBoundedSpan, GetStringComparison(options))); Assert.Equal(result, compareInfo.IsPrefix(sourceBoundedSpan, valueBoundedSpan, options)); Assert.Equal(result, compareInfo.IsPrefix(sourceBoundedSpan, valueBoundedSpan, options, out int matchLength)); if (result) { Assert.Equal(valueBoundedSpan.Length, matchLength); // Invariant mode should perform non-linguistic comparisons } else { Assert.Equal(0, matchLength); // not found } } } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(IsSuffix_TestData))] public void TestIsSuffix(string source, string value, CompareOptions options, bool result) { foreach (string cul in s_cultureNames) { CompareInfo compareInfo = CultureInfo.GetCultureInfo(cul).CompareInfo; Assert.Equal(result, compareInfo.IsSuffix(source, value, options)); Assert.Equal(result, source.EndsWith(value, GetStringComparison(options))); // Span versions - using BoundedMemory to check for buffer overruns using BoundedMemory<char> sourceBoundedMemory =
BoundedMemory.AllocateFromExistingData<char>(source); sourceBoundedMemory.MakeReadonly(); ReadOnlySpan<char> sourceBoundedSpan = sourceBoundedMemory.Span; using BoundedMemory<char> valueBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(value); valueBoundedMemory.MakeReadonly(); ReadOnlySpan<char> valueBoundedSpan = valueBoundedMemory.Span; Assert.Equal(result, sourceBoundedSpan.EndsWith(valueBoundedSpan, GetStringComparison(options))); Assert.Equal(result, compareInfo.IsSuffix(sourceBoundedSpan, valueBoundedSpan, options)); Assert.Equal(result, compareInfo.IsSuffix(sourceBoundedSpan, valueBoundedSpan, options, out int matchLength)); if (result) { Assert.Equal(valueBoundedSpan.Length, matchLength); // Invariant mode should perform non-linguistic comparisons } else { Assert.Equal(0, matchLength); // not found } } } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [InlineData("", false)] [InlineData('x', true)] [InlineData('\ud800', true)] // standalone high surrogate [InlineData("hello", true)] public void TestIsSortable(object sourceObj, bool expectedResult) { if (sourceObj is string s) { Assert.Equal(expectedResult, CompareInfo.IsSortable(s)); } else { Assert.Equal(expectedResult, CompareInfo.IsSortable((char)sourceObj)); } } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(Compare_TestData))] public void TestCompare(string source, string value, CompareOptions options, int result) { foreach (string cul in s_cultureNames) { int res = CultureInfo.GetCultureInfo(cul).CompareInfo.Compare(source, value, options); Assert.Equal(result, Math.Sign(res)); res = string.Compare(source, value, GetStringComparison(options)); Assert.Equal(result, Math.Sign(res)); // Span versions - using BoundedMemory to check for buffer overruns using BoundedMemory<char> sourceBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(source); sourceBoundedMemory.MakeReadonly(); ReadOnlySpan<char> sourceBoundedSpan = sourceBoundedMemory.Span; using BoundedMemory<char> valueBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(value); valueBoundedMemory.MakeReadonly(); ReadOnlySpan<char> valueBoundedSpan = valueBoundedMemory.Span; res = CultureInfo.GetCultureInfo(cul).CompareInfo.Compare(sourceBoundedSpan, valueBoundedSpan, options); Assert.Equal(result, Math.Sign(res)); res = sourceBoundedSpan.CompareTo(valueBoundedSpan, GetStringComparison(options)); Assert.Equal(result, Math.Sign(res)); } } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(ToLower_TestData))] public void TestToLower(string upper, string lower, bool result) { foreach (string cul in s_cultureNames) { Assert.Equal(result, CultureInfo.GetCultureInfo(cul).TextInfo.ToLower(upper).Equals(lower, StringComparison.Ordinal)); Assert.Equal(result, upper.ToLower().Equals(lower, StringComparison.Ordinal)); } } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(ToUpper_TestData))] public void TestToUpper(string lower, string upper, bool result) { foreach (string cul in s_cultureNames) { Assert.Equal(result, CultureInfo.GetCultureInfo(cul).TextInfo.ToUpper(lower).Equals(upper, StringComparison.Ordinal)); Assert.Equal(result, lower.ToUpper().Equals(upper, StringComparison.Ordinal)); } } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [InlineData("", NormalizationForm.FormC)] [InlineData("\uFB01", NormalizationForm.FormC)] [InlineData("\uFB01", NormalizationForm.FormD)] [InlineData("\uFB01", NormalizationForm.FormKC)] 
[InlineData("\uFB01", NormalizationForm.FormKD)] [InlineData("\u1E9b\u0323", NormalizationForm.FormC)] [InlineData("\u1E9b\u0323", NormalizationForm.FormD)] [InlineData("\u1E9b\u0323", NormalizationForm.FormKC)] [InlineData("\u1E9b\u0323", NormalizationForm.FormKD)] [InlineData("\u00C4\u00C7", NormalizationForm.FormC)] [InlineData("\u00C4\u00C7", NormalizationForm.FormD)] [InlineData("A\u0308C\u0327", NormalizationForm.FormC)] [InlineData("A\u0308C\u0327", NormalizationForm.FormD)] public void TestNormalization(string s, NormalizationForm form) { Assert.True(s.IsNormalized()); Assert.True(s.IsNormalized(form)); Assert.Equal(s, s.Normalize()); Assert.Equal(s, s.Normalize(form)); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(GetAscii_TestData))] public void GetAscii(string unicode, int index, int count, string expected) { if (index + count == unicode.Length) { if (index == 0) { Assert.Equal(expected, new IdnMapping().GetAscii(unicode)); } Assert.Equal(expected, new IdnMapping().GetAscii(unicode, index)); } Assert.Equal(expected, new IdnMapping().GetAscii(unicode, index, count)); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(GetUnicode_TestData))] public void GetUnicode(string ascii, int index, int count, string expected) { if (index + count == ascii.Length) { if (index == 0) { Assert.Equal(expected, new IdnMapping().GetUnicode(ascii)); } Assert.Equal(expected, new IdnMapping().GetUnicode(ascii, index)); } Assert.Equal(expected, new IdnMapping().GetUnicode(ascii, index, count)); } [ConditionalFact(nameof(PredefinedCulturesOnlyIsDisabled))] public void TestHashing() { StringComparer cultureComparer = StringComparer.Create(CultureInfo.GetCultureInfo("tr-TR"), true); StringComparer ordinalComparer = StringComparer.OrdinalIgnoreCase; string turkishString = "i\u0130"; Assert.Equal(ordinalComparer.GetHashCode(turkishString), cultureComparer.GetHashCode(turkishString)); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [InlineData('a', 'A', 'a')] [InlineData('A', 'A', 'a')] [InlineData('i', 'I', 'i')] // to verify that we don't special-case the Turkish I in the invariant globalization mode [InlineData('I', 'I', 'i')] [InlineData('\u017f', '\u017f', '\u017f')] // Latin small letter long S shouldn't be case mapped in the invariant mode. 
[InlineData(0x00C1, 0x00C1, 0x00E1)] // U+00C1 LATIN CAPITAL LETTER A WITH ACUTE [InlineData(0x00E1, 0x00C1, 0x00E1)] // U+00E1 LATIN SMALL LETTER A WITH ACUTE [InlineData(0x00D7, 0x00D7, 0x00D7)] // U+00D7 MULTIPLICATION SIGN public void TestRune(int original, int expectedToUpper, int expectedToLower) { Rune originalRune = new Rune(original); Assert.Equal(expectedToUpper, Rune.ToUpperInvariant(originalRune).Value); Assert.Equal(expectedToUpper, Rune.ToUpper(originalRune, CultureInfo.GetCultureInfo("tr-TR")).Value); Assert.Equal(expectedToLower, Rune.ToLowerInvariant(originalRune).Value); Assert.Equal(expectedToLower, Rune.ToLower(originalRune, CultureInfo.GetCultureInfo("tr-TR")).Value); } [ConditionalFact(nameof(PredefinedCulturesOnlyIsDisabled))] public void TestGetCultureInfo_PredefinedOnly_ReturnsSame() { Assert.Equal(CultureInfo.GetCultureInfo("en-US"), CultureInfo.GetCultureInfo("en-US", predefinedOnly: true)); } private static byte[] GetExpectedInvariantOrdinalSortKey(ReadOnlySpan<char> input) { MemoryStream memoryStream = new MemoryStream(); Span<byte> tempBuffer = stackalloc byte[sizeof(char)]; foreach (char ch in input) { BinaryPrimitives.WriteUInt16BigEndian(tempBuffer, (ushort)ch); memoryStream.Write(tempBuffer); } return memoryStream.ToArray(); } } }
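// Editor's sketch (hedged, appended for illustration; the type name InvariantSortKeyMath
// and everything in it are this editor's invention, not part of the test suite). It
// restates the assumption the helper above encodes: an invariant-mode ordinal sort key
// is just the big-endian UTF-16 code units of the source, so its length is always
// 2 * input.Length, matching TestGetSortKeyLength_Valid.
namespace System.Globalization.Tests
{
    internal static class InvariantSortKeyMath
    {
        // Allocation-exact equivalent of GetExpectedInvariantOrdinalSortKey: size the
        // result array up front instead of growing a MemoryStream.
        internal static byte[] BigEndianUtf16(System.ReadOnlySpan<char> input)
        {
            byte[] result = new byte[checked(input.Length * sizeof(char))];
            for (int i = 0; i < input.Length; i++)
            {
                // Write the code unit high byte first, mirroring WriteUInt16BigEndian.
                result[2 * i] = (byte)(input[i] >> 8);
                result[2 * i + 1] = (byte)input[i];
            }
            return result;
        }
    }
}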
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Buffers; using System.Reflection; using System.Buffers.Binary; using System.Collections.Generic; using System.IO; using System.Runtime.InteropServices; using System.Text; using Xunit; namespace System.Globalization.Tests { public class InvariantModeTests { private static bool PredefinedCulturesOnlyIsDisabled { get; } = !PredefinedCulturesOnly(); private static bool PredefinedCulturesOnly() { bool ret; try { ret = (bool) typeof(object).Assembly.GetType("System.Globalization.GlobalizationMode").GetProperty("PredefinedCulturesOnly", BindingFlags.Static | BindingFlags.NonPublic).GetValue(null); } catch { ret = false; } return ret; } public static IEnumerable<object[]> Cultures_TestData() { yield return new object[] { "en-US" }; yield return new object[] { "ja-JP" }; yield return new object[] { "fr-FR" }; yield return new object[] { "tr-TR" }; yield return new object[] { "" }; } private static readonly string[] s_cultureNames = new string[] { "en-US", "ja-JP", "fr-FR", "tr-TR", "" }; public static IEnumerable<object[]> IndexOf_TestData() { // Empty string yield return new object[] { "foo", "", 0, 3, CompareOptions.None, 0 }; yield return new object[] { "", "", 0, 0, CompareOptions.None, 0 }; // OrdinalIgnoreCase yield return new object[] { "Hello", "l", 0, 5, CompareOptions.OrdinalIgnoreCase, 2 }; yield return new object[] { "Hello", "L", 0, 5, CompareOptions.OrdinalIgnoreCase, 2 }; yield return new object[] { "Hello", "h", 0, 5, CompareOptions.OrdinalIgnoreCase, 0 }; yield return new object[] { "Hello\u00D3\u00D4", "\u00F3\u00F4", 0, 7, CompareOptions.OrdinalIgnoreCase, 5 }; yield return new object[] { "Hello\u00D3\u00D4", "\u00F3\u00F5", 0, 7, CompareOptions.OrdinalIgnoreCase, -1 }; yield return new object[] { "Hello\U00010400", "\U00010428", 0, 7, CompareOptions.OrdinalIgnoreCase, 5 }; // Long strings yield return new object[] { new string('b', 100) + new string('a', 5555), "aaaaaaaaaaaaaaa", 0, 5655, CompareOptions.None, 100 }; yield return new object[] { new string('b', 101) + new string('a', 5555), new string('a', 5000), 0, 5656, CompareOptions.None, 101 }; yield return new object[] { new string('a', 5555), new string('a', 5000) + "b", 0, 5555, CompareOptions.None, -1 }; // Hungarian yield return new object[] { "foobardzsdzs", "rddzs", 0, 12, CompareOptions.Ordinal, -1 }; yield return new object[] { "foobardzsdzs", "rddzs", 0, 12, CompareOptions.None, -1 }; yield return new object[] { "foobardzsdzs", "rddzs", 0, 12, CompareOptions.Ordinal, -1 }; // Turkish yield return new object[] { "Hi", "I", 0, 2, CompareOptions.None, -1 }; yield return new object[] { "Hi", "I", 0, 2, CompareOptions.IgnoreCase, 1 }; yield return new object[] { "Hi", "\u0130", 0, 2, CompareOptions.None, -1 }; yield return new object[] { "Hi", "\u0130", 0, 2, CompareOptions.IgnoreCase, -1 }; yield return new object[] { "Hi", "I", 0, 2, CompareOptions.None, -1 }; yield return new object[] { "Hi", "\u0130", 0, 2, CompareOptions.IgnoreCase, -1 }; // Unicode yield return new object[] { "Hi", "\u0130", 0, 2, CompareOptions.None, -1 }; yield return new object[] { "Exhibit \u00C0", "A\u0300", 0, 9, CompareOptions.None, -1 }; yield return new object[] { "Exhibit \u00C0", "A\u0300", 0, 9, CompareOptions.Ordinal, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 0, 9, CompareOptions.None, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 0, 9, 
CompareOptions.Ordinal, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 0, 9, CompareOptions.IgnoreCase, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 0, 9, CompareOptions.OrdinalIgnoreCase, -1 }; yield return new object[] { "FooBar", "Foo\u0400Bar", 0, 6, CompareOptions.Ordinal, -1 }; yield return new object[] { "TestFooBA\u0300R", "FooB\u00C0R", 0, 11, CompareOptions.IgnoreNonSpace, -1 }; // Weightless characters yield return new object[] { "", "\u200d", 0, 0, CompareOptions.None, -1 }; yield return new object[] { "hello", "\u200d", 0, 5, CompareOptions.IgnoreCase, -1 }; // Ignore symbols yield return new object[] { "More Test's", "Tests", 0, 11, CompareOptions.IgnoreSymbols, -1 }; yield return new object[] { "More Test's", "Tests", 0, 11, CompareOptions.None, -1 }; yield return new object[] { "cbabababdbaba", "ab", 0, 13, CompareOptions.None, 2 }; // Ordinal should be case-sensitive yield return new object[] { "a", "a", 0, 1, CompareOptions.Ordinal, 0 }; yield return new object[] { "a", "A", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "abc", "aBc", 0, 3, CompareOptions.Ordinal, -1 }; // Ordinal with numbers and symbols yield return new object[] { "a", "1", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "1", "1", 0, 1, CompareOptions.Ordinal, 0 }; yield return new object[] { "1", "!", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "a", "-", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "-", "-", 0, 1, CompareOptions.Ordinal, 0 }; yield return new object[] { "-", "!", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "!", "!", 0, 1, CompareOptions.Ordinal, 0 }; // Ordinal with unicode yield return new object[] { "\uFF21", "\uFE57", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\uFE57", "\uFF21", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\uFF21", "a\u0400Bc", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\uFE57", "a\u0400Bc", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "a", "a\u0400Bc", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "a\u0400Bc", "a", 0, 4, CompareOptions.Ordinal, 0 }; // Ordinal with I or i yield return new object[] { "I", "i", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "I", "I", 0, 1, CompareOptions.Ordinal, 0 }; yield return new object[] { "i", "I", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "i", "i", 0, 1, CompareOptions.Ordinal, 0 }; yield return new object[] { "I", "\u0130", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\u0130", "I", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "i", "\u0130", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\u0130", "i", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "I", "\u0131", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\u0131", "I", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "i", "\u0131", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\u0131", "i", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\u0130", "\u0130", 0, 1, CompareOptions.Ordinal, 0 }; yield return new object[] { "\u0131", "\u0131", 0, 1, CompareOptions.Ordinal, 0 }; yield return new object[] { "\u0130", "\u0131", 0, 1, CompareOptions.Ordinal, -1 }; yield return new object[] { "\u0131", "\u0130", 0, 1, CompareOptions.Ordinal, -1 }; // Platform differences yield return new object[] {
"foobardzsdzs", "rddzs", 0, 12, CompareOptions.None, -1 }; } public static IEnumerable<object[]> LastIndexOf_TestData() { // Empty strings yield return new object[] { "foo", "", 2, 3, CompareOptions.None, 3 }; yield return new object[] { "", "", 0, 0, CompareOptions.None, 0 }; yield return new object[] { "", "a", 0, 0, CompareOptions.None, -1 }; yield return new object[] { "", "", -1, 0, CompareOptions.None, 0 }; yield return new object[] { "", "a", -1, 0, CompareOptions.None, -1 }; yield return new object[] { "", "", 0, -1, CompareOptions.None, 0 }; yield return new object[] { "", "a", 0, -1, CompareOptions.None, -1 }; // Start index = source.Length yield return new object[] { "Hello", "l", 5, 5, CompareOptions.None, 3 }; yield return new object[] { "Hello", "b", 5, 5, CompareOptions.None, -1 }; yield return new object[] { "Hello", "l", 5, 0, CompareOptions.None, -1 }; yield return new object[] { "Hello", "", 5, 5, CompareOptions.None, 5 }; yield return new object[] { "Hello", "", 5, 0, CompareOptions.None, 5 }; // OrdinalIgnoreCase yield return new object[] { "Hello", "l", 4, 5, CompareOptions.OrdinalIgnoreCase, 3 }; yield return new object[] { "Hello", "L", 4, 5, CompareOptions.OrdinalIgnoreCase, 3 }; yield return new object[] { "Hello", "h", 4, 5, CompareOptions.OrdinalIgnoreCase, 0 }; yield return new object[] { "Hello\u00D3\u00D4\u00D3\u00D4", "\u00F3\u00F4", 8, 9, CompareOptions.OrdinalIgnoreCase, 7 }; yield return new object[] { "Hello\u00D3\u00D4\u00D3\u00D4", "\u00F3\u00F5", 8, 9, CompareOptions.OrdinalIgnoreCase, -1 }; yield return new object[] { "Hello\U00010400\U00010400", "\U00010428", 8, 9, CompareOptions.OrdinalIgnoreCase, 7 }; // Long strings yield return new object[] { new string('a', 5555) + new string('b', 100), "aaaaaaaaaaaaaaa", 5654, 5655, CompareOptions.None, 5540 }; yield return new object[] { new string('b', 101) + new string('a', 5555), new string('a', 5000), 5655, 5656, CompareOptions.None, 656 }; yield return new object[] { new string('a', 5555), new string('a', 5000) + "b", 5554, 5555, CompareOptions.None, -1 }; // Hungarian yield return new object[] { "foobardzsdzs", "rddzs", 11, 12, CompareOptions.Ordinal, -1 }; yield return new object[] { "foobardzsdzs", "rddzs", 11, 12, CompareOptions.None, -1 }; yield return new object[] { "foobardzsdzs", "rddzs", 11, 12, CompareOptions.Ordinal, -1 }; // Turkish yield return new object[] { "Hi", "I", 1, 2, CompareOptions.None, -1 }; yield return new object[] { "Hi", "I", 1, 2, CompareOptions.IgnoreCase, 1 }; yield return new object[] { "Hi", "\u0130", 1, 2, CompareOptions.None, -1 }; yield return new object[] { "Hi", "\u0130", 1, 2, CompareOptions.IgnoreCase, -1 }; yield return new object[] { "Hi", "I", 1, 2, CompareOptions.None, -1 }; yield return new object[] { "Hi", "I", 1, 2, CompareOptions.IgnoreCase, 1 }; yield return new object[] { "Hi", "\u0130", 1, 2, CompareOptions.None, -1 }; yield return new object[] { "Hi", "\u0130", 1, 2, CompareOptions.IgnoreCase, -1 }; // Unicode yield return new object[] { "Exhibit \u00C0", "A\u0300", 8, 9, CompareOptions.None, -1 }; yield return new object[] { "Exhibit \u00C0", "A\u0300", 8, 9, CompareOptions.Ordinal, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 8, 9, CompareOptions.None, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 8, 9, CompareOptions.IgnoreCase, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 8, 9, CompareOptions.OrdinalIgnoreCase, -1 }; yield return new object[] { "Exhibit \u00C0", "a\u0300", 8, 9, 
CompareOptions.Ordinal, -1 }; yield return new object[] { "FooBar", "Foo\u0400Bar", 5, 6, CompareOptions.Ordinal, -1 }; yield return new object[] { "TestFooBA\u0300R", "FooB\u00C0R", 10, 11, CompareOptions.IgnoreNonSpace, -1 }; // Weightless characters yield return new object[] { "", "\u200d", 0, 0, CompareOptions.None, -1 }; yield return new object[] { "", "\u200d", -1, 0, CompareOptions.None, -1 }; yield return new object[] { "hello", "\u200d", 4, 5, CompareOptions.IgnoreCase, -1 }; // Ignore symbols yield return new object[] { "More Test's", "Tests", 10, 11, CompareOptions.IgnoreSymbols, -1 }; yield return new object[] { "More Test's", "Tests", 10, 11, CompareOptions.None, -1 }; yield return new object[] { "cbabababdbaba", "ab", 12, 13, CompareOptions.None, 10 }; // Platform differences yield return new object[] { "foobardzsdzs", "rddzs", 11, 12, CompareOptions.None, -1 }; } public static IEnumerable<object[]> IsPrefix_TestData() { // Empty strings yield return new object[] { "foo", "", CompareOptions.None, true }; yield return new object[] { "", "", CompareOptions.None, true }; // Early exit for empty values before 'options' is validated yield return new object[] { "hello", "", (CompareOptions)(-1), true }; // Long strings yield return new object[] { new string('a', 5555), "aaaaaaaaaaaaaaa", CompareOptions.None, true }; yield return new object[] { new string('a', 5555), new string('a', 5000), CompareOptions.None, true }; yield return new object[] { new string('a', 5555), new string('a', 5000) + "b", CompareOptions.None, false }; // Hungarian yield return new object[] { "dzsdzsfoobar", "ddzsf", CompareOptions.None, false }; yield return new object[] { "dzsdzsfoobar", "ddzsf", CompareOptions.Ordinal, false }; yield return new object[] { "dzsdzsfoobar", "ddzsf", CompareOptions.Ordinal, false }; // Turkish yield return new object[] { "interesting", "I", CompareOptions.None, false }; yield return new object[] { "interesting", "I", CompareOptions.IgnoreCase, true }; yield return new object[] { "interesting", "\u0130", CompareOptions.None, false }; yield return new object[] { "interesting", "\u0130", CompareOptions.IgnoreCase, false }; // Unicode yield return new object[] { "\u00C0nimal", "A\u0300", CompareOptions.None, false }; yield return new object[] { "\u00C0nimal", "A\u0300", CompareOptions.Ordinal, false }; yield return new object[] { "\u00C0nimal", "a\u0300", CompareOptions.IgnoreCase, false }; yield return new object[] { "\u00C0nimal", "a\u0300", CompareOptions.OrdinalIgnoreCase, false }; yield return new object[] { "FooBar", "Foo\u0400Bar", CompareOptions.Ordinal, false }; yield return new object[] { "FooBA\u0300R", "FooB\u00C0R", CompareOptions.IgnoreNonSpace, false }; yield return new object[] { "\u00D3\u00D4\u00D3\u00D4Hello", "\u00F3\u00F4", CompareOptions.OrdinalIgnoreCase, true }; yield return new object[] { "\u00D3\u00D4Hello\u00D3\u00D4", "\u00F3\u00F5", CompareOptions.OrdinalIgnoreCase, false }; yield return new object[] { "\U00010400\U00010400Hello", "\U00010428", CompareOptions.OrdinalIgnoreCase, true }; // Ignore symbols yield return new object[] { "Test's can be interesting", "Tests", CompareOptions.IgnoreSymbols, false }; yield return new object[] { "Test's can be interesting", "Tests", CompareOptions.None, false }; // Platform differences yield return new object[] { "dzsdzsfoobar", "ddzsf", CompareOptions.None, false }; } public static IEnumerable<object[]> IsSuffix_TestData() { // Empty strings yield return new object[] { "foo", "", CompareOptions.None, true }; yield 
return new object[] { "", "", CompareOptions.None, true }; // Early exit for empty values before 'options' is validated yield return new object[] { "hello", "", (CompareOptions)(-1), true }; // Long strings yield return new object[] { new string('a', 5555), "aaaaaaaaaaaaaaa", CompareOptions.None, true }; yield return new object[] { new string('a', 5555), new string('a', 5000), CompareOptions.None, true }; yield return new object[] { new string('a', 5555), new string('a', 5000) + "b", CompareOptions.None, false }; // Hungarian yield return new object[] { "foobardzsdzs", "rddzs", CompareOptions.Ordinal, false }; yield return new object[] { "foobardzsdzs", "rddzs", CompareOptions.None, false }; // Turkish yield return new object[] { "Hi", "I", CompareOptions.None, false }; yield return new object[] { "Hi", "I", CompareOptions.IgnoreCase, true }; yield return new object[] { "Hi", "\u0130", CompareOptions.None, false }; yield return new object[] { "Hi", "\u0130", CompareOptions.IgnoreCase, false }; // Unicode yield return new object[] { "Exhibit \u00C0", "A\u0300", CompareOptions.None, false }; yield return new object[] { "Exhibit \u00C0", "A\u0300", CompareOptions.Ordinal, false }; yield return new object[] { "Exhibit \u00C0", "a\u0300", CompareOptions.IgnoreCase, false }; yield return new object[] { "Exhibit \u00C0", "a\u0300", CompareOptions.OrdinalIgnoreCase, false }; yield return new object[] { "FooBar", "Foo\u0400Bar", CompareOptions.Ordinal, false }; yield return new object[] { "FooBA\u0300R", "FooB\u00C0R", CompareOptions.IgnoreNonSpace, false }; yield return new object[] { "\u00D3\u00D4\u00D3\u00D4Hello", "\u00F3\u00F4", CompareOptions.OrdinalIgnoreCase, false }; yield return new object[] { "\u00D3\u00D4Hello\u00D3\u00D4", "\u00F3\u00F4", CompareOptions.OrdinalIgnoreCase, true }; yield return new object[] { "\U00010400\U00010400Hello", "\U00010428", CompareOptions.OrdinalIgnoreCase, false }; yield return new object[] { "Hello\U00010400", "\U00010428", CompareOptions.OrdinalIgnoreCase, true }; // Weightless characters yield return new object[] { "", "\u200d", CompareOptions.None, false }; yield return new object[] { "", "\u200d", CompareOptions.IgnoreCase, false }; // Ignore symbols yield return new object[] { "More Test's", "Tests", CompareOptions.IgnoreSymbols, false }; yield return new object[] { "More Test's", "Tests", CompareOptions.None, false }; // Platform differences yield return new object[] { "foobardzsdzs", "rddzs", CompareOptions.None, false }; } public static IEnumerable<object[]> Compare_TestData() { CompareOptions ignoreKanaIgnoreWidthIgnoreCase = CompareOptions.IgnoreKanaType | CompareOptions.IgnoreWidth | CompareOptions.IgnoreCase; yield return new object[] { "\u3042", "\u30A2", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u3042", "\uFF71", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u304D\u3083", "\u30AD\u30E3", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u304D\u3083", "\u30AD\u3083", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u304D \u3083", "\u30AD\u3083", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u3044", "I", ignoreKanaIgnoreWidthIgnoreCase, 1 }; yield return new object[] { "a", "A", ignoreKanaIgnoreWidthIgnoreCase, 0 }; yield return new object[] { "a", "\uFF41", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "ABCDE", "\uFF21\uFF22\uFF23\uFF24\uFF25", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { 
"ABCDE", "\uFF21\uFF22\uFF23D\uFF25", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "ABCDE", "a\uFF22\uFF23D\uFF25", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "ABCDE", "\uFF41\uFF42\uFF23D\uFF25", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u6FA4", "\u6CA2", ignoreKanaIgnoreWidthIgnoreCase, 1 }; yield return new object[] { "\u3070\u3073\u3076\u3079\u307C", "\u30D0\u30D3\u30D6\u30D9\u30DC", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "ABDDE", "D", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "ABCDE", "\uFF43D", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "ABCDE", "c", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u3060", "\u305F", ignoreKanaIgnoreWidthIgnoreCase, 1 }; yield return new object[] { "\u3060", "\uFF80\uFF9E", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u3060", "\u30C0", ignoreKanaIgnoreWidthIgnoreCase, -1 }; yield return new object[] { "\u3042", "\u30A1", CompareOptions.None, -1 }; yield return new object[] { "\u304D \u3083", "\u30AD\u3083", CompareOptions.None, -1 }; yield return new object[] { "\u3044", "I", CompareOptions.None, 1 }; yield return new object[] { "a", "A", CompareOptions.None, 1 }; yield return new object[] { "a", "\uFF41", CompareOptions.None, -1 }; yield return new object[] { "", "'", CompareOptions.None, -1 }; yield return new object[] { "\u00D3\u00D4", "\u00F3\u00F4", CompareOptions.OrdinalIgnoreCase, 0 }; yield return new object[] { "\U00010400", "\U00010428", CompareOptions.OrdinalIgnoreCase, 0 }; yield return new object[] { "\u00D3\u00D4", "\u00F3\u00F4", CompareOptions.IgnoreCase, 0 }; yield return new object[] { "\U00010400", "\U00010428", CompareOptions.IgnoreCase, 0 }; yield return new object[] { "\u00D3\u00D4G", "\u00F3\u00F4", CompareOptions.OrdinalIgnoreCase, 1 }; yield return new object[] { "\U00010400G", "\U00010428", CompareOptions.OrdinalIgnoreCase, 1 }; yield return new object[] { "\u00D3\u00D4G", "\u00F3\u00F4", CompareOptions.IgnoreCase, 1 }; yield return new object[] { "\U00010400G", "\U00010428", CompareOptions.IgnoreCase, 1 }; yield return new object[] { "\u00D3\u00D4", "\u00F3\u00F4G", CompareOptions.OrdinalIgnoreCase, -1 }; yield return new object[] { "\U00010400", "\U00010428G", CompareOptions.OrdinalIgnoreCase, -1 }; yield return new object[] { "\u00D3\u00D4", "\u00F3\u00F4G", CompareOptions.IgnoreCase, -1 }; yield return new object[] { "\U00010400", "\U00010428G", CompareOptions.IgnoreCase, -1 }; // Hungarian yield return new object[] { "dzsdzs", "ddzs", CompareOptions.Ordinal, 1 }; yield return new object[] { "dzsdzs", "ddzs", CompareOptions.None, 1 }; // Turkish yield return new object[] { "i", "I", CompareOptions.None, 1 }; yield return new object[] { "i", "I", CompareOptions.IgnoreCase, 0 }; yield return new object[] { "i", "\u0130", CompareOptions.None, -1 }; yield return new object[] { "i", "\u0130", CompareOptions.IgnoreCase, -1 }; yield return new object[] { "\u00C0", "A\u0300", CompareOptions.None, 1 }; yield return new object[] { "\u00C0", "A\u0300", CompareOptions.Ordinal, 1 }; yield return new object[] { "FooBar", "Foo\u0400Bar", CompareOptions.Ordinal, -1 }; yield return new object[] { "FooBA\u0300R", "FooB\u00C0R", CompareOptions.IgnoreNonSpace, -1 }; yield return new object[] { "Test's", "Tests", CompareOptions.IgnoreSymbols, -1 }; yield return new object[] { "Test's", "Tests", CompareOptions.StringSort, -1 }; // Spanish yield return 
new object[] { "llegar", "lugar", CompareOptions.None, -1 }; yield return new object[] { "\u3042", "\u30A1", CompareOptions.IgnoreKanaType | CompareOptions.IgnoreWidth | CompareOptions.IgnoreCase, -1 }; // Surrogates yield return new object[] { "Hello\uFE6A", "Hello\U0001F601", CompareOptions.IgnoreCase, -1 }; yield return new object[] { "Hello\U0001F601", "Hello\uFE6A", CompareOptions.IgnoreCase, 1 }; yield return new object[] { "\uDBFF", "\uD800\uDC00", CompareOptions.IgnoreCase, -1 }; yield return new object[] { "\uD800\uDC00", "\uDBFF", CompareOptions.IgnoreCase, 1 }; yield return new object[] { "abcdefg\uDBFF", "abcdefg\uD800\uDC00", CompareOptions.IgnoreCase, -1 }; } public static IEnumerable<object[]> ToLower_TestData() { yield return new object[] { "", "", true }; yield return new object[] { "A", "a", true }; yield return new object[] { "a", "a", true }; yield return new object[] { "ABC", "abc", true }; yield return new object[] { "abc", "abc", true }; yield return new object[] { "1", "1", true }; yield return new object[] { "123", "123", true }; yield return new object[] { "!", "!", true }; yield return new object[] { "HELLOWOR!LD123", "hellowor!ld123", true }; yield return new object[] { "HelloWor!ld123", "hellowor!ld123", true }; yield return new object[] { "Hello\n\0World\u0009!", "hello\n\0world\t!", true }; yield return new object[] { "THIS IS A LONGER TEST CASE", "this is a longer test case", true }; yield return new object[] { "this Is A LONGER mIXEd casE test case", "this is a longer mixed case test case", true }; yield return new object[] { "THIS \t hAs \t SOMe \t tabs", "this \t has \t some \t tabs", true }; yield return new object[] { "EMBEDDED\0NuLL\0Byte\0", "embedded\0null\0byte\0", true }; // LATIN CAPITAL LETTER O WITH ACUTE, which has a lower case variant. yield return new object[] { "\u00D3", "\u00F3", true }; // SNOWMAN, which does not have a lower case variant. yield return new object[] { "\u2603", "\u2603", true }; // RAINBOW (outside the BMP and does not case) yield return new object[] { "\U0001F308", "\U0001F308", true }; // Surrogate casing yield return new object[] { "\U00010400", "\U00010428", true }; // Unicode defines some codepoints which expand into multiple codepoints // when cased (see SpecialCasing.txt from UNIDATA for some examples). We have never done // these sorts of expansions, since it would cause string lengths to change when cased, // which is non-intuitive. In addition, there are some context sensitive mappings which // we also don't preform. // Greek Capital Letter Sigma (does not to case to U+03C2 with "final sigma" rule). 
yield return new object[] { "\u03A3", "\u03C3", true }; } public static IEnumerable<object[]> ToUpper_TestData() { yield return new object[] { "", "" , true}; yield return new object[] { "a", "A", true }; yield return new object[] { "abc", "ABC", true }; yield return new object[] { "A", "A", true }; yield return new object[] { "ABC", "ABC", true }; yield return new object[] { "1", "1", true }; yield return new object[] { "123", "123", true }; yield return new object[] { "!", "!", true }; yield return new object[] { "HelloWor!ld123", "HELLOWOR!LD123", true }; yield return new object[] { "HELLOWOR!LD123", "HELLOWOR!LD123", true }; yield return new object[] { "Hello\n\0World\u0009!", "HELLO\n\0WORLD\t!", true }; yield return new object[] { "this is a longer test case", "THIS IS A LONGER TEST CASE", true }; yield return new object[] { "this Is A LONGER mIXEd casE test case", "THIS IS A LONGER MIXED CASE TEST CASE", true }; yield return new object[] { "this \t HaS \t somE \t TABS", "THIS \t HAS \t SOME \t TABS", true }; yield return new object[] { "embedded\0NuLL\0Byte\0", "EMBEDDED\0NULL\0BYTE\0", true }; // LATIN SMALL LETTER O WITH ACUTE, mapped to LATIN CAPITAL LETTER O WITH ACUTE. yield return new object[] { "\u00F3", "\u00D3", true }; // SNOWMAN, which does not have an upper case variant. yield return new object[] { "\u2603", "\u2603", true }; // RAINBOW (outside the BMP and does not case) yield return new object[] { "\U0001F308", "\U0001F308", true }; // Surrogate casing yield return new object[] { "\U00010428", "\U00010400", true }; // Unicode defines some codepoints which expand into multiple codepoints // when cased (see SpecialCasing.txt from UNIDATA for some examples). We have never done // these sorts of expansions, since it would cause string lengths to change when cased, // which is non-intuitive. In addition, there are some context sensitive mappings which // we also don't preform. // es-zed does not case to SS when uppercased. yield return new object[] { "\u00DF", "\u00DF", true }; // Ligatures do not expand when cased. yield return new object[] { "\uFB00", "\uFB00", true }; // Precomposed character with no uppercase variant, we don't want to "decompose" this // as part of casing. 
yield return new object[] { "\u0149", "\u0149", true }; yield return new object[] { "\u03C3", "\u03A3", true }; } public static IEnumerable<object[]> GetAscii_TestData() { yield return new object[] { "\u0101", 0, 1, "xn--yda" }; yield return new object[] { "\u0101\u0061\u0041", 0, 3, "xn--aa-cla" }; yield return new object[] { "\u0061\u0101\u0062", 0, 3, "xn--ab-dla" }; yield return new object[] { "\u0061\u0062\u0101", 0, 3, "xn--ab-ela" }; yield return new object[] { "\uD800\uDF00\uD800\uDF01\uD800\uDF02", 0, 6, "xn--097ccd" }; // Surrogate pairs yield return new object[] { "\uD800\uDF00\u0061\uD800\uDF01\u0042\uD800\uDF02", 0, 8, "xn--ab-ic6nfag" }; // Surrogate pairs separated by ASCII yield return new object[] { "\uD800\uDF00\u0101\uD800\uDF01\u305D\uD800\uDF02", 0, 8, "xn--yda263v6b6kfag" }; // Surrogate pairs separated by non-ASCII yield return new object[] { "\uD800\uDF00\u0101\uD800\uDF01\u0061\uD800\uDF02", 0, 8, "xn--a-nha4529qfag" }; // Surrogate pairs separated by ASCII and non-ASCII yield return new object[] { "\u0061\u0062\u0063", 0, 3, "\u0061\u0062\u0063" }; // ASCII only code points yield return new object[] { "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067", 0, 7, "xn--d9juau41awczczp" }; // Non-ASCII only code points yield return new object[] { "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 0, 9, "xn--de-jg4avhby1noc0d" }; // ASCII and non-ASCII code points yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 0, 21, "abc.xn--d9juau41awczczp.xn--de-jg4avhby1noc0d" }; // Fully qualified domain name // Embedded domain name conversion (NLS + only)(Priority 1) // Per the spec [7], "The index and count parameters (when provided) allow the // conversion to be done on a larger string where the domain name is embedded // (such as a URI or IRI). The output string is only the converted FQDN or // label, not the whole input string (if the input string contains more // character than the substring to convert)." // Fully Qualified Domain Name (Label1.Label2.Label3) yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 0, 21, "abc.xn--d9juau41awczczp.xn--de-jg4avhby1noc0d" }; yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 0, 11, "abc.xn--d9juau41awczczp" }; yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 0, 12, "abc.xn--d9juau41awczczp." }; yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 4, 17, "xn--d9juau41awczczp.xn--de-jg4avhby1noc0d" }; yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 4, 7, "xn--d9juau41awczczp" }; yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 4, 8, "xn--d9juau41awczczp." 
}; yield return new object[] { "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", 12, 9, "xn--de-jg4avhby1noc0d" }; } public static IEnumerable<object[]> GetUnicode_TestData() { yield return new object[] { "xn--yda", 0, 7, "\u0101" }; yield return new object[] { "axn--ydab", 1, 7, "\u0101" }; yield return new object[] { "xn--aa-cla", 0, 10, "\u0101\u0061a" }; yield return new object[] { "xn--ab-dla", 0, 10, "\u0061\u0101\u0062" }; yield return new object[] { "xn--ab-ela", 0, 10, "\u0061\u0062\u0101" }; yield return new object[] { "xn--097ccd", 0, 10, "\uD800\uDF00\uD800\uDF01\uD800\uDF02" }; // Surrogate pairs yield return new object[] { "xn--ab-ic6nfag", 0, 14, "\uD800\uDF00\u0061\uD800\uDF01b\uD800\uDF02" }; // Surrogate pairs separated by ASCII yield return new object[] { "xn--yda263v6b6kfag", 0, 18, "\uD800\uDF00\u0101\uD800\uDF01\u305D\uD800\uDF02" }; // Surrogate pairs separated by non-ASCII yield return new object[] { "xn--a-nha4529qfag", 0, 17, "\uD800\uDF00\u0101\uD800\uDF01\u0061\uD800\uDF02" }; // Surrogate pairs separated by ASCII and non-ASCII yield return new object[] { "\u0061\u0062\u0063", 0, 3, "\u0061\u0062\u0063" }; // ASCII only code points yield return new object[] { "xn--d9juau41awczczp", 0, 19, "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067" }; // Non-ASCII only code points yield return new object[] { "xn--de-jg4avhby1noc0d", 0, 21, "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0" }; // ASCII and non-ASCII code points yield return new object[] { "abc.xn--d9juau41awczczp.xn--de-jg4avhby1noc0d", 0, 45, "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0" }; // Fully qualified domain name // Embedded domain name conversion (NLS + only)(Priority 1) // Per the spec [7], "The index and count parameters (when provided) allow the // conversion to be done on a larger string where the domain name is embedded // (such as a URI or IRI). The output string is only the converted FQDN or // label, not the whole input string (if the input string contains more // character than the substring to convert)." // Fully Qualified Domain Name (Label1.Label2.Label3) yield return new object[] { "abc.xn--d9juau41awczczp.xn--de-jg4avhby1noc0d", 0, 45, "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0" }; yield return new object[] { "abc.xn--d9juau41awczczp", 0, 23, "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067" }; yield return new object[] { "abc.xn--d9juau41awczczp.", 0, 24, "\u0061\u0062\u0063.\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067." }; yield return new object[] { "xn--d9juau41awczczp.xn--de-jg4avhby1noc0d", 0, 41, "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067.\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0" }; yield return new object[] { "xn--d9juau41awczczp", 0, 19, "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067" }; yield return new object[] { "xn--d9juau41awczczp.", 0, 20, "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067." 
}; yield return new object[] { "xn--de-jg4avhby1noc0d", 0, 21, "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0" }; } [ConditionalFact(nameof(PredefinedCulturesOnlyIsDisabled))] public static void IcuShouldNotBeLoaded() { Assert.False(PlatformDetection.IsIcuGlobalization); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(Cultures_TestData))] public void TestCultureData(string cultureName) { CultureInfo ci = new CultureInfo(cultureName); // // DateTimeInfo // Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.AbbreviatedDayNames, ci.DateTimeFormat.AbbreviatedDayNames); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.AbbreviatedMonthGenitiveNames, ci.DateTimeFormat.AbbreviatedMonthGenitiveNames); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.AbbreviatedMonthNames, ci.DateTimeFormat.AbbreviatedMonthNames); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.AMDesignator, ci.DateTimeFormat.AMDesignator); Assert.True(ci.DateTimeFormat.Calendar is GregorianCalendar); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.CalendarWeekRule, ci.DateTimeFormat.CalendarWeekRule); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.DateSeparator, ci.DateTimeFormat.DateSeparator); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.DayNames, ci.DateTimeFormat.DayNames); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.FirstDayOfWeek, ci.DateTimeFormat.FirstDayOfWeek); for (DayOfWeek dow = DayOfWeek.Sunday; dow < DayOfWeek.Saturday; dow++) Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetAbbreviatedDayName(dow), ci.DateTimeFormat.GetAbbreviatedDayName(dow)); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetAbbreviatedEraName(1), ci.DateTimeFormat.GetAbbreviatedEraName(1)); for (int i = 1; i <= 12; i++) Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetAbbreviatedMonthName(i), ci.DateTimeFormat.GetAbbreviatedMonthName(i)); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetAllDateTimePatterns(), ci.DateTimeFormat.GetAllDateTimePatterns()); for (DayOfWeek dow = DayOfWeek.Sunday; dow < DayOfWeek.Saturday; dow++) Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetDayName(dow), ci.DateTimeFormat.GetDayName(dow)); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetEra(CultureInfo.InvariantCulture.DateTimeFormat.GetEraName(1)), ci.DateTimeFormat.GetEra(ci.DateTimeFormat.GetEraName(1))); for (int i = 1; i <= 12; i++) Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetMonthName(i), ci.DateTimeFormat.GetMonthName(i)); for (DayOfWeek dow = DayOfWeek.Sunday; dow < DayOfWeek.Saturday; dow++) Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.GetShortestDayName(dow), ci.DateTimeFormat.GetShortestDayName(dow)); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.LongDatePattern, ci.DateTimeFormat.LongDatePattern); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.LongTimePattern, ci.DateTimeFormat.LongTimePattern); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.MonthDayPattern, ci.DateTimeFormat.MonthDayPattern); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.MonthGenitiveNames, ci.DateTimeFormat.MonthGenitiveNames); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.MonthNames, ci.DateTimeFormat.MonthNames); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.NativeCalendarName, ci.DateTimeFormat.NativeCalendarName); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.PMDesignator, 
ci.DateTimeFormat.PMDesignator); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.RFC1123Pattern, ci.DateTimeFormat.RFC1123Pattern); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.ShortDatePattern, ci.DateTimeFormat.ShortDatePattern); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.ShortestDayNames, ci.DateTimeFormat.ShortestDayNames); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.ShortTimePattern, ci.DateTimeFormat.ShortTimePattern); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.TimeSeparator, ci.DateTimeFormat.TimeSeparator); Assert.Equal(CultureInfo.InvariantCulture.DateTimeFormat.YearMonthPattern, ci.DateTimeFormat.YearMonthPattern); // // Culture data // Assert.True(ci.Calendar is GregorianCalendar); CultureTypes ct = ci.Name == "" ? CultureInfo.InvariantCulture.CultureTypes : CultureInfo.InvariantCulture.CultureTypes | CultureTypes.UserCustomCulture; Assert.Equal(ct, ci.CultureTypes); Assert.Equal(CultureInfo.InvariantCulture.NativeName, ci.DisplayName); Assert.Equal(CultureInfo.InvariantCulture.EnglishName, ci.EnglishName); Assert.Equal(CultureInfo.InvariantCulture.GetConsoleFallbackUICulture(), ci.GetConsoleFallbackUICulture()); Assert.Equal(cultureName, ci.IetfLanguageTag); Assert.Equal(CultureInfo.InvariantCulture.IsNeutralCulture, ci.IsNeutralCulture); Assert.Equal(CultureInfo.InvariantCulture.KeyboardLayoutId, ci.KeyboardLayoutId); Assert.Equal(ci.Name == "" ? 0x7F : 0x1000, ci.LCID); Assert.Equal(cultureName, ci.Name); Assert.Equal(CultureInfo.InvariantCulture.NativeName, ci.NativeName); Assert.Equal(1, ci.OptionalCalendars.Length); Assert.True(ci.OptionalCalendars[0] is GregorianCalendar); Assert.Equal(CultureInfo.InvariantCulture.Parent, ci.Parent); Assert.Equal(CultureInfo.InvariantCulture.ThreeLetterISOLanguageName, ci.ThreeLetterISOLanguageName); Assert.Equal(CultureInfo.InvariantCulture.ThreeLetterWindowsLanguageName, ci.ThreeLetterWindowsLanguageName); Assert.Equal(CultureInfo.InvariantCulture.TwoLetterISOLanguageName, ci.TwoLetterISOLanguageName); Assert.Equal(ci.Name == "" ? 
false : true, ci.UseUserOverride); // // Culture Creations // Assert.Equal(CultureInfo.InvariantCulture, CultureInfo.CurrentCulture); Assert.Equal(CultureInfo.InvariantCulture, CultureInfo.CurrentUICulture); Assert.Equal(CultureInfo.InvariantCulture, CultureInfo.InstalledUICulture); Assert.Equal(CultureInfo.InvariantCulture, CultureInfo.CreateSpecificCulture("en")); Assert.Equal(ci, CultureInfo.GetCultureInfo(cultureName).Clone()); Assert.Equal(ci, CultureInfo.GetCultureInfoByIetfLanguageTag(cultureName)); // // NumberFormatInfo // Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.CurrencyDecimalDigits, ci.NumberFormat.CurrencyDecimalDigits); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.CurrencyDecimalSeparator, ci.NumberFormat.CurrencyDecimalSeparator); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.CurrencyGroupSeparator, ci.NumberFormat.CurrencyGroupSeparator); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.CurrencyGroupSizes, ci.NumberFormat.CurrencyGroupSizes); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.CurrencyNegativePattern, ci.NumberFormat.CurrencyNegativePattern); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.CurrencyPositivePattern, ci.NumberFormat.CurrencyPositivePattern); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.CurrencySymbol, ci.NumberFormat.CurrencySymbol); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.DigitSubstitution, ci.NumberFormat.DigitSubstitution); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NaNSymbol, ci.NumberFormat.NaNSymbol); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NativeDigits, ci.NumberFormat.NativeDigits); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NegativeInfinitySymbol, ci.NumberFormat.NegativeInfinitySymbol); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NegativeSign, ci.NumberFormat.NegativeSign); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NumberDecimalDigits, ci.NumberFormat.NumberDecimalDigits); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NumberDecimalSeparator, ci.NumberFormat.NumberDecimalSeparator); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NumberGroupSeparator, ci.NumberFormat.NumberGroupSeparator); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NumberGroupSizes, ci.NumberFormat.NumberGroupSizes); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.NumberNegativePattern, ci.NumberFormat.NumberNegativePattern); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PercentDecimalDigits, ci.NumberFormat.PercentDecimalDigits); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PercentDecimalSeparator, ci.NumberFormat.PercentDecimalSeparator); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PercentGroupSeparator, ci.NumberFormat.PercentGroupSeparator); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PercentGroupSizes, ci.NumberFormat.PercentGroupSizes); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PercentNegativePattern, ci.NumberFormat.PercentNegativePattern); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PercentPositivePattern, ci.NumberFormat.PercentPositivePattern); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PercentSymbol, ci.NumberFormat.PercentSymbol); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PerMilleSymbol, ci.NumberFormat.PerMilleSymbol); Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PositiveInfinitySymbol, ci.NumberFormat.PositiveInfinitySymbol); 
Assert.Equal(CultureInfo.InvariantCulture.NumberFormat.PositiveSign, ci.NumberFormat.PositiveSign); // // TextInfo // Assert.Equal(CultureInfo.InvariantCulture.TextInfo.ANSICodePage, ci.TextInfo.ANSICodePage); Assert.Equal(cultureName, ci.TextInfo.CultureName); Assert.Equal(CultureInfo.InvariantCulture.TextInfo.EBCDICCodePage, ci.TextInfo.EBCDICCodePage); Assert.Equal(CultureInfo.InvariantCulture.TextInfo.IsRightToLeft, ci.TextInfo.IsRightToLeft); Assert.Equal(ci.Name == "" ? 0x7F : 0x1000, ci.TextInfo.LCID); Assert.Equal(CultureInfo.InvariantCulture.TextInfo.ListSeparator, ci.TextInfo.ListSeparator); Assert.Equal(CultureInfo.InvariantCulture.TextInfo.MacCodePage, ci.TextInfo.MacCodePage); Assert.Equal(CultureInfo.InvariantCulture.TextInfo.OEMCodePage, ci.TextInfo.OEMCodePage); // // CompareInfo // Assert.Equal(ci.Name == "" ? 0x7F : 0x1000, ci.CompareInfo.LCID); Assert.True(cultureName.Equals(ci.CompareInfo.Name, StringComparison.OrdinalIgnoreCase)); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(Cultures_TestData))] public void SetCultureData(string cultureName) { CultureInfo ci = new CultureInfo(cultureName); // // DateTimeInfo // var calendar = new GregorianCalendar(); ci.DateTimeFormat.Calendar = calendar; Assert.Equal(calendar, ci.DateTimeFormat.Calendar); Assert.Throws<ArgumentOutOfRangeException>(() => ci.DateTimeFormat.Calendar = new TaiwanCalendar()); } [ConditionalFact(nameof(PredefinedCulturesOnlyIsDisabled))] public void TestEnum() { Assert.Equal(new CultureInfo[1] { CultureInfo.InvariantCulture }, CultureInfo.GetCultures(CultureTypes.AllCultures)); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(Cultures_TestData))] public void TestSortVersion(string cultureName) { SortVersion version = new SortVersion(0, new Guid(0, 0, 0, 0, 0, 0, 0, (byte)(0x7F >> 24), (byte)((0x7F & 0x00FF0000) >> 16), (byte)((0x7F & 0x0000FF00) >> 8), (byte)(0x7F & 0xFF))); Assert.Equal(version, new CultureInfo(cultureName).CompareInfo.Version); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [InlineData(0, 0)] [InlineData(1, 2)] [InlineData(100_000, 200_000)] [InlineData(0x3FFF_FFFF, 0x7FFF_FFFE)] public void TestGetSortKeyLength_Valid(int inputLength, int expectedSortKeyLength) { using BoundedMemory<char> boundedMemory = BoundedMemory.Allocate<char>(0); // AV if dereferenced boundedMemory.MakeReadonly(); ReadOnlySpan<char> dummySpan = MemoryMarshal.CreateReadOnlySpan(ref MemoryMarshal.GetReference(boundedMemory.Span), inputLength); Assert.Equal(expectedSortKeyLength, CultureInfo.InvariantCulture.CompareInfo.GetSortKeyLength(dummySpan)); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [InlineData(0x4000_0000)] [InlineData(int.MaxValue)] public unsafe void TestGetSortKeyLength_OverlongArgument(int inputLength) { using BoundedMemory<char> boundedMemory = BoundedMemory.Allocate<char>(0); // AV if dereferenced boundedMemory.MakeReadonly(); Assert.Throws<ArgumentException>("source", () => { ReadOnlySpan<char> dummySpan = MemoryMarshal.CreateReadOnlySpan(ref MemoryMarshal.GetReference(boundedMemory.Span), inputLength); CultureInfo.InvariantCulture.CompareInfo.GetSortKeyLength(dummySpan); }); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [InlineData("Hello", CompareOptions.None, "Hello")] [InlineData("Hello", CompareOptions.IgnoreWidth, "Hello")] [InlineData("Hello", CompareOptions.IgnoreCase, "HELLO")] [InlineData("Hello", CompareOptions.IgnoreCase | CompareOptions.IgnoreWidth, "HELLO")] 
[InlineData("Hell\u00F6", CompareOptions.None, "Hell\u00F6")] // U+00F6 = LATIN SMALL LETTER O WITH DIAERESIS [InlineData("Hell\u00F6", CompareOptions.IgnoreCase, "HELL\u00D6")] public unsafe void TestSortKey_FromSpan(string input, CompareOptions options, string expected) { byte[] expectedOutputBytes = GetExpectedInvariantOrdinalSortKey(expected); CompareInfo compareInfo = CultureInfo.InvariantCulture.CompareInfo; // First, validate that too short a buffer throws Assert.Throws<ArgumentException>("destination", () => compareInfo.GetSortKey(input, new byte[expectedOutputBytes.Length - 1], options)); // Next, validate that using a properly-sized buffer succeeds // We'll use BoundedMemory to check for buffer overruns using BoundedMemory<char> boundedInputMemory = BoundedMemory.AllocateFromExistingData<char>(input); boundedInputMemory.MakeReadonly(); ReadOnlySpan<char> boundedInputSpan = boundedInputMemory.Span; using BoundedMemory<byte> boundedOutputMemory = BoundedMemory.Allocate<byte>(expectedOutputBytes.Length); Span<byte> boundedOutputSpan = boundedOutputMemory.Span; Assert.Equal(expectedOutputBytes.Length, compareInfo.GetSortKey(boundedInputSpan, boundedOutputSpan, options)); Assert.Equal(expectedOutputBytes, boundedOutputSpan[0..expectedOutputBytes.Length].ToArray()); // Now try it once more, passing a larger span where the last byte points to unallocated memory. // If GetSortKey attempts to write beyond the number of bytes we expect, the unit test will AV. boundedOutputSpan.Clear(); fixed (byte* pBoundedOutputSpan = boundedOutputSpan) { boundedOutputSpan = new Span<byte>(pBoundedOutputSpan, boundedOutputSpan.Length + 1); // last byte is unallocated memory Assert.Equal(expectedOutputBytes.Length, compareInfo.GetSortKey(boundedInputSpan, boundedOutputSpan, options)); Assert.Equal(expectedOutputBytes, boundedOutputSpan[0..expectedOutputBytes.Length].ToArray()); } } [ConditionalFact(nameof(PredefinedCulturesOnlyIsDisabled))] public void TestSortKey_ZeroWeightCodePoints() { // In the invariant globalization mode, there's no such thing as a zero-weight code point, // so the U+200C ZERO WIDTH NON-JOINER code point contributes to the final sort key value. CompareInfo compareInfo = CultureInfo.InvariantCulture.CompareInfo; SortKey sortKeyForEmptyString = compareInfo.GetSortKey(""); SortKey sortKeyForZeroWidthJoiner = compareInfo.GetSortKey("\u200c"); Assert.NotEqual(0, SortKey.Compare(sortKeyForEmptyString, sortKeyForZeroWidthJoiner)); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [InlineData("", "", 0)] [InlineData("", "not-empty", -1)] [InlineData("not-empty", "", 1)] [InlineData("hello", "hello", 0)] [InlineData("prefix", "prefix-with-more-data", -1)] [InlineData("prefix-with-more-data", "prefix", 1)] [InlineData("e", "\u0115", -1)] // U+0115 = LATIN SMALL LETTER E WITH BREVE, tests endianness handling public void TestSortKey_Compare_And_Equals(string value1, string value2, int expectedSign) { // These tests are in the "invariant" unit test project because we rely on Invariant mode // copying the input data directly into the sort key. 
SortKey sortKey1 = CultureInfo.InvariantCulture.CompareInfo.GetSortKey(value1); SortKey sortKey2 = CultureInfo.InvariantCulture.CompareInfo.GetSortKey(value2); Assert.Equal(expectedSign, Math.Sign(SortKey.Compare(sortKey1, sortKey2))); Assert.Equal(expectedSign == 0, sortKey1.Equals(sortKey2)); } private static StringComparison GetStringComparison(CompareOptions options) { StringComparison sc = (StringComparison) 0; if ((options & CompareOptions.IgnoreCase) != 0) sc |= StringComparison.CurrentCultureIgnoreCase; if ((options & CompareOptions.Ordinal) != 0) sc |= StringComparison.Ordinal; if ((options & CompareOptions.OrdinalIgnoreCase) != 0) sc |= StringComparison.OrdinalIgnoreCase; if (sc == (StringComparison)0) { sc = StringComparison.CurrentCulture; } return sc; } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(IndexOf_TestData))] public void TestIndexOf(string source, string value, int startIndex, int count, CompareOptions options, int result) { foreach (string cul in s_cultureNames) { CompareInfo compareInfo = CultureInfo.GetCultureInfo(cul).CompareInfo; TestCore(compareInfo, source, value, startIndex, count, options, result); } // static test helper method to avoid mutating input args when called in a loop static void TestCore(CompareInfo compareInfo, string source, string value, int startIndex, int count, CompareOptions options, int result) { Assert.Equal(result, compareInfo.IndexOf(source, value, startIndex, count, options)); Assert.Equal(result, source.IndexOf(value, startIndex, count, GetStringComparison(options))); // Span versions - using BoundedMemory to check for buffer overruns using BoundedMemory<char> sourceBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(source.AsSpan(startIndex, count)); sourceBoundedMemory.MakeReadonly(); ReadOnlySpan<char> sourceBoundedSpan = sourceBoundedMemory.Span; using BoundedMemory<char> valueBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(value); valueBoundedMemory.MakeReadonly(); ReadOnlySpan<char> valueBoundedSpan = valueBoundedMemory.Span; int offsetResult = result; if (offsetResult >= 0) { offsetResult -= startIndex; // account for span slicing Assert.True(offsetResult >= 0, "Shouldn't have made an affirmative result go negative."); } Assert.Equal(offsetResult, sourceBoundedSpan.IndexOf(valueBoundedSpan, GetStringComparison(options))); Assert.Equal(offsetResult, compareInfo.IndexOf(sourceBoundedSpan, valueBoundedSpan, options)); Assert.Equal(offsetResult, compareInfo.IndexOf(sourceBoundedSpan, valueBoundedSpan, options, out int matchLength)); if (offsetResult >= 0) { Assert.Equal(valueBoundedSpan.Length, matchLength); // Invariant mode should perform non-linguistic comparisons } else { Assert.Equal(0, matchLength); // not found } } } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(LastIndexOf_TestData))] public void TestLastIndexOf(string source, string value, int startIndex, int count, CompareOptions options, int result) { foreach (string cul in s_cultureNames) { CompareInfo compareInfo = CultureInfo.GetCultureInfo(cul).CompareInfo; TestCore(compareInfo, source, value, startIndex, count, options, result); } // static test helper method to avoid mutating input args when called in a loop static void TestCore(CompareInfo compareInfo, string source, string value, int startIndex, int count, CompareOptions options, int result) { Assert.Equal(result, compareInfo.LastIndexOf(source, value, startIndex, count, options)); Assert.Equal(result, 
source.LastIndexOf(value, startIndex, count, GetStringComparison(options)));

                // Filter differences between string-based and Span-based LastIndexOf
                // - Empty value handling - https://github.com/dotnet/runtime/issues/13382
                // - Negative count
                if (value.Length == 0 || count < 0)
                    return;

                if (startIndex == source.Length)
                {
                    startIndex--;
                    if (count > 0)
                        count--;
                }
                int leftStartIndex = (startIndex - count + 1);

                // Span versions - using BoundedMemory to check for buffer overruns
                using BoundedMemory<char> sourceBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(source.AsSpan(leftStartIndex, count));
                sourceBoundedMemory.MakeReadonly();
                ReadOnlySpan<char> sourceBoundedSpan = sourceBoundedMemory.Span;
                using BoundedMemory<char> valueBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(value);
                valueBoundedMemory.MakeReadonly();
                ReadOnlySpan<char> valueBoundedSpan = valueBoundedMemory.Span;

                if (result >= 0)
                {
                    result -= leftStartIndex; // account for span slicing
                    Assert.True(result >= 0, "Shouldn't have made an affirmative result go negative.");
                }

                Assert.Equal(result, sourceBoundedSpan.LastIndexOf(valueBoundedSpan, GetStringComparison(options)));
                Assert.Equal(result, compareInfo.LastIndexOf(sourceBoundedSpan, valueBoundedSpan, options));
                Assert.Equal(result, compareInfo.LastIndexOf(sourceBoundedSpan, valueBoundedSpan, options, out int matchLength));
                if (result >= 0)
                {
                    Assert.Equal(valueBoundedSpan.Length, matchLength); // Invariant mode should perform non-linguistic comparisons
                }
                else
                {
                    Assert.Equal(0, matchLength); // not found
                }
            }
        }

        [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))]
        [MemberData(nameof(IsPrefix_TestData))]
        public void TestIsPrefix(string source, string value, CompareOptions options, bool result)
        {
            foreach (string cul in s_cultureNames)
            {
                CompareInfo compareInfo = CultureInfo.GetCultureInfo(cul).CompareInfo;
                Assert.Equal(result, compareInfo.IsPrefix(source, value, options));
                Assert.Equal(result, source.StartsWith(value, GetStringComparison(options)));

                // Span versions - using BoundedMemory to check for buffer overruns
                using BoundedMemory<char> sourceBoundedMemory =
BoundedMemory.AllocateFromExistingData<char>(source); sourceBoundedMemory.MakeReadonly(); ReadOnlySpan<char> sourceBoundedSpan = sourceBoundedMemory.Span; using BoundedMemory<char> valueBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(value); valueBoundedMemory.MakeReadonly(); ReadOnlySpan<char> valueBoundedSpan = valueBoundedMemory.Span; Assert.Equal(result, sourceBoundedSpan.EndsWith(valueBoundedSpan, GetStringComparison(options))); Assert.Equal(result, compareInfo.IsSuffix(sourceBoundedSpan, valueBoundedSpan, options)); Assert.Equal(result, compareInfo.IsSuffix(sourceBoundedSpan, valueBoundedSpan, options, out int matchLength)); if (result) { Assert.Equal(valueBoundedSpan.Length, matchLength); // Invariant mode should perform non-linguistic comparisons } else { Assert.Equal(0, matchLength); // not found } } } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [InlineData("", false)] [InlineData('x', true)] [InlineData('\ud800', true)] // standalone high surrogate [InlineData("hello", true)] public void TestIsSortable(object sourceObj, bool expectedResult) { if (sourceObj is string s) { Assert.Equal(expectedResult, CompareInfo.IsSortable(s)); } else { Assert.Equal(expectedResult, CompareInfo.IsSortable((char)sourceObj)); } } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(Compare_TestData))] public void TestCompare(string source, string value, CompareOptions options, int result) { foreach (string cul in s_cultureNames) { int res = CultureInfo.GetCultureInfo(cul).CompareInfo.Compare(source, value, options); Assert.Equal(result, Math.Sign(res)); res = string.Compare(source, value, GetStringComparison(options)); Assert.Equal(result, Math.Sign(res)); // Span versions - using BoundedMemory to check for buffer overruns using BoundedMemory<char> sourceBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(source); sourceBoundedMemory.MakeReadonly(); ReadOnlySpan<char> sourceBoundedSpan = sourceBoundedMemory.Span; using BoundedMemory<char> valueBoundedMemory = BoundedMemory.AllocateFromExistingData<char>(value); valueBoundedMemory.MakeReadonly(); ReadOnlySpan<char> valueBoundedSpan = valueBoundedMemory.Span; res = CultureInfo.GetCultureInfo(cul).CompareInfo.Compare(sourceBoundedSpan, valueBoundedSpan, options); Assert.Equal(result, Math.Sign(res)); res = sourceBoundedSpan.CompareTo(valueBoundedSpan, GetStringComparison(options)); Assert.Equal(result, Math.Sign(res)); } } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(ToLower_TestData))] public void TestToLower(string upper, string lower, bool result) { foreach (string cul in s_cultureNames) { Assert.Equal(result, CultureInfo.GetCultureInfo(cul).TextInfo.ToLower(upper).Equals(lower, StringComparison.Ordinal)); Assert.Equal(result, upper.ToLower().Equals(lower, StringComparison.Ordinal)); } } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(ToUpper_TestData))] public void TestToUpper(string lower, string upper, bool result) { foreach (string cul in s_cultureNames) { Assert.Equal(result, CultureInfo.GetCultureInfo(cul).TextInfo.ToUpper(lower).Equals(upper, StringComparison.Ordinal)); Assert.Equal(result, lower.ToUpper().Equals(upper, StringComparison.Ordinal)); } } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [InlineData("", NormalizationForm.FormC)] [InlineData("\uFB01", NormalizationForm.FormC)] [InlineData("\uFB01", NormalizationForm.FormD)] [InlineData("\uFB01", NormalizationForm.FormKC)] 
[InlineData("\uFB01", NormalizationForm.FormKD)] [InlineData("\u1E9b\u0323", NormalizationForm.FormC)] [InlineData("\u1E9b\u0323", NormalizationForm.FormD)] [InlineData("\u1E9b\u0323", NormalizationForm.FormKC)] [InlineData("\u1E9b\u0323", NormalizationForm.FormKD)] [InlineData("\u00C4\u00C7", NormalizationForm.FormC)] [InlineData("\u00C4\u00C7", NormalizationForm.FormD)] [InlineData("A\u0308C\u0327", NormalizationForm.FormC)] [InlineData("A\u0308C\u0327", NormalizationForm.FormD)] public void TestNormalization(string s, NormalizationForm form) { Assert.True(s.IsNormalized()); Assert.True(s.IsNormalized(form)); Assert.Equal(s, s.Normalize()); Assert.Equal(s, s.Normalize(form)); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(GetAscii_TestData))] public void GetAscii(string unicode, int index, int count, string expected) { if (index + count == unicode.Length) { if (index == 0) { Assert.Equal(expected, new IdnMapping().GetAscii(unicode)); } Assert.Equal(expected, new IdnMapping().GetAscii(unicode, index)); } Assert.Equal(expected, new IdnMapping().GetAscii(unicode, index, count)); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [MemberData(nameof(GetUnicode_TestData))] public void GetUnicode(string ascii, int index, int count, string expected) { if (index + count == ascii.Length) { if (index == 0) { Assert.Equal(expected, new IdnMapping().GetUnicode(ascii)); } Assert.Equal(expected, new IdnMapping().GetUnicode(ascii, index)); } Assert.Equal(expected, new IdnMapping().GetUnicode(ascii, index, count)); } [ConditionalFact(nameof(PredefinedCulturesOnlyIsDisabled))] public void TestHashing() { StringComparer cultureComparer = StringComparer.Create(CultureInfo.GetCultureInfo("tr-TR"), true); StringComparer ordinalComparer = StringComparer.OrdinalIgnoreCase; string turkishString = "i\u0130"; Assert.Equal(ordinalComparer.GetHashCode(turkishString), cultureComparer.GetHashCode(turkishString)); } [ConditionalTheory(nameof(PredefinedCulturesOnlyIsDisabled))] [InlineData('a', 'A', 'a')] [InlineData('A', 'A', 'a')] [InlineData('i', 'I', 'i')] // to verify that we don't special-case the Turkish I in the invariant globalization mode [InlineData('I', 'I', 'i')] [InlineData('\u017f', '\u017f', '\u017f')] // Latin small letter long S shouldn't be case mapped in the invariant mode. 
[InlineData(0x00C1, 0x00C1, 0x00E1)] // U+00C1 LATIN CAPITAL LETTER A WITH ACUTE [InlineData(0x00E1, 0x00C1, 0x00E1)] // U+00E1 LATIN SMALL LETTER A WITH ACUTE [InlineData(0x00D7, 0x00D7, 0x00D7)] // U+00D7 MULTIPLICATION SIGN public void TestRune(int original, int expectedToUpper, int expectedToLower) { Rune originalRune = new Rune(original); Assert.Equal(expectedToUpper, Rune.ToUpperInvariant(originalRune).Value); Assert.Equal(expectedToUpper, Rune.ToUpper(originalRune, CultureInfo.GetCultureInfo("tr-TR")).Value); Assert.Equal(expectedToLower, Rune.ToLowerInvariant(originalRune).Value); Assert.Equal(expectedToLower, Rune.ToLower(originalRune, CultureInfo.GetCultureInfo("tr-TR")).Value); } [ConditionalFact(nameof(PredefinedCulturesOnlyIsDisabled))] public void TestGetCultureInfo_PredefinedOnly_ReturnsSame() { Assert.Equal(CultureInfo.GetCultureInfo("en-US"), CultureInfo.GetCultureInfo("en-US", predefinedOnly: true)); } private static byte[] GetExpectedInvariantOrdinalSortKey(ReadOnlySpan<char> input) { MemoryStream memoryStream = new MemoryStream(); Span<byte> tempBuffer = stackalloc byte[sizeof(char)]; foreach (char ch in input) { BinaryPrimitives.WriteUInt16BigEndian(tempBuffer, (ushort)ch); memoryStream.Write(tempBuffer); } return memoryStream.ToArray(); } } }
-1