repo_name | path | text
dandycheung/baulk
lib/archive/zstd/compress/zstd_fast.c
<filename>lib/archive/zstd/compress/zstd_fast.c<gh_stars>10-100 /* * Copyright (c) <NAME>, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */ #include "zstd_fast.h" void ZSTD_fillHashTable(ZSTD_matchState_t* ms, const void* const end, ZSTD_dictTableLoadMethod_e dtlm) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hBits = cParams->hashLog; U32 const mls = cParams->minMatch; const BYTE* const base = ms->window.base; const BYTE* ip = base + ms->nextToUpdate; const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; const U32 fastHashFillStep = 3; /* Always insert every fastHashFillStep position into the hash table. * Insert the other positions if their hash entry is empty. */ for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) { U32 const curr = (U32)(ip - base); size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls); hashTable[hash0] = curr; if (dtlm == ZSTD_dtlm_fast) continue; /* Only load extra positions for ZSTD_dtlm_full */ { U32 p; for (p = 1; p < fastHashFillStep; ++p) { size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls); if (hashTable[hash] == 0) { /* not yet filled */ hashTable[hash] = curr + p; } } } } } /** * If you squint hard enough (and ignore repcodes), the search operation at any * given position is broken into 4 stages: * * 1. Hash (map position to hash value via input read) * 2. Lookup (map hash val to index via hashtable read) * 3. Load (map index to value at that position via input read) * 4. Compare * * Each of these steps involves a memory read at an address which is computed * from the previous step. This means these steps must be sequenced and their * latencies are cumulative. * * Rather than do 1->2->3->4 sequentially for a single position before moving * onto the next, this implementation interleaves these operations across the * next few positions: * * R = Repcode Read & Compare * H = Hash * T = Table Lookup * M = Match Read & Compare * * Pos | Time --> * ----+------------------- * N | ... M * N+1 | ... TM * N+2 | R H T M * N+3 | H TM * N+4 | R H T M * N+5 | H ... * N+6 | R ... * * This is very much analogous to the pipelining of execution in a CPU. And just * like a CPU, we have to dump the pipeline when we find a match (i.e., take a * branch). * * When this happens, we throw away our current state, and do the following prep * to re-enter the loop: * * Pos | Time --> * ----+------------------- * N | H T * N+1 | H * * This is also the work we do at the beginning to enter the loop initially. */ FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_fast_noDict_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls, U32 const hasStep) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; /* support stepSize of 0 */ size_t const stepSize = hasStep ? 
(cParams->targetLength + !(cParams->targetLength) + 1) : 2; const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; const BYTE* anchor = istart; const BYTE* ip0 = istart; const BYTE* ip1; const BYTE* ip2; const BYTE* ip3; U32 current0; U32 rep_offset1 = rep[0]; U32 rep_offset2 = rep[1]; U32 offsetSaved = 0; size_t hash0; /* hash for ip0 */ size_t hash1; /* hash for ip1 */ U32 idx; /* match idx for ip0 */ U32 mval; /* src value at match idx */ U32 offcode; const BYTE* match0; size_t mLength; /* ip0 and ip1 are always adjacent. The targetLength skipping and * uncompressibility acceleration is applied to every other position, * matching the behavior of #1562. step therefore represents the gap * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */ size_t step; const BYTE* nextStep; const size_t kStepIncr = (1 << (kSearchStrength - 1)); DEBUGLOG(5, "ZSTD_compressBlock_fast_generic"); ip0 += (ip0 == prefixStart); { U32 const curr = (U32)(ip0 - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog); U32 const maxRep = curr - windowLow; if (rep_offset2 > maxRep) offsetSaved = rep_offset2, rep_offset2 = 0; if (rep_offset1 > maxRep) offsetSaved = rep_offset1, rep_offset1 = 0; } /* start each op */ _start: /* Requires: ip0 */ step = stepSize; nextStep = ip0 + kStepIncr; /* calculate positions, ip0 - anchor == 0, so we skip step calc */ ip1 = ip0 + 1; ip2 = ip0 + step; ip3 = ip2 + 1; if (ip3 >= ilimit) { goto _cleanup; } hash0 = ZSTD_hashPtr(ip0, hlog, mls); hash1 = ZSTD_hashPtr(ip1, hlog, mls); idx = hashTable[hash0]; do { /* load repcode match for ip[2]*/ const U32 rval = MEM_read32(ip2 - rep_offset1); /* write back hash table entry */ current0 = (U32)(ip0 - base); hashTable[hash0] = current0; /* check repcode at ip[2] */ if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) { ip0 = ip2; match0 = ip0 - rep_offset1; mLength = ip0[-1] == match0[-1]; ip0 -= mLength; match0 -= mLength; offcode = STORE_REPCODE_1; mLength += 4; goto _match; } /* load match for ip[0] */ if (idx >= prefixStartIndex) { mval = MEM_read32(base + idx); } else { mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */ } /* check match at ip[0] */ if (MEM_read32(ip0) == mval) { /* found a match! */ goto _offset; } /* lookup ip[1] */ idx = hashTable[hash1]; /* hash ip[2] */ hash0 = hash1; hash1 = ZSTD_hashPtr(ip2, hlog, mls); /* advance to next positions */ ip0 = ip1; ip1 = ip2; ip2 = ip3; /* write back hash table entry */ current0 = (U32)(ip0 - base); hashTable[hash0] = current0; /* load match for ip[0] */ if (idx >= prefixStartIndex) { mval = MEM_read32(base + idx); } else { mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */ } /* check match at ip[0] */ if (MEM_read32(ip0) == mval) { /* found a match! */ goto _offset; } /* lookup ip[1] */ idx = hashTable[hash1]; /* hash ip[2] */ hash0 = hash1; hash1 = ZSTD_hashPtr(ip2, hlog, mls); /* advance to next positions */ ip0 = ip1; ip1 = ip2; ip2 = ip0 + step; ip3 = ip1 + step; /* calculate step */ if (ip2 >= nextStep) { step++; PREFETCH_L1(ip1 + 64); PREFETCH_L1(ip1 + 128); nextStep += kStepIncr; } } while (ip3 < ilimit); _cleanup: /* Note that there are probably still a couple positions we could search. 
* However, it seems to be a meaningful performance hit to try to search * them. So let's not. */ /* save reps for next block */ rep[0] = rep_offset1 ? rep_offset1 : offsetSaved; rep[1] = rep_offset2 ? rep_offset2 : offsetSaved; /* Return the last literals size */ return (size_t)(iend - anchor); _offset: /* Requires: ip0, idx */ /* Compute the offset code. */ match0 = base + idx; rep_offset2 = rep_offset1; rep_offset1 = (U32)(ip0-match0); offcode = STORE_OFFSET(rep_offset1); mLength = 4; /* Count the backwards match length. */ while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } _match: /* Requires: ip0, match0, offcode */ /* Count the forward length. */ mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend); ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength); ip0 += mLength; anchor = ip0; /* write next hash table entry */ if (ip1 < ip0) { hashTable[hash1] = (U32)(ip1 - base); } /* Fill table and check for immediate repcode. */ if (ip0 <= ilimit) { /* Fill Table */ assert(base+current0+2 > istart); /* check base overflow */ hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */ while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) { /* store sequence */ size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4; { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); ip0 += rLength; ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, STORE_REPCODE_1, rLength); anchor = ip0; continue; /* faster when present (confirmed on gcc-8) ... (?) 
*/ } } } goto _start; } #define ZSTD_GEN_FAST_FN(dictMode, mls, step) \ static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step( \ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ void const* src, size_t srcSize) \ { \ return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \ } ZSTD_GEN_FAST_FN(noDict, 4, 1) ZSTD_GEN_FAST_FN(noDict, 5, 1) ZSTD_GEN_FAST_FN(noDict, 6, 1) ZSTD_GEN_FAST_FN(noDict, 7, 1) ZSTD_GEN_FAST_FN(noDict, 4, 0) ZSTD_GEN_FAST_FN(noDict, 5, 0) ZSTD_GEN_FAST_FN(noDict, 6, 0) ZSTD_GEN_FAST_FN(noDict, 7, 0) size_t ZSTD_compressBlock_fast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; assert(ms->dictMatchState == NULL); if (ms->cParams.targetLength > 1) { switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize); case 5 : return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize); case 6 : return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize); case 7 : return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize); } } else { switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize); case 5 : return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize); case 6 : return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize); case 7 : return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize); } } } FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_fast_dictMatchState_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls, U32 const hasStep) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; /* support stepSize of 0 */ U32 const stepSize = cParams->targetLength + !(cParams->targetLength); const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const U32 prefixStartIndex = ms->window.dictLimit; const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; U32 offsetSaved = 0; const ZSTD_matchState_t* const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dictCParams = &dms->cParams ; const U32* const dictHashTable = dms->hashTable; const U32 dictStartIndex = dms->window.dictLimit; const BYTE* const dictBase = dms->window.base; const BYTE* const dictStart = dictBase + dictStartIndex; const BYTE* const dictEnd = dms->window.nextSrc; const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase); const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart); const U32 dictHLog = dictCParams->hashLog; /* if a dictionary is still attached, it necessarily means that * it is within window size. So we just check it. 
*/ const U32 maxDistance = 1U << cParams->windowLog; const U32 endIndex = (U32)((size_t)(ip - base) + srcSize); assert(endIndex - prefixStartIndex <= maxDistance); (void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */ (void)hasStep; /* not currently specialized on whether it's accelerated */ /* ensure there will be no underflow * when translating a dict index into a local index */ assert(prefixStartIndex >= (U32)(dictEnd - dictBase)); /* init */ DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic"); ip += (dictAndPrefixLength == 0); /* dictMatchState repCode checks don't currently handle repCode == 0 * disabling. */ assert(offset_1 <= dictAndPrefixLength); assert(offset_2 <= dictAndPrefixLength); /* Main Search Loop */ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ size_t mLength; size_t const h = ZSTD_hashPtr(ip, hlog, mls); U32 const curr = (U32)(ip-base); U32 const matchIndex = hashTable[h]; const BYTE* match = base + matchIndex; const U32 repIndex = curr + 1 - offset_1; const BYTE* repMatch = (repIndex < prefixStartIndex) ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; hashTable[h] = curr; /* update hash table */ if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; ip++; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength); } else if ( (matchIndex <= prefixStartIndex) ) { size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls); U32 const dictMatchIndex = dictHashTable[dictHash]; const BYTE* dictMatch = dictBase + dictMatchIndex; if (dictMatchIndex <= dictStartIndex || MEM_read32(dictMatch) != MEM_read32(ip)) { assert(stepSize >= 1); ip += ((ip-anchor) >> kSearchStrength) + stepSize; continue; } else { /* found a dict match */ U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta); mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4; while (((ip>anchor) & (dictMatch>dictStart)) && (ip[-1] == dictMatch[-1])) { ip--; dictMatch--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); } } else if (MEM_read32(match) != MEM_read32(ip)) { /* it's not a match, and we're not going to check the dictionary */ assert(stepSize >= 1); ip += ((ip-anchor) >> kSearchStrength) + stepSize; continue; } else { /* found a regular match */ U32 const offset = (U32)(ip-match); mLength = ZSTD_count(ip+4, match+4, iend) + 4; while (((ip>anchor) & (match>prefixStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); } /* match found */ ip += mLength; anchor = ip; if (ip <= ilimit) { /* Fill Table */ assert(base+curr+2 > istart); /* check base overflow */ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 could be > iend-8 */ hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); /* check immediate repcode */ while (ip <= ilimit) { U32 const current2 = (U32)(ip-base); U32 const repIndex2 = current2 - offset_2; const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? 
dictBase - dictIndexDelta + repIndex2 : base + repIndex2; if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2); hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; ip += repLength2; anchor = ip; continue; } break; } } } /* save reps for next block */ rep[0] = offset_1 ? offset_1 : offsetSaved; rep[1] = offset_2 ? offset_2 : offsetSaved; /* Return the last literals size */ return (size_t)(iend - anchor); } ZSTD_GEN_FAST_FN(dictMatchState, 4, 0) ZSTD_GEN_FAST_FN(dictMatchState, 5, 0) ZSTD_GEN_FAST_FN(dictMatchState, 6, 0) ZSTD_GEN_FAST_FN(dictMatchState, 7, 0) size_t ZSTD_compressBlock_fast_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; assert(ms->dictMatchState != NULL); switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize); case 5 : return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize); case 6 : return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize); case 7 : return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize); } } static size_t ZSTD_compressBlock_fast_extDict_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls, U32 const hasStep) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; /* support stepSize of 0 */ U32 const stepSize = cParams->targetLength + !(cParams->targetLength); const BYTE* const base = ms->window.base; const BYTE* const dictBase = ms->window.dictBase; const BYTE* const istart = (const BYTE*)src; const BYTE* ip = istart; const BYTE* anchor = istart; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); const U32 dictStartIndex = lowLimit; const BYTE* const dictStart = dictBase + dictStartIndex; const U32 dictLimit = ms->window.dictLimit; const U32 prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit; const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const dictEnd = dictBase + prefixStartIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - 8; U32 offset_1=rep[0], offset_2=rep[1]; (void)hasStep; /* not currently specialized on whether it's accelerated */ DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1); /* switch to "regular" variant if extDict is invalidated due to maxDistance */ if (prefixStartIndex == dictStartIndex) return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize); /* Search Loop */ while (ip < ilimit) { /* < instead of <=, because (ip+1) */ const size_t h = ZSTD_hashPtr(ip, hlog, mls); const U32 matchIndex = hashTable[h]; const BYTE* const matchBase = matchIndex < prefixStartIndex ? 
dictBase : base; const BYTE* match = matchBase + matchIndex; const U32 curr = (U32)(ip-base); const U32 repIndex = curr + 1 - offset_1; const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; hashTable[h] = curr; /* update hash table */ DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr); if ( ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (offset_1 <= curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4; ip++; ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, rLength); ip += rLength; anchor = ip; } else { if ( (matchIndex < dictStartIndex) || (MEM_read32(match) != MEM_read32(ip)) ) { assert(stepSize >= 1); ip += ((ip-anchor) >> kSearchStrength) + stepSize; continue; } { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend; const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart; U32 const offset = curr - matchIndex; size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4; while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; /* update offset history */ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); ip += mLength; anchor = ip; } } if (ip <= ilimit) { /* Fill Table */ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); /* check immediate repcode */ while (ip <= ilimit) { U32 const current2 = (U32)(ip-base); U32 const repIndex2 = current2 - offset_2; const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 <= curr - dictStartIndex)) /* intentional overflow */ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */ ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, STORE_REPCODE_1, repLength2); hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; ip += repLength2; anchor = ip; continue; } break; } } } /* save reps for next block */ rep[0] = offset_1; rep[1] = offset_2; /* Return the last literals size */ return (size_t)(iend - anchor); } ZSTD_GEN_FAST_FN(extDict, 4, 0) ZSTD_GEN_FAST_FN(extDict, 5, 0) ZSTD_GEN_FAST_FN(extDict, 6, 0) ZSTD_GEN_FAST_FN(extDict, 7, 0) size_t ZSTD_compressBlock_fast_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; switch(mls) { default: /* includes case 3 */ case 4 : return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize); case 5 : return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize); case 6 : return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize); case 7 : return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize); } }
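The long pipelining comment in zstd_fast.c above breaks the search at each position into four dependent steps: hash, table lookup, candidate load, and compare. For orientation only, here is a minimal stand-alone C sketch of that search step in its un-pipelined form; the table size, hash constant and helper names are invented for illustration and are not zstd's own.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define TABLE_LOG 12                      /* toy table: 1 << TABLE_LOG entries */
#define TABLE_SIZE (1u << TABLE_LOG)      /* caller allocates uint32_t table[TABLE_SIZE], zeroed */

/* Toy 4-byte hash; zstd uses its own ZSTD_hashPtr, this is only illustrative. */
static uint32_t hash4(const uint8_t *p)
{
    uint32_t v;
    memcpy(&v, p, 4);
    return (v * 2654435761u) >> (32 - TABLE_LOG);
}

/* One search step in the spirit of zstd_fast:
 * 1. hash the 4 bytes at ip, 2. look the hash up in a single-entry table,
 * 3. load the candidate position, 4. compare 4 bytes to confirm a match.
 * Returns the match position, or (size_t)-1 if the slot was empty or mismatched.
 * Position 0 is treated as "empty", much like indices below prefixStartIndex
 * are rejected in the real code. */
static size_t probe(uint32_t *table, const uint8_t *base, size_t ip)
{
    uint32_t const h = hash4(base + ip);
    uint32_t const cand = table[h];       /* previous position with this hash */
    size_t match = (size_t)-1;

    if (cand != 0 && memcmp(base + cand, base + ip, 4) == 0)
        match = cand;
    table[h] = (uint32_t)ip;              /* always overwrite: one entry per hash */
    return match;
}

zstd's real loop differs in that it interleaves these reads across several upcoming positions and checks the repcode offset first, as the timing diagram in the comment shows.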
dandycheung/baulk
lib/archive/zlib/slide_hash_simd.h
/* slide_hash_simd.h * * Copyright 2022 The Chromium Authors. All rights reserved. * Use of this source code is governed by a BSD-style license that can be * found in the Chromium source repository LICENSE file. */ #ifndef SLIDE_HASH_SIMD_H #define SLIDE_HASH_SIMD_H #include "deflate.h" #ifndef INLINE #if defined(_MSC_VER) && !defined(__clang__) #define INLINE __inline #else #define INLINE inline #endif #endif #if defined(CPU_NO_SIMD) #error SIMD has been disabled for your build target #elif defined(DEFLATE_SLIDE_HASH_SSE2) #include <emmintrin.h> /* SSE2 */ #define Z_SLIDE_INIT_SIMD(wsize) _mm_set1_epi16((ush)(wsize)) #define Z_SLIDE_HASH_SIMD(table, size, vector_wsize) \ for (const Posf* const end = table + size; table != end;) { \ __m128i vO = _mm_loadu_si128((__m128i *)(table + 0)); \ vO = _mm_subs_epu16(vO, vector_wsize); \ _mm_storeu_si128((__m128i *)(table + 0), vO); \ table += 8; \ } typedef __m128i z_vec128i_u16x8_t; #elif defined(DEFLATE_SLIDE_HASH_NEON) #include <arm_neon.h> /* NEON */ #define Z_SLIDE_INIT_SIMD(wsize) vdupq_n_u16((ush)(wsize)) #define Z_SLIDE_HASH_SIMD(table, size, vector_wsize) \ for (const Posf* const end = table + size; table != end;) { \ uint16x8_t vO = vld1q_u16(table + 0); \ uint16x8_t v8 = vld1q_u16(table + 8); \ vO = vqsubq_u16(vO, vector_wsize); \ v8 = vqsubq_u16(v8, vector_wsize); \ vst1q_u16(table + 0, vO); \ vst1q_u16(table + 8, v8); \ table += 8 + 8; \ } typedef uint16x8_t z_vec128i_u16x8_t; #else #error slide_hash_simd is not defined for your build target #endif /* =========================================================================== * Slide the hash table when sliding the window down (could be avoided with 32 * bit values at the expense of memory usage). We slide even when level == 0 to * keep the hash table consistent if we switch back to level > 0 later. */ local INLINE void slide_hash_simd( Posf *head, Posf *prev, const uInt w_size, const uInt hash_size) { /* * The SIMD implementation of the hash table slider assumes: * * 1. hash chain offset is 2 bytes. Should be true as Pos is "ush" type. */ Assert(sizeof(Pos) == 2, "Pos type size error: should be 2 bytes"); Assert(sizeof(ush) == 2, "ush type size error: should be 2 bytes"); Assert(hash_size <= (1 << 16), "Hash table maximum size error"); Assert(hash_size >= (1 << 8), "Hash table minimum size error"); Assert(w_size == (ush)w_size, "Prev table size error"); /* * 2. The hash & prev table sizes are a multiple of 32 bytes (256 bits), * since the NEON table slider moves two 128-bit items per loop (loop is * unrolled on NEON for performance, see http://crbug.com/863257). */ Assert(!((hash_size * sizeof(head[0])) & (32 - 1)), "Hash table size error: should be a multiple of 32 bytes"); Assert(!((w_size * sizeof(prev[0])) & (32 - 1)), "Prev table size error: should be a multiple of 32 bytes"); /* * Duplicate (ush)w_size in each uint16_t component of a 128-bit vector. */ const z_vec128i_u16x8_t vec_wsize = Z_SLIDE_INIT_SIMD(w_size); /* * Slide {head,prev} hash chain values: subtracts (ush)w_size from every * value with a saturating SIMD subtract, to clamp the result to 0(NIL), * to implement slide_hash() `(m >= wsize ? m - wsize : NIL);` code. */ Z_SLIDE_HASH_SIMD(head, hash_size, vec_wsize); #ifndef FASTEST Z_SLIDE_HASH_SIMD(prev, w_size, vec_wsize); #endif } #undef z_vec128i_u16x8_t #undef Z_SLIDE_HASH_SIMD #undef Z_SLIDE_INIT_SIMD #endif /* SLIDE_HASH_SIMD_H */
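The Z_SLIDE_HASH_SIMD macros above are saturating-subtract loops; the comment inside slide_hash_simd() says they implement slide_hash()'s `(m >= wsize ? m - wsize : NIL)` clamping. A plain scalar equivalent, with uint16_t standing in for zlib's Posf, would look roughly like this:

#include <stdint.h>

/* Scalar reference for what Z_SLIDE_HASH_SIMD computes with _mm_subs_epu16 /
 * vqsubq_u16: subtract w_size from every 16-bit hash-chain entry, clamping
 * at 0 (NIL) instead of wrapping around. Types are stand-ins for Posf/uInt. */
static void slide_hash_scalar(uint16_t *table, unsigned size, unsigned w_size)
{
    for (unsigned i = 0; i < size; i++) {
        unsigned m = table[i];
        table[i] = (uint16_t)(m >= w_size ? m - w_size : 0);
    }
}

Saturating subtraction gives the clamp to zero for free, which is why the SIMD version needs no per-element branch.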
dandycheung/baulk
lib/archive/liblzma/lz/lz_encoder_mf.c
/////////////////////////////////////////////////////////////////////////////// // /// \file lz_encoder_mf.c /// \brief Match finders /// // Authors: <NAME> // <NAME> // // This file has been put into the public domain. // You can do whatever you want with this file. // /////////////////////////////////////////////////////////////////////////////// #include "lz_encoder.h" #include "lz_encoder_hash.h" #include "memcmplen.h" /// \brief Find matches starting from the current byte /// /// \return The length of the longest match found extern uint32_t lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches) { // Call the match finder. It returns the number of length-distance // pairs found. // FIXME: Minimum count is zero, what _exactly_ is the maximum? const uint32_t count = mf->find(mf, matches); // Length of the longest match; assume that no matches were found // and thus the maximum length is zero. uint32_t len_best = 0; if (count > 0) { #ifndef NDEBUG // Validate the matches. for (uint32_t i = 0; i < count; ++i) { assert(matches[i].len <= mf->nice_len); assert(matches[i].dist < mf->read_pos); assert(memcmp(mf_ptr(mf) - 1, mf_ptr(mf) - matches[i].dist - 2, matches[i].len) == 0); } #endif // The last used element in the array contains // the longest match. len_best = matches[count - 1].len; // If a match of maximum search length was found, try to // extend the match to maximum possible length. if (len_best == mf->nice_len) { // The limit for the match length is either the // maximum match length supported by the LZ-based // encoder or the number of bytes left in the // dictionary, whichever is smaller. uint32_t limit = mf_avail(mf) + 1; if (limit > mf->match_len_max) limit = mf->match_len_max; // Pointer to the byte we just ran through // the match finder. const uint8_t *p1 = mf_ptr(mf) - 1; // Pointer to the beginning of the match. We need -1 // here because the match distances are zero based. const uint8_t *p2 = p1 - matches[count - 1].dist - 1; len_best = lzma_memcmplen(p1, p2, len_best, limit); } } *count_ptr = count; // Finally update the read position to indicate that match finder was // run for this dictionary offset. ++mf->read_ahead; return len_best; } /// Hash value to indicate unused element in the hash. Since we start the /// positions from dict_size + 1, zero is always too far to qualify /// as usable match position. #define EMPTY_HASH_VALUE 0 /// Normalization must be done when lzma_mf.offset + lzma_mf.read_pos /// reaches MUST_NORMALIZE_POS. #define MUST_NORMALIZE_POS UINT32_MAX /// \brief Normalizes hash values /// /// The hash arrays store positions of match candidates. The positions are /// relative to an arbitrary offset that is not the same as the absolute /// offset in the input stream. The relative position of the current byte /// is lzma_mf.offset + lzma_mf.read_pos. The distances of the matches are /// the differences of the current read position and the position found from /// the hash. /// /// To prevent integer overflows of the offsets stored in the hash arrays, /// we need to "normalize" the stored values now and then. During the /// normalization, we drop values that indicate distance greater than the /// dictionary size, thus making space for new values. static void normalize(lzma_mf *mf) { assert(mf->read_pos + mf->offset == MUST_NORMALIZE_POS); // In future we may not want to touch the lowest bits, because there // may be match finders that use larger resolution than one byte. 
const uint32_t subvalue = (MUST_NORMALIZE_POS - mf->cyclic_size); // & ~((UINT32_C(1) << 10) - 1); for (uint32_t i = 0; i < mf->hash_count; ++i) { // If the distance is greater than the dictionary size, // we can simply mark the hash element as empty. if (mf->hash[i] <= subvalue) mf->hash[i] = EMPTY_HASH_VALUE; else mf->hash[i] -= subvalue; } for (uint32_t i = 0; i < mf->sons_count; ++i) { // Do the same for mf->son. // // NOTE: There may be uninitialized elements in mf->son. // Valgrind may complain that the "if" below depends on // an uninitialized value. In this case it is safe to ignore // the warning. See also the comments in lz_encoder_init() // in lz_encoder.c. if (mf->son[i] <= subvalue) mf->son[i] = EMPTY_HASH_VALUE; else mf->son[i] -= subvalue; } // Update offset to match the new locations. mf->offset -= subvalue; return; } /// Mark the current byte as processed from point of view of the match finder. static void move_pos(lzma_mf *mf) { if (++mf->cyclic_pos == mf->cyclic_size) mf->cyclic_pos = 0; ++mf->read_pos; assert(mf->read_pos <= mf->write_pos); if (unlikely(mf->read_pos + mf->offset == UINT32_MAX)) normalize(mf); } /// When flushing, we cannot run the match finder unless there is nice_len /// bytes available in the dictionary. Instead, we skip running the match /// finder (indicating that no match was found), and count how many bytes we /// have ignored this way. /// /// When new data is given after the flushing was completed, the match finder /// is restarted by rewinding mf->read_pos backwards by mf->pending. Then /// the missed bytes are added to the hash using the match finder's skip /// function (with small amount of input, it may start using mf->pending /// again if flushing). /// /// Due to this rewinding, we don't touch cyclic_pos or test for /// normalization. It will be done when the match finder's skip function /// catches up after a flush. static void move_pending(lzma_mf *mf) { ++mf->read_pos; assert(mf->read_pos <= mf->write_pos); ++mf->pending; } /// Calculate len_limit and determine if there is enough input to run /// the actual match finder code. Sets up "cur" and "pos". This macro /// is used by all find functions and binary tree skip functions. Hash /// chain skip function doesn't need len_limit so a simpler code is used /// in them. #define header(is_bt, len_min, ret_op) \ uint32_t len_limit = mf_avail(mf); \ if (mf->nice_len <= len_limit) { \ len_limit = mf->nice_len; \ } else if (len_limit < (len_min) \ || (is_bt && mf->action == LZMA_SYNC_FLUSH)) { \ assert(mf->action != LZMA_RUN); \ move_pending(mf); \ ret_op; \ } \ const uint8_t *cur = mf_ptr(mf); \ const uint32_t pos = mf->read_pos + mf->offset /// Header for find functions. "return 0" indicates that zero matches /// were found. #define header_find(is_bt, len_min) \ header(is_bt, len_min, return 0); \ uint32_t matches_count = 0 /// Header for a loop in a skip function. "continue" tells to skip the rest /// of the code in the loop. #define header_skip(is_bt, len_min) \ header(is_bt, len_min, continue) /// Calls hc_find_func() or bt_find_func() and calculates the total number /// of matches found. Updates the dictionary position and returns the number /// of matches found. 
#define call_find(func, len_best) \ do { \ matches_count = func(len_limit, pos, cur, cur_match, mf->depth, \ mf->son, mf->cyclic_pos, mf->cyclic_size, \ matches + matches_count, len_best) \ - matches; \ move_pos(mf); \ return matches_count; \ } while (0) //////////////// // Hash Chain // //////////////// #if defined(HAVE_MF_HC3) || defined(HAVE_MF_HC4) /// /// /// \param len_limit Don't look for matches longer than len_limit. /// \param pos lzma_mf.read_pos + lzma_mf.offset /// \param cur Pointer to current byte (mf_ptr(mf)) /// \param cur_match Start position of the current match candidate /// \param depth Maximum length of the hash chain /// \param son lzma_mf.son (contains the hash chain) /// \param cyclic_pos /// \param cyclic_size /// \param matches Array to hold the matches. /// \param len_best The length of the longest match found so far. static lzma_match * hc_find_func( const uint32_t len_limit, const uint32_t pos, const uint8_t *const cur, uint32_t cur_match, uint32_t depth, uint32_t *const son, const uint32_t cyclic_pos, const uint32_t cyclic_size, lzma_match *matches, uint32_t len_best) { son[cyclic_pos] = cur_match; while (true) { const uint32_t delta = pos - cur_match; if (depth-- == 0 || delta >= cyclic_size) return matches; const uint8_t *const pb = cur - delta; cur_match = son[cyclic_pos - delta + (delta > cyclic_pos ? cyclic_size : 0)]; if (pb[len_best] == cur[len_best] && pb[0] == cur[0]) { uint32_t len = lzma_memcmplen(pb, cur, 1, len_limit); if (len_best < len) { len_best = len; matches->len = len; matches->dist = delta - 1; ++matches; if (len == len_limit) return matches; } } } } #define hc_find(len_best) \ call_find(hc_find_func, len_best) #define hc_skip() \ do { \ mf->son[mf->cyclic_pos] = cur_match; \ move_pos(mf); \ } while (0) #endif #ifdef HAVE_MF_HC3 extern uint32_t lzma_mf_hc3_find(lzma_mf *mf, lzma_match *matches) { header_find(false, 3); hash_3_calc(); const uint32_t delta2 = pos - mf->hash[hash_2_value]; const uint32_t cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value]; mf->hash[hash_2_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_value] = pos; uint32_t len_best = 2; if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { len_best = lzma_memcmplen(cur - delta2, cur, len_best, len_limit); matches[0].len = len_best; matches[0].dist = delta2 - 1; matches_count = 1; if (len_best == len_limit) { hc_skip(); return 1; // matches_count } } hc_find(len_best); } extern void lzma_mf_hc3_skip(lzma_mf *mf, uint32_t amount) { do { if (mf_avail(mf) < 3) { move_pending(mf); continue; } const uint8_t *cur = mf_ptr(mf); const uint32_t pos = mf->read_pos + mf->offset; hash_3_calc(); const uint32_t cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value]; mf->hash[hash_2_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_value] = pos; hc_skip(); } while (--amount != 0); } #endif #ifdef HAVE_MF_HC4 extern uint32_t lzma_mf_hc4_find(lzma_mf *mf, lzma_match *matches) { header_find(false, 4); hash_4_calc(); uint32_t delta2 = pos - mf->hash[hash_2_value]; const uint32_t delta3 = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value]; const uint32_t cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value]; mf->hash[hash_2_value ] = pos; mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos; mf->hash[FIX_4_HASH_SIZE + hash_value] = pos; uint32_t len_best = 1; if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { len_best = 2; matches[0].len = 2; matches[0].dist = delta2 - 1; matches_count = 1; } if (delta2 != delta3 && delta3 < mf->cyclic_size && *(cur - delta3) == *cur) { len_best = 3; 
matches[matches_count++].dist = delta3 - 1; delta2 = delta3; } if (matches_count != 0) { len_best = lzma_memcmplen(cur - delta2, cur, len_best, len_limit); matches[matches_count - 1].len = len_best; if (len_best == len_limit) { hc_skip(); return matches_count; } } if (len_best < 3) len_best = 3; hc_find(len_best); } extern void lzma_mf_hc4_skip(lzma_mf *mf, uint32_t amount) { do { if (mf_avail(mf) < 4) { move_pending(mf); continue; } const uint8_t *cur = mf_ptr(mf); const uint32_t pos = mf->read_pos + mf->offset; hash_4_calc(); const uint32_t cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value]; mf->hash[hash_2_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos; mf->hash[FIX_4_HASH_SIZE + hash_value] = pos; hc_skip(); } while (--amount != 0); } #endif ///////////////// // Binary Tree // ///////////////// #if defined(HAVE_MF_BT2) || defined(HAVE_MF_BT3) || defined(HAVE_MF_BT4) static lzma_match * bt_find_func( const uint32_t len_limit, const uint32_t pos, const uint8_t *const cur, uint32_t cur_match, uint32_t depth, uint32_t *const son, const uint32_t cyclic_pos, const uint32_t cyclic_size, lzma_match *matches, uint32_t len_best) { uint32_t *ptr0 = son + (cyclic_pos << 1) + 1; uint32_t *ptr1 = son + (cyclic_pos << 1); uint32_t len0 = 0; uint32_t len1 = 0; while (true) { const uint32_t delta = pos - cur_match; if (depth-- == 0 || delta >= cyclic_size) { *ptr0 = EMPTY_HASH_VALUE; *ptr1 = EMPTY_HASH_VALUE; return matches; } uint32_t *const pair = son + ((cyclic_pos - delta + (delta > cyclic_pos ? cyclic_size : 0)) << 1); const uint8_t *const pb = cur - delta; uint32_t len = my_min(len0, len1); if (pb[len] == cur[len]) { len = lzma_memcmplen(pb, cur, len + 1, len_limit); if (len_best < len) { len_best = len; matches->len = len; matches->dist = delta - 1; ++matches; if (len == len_limit) { *ptr1 = pair[0]; *ptr0 = pair[1]; return matches; } } } if (pb[len] < cur[len]) { *ptr1 = cur_match; ptr1 = pair + 1; cur_match = *ptr1; len1 = len; } else { *ptr0 = cur_match; ptr0 = pair; cur_match = *ptr0; len0 = len; } } } static void bt_skip_func( const uint32_t len_limit, const uint32_t pos, const uint8_t *const cur, uint32_t cur_match, uint32_t depth, uint32_t *const son, const uint32_t cyclic_pos, const uint32_t cyclic_size) { uint32_t *ptr0 = son + (cyclic_pos << 1) + 1; uint32_t *ptr1 = son + (cyclic_pos << 1); uint32_t len0 = 0; uint32_t len1 = 0; while (true) { const uint32_t delta = pos - cur_match; if (depth-- == 0 || delta >= cyclic_size) { *ptr0 = EMPTY_HASH_VALUE; *ptr1 = EMPTY_HASH_VALUE; return; } uint32_t *pair = son + ((cyclic_pos - delta + (delta > cyclic_pos ? 
cyclic_size : 0)) << 1); const uint8_t *pb = cur - delta; uint32_t len = my_min(len0, len1); if (pb[len] == cur[len]) { len = lzma_memcmplen(pb, cur, len + 1, len_limit); if (len == len_limit) { *ptr1 = pair[0]; *ptr0 = pair[1]; return; } } if (pb[len] < cur[len]) { *ptr1 = cur_match; ptr1 = pair + 1; cur_match = *ptr1; len1 = len; } else { *ptr0 = cur_match; ptr0 = pair; cur_match = *ptr0; len0 = len; } } } #define bt_find(len_best) \ call_find(bt_find_func, len_best) #define bt_skip() \ do { \ bt_skip_func(len_limit, pos, cur, cur_match, mf->depth, \ mf->son, mf->cyclic_pos, \ mf->cyclic_size); \ move_pos(mf); \ } while (0) #endif #ifdef HAVE_MF_BT2 extern uint32_t lzma_mf_bt2_find(lzma_mf *mf, lzma_match *matches) { header_find(true, 2); hash_2_calc(); const uint32_t cur_match = mf->hash[hash_value]; mf->hash[hash_value] = pos; bt_find(1); } extern void lzma_mf_bt2_skip(lzma_mf *mf, uint32_t amount) { do { header_skip(true, 2); hash_2_calc(); const uint32_t cur_match = mf->hash[hash_value]; mf->hash[hash_value] = pos; bt_skip(); } while (--amount != 0); } #endif #ifdef HAVE_MF_BT3 extern uint32_t lzma_mf_bt3_find(lzma_mf *mf, lzma_match *matches) { header_find(true, 3); hash_3_calc(); const uint32_t delta2 = pos - mf->hash[hash_2_value]; const uint32_t cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value]; mf->hash[hash_2_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_value] = pos; uint32_t len_best = 2; if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { len_best = lzma_memcmplen( cur, cur - delta2, len_best, len_limit); matches[0].len = len_best; matches[0].dist = delta2 - 1; matches_count = 1; if (len_best == len_limit) { bt_skip(); return 1; // matches_count } } bt_find(len_best); } extern void lzma_mf_bt3_skip(lzma_mf *mf, uint32_t amount) { do { header_skip(true, 3); hash_3_calc(); const uint32_t cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value]; mf->hash[hash_2_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_value] = pos; bt_skip(); } while (--amount != 0); } #endif #ifdef HAVE_MF_BT4 extern uint32_t lzma_mf_bt4_find(lzma_mf *mf, lzma_match *matches) { header_find(true, 4); hash_4_calc(); uint32_t delta2 = pos - mf->hash[hash_2_value]; const uint32_t delta3 = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value]; const uint32_t cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value]; mf->hash[hash_2_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos; mf->hash[FIX_4_HASH_SIZE + hash_value] = pos; uint32_t len_best = 1; if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) { len_best = 2; matches[0].len = 2; matches[0].dist = delta2 - 1; matches_count = 1; } if (delta2 != delta3 && delta3 < mf->cyclic_size && *(cur - delta3) == *cur) { len_best = 3; matches[matches_count++].dist = delta3 - 1; delta2 = delta3; } if (matches_count != 0) { len_best = lzma_memcmplen( cur, cur - delta2, len_best, len_limit); matches[matches_count - 1].len = len_best; if (len_best == len_limit) { bt_skip(); return matches_count; } } if (len_best < 3) len_best = 3; bt_find(len_best); } extern void lzma_mf_bt4_skip(lzma_mf *mf, uint32_t amount) { do { header_skip(true, 4); hash_4_calc(); const uint32_t cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value]; mf->hash[hash_2_value] = pos; mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos; mf->hash[FIX_4_HASH_SIZE + hash_value] = pos; bt_skip(); } while (--amount != 0); } #endif
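The doc comment on normalize() in lz_encoder_mf.c explains that stored positions are periodically rebased by subtracting subvalue = MUST_NORMALIZE_POS - cyclic_size, and that anything at or below subvalue is dropped to EMPTY_HASH_VALUE because it would indicate a distance beyond the dictionary. A small self-contained sketch of just that rebasing rule, with made-up array contents and sizes:

#include <stdint.h>
#include <stdio.h>

#define EMPTY 0u

/* Rebase stored positions the way normalize() does: entries whose value is
 * <= subvalue are too old to still be inside the dictionary window, so they
 * become EMPTY; the rest are shifted down by subvalue. */
static void normalize_positions(uint32_t *tab, size_t n, uint32_t subvalue)
{
    for (size_t i = 0; i < n; i++)
        tab[i] = (tab[i] <= subvalue) ? EMPTY : tab[i] - subvalue;
}

int main(void)
{
    uint32_t tab[] = { 10, 4000000000u, 4294967295u };  /* made-up positions */
    normalize_positions(tab, 3, 4294967295u - 1024);    /* cyclic_size = 1024 */
    printf("%u %u %u\n", tab[0], tab[1], tab[2]);       /* prints: 0 0 1024 */
    return 0;
}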
dandycheung/baulk
lib/archive/ppmd/Ppmd8.h
/* Ppmd8.h -- Ppmd8 (PPMdI) compression codec 2021-04-13 : <NAME> : Public domain This code is based on: PPMd var.I (2002): <NAME> : Public domain Carryless rangecoder (1999): <NAME> : Public domain */ #ifndef __PPMD8_H #define __PPMD8_H #include "Ppmd.h" EXTERN_C_BEGIN #define PPMD8_MIN_ORDER 2 #define PPMD8_MAX_ORDER 16 struct CPpmd8_Context_; typedef Ppmd_Ref_Type(struct CPpmd8_Context_) CPpmd8_Context_Ref; // MY_CPU_pragma_pack_push_1 typedef struct CPpmd8_Context_ { Byte NumStats; Byte Flags; union { UInt16 SummFreq; CPpmd_State2 State2; } Union2; union { CPpmd_State_Ref Stats; CPpmd_State4 State4; } Union4; CPpmd8_Context_Ref Suffix; } CPpmd8_Context; // MY_CPU_pragma_pop #define Ppmd8Context_OneState(p) ((CPpmd_State *)&(p)->Union2) /* PPMdI code rev.2 contains the fix over PPMdI code rev.1. But the code PPMdI.2 is not compatible with PPMdI.1 for some files compressed in FREEZE mode. So we disable FREEZE mode support. */ // #define PPMD8_FREEZE_SUPPORT enum { PPMD8_RESTORE_METHOD_RESTART, PPMD8_RESTORE_METHOD_CUT_OFF #ifdef PPMD8_FREEZE_SUPPORT , PPMD8_RESTORE_METHOD_FREEZE #endif , PPMD8_RESTORE_METHOD_UNSUPPPORTED }; typedef struct { CPpmd8_Context *MinContext, *MaxContext; CPpmd_State *FoundState; unsigned OrderFall, InitEsc, PrevSuccess, MaxOrder, RestoreMethod; Int32 RunLength, InitRL; /* must be 32-bit at least */ UInt32 Size; UInt32 GlueCount; UInt32 AlignOffset; Byte *Base, *LoUnit, *HiUnit, *Text, *UnitsStart; UInt32 Range; UInt32 Code; UInt32 Low; union { IByteIn *In; IByteOut *Out; } Stream; Byte Indx2Units[PPMD_NUM_INDEXES + 2]; // +2 for alignment Byte Units2Indx[128]; CPpmd_Void_Ref FreeList[PPMD_NUM_INDEXES]; UInt32 Stamps[PPMD_NUM_INDEXES]; Byte NS2BSIndx[256], NS2Indx[260]; Byte ExpEscape[16]; CPpmd_See DummySee, See[24][32]; UInt16 BinSumm[25][64]; } CPpmd8; void Ppmd8_Construct(CPpmd8 *p); BoolInt Ppmd8_Alloc(CPpmd8 *p, UInt32 size, ISzAllocPtr alloc); void Ppmd8_Free(CPpmd8 *p, ISzAllocPtr alloc); void Ppmd8_Init(CPpmd8 *p, unsigned maxOrder, unsigned restoreMethod); #define Ppmd8_WasAllocated(p) ((p)->Base != NULL) /* ---------- Internal Functions ---------- */ #define Ppmd8_GetPtr(p, ptr) Ppmd_GetPtr(p, ptr) #define Ppmd8_GetContext(p, ptr) Ppmd_GetPtr_Type(p, ptr, CPpmd8_Context) #define Ppmd8_GetStats(p, ctx) Ppmd_GetPtr_Type(p, (ctx)->Union4.Stats, CPpmd_State) void Ppmd8_Update1(CPpmd8 *p); void Ppmd8_Update1_0(CPpmd8 *p); void Ppmd8_Update2(CPpmd8 *p); #define Ppmd8_GetBinSumm(p) \ &p->BinSumm[p->NS2Indx[(size_t)Ppmd8Context_OneState(p->MinContext)->Freq - 1]] \ [ p->PrevSuccess + ((p->RunLength >> 26) & 0x20) \ + p->NS2BSIndx[Ppmd8_GetContext(p, p->MinContext->Suffix)->NumStats] + \ + p->MinContext->Flags ] CPpmd_See *Ppmd8_MakeEscFreq(CPpmd8 *p, unsigned numMasked, UInt32 *scale); /* 20.01: the original PPMdI encoder and decoder probably could work incorrectly in some rare cases, where the original PPMdI code can give "Divide by Zero" operation. We use the following fix to allow correct working of encoder and decoder in any cases. 
We correct (Escape_Freq) and (_sum_), if (_sum_) is larger than p->Range) */ #define PPMD8_CORRECT_SUM_RANGE(p, _sum_) if (_sum_ > p->Range /* /1 */) _sum_ = p->Range; /* ---------- Decode ---------- */ #define PPMD8_SYM_END (-1) #define PPMD8_SYM_ERROR (-2) /* You must set (CPpmd8::Stream.In) before Ppmd8_RangeDec_Init() Ppmd8_DecodeSymbol() out: >= 0 : decoded byte -1 : PPMD8_SYM_END : End of payload marker -2 : PPMD8_SYM_ERROR : Data error */ BoolInt Ppmd8_Init_RangeDec(CPpmd8 *p); #define Ppmd8_RangeDec_IsFinishedOK(p) ((p)->Code == 0) int Ppmd8_DecodeSymbol(CPpmd8 *p); /* ---------- Encode ---------- */ #define Ppmd8_Init_RangeEnc(p) { (p)->Low = 0; (p)->Range = 0xFFFFFFFF; } void Ppmd8_Flush_RangeEnc(CPpmd8 *p); void Ppmd8_EncodeSymbol(CPpmd8 *p, int symbol); EXTERN_C_END #endif
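Ppmd8.h documents the decode contract: assign CPpmd8::Stream.In, call Ppmd8_Init_RangeDec(), then each Ppmd8_DecodeSymbol() call yields a decoded byte, PPMD8_SYM_END, or PPMD8_SYM_ERROR. The following is only a sketch built from the declarations visible in this header; it assumes the caller has already done Ppmd8_Construct/Ppmd8_Alloc/Ppmd8_Init and wired Stream.In to an IByteIn (defined in the 7-Zip common headers, not shown here).

#include <stddef.h>
#include "Ppmd8.h"

/* Drain an already-initialized PPMd8 decoder into out[0..outCap).
 * Returns 1 on success (END marker seen and range coder finished cleanly),
 * 0 on data error or if the output buffer is too small. */
static int ppmd8_drain(CPpmd8 *p, Byte *out, size_t outCap, size_t *outLen)
{
    size_t n = 0;

    for (;;) {
        int sym = Ppmd8_DecodeSymbol(p);
        if (sym == PPMD8_SYM_END)
            break;                       /* -1: end of payload marker */
        if (sym == PPMD8_SYM_ERROR || n == outCap)
            return 0;                    /* -2: data error (or no room left) */
        out[n++] = (Byte)sym;
    }
    *outLen = n;
    return Ppmd8_RangeDec_IsFinishedOK(p) ? 1 : 0;
}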
NurdTurd/c-learning
lib/my_zero_char_array.c
/* ** EPITECH PROJECT, 2019 ** my_zero_char_array ** File description: ** */ void my_zero_char_array(char *arr, int length) { for (int i = 0; i < length; i++) arr[i] = 0; }
NurdTurd/c-learning
lib/my_putstr.c
int my_putchar(char c);

int my_putstr(char *str)
{
    int i = 0;

    while (str[i] != '\0') {
        my_putchar(str[i]);
        i++;
    }
    return (i);
}
NurdTurd/c-learning
lib/my_macro_abs.c
/*
** EPITECH PROJECT, 2019
** my_macro_abs.c
** File description:
** Demo of the ABS macro
*/

#include <stdio.h>
#include "include/my_macro_abs.h"

int main(void)
{
    printf("%d\n", ABS(-10));
    return (0);
}
NurdTurd/c-learning
lib/my_strcat.c
/*
** EPITECH PROJECT, 2019
** my_strcat
** File description:
** Appends src to the end of dest and returns dest
*/

char *my_strcat(char *dest, char const *src)
{
    int i = 0;
    int j = 0;

    while (dest[i] != '\0')
        i++;
    while (src[j] != '\0') {
        dest[i] = src[j];
        i++;
        j++;
    }
    dest[i] = '\0';
    return (dest);
}
NurdTurd/c-learning
lib/my_params_to_array.c
/*
** EPITECH PROJECT, 2019
** my_params_to_array
** File description:
** Unfinished stub: should copy each parameter into a struct info_param array
*/

struct info_param *my_params_to_array(int ac, char **av)
{
    int length;
    char *str;
    char *copy;
    char **word_array;

    (void)ac;
    (void)av;
    return (0);
}
NurdTurd/c-learning
lib/my_compute_factorial_it.c
/*
** EPITECH PROJECT, 2019
** my_compute_factorial_it
** File description:
** Returns the factorial of the number given (iteratively)
*/

#include <unistd.h>

int my_putchar(char c)
{
    return (write(1, &c, 1));
}

int my_compute_factorial_it(int nb)
{
    int resultat = 1;

    /* 13! does not fit in a 32-bit int, so anything above 12 is rejected */
    if (nb < 0 || nb > 12)
        return (0);
    while (nb > 1) {
        resultat = resultat * nb;
        nb--;
    }
    return (resultat);
}

int main(void)
{
    return (my_compute_factorial_it(5));
}
NurdTurd/c-learning
lib/do_up.c
/*
** EPITECH PROJECT, 2019
** do-op
** File description:
** Applies +, -, *, / or % to the two numbers given on the command line
*/

#include <unistd.h>
#include <stdlib.h>

void my_putchar(char c)
{
    write(1, &c, 1);
}

void my_putstr(char *str)
{
    for (int i = 0; str[i] != '\0'; i++)
        my_putchar(str[i]);
}

int my_put_nbr(int nb)
{
    int modulo;

    if (nb < 0) {
        my_putchar('-');
        nb = nb * (-1);
    }
    if (nb >= 10) {
        modulo = nb % 10;
        nb = (nb - modulo) / 10;
        my_put_nbr(nb);
        my_putchar(48 + modulo);
    } else {
        my_putchar(nb % 10 + 48);
    }
    return (0);
}

/*
int do_op(int ac, char **av)
{
    if (av[1] || av[3] == NULL) {
        return (84);
    }
    if (av[2] != '+' || '-' || '*' || '/' || '%') {
        my_putchar('0');
        return (84);
    }
    if (av[2] == '/' && av[3] == '0') {
        my_putstr("Stop: division by zero");
        av[2] = '$';
    }
    if (av[2] == '%' && av[3] == '0') {
        my_putstr("Stop: modulo by zero");
        av[2] = '$';
    }
    switch (av[2]) {
    case '+': av[1] + av[3];
    case '-': av[1] - av[3];
    case '*': av[1] * av[3];
    case '/': av[1] / av[3];
    case '%': av[1] % av[3];
    default: my_putstr("wtf frrr");
    }
}
*/

int main(int ac, char **av)
{
    int resultat = 0;
    int lhs;
    int rhs;

    if (ac != 4)
        return (84);
    lhs = atoi(av[1]);
    rhs = atoi(av[3]);
    if ((av[2][0] == '/' || av[2][0] == '%') && rhs == 0) {
        my_putstr(av[2][0] == '/' ?
            "Stop: division by zero\n" : "Stop: modulo by zero\n");
        return (84);
    }
    if (av[2][0] == '+')
        resultat = lhs + rhs;
    else if (av[2][0] == '-')
        resultat = lhs - rhs;
    else if (av[2][0] == '*')
        resultat = lhs * rhs;
    else if (av[2][0] == '/')
        resultat = lhs / rhs;
    else if (av[2][0] == '%')
        resultat = lhs % rhs;
    else {
        my_putchar('0');
        return (84);
    }
    my_put_nbr(resultat);
    my_putchar('\n');
    return (0);
}
NurdTurd/c-learning
lib/my_isneg.c
/*
** EPITECH PROJECT, 2019
** my_isneg
** File description:
** Prints 'N' if the number is negative, 'P' otherwise
*/

int my_putchar(char c);

int my_isneg(int n)
{
    if (n < 0) {
        my_putchar('N');
    } else {
        my_putchar('P');
    }
    my_putchar('\n');
    return (0);
}
NurdTurd/c-learning
examples/merge-sort.c
#include <stdlib.h>
#include <stdio.h>

// Merge Function
void merge(int arr[], int l, int m, int r)
{
    int i, j, k;
    int n1 = m - l + 1;
    int n2 = r - m;
    int L[n1], R[n2];

    for (i = 0; i < n1; i++)
        L[i] = arr[l + i];
    for (j = 0; j < n2; j++)
        R[j] = arr[m + 1 + j];
    i = 0;
    j = 0;
    k = l;
    while (i < n1 && j < n2) {
        if (L[i] <= R[j]) {
            arr[k] = L[i];
            i++;
        } else {
            arr[k] = R[j];
            j++;
        }
        k++;
    }
    while (i < n1) {
        arr[k] = L[i];
        i++;
        k++;
    }
    while (j < n2) {
        arr[k] = R[j];
        j++;
        k++;
    }
}

// Merge Sort Function in C
void mergeSort(int arr[], int l, int r)
{
    if (l < r) {
        int m = l + (r - l) / 2;
        mergeSort(arr, l, m);
        mergeSort(arr, m + 1, r);
        merge(arr, l, m, r);
    }
}

// Function to Print Elements of Array
void printArray(int A[], int size)
{
    int i;
    for (i = 0; i < size; i++)
        printf("%d ", A[i]);
    printf("\n");
}

// Main Method
int main()
{
    int arr[] = { 85, 24, 63, 45, 17, 31, 96, 50 };
    int arr_size = sizeof(arr) / sizeof(arr[0]);

    printf("Given array is\n");
    printArray(arr, arr_size);
    mergeSort(arr, 0, arr_size - 1);
    printf("\nSorted array is\n");
    printArray(arr, arr_size);
    return 0;
}
NurdTurd/c-learning
lib/my_strcmp.c
/*
** EPITECH PROJECT, 2019
** my_strcmp
** File description:
** Reproduce the behavior of the strcmp
*/

int my_strcmp(char const *s1, char const *s2)
{
    int i;

    for (i = 0; s1[i] == s2[i]; i++)
        if (s2[i] == '\0')
            return 0;
    return s1[i] - s2[i];
}
NurdTurd/c-learning
lib/my_getnbr.c
/* ** EPITECH PROJECT, 2019 ** CPool_Day04_2019 ** File description: ** my_getnbr.c */ #include <stddef.h> int my_getnbr(char const *str) { int i = -1; int neg = 1; long int number = 0; if (str == NULL) return 0; while (str[++i] != '\0' && number == 0) { if (str[i] == '-') neg *= -1; while (str[i] >= '0' && str[i] <= '9') { number = 10 * number + (str[i] - 48); ++i; } } return number * neg; }
NurdTurd/c-learning
lib/my_print_params.c
/*
** EPITECH PROJECT, 2019
** my_print_params
** File description:
** Prints each command line parameter on its own line
*/

int my_putchar(char c);

int main(int argc, char **argv)
{
    for (int i = 0; i < argc; i++) {
        for (int j = 0; argv[i][j] != '\0'; j++)
            my_putchar(argv[i][j]);
        my_putchar('\n');
    }
    return (0);
}
NurdTurd/c-learning
lib/my_put_nbr.c
/*
** EPITECH PROJECT, 2019
** my_put_nbr
** File description:
** Prints a number in base 10 using my_putchar
*/

int my_putchar(char c);

int my_put_nbr(int nb)
{
    int mod;

    if (nb < 0) {
        my_putchar('-');
        nb = nb * (-1);
    }
    if (nb >= 10) {
        mod = nb % 10;
        nb = (nb - mod) / 10;
        my_put_nbr(nb);
        my_putchar(48 + mod);
    } else {
        my_putchar(48 + nb % 10);
    }
    return (0);
}
NurdTurd/c-learning
examples/print-ascii-table.c
//Program to print ASCII table. #include <stdio.h> int main() { unsigned char count; for(count=32; count< 255; count+=1) { printf(" %3d - %c",count,count); if(count % 6==0) printf("\n"); } return 0; }
NurdTurd/c-learning
lib/my_compute_factorial_rec.c
/*
** EPITECH PROJECT, 2019
** my_compute_factorial_rec
** File description:
** Returns the factorial of the number given (recursively)
*/

int my_compute_factorial_rec(int nb)
{
    /* 13! does not fit in a 32-bit int, so anything above 12 is rejected */
    if (nb < 0 || nb > 12)
        return (0);
    if (nb == 0)
        return (1);
    return (nb * my_compute_factorial_rec(nb - 1));
}
NurdTurd/c-learning
lib/my_revstr.c
/*
** EPITECH PROJECT, 2019
** my_revstr
** File description:
** Reverse string given, in place
*/

void my_swap2(char *a, char *b)
{
    char t;

    t = *b;
    *b = *a;
    *a = t;
}

char *my_revstr(char *str)
{
    int len = 0;
    int x = 0;
    int y;

    while (str[len] != '\0')
        len++;
    y = len - 1;
    while (x < y) {
        my_swap2(&str[x], &str[y]);
        x++;
        y--;
    }
    return (str);
}
NurdTurd/c-learning
lib/my_putchar.c
#include <unistd.h>

int my_putchar(char c)
{
    return (write(1, &c, 1));
}
LaudateCorpus1/otp
erts/emulator/nifs/common/prim_net_nif.c
/* * %CopyrightBegin% * * Copyright Ericsson AB 2018-2022. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * %CopyrightEnd% * * ---------------------------------------------------------------------- * Purpose : The NIF (C) part of the net interface * This is a module of miscellaneous functions. * * We (try to) avoid name clashes by prefixing "all" internal * function names with enet. * ---------------------------------------------------------------------- * */ #define STATIC_ERLANG_NIF 1 #ifdef HAVE_CONFIG_H #include "config.h" #endif /* If we HAVE_SCTP_H and Solaris, we need to define the following in * order to get SCTP working: */ #if (defined(HAVE_SCTP_H) && defined(__sun) && defined(__SVR4)) #define SOLARIS10 1 /* WARNING: This is not quite correct, it may also be Solaris 11! */ #define _XPG4_2 #define __EXTENSIONS__ #endif #include <stdio.h> #include <stdlib.h> #include <stddef.h> #include <ctype.h> #include <sys/types.h> #include <errno.h> #include <time.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #ifdef HAVE_SYS_UIO_H #include <sys/uio.h> #endif #ifdef HAVE_NET_IF_DL_H #include <net/if_dl.h> #endif #ifdef HAVE_IFADDRS_H #include <ifaddrs.h> #elif defined(__PASE__) /* PASE has this, but under a different name because AIX doesn't have it. */ #include <as400_protos.h> /* * We don't redefine the function names because they're used in other * contexts, but the struct is safe to rename. */ #define ifaddrs ifaddrs_pase #endif #ifdef HAVE_NETPACKET_PACKET_H #include <netpacket/packet.h> #endif #ifdef HAVE_SYS_UN_H #include <sys/un.h> #endif /* SENDFILE STUFF HERE IF WE NEED IT... */ #if defined(__APPLE__) && defined(__MACH__) && !defined(__DARWIN__) #define __DARWIN__ 1 #endif #ifdef __WIN32__ // #define STRNCASECMP strncasecmp #define INCL_WINSOCK_API_TYPEDEFS 1 #ifndef WINDOWS_H_INCLUDES_WINSOCK2_H #include <winsock2.h> #endif #include <windows.h> #include <Ws2tcpip.h> /* NEED VC 6.0 or higher */ /* Visual studio 2008+: NTDDI_VERSION needs to be set for iphlpapi.h * to define the right structures. It needs to be set to WINXP (or LONGHORN) * for IPV6 to work and it's set lower by default, so we need to change it. 
*/ #ifdef HAVE_SDKDDKVER_H # include <sdkddkver.h> # ifdef NTDDI_VERSION # undef NTDDI_VERSION # endif # define NTDDI_VERSION NTDDI_WINXP #endif #include <iphlpapi.h> #undef WANT_NONBLOCKING #include "sys.h" #else /* !__WIN32__ */ #include <sys/time.h> #ifdef NETDB_H_NEEDS_IN_H #include <netinet/in.h> #endif #include <netdb.h> #include <sys/socket.h> #include <netinet/in.h> #ifdef DEF_INADDR_LOOPBACK_IN_RPC_TYPES_H #include <rpc/types.h> #endif #include <netinet/ip.h> #include <netinet/tcp.h> #include <netinet/udp.h> #include <arpa/inet.h> #include <sys/param.h> #ifdef HAVE_ARPA_NAMESER_H #include <arpa/nameser.h> #endif #ifdef HAVE_SYS_SOCKIO_H #include <sys/sockio.h> #endif #ifdef HAVE_SYS_IOCTL_H #include <sys/ioctl.h> #endif #include <net/if.h> #ifdef HAVE_SCHED_H #include <sched.h> #endif #ifdef HAVE_SETNS_H #include <setns.h> #endif #define HAVE_UDP #ifndef WANT_NONBLOCKING #define WANT_NONBLOCKING #endif #include "sys.h" #endif #include <erl_nif.h> #include "socket_dbg.h" #include "socket_int.h" #include "socket_tarray.h" #include "socket_util.h" /* All platforms fail on malloc errors. */ #define FATAL_MALLOC #ifdef __WIN32__ #define net_gethostname(__buf__, __bufSz__) gethostname((__buf__), (__bufSz__)) #else #define net_gethostname(__buf__, __bufSz__) gethostname((__buf__), (__bufSz__)) #endif // __WIN32__ /* *** Misc macros and defines *** */ #ifdef __WIN32__ #define get_errno() WSAGetLastError() #else #define get_errno() errno #endif #define HOSTNAME_LEN 256 #define SERVICE_LEN 256 /* MAXHOSTNAMELEN could be 64 or 255 depending * on the platform. Instead, use INET_MAXHOSTNAMELEN * which is always 255 across all platforms */ #define NET_MAXHOSTNAMELEN 255 /* =================================================================== * * * * Various enif macros * * * * =================================================================== */ /* Debug stuff... */ #define NET_NIF_DEBUG_DEFAULT FALSE #define NDBG( proto ) ESOCK_DBG_PRINTF( data.debug , proto ) #define NDBG2( dbg, proto ) ESOCK_DBG_PRINTF( (dbg || data.debug) , proto ) /* Global 'stuff' */ typedef struct { BOOLEAN_T debug; } NetData; /* =================================================================== * * * * Static data * * * * =================================================================== */ static NetData data; /* ---------------------------------------------------------------------- * F o r w a r d s * ---------------------------------------------------------------------- */ /* THIS IS JUST TEMPORARY */ extern char* erl_errno_id(int error); /* All the nif "callback" functions for the net API has * the exact same API: * * nif_<funcname>(ErlNifEnv* env, * int argc, * const ERL_NIF_TERM argv[]); * * So, to simplify, we use some macro magic to define those. * * These are the functions making up the "official" API. 
*/ #define ENET_NIF_FUNCS \ ENET_NIF_FUNC_DEF(info); \ ENET_NIF_FUNC_DEF(command); \ ENET_NIF_FUNC_DEF(gethostname); \ ENET_NIF_FUNC_DEF(getnameinfo); \ ENET_NIF_FUNC_DEF(getaddrinfo); \ ENET_NIF_FUNC_DEF(getifaddrs); \ ENET_NIF_FUNC_DEF(get_adapters_addresses); \ ENET_NIF_FUNC_DEF(get_if_entry); \ ENET_NIF_FUNC_DEF(get_interface_info); \ ENET_NIF_FUNC_DEF(get_ip_address_table); \ ENET_NIF_FUNC_DEF(if_name2index); \ ENET_NIF_FUNC_DEF(if_index2name); \ ENET_NIF_FUNC_DEF(if_names); #define ENET_NIF_FUNC_DEF(F) \ static ERL_NIF_TERM nif_##F(ErlNifEnv* env, \ int argc, \ const ERL_NIF_TERM argv[]); ENET_NIF_FUNCS #undef ENET_NIF_FUNC_DEF /* And here comes the functions that does the actual work (for the most part) */ static ERL_NIF_TERM enet_command(ErlNifEnv* env, ERL_NIF_TERM cmd); static ERL_NIF_TERM enet_gethostname(ErlNifEnv* env); #if defined(HAVE_GETNAMEINFO) static ERL_NIF_TERM enet_getnameinfo(ErlNifEnv* env, const ESockAddress* saP, SOCKLEN_T saLen, int flags); #endif #if defined(HAVE_GETADDRINFO) static ERL_NIF_TERM enet_getaddrinfo(ErlNifEnv* env, char* host, char* serv); #endif #if defined(HAVE_GETIFADDRS) || defined(__PASE__) static ERL_NIF_TERM enet_getifaddrs(ErlNifEnv* env, char* netns); #endif #if defined(__WIN32__) /* *** Get Adapters Addresses functions *** */ static BOOLEAN_T enet_get_adapters_addresses_args_debug(ErlNifEnv* env, const ERL_NIF_TERM eargs); static BOOLEAN_T enet_get_adapters_addresses_args_family(ErlNifEnv* env, const ERL_NIF_TERM eargs, ULONG* fam); static BOOLEAN_T enet_get_adapters_addresses_args_flags(ErlNifEnv* env, const ERL_NIF_TERM eargs, ULONG* flags); static ERL_NIF_TERM enet_get_adapters_addresses(ErlNifEnv* env, BOOLEAN_T dbg, ULONG fam, ULONG flags); static ERL_NIF_TERM enet_adapters_addresses_encode(ErlNifEnv* env, BOOLEAN_T dbg, IP_ADAPTER_ADDRESSES* ipAdAddrsP); static ERL_NIF_TERM enet_adapter_addresses_encode(ErlNifEnv* env, BOOLEAN_T dbg, IP_ADAPTER_ADDRESSES* ipAdAddrsP); static ERL_NIF_TERM enet_adapter_encode_name(ErlNifEnv* env, WCHAR* name); static ERL_NIF_TERM enet_adapter_encode_friendly_name(ErlNifEnv* env, WCHAR* fname); static ERL_NIF_TERM encode_if_oper_status(ErlNifEnv* env, DWORD status); static ERL_NIF_TERM encode_adapter_flags(ErlNifEnv* env, IP_ADAPTER_ADDRESSES* ipAdAddrsP); static ERL_NIF_TERM encode_adapter_unicast_addrs(ErlNifEnv* env, IP_ADAPTER_UNICAST_ADDRESS* firstP); static ERL_NIF_TERM encode_adapter_unicast_addr(ErlNifEnv* env, IP_ADAPTER_UNICAST_ADDRESS* addrP); static ERL_NIF_TERM encode_adapter_unicast_addr_flags(ErlNifEnv* env, DWORD flags); static ERL_NIF_TERM encode_adapter_unicast_addr_sockaddr(ErlNifEnv* env, struct sockaddr* addrP); static ERL_NIF_TERM encode_adapter_unicast_addr_porig(ErlNifEnv* env, IP_PREFIX_ORIGIN porig); static ERL_NIF_TERM encode_adapter_unicast_addr_sorig(ErlNifEnv* env, IP_SUFFIX_ORIGIN sorig); static ERL_NIF_TERM encode_adapter_unicast_addr_dad_state(ErlNifEnv* env, IP_DAD_STATE dstate); static ERL_NIF_TERM encode_adapter_anycast_addrs(ErlNifEnv* env, IP_ADAPTER_ANYCAST_ADDRESS* firstP); static ERL_NIF_TERM encode_adapter_anycast_addr(ErlNifEnv* env, IP_ADAPTER_ANYCAST_ADDRESS* addrP); static ERL_NIF_TERM encode_adapter_anycast_addr_flags(ErlNifEnv* env, DWORD flags); static ERL_NIF_TERM encode_adapter_anycast_addr_sockaddr(ErlNifEnv* env, struct sockaddr* addrP); static ERL_NIF_TERM encode_adapter_multicast_addrs(ErlNifEnv* env, IP_ADAPTER_MULTICAST_ADDRESS* firstP); static ERL_NIF_TERM encode_adapter_multicast_addr(ErlNifEnv* env, IP_ADAPTER_MULTICAST_ADDRESS* addrP); static 
ERL_NIF_TERM encode_adapter_multicast_addr_flags(ErlNifEnv* env, DWORD flags); static ERL_NIF_TERM encode_adapter_multicast_addr_sockaddr(ErlNifEnv* env, struct sockaddr* addrP); static ERL_NIF_TERM encode_adapter_dns_server_addrs(ErlNifEnv* env, IP_ADAPTER_DNS_SERVER_ADDRESS* firstP); static ERL_NIF_TERM encode_adapter_dns_server_addr(ErlNifEnv* env, IP_ADAPTER_DNS_SERVER_ADDRESS* addrP); static ERL_NIF_TERM encode_adapter_dns_server_addr_sockaddr(ErlNifEnv* env, struct sockaddr* addrP); static ERL_NIF_TERM encode_adapter_zone_indices(ErlNifEnv* env, DWORD* zoneIndices, DWORD len); static ERL_NIF_TERM encode_adapter_prefixes(ErlNifEnv* env, IP_ADAPTER_PREFIX* firstP); static ERL_NIF_TERM encode_adapter_prefix(ErlNifEnv* env, IP_ADAPTER_PREFIX* prefP); static ERL_NIF_TERM encode_adapter_prefix_sockaddr(ErlNifEnv* env, struct sockaddr* addrP); /* *** Get If Entry (MIB_IFROW) functions *** */ static ERL_NIF_TERM enet_get_if_entry(ErlNifEnv* env, BOOLEAN_T dbg, DWORD index); static BOOLEAN_T enet_get_if_entry_args_index(ErlNifEnv* env, const ERL_NIF_TERM eargs, DWORD* index); static BOOLEAN_T enet_get_if_entry_args_debug(ErlNifEnv* env, const ERL_NIF_TERM eargs); static ERL_NIF_TERM enet_if_row_encode(ErlNifEnv* env, BOOLEAN_T dbg, MIB_IFROW* rowP); static ERL_NIF_TERM encode_if_type(ErlNifEnv* env, DWORD type); static ERL_NIF_TERM encode_if_row_description(ErlNifEnv* env, DWORD len, UCHAR* buf); static ERL_NIF_TERM encode_if_admin_status(ErlNifEnv* env, DWORD status); static ERL_NIF_TERM encode_internal_if_oper_status(ErlNifEnv* env, DWORD status); static ERL_NIF_TERM encode_if_row_phys_address(ErlNifEnv* env, DWORD len, UCHAR* buf); /* *** Get Interface Info functions *** */ static ERL_NIF_TERM enet_get_interface_info(ErlNifEnv* env, BOOLEAN_T dbg); static BOOLEAN_T enet_get_interface_info_args_debug(ErlNifEnv* env, const ERL_NIF_TERM eextra); static ERL_NIF_TERM enet_interface_info_encode(ErlNifEnv* env, BOOLEAN_T dbg, IP_INTERFACE_INFO* infoP); static void encode_adapter_index_map(ErlNifEnv* env, BOOLEAN_T dbg, IP_ADAPTER_INDEX_MAP* adapterP, ERL_NIF_TERM* eadapter); static ERL_NIF_TERM encode_adapter_index_map_name(ErlNifEnv* env, WCHAR* name); static void make_adapter_index_map(ErlNifEnv* env, ERL_NIF_TERM eindex, ERL_NIF_TERM ename, ERL_NIF_TERM* emap); /* *** Get IP Address Table functions *** */ static ERL_NIF_TERM enet_get_ip_address_table(ErlNifEnv* env, BOOLEAN_T dbg); static ERL_NIF_TERM enet_get_ip_address_table_encode(ErlNifEnv* env, BOOLEAN_T dbg, MIB_IPADDRTABLE* tabP); static ERL_NIF_TERM encode_ip_address_row(ErlNifEnv* env, BOOLEAN_T dbg, MIB_IPADDRROW* rowP); static ERL_NIF_TERM encode_ip_address_row_addr(ErlNifEnv* env, BOOLEAN_T dbg, const char* descr, DWORD addr); static void make_ip_address_row(ErlNifEnv* env, ERL_NIF_TERM eaddr, ERL_NIF_TERM eindex, ERL_NIF_TERM emask, ERL_NIF_TERM eBCastAddr, ERL_NIF_TERM eReasmSize, ERL_NIF_TERM* iar); #endif #if defined(HAVE_IF_NAMETOINDEX) static ERL_NIF_TERM enet_if_name2index(ErlNifEnv* env, char* ifn); #endif #if defined(HAVE_IF_INDEXTONAME) static ERL_NIF_TERM enet_if_index2name(ErlNifEnv* env, unsigned int id); #endif #if defined(HAVE_IF_NAMEINDEX) && defined(HAVE_IF_FREENAMEINDEX) static ERL_NIF_TERM enet_if_names(ErlNifEnv* env); static unsigned int enet_if_names_length(struct if_nameindex* p); #endif /* static void net_dtor(ErlNifEnv* env, void* obj); static void net_stop(ErlNifEnv* env, void* obj, int fd, int is_direct_call); static void net_down(ErlNifEnv* env, void* obj, const ErlNifPid* pid, const ErlNifMonitor* mon); 
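 *
 * Note: none of these callbacks are currently registered; the 'netInit'
 * resource type init struct further down has NULL for all three of them.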
*/ static ERL_NIF_TERM enet_getifaddrs(ErlNifEnv* env, char* netns); static ERL_NIF_TERM enet_getifaddrs_process(ErlNifEnv* env, struct ifaddrs* ifap); static unsigned int enet_getifaddrs_length(struct ifaddrs* ifap); static void encode_ifaddrs(ErlNifEnv* env, struct ifaddrs* ifap, ERL_NIF_TERM* eifa); static ERL_NIF_TERM encode_ifaddrs_name(ErlNifEnv* env, char* name); static ERL_NIF_TERM encode_ifaddrs_flags(ErlNifEnv* env, unsigned int flags); static ERL_NIF_TERM encode_ifaddrs_addr(ErlNifEnv* env, struct sockaddr* sa); static void make_ifaddrs(ErlNifEnv* env, ERL_NIF_TERM name, ERL_NIF_TERM flags, ERL_NIF_TERM addr, ERL_NIF_TERM netmask, ERL_NIF_TERM ifu_key, ERL_NIF_TERM ifu_value, ERL_NIF_TERM data, ERL_NIF_TERM* ifAddrs); #ifdef HAVE_SETNS static BOOLEAN_T enet_getifaddrs_netns(ErlNifEnv* env, ERL_NIF_TERM map, char** netns); static BOOLEAN_T change_network_namespace(char* netns, int* cns, int* err); static BOOLEAN_T restore_network_namespace(int ns, int* err); #endif static ERL_NIF_TERM encode_sockaddr(ErlNifEnv* env, struct sockaddr* sa); static BOOLEAN_T decode_nameinfo_flags(ErlNifEnv* env, const ERL_NIF_TERM eflags, int* flags); static BOOLEAN_T decode_nameinfo_flags_list(ErlNifEnv* env, const ERL_NIF_TERM eflags, int* flags); static BOOLEAN_T decode_addrinfo_string(ErlNifEnv* env, const ERL_NIF_TERM eString, char** stringP); static ERL_NIF_TERM decode_bool(ErlNifEnv* env, ERL_NIF_TERM ebool, BOOLEAN_T* ibool); static ERL_NIF_TERM encode_address_infos(ErlNifEnv* env, struct addrinfo* addrInfo); static ERL_NIF_TERM encode_address_info(ErlNifEnv* env, struct addrinfo* addrInfoP); static unsigned int address_info_length(struct addrinfo* addrInfoP); static ERL_NIF_TERM encode_address_info_family(ErlNifEnv* env, int family); static ERL_NIF_TERM encode_address_info_type(ErlNifEnv* env, int socktype); static void make_address_info(ErlNifEnv* env, ERL_NIF_TERM fam, ERL_NIF_TERM sockType, ERL_NIF_TERM proto, ERL_NIF_TERM addr, ERL_NIF_TERM* ai); #if defined(__WIN32__) static ERL_NIF_TERM encode_uchar(ErlNifEnv* env, DWORD len, UCHAR* buf); static ERL_NIF_TERM encode_wchar(ErlNifEnv* env, WCHAR* name); #endif static BOOLEAN_T get_debug(ErlNifEnv* env, ERL_NIF_TERM map); static int on_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info); #if HAVE_IN6 # if ! defined(HAVE_IN6ADDR_ANY) || ! HAVE_IN6ADDR_ANY # if HAVE_DECL_IN6ADDR_ANY_INIT static const struct in6_addr in6addr_any = { { IN6ADDR_ANY_INIT } }; # else static const struct in6_addr in6addr_any = { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } }; # endif /* HAVE_IN6ADDR_ANY_INIT */ # endif /* ! HAVE_DECL_IN6ADDR_ANY */ # if ! defined(HAVE_IN6ADDR_LOOPBACK) || ! HAVE_IN6ADDR_LOOPBACK # if HAVE_DECL_IN6ADDR_LOOPBACK_INIT static const struct in6_addr in6addr_loopback = { { IN6ADDR_LOOPBACK_INIT } }; # else static const struct in6_addr in6addr_loopback = { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }; # endif /* HAVE_IN6ADDR_LOOPBACk_INIT */ # endif /* ! HAVE_DECL_IN6ADDR_LOOPBACK */ #endif /* HAVE_IN6 */ /* *** Local atoms *** * * These have been deprecated: * LOCAL_ATOM_DECL(idna_allow_unassigned); Should really have been idn_... * LOCAL_ATOM_DECL(idna_use_std3_ascii_rules); Should really have been idn_... 
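 *
 * The LOCAL_ATOMS and LOCAL_ERROR_REASON_ATOMS lists below are used as
 * X-macros: LOCAL_ATOM_DECL(A) is defined (and then #undef:ed) around each
 * expansion, e.g. as 'static ERL_NIF_TERM atom_##A' so that the expansion
 * declares one static term variable per atom.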
*/ #define LOCAL_ATOMS \ LOCAL_ATOM_DECL(address_info); \ LOCAL_ATOM_DECL(admin_status); \ LOCAL_ATOM_DECL(anycast_addrs); \ LOCAL_ATOM_DECL(atm); \ LOCAL_ATOM_DECL(automedia); \ LOCAL_ATOM_DECL(bcast_addr); \ LOCAL_ATOM_DECL(broadaddr); \ LOCAL_ATOM_DECL(broadcast); \ LOCAL_ATOM_DECL(dad_state); \ LOCAL_ATOM_DECL(debug); \ LOCAL_ATOM_DECL(deprecated); \ LOCAL_ATOM_DECL(description); \ LOCAL_ATOM_DECL(dhcp); \ LOCAL_ATOM_DECL(dhcp_v4_enabled); \ LOCAL_ATOM_DECL(ddns_enabled); \ LOCAL_ATOM_DECL(disconnected); \ LOCAL_ATOM_DECL(dns_eligible); \ LOCAL_ATOM_DECL(dns_server_addrs); \ LOCAL_ATOM_DECL(dns_suffix); \ LOCAL_ATOM_DECL(down); \ LOCAL_ATOM_DECL(dstaddr); \ LOCAL_ATOM_DECL(duplicate); \ LOCAL_ATOM_DECL(dynamic); \ LOCAL_ATOM_DECL(ethernet_csmacd); \ LOCAL_ATOM_DECL(fddi); \ LOCAL_ATOM_DECL(friendly_name); \ LOCAL_ATOM_DECL(host); \ LOCAL_ATOM_DECL(idn); \ LOCAL_ATOM_DECL(ieee1394); \ LOCAL_ATOM_DECL(ieee80211); \ LOCAL_ATOM_DECL(ieee80216_wman); \ LOCAL_ATOM_DECL(include_prefix); \ LOCAL_ATOM_DECL(include_wins_info); \ LOCAL_ATOM_DECL(include_gateways); \ LOCAL_ATOM_DECL(include_all_interfaces); \ LOCAL_ATOM_DECL(include_all_compartments); \ LOCAL_ATOM_DECL(include_tunnel_bindingorder); \ LOCAL_ATOM_DECL(index); \ LOCAL_ATOM_DECL(internal_oper_status); \ LOCAL_ATOM_DECL(invalid); \ LOCAL_ATOM_DECL(in_octets); \ LOCAL_ATOM_DECL(in_ucast_pkts); \ LOCAL_ATOM_DECL(in_nucast_pkts); \ LOCAL_ATOM_DECL(in_discards); \ LOCAL_ATOM_DECL(in_errors); \ LOCAL_ATOM_DECL(in_unknown_protos); \ LOCAL_ATOM_DECL(ipv4_enabled); \ LOCAL_ATOM_DECL(ipv6_enabled); \ LOCAL_ATOM_DECL(ipv6_index); \ LOCAL_ATOM_DECL(ipv6_managed_address_config_supported); \ LOCAL_ATOM_DECL(ipv6_other_stateful_config); \ LOCAL_ATOM_DECL(iso88025_tokenring); \ LOCAL_ATOM_DECL(last_change); \ LOCAL_ATOM_DECL(lease_lifetime); \ LOCAL_ATOM_DECL(length); \ LOCAL_ATOM_DECL(link_layer_address); \ LOCAL_ATOM_DECL(lower_layer_down); \ LOCAL_ATOM_DECL(manual); \ LOCAL_ATOM_DECL(mask); \ LOCAL_ATOM_DECL(master); \ LOCAL_ATOM_DECL(multicast); \ LOCAL_ATOM_DECL(multicast_addrs); \ LOCAL_ATOM_DECL(namereqd); \ LOCAL_ATOM_DECL(name_info); \ LOCAL_ATOM_DECL(netbios_over_tcpip_enabled); \ LOCAL_ATOM_DECL(netmask); \ LOCAL_ATOM_DECL(noarp); \ LOCAL_ATOM_DECL(nofqdn); \ LOCAL_ATOM_DECL(non_operational); \ LOCAL_ATOM_DECL(notrailers); \ LOCAL_ATOM_DECL(not_present); \ LOCAL_ATOM_DECL(no_multicast); \ LOCAL_ATOM_DECL(numerichost); \ LOCAL_ATOM_DECL(numericserv); \ LOCAL_ATOM_DECL(on_link_prefix_length); \ LOCAL_ATOM_DECL(operational); \ LOCAL_ATOM_DECL(oper_status); \ LOCAL_ATOM_DECL(other); \ LOCAL_ATOM_DECL(out_octets); \ LOCAL_ATOM_DECL(out_ucast_pkts); \ LOCAL_ATOM_DECL(out_nucast_pkts); \ LOCAL_ATOM_DECL(out_discards); \ LOCAL_ATOM_DECL(out_errors); \ LOCAL_ATOM_DECL(out_qlen); \ LOCAL_ATOM_DECL(phys_addr); \ LOCAL_ATOM_DECL(pointopoint); \ LOCAL_ATOM_DECL(portsel); \ LOCAL_ATOM_DECL(ppp); \ LOCAL_ATOM_DECL(preferred); \ LOCAL_ATOM_DECL(preferred_lifetime); \ LOCAL_ATOM_DECL(prefixes); \ LOCAL_ATOM_DECL(prefix_origin); \ LOCAL_ATOM_DECL(promisc); \ LOCAL_ATOM_DECL(random); \ LOCAL_ATOM_DECL(reasm_size); \ LOCAL_ATOM_DECL(receive_only); \ LOCAL_ATOM_DECL(register_adapter_suffix); \ LOCAL_ATOM_DECL(router_advertisement); \ LOCAL_ATOM_DECL(running); \ LOCAL_ATOM_DECL(service); \ LOCAL_ATOM_DECL(slave); \ LOCAL_ATOM_DECL(skip_unicast); \ LOCAL_ATOM_DECL(skip_anycast); \ LOCAL_ATOM_DECL(skip_multicast); \ LOCAL_ATOM_DECL(skip_dns_server); \ LOCAL_ATOM_DECL(skip_friendly_name); \ LOCAL_ATOM_DECL(software_loopback); \ LOCAL_ATOM_DECL(speed); \ 
LOCAL_ATOM_DECL(suffix_origin); \ LOCAL_ATOM_DECL(tentative); \ LOCAL_ATOM_DECL(testing); \ LOCAL_ATOM_DECL(transient); \ LOCAL_ATOM_DECL(tunnel); \ LOCAL_ATOM_DECL(unchanged); \ LOCAL_ATOM_DECL(unknown); \ LOCAL_ATOM_DECL(unicast_addrs); \ LOCAL_ATOM_DECL(unreachable); \ LOCAL_ATOM_DECL(up); \ LOCAL_ATOM_DECL(valid_lifetime); \ LOCAL_ATOM_DECL(well_known); \ LOCAL_ATOM_DECL(wwanpp); \ LOCAL_ATOM_DECL(wwanpp2); \ LOCAL_ATOM_DECL(zone_indices); #define LOCAL_ERROR_REASON_ATOMS \ LOCAL_ATOM_DECL(address_not_associated); \ LOCAL_ATOM_DECL(can_not_complete); \ LOCAL_ATOM_DECL(eaddrfamily); \ LOCAL_ATOM_DECL(ebadflags); \ LOCAL_ATOM_DECL(efail); \ LOCAL_ATOM_DECL(efamily); \ LOCAL_ATOM_DECL(efault); \ LOCAL_ATOM_DECL(emem); \ LOCAL_ATOM_DECL(enametoolong); \ LOCAL_ATOM_DECL(enodata); \ LOCAL_ATOM_DECL(enoname); \ LOCAL_ATOM_DECL(enxio); \ LOCAL_ATOM_DECL(eoverflow); \ LOCAL_ATOM_DECL(eservice); \ LOCAL_ATOM_DECL(esocktype); \ LOCAL_ATOM_DECL(esystem); \ LOCAL_ATOM_DECL(insufficient_buffer); \ LOCAL_ATOM_DECL(invalid_data); \ LOCAL_ATOM_DECL(invalid_flags); \ LOCAL_ATOM_DECL(invalid_parameter); \ LOCAL_ATOM_DECL(not_found); \ LOCAL_ATOM_DECL(not_enough_memory); \ LOCAL_ATOM_DECL(not_supported); \ LOCAL_ATOM_DECL(no_data); \ LOCAL_ATOM_DECL(no_function); \ LOCAL_ATOM_DECL(no_uniconde_traslation); #define LOCAL_ATOM_DECL(A) static ERL_NIF_TERM atom_##A LOCAL_ATOMS LOCAL_ERROR_REASON_ATOMS #undef LOCAL_ATOM_DECL /* *** net *** */ static ErlNifResourceType* net; static ErlNifResourceTypeInit netInit = { NULL, // net_dtor, NULL, // net_stop, NULL // (ErlNifResourceDown*) net_down }; /* ---------------------------------------------------------------------- * N I F F u n c t i o n s * ---------------------------------------------------------------------- * * Utility and admin functions: * ---------------------------- * nif_info/0 * nif_command/1 * * The "proper" net functions: * ------------------------------ * nif_gethostname/0 * nif_getnameinfo/2 * nif_getaddrinfo/3 * nif_getifaddrs/1 * nif_get_adapters_addresses/1 * nif_get_if_entry/1 * nif_get_interface_info/1 * nif_get_ip_address_table/1 * nif_if_name2index/1 * nif_if_index2name/1 * nif_if_names/0 * */ /* ---------------------------------------------------------------------- * nif_info * * Description: * This is currently just a placeholder... */ static ERL_NIF_TERM nif_info(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { ERL_NIF_TERM info, tmp; NDBG( ("NET", "info -> entry\r\n") ); tmp = enif_make_new_map(env); if (!enif_make_map_put(env, tmp, atom_debug, BOOL2ATOM(data.debug), &info)) info = tmp; NDBG( ("NET", "info -> done: %T\r\n", info) ); return info; } /* ---------------------------------------------------------------------- * nif_command * * Description: * This is a general purpose utility function. * * Arguments: * Command - This is a general purpose command, of any type. * Currently, the only supported command is: * * {debug, boolean()} */ static ERL_NIF_TERM nif_command(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { ERL_NIF_TERM ecmd, result; NDBG( ("NET", "command -> entry (%d)\r\n", argc) ); if (argc != 1) return enif_make_badarg(env); ecmd = argv[0]; NDBG( ("NET", "command -> ecmd: %T\r\n", ecmd) ); result = enet_command(env, ecmd); NDBG( ("NET", "command -> result: %T\r\n", result) ); return result; } /* * The command can, in principle, be anything, though currently we only * support a debug command. 
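 *
 * For example, the tuple {debug, true} (decoded by decode_bool() below)
 * enables the NDBG printouts by setting data.debug.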
*/ static ERL_NIF_TERM enet_command(ErlNifEnv* env, ERL_NIF_TERM cmd) { const ERL_NIF_TERM* t; int tsz; if (IS_TUPLE(env, cmd)) { /* Could be the debug tuple */ if (!GET_TUPLE(env, cmd, &tsz, &t)) return esock_make_error(env, esock_atom_einval); if (tsz != 2) return esock_make_error(env, esock_atom_einval); /* First element should be the atom 'debug' */ if (COMPARE(t[0], atom_debug) != 0) return esock_make_error(env, esock_atom_einval); return decode_bool(env, t[1], &data.debug); } else { return esock_make_error(env, esock_atom_einval); } } /* ---------------------------------------------------------------------- * nif_gethostname * * Description: * Access the hostname of the current processor. * */ static ERL_NIF_TERM nif_gethostname(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { ERL_NIF_TERM result; NDBG( ("NET", "nif_gethostname -> entry (%d)\r\n", argc) ); if (argc != 0) return enif_make_badarg(env); result = enet_gethostname(env); NDBG( ("NET", "nif_gethostname -> done when result: %T\r\n", result) ); return result; } static ERL_NIF_TERM enet_gethostname(ErlNifEnv* env) { ERL_NIF_TERM result; char buf[NET_MAXHOSTNAMELEN + 1]; int res; res = net_gethostname(buf, sizeof(buf)); NDBG( ("NET", "enet_gethostname -> gethostname res: %d\r\n", res) ); switch (res) { case 0: result = esock_make_ok2(env, MKS(env, buf)); break; case EFAULT: result = esock_make_error(env, atom_efault); break; case EINVAL: result = esock_make_error(env, esock_atom_einval); break; case ENAMETOOLONG: result = esock_make_error(env, atom_enametoolong); break; default: result = esock_make_error(env, MKI(env, res)); break; } return result; } /* ---------------------------------------------------------------------- * nif_getnameinfo * * Description: * Address-to-name translation in protocol-independent manner. * * Arguments: * SockAddr - Socket Address (address and port) * Flags - The flags argument modifies the behavior of getnameinfo(). */ static ERL_NIF_TERM nif_getnameinfo(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { #if defined(HAVE_GETNAMEINFO) ERL_NIF_TERM result; ERL_NIF_TERM eSockAddr, eFlags; int flags = 0; // Just in case... ESockAddress sa; SOCKLEN_T saLen = 0; // Just in case... NDBG( ("NET", "nif_getnameinfo -> entry (%d)\r\n", argc) ); if (argc != 2) return enif_make_badarg(env); eSockAddr = argv[0]; eFlags = argv[1]; NDBG( ("NET", "nif_getnameinfo -> " "\r\n SockAddr: %T" "\r\n Flags: %T" "\r\n", eSockAddr, eFlags) ); if (! esock_decode_sockaddr(env, eSockAddr, &sa, &saLen)) { NDBG( ("NET", "nif_getnameinfo -> failed decode sockaddr\r\n") ); return esock_make_error(env, esock_atom_einval); } NDBG( ("NET", "nif_getnameinfo -> (try) decode flags\r\n") ); if (!decode_nameinfo_flags(env, eFlags, &flags)) return enif_make_badarg(env); result = enet_getnameinfo(env, &sa, saLen, flags); NDBG( ("NET", "nif_getnameinfo -> done when result: " "\r\n %T\r\n", result) ); return result; #else return esock_make_error(env, esock_atom_enotsup); #endif } /* Given the provided sock(et) address (and flags), retrieve the host and * service info. 
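 * On success the result is {ok, #{host := string(), service := string()}},
 * i.e. a map built from the two buffers filled in by getnameinfo();
 * otherwise {error, Reason}.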
*/ #if defined(HAVE_GETNAMEINFO) static ERL_NIF_TERM enet_getnameinfo(ErlNifEnv* env, const ESockAddress* saP, SOCKLEN_T saLen, int flags) { ERL_NIF_TERM result; char host[HOSTNAME_LEN]; SOCKLEN_T hostLen = sizeof(host); char serv[SERVICE_LEN]; SOCKLEN_T servLen = sizeof(serv); int res = getnameinfo((struct sockaddr*) saP, saLen, host, hostLen, serv, servLen, flags); NDBG( ("NET", "enet_getnameinfo -> res: %d\r\n", res) ); switch (res) { case 0: { ERL_NIF_TERM keys[] = {atom_host, atom_service}; ERL_NIF_TERM vals[] = {MKS(env, host), MKS(env, serv)}; size_t numKeys = NUM(keys); ERL_NIF_TERM info; ESOCK_ASSERT( numKeys == NUM(vals) ); ESOCK_ASSERT( MKMA(env, keys, vals, numKeys, &info) ); result = esock_make_ok2(env, info); } break; #if defined(EAI_AGAIN) case EAI_AGAIN: result = esock_make_error(env, esock_atom_eagain); break; #endif #if defined(EAI_BADFLAGS) case EAI_BADFLAGS: result = esock_make_error(env, atom_ebadflags); break; #endif #if defined(EAI_FAIL) case EAI_FAIL: result = esock_make_error(env, atom_efail); break; #endif #if defined(EAI_FAMILY) case EAI_FAMILY: result = esock_make_error(env, atom_efamily); break; #endif #if defined(EAI_MEMORY) case EAI_MEMORY: result = esock_make_error(env, atom_emem); break; #endif #if !defined(__WIN32__) && defined(EAI_NONAME) case EAI_NONAME: result = esock_make_error(env, atom_enoname); break; #endif #if defined(EAI_OVERFLOW) case EAI_OVERFLOW: result = esock_make_error(env, atom_eoverflow); break; #endif #if defined(EAI_SYSTEM) case EAI_SYSTEM: result = esock_make_error_errno(env, get_errno()); break; #endif default: result = esock_make_error(env, esock_atom_einval); break; } return result; } #endif /* ---------------------------------------------------------------------- * nif_getaddrinfo * * Description: * Network address and service translation. * * Arguments: * Host - Host name (either a string or the atom undefined) * Service - Service name (either a string or the atom undefined) * Hints - Hints for the lookup (address info record) (currently *ignored*) */ static ERL_NIF_TERM nif_getaddrinfo(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { #if defined(HAVE_GETADDRINFO) ERL_NIF_TERM result, eHostName, eServName; //, eHints; char* hostName; char* servName; // struct addrinfo* hints; NDBG( ("NET", "nif_getaddrinfo -> entry (%d)\r\n", argc) ); if (argc != 3) { return enif_make_badarg(env); } eHostName = argv[0]; eServName = argv[1]; // eHints = argv[2]; NDBG( ("NET", "nif_getaddrinfo -> " "\r\n ehost: %T" "\r\n eservice: %T" "\r\n ehints: %T" "\r\n", argv[0], argv[1], argv[2]) ); if (!decode_addrinfo_string(env, eHostName, &hostName)) return enif_make_badarg(env); if (!decode_addrinfo_string(env, eServName, &servName)) return enif_make_badarg(env); /* if (decode_addrinfo_hints(env, eHints, &hints)) return enif_make_badarg(env); */ if ((hostName == NULL) && (servName == NULL)) return enif_make_badarg(env); result = enet_getaddrinfo(env, hostName, servName); if (hostName != NULL) FREE(hostName); if (servName != NULL) FREE(servName); /* if (hints != NULL) FREE(hints); */ NDBG( ("NET", "nif_getaddrinfo -> done when result: " "\r\n %T\r\n", result) ); return result; #else return esock_make_error(env, esock_atom_enotsup); #endif } #if defined(HAVE_GETADDRINFO) static ERL_NIF_TERM enet_getaddrinfo(ErlNifEnv* env, char* host, char* serv) { ERL_NIF_TERM result; struct addrinfo* addrInfoP; int res; NDBG( ("NET", "enet_getaddrinfo -> entry with" "\r\n host: %s" "\r\n serv: %s" "\r\n", ((host == NULL) ? "NULL" : host), ((serv == NULL) ? 
"NULL" : serv)) ); res = getaddrinfo(host, serv, NULL, &addrInfoP); NDBG( ("NET", "enet_getaddrinfo -> res: %d\r\n", res) ); switch (res) { case 0: { ERL_NIF_TERM addrInfo = encode_address_infos(env, addrInfoP); freeaddrinfo(addrInfoP); result = esock_make_ok2(env, addrInfo); } break; #if defined(EAI_ADDRFAMILY) case EAI_ADDRFAMILY: result = esock_make_error(env, atom_eaddrfamily); break; #endif #if defined(EAI_AGAIN) case EAI_AGAIN: result = esock_make_error(env, esock_atom_eagain); break; #endif #if defined(EAI_BADFLAGS) case EAI_BADFLAGS: result = esock_make_error(env, atom_ebadflags); break; #endif #if defined(EAI_FAIL) case EAI_FAIL: result = esock_make_error(env, atom_efail); break; #endif #if defined(EAI_FAMILY) case EAI_FAMILY: result = esock_make_error(env, atom_efamily); break; #endif #if defined(EAI_MEMORY) case EAI_MEMORY: result = esock_make_error(env, atom_emem); break; #endif #if defined(EAI_NODATA) case EAI_NODATA: result = esock_make_error(env, atom_enodata); break; #endif /* This value conflict with "some" other value on windows... */ #if !defined(__WIN32__) && defined(EAI_NONAME) case EAI_NONAME: result = esock_make_error(env, atom_enoname); break; #endif #if defined(EAI_SERVICE) case EAI_SERVICE: result = esock_make_error(env, atom_eservice); break; #endif #if defined(EAI_SOCKTYPE) case EAI_SOCKTYPE: result = esock_make_error(env, atom_esocktype); break; #endif #if defined(EAI_SYSTEM) case EAI_SYSTEM: result = esock_make_error(env, atom_esystem); break; #endif default: result = esock_make_error(env, esock_atom_einval); break; } return result; } #endif /* ---------------------------------------------------------------------- * nif_getifaddrs * * Description: * Get interface addresses * * Arguments: * Extra - A way to pass 'extra' arguments. * Currently only used for netns (name space). */ static ERL_NIF_TERM nif_getifaddrs(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { #if defined(__WIN32__) return enif_raise_exception(env, MKA(env, "notsup")); #elif defined(HAVE_GETIFADDRS) || defined(__PASE__) #ifdef HAVE_SETNS ERL_NIF_TERM extra; #endif char* netns; ERL_NIF_TERM result; NDBG( ("NET", "nif_getifaddrs -> entry (%d)\r\n", argc) ); if ((argc != 1) || !IS_MAP(env, argv[0])) { return enif_make_badarg(env); } #ifdef HAVE_SETNS extra = argv[0]; #endif #ifdef HAVE_SETNS /* We *currently* only support one extra option: netns */ if (!enet_getifaddrs_netns(env, extra, &netns)) { NDBG( ("NET", "nif_getifaddrs -> namespace: %s\r\n", netns) ); return enif_make_badarg(env); } #else netns = NULL; #endif result = enet_getifaddrs(env, netns); NDBG( ("NET", "nif_getifaddrs -> done when result: " "\r\n %T\r\n", result) ); return result; #else // HAVE_GETIFADDRS return esock_make_error(env, esock_atom_enotsup); #endif } #if defined(HAVE_GETIFADDRS) || defined(__PASE__) #ifdef HAVE_SETNS /* enet_getifaddrs_netns - extract the netns field from the 'extra' map * * Note that the 'extra' map *may* contain other options, but here we * only care about 'netns'. */ static BOOLEAN_T enet_getifaddrs_netns(ErlNifEnv* env, ERL_NIF_TERM map, char** netns) { size_t sz; ERL_NIF_TERM key; ERL_NIF_TERM value; unsigned int len; char* buf; int written; /* Note that its acceptable that the extra map is empty */ if (!enif_get_map_size(env, map, &sz) || (sz != 1)) { *netns = NULL; return TRUE; } /* Regardless of the content of the 'extra' map, we only care about 'netns' */ key = enif_make_atom(env, "netns"); if (!GET_MAP_VAL(env, map, key, &value)) { *netns = NULL; return TRUE; } /* So far so good. 
The value should be a string, check. */ if (!enif_is_list(env, value)) { *netns = NULL; // Just in case... return FALSE; } if (!enif_get_list_length(env, value, &len)) { *netns = NULL; // Just in case... return FALSE; } if ((buf = MALLOC(len+1)) == NULL) { *netns = NULL; // Just in case... return FALSE; } written = enif_get_string(env, value, buf, len+1, ERL_NIF_LATIN1); if (written == (len+1)) { *netns = buf; return TRUE; } else { *netns = NULL; // Just in case... return FALSE; } } #endif static ERL_NIF_TERM enet_getifaddrs(ErlNifEnv* env, char* netns) { ERL_NIF_TERM result; struct ifaddrs* ifap; int save_errno; #ifdef HAVE_SETNS int current_ns = 0; #endif NDBG( ("NET", "enet_getifaddrs -> entry with" "\r\n netns: %s" "\r\n", ((netns == NULL) ? "NULL" : netns)) ); #ifdef HAVE_SETNS if ((netns != NULL) && !change_network_namespace(netns, &current_ns, &save_errno)) return esock_make_error_errno(env, save_errno); #endif #ifdef __PASE__ if (0 == Qp2getifaddrs(&ifap)) { #else if (0 == getifaddrs(&ifap)) { #endif result = enet_getifaddrs_process(env, ifap); #ifdef __PASE__ Qp2freeifaddrs(ifap); #else freeifaddrs(ifap); #endif } else { save_errno = get_errno(); NDBG( ("NET", "enet_getifaddrs -> failed get addrs: %d", save_errno) ); result = esock_make_error_errno(env, save_errno); } #ifdef HAVE_SETNS if ((netns != NULL) && !restore_network_namespace(current_ns, &save_errno)) return esock_make_error_errno(env, save_errno); if (netns != NULL) FREE(netns); #endif NDBG( ("NET", "enet_getifaddrs -> done when" "\r\n result: %T" "\r\n", result) ); return result; } static ERL_NIF_TERM enet_getifaddrs_process(ErlNifEnv* env, struct ifaddrs* ifap) { ERL_NIF_TERM result; unsigned int len = ((ifap == NULL) ? 0 : enet_getifaddrs_length(ifap)); NDBG( ("NET", "enet_getifaddrs_process -> len: %d\r\n", len) ); if (len > 0) { ERL_NIF_TERM* array = MALLOC(len * sizeof(ERL_NIF_TERM)); unsigned int i = 0; struct ifaddrs* p = ifap; while (i < len) { ERL_NIF_TERM entry; encode_ifaddrs(env, p, &entry); NDBG( ("NET", "enet_getifaddrs_process -> entry: %T\r\n", entry) ); array[i] = entry; p = p->ifa_next; i++; } NDBG( ("NET", "enet_getifaddrs_process -> all entries processed\r\n") ); result = esock_make_ok2(env, MKLA(env, array, len)); FREE(array); } else { result = esock_make_ok2(env, MKEL(env)); } NDBG( ("NET", "enet_getifaddrs_process -> result: " "\r\n %T\r\n", result) ); return result; } /* Calculate the length of the interface address linked list * The list is NULL-terminated, so the only way is to * iterate through the list until we find next = NULL. 
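 * Note that the caller (enet_getifaddrs_process) only calls this with a
 * non-NULL list (NULL is mapped to length 0 there), which is why the
 * count below can safely start at 1.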
*/ static unsigned int enet_getifaddrs_length(struct ifaddrs* ifap) { unsigned int len = 1; struct ifaddrs* tmp; BOOLEAN_T done = FALSE; tmp = ifap; while (!done) { if (tmp->ifa_next != NULL) { len++; tmp = tmp->ifa_next; } else { done = TRUE; } } return len; } static void encode_ifaddrs(ErlNifEnv* env, struct ifaddrs* ifap, ERL_NIF_TERM* eifa) { ERL_NIF_TERM ename, eflags, eaddr, enetmask, eifu_key, eifu_value, edata; ERL_NIF_TERM eifAddrs; ename = encode_ifaddrs_name(env, ifap->ifa_name); NDBG( ("NET", "encode_ifaddrs -> name: %T\r\n", ename) ); eflags = encode_ifaddrs_flags(env, ifap->ifa_flags); NDBG( ("NET", "encode_ifaddrs -> flags: %T\r\n", eflags) ); eaddr = encode_ifaddrs_addr(env, ifap->ifa_addr); NDBG( ("NET", "encode_ifaddrs -> addr: %T\r\n", eaddr) ); enetmask = encode_ifaddrs_addr(env, ifap->ifa_netmask); NDBG( ("NET", "encode_ifaddrs -> netmask: %T\r\n", enetmask) ); if (ifap->ifa_dstaddr && (ifap->ifa_flags & IFF_POINTOPOINT)) { eifu_key = atom_dstaddr; eifu_value = encode_ifaddrs_addr(env, ifap->ifa_dstaddr); } else if (ifap->ifa_broadaddr && (ifap->ifa_flags & IFF_BROADCAST)) { eifu_key = atom_broadaddr; eifu_value = encode_ifaddrs_addr(env, ifap->ifa_broadaddr); } else { eifu_key = esock_atom_undefined; eifu_value = esock_atom_undefined; } NDBG( ("NET", "encode_ifaddrs -> ifu: " "\r\n key: %T" "\r\n val: %T" "\r\n", eifu_key, eifu_value) ); /* Don't know how to encode this yet... * We don't even know the size... */ edata = esock_atom_undefined; make_ifaddrs(env, ename, eflags, eaddr, enetmask, eifu_key, eifu_value, edata, &eifAddrs); NDBG( ("NET", "encode_ifaddrs -> encoded ifAddrs: %T\r\n", eifAddrs) ); *eifa = eifAddrs; } static ERL_NIF_TERM encode_ifaddrs_name(ErlNifEnv* env, char* name) { return ((name == NULL) ? esock_atom_undefined : MKS(env, name)); } static ERL_NIF_TERM encode_ifaddrs_flags(ErlNifEnv* env, unsigned int flags) { SocketTArray ta = TARRAY_CREATE(16); ERL_NIF_TERM eflags; #if defined(IFF_UP) if (flags & IFF_UP) TARRAY_ADD(ta, atom_up); #endif #if defined(IFF_BROADCAST) if (flags & IFF_BROADCAST) TARRAY_ADD(ta, atom_broadcast); #endif #if defined(IFF_DEBUG) if (flags & IFF_DEBUG) TARRAY_ADD(ta, atom_debug); #endif #if defined(IFF_LOOPBACK) if (flags & IFF_LOOPBACK) TARRAY_ADD(ta, esock_atom_loopback); #endif #if defined(IFF_POINTOPOINT) if (flags & IFF_POINTOPOINT) TARRAY_ADD(ta, atom_pointopoint); #endif #if defined(IFF_NOTRAILERS) if (flags & IFF_NOTRAILERS) TARRAY_ADD(ta, atom_notrailers); #endif #if defined(IFF_RUNNING) if (flags & IFF_RUNNING) TARRAY_ADD(ta, atom_running); #endif #if defined(IFF_NOARP) if (flags & IFF_NOARP) TARRAY_ADD(ta, atom_noarp); #endif #if defined(IFF_PROMISC) if (flags & IFF_PROMISC) TARRAY_ADD(ta, atom_promisc); #endif #if defined(IFF_MASTER) if (flags & IFF_MASTER) TARRAY_ADD(ta, atom_master); #endif #if defined(IFF_SLAVE) if (flags & IFF_SLAVE) TARRAY_ADD(ta, atom_slave); #endif #if defined(IFF_MULTICAST) if (flags & IFF_MULTICAST) TARRAY_ADD(ta, atom_multicast); #endif #if defined(IFF_PORTSEL) if (flags & IFF_PORTSEL) TARRAY_ADD(ta, atom_portsel); #endif #if defined(IFF_AUTOMEDIA) if (flags & IFF_AUTOMEDIA) TARRAY_ADD(ta, atom_automedia); #endif #if defined(IFF_DYNAMIC) if (flags & IFF_DYNAMIC) TARRAY_ADD(ta, atom_dynamic); #endif TARRAY_TOLIST(ta, env, &eflags); return eflags; } static ERL_NIF_TERM encode_ifaddrs_addr(ErlNifEnv* env, struct sockaddr* sa) { return encode_sockaddr(env, sa); } static void make_ifaddrs(ErlNifEnv* env, ERL_NIF_TERM ename, ERL_NIF_TERM eflags, ERL_NIF_TERM eaddr, ERL_NIF_TERM enetmask, 
ERL_NIF_TERM eifu_key, ERL_NIF_TERM eifu_value,
                         ERL_NIF_TERM edata,
                         ERL_NIF_TERM* eifAddrs)
{
    /* Several of these values can be (the atom) undefined, which
     * means that they should *not* be included in the result map. */
    ERL_NIF_TERM keys[6]; // There are only (at most) six (6) fields...
    ERL_NIF_TERM vals[6];
    size_t       len = NUM(keys); // Just in case...
    size_t       idx = 0;

    /* *** Name *** */
    NDBG( ("NET", "make_ifaddrs -> name: %T\r\n", ename) );
    keys[idx] = esock_atom_name;
    vals[idx] = ename;
    idx++;

    /* *** Flags *** */
    NDBG( ("NET", "make_ifaddrs -> flags: %T\r\n", eflags) );
    keys[idx] = esock_atom_flags;
    vals[idx] = eflags;
    idx++;

    /* *** Addr (can be 'undefined' = NULL) *** */
    NDBG( ("NET", "make_ifaddrs -> addr: %T\r\n", eaddr) );
    if (COMPARE(eaddr, esock_atom_undefined) != 0) {
        keys[idx] = esock_atom_addr;
        vals[idx] = eaddr;
        idx++;
    } else {
        len--;
    }

    /* *** Netmask (can be 'undefined' = NULL) *** */
    NDBG( ("NET", "make_ifaddrs -> netmask: %T\r\n", enetmask) );
    if (COMPARE(enetmask, esock_atom_undefined) != 0) {
        keys[idx] = atom_netmask;
        vals[idx] = enetmask;
        idx++;
    } else {
        len--;
    }

    /* *** Ifu: dstaddr | broadaddr (can be 'undefined' = NULL) *** */
    NDBG( ("NET", "make_ifaddrs -> ifu: %T, %T\r\n", eifu_key, eifu_value) );
    if ((COMPARE(eifu_key, esock_atom_undefined) != 0) &&
        (COMPARE(eifu_value, esock_atom_undefined) != 0)) {
        keys[idx] = eifu_key;
        vals[idx] = eifu_value;
        idx++;
    } else {
        len--;
    }

    /* *** Data (can be 'undefined' = NULL) *** */
    NDBG( ("NET", "make_ifaddrs -> data: %T\r\n", edata) );
    if (COMPARE(edata, esock_atom_undefined) != 0) {
        keys[idx] = esock_atom_data;
        vals[idx] = edata;
        idx++;
    } else {
        len--;
    }

    NDBG( ("NET", "make_ifaddrs -> construct ifa with:"
           "\r\n len: %d"
           "\r\n", len) );

    ESOCK_ASSERT( MKMA(env, keys, vals, len, eifAddrs) );
}
#endif // HAVE_GETIFADDRS



/* ----------------------------------------------------------------------
 * nif_get_adapters_addresses
 *
 * Description:
 * Get adapters addresses.
 * This is a windows only function!
 *
 * Arguments:
 * Args - A way to pass 'extra' arguments.
* #{family := unspec (default) | inet | inet6, * flags := flags(), * debug := boolean() (optional)} * * flags() :: #{skip_unicast :: boolean() (default false), * skip_anycast :: boolean() (default true), * skip_multicast :: boolean() (default true), * skip_dns_server :: boolean() (default true), * skip_friendly_name :: boolean() (default true), * include_prefix :: boolean() (default true), * include_wins_info :: boolean() (default false), * include_gateways :: boolean() (default false), * include_all_interfaces :: boolean() (default false), * include_all_compartments :: boolean() (default false), * include_tunnel_bindingorder :: boolean() (default false)} * Suggested Help atoms: * no_skips_all_includes * all_skips_no_includes * no_skips_no_includes * all_skips_all_includes */ static ERL_NIF_TERM nif_get_adapters_addresses(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { #if !defined(__WIN32__) return enif_raise_exception(env, MKA(env, "notsup")); #else ERL_NIF_TERM result, eargs; ULONG fam, flags; BOOLEAN_T dbg; NDBG( ("NET", "nif_get_adapters_addresses -> entry (%d)\r\n", argc) ); if ((argc != 1) || !IS_MAP(env, argv[0])) { return enif_make_badarg(env); } eargs = argv[0]; if (!enet_get_adapters_addresses_args_family(env, eargs, &fam)) return enif_make_badarg(env); if (!enet_get_adapters_addresses_args_flags(env, eargs, &flags)) return enif_make_badarg(env); dbg = enet_get_adapters_addresses_args_debug(env, eargs); result = enet_get_adapters_addresses(env, dbg, fam, flags); NDBG2( dbg, ("NET", "nif_get_adapters_addresses -> done when result: " "\r\n %T\r\n", result) ); return result; #endif } #if defined(__WIN32__) static BOOLEAN_T enet_get_adapters_addresses_args_debug(ErlNifEnv* env, const ERL_NIF_TERM eargs) { return get_debug(env, eargs); } #endif #if defined(__WIN32__) static BOOLEAN_T enet_get_adapters_addresses_args_family(ErlNifEnv* env, const ERL_NIF_TERM eargs, ULONG* fam) { ERL_NIF_TERM key = esock_atom_family; ERL_NIF_TERM eval; DWORD val; if (!GET_MAP_VAL(env, eargs, key, &eval)) { *fam = AF_UNSPEC; // Default return TRUE; } else { if (!IS_ATOM(env, eval)) return FALSE; if (COMPARE(eval, esock_atom_unspec)) val = AF_UNSPEC; else if (COMPARE(eval, esock_atom_inet)) val = AF_INET; else if (COMPARE(eval, esock_atom_inet6)) val = AF_INET6; else return FALSE; *fam = val; return TRUE; } } #endif // __WIN32__ #if defined(__WIN32__) static BOOLEAN_T enet_get_adapters_addresses_args_flags(ErlNifEnv* env, const ERL_NIF_TERM eargs, ULONG* flags) { ERL_NIF_TERM eflags; ULONG val = 0; if (!GET_MAP_VAL(env, eargs, esock_atom_flags, &eflags)) { // Default *flags = GAA_FLAG_INCLUDE_PREFIX | GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_SKIP_FRIENDLY_NAME | GAA_FLAG_SKIP_MULTICAST; return TRUE; } else { if (!IS_MAP(env, eflags)) return FALSE; /* skip unicast */ if (esock_get_bool_from_map(env, eflags, atom_skip_unicast, FALSE)) val |= GAA_FLAG_SKIP_UNICAST; /* skip anycast */ if (esock_get_bool_from_map(env, eflags, atom_skip_anycast, TRUE)) val |= GAA_FLAG_SKIP_ANYCAST; /* skip multicast */ if (esock_get_bool_from_map(env, eflags, atom_skip_multicast, TRUE)) val |= GAA_FLAG_SKIP_MULTICAST; /* skip dns-server */ if (esock_get_bool_from_map(env, eflags, atom_skip_dns_server, TRUE)) val |= GAA_FLAG_SKIP_DNS_SERVER; /* skip fiendly-name */ if (esock_get_bool_from_map(env, eflags, atom_skip_friendly_name, TRUE)) val |= GAA_FLAG_SKIP_FRIENDLY_NAME; /* include prefix */ if (esock_get_bool_from_map(env, eflags, atom_include_prefix, TRUE)) val |= GAA_FLAG_INCLUDE_PREFIX; /* include 
wins-info */ if (esock_get_bool_from_map(env, eflags, atom_include_wins_info, FALSE)) val |= GAA_FLAG_INCLUDE_WINS_INFO; /* include gateways */ if (esock_get_bool_from_map(env, eflags, atom_include_gateways, FALSE)) val |= GAA_FLAG_INCLUDE_GATEWAYS; /* include all-interfaces */ if (esock_get_bool_from_map(env, eflags, atom_include_all_interfaces, FALSE)) val |= GAA_FLAG_INCLUDE_ALL_INTERFACES; /* include all-compartments */ if (esock_get_bool_from_map(env, eflags, atom_include_all_compartments, FALSE)) val |= GAA_FLAG_INCLUDE_ALL_COMPARTMENTS; /* include tunnel-bindingorder */ if (esock_get_bool_from_map(env, eflags, atom_include_tunnel_bindingorder, FALSE)) val |= GAA_FLAG_INCLUDE_TUNNEL_BINDINGORDER; *flags = val; return TRUE; } } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM enet_get_adapters_addresses(ErlNifEnv* env, BOOLEAN_T dbg, ULONG family, ULONG flags) { int i; DWORD ret; unsigned long ipAdAddrsSz = 16 * 1024; IP_ADAPTER_ADDRESSES* ipAdAddrsP; ERL_NIF_TERM eret, addrs, result; ipAdAddrsP = (IP_ADAPTER_ADDRESSES*) MALLOC(ipAdAddrsSz); for (i = 17; i; i--) { ret = GetAdaptersAddresses(family, flags, NULL, ipAdAddrsP, &ipAdAddrsSz); if (ret == NO_ERROR) { /* We are done! */ break; } else if (ret == ERROR_BUFFER_OVERFLOW) { /* Not large enough */ ipAdAddrsP = REALLOC(ipAdAddrsP, ipAdAddrsSz); continue; } else { /* Failure */ i = 0; } if (ret == NO_ERROR) break; if (ret == ERROR_BUFFER_OVERFLOW) continue; i = 0; } if (! i) { NDBG2( dbg, ("NET", "enet_get_adapters_addresses -> " "try encode error (%d)\r\n", ret) ); FREE(ipAdAddrsP); switch (ret) { case ERROR_ADDRESS_NOT_ASSOCIATED: eret = atom_address_not_associated; break; case ERROR_BUFFER_OVERFLOW: eret = atom_insufficient_buffer; break; case ERROR_INVALID_PARAMETER: eret = atom_invalid_parameter; break; case ERROR_NO_DATA: eret = atom_no_data; break; case ERROR_NOT_ENOUGH_MEMORY: eret = atom_not_enough_memory; break; default: eret = MKI(env, ret); break; } result = esock_make_error(env, eret); } else { NDBG2( dbg, ("NET", "enet_get_adapters_addresses -> " "try encode addresses\r\n") ); addrs = enet_adapters_addresses_encode(env, dbg, ipAdAddrsP); result = esock_make_ok2(env, addrs); } NDBG2( dbg, ("NET", "enet_get_adapters_addresses -> done with:" "\r\n result: %T" "\r\n", result) ); return result; } #endif #if defined(__WIN32__) static ERL_NIF_TERM enet_adapters_addresses_encode(ErlNifEnv* env, BOOLEAN_T dbg, IP_ADAPTER_ADDRESSES* ipAdAddrsP) { /* No idea how many we actually need, so just get some */ SocketTArray adapterArray = TARRAY_CREATE(16); IP_ADAPTER_ADDRESSES* addrsP = ipAdAddrsP; ERL_NIF_TERM entry, result; NDBG2( dbg, ("NET", "enet_get_adapters_addresses -> entry\r\n") ); while (addrsP != NULL) { /* Process current adapter */ entry = enet_adapter_addresses_encode(env, dbg, addrsP); NDBG2( dbg, ("NET", "enet_get_adapters_addresses -> entry encoded:" "\r\n Adapter Entry: %T" "\r\n", entry) ); TARRAY_ADD(adapterArray, entry); addrsP = addrsP->Next; } TARRAY_TOLIST(adapterArray, env, &result); NDBG2( dbg, ("NET", "enet_get_adapters_addresses -> done:" "\r\n %T" "\r\n", result) ); return result; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM enet_adapter_addresses_encode(ErlNifEnv* env, BOOLEAN_T dbg, IP_ADAPTER_ADDRESSES* ipAdAddrsP) { ERL_NIF_TERM ifIdx, name; ERL_NIF_TERM unicastAddrs, anycastAddrs, multicastAddrs, dnsServerAddrs; ERL_NIF_TERM dnsSuffix, description, flags, physAddr, fName, mtu, ifType; ERL_NIF_TERM operStatus, zoneIndices, ipv6IfIdx, prefixes; ERL_NIF_TERM map; ifIdx = 
MKI(env, ipAdAddrsP->IfIndex); name = MKS(env, ipAdAddrsP->AdapterName); unicastAddrs = encode_adapter_unicast_addrs(env, ipAdAddrsP->FirstUnicastAddress); anycastAddrs = encode_adapter_anycast_addrs(env, ipAdAddrsP->FirstAnycastAddress); multicastAddrs = encode_adapter_multicast_addrs(env, ipAdAddrsP->FirstMulticastAddress); dnsServerAddrs = encode_adapter_dns_server_addrs(env, ipAdAddrsP->FirstDnsServerAddress); dnsSuffix = encode_wchar(env, ipAdAddrsP->DnsSuffix); description = encode_wchar(env, ipAdAddrsP->Description); fName = encode_wchar(env, ipAdAddrsP->FriendlyName); physAddr = encode_uchar(env, ipAdAddrsP->PhysicalAddressLength, ipAdAddrsP->PhysicalAddress); flags = encode_adapter_flags(env, ipAdAddrsP); mtu = MKUI(env, ipAdAddrsP->Mtu); ifType = encode_if_type(env, ipAdAddrsP->IfType); operStatus = encode_if_oper_status(env, ipAdAddrsP->OperStatus); zoneIndices = encode_adapter_zone_indices(env, ipAdAddrsP->ZoneIndices, NUM(ipAdAddrsP->ZoneIndices)); ipv6IfIdx = MKI(env, ipAdAddrsP->Ipv6IfIndex); prefixes = encode_adapter_prefixes(env, ipAdAddrsP->FirstPrefix); /* *** _LH *** */ // tLinkSpeed = MKUI64(env, ipAdAddrsP->TransmitLinkSpeed); // rLinkSpeed = MKUI64(env, ipAdAddrsP->ReceiveLinkSpeed); // winsServerAddr = ... // gatewayAddr = ... // ipv4Metric = ... // ipv6Metric = ... // luid = ... // dhcpv4Server = ... // compartmentId = ... // networkDuid = ... // connectionType = ... // tunnelType = ... // dhcpv6Server = ... // dhcpv6ClientDuid = ... // dhcpv6Iaid = ... // dnsSuffix = ... { ERL_NIF_TERM keys[] = {atom_index, esock_atom_name, atom_unicast_addrs, atom_anycast_addrs, atom_multicast_addrs, atom_dns_server_addrs, atom_dns_suffix, atom_description, atom_friendly_name, atom_phys_addr, esock_atom_flags, esock_atom_mtu, esock_atom_type, atom_oper_status, atom_zone_indices, atom_ipv6_index, atom_prefixes/* , */ /* atom_transmit_link_speed, */ /* atom_receive_link_speed */ }; ERL_NIF_TERM vals[] = {ifIdx, name, unicastAddrs, anycastAddrs, multicastAddrs, dnsServerAddrs, dnsSuffix, description, fName, physAddr, flags, mtu, ifType, operStatus, zoneIndices, ipv6IfIdx, prefixes /* , */ /* tLinkSpeed, */ /* rLinkSpeed */ }; size_t numKeys = NUM(keys); size_t numVals = NUM(vals); ESOCK_ASSERT( numKeys == numVals ); ESOCK_ASSERT( MKMA(env, keys, vals, numKeys, &map) ); } return map; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_flags(ErlNifEnv* env, IP_ADAPTER_ADDRESSES* ipAdAddrsP) { ERL_NIF_TERM ddnsEnabled, regAdSuffix, dhcpv4Enabled, recvOnly; ERL_NIF_TERM noMulticast, ipv6OtherStatefulCfg, netbiosOverTcpipEnabled; ERL_NIF_TERM ipv4Enabled, ipv6Enabled, ipv6ManagedAddrCfgSup; ERL_NIF_TERM eflags; #if defined(ESOCK_WIN_XP) /* This is just a dummy-ifdef ... there is no such flag (ESOCK_WIN_XP). * But this is a way to keep the code... 
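 * (The ESOCK_WIN_XP branch, kept only for reference, reads the individual
 * boolean members, while the #else branch that is actually compiled tests
 * the corresponding IP_ADAPTER_* bits in the Flags member.)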
*/ ddnsEnabled = BOOL2ATOM(ipAdAddrsP->DdnsEnabled); regAdSuffix = BOOL2ATOM(ipAdAddrsP->RegisterAdapterSuffix); dhcpv4Enabled = BOOL2ATOM(ipAdAddrsP->Dhcpv4Enabled); recvOnly = BOOL2ATOM(ipAdAddrsP->ReceiveOnly); noMulticast = BOOL2ATOM(ipAdAddrsP->NoMulticast); ipv6OtherStatefulCfg = BOOL2ATOM(ipAdAddrsP->Ipv6OtherStatefulConfig); netbiosOverTcpipEnabled = BOOL2ATOM(ipAdAddrsP->NetbiosOverTcpipEnabled); ipv4Enabled = BOOL2ATOM(ipAdAddrsP->Ipv4Enabled); ipv6Enabled = BOOL2ATOM(ipAdAddrsP->Ipv6Enabled); ipv6ManagedAddrCfgSup = BOOL2ATOM(ipAdAddrsP->Ipv6ManagedAddressConfigurationSupported); #else ddnsEnabled = BOOL2ATOM(ipAdAddrsP->Flags & IP_ADAPTER_DDNS_ENABLED); regAdSuffix = BOOL2ATOM(ipAdAddrsP->Flags & IP_ADAPTER_REGISTER_ADAPTER_SUFFIX); dhcpv4Enabled = BOOL2ATOM(ipAdAddrsP->Flags & IP_ADAPTER_DHCP_ENABLED); recvOnly = BOOL2ATOM(ipAdAddrsP->Flags & IP_ADAPTER_RECEIVE_ONLY); noMulticast = BOOL2ATOM(ipAdAddrsP->Flags & IP_ADAPTER_NO_MULTICAST); ipv6OtherStatefulCfg = BOOL2ATOM(ipAdAddrsP->Flags & IP_ADAPTER_IPV6_OTHER_STATEFUL_CONFIG); netbiosOverTcpipEnabled = BOOL2ATOM(ipAdAddrsP->Flags & IP_ADAPTER_NETBIOS_OVER_TCPIP_ENABLED); ipv4Enabled = BOOL2ATOM(ipAdAddrsP->Flags & IP_ADAPTER_IPV4_ENABLED); ipv6Enabled = BOOL2ATOM(ipAdAddrsP->Flags & IP_ADAPTER_IPV6_ENABLED); ipv6ManagedAddrCfgSup = BOOL2ATOM(ipAdAddrsP->Flags & IP_ADAPTER_IPV6_MANAGE_ADDRESS_CONFIG); #endif { ERL_NIF_TERM keys[] = {atom_ddns_enabled, atom_register_adapter_suffix, atom_dhcp_v4_enabled, atom_receive_only, atom_no_multicast, atom_ipv6_other_stateful_config, atom_netbios_over_tcpip_enabled, atom_ipv4_enabled, atom_ipv6_enabled, atom_ipv6_managed_address_config_supported}; ERL_NIF_TERM vals[] = {ddnsEnabled, regAdSuffix, dhcpv4Enabled, recvOnly, noMulticast, ipv6OtherStatefulCfg, netbiosOverTcpipEnabled, ipv4Enabled, ipv6Enabled, ipv6ManagedAddrCfgSup}; size_t numKeys = NUM(keys); size_t numVals = NUM(vals); ESOCK_ASSERT( numKeys == numVals ); ESOCK_ASSERT( MKMA(env, keys, vals, numKeys, &eflags) ); } return eflags; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_unicast_addrs(ErlNifEnv* env, IP_ADAPTER_UNICAST_ADDRESS* firstP) { IP_ADAPTER_UNICAST_ADDRESS* tmp = firstP; SocketTArray ta = TARRAY_CREATE(16); ERL_NIF_TERM eaddrs; while (tmp != NULL) { TARRAY_ADD(ta, encode_adapter_unicast_addr(env, tmp)); tmp = tmp->Next; } TARRAY_TOLIST(ta, env, &eaddrs); return eaddrs; } #endif // __WIN32__ #if defined(__WIN32__) /* * unicast_address() :: * #{flags := #{dns_eligible := boolean(), * transient := boolean()}, * addr := socket:address(), * prefix_origin := ip_prefix_origin(), * suffix_origin := ip_suffix_origin(), * dad_state := ip_dad_state(), * valid_lifetime := ulong(), * preferred_lifetime := ulong(), * lease_lifetime := ulong(), * on_link_prefix_length := uint8()} */ static ERL_NIF_TERM encode_adapter_unicast_addr(ErlNifEnv* env, IP_ADAPTER_UNICAST_ADDRESS* addrP) { ERL_NIF_TERM eflags, esa, eporig, esorig, edstate, evlt, eplt, ellt; /* ERL_NIF_TERM eplen; - Not on XP */ ERL_NIF_TERM eua; eflags = encode_adapter_unicast_addr_flags(env, addrP->Flags); esa = encode_adapter_unicast_addr_sockaddr(env, addrP->Address.lpSockaddr); eporig = encode_adapter_unicast_addr_porig(env, addrP->PrefixOrigin); esorig = encode_adapter_unicast_addr_sorig(env, addrP->SuffixOrigin); edstate = encode_adapter_unicast_addr_dad_state(env, addrP->DadState); evlt = MKUL(env, addrP->ValidLifetime); eplt = MKUL(env, addrP->PreferredLifetime); ellt = MKUL(env, addrP->LeaseLifetime); /* eplen = MKUI(env, 
addrP->OnLinkPrefixLength); - Not on XP */ /* if (addrP->Address.lpSockaddr->sa_family == AF_INET) { struct sockaddr* sinP = addrP->Address.lpSockaddr; ERL_NIF_TERM keys[] = {esock_atom_flags, esock_atom_addr, MKA(env, "raw_addr"), MKA(env, "raw_addr_ntohl"), atom_prefix_origin, atom_suffix_origin, atom_dad_state, atom_valid_lifetime, atom_preferred_lifetime, atom_lease_lifetime }; ERL_NIF_TERM vals[] = {eflags, esa, MKUI(env, (DWORD) (((struct sockaddr_in *)sinP)->sin_addr.s_addr)), MKUI(env, ntohl((DWORD) (((struct sockaddr_in *)sinP)->sin_addr.s_addr))), eporig, esorig, edstate, evlt, eplt, ellt }; size_t numKeys = NUM(keys); size_t numVals = NUM(vals); ESOCK_ASSERT( numKeys == numVals ); ESOCK_ASSERT( MKMA(env, keys, vals, numKeys, &eua) ); } else */ { ERL_NIF_TERM keys[] = {esock_atom_flags, esock_atom_addr, atom_prefix_origin, atom_suffix_origin, atom_dad_state, atom_valid_lifetime, atom_preferred_lifetime, atom_lease_lifetime/* , on_link_prefix_length Not on XP */ }; ERL_NIF_TERM vals[] = {eflags, esa, eporig, esorig, edstate, evlt, eplt, ellt/*, eplen Not pn XP */ }; size_t numKeys = NUM(keys); size_t numVals = NUM(vals); ESOCK_ASSERT( numKeys == numVals ); ESOCK_ASSERT( MKMA(env, keys, vals, numKeys, &eua) ); } return eua; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_unicast_addr_flags(ErlNifEnv* env, DWORD flags) { ERL_NIF_TERM map; ERL_NIF_TERM dnsEl = BOOL2ATOM(flags & IP_ADAPTER_ADDRESS_DNS_ELIGIBLE); ERL_NIF_TERM trans = BOOL2ATOM(flags & IP_ADAPTER_ADDRESS_TRANSIENT); ERL_NIF_TERM keys[] = {atom_dns_eligible, atom_transient}; ERL_NIF_TERM vals[] = {dnsEl, trans}; size_t numKeys = NUM(keys); size_t numVals = NUM(vals); ESOCK_ASSERT( numKeys == numVals ); ESOCK_ASSERT( MKMA(env, keys, vals, numKeys, &map) ); return map; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_unicast_addr_sockaddr(ErlNifEnv* env, struct sockaddr* addrP) { return encode_sockaddr(env, addrP); } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_unicast_addr_porig(ErlNifEnv* env, IP_PREFIX_ORIGIN porig) { ERL_NIF_TERM eporig; switch (porig) { case IpPrefixOriginOther: eporig = atom_other; break; case IpPrefixOriginManual: eporig = atom_manual; break; case IpPrefixOriginWellKnown: eporig = atom_well_known; break; case IpPrefixOriginDhcp: eporig = atom_dhcp; break; case IpPrefixOriginRouterAdvertisement: eporig = atom_router_advertisement; break; case IpPrefixOriginUnchanged: eporig = atom_unchanged; break; default: eporig = MKI(env, (int) porig); break; } return eporig; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_unicast_addr_sorig(ErlNifEnv* env, IP_SUFFIX_ORIGIN sorig) { ERL_NIF_TERM esorig; switch (sorig) { case IpSuffixOriginOther: esorig = atom_other; break; case IpSuffixOriginManual: esorig = atom_manual; break; case IpSuffixOriginWellKnown: esorig = atom_well_known; break; case IpSuffixOriginDhcp: esorig = atom_dhcp; break; case IpSuffixOriginLinkLayerAddress: esorig = atom_link_layer_address; break; case IpSuffixOriginRandom: esorig = atom_random; break; case IpSuffixOriginUnchanged: esorig = atom_unchanged; break; default: esorig = MKI(env, (int) sorig); break; } return esorig; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_unicast_addr_dad_state(ErlNifEnv* env, IP_DAD_STATE dstate) { ERL_NIF_TERM edstate; switch (dstate) { case IpDadStateInvalid: edstate = atom_invalid; break; case IpDadStateTentative: edstate = atom_tentative; break; 
case IpDadStateDuplicate: edstate = atom_duplicate; break; case IpDadStateDeprecated: edstate = atom_deprecated; break; case IpDadStatePreferred: edstate = atom_preferred; break; default: edstate = MKI(env, (int) dstate); break; } return edstate; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_anycast_addrs(ErlNifEnv* env, IP_ADAPTER_ANYCAST_ADDRESS* firstP) { IP_ADAPTER_ANYCAST_ADDRESS* tmp = firstP; SocketTArray ta = TARRAY_CREATE(16); ERL_NIF_TERM eaddrs; while (tmp != NULL) { TARRAY_ADD(ta, encode_adapter_anycast_addr(env, tmp)); tmp = tmp->Next; } TARRAY_TOLIST(ta, env, &eaddrs); return eaddrs; } #endif // __WIN32__ #if defined(__WIN32__) /* * anycast_address() :: * #{flags := #{dns_eligible := boolean(), * transient := boolean()}, * addr := socket:address()} */ static ERL_NIF_TERM encode_adapter_anycast_addr(ErlNifEnv* env, IP_ADAPTER_ANYCAST_ADDRESS* addrP) { ERL_NIF_TERM eflags, esa; ERL_NIF_TERM eaa; eflags = encode_adapter_anycast_addr_flags(env, addrP->Flags); esa = encode_adapter_anycast_addr_sockaddr(env, addrP->Address.lpSockaddr); { ERL_NIF_TERM keys[] = {esock_atom_flags, esock_atom_addr}; ERL_NIF_TERM vals[] = {eflags, esa}; size_t numKeys = NUM(keys); size_t numVals = NUM(vals); ESOCK_ASSERT( numKeys == numVals ); ESOCK_ASSERT( MKMA(env, keys, vals, numKeys, &eaa) ); } return eaa; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_anycast_addr_flags(ErlNifEnv* env, DWORD flags) { return encode_adapter_unicast_addr_flags(env, flags); } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_anycast_addr_sockaddr(ErlNifEnv* env, struct sockaddr* addrP) { return encode_sockaddr(env, addrP); } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_multicast_addrs(ErlNifEnv* env, IP_ADAPTER_MULTICAST_ADDRESS* firstP) { IP_ADAPTER_MULTICAST_ADDRESS* tmp = firstP; SocketTArray ta = TARRAY_CREATE(16); ERL_NIF_TERM eaddrs; while (tmp != NULL) { TARRAY_ADD(ta, encode_adapter_multicast_addr(env, tmp)); tmp = tmp->Next; } TARRAY_TOLIST(ta, env, &eaddrs); return eaddrs; } #endif // __WIN32__ #if defined(__WIN32__) /* * multicast_address() :: * #{flags := #{dns_eligible := boolean(), * transient := boolean()}, * addr := socket:address()} */ static ERL_NIF_TERM encode_adapter_multicast_addr(ErlNifEnv* env, IP_ADAPTER_MULTICAST_ADDRESS* addrP) { ERL_NIF_TERM eflags, esa; ERL_NIF_TERM ema; eflags = encode_adapter_multicast_addr_flags(env, addrP->Flags); esa = encode_adapter_multicast_addr_sockaddr(env, addrP->Address.lpSockaddr); { ERL_NIF_TERM keys[] = {esock_atom_flags, esock_atom_addr}; ERL_NIF_TERM vals[] = {eflags, esa}; size_t numKeys = NUM(keys); size_t numVals = NUM(vals); ESOCK_ASSERT( numKeys == numVals ); ESOCK_ASSERT( MKMA(env, keys, vals, numKeys, &ema) ); } return ema; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_multicast_addr_flags(ErlNifEnv* env, DWORD flags) { return encode_adapter_unicast_addr_flags(env, flags); } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_multicast_addr_sockaddr(ErlNifEnv* env, struct sockaddr* addrP) { return encode_sockaddr(env, addrP); } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_dns_server_addrs(ErlNifEnv* env, IP_ADAPTER_DNS_SERVER_ADDRESS* firstP) { IP_ADAPTER_DNS_SERVER_ADDRESS* tmp = firstP; SocketTArray ta = TARRAY_CREATE(16); ERL_NIF_TERM eaddrs; while (tmp != NULL) { TARRAY_ADD(ta, encode_adapter_dns_server_addr(env, tmp)); tmp = 
tmp->Next; } TARRAY_TOLIST(ta, env, &eaddrs); return eaddrs; } #endif // __WIN32__ #if defined(__WIN32__) /* * dns_server_address() :: * #{addr := socket:address()} */ static ERL_NIF_TERM encode_adapter_dns_server_addr(ErlNifEnv* env, IP_ADAPTER_DNS_SERVER_ADDRESS* addrP) { ERL_NIF_TERM esa; ERL_NIF_TERM edsa; esa = encode_adapter_dns_server_addr_sockaddr(env, addrP->Address.lpSockaddr); { ERL_NIF_TERM keys[] = {esock_atom_addr}; ERL_NIF_TERM vals[] = {esa}; size_t numKeys = NUM(keys); size_t numVals = NUM(vals); ESOCK_ASSERT( numKeys == numVals ); ESOCK_ASSERT( MKMA(env, keys, vals, numKeys, &edsa) ); } return edsa; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_dns_server_addr_sockaddr(ErlNifEnv* env, struct sockaddr* addrP) { return encode_sockaddr(env, addrP); } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_if_oper_status(ErlNifEnv* env, DWORD status) { ERL_NIF_TERM estatus; switch (status) { case IfOperStatusUp: estatus = esock_atom_up; break; case IfOperStatusDown: estatus = atom_down; break; case IfOperStatusTesting: estatus = atom_testing; break; case IfOperStatusUnknown: estatus = atom_unknown; break; case IfOperStatusDormant: estatus = esock_atom_dormant; break; case IfOperStatusNotPresent: estatus = atom_not_present; break; case IfOperStatusLowerLayerDown: estatus = atom_lower_layer_down; break; default: estatus = MKUI(env, status); break; } return estatus; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_zone_indices(ErlNifEnv* env, DWORD* zoneIndices, DWORD len) { SocketTArray ta = TARRAY_CREATE(len); DWORD i; ERL_NIF_TERM ezi; for (i = 0; i < len; i++) { TARRAY_ADD(ta, MKUI(env, zoneIndices[i])); } TARRAY_TOLIST(ta, env, &ezi); return ezi; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_prefixes(ErlNifEnv* env, IP_ADAPTER_PREFIX* firstP) { IP_ADAPTER_PREFIX* tmp = firstP; SocketTArray ta = TARRAY_CREATE(16); ERL_NIF_TERM eprefs; while (tmp != NULL) { TARRAY_ADD(ta, encode_adapter_prefix(env, tmp)); tmp = tmp->Next; } TARRAY_TOLIST(ta, env, &eprefs); return eprefs; } #endif // __WIN32__ #if defined(__WIN32__) /* * prerix() :: * #{addr := socket:address(), * length := non_neg_integer()} */ static ERL_NIF_TERM encode_adapter_prefix(ErlNifEnv* env, IP_ADAPTER_PREFIX* prefP) { ERL_NIF_TERM esa, eplen; ERL_NIF_TERM epref; esa = encode_adapter_prefix_sockaddr(env, prefP->Address.lpSockaddr); eplen = MKUI(env, prefP->PrefixLength); /* if (prefP->Address.lpSockaddr->sa_family == AF_INET) { struct sockaddr* sinP = prefP->Address.lpSockaddr; ERL_NIF_TERM keys[] = {esock_atom_addr, atom_length, MKA(env, "raw_addr"), MKA(env, "raw_addr_ntohl")}; ERL_NIF_TERM vals[] = {esa, eplen, MKUI(env, (DWORD) (((struct sockaddr_in *)sinP)->sin_addr.s_addr)), MKUI(env, ntohl((DWORD) (((struct sockaddr_in *)sinP)->sin_addr.s_addr)))}; size_t numKeys = NUM(keys); size_t numVals = NUM(vals); ESOCK_ASSERT( numKeys == numVals ); ESOCK_ASSERT( MKMA(env, keys, vals, numKeys, &epref) ); } else */ { ERL_NIF_TERM keys[] = {esock_atom_addr, atom_length}; ERL_NIF_TERM vals[] = {esa, eplen}; size_t numKeys = NUM(keys); size_t numVals = NUM(vals); ESOCK_ASSERT( numKeys == numVals ); ESOCK_ASSERT( MKMA(env, keys, vals, numKeys, &epref) ); } return epref; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_prefix_sockaddr(ErlNifEnv* env, struct sockaddr* addrP) { return encode_sockaddr(env, addrP); } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM 
enet_adapter_encode_name(ErlNifEnv* env, WCHAR* name) { return encode_wchar(env, name); } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM enet_adapter_encode_friendly_name(ErlNifEnv* env, WCHAR* fname) { return encode_wchar(env, fname); } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_adapter_index_map_name(ErlNifEnv* env, WCHAR* name) { return encode_wchar(env, name); } #endif // __WIN32__ #if defined(__WIN32__) static void make_adapter_index_map(ErlNifEnv* env, ERL_NIF_TERM eindex, ERL_NIF_TERM ename, ERL_NIF_TERM* emap) { ERL_NIF_TERM keys[2]; ERL_NIF_TERM vals[2]; size_t len = NUM(keys); // Just in case... /* Index */ NDBG( ("NET", "make_adapter_index_map -> index: %T\r\n", eindex) ); keys[0] = atom_index; vals[0] = eindex; /* Name */ NDBG( ("NET", "make_adapter_index_map -> name: %T\r\n", ename) ); keys[1] = esock_atom_name; vals[1] = ename; ESOCK_ASSERT( MKMA(env, keys, vals, len, emap) ); } #endif // __WIN32__ /* ---------------------------------------------------------------------- * nif_get_if_entry * * Description: * Get If Entry * This is a windows only function! * * Arguments: * Args - A way to pass arguments. * Currently only used for: index and debug: * #{index := non_neg_integer(), * debug := boolean() (optional)} * * Results: * {ok, mib_if_row()} | {error, Reason :: term()} */ static ERL_NIF_TERM nif_get_if_entry(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { #if !defined(__WIN32__) return enif_raise_exception(env, MKA(env, "notsup")); #else ERL_NIF_TERM result, eargs; DWORD index; BOOLEAN_T dbg; NDBG( ("NET", "nif_get_if_entry -> entry (%d)\r\n", argc) ); if ((argc != 1) || !IS_MAP(env, argv[0])) { return enif_make_badarg(env); } eargs = argv[0]; if (!enet_get_if_entry_args_index(env, eargs, &index)) return enif_make_badarg(env); dbg = enet_get_if_entry_args_debug(env, eargs); result = enet_get_if_entry(env, dbg, index); NDBG2( dbg, ("NET", "nif_get_if_entry -> done when result: " "\r\n %T\r\n", result) ); return result; #endif } #if defined(__WIN32__) static BOOLEAN_T enet_get_if_entry_args_index(ErlNifEnv* env, const ERL_NIF_TERM eargs, DWORD* index) { ERL_NIF_TERM key = atom_index; ERL_NIF_TERM eval; DWORD val; if (!GET_MAP_VAL(env, eargs, key, &eval)) { return FALSE; } else { if (!IS_NUM(env, eval)) return FALSE; if (!GET_UINT(env, eval, &val)) return FALSE; *index = val; return TRUE; } } #endif // __WIN32__ #if defined(__WIN32__) static BOOLEAN_T enet_get_if_entry_args_debug(ErlNifEnv* env, const ERL_NIF_TERM eargs) { return get_debug(env, eargs); } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM enet_get_if_entry(ErlNifEnv* env, BOOLEAN_T dbg, DWORD index) { DWORD ret; MIB_IFROW ifRow; ERL_NIF_TERM eifRow, result; NDBG2( dbg, ("NET", "nif_get_if_entry -> entry with" "\r\n index: %d\r\n", index) ); sys_memzero(&ifRow, sizeof(ifRow)); ifRow.dwIndex = index; ret = GetIfEntry(&ifRow); NDBG2( dbg, ("NET", "nif_get_if_entry -> get-if-entru result:" "\r\n %d\r\n", ret) ); switch (ret) { /* Success */ case NO_ERROR: eifRow = enet_if_row_encode(env, dbg, &ifRow); result = esock_make_ok2(env, eifRow); break; /* Known errors */ case ERROR_CAN_NOT_COMPLETE: result = esock_make_error(env, atom_can_not_complete); break; case ERROR_INVALID_DATA: result = esock_make_error(env, atom_invalid_data); break; case ERROR_INVALID_PARAMETER: result = esock_make_error(env, atom_invalid_parameter); break; case ERROR_NOT_FOUND: result = esock_make_error(env, esock_atom_not_found); break; case ERROR_NOT_SUPPORTED: result = 
esock_make_error(env, atom_not_supported); break; /* Other errors */ default: result = esock_make_error(env, MKI(env, ret)); break; } NDBG2( dbg, ("NET", "nif_get_if_entry -> done when:" "\r\n result: %T\r\n", result) ); return result; } #endif // __WIN32__ #if defined(__WIN32__) // Returns: mib_if_row() static ERL_NIF_TERM enet_if_row_encode(ErlNifEnv* env, BOOLEAN_T dbg, MIB_IFROW* rowP) { ERL_NIF_TERM eName, eIndex, eType, eMtu, eSpeed, ePhuysAddr, eAdminStatus; ERL_NIF_TERM eOperStatus, eLastChange, eInOctets, eInUcastPkts; ERL_NIF_TERM eInNUcastPkts, eInDiscards, eInError, eInUnknownProtos; ERL_NIF_TERM eOutOcts, eOutUcastPkts, eOutNUcastPkts, eOutDiscards; ERL_NIF_TERM eOutErrors, eOutQLen, eDescr; ERL_NIF_TERM erow; NDBG2( dbg, ("NET", "enet_if_row_encode -> entry\r\n") ); eName = encode_wchar(env, rowP->wszName); eIndex = MKUI(env, rowP->dwIndex); eType = encode_if_type(env, rowP->dwType); eMtu = MKUI(env, rowP->dwMtu); eSpeed = MKUI(env, rowP->dwSpeed); ePhuysAddr = encode_if_row_phys_address(env, rowP->dwPhysAddrLen, rowP->bPhysAddr); eAdminStatus = encode_if_admin_status(env, rowP->dwAdminStatus); eOperStatus = encode_internal_if_oper_status(env, rowP->dwOperStatus); eLastChange = MKUI(env, rowP->dwLastChange); eInOctets = MKUI(env, rowP->dwInOctets); eInUcastPkts = MKUI(env, rowP->dwInUcastPkts); eInNUcastPkts = MKUI(env, rowP->dwInNUcastPkts); eInDiscards = MKUI(env, rowP->dwInDiscards); eInError = MKUI(env, rowP->dwInErrors); eInUnknownProtos = MKUI(env, rowP->dwInUnknownProtos); eOutOcts = MKUI(env, rowP->dwOutOctets); eOutUcastPkts = MKUI(env, rowP->dwOutUcastPkts); eOutNUcastPkts = MKUI(env, rowP->dwOutNUcastPkts); eOutDiscards = MKUI(env, rowP->dwOutDiscards); eOutErrors = MKUI(env, rowP->dwOutErrors); eOutQLen = MKUI(env, rowP->dwOutQLen); eDescr = encode_if_row_description(env, rowP->dwDescrLen, rowP->bDescr); { ERL_NIF_TERM keys[] = {esock_atom_name, atom_index, esock_atom_type, esock_atom_mtu, atom_speed, atom_phys_addr, atom_admin_status, atom_internal_oper_status, atom_last_change, atom_in_octets, atom_in_ucast_pkts, atom_in_nucast_pkts, atom_in_discards, atom_in_errors, atom_in_unknown_protos, atom_out_octets, atom_out_ucast_pkts, atom_out_nucast_pkts, atom_out_discards, atom_out_errors, atom_out_qlen, atom_description}; ERL_NIF_TERM vals[] = {eName, eIndex, eType, eMtu, eSpeed, ePhuysAddr, eAdminStatus, eOperStatus, eLastChange, eInOctets, eInUcastPkts, eInNUcastPkts, eInDiscards, eInError, eInUnknownProtos, eOutOcts, eOutUcastPkts, eOutNUcastPkts, eOutDiscards, eOutErrors, eOutQLen, eDescr}; size_t numKeys = NUM(keys); size_t numVals = NUM(vals); ESOCK_ASSERT( numKeys == numVals ); ESOCK_ASSERT( MKMA(env, keys, vals, numKeys, &erow) ); } NDBG2( dbg, ("NET", "enet_if_row_encode -> done with:" "\r\n result: %T" "\r\n", erow) ); return erow; } #endif #if defined(__WIN32__) static ERL_NIF_TERM encode_if_type(ErlNifEnv* env, DWORD type) { ERL_NIF_TERM etype; switch (type) { case IF_TYPE_OTHER: etype = atom_other; break; case IF_TYPE_ETHERNET_CSMACD: etype = atom_ethernet_csmacd; break; case IF_TYPE_ISO88025_TOKENRING: etype = atom_iso88025_tokenring; break; case IF_TYPE_FDDI: etype = atom_fddi; break; case IF_TYPE_PPP: etype = atom_ppp; break; case IF_TYPE_SOFTWARE_LOOPBACK: etype = atom_software_loopback; break; case IF_TYPE_ATM: etype = atom_atm; break; case IF_TYPE_IEEE80211: etype = atom_ieee80211; break; case IF_TYPE_TUNNEL: etype = atom_tunnel; break; case IF_TYPE_IEEE1394: etype = atom_ieee1394; break; case IF_TYPE_IEEE80216_WMAN: etype = atom_ieee80216_wman; 
break; case IF_TYPE_WWANPP: etype = atom_wwanpp; break; case IF_TYPE_WWANPP2: etype = atom_wwanpp2; break; default: etype = MKUI(env, type); break; } return etype; } #endif // __WIN32__ #if defined(__WIN32__) /* * The description is defined as a UCHAR array with a *max* length * of MAXLEN_IFDESCR. But the actual length is defined by the * dwDescrLen field. * The documentation does not specify that its a NULL terminated * string, but in practice that is what it is. But, if the string * *is* MAXLEN_IFDESCR there is no room for a NULL terminator... * So, just to be on the safe side we copy the text into a buffer * of length = len + 1 and add an extra NULL character at the last * position. Then we can handle the it as any NULL terminated string. */ static ERL_NIF_TERM encode_if_row_description(ErlNifEnv* env, DWORD len, UCHAR* buf) { ERL_NIF_TERM edesc; UCHAR* tmp = MALLOC(len + 1); ESOCK_ASSERT( tmp != NULL ); sys_memcpy(tmp, buf, len); tmp[len] = 0; edesc = MKS(env, tmp); FREE(tmp); return edesc; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_if_admin_status(ErlNifEnv* env, DWORD status) { ERL_NIF_TERM estatus; switch (status) { case IF_OPER_STATUS_NON_OPERATIONAL: estatus = atom_non_operational; break; case IF_OPER_STATUS_UNREACHABLE: estatus = atom_unreachable; break; case IF_OPER_STATUS_DISCONNECTED: estatus = atom_disconnected; break; case IF_OPER_STATUS_CONNECTING: estatus = esock_atom_connecting; break; case IF_OPER_STATUS_CONNECTED: estatus = esock_atom_connected; break; case IF_OPER_STATUS_OPERATIONAL: estatus = atom_operational; break; default: estatus = MKUI(env, status); break; } return estatus; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_internal_if_oper_status(ErlNifEnv* env, DWORD status) { ERL_NIF_TERM estatus; switch (status) { case IF_OPER_STATUS_NON_OPERATIONAL: estatus = atom_non_operational; break; case IF_OPER_STATUS_UNREACHABLE: estatus = atom_unreachable; break; case IF_OPER_STATUS_DISCONNECTED: estatus = atom_disconnected; break; case IF_OPER_STATUS_CONNECTING: estatus = esock_atom_connecting; break; case IF_OPER_STATUS_CONNECTED: estatus = esock_atom_connected; break; case IF_OPER_STATUS_OPERATIONAL: estatus = atom_operational; break; default: estatus = MKUI(env, status); break; } return estatus; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_if_row_phys_address(ErlNifEnv* env, DWORD len, UCHAR* buf) { return encode_uchar(env, len, buf); } #endif // __WIN32__ /* ---------------------------------------------------------------------- * nif_get_interface_info * * Description: * Get interface info table (only IPv4 interfaces) * This is a windows only function! * * Physical Interfaces? * * Arguments: * Args - A way to pass arguments. * Currently only used for debug. 
* * Results: * {ok, [ip_adapter_index_map()]} | {error, Reason :: term()} */ static ERL_NIF_TERM nif_get_interface_info(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { #if !defined(__WIN32__) return enif_raise_exception(env, MKA(env, "notsup")); #else ERL_NIF_TERM result, eargs; BOOLEAN_T dbg; NDBG( ("NET", "nif_get_interface_info -> entry (%d)\r\n", argc) ); if ((argc != 1) || !IS_MAP(env, argv[0])) { return enif_make_badarg(env); } eargs = argv[0]; dbg = enet_get_interface_info_args_debug(env, eargs); result = enet_get_interface_info(env, dbg); NDBG2( dbg, ("NET", "nif_get_interface_info -> done when result: " "\r\n %T\r\n", result) ); return result; #endif } #if defined(__WIN32__) static BOOLEAN_T enet_get_interface_info_args_debug(ErlNifEnv* env, const ERL_NIF_TERM eargs) { return get_debug(env, eargs); } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM enet_get_interface_info(ErlNifEnv* env, BOOLEAN_T dbg) { int i; DWORD ret; unsigned long infoSize = 4 * 1024; IP_INTERFACE_INFO* infoP = (IP_INTERFACE_INFO*) MALLOC(infoSize); ERL_NIF_TERM eret, einfo, result; for (i = 17; i; i--) { NDBG2( dbg, ("NET", "enet_get_interface_info -> try get info with: " "\r\n infoSize: %d" "\r\n", infoSize) ); ret = GetInterfaceInfo(infoP, &infoSize); NDBG2( dbg, ("NET", "enet_get_interface_info -> " "get-info result: %d (%d)\r\n", ret, infoSize) ); if (ret == NO_ERROR) { /* We are done! */ break; } else if (ret == ERROR_INSUFFICIENT_BUFFER) { /* Not large enough */ infoP = REALLOC(infoP, infoSize); continue; } else { /* Failure */ i = 0; } } NDBG2( dbg, ("NET", "enet_get_interface_info -> " "done when get info counter: %d\r\n", i) ); if (! i) { NDBG2( dbg, ("NET", "enet_get_interface_info -> " "try encode error (%d)\r\n", ret) ); FREE(infoP); switch (ret) { case ERROR_INSUFFICIENT_BUFFER: eret = atom_insufficient_buffer; break; case ERROR_INVALID_PARAMETER: eret = atom_invalid_parameter; break; case ERROR_NO_DATA: eret = atom_no_data; break; case ERROR_NOT_SUPPORTED: eret = atom_not_supported; break; default: eret = MKI(env, ret); break; } result = esock_make_error(env, eret); } else { NDBG2( dbg, ("NET", "enet_get_interface_info -> try encode info\r\n") ); einfo = enet_interface_info_encode(env, dbg, infoP); result = esock_make_ok2(env, einfo); FREE(infoP); } NDBG2( dbg, ("NET", "enet_get_interface_info -> done with:" "\r\n result: %T" "\r\n", result) ); return result; } #endif // __WIN32__ #if defined(__WIN32__) // Returns: [#{index := integer(), name := string()}] static ERL_NIF_TERM enet_interface_info_encode(ErlNifEnv* env, BOOLEAN_T dbg, IP_INTERFACE_INFO* infoP) { ERL_NIF_TERM result; LONG num = infoP->NumAdapters; NDBG2( dbg, ("NET", "enet_interface_info_encode -> entry with" "\r\n num: %d" "\r\n", num) ); if (num > 0) { ERL_NIF_TERM* array = MALLOC(num * sizeof(ERL_NIF_TERM)); LONG i = 0; while (i < num) { ERL_NIF_TERM entry; NDBG2( dbg, ("NET", "enet_interface_info_encode -> " "try encode adapter %d" "\r\n", i) ); encode_adapter_index_map(env, dbg, &infoP->Adapter[i], &entry); array[i] = entry; i++; } result = MKLA(env, array, num); FREE(array); } else { result = MKEL(env); } NDBG2( dbg, ("NET", "enet_get_interface_info -> done with:" "\r\n result: %T" "\r\n", result) ); return result; } #endif // __WIN32__ #if defined(__WIN32__) /* * ip_adapter_index_map() :: #{name :: string(), * index :: non_neg_integer()} */ static void encode_adapter_index_map(ErlNifEnv* env, BOOLEAN_T dbg, IP_ADAPTER_INDEX_MAP* adapterP, ERL_NIF_TERM* eadapter) { ERL_NIF_TERM eindex = MKI(env, 
adapterP->Index); ERL_NIF_TERM ename = encode_adapter_index_map_name(env, adapterP->Name); ERL_NIF_TERM map; NDBG2( dbg, ("NET", "encode_adapter_index_map -> map fields: " "\r\n index: %T" "\r\n name: %T" "\r\n", eindex, ename) ); make_adapter_index_map(env, eindex, ename, &map); NDBG2( dbg, ("NET", "encode_adapter_index_map -> encoded map: %T\r\n", map) ); *eadapter = map; } #endif // __WIN32__ /* ---------------------------------------------------------------------- * nif_get_ip_address_table * * Description: * Get ip address table table. * This is a windows only function! * * Active Interfaces? * * Arguments: * Args - A way to pass arguments. * Currently only used for debug. * * Returns: * {ok, [mib_ip_address_row()]} | {error, Reason :: term()} */ static ERL_NIF_TERM nif_get_ip_address_table(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { #if !defined(__WIN32__) return enif_raise_exception(env, MKA(env, "notsup")); #else ERL_NIF_TERM result, eargs; BOOLEAN_T dbg; NDBG( ("NET", "nif_get_ip_address_table -> entry (%d)\r\n", argc) ); if ((argc != 1) || !IS_MAP(env, argv[0])) { return enif_make_badarg(env); } eargs = argv[0]; dbg = enet_get_ip_address_table_args_debug(env, eargs); result = enet_get_ip_address_table(env, dbg); NDBG2( dbg, ("NET", "nif_get_ip_address_table -> done when result: " "\r\n %T\r\n", result) ); return result; #endif } #if defined(__WIN32__) static BOOLEAN_T enet_get_ip_address_table_args_debug(ErlNifEnv* env, const ERL_NIF_TERM eargs) { return get_debug(env, eargs); } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM enet_get_ip_address_table(ErlNifEnv* env, BOOLEAN_T dbg) { int i; DWORD ret; /* The table is *not* just an array of 'row', * but that is the significant part, so... */ unsigned long tabSize = 16*sizeof(MIB_IPADDRROW); MIB_IPADDRTABLE* ipAddrTabP = (MIB_IPADDRTABLE*) MALLOC(tabSize); ERL_NIF_TERM eret, etable, result; for (i = 17; i; i--) { NDBG2( dbg, ("NET", "enet_get_ip_address_table -> try get table with: " "\r\n tabSize: %d" "\r\n", tabSize) ); ret = GetIpAddrTable(ipAddrTabP, &tabSize, FALSE); NDBG2( dbg, ("NET", "enet_get_ip_address_table -> " "get-tab result: %d (%d)\r\n", ret, tabSize) ); ipAddrTabP = REALLOC(ipAddrTabP, tabSize); if (ret == NO_ERROR) break; if (ret == ERROR_INSUFFICIENT_BUFFER) continue; i = 0; } NDBG2( dbg, ("NET", "enet_get_ip_address_table -> " "done when get-tab counter: %d\r\n", i) ); if (! 
i) { NDBG2( dbg, ("NET", "enet_get_ip_address_table -> try transform error\r\n") ); FREE(ipAddrTabP); switch (ret) { case ERROR_INSUFFICIENT_BUFFER: eret = atom_insufficient_buffer; break; case ERROR_INVALID_PARAMETER: eret = atom_invalid_parameter; break; case ERROR_NOT_SUPPORTED: eret = atom_not_supported; break; default: eret = MKI(env, ret); break; } result = esock_make_error(env, eret); } else { NDBG2( dbg, ("NET", "enet_get_ip_address_table -> try transform table\r\n") ); etable = enet_get_ip_address_table_encode(env, dbg, ipAddrTabP); result = esock_make_ok2(env, etable); FREE(ipAddrTabP); } NDBG2( dbg, ("NET", "enet_get_ip_address_table -> done with:" "\r\n result: %T" "\r\n", result) ); return result; } #endif // __WIN32__ #if defined(__WIN32__) // Returns: [row()] static ERL_NIF_TERM enet_get_ip_address_table_encode(ErlNifEnv* env, BOOLEAN_T dbg, MIB_IPADDRTABLE* tabP) { ERL_NIF_TERM result; LONG num = tabP->dwNumEntries; NDBG2( dbg, ("NET", "enet_get_ip_address_table_encode -> entry with" "\r\n num: %d" "\r\n", num) ); if (num > 0) { ERL_NIF_TERM* array = MALLOC(num * sizeof(ERL_NIF_TERM)); LONG i = 0; while (i < num) { ERL_NIF_TERM entry; NDBG2( dbg, ("NET", "enet_interface_info_encode -> " "try encode ip-address-row %d" "\r\n", i) ); entry = encode_ip_address_row(env, dbg, &tabP->table[i]); array[i] = entry; i++; } result = MKLA(env, array, num); FREE(array); } else { result = MKEL(env); } NDBG2( dbg, ("NET", "enet_get_ip_address_table -> done with:" "\r\n result: %T" "\r\n", result) ); return result; } #endif // __WIN32__ #if defined(__WIN32__) static ERL_NIF_TERM encode_ip_address_row(ErlNifEnv* env, BOOLEAN_T dbg, MIB_IPADDRROW* rowP) { ERL_NIF_TERM eaddr = encode_ip_address_row_addr(env, dbg, "Addr", rowP->dwAddr); ERL_NIF_TERM eindex = MKUL(env, rowP->dwIndex); ERL_NIF_TERM emask = encode_ip_address_row_addr(env, dbg, "Mask", rowP->dwMask); ERL_NIF_TERM eBCastAddr = encode_ip_address_row_addr(env, dbg, "BCaseAddr", rowP->dwBCastAddr); ERL_NIF_TERM eReasmSize = MKUL(env, rowP->dwReasmSize); ERL_NIF_TERM map; NDBG2( dbg, ("NET", "encode_ipAddress_row_map -> map fields: " "\r\n address: %T" "\r\n index: %T" "\r\n mask: %T" "\r\n bcas-addr: %T" "\r\n reasm-size: %T" "\r\n", eaddr, eindex, emask, eBCastAddr, eReasmSize) ); make_ip_address_row(env, eaddr, eindex, emask, eBCastAddr, eReasmSize, &map); NDBG2( dbg, ("NET", "encode_ip_address_row -> encoded map: %T\r\n", map) ); return map; } #endif // __WIN32__ #if defined(__WIN32__) /* Converts an *IPv4* address to an erlang term (4-tuple) */ static ERL_NIF_TERM encode_ip_address_row_addr(ErlNifEnv* env, BOOLEAN_T dbg, const char* descr, DWORD addr) { struct in_addr a; ERL_NIF_TERM ea; NDBG2( dbg, ("NET", "encode_ip_address_row_addr -> entry with: " "\r\n %s: %lu\r\n", descr, addr) ); a.s_addr = addr; esock_encode_in_addr(env, &a, &ea); return ea; } #endif // __WIN32__ #if defined(__WIN32__) static void make_ip_address_row(ErlNifEnv* env, ERL_NIF_TERM eaddr, ERL_NIF_TERM eindex, ERL_NIF_TERM emask, ERL_NIF_TERM eBCastAddr, ERL_NIF_TERM eReasmSize, ERL_NIF_TERM* iar) { ERL_NIF_TERM keys[] = {esock_atom_addr, atom_index, atom_mask, atom_bcast_addr, atom_reasm_size}; ERL_NIF_TERM vals[] = {eaddr, eindex, emask, eBCastAddr, eReasmSize}; size_t numKeys = NUM(keys); ESOCK_ASSERT( numKeys == NUM(vals) ); ESOCK_ASSERT( MKMA(env, keys, vals, numKeys, iar) ); } #endif // __WIN32__ /* ---------------------------------------------------------------------- * nif_if_name2index * * Description: * Perform a Interface Name to Interface Index 
translation. * * Arguments: * Ifn - Interface name to be translated. */ static ERL_NIF_TERM nif_if_name2index(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { #if defined(__WIN32__) return enif_raise_exception(env, MKA(env, "notsup")); #elif defined(HAVE_IF_NAMETOINDEX) ERL_NIF_TERM eifn, result; char ifn[IF_NAMESIZE+1]; NDBG( ("NET", "nif_if_name2index -> entry (%d)\r\n", argc) ); if (argc != 1) { return enif_make_badarg(env); } eifn = argv[0]; NDBG( ("NET", "nif_if_name2index -> " "\r\n Ifn: %T" "\r\n", argv[0]) ); if (0 >= GET_STR(env, eifn, ifn, sizeof(ifn))) return esock_make_error(env, esock_atom_einval); result = enet_if_name2index(env, ifn); NDBG( ("NET", "nif_if_name2index -> done when result: %T\r\n", result) ); return result; #else return esock_make_error(env, esock_atom_enotsup); #endif } #if !defined(__WIN32__) && defined(HAVE_IF_NAMETOINDEX) static ERL_NIF_TERM enet_if_name2index(ErlNifEnv* env, char* ifn) { unsigned int idx; NDBG( ("NET", "enet_if_name2index -> entry with ifn: %s\r\n", ifn) ); idx = if_nametoindex(ifn); NDBG( ("NET", "enet_if_name2index -> idx: %d\r\n", idx) ); if (idx == 0) { int save_errno = get_errno(); NDBG( ("NET", "nif_name2index -> failed: %d\r\n", save_errno) ); return esock_make_error_errno(env, save_errno); } else { return esock_make_ok2(env, MKI(env, idx)); } } #endif /* ---------------------------------------------------------------------- * nif_if_index2name * * Description: * Perform a Interface Index to Interface Name translation. * * Arguments: * Idx - Interface index to be translated. */ static ERL_NIF_TERM nif_if_index2name(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { #if defined(__WIN32__) return enif_raise_exception(env, MKA(env, "notsup")); #elif defined(HAVE_IF_INDEXTONAME) ERL_NIF_TERM result; unsigned int idx; NDBG( ("NET", "nif_if_index2name -> entry (%d)\r\n", argc) ); if ((argc != 1) || !GET_UINT(env, argv[0], &idx)) { return enif_make_badarg(env); } NDBG( ("NET", "nif_index2name -> " "\r\n Idx: %T" "\r\n", argv[0]) ); result = enet_if_index2name(env, idx); NDBG( ("NET", "nif_if_index2name -> done when result: %T\r\n", result) ); return result; #else return esock_make_error(env, esock_atom_enotsup); #endif } #if !defined(__WIN32__) && defined(HAVE_IF_INDEXTONAME) static ERL_NIF_TERM enet_if_index2name(ErlNifEnv* env, unsigned int idx) { ERL_NIF_TERM result; char* ifn = MALLOC(IF_NAMESIZE+1); if (ifn == NULL) return enif_make_badarg(env); // PLACEHOLDER if (NULL != if_indextoname(idx, ifn)) { result = esock_make_ok2(env, MKS(env, ifn)); } else { result = esock_make_error(env, atom_enxio); } FREE(ifn); return result; } #endif /* ---------------------------------------------------------------------- * nif_if_names * * Description: * Get network interface names and indexes. * */ static ERL_NIF_TERM nif_if_names(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { #if defined(__WIN32__) || (defined(__ANDROID__) && (__ANDROID_API__ < 24)) return enif_raise_exception(env, MKA(env, "notsup")); #elif defined(HAVE_IF_NAMEINDEX) && defined(HAVE_IF_FREENAMEINDEX) ERL_NIF_TERM result; NDBG( ("NET", "nif_if_names -> entry (%d)\r\n", argc) ); if (argc != 0) { return enif_make_badarg(env); } result = enet_if_names(env); NDBG( ("NET", "nif_if_names -> done when result: %T\r\n", result) ); return result; #else return esock_make_error(env, esock_atom_enotsup); #endif } /* if_nameindex and if_freenameindex were added in Android 7.0 Nougat. 
With the Android NDK Unified Headers, check that the build is targeting at least the corresponding API level 24. */ /* Can we replace the ANDROID tests with the HAVE_... ? */ #if !defined(__WIN32__) && !(defined(__ANDROID__) && (__ANDROID_API__ < 24)) #if defined(HAVE_IF_NAMEINDEX) && defined(HAVE_IF_FREENAMEINDEX) static ERL_NIF_TERM enet_if_names(ErlNifEnv* env) { ERL_NIF_TERM result; struct if_nameindex* ifs = if_nameindex(); NDBG( ("NET", "enet_if_names -> ifs: 0x%lX\r\n", ifs) ); if (ifs == NULL) { result = esock_make_error_errno(env, get_errno()); } else { /* * We got some interfaces: * 1) Calculate how many - the only way is to iterate through the list * until its end (which is indicated by an entry with index = zero * and if_name = NULL). * 2) Allocate an ERL_NIF_TERM array of the calculated length. * 3) Iterate through the array of interfaces and for each create * a two tuple: {Idx, If} * * Or shall we instead build a list in reverse order and then when * its done, reverse that? Check */ unsigned int len = enet_if_names_length(ifs); NDBG( ("NET", "enet_if_names -> len: %d\r\n", len) ); if (len > 0) { ERL_NIF_TERM* array = MALLOC(len * sizeof(ERL_NIF_TERM)); unsigned int i; for (i = 0; i < len; i++) { array[i] = MKT2(env, MKI(env, ifs[i].if_index), MKS(env, ifs[i].if_name)); } result = esock_make_ok2(env, MKLA(env, array, len)); FREE(array); } else { result = esock_make_ok2(env, enif_make_list(env, 0)); } } if (ifs != NULL) if_freenameindex(ifs); return result; } static unsigned int enet_if_names_length(struct if_nameindex* p) { unsigned int len = 0; BOOLEAN_T done = FALSE; while (!done) { NDBG( ("NET", "enet_if_names_length -> %d: " "\r\n if_index: %d" "\r\n if_name: 0x%lX" "\r\n", len, p[len].if_index, p[len].if_name) ); if ((p[len].if_index == 0) && (p[len].if_name == NULL)) done = TRUE; else len++; } return len; } #endif // if defined(HAVE_IF_NAMEINDEX) && defined(HAVE_IF_FREENAMEINDEX) #endif // if !defined(__WIN32__) && ... /* ---------------------------------------------------------------------- * U t i l i t y F u n c t i o n s * ---------------------------------------------------------------------- */ /* The erlang format for a set of flags is a list of atoms. * A special case is when there is no flags, which is * represented by the atom undefined. 
*/ static ERL_NIF_TERM encode_sockaddr(ErlNifEnv* env, struct sockaddr* sa) { ERL_NIF_TERM esa; if (sa != NULL) { unsigned int sz = sizeof(ESockAddress); esock_encode_sockaddr(env, (ESockAddress*) sa, sz, &esa); } else { esa = esock_atom_undefined; } return esa; } static BOOLEAN_T decode_nameinfo_flags(ErlNifEnv* env, const ERL_NIF_TERM eflags, int* flags) { BOOLEAN_T result; if (IS_ATOM(env, eflags)) { NDBG( ("NET", "decode_nameinfo_flags -> is atom (%T)\r\n", eflags) ); if (COMPARE(eflags, esock_atom_undefined) == 0) { *flags = 0; result = TRUE; } else { result = FALSE; } } else if (IS_LIST(env, eflags)) { NDBG( ("NET", "decode_nameinfo_flags -> is list\r\n") ); result = decode_nameinfo_flags_list(env, eflags, flags); } else { result = FALSE; } NDBG( ("NET", "decode_nameinfo_flags -> result: %s\r\n", B2S(result)) ); return result; } static BOOLEAN_T decode_nameinfo_flags_list(ErlNifEnv* env, const ERL_NIF_TERM eflags, int* flags) { ERL_NIF_TERM elem, tail, list = eflags; int tmp = 0; BOOLEAN_T done = FALSE; while (!done) { /* NDBG( ("NET", "decode_nameinfo_flags_list -> " "get next (list) element of" "\r\n %T\r\n", list) ); */ if (GET_LIST_ELEM(env, list, &elem, &tail)) { /* NDBG( ("NET", "decode_nameinfo_flags_list -> got: " "\r\n element: %T" "\r\n tail: %T" "\r\n", elem, tail) ); */ if (COMPARE(elem, atom_namereqd) == 0) { tmp |= NI_NAMEREQD; } else if (COMPARE(elem, esock_atom_dgram) == 0) { tmp |= NI_DGRAM; } else if (COMPARE(elem, atom_nofqdn) == 0) { tmp |= NI_NOFQDN; } else if (COMPARE(elem, atom_numerichost) == 0) { tmp |= NI_NUMERICHOST; } else if (COMPARE(elem, atom_numericserv) == 0) { tmp |= NI_NUMERICSERV; /* Starting with glibc 2.3.4: */ #if defined(NI_IDN) } else if (COMPARE(elem, atom_idn) == 0) { tmp |= NI_IDN; #endif /* * In later versions of gcc these have been deprecated. * That is, they results in compiler warnings. * And since we "don't like that", the simplest way * to deal with this is to remove the use of them. * We leave them here commented out as an example. #if defined(NI_IDN_ALLOW_UNASSIGNED) } else if (COMPARE(elem, atom_idna_allow_unassigned) == 0) { tmp |= NI_IDN_ALLOW_UNASSIGNED; #endif #if defined(NI_IDN_USE_STD3_ASCII_RULES) } else if (COMPARE(elem, atom_idna_use_std3_ascii_rules) == 0) { tmp |= NI_IDN_USE_STD3_ASCII_RULES; #endif */ } else { NDBG( ("NET", "decode_nameinfo_flags_list -> " "invalid flag: %T\r\n", elem) ); return FALSE; } list = tail; } else { done = TRUE; } } *flags = tmp; return TRUE; } /* Decode the address info string (hostname or service name) * The string is either the atom undefined or an actual string. */ static BOOLEAN_T decode_addrinfo_string(ErlNifEnv* env, const ERL_NIF_TERM eString, char** stringP) { BOOLEAN_T result; if (IS_ATOM(env, eString)) { if (COMPARE(eString, esock_atom_undefined) == 0) { *stringP = NULL; result = TRUE; } else { *stringP = NULL; result = FALSE; } } else { result = esock_decode_string(env, eString, stringP); } return result; } /* Encode the address info * The address info is a linked list och address info, which * will result in the result being a list of zero or more length. 
*/ static ERL_NIF_TERM encode_address_infos(ErlNifEnv* env, struct addrinfo* addrInfo) { ERL_NIF_TERM result; unsigned int len = address_info_length(addrInfo); NDBG( ("NET", "encode_address_infos -> len: %d\r\n", len) ); if (len > 0) { ERL_NIF_TERM* array = MALLOC(len * sizeof(ERL_NIF_TERM)); unsigned int i = 0; struct addrinfo* p = addrInfo; while (i < len) { array[i] = encode_address_info(env, p); p = p->ai_next; i++; } result = MKLA(env, array, len); FREE(array); } else { result = MKEL(env); } NDBG( ("NET", "encode_address_infos -> result: " "\r\n %T\r\n", result) ); return result; } /* Calculate the length of the address info linked list * The list is NULL-terminated, so the only way is to * iterate through the list until we find next = NULL. */ static unsigned int address_info_length(struct addrinfo* addrInfoP) { unsigned int len = 1; struct addrinfo* tmp; BOOLEAN_T done = FALSE; tmp = addrInfoP; while (!done) { if (tmp->ai_next != NULL) { len++; tmp = tmp->ai_next; } else { done = TRUE; } } return len; } /* Create one (erlang) instance of the address info record * Should we have address info as a record or as a map? * * {address_info, Fam, Type, Proto, Addr} */ static ERL_NIF_TERM encode_address_info(ErlNifEnv* env, struct addrinfo* addrInfoP) { ERL_NIF_TERM fam, type, proto, addr, addrInfo; fam = encode_address_info_family(env, addrInfoP->ai_family); type = encode_address_info_type(env, addrInfoP->ai_socktype); proto = MKI(env, addrInfoP->ai_protocol); esock_encode_sockaddr(env, (ESockAddress*) addrInfoP->ai_addr, addrInfoP->ai_addrlen, &addr); make_address_info(env, fam, type, proto, addr, &addrInfo); return addrInfo; } /* Convert an "native" family to an erlang family (=domain). * Note that this is not currently exhaustive, but only supports * inet and inet6. Other values will be returned as is, that is * in the form of an integer. */ static ERL_NIF_TERM encode_address_info_family(ErlNifEnv* env, int family) { ERL_NIF_TERM efam; esock_encode_domain(env, family, &efam); return efam; } /* Convert an "native" socket type to an erlang socket type. * Note that this is not currently exhaustive, but only supports * stream and dgram. Other values will be returned as is, that is * in the form of an integer. */ static ERL_NIF_TERM encode_address_info_type(ErlNifEnv* env, int socktype) { ERL_NIF_TERM etype; esock_encode_type(env, socktype, &etype); return etype; } static void make_address_info(ErlNifEnv* env, ERL_NIF_TERM fam, ERL_NIF_TERM sockType, ERL_NIF_TERM proto, ERL_NIF_TERM addr, ERL_NIF_TERM* ai) { ERL_NIF_TERM keys[] = {esock_atom_family, esock_atom_type, esock_atom_protocol, esock_atom_addr}; ERL_NIF_TERM vals[] = {fam, sockType, proto, addr}; size_t numKeys = NUM(keys); ESOCK_ASSERT( numKeys == NUM(vals) ); ESOCK_ASSERT( MKMA(env, keys, vals, numKeys, ai) ); } #ifdef HAVE_SETNS /* We should really have another API, so that we can return errno... */ /* *** change network namespace *** * Retrieve the current namespace and set the new. * Return result and previous namespace if successful. 
*/ #if !defined(__WIN32__) static BOOLEAN_T change_network_namespace(char* netns, int* cns, int* err) { int save_errno; int current_ns = 0; int new_ns = 0; NDBG( ("NET", "change_network_namespace -> entry with" "\r\n new ns: %s", netns) ); if (netns != NULL) { current_ns = open("/proc/self/ns/net", O_RDONLY); if (current_ns == -1) { *cns = current_ns; *err = get_errno(); return FALSE; } new_ns = open(netns, O_RDONLY); if (new_ns == -1) { save_errno = get_errno(); while (close(current_ns) == -1 && get_errno() == EINTR); *cns = -1; *err = save_errno; return FALSE; } if (setns(new_ns, CLONE_NEWNET) != 0) { save_errno = get_errno(); while ((close(new_ns) == -1) && (get_errno() == EINTR)); while ((close(current_ns) == -1) && (get_errno() == EINTR)); *cns = -1; *err = save_errno; return FALSE; } else { while ((close(new_ns) == -1) && (get_errno() == EINTR)); *cns = current_ns; *err = 0; return TRUE; } } else { *cns = -1; *err = 0; return TRUE; } } /* *** restore network namespace *** * Restore the previous namespace (see above). */ static BOOLEAN_T restore_network_namespace(int ns, int* err) { int save_errno; NDBG( ("NET", "restore_network_namespace -> entry with" "\r\n ns: %d", ns) ); if (ns != -1) { if (setns(ns, CLONE_NEWNET) != 0) { /* XXX Failed to restore network namespace. * What to do? Tidy up and return an error... * Note that the thread now might still be in the namespace. * Can this even happen? Should the emulator be aborted? */ save_errno = get_errno(); while (close(ns) == -1 && get_errno() == EINTR); *err = save_errno; return FALSE; } else { while (close(ns) == -1 && get_errno() == EINTR); *err = 0; return TRUE; } } *err = 0; return TRUE; } #endif // if !defined(__WIN32__) #endif // ifdef HAVE_SETNS static ERL_NIF_TERM decode_bool(ErlNifEnv* env, ERL_NIF_TERM ebool, BOOLEAN_T* ibool) { if (COMPARE(ebool, esock_atom_true) == 0) { *ibool = TRUE; return esock_atom_ok; } else if (COMPARE(ebool, esock_atom_false) == 0) { *ibool = FALSE; return esock_atom_ok; } else { return esock_make_error(env, esock_atom_einval); } } /* ---------------------------------------------------------------------- * C a l l b a c k F u n c t i o n s * ---------------------------------------------------------------------- */ /* ========================================================================= * net_dtor - Callback function for resource destructor * */ /* static void net_dtor(ErlNifEnv* env, void* obj) { } */ /* ========================================================================= * net_stop - Callback function for resource stop * */ /* static void net_stop(ErlNifEnv* env, void* obj, int fd, int is_direct_call) { } */ /* ========================================================================= * net_down - Callback function for resource down (monitored processes) * */ /* static void net_down(ErlNifEnv* env, void* obj, const ErlNifPid* pid, const ErlNifMonitor* mon) { } */ /* ---------------------------------------------------------------------- * L o a d / u n l o a d / u p g r a d e F u n c t i o n s * ---------------------------------------------------------------------- */ static ErlNifFunc net_funcs[] = { // Some utility functions {"nif_info", 0, nif_info, 0}, {"nif_command", 1, nif_command, 0}, // Shall we let this be dirty? 
/* get/set hostname */ {"nif_gethostname", 0, nif_gethostname, 0}, /* address and name translation in protocol-independent manner */ {"nif_getnameinfo", 2, nif_getnameinfo, 0}, {"nif_getaddrinfo", 3, nif_getaddrinfo, 0}, {"nif_getifaddrs", 1, nif_getifaddrs, ERL_NIF_DIRTY_JOB_IO_BOUND}, {"nif_get_adapters_addresses", 1, nif_get_adapters_addresses, ERL_NIF_DIRTY_JOB_IO_BOUND}, {"nif_get_if_entry", 1, nif_get_if_entry, ERL_NIF_DIRTY_JOB_IO_BOUND}, {"nif_get_interface_info", 1, nif_get_interface_info, ERL_NIF_DIRTY_JOB_IO_BOUND}, {"nif_get_ip_address_table", 1, nif_get_ip_address_table, ERL_NIF_DIRTY_JOB_IO_BOUND}, /* Network interface (name and/or index) functions */ {"nif_if_name2index", 1, nif_if_name2index, 0}, {"nif_if_index2name", 1, nif_if_index2name, 0}, {"nif_if_names", 0, nif_if_names, 0} }; #if defined(__WIN32__) /* * The assumption is that the 'name' string is NULL terminated */ static ERL_NIF_TERM encode_wchar(ErlNifEnv* env, WCHAR* name) { ERL_NIF_TERM result; int len = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL); if (!len) { result = esock_atom_undefined; } else { char* buf = (char*) MALLOC(len+1); if (0 == WideCharToMultiByte(CP_UTF8, 0, name, -1, buf, len, NULL, NULL)) { DWORD error = GetLastError(); switch (error) { case ERROR_INSUFFICIENT_BUFFER: result = atom_insufficient_buffer; break; case ERROR_INVALID_FLAGS: result = atom_invalid_flags; break; case ERROR_INVALID_PARAMETER: result = atom_invalid_parameter; break; case ERROR_NO_UNICODE_TRANSLATION: result = atom_no_uniconde_traslation; break; default: result = MKI(env, error); break; } } else { result = MKS(env, buf); } FREE(buf); } return result; } #endif // __WIN32__ #if defined(__WIN32__) /* * This builds a binary term from an array of uchar */ static ERL_NIF_TERM encode_uchar(ErlNifEnv* env, DWORD len, UCHAR* buf) { ERL_NIF_TERM ebuf; unsigned char* p; p = enif_make_new_binary(env, len, &ebuf); ESOCK_ASSERT( p != NULL ); sys_memcpy(p, buf, len); return ebuf; } #endif // __WIN32__ static BOOLEAN_T get_debug(ErlNifEnv* env, ERL_NIF_TERM map) { /* * We need to do this here since the "proper" atom has not been * created when this function is called. */ ERL_NIF_TERM debug = MKA(env, "debug"); return esock_get_bool_from_map(env, map, debug, NET_NIF_DEBUG_DEFAULT); } /* ======================================================================= * load_info - A map of misc info (e.g global debug) */ static int on_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info) { #if !defined(__WIN32__) // We should make it possible to use load_info to get default values data.debug = get_debug(env, load_info); NDBG( ("NET", "on_load -> entry\r\n") ); #endif #define LOCAL_ATOM_DECL(A) atom_##A = MKA(env, #A) LOCAL_ATOMS LOCAL_ERROR_REASON_ATOMS #undef LOCAL_ATOM_DECL // For storing "global" things... // data.env = enif_alloc_env(); // We should really check // data.version = MKA(env, ERTS_VERSION); // data.buildDate = MKA(env, ERTS_BUILD_DATE); net = enif_open_resource_type_x(env, "net", &netInit, ERL_NIF_RT_CREATE, NULL); #if !defined(__WIN32__) NDBG( ("NET", "on_load -> done\r\n") ); #endif return !net; } ERL_NIF_INIT(prim_net, net_funcs, on_load, NULL, NULL, NULL)
LaudateCorpus1/otp
erts/emulator/beam/erl_fun.c
/* * %CopyrightBegin% * * Copyright Ericsson AB 2000-2021. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * %CopyrightEnd% */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "sys.h" #include "erl_vm.h" #include "global.h" #include "erl_fun.h" #include "hash.h" #include "beam_common.h" /* Container structure for fun entries, allowing us to start `ErlFunEntry` with * a field other than its `HashBucket`. */ typedef struct erl_fun_entry_container { /* !! MUST BE THE FIRST FIELD !! */ HashBucket bucket; ErlFunEntry entry; } ErlFunEntryContainer; static Hash erts_fun_table; static erts_rwmtx_t erts_fun_table_lock; #define erts_fun_read_lock() erts_rwmtx_rlock(&erts_fun_table_lock) #define erts_fun_read_unlock() erts_rwmtx_runlock(&erts_fun_table_lock) #define erts_fun_write_lock() erts_rwmtx_rwlock(&erts_fun_table_lock) #define erts_fun_write_unlock() erts_rwmtx_rwunlock(&erts_fun_table_lock) static HashValue fun_hash(ErlFunEntryContainer* obj); static int fun_cmp(ErlFunEntryContainer* obj1, ErlFunEntryContainer* obj2); static ErlFunEntryContainer* fun_alloc(ErlFunEntryContainer* template); static void fun_free(ErlFunEntryContainer* obj); void erts_init_fun_table(void) { HashFunctions f; erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; erts_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); f.hash = (H_FUN) fun_hash; f.cmp = (HCMP_FUN) fun_cmp; f.alloc = (HALLOC_FUN) fun_alloc; f.free = (HFREE_FUN) fun_free; f.meta_alloc = (HMALLOC_FUN) erts_alloc; f.meta_free = (HMFREE_FUN) erts_free; f.meta_print = (HMPRINT_FUN) erts_print; hash_init(ERTS_ALC_T_FUN_TABLE, &erts_fun_table, "fun_table", 16, f); } void erts_fun_info(fmtfn_t to, void *to_arg) { int lock = !ERTS_IS_CRASH_DUMPING; if (lock) erts_fun_read_lock(); hash_info(to, to_arg, &erts_fun_table); if (lock) erts_fun_read_unlock(); } int erts_fun_table_sz(void) { int sz; int lock = !ERTS_IS_CRASH_DUMPING; if (lock) erts_fun_read_lock(); sz = hash_table_sz(&erts_fun_table); if (lock) erts_fun_read_unlock(); return sz; } ErlFunEntry* erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index, const byte* uniq, int index, int arity) { ErlFunEntryContainer template; ErlFunEntryContainer *fc; ErlFunEntry *tp; erts_aint_t refc; tp = &template.entry; /* All fields are copied from the template when inserting a new entry. 
*/ ASSERT(is_atom(mod)); tp->old_index = old_index; tp->old_uniq = old_uniq; tp->index = index; tp->module = mod; tp->arity = arity; sys_memcpy(tp->uniq, uniq, sizeof(tp->uniq)); erts_fun_write_lock(); fc = (ErlFunEntryContainer*)hash_put(&erts_fun_table, (void*)&template); refc = erts_refc_inctest(&fc->entry.refc, 0); if (refc < 2) { /* New or pending delete */ erts_refc_inc(&fc->entry.refc, 1); } erts_fun_write_unlock(); return &fc->entry; } const ErtsCodeMFA *erts_get_fun_mfa(const ErlFunEntry *fe) { ErtsCodePtr address = fe->dispatch.addresses[0]; if (address != beam_unloaded_fun) { return erts_find_function_from_pc(address); } return NULL; } void erts_set_fun_code(ErlFunEntry *fe, ErtsCodePtr address) { int i; for (i = 0; i < ERTS_ADDRESSV_SIZE; i++) { fe->dispatch.addresses[i] = address; } } int erts_is_fun_loaded(const ErlFunEntry* fe) { return fe->dispatch.addresses[0] != beam_unloaded_fun; } static void erts_erase_fun_entry_unlocked(ErlFunEntry* fe) { ErlFunEntryContainer *fc = ErtsContainerStruct(fe, ErlFunEntryContainer, entry); hash_erase(&erts_fun_table, (void *) fc); } void erts_erase_fun_entry(ErlFunEntry* fe) { erts_fun_write_lock(); /* * We have to check refc again since someone might have looked up * the fun entry and incremented refc after last check. */ if (erts_refc_dectest(&fe->refc, -1) <= 0) { if (erts_is_fun_loaded(fe)) { erts_exit(ERTS_ERROR_EXIT, "Internal error: " "Invalid reference count found on #Fun<%T.%d.%d>: " " About to erase fun still referred by code.\n", fe->module, fe->old_index, fe->old_uniq); } erts_erase_fun_entry_unlocked(fe); } erts_fun_write_unlock(); } static void fun_purge_foreach(ErlFunEntryContainer *fc, struct erl_module_instance* modp) { const char *fun_addr, *mod_start; ErlFunEntry *fe = &fc->entry; fun_addr = (const char*)fe->dispatch.addresses[0]; mod_start = (const char*)modp->code_hdr; if (ErtsInArea(fun_addr, mod_start, modp->code_length)) { fe->pend_purge_address = fe->dispatch.addresses[0]; ERTS_THR_WRITE_MEMORY_BARRIER; erts_set_fun_code(fe, beam_unloaded_fun); erts_purge_state_add_fun(fe); } } void erts_fun_purge_prepare(struct erl_module_instance* modp) { erts_fun_read_lock(); hash_foreach(&erts_fun_table, (HFOREACH_FUN)fun_purge_foreach, modp); erts_fun_read_unlock(); } void erts_fun_purge_abort_prepare(ErlFunEntry **funs, Uint no) { Uint ix; for (ix = 0; ix < no; ix++) { ErlFunEntry *fe = funs[ix]; if (fe->dispatch.addresses[0] == beam_unloaded_fun) { erts_set_fun_code(fe, fe->pend_purge_address); } } } void erts_fun_purge_abort_finalize(ErlFunEntry **funs, Uint no) { Uint ix; for (ix = 0; ix < no; ix++) { funs[ix]->pend_purge_address = NULL; } } void erts_fun_purge_complete(ErlFunEntry **funs, Uint no) { Uint ix; for (ix = 0; ix < no; ix++) { ErlFunEntry *fe = funs[ix]; fe->pend_purge_address = NULL; if (erts_refc_dectest(&fe->refc, 0) == 0) erts_erase_fun_entry(fe); } ERTS_THR_WRITE_MEMORY_BARRIER; } ErlFunThing *erts_new_export_fun_thing(Eterm **hpp, Export *exp, int arity) { ErlFunThing *funp; funp = (ErlFunThing*)(*hpp); *hpp += ERL_FUN_SIZE; funp->thing_word = HEADER_FUN; funp->next = NULL; funp->entry.exp = exp; funp->num_free = 0; funp->creator = am_external; funp->arity = arity; #ifdef DEBUG { const ErtsCodeMFA *mfa = &exp->info.mfa; ASSERT(arity == mfa->arity); } #endif return funp; } ErlFunThing *erts_new_local_fun_thing(Process *p, ErlFunEntry *fe, int arity, int num_free) { ErlFunThing *funp; funp = (ErlFunThing*) p->htop; p->htop += ERL_FUN_SIZE + num_free; erts_refc_inc(&fe->refc, 2); funp->thing_word = 
HEADER_FUN; funp->next = MSO(p).first; MSO(p).first = (struct erl_off_heap_header*) funp; funp->entry.fun = fe; funp->num_free = num_free; funp->creator = p->common.id; funp->arity = arity; #ifdef DEBUG { /* FIXME: This assertion can fail because it may point to new code that * has not been committed yet. This is an actual bug but the fix is too * too involved and risky to release in a patch. * * As this problem has existed since the introduction of funs and is * very unlikely to cause actual issues in the wild, we've decided to * postpone the fix until OTP 26. See OTP-18016 for details. */ const ErtsCodeMFA *mfa = erts_get_fun_mfa(fe); ASSERT(funp->arity == mfa->arity - num_free); ASSERT(arity == fe->arity); } #endif return funp; } struct dump_fun_foreach_args { fmtfn_t to; void *to_arg; }; static void dump_fun_foreach(ErlFunEntryContainer *fc, struct dump_fun_foreach_args *args) { ErlFunEntry *fe = &fc->entry; erts_print(args->to, args->to_arg, "=fun\n"); erts_print(args->to, args->to_arg, "Module: %T\n", fe->module); erts_print(args->to, args->to_arg, "Uniq: %d\n", fe->old_uniq); erts_print(args->to, args->to_arg, "Index: %d\n",fe->old_index); erts_print(args->to, args->to_arg, "Address: %p\n", fe->dispatch.addresses[0]); erts_print(args->to, args->to_arg, "Refc: %ld\n", erts_refc_read(&fe->refc, 1)); } void erts_dump_fun_entries(fmtfn_t to, void *to_arg) { struct dump_fun_foreach_args args = {to, to_arg}; int lock = !ERTS_IS_CRASH_DUMPING; if (lock) erts_fun_read_lock(); hash_foreach(&erts_fun_table, (HFOREACH_FUN)dump_fun_foreach, &args); if (lock) erts_fun_read_unlock(); } static HashValue fun_hash(ErlFunEntryContainer* obj) { ErlFunEntry *fe = &obj->entry; return (HashValue) (fe->old_uniq ^ fe->index ^ atom_val(fe->module)); } static int fun_cmp(ErlFunEntryContainer* obj1, ErlFunEntryContainer* obj2) { ErlFunEntry* fe1 = &obj1->entry; ErlFunEntry* fe2 = &obj2->entry; return !(fe1->old_index == fe2->old_index && fe1->old_uniq == fe2->old_uniq && fe1->module == fe2->module && fe1->index == fe2->index && fe1->arity == fe2->arity && !sys_memcmp(fe1->uniq, fe2->uniq, sizeof(fe1->uniq))); } static ErlFunEntryContainer* fun_alloc(ErlFunEntryContainer* template) { ErlFunEntryContainer* obj; obj = (ErlFunEntryContainer *) erts_alloc(ERTS_ALC_T_FUN_ENTRY, sizeof(ErlFunEntryContainer)); sys_memcpy(obj, template, sizeof(ErlFunEntryContainer)); erts_refc_init(&obj->entry.refc, -1); erts_set_fun_code(&obj->entry, beam_unloaded_fun); obj->entry.pend_purge_address = NULL; return obj; } static void fun_free(ErlFunEntryContainer* obj) { erts_free(ERTS_ALC_T_FUN_ENTRY, (void *) obj); }
squaretwo/react-native-applovin-event-tracker
ios/RNApplovinEventTracker.h
<gh_stars>0 // // RNApplovinEventTracker.h // // Created by <NAME> on 7/25/18. // Copyright © 2018 SquareTwo. All rights reserved. // #import <React/RCTBridgeModule.h> @interface RNApplovinEventTracker : NSObject <RCTBridgeModule> @end
von8/temperature-prediction
factorial.c
#include<stdio.h> int main(){ int t=1; int i; for(i=2;i<=5;){ t=t*i; i=i+1; } printf("t=%d\n",t); }
von8/temperature-prediction
compare2bum.c
#include<stdio.h> int main(){ int max(int x, int y); int a, b, c; printf("Please input the first num:"); scanf("%d", &a); printf("Please input the second num:"); scanf("%d", &b); c = max(a, b); printf("max=%d\n",c); } int max(int x, int y){ int z; if(x>y)z=x; else z=y; return(z); }
von8/temperature-prediction
rootingProcedure.c
#include<stdio.h> #include<math.h> void main(){ float a,b,c,disc,x1,x2,p,q; scanf("a=%f,b=%f,c=%f",&a,&b,&c); disc=b*b-4*a*c; p=-b/(2*a); q=sqrt(disc)/(2*a); x1=p+q;x2=p-q; printf("x1=%5.2f\nx2=%5.2f\n",x1,x2); }
von8/temperature-prediction
reversion.c
#include<stdio.h> void main(){ void inv(int x[], int n); int i, a[10]={3,7,9,11,0,6,7,5,4,2}; printf("The original array:\n"); for(i=0;i<10;i++) printf("%d",a[i]); printf("\n"); inv(a, 10); printf("The array has been inverted:\n"); for(i=0;i<10;i++) printf("%d",a[i]); printf("\n"); } void inv(int x[], int n){ int temp, i, j, m=(n-1)/2; for(i=0;i<=m;i++){ j=n-i-1; temp=x[i];x[i]=x[j];x[j]=temp; } return; }
von8/temperature-prediction
pointerReversion.c
#include<stdio.h> void main(){ void inv(int *x, int n); int i, a[10]={3,7,9,11,0,6,7,5,4,2}; int *p; printf("The original array:\n"); for(i=0;i<10;i++) printf("%d",a[i]); printf("\n"); p=a; inv(p, 10); printf("The array has been inverted:\n"); for(i=0;i<10;i++) printf("%d",a[i]); printf("\n"); } void inv(int *x, int n){ int *p, temp, *i, *j, m=(n-1)/2; i=x; j=x+n-1;p=x+m; for(;i<=p;i++,j--){ temp=*i;*i=*j;*j=temp; } return; }
von8/temperature-prediction
pointer.c
#include<stdio.h> void main(){ void swap(int *p1, int *p2); int a, b; int *pointer_1, *pointer_2; scanf("%d, %d", &a, &b); pointer_1 = &a; pointer_2 = &b; if(a<b)swap(pointer_1, pointer_2); printf("\n%d, %d\n",a,b); } void swap(int *p1, int *p2){ int temp; temp= *p1; *p1 = *p2; *p2 = temp; }
von8/temperature-prediction
pointerArray.c
<filename>pointerArray.c #include<stdio.h> void main(){ int a[10]; int *p; for(p=a;p<(a+10);p++) scanf("%d",p); printf("\n"); p=a; for(int i=0;i<10;i++) printf("%d",*p++); printf("\n"); }
von8/temperature-prediction
hello.c
#include<stdio.h> int main(){ int fahr, celsius; int lower, upper, step; lower = 0; /* lower limit of the temperature table */ upper = 160; /* upper limit of the temperature table */ step = 20; /* step size */ fahr = lower; while(fahr<=upper){ celsius = 5*(fahr - 32)/9; printf("%d\t%d\n", fahr, celsius); fahr = fahr + step; } return 0; }
von8/temperature-prediction
pointerOrder.c
#include<stdio.h> void main(){ void exchange(int *q1, int *q2, int *q3); int a, b, c, *p1, *p2, *p3; scanf("%d,%d,%d", &a, &b, &c); p1=&a, p2=&b, p3=&c; exchange(p1,p2,p3); printf("\n%d,%d,%d\n",a,b,c); } void exchange(int *q1, int *q2, int *q3){ void swap(int *pt1, int *pt2); if(*q1<*q2) swap(q1,q2); if(*q1<*q3) swap(q1,q3); if(*q2<*q3) swap(q2,q3); } void swap(int *pt1, int *pt2){ int temp; temp = *pt1; *pt1 = *pt2; *pt2 = temp; }
josehu07/hux-kernel
src/common/port.h
/** * Common I/O ports inline assembly utilities (in At&T syntax). */ #ifndef PORT_H #define PORT_H #include <stdint.h> void outb(uint16_t port, uint8_t val); void outw(uint16_t port, uint16_t val); void outl(uint16_t port, uint32_t val); void outsl(uint16_t port, const void *addr, uint32_t cnt); uint8_t inb(uint16_t port); uint16_t inw(uint16_t port); uint32_t inl(uint16_t port); void insl(uint16_t port, void *addr, uint32_t cnt); #endif
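Editor's note: the header above only declares the wrappers. For reference, a typical x86 implementation of two of them looks like the sketch below, in standard AT&T-syntax GCC inline assembly. This is an illustration, not code taken from this repository's own port.c, which may differ in detail.

#include <stdint.h>

/* Editor's sketch only: conventional x86 port I/O wrappers. */
void outb(uint16_t port, uint8_t val)
{
    asm volatile ("outb %0, %1" : : "a" (val), "Nd" (port));
}

uint8_t inb(uint16_t port)
{
    uint8_t ret;
    asm volatile ("inb %1, %0" : "=a" (ret) : "Nd" (port));
    return ret;
}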
josehu07/hux-kernel
user/lib/malloc.h
<reponame>josehu07/hux-kernel /** * User Heap Memory "Next-Fit" Allocator. */ #ifndef MALLOC_H #define MALLOC_H #include <stdint.h> #include <stdbool.h> /** Hardcoded export of the page size and some helper macros. */ #define PAGE_SIZE 4096 #define USER_BASE 0x20000000 #define HEAP_BASE (USER_BASE + 0x00100000) #define ADDR_PAGE_OFFSET(addr) ((addr) & 0x00000FFF) #define ADDR_PAGE_NUMBER(addr) ((addr) >> 12) #define ADDR_PAGE_ALIGNED(addr) (ADDR_PAGE_OFFSET(addr) == 0) #define ADDR_PAGE_ROUND_DN(addr) ((addr) & 0xFFFFF000) #define ADDR_PAGE_ROUND_UP(addr) (ADDR_PAGE_ROUND_DN((addr) + 0x00000FFF)) /** Random magic number to protect against memory overruns. */ #define UHEAP_MAGIC 0xEDAF8461 /** * The free list is embedded inside the heap. Every allocated object (i.e., * memory chunk returned to the caller of `malloc()`) is prefixed with a free-list * header structure. * * See https://pages.cs.wisc.edu/~remzi/OSTEP/vm-freespace.pdf, figure 17.3 * ~ figure 17.7. The only difference is that our `magic` field is separate * from the `next` field and the magic number has weaker protection against * buffer overruns. */ struct free_list_node_header { size_t size; bool free; struct free_list_node_header *next; uint32_t magic; }; typedef struct free_list_node_header fl_header_t; /** Pointer arithmetic helper macros. */ #define HEADER_TO_OBJECT(header) ((header) + sizeof(fl_header_t)) #define OBJECT_TO_HEADER(object) ((object) - sizeof(fl_header_t)) uint32_t malloc(size_t size); void mfree(void *addr); #endif
josehu07/hux-kernel
src/memory/paging.h
<filename>src/memory/paging.h /** * Setting up and switching to paging mode. */ #ifndef PAGING_H #define PAGING_H #include <stdint.h> #include <stdbool.h> /** Assume 4KiB pages, not support any other sizes. */ #define PAGE_SIZE 4096 #define PTES_PER_PAGE 1024 #define PDES_PER_PAGE 1024 /** Number of physical frames available. Assume 128MiB physical memory. */ #define PHYS_MAX 0x08000000 /** 128MiB physical memory. */ #define NUM_FRAMES (PHYS_MAX / PAGE_SIZE) /** Up to where is kernel memory, == the upper bound of kernel heap. */ #define KMEM_MAX 0x00800000 /** 8MiB reserved for the kernel. */ /** Helper macros on addresses and page alignments. */ #define ADDR_PAGE_OFFSET(addr) ((addr) & 0x00000FFF) #define ADDR_PAGE_NUMBER(addr) ((addr) >> 12) #define ADDR_PDE_INDEX(addr) (ADDR_PAGE_NUMBER(addr) / 1024) #define ADDR_PTE_INDEX(addr) (ADDR_PAGE_NUMBER(addr) % 1024) #define ADDR_PAGE_ALIGNED(addr) (ADDR_PAGE_OFFSET(addr) == 0) #define ADDR_PAGE_ROUND_DN(addr) ((addr) & 0xFFFFF000) #define ADDR_PAGE_ROUND_UP(addr) (ADDR_PAGE_ROUND_DN((addr) + 0x00000FFF)) /** * Page table entry format, 32bits per entry. Order in struct * definition is from LSB -> MSB. * * See https://wiki.osdev.org/Paging for the detailed definition. */ struct page_table_entry { uint32_t present : 1; /** Set -> present in memory. */ uint32_t writable : 1; /** Set -> user writable. (read/write bit) */ uint32_t user : 1; /** Set -> user accessible. */ uint32_t unused0 : 2; /** Unused 2 caching bits. */ uint32_t accessed : 1; /** Set -> accessed sinced mapped. */ uint32_t dirty : 1; /** Set -> page has been written to. */ uint32_t unused1 : 5; /** Unused 5 misc bits. */ uint32_t frame : 20; /** Physical frame number of the page. */ } __attribute__((packed)); typedef struct page_table_entry pte_t; /** * Page directory entry format, 32bits per entry. Order in struct * definition is from LSB -> MSB. * * See https://wiki.osdev.org/Paging for the detailed definition. */ struct page_directory_entry { uint32_t present : 1; /** Set -> present in memory. */ uint32_t writable : 1; /** Set -> user writable. (read/write bit) */ uint32_t user : 1; /** Set -> user accessible. */ uint32_t unused0 : 2; /** Unused 2 caching bits. */ uint32_t accessed : 1; /** Set -> accessed sinced mapped. */ uint32_t unused1 : 1; /** Unused bit. */ uint32_t size : 1; /** 0 -> using 4KiB page size. */ uint32_t unused2 : 4; /** Unused 4 misc bits. */ uint32_t frame : 20; /** Physical frame number of level-2 table. */ } __attribute__((packed)); typedef struct page_directory_entry pde_t; /** Helper macro on getting the pointed-to address stored in an entry. */ #define ENTRY_FRAME_ADDR(entry) ((uint32_t) (entry).frame << 12) /** Extern resulted `kheap_curr` for heap allocator initialization. */ extern uint32_t kheap_curr; /** Extern the kernel page directory pointer to the scheduler. */ extern pde_t *kernel_pgdir; void paging_init(); pte_t *paging_walk_pgdir(pde_t *pgdir, uint32_t vaddr, bool alloc); pte_t *paging_walk_pgdir_at_boot(pde_t *pgdir, uint32_t vaddr, bool alloc); void paging_destroy_pgdir(pde_t *pgdir); uint32_t paging_map_upage(pte_t *pte, bool writable); void paging_map_kpage(pte_t *pte, uint32_t paddr); void paging_unmap_range(pde_t *pgdir, uint32_t va_start, uint32_t va_end); bool paging_copy_range(pde_t *dstdir, pde_t *srcdir, uint32_t va_start, uint32_t va_end); void paging_switch_pgdir(pde_t *pgdir); #endif
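Editor's note: as a quick illustration of how the address-decomposition macros above behave, the following host-runnable sketch computes the directory index, table index, and page offset for one hypothetical virtual address (the address value is made up; the arithmetic simply mirrors the macro definitions).

#include <stdint.h>
#include <stdio.h>

/* Editor's sketch only: mirrors ADDR_PDE_INDEX / ADDR_PTE_INDEX /
 * ADDR_PAGE_OFFSET for a hypothetical address 0x20001ABC. */
int main(void)
{
    uint32_t vaddr = 0x20001ABC;
    uint32_t pde_idx = (vaddr >> 12) / 1024;    /* ADDR_PDE_INDEX   -> 128   */
    uint32_t pte_idx = (vaddr >> 12) % 1024;    /* ADDR_PTE_INDEX   -> 1     */
    uint32_t offset  = vaddr & 0x00000FFF;      /* ADDR_PAGE_OFFSET -> 0xABC */
    printf("pde=%u pte=%u offset=0x%X\n", pde_idx, pte_idx, offset);
    return 0;
}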
josehu07/hux-kernel
user/tests/memtest.c
/** * User test program - stack & heap memory operations. */ #include <stdint.h> #include <stdbool.h> #include <stddef.h> #include "../lib/debug.h" #include "../lib/printf.h" #include "../lib/syscall.h" #include "../lib/malloc.h" void main(int argc, char *argv[]) { (void) argc; // Unused. (void) argv; printf("On-stack buffer of size 8200...\n"); char buf[8200]; buf[0] = 'A'; buf[1] = '\0'; printf("Variable buf @ %p: %s\n", buf, buf); printf("\nOn-heap allocations & frees...\n"); char *buf1 = (char *) malloc(200); printf("Buf1: %p\n", buf1); char *buf2 = (char *) malloc(4777); printf("Buf2: %p\n", buf2); mfree(buf1); char *buf3 = (char *) malloc(8); printf("Buf3: %p\n", buf3); mfree(buf3); mfree(buf2); exit(); }
josehu07/hux-kernel
src/common/bitmap.c
<filename>src/common/bitmap.c /** * Bitmap data structure used in paging, file system, etc. */ #include <stdint.h> #include <stdbool.h> #include "bitmap.h" #include "debug.h" #include "string.h" #include "spinlock.h" /** Set a slot as used. */ inline void bitmap_set(bitmap_t *bitmap, uint32_t slot_no) { assert(slot_no < bitmap->slots); bool was_locked = spinlock_locked(&(bitmap->lock)); if (!was_locked) spinlock_acquire(&(bitmap->lock)); size_t outer_idx = BITMAP_OUTER_IDX(slot_no); size_t inner_idx = BITMAP_INNER_IDX(slot_no); bitmap->bits[outer_idx] |= (1 << (7 - inner_idx)); if (!was_locked) spinlock_release(&(bitmap->lock)); } /** Clear a slot as free. */ inline void bitmap_clear(bitmap_t *bitmap, uint32_t slot_no) { assert(slot_no < bitmap->slots); if (slot_no == 0) info("clearing 0"); spinlock_acquire(&(bitmap->lock)); size_t outer_idx = BITMAP_OUTER_IDX(slot_no); size_t inner_idx = BITMAP_INNER_IDX(slot_no); bitmap->bits[outer_idx] &= ~(1 << (7 - inner_idx)); spinlock_release(&(bitmap->lock)); } /** Returns true if a slot is in use, otherwise false. */ inline bool bitmap_check(bitmap_t *bitmap, uint32_t slot_no) { assert(slot_no < bitmap->slots); spinlock_acquire(&(bitmap->lock)); size_t outer_idx = BITMAP_OUTER_IDX(slot_no); size_t inner_idx = BITMAP_INNER_IDX(slot_no); bool result = bitmap->bits[outer_idx] & (1 << (7 - inner_idx)); spinlock_release(&(bitmap->lock)); return result; } /** * Allocate a slot and mark as used. Returns the slot number of * the allocated slot, or `num_slots` if there is no free slot. */ uint32_t bitmap_alloc(bitmap_t *bitmap) { spinlock_acquire(&(bitmap->lock)); for (size_t i = 0; i < (bitmap->slots / 8); ++i) { if (bitmap->bits[i] == 0xFF) continue; for (size_t j = 0; j < 8; ++j) { if ((bitmap->bits[i] & (1 << (7 - j))) == 0) { /** Found a free slot. */ uint32_t slot_no = i * 8 + j; bitmap_set(bitmap, slot_no); spinlock_release(&(bitmap->lock)); return slot_no; } } } spinlock_release(&(bitmap->lock)); return bitmap->slots; } /** Initialize the bitmap. BITS must have been allocated. */ void bitmap_init(bitmap_t *bitmap, uint8_t *bits, uint32_t slots) { bitmap->slots = slots; bitmap->bits = bits; memset(bits, 0, slots / 8); spinlock_init(&(bitmap->lock), "bitmap's spinlock"); }
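Editor's note: a minimal usage sketch of this bitmap API follows. It is an illustration, not repository code: `bitmap_t` is assumed to be declared in bitmap.h (with the `slots`, `bits`, and `lock` fields used above), and `NUM_FRAMES` is borrowed from paging.h purely as an example size.

/* Editor's sketch only. */
static uint8_t frame_bits[NUM_FRAMES / 8];   /* caller-provided backing storage */
static bitmap_t frame_map;

void frame_map_demo(void)
{
    bitmap_init(&frame_map, frame_bits, NUM_FRAMES);

    uint32_t slot = bitmap_alloc(&frame_map);  /* == frame_map.slots when full */
    if (slot < frame_map.slots)
        bitmap_clear(&frame_map, slot);        /* release the slot again */
}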
josehu07/hux-kernel
src/common/parklock.c
<gh_stars>10-100 /** * Lock implementation that blocks the calling process on `acquire()` if * the lock is locked. Can only be used under process context. */ #include <stdint.h> #include <stdbool.h> #include <stddef.h> #include "parklock.h" #include "spinlock.h" #include "../process/process.h" #include "../process/scheduler.h" /** Returns true if the lock is currently held by the caller process. */ bool parklock_holding(parklock_t *lock) { spinlock_acquire(&(lock->lock)); bool held = lock->locked && (lock->holder_pid == running_proc()->pid); spinlock_release(&(lock->lock)); return held; } /** * Acquire the lock, blocks (parks) the caller if the lock is currently held * by some other process. */ void parklock_acquire(parklock_t *lock) { process_t *proc = running_proc(); spinlock_acquire(&(lock->lock)); /** * Park until lock is released and I'm the first one scheduled among * woken up process waiting on this lock. */ while (lock->locked) { /** Must hold ptable lock when yielding. */ spinlock_acquire(&ptable_lock); spinlock_release(&lock->lock); proc->wait_lock = lock; process_block(ON_LOCK); proc->wait_lock = NULL; spinlock_release(&ptable_lock); spinlock_acquire(&(lock->lock)); } lock->locked = true; lock->holder_pid = proc->pid; spinlock_release(&(lock->lock)); } /** Release the lock and wake up waiters. */ void parklock_release(parklock_t *lock) { spinlock_acquire(&(lock->lock)); lock->locked = false; lock->holder_pid = 0; /** * Wake up all waiter process - the first one getting scheduled among * them will be the next one succeeding in acquiring this lock. */ spinlock_acquire(&ptable_lock); for (process_t *proc = ptable; proc < &ptable[MAX_PROCS]; ++proc) { if (proc->state == BLOCKED && proc->block_on == ON_LOCK && proc->wait_lock == lock) { process_unblock(proc); } } spinlock_release(&ptable_lock); spinlock_release(&(lock->lock)); } /** Initialize the parking lock. */ void parklock_init(parklock_t *lock, const char *name) { spinlock_init(&(lock->lock), "parklock's internal spinlock"); lock->name = name; lock->locked = false; lock->holder_pid = 0; }
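A parklock is meant for critical sections long enough that spinning would waste the CPU, so waiters block instead. Below is a hypothetical usage sketch (kernel context only, not runnable standalone); `disk_queue_lock`, `disk_queue_submit()`, and the queue operation are invented for illustration and only the parklock_* calls come from the header above.

/* Hypothetical sketch: guarding a long-held shared resource with a parklock. */
#include "parklock.h"

static parklock_t disk_queue_lock;

void disk_queue_init(void)
{
    parklock_init(&disk_queue_lock, "disk queue parklock");
}

void disk_queue_submit(int req)
{
    parklock_acquire(&disk_queue_lock);   /* may block (park) the caller */
    /* ... long operation on the shared queue using `req` ... */
    (void) req;
    parklock_release(&disk_queue_lock);   /* wakes up all parked waiters */
}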
josehu07/hux-kernel
src/filesys/exec.h
/** * Implementation of the `exec()` syscall on ELF-32 file. */ #ifndef EXEC_H #define EXEC_H #include <stdbool.h> #include "file.h" /** Maximum number of arguments allowed in `argv` list. */ #define MAX_EXEC_ARGS 32 bool exec_program(mem_inode_t *inode, char *filename, char **argv); #endif
josehu07/hux-kernel
src/common/intstate.c
/** * Interrupt enable/disable routines. Mimics xv6. */ #include <stdbool.h> #include "intstate.h" #include "debug.h" #include "../process/scheduler.h" /** Check if interrupts are enabled. */ inline bool interrupt_enabled(void) { uint32_t eflags; asm volatile ( "pushfl; popl %0" : "=r" (eflags) : ); return eflags & 0x0200; /** IF flag. */ } /** Disable interrupts if not yet so. */ void cli_push(void) { bool was_enabled = interrupt_enabled(); asm volatile ( "cli" ); /** * If cli stack previously empty, remember the previous interrupt * enable/disable state. */ if (cpu_state.cli_depth == 0) cpu_state.int_enabled = was_enabled; cpu_state.cli_depth++; } /** * Restore interrupt e/d state to previous state if all `cli`s have been * popped. Must be one-one mapped to `cli_push()` in code. */ void cli_pop(void) { assert(!interrupt_enabled()); assert(cpu_state.cli_depth > 0); cpu_state.cli_depth--; if (cpu_state.cli_depth == 0 && cpu_state.int_enabled) asm volatile ( "sti" ); }
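The depth counter means `cli_push()`/`cli_pop()` pairs can nest safely: interrupts are only re-enabled (if they were enabled to begin with) when the outermost pop returns the depth to zero. A minimal kernel-context sketch of that pairing, with invented function names:

/* Sketch of nested cli_push()/cli_pop() pairing; names are illustrative. */
#include "intstate.h"

static void inner_helper(void)
{
    cli_push();    /* depth 1 -> 2, interrupts stay disabled */
    /* ... touch some state that must not be interrupted ... */
    cli_pop();     /* depth 2 -> 1, still disabled */
}

void outer_routine(void)
{
    cli_push();    /* depth 0 -> 1, remembers the previous IF state */
    inner_helper();
    cli_pop();     /* depth 1 -> 0, restores the previous IF state */
}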
josehu07/hux-kernel
src/process/scheduler.h
/** * CPU scheduler and context switching routines. * * Hux only aims to support a single CPU. */ #ifndef SCHEDULER_H #define SCHEDULER_H #include "process.h" #include "../interrupt/syscall.h" /** Per-CPU state (we only have a single CPU). */ struct cpu_state { /** No ID field because only supporting single CPU. */ process_context_t *scheduler; /** CPU scheduler context. */ process_t *running_proc; /** The process running or NULL. */ tss_t task_state; /** Current process task state. */ bool int_enabled; /** Remembered interrupt e/d state. */ uint8_t cli_depth; /** Number of pushed `cli`s. */ }; typedef struct cpu_state cpu_state_t; /** Extern the CPU state to interrupt enable/disable state helpers. */ extern cpu_state_t cpu_state; process_t *running_proc(); void cpu_init(); void scheduler(); void yield_to_scheduler(void); #endif
josehu07/hux-kernel
src/process/scheduler.c
/** * CPU scheduler and context switching routines. * * Hux only aims to support a single CPU. */ #include <stddef.h> #include "scheduler.h" #include "process.h" #include "../common/string.h" #include "../common/debug.h" #include "../common/intstate.h" #include "../device/timer.h" #include "../memory/gdt.h" #include "../memory/paging.h" /** Global CPU state (only a single CPU). */ cpu_state_t cpu_state; /** Extern our context switch routine from ASM `switch.s`. */ extern void context_switch(process_context_t **old, process_context_t *new); /** CPU scheduler, never leaves this function. */ void scheduler(void) { cpu_state.running_proc = NULL; while (1) { /** Loop indefinitely. */ /** * Force an interrupt enable in every iteration of this loop. * This primarily gives a chance to see keyboard input interrupts * and other similar external device interrupts, in case all live * processes are blocking (so the CPU looping in the scheduler). */ asm volatile ( "sti" ); spinlock_acquire(&ptable_lock); /** Look for a ready process in ptable. */ process_t *proc; for (proc = ptable; proc < &ptable[MAX_PROCS]; ++proc) { if (proc->state != READY) continue; /** Schedule this one for at most its `timeslice` ticks. */ uint32_t next_sched_tick = timer_tick + proc->timeslice; while (timer_tick < next_sched_tick && proc->state == READY) { // info("scheduler: going to context switch to %d - '%s'", // proc->pid, proc->name); /** Set up TSS for this process, and switch page directory. */ cli_push(); gdt_switch_tss(&(cpu_state.task_state), proc); paging_switch_pgdir(proc->pgdir); cli_pop(); cpu_state.running_proc = proc; proc->state = RUNNING; /** * Force `int_enabled` to be true, though this is not * necessary because the scheduled process must do `iret` * before going back to user space, and that pops an * EFLAGS register value with interrupt enable flag on. */ cpu_state.int_enabled = true; /** Do the context switch. */ context_switch(&(cpu_state.scheduler), proc->context); /** It switches back, switch to kernel page directory. */ paging_switch_pgdir(kernel_pgdir); cpu_state.running_proc = NULL; } } spinlock_release(&ptable_lock); } } /** Get the current scheduled process. */ inline process_t * running_proc(void) { /** Need to disable interrupts since the get might not be atomic. */ cli_push(); process_t *proc = cpu_state.running_proc; cli_pop(); return proc; } /** * Return back to the scheduler context. * Must be called with `ptable_lock` held. */ void yield_to_scheduler(void) { process_t *proc = running_proc(); assert(proc->state != RUNNING); assert(!interrupt_enabled()); assert(cpu_state.cli_depth == 1); assert(spinlock_locked(&ptable_lock)); /** * Save & restore int_enable state because this state is essentially * a per-process state instead of a pre-CPU state. Imagine the case * where process A forks child B and gets timer-interrupted, yields * to the scheduler (so cli stack depth is 1 with `int_enabled` being * false), and the scheduler picks B to run which starts with the * `_new_process_entry()` function in `process.c` that pops the held * cli. The return back to user mode (return-from-trap) will pop the * EFLAGS register stored on kernel stack of B, whose value should be * 0x202 that enables interrupts in user mode execution of B, which * is good. If B ever calls `cli_push()` e.g. in a syscall, the CPU * `int_enabled` will become overwritten as true. When it comes back * to A's turn, A's false `int_enabled` state gets lost. 
Hence, we * save & restore this state here to let it become somewhat process- * private. */ bool int_enabled = cpu_state.int_enabled; context_switch(&(proc->context), cpu_state.scheduler); cpu_state.int_enabled = int_enabled; } /** Initialize CPU state. */ void cpu_init(void) { cpu_state.scheduler = NULL; /** Will be set at context switch. */ cpu_state.running_proc = NULL; memset(&(cpu_state.task_state), 0, sizeof(tss_t)); cpu_state.int_enabled = true; cpu_state.cli_depth = 0; }
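For reference, here is a hypothetical sketch of the calling pattern that `yield_to_scheduler()` expects, pieced together from its asserts and from how `process_block()` is used elsewhere in the tree: the caller holds `ptable_lock`, has already moved itself out of RUNNING, and sits at cli depth 1. Everything except the names declared in scheduler.h/process.h is invented, so treat this as an illustration rather than repo code.

/* Hypothetical caller of yield_to_scheduler(), e.g. a voluntary yield. */
#include "scheduler.h"
#include "process.h"
#include "../common/spinlock.h"

void example_voluntary_yield(void)
{
    spinlock_acquire(&ptable_lock);    /* must hold the process table lock   */
    running_proc()->state = READY;     /* no longer RUNNING                  */
    yield_to_scheduler();              /* context switch into the scheduler  */
    /* ... execution resumes here when this process is scheduled again ...   */
    spinlock_release(&ptable_lock);
}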
josehu07/hux-kernel
user/tests/filetest.c
/** * User test program - file system operations. */ #include <stdint.h> #include <stdbool.h> #include <stddef.h> #include "../lib/debug.h" #include "../lib/printf.h" #include "../lib/syscall.h" void main(int argc, char *argv[]) { (void) argc; // Unused. (void) argv; char dirname[128] = "temp"; char filepath[128] = "temp/test.txt"; char filename[128] = "test.txt"; printf("[P] Created dir '%s' -> %d\n", dirname, create(dirname, CREATE_DIR)); printf("[P] Created file '%s' -> %d\n", filepath, create(filepath, CREATE_FILE)); printf("[P] Changed cwd to '%s' -> %d\n", dirname, chdir(dirname)); int8_t pid = fork(0); assert(pid >= 0); if (pid == 0) { // Child. char cwd[100]; printf("[C] Called getcwd -> %d\n", getcwd(cwd, 100)); printf(" cwd: %s\n", cwd); int8_t fd = open(filename, OPEN_WR); printf("[C] Opened file '%s' -> %d\n", filename, fd); printf("[C] Written to fd %d -> %d\n", fd, write(fd, "AAAAA", 5)); printf(" src: %s\n", "AAAAA"); exit(); } else { // Parent. assert(wait() == pid); printf("[P] Changed cwd to '%s' -> %d\n", "./..", chdir("./..")); int8_t fd = open(filepath, OPEN_RD); printf("[P] Opened file '%s' -> %d\n", filepath, fd); char buf[6] = {0}; printf("[P] Read from fd %d -> %d\n", fd, read(fd, buf, 5)); printf(" dst: %s\n", buf); printf("[P] Closing fd %d -> %d\n", fd, close(fd)); printf("[P] Removing file '%s' -> %d\n", filepath, remove(filepath)); printf("[P] Removing dir '%s' -> %d\n", dirname, remove(dirname)); } exit(); }
josehu07/hux-kernel
user/tests/schedtest.c
<gh_stars>10-100 /** * User test program - weighted scheduling. */ #include <stdint.h> #include <stdbool.h> #include <stddef.h> #include "../lib/debug.h" #include "../lib/printf.h" #include "../lib/syscall.h" static void _test_child_workload(void) { int32_t res = 0; for (int32_t i = 12345; i < 57896; ++i) for (int32_t j = i; j > 12345; --j) res += (i * j) % 567; printf("res %d: %d\n", getpid(), res); } void main(int argc, char *argv[]) { (void) argc; // Unused. (void) argv; int8_t i; printf("parent: forking...\n"); for (i = 1; i <= 3; ++i) { int8_t pid = fork(i*4); if (pid < 0) error("parent: forking child i=%d failed", i); if (pid == 0) { // Child. _test_child_workload(); exit(); } else printf("parent: forked child pid=%d, timeslice=%d\n", pid, i*4); } printf("parent: waiting...\n"); for (i = 1; i <= 3; ++i) { int8_t pid = wait(); printf("parent: waited child pid=%d\n", pid); } exit(); }
josehu07/hux-kernel
src/interrupt/syscall.h
/** * System call-related definitions and handler wrappers. */ #ifndef SYSCALL_H #define SYSCALL_H #include <stdint.h> #include <stddef.h> #include <stdbool.h> #include "isr.h" /** Syscall trap gate registerd at a vacant ISR number. */ #define INT_NO_SYSCALL 64 /** == 0x40 */ /** List of known syscall numbers. */ #define SYSCALL_GETPID 1 #define SYSCALL_FORK 2 #define SYSCALL_EXIT 3 #define SYSCALL_SLEEP 4 #define SYSCALL_WAIT 5 #define SYSCALL_KILL 6 #define SYSCALL_TPRINT 7 #define SYSCALL_UPTIME 8 #define SYSCALL_KBDSTR 9 #define SYSCALL_SETHEAP 10 #define SYSCALL_OPEN 11 #define SYSCALL_CLOSE 12 #define SYSCALL_CREATE 13 #define SYSCALL_REMOVE 14 #define SYSCALL_READ 15 #define SYSCALL_WRITE 16 #define SYSCALL_CHDIR 17 #define SYSCALL_GETCWD 18 #define SYSCALL_EXEC 19 #define SYSCALL_FSTAT 20 #define SYSCALL_SEEK 21 /** * Task state segment (TSS) x86 IA32 format, * see https://wiki.osdev.org/Task_State_Segment#x86_Structure. */ struct task_state_segment { uint32_t link; /** Old TS selector. */ uint32_t esp0; /** Stack pointer after privilege level boost. */ uint8_t ss0; /** Segment selector after privilege level boost. */ uint8_t pad1; uint32_t esp1; uint8_t ss1; uint8_t pad2; uint32_t esp2; uint8_t ss2; uint8_t pad3; uint32_t cr3; /** Page directory base address. */ uint32_t eip; /** Saved EIP from last task switch. Same for below. */ uint32_t eflags; uint32_t eax; uint32_t ecx; uint32_t edx; uint32_t ebx; uint32_t esp; uint32_t ebp; uint32_t esi; uint32_t edi; uint8_t es; uint8_t pad4; uint8_t cs; uint8_t pad5; uint8_t ss; uint8_t pad6; uint8_t ds; uint8_t pad7; uint8_t fs; uint8_t pad8; uint8_t gs; uint8_t pad9; uint8_t ldt; uint8_t pad10; uint8_t pad11; uint8_t iopb; /** I/O map base address. */ } __attribute__((packed)); typedef struct task_state_segment tss_t; /** Individual syscall handler type: void -> int32_t. */ typedef int32_t (*syscall_t)(void); /** Syscall unsuccessful return code. */ #define SYS_FAIL_RC (-1) void syscall(interrupt_state_t *state); bool sysarg_addr_int(uint32_t addr, int32_t *ret); bool sysarg_addr_uint(uint32_t addr, uint32_t *ret); bool sysarg_addr_mem(uint32_t addr, char **mem, size_t len); int32_t sysarg_addr_str(uint32_t addr, char **str); bool sysarg_get_int(int8_t n, int32_t *ret); bool sysarg_get_uint(int8_t n, uint32_t *ret); bool sysarg_get_mem(int8_t n, char **mem, size_t len); int32_t sysarg_get_str(int8_t n, char **str); #endif
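Handlers built on this header follow a common shape: fetch each argument by position with a sysarg_* helper and bail out with SYS_FAIL_RC on any bad user pointer. The sketch below mirrors that shape (compare `syscall_setheap()` in sysmem.c); `syscall_example` and its arguments are hypothetical.

/* Hypothetical syscall handler sketch using the sysarg_* helpers. */
#include "syscall.h"

int32_t syscall_example(void)
{
    int32_t value;
    char *buf;

    if (!sysarg_get_int(0, &value))       /* argument #0: an int            */
        return SYS_FAIL_RC;
    if (!sysarg_get_mem(1, &buf, 16))     /* argument #1: a 16-byte buffer  */
        return SYS_FAIL_RC;

    buf[0] = (char) value;                /* ... do the actual work here ... */
    return 0;                             /* 0 == success by convention      */
}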
josehu07/hux-kernel
src/memory/sysmem.c
/** * Syscalls related to user memory allocation. */ #include <stdint.h> #include "sysmem.h" #include "paging.h" #include "../common/debug.h" #include "../common/string.h" #include "../interrupt/syscall.h" #include "../process/scheduler.h" /** int32_t setheap(uint32_t new_top); */ int32_t syscall_setheap(void) { process_t *proc = running_proc(); uint32_t new_top; if (!sysarg_get_uint(0, &new_top)) return SYS_FAIL_RC; if (new_top < proc->heap_high) { warn("setheap: does not support shrinking heap"); return SYS_FAIL_RC; } if (new_top > proc->stack_low) { warn("setheap: heap meets stack, heap overflow"); return SYS_FAIL_RC; } /** * Compare with current heap page allocation top. If exceeds the top * page, allocate new pages accordingly. */ uint32_t heap_page_high = ADDR_PAGE_ROUND_UP(proc->heap_high); for (uint32_t vaddr = heap_page_high; vaddr < new_top; vaddr += PAGE_SIZE) { pte_t *pte = paging_walk_pgdir(proc->pgdir, vaddr, true); if (pte == NULL) { warn("setheap: cannot walk pgdir, out of kheap memory?"); paging_unmap_range(proc->pgdir, heap_page_high, vaddr); return SYS_FAIL_RC; } uint32_t paddr = paging_map_upage(pte, true); if (paddr == 0) { warn("setheap: cannot map new page, out of memory?"); paging_unmap_range(proc->pgdir, heap_page_high, vaddr); return SYS_FAIL_RC; } memset((char *) paddr, 0, PAGE_SIZE); } proc->heap_high = new_top; return 0; }
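The mapping loop above walks from the page-rounded current heap top up to the requested new top, one page per iteration. This host-runnable sketch (not repo code) reproduces just that arithmetic, with the macro bodies copied from paging.h and made-up example addresses.

/* Host-runnable sketch of the page-mapping arithmetic in syscall_setheap(). */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096
#define ADDR_PAGE_ROUND_DN(addr) ((addr) & 0xFFFFF000)
#define ADDR_PAGE_ROUND_UP(addr) (ADDR_PAGE_ROUND_DN((addr) + 0x00000FFF))

int main(void)
{
    uint32_t heap_high = 0x20001200;              /* current heap top (example) */
    uint32_t new_top   = 0x20004000;              /* requested new top          */
    uint32_t first     = ADDR_PAGE_ROUND_UP(heap_high);

    unsigned pages = 0;
    for (uint32_t vaddr = first; vaddr < new_top; vaddr += PAGE_SIZE)
        pages++;                                  /* one paging_map_upage() each */

    printf("would map %u new page(s) starting at %#x\n", pages, (unsigned) first);
    return 0;
}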
josehu07/hux-kernel
src/device/timer.h
<filename>src/device/timer.h /** * Programmable interval timer (PIT) in square wave generator mode to * serve as the system timer. */ #ifndef TIMER_H #define TIMER_H #include <stdint.h> #include "../common/spinlock.h" /** Timer interrupt frequency in Hz. */ #define TIMER_FREQ_HZ 100 /** Extern the global timer ticks value to the scheduler. */ extern uint32_t timer_tick; extern spinlock_t timer_tick_lock; void timer_init(); #endif
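The PIT's input clock runs at the standard 1193182 Hz, so a 100 Hz tick means programming a divisor of roughly 11931. The timer.c setup itself is not shown in this dump, so the host-runnable sketch below only demonstrates the divisor arithmetic, not the actual port writes.

/* Host-runnable sketch of the PIT divisor math implied by TIMER_FREQ_HZ. */
#include <stdio.h>
#include <stdint.h>

#define TIMER_FREQ_HZ 100

int main(void)
{
    uint32_t divisor = 1193182 / TIMER_FREQ_HZ;   /* = 11931 for 100 Hz */
    printf("PIT divisor for %d Hz: %u (low byte %#x, high byte %#x)\n",
           TIMER_FREQ_HZ, (unsigned) divisor,
           (unsigned) (divisor & 0xFF), (unsigned) ((divisor >> 8) & 0xFF));
    return 0;
}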
josehu07/hux-kernel
src/process/sysproc.h
/** * Syscalls related to process state & operations. */ #ifndef SYSPROC_H #define SYSPROC_H #include <stdint.h> int32_t syscall_getpid(); int32_t syscall_fork(); int32_t syscall_exit(); int32_t syscall_sleep(); int32_t syscall_wait(); int32_t syscall_kill(); #endif
josehu07/hux-kernel
src/common/spinlock.h
<reponame>josehu07/hux-kernel /** * Spinlock implementation (synonym to `cli_push()`/`cli_pop()` pairs * in single-CPU Hux). */ #ifndef SPINLOCK_H #define SPINLOCK_H #include <stdint.h> #include <stdbool.h> /** Simple spinlock structure. */ struct spinlock { uint32_t locked; /** 0 unlocked, 1 locked, changes must be atomic. */ const char *name; /** Lock name for debugging. */ }; typedef struct spinlock spinlock_t; void spinlock_acquire(spinlock_t *lock); void spinlock_release(spinlock_t *lock); bool spinlock_locked(spinlock_t *lock); void spinlock_init(spinlock_t *lock, const char *name); #endif
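A minimal usage sketch of this API (kernel context only): the lock name, counter, and functions below are invented for illustration. Since acquiring also disables interrupts on single-CPU Hux, the critical section should stay short.

/* Hypothetical spinlock usage sketch. */
#include <stdint.h>
#include "spinlock.h"

static spinlock_t stats_lock;
static uint32_t irq_count = 0;

void stats_init(void)
{
    spinlock_init(&stats_lock, "stats_lock");
}

void stats_bump(void)
{
    spinlock_acquire(&stats_lock);   /* spin (and cli) until we own the lock */
    irq_count++;                     /* short critical section               */
    spinlock_release(&stats_lock);
}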
josehu07/hux-kernel
src/common/bitmap.h
<filename>src/common/bitmap.h /** * Bitmap data structure used in paging, file system, etc. */ #ifndef BITMAP_H #define BITMAP_H #include <stdint.h> #include <stdbool.h> #include "../common/spinlock.h" /** Bitmap is simply a contiguous array of bits. */ struct bitmap { uint8_t *bits; /** Must be zeroed out at initialization. */ uint32_t slots; /** Must be a multiple of 8. */ spinlock_t lock; /** Lock protecting this bitmap. */ }; typedef struct bitmap bitmap_t; /** * Every bit indicates the free/used state of a corresponding slot * of something. Slot numbers map one-to-one to bit indices. */ #define BITMAP_OUTER_IDX(slot_num) ((slot_num) / 8) #define BITMAP_INNER_IDX(slot_num) ((slot_num) % 8) void bitmap_set(bitmap_t *bitmap, uint32_t slot_no); void bitmap_clear(bitmap_t *bitmap, uint32_t slot_no); bool bitmap_check(bitmap_t *bitmap, uint32_t slot_no); uint32_t bitmap_alloc(bitmap_t *bitmap); void bitmap_init(bitmap_t *bitmap, uint8_t *bits, uint32_t slots); #endif
josehu07/hux-kernel
src/device/keyboard.c
/** * PS/2 keyboard input support. */ #include <stdint.h> #include <stdbool.h> #include "keyboard.h" #include "../common/port.h" #include "../common/printf.h" #include "../common/debug.h" #include "../common/string.h" #include "../common/spinlock.h" #include "../display/vga.h" #include "../display/terminal.h" #include "../interrupt/isr.h" #include "../process/process.h" #include "../process/scheduler.h" /** * Hardcode scancode -> key event mapping. * * Check out https://wiki.osdev.org/Keyboard#Scan_Code_Set_1 * for a complete list of mappings. * * We will only code a partial set of mappings - only those most * useful events. */ #define NO_KEY { .press = false, .ascii = false, .info = { .meta = KEY_NULL } } static keyboard_key_event_t scancode_event_map[0xE0] = { NO_KEY, // 0x00 { .press = true , .ascii = false, .info = { .meta = KEY_ESC } }, // 0x01 { .press = true , .ascii = true , .info = { .codel = '1' , .codeu = '!' } }, // 0x02 { .press = true , .ascii = true , .info = { .codel = '2' , .codeu = '@' } }, // 0x03 { .press = true , .ascii = true , .info = { .codel = '3' , .codeu = '#' } }, // 0x04 { .press = true , .ascii = true , .info = { .codel = '4' , .codeu = '$' } }, // 0x05 { .press = true , .ascii = true , .info = { .codel = '5' , .codeu = '%' } }, // 0x06 { .press = true , .ascii = true , .info = { .codel = '6' , .codeu = '^' } }, // 0x07 { .press = true , .ascii = true , .info = { .codel = '7' , .codeu = '&' } }, // 0x08 { .press = true , .ascii = true , .info = { .codel = '8' , .codeu = '*' } }, // 0x09 { .press = true , .ascii = true , .info = { .codel = '9' , .codeu = '(' } }, // 0x0A { .press = true , .ascii = true , .info = { .codel = '0' , .codeu = ')' } }, // 0x0B { .press = true , .ascii = true , .info = { .codel = '-' , .codeu = '_' } }, // 0x0C { .press = true , .ascii = true , .info = { .codel = '=' , .codeu = '+' } }, // 0x0D { .press = true , .ascii = false, .info = { .meta = KEY_BACK } }, // 0x0E { .press = true , .ascii = false, .info = { .meta = KEY_TAB } }, // 0x0F { .press = true , .ascii = true , .info = { .codel = 'q' , .codeu = 'Q' } }, // 0x10 { .press = true , .ascii = true , .info = { .codel = 'w' , .codeu = 'W' } }, // 0x11 { .press = true , .ascii = true , .info = { .codel = 'e' , .codeu = 'E' } }, // 0x12 { .press = true , .ascii = true , .info = { .codel = 'r' , .codeu = 'R' } }, // 0x13 { .press = true , .ascii = true , .info = { .codel = 't' , .codeu = 'T' } }, // 0x14 { .press = true , .ascii = true , .info = { .codel = 'y' , .codeu = 'Y' } }, // 0x15 { .press = true , .ascii = true , .info = { .codel = 'u' , .codeu = 'U' } }, // 0x16 { .press = true , .ascii = true , .info = { .codel = 'i' , .codeu = 'I' } }, // 0x17 { .press = true , .ascii = true , .info = { .codel = 'o' , .codeu = 'O' } }, // 0x18 { .press = true , .ascii = true , .info = { .codel = 'p' , .codeu = 'P' } }, // 0x19 { .press = true , .ascii = true , .info = { .codel = '[' , .codeu = '{' } }, // 0x1A { .press = true , .ascii = true , .info = { .codel = ']' , .codeu = '}' } }, // 0x1B { .press = true , .ascii = false, .info = { .meta = KEY_ENTER } }, // 0x1C { .press = true , .ascii = false, .info = { .meta = KEY_CTRL } }, // 0x1D { .press = true , .ascii = true , .info = { .codel = 'a' , .codeu = 'A' } }, // 0x1E { .press = true , .ascii = true , .info = { .codel = 's' , .codeu = 'S' } }, // 0x1F { .press = true , .ascii = true , .info = { .codel = 'd' , .codeu = 'D' } }, // 0x20 { .press = true , .ascii = true , .info = { .codel = 'f' , .codeu = 'F' } }, // 0x21 { .press = true 
, .ascii = true , .info = { .codel = 'g' , .codeu = 'G' } }, // 0x22 { .press = true , .ascii = true , .info = { .codel = 'h' , .codeu = 'H' } }, // 0x23 { .press = true , .ascii = true , .info = { .codel = 'j' , .codeu = 'J' } }, // 0x24 { .press = true , .ascii = true , .info = { .codel = 'k' , .codeu = 'K' } }, // 0x25 { .press = true , .ascii = true , .info = { .codel = 'l' , .codeu = 'L' } }, // 0x26 { .press = true , .ascii = true , .info = { .codel = ';' , .codeu = ':' } }, // 0x27 { .press = true , .ascii = true , .info = { .codel = '\'', .codeu = '"' } }, // 0x28 { .press = true , .ascii = true , .info = { .codel = '`' , .codeu = '~' } }, // 0x29 { .press = true , .ascii = false, .info = { .meta = KEY_SHIFT } }, // 0x2A { .press = true , .ascii = true , .info = { .codel = '\\', .codeu = '|' } }, // 0x2B { .press = true , .ascii = true , .info = { .codel = 'z' , .codeu = 'Z' } }, // 0x2C { .press = true , .ascii = true , .info = { .codel = 'x' , .codeu = 'X' } }, // 0x2D { .press = true , .ascii = true , .info = { .codel = 'c' , .codeu = 'C' } }, // 0x2E { .press = true , .ascii = true , .info = { .codel = 'v' , .codeu = 'V' } }, // 0x2F { .press = true , .ascii = true , .info = { .codel = 'b' , .codeu = 'B' } }, // 0x30 { .press = true , .ascii = true , .info = { .codel = 'n' , .codeu = 'N' } }, // 0x31 { .press = true , .ascii = true , .info = { .codel = 'm' , .codeu = 'M' } }, // 0x32 { .press = true , .ascii = true , .info = { .codel = ',' , .codeu = '<' } }, // 0x33 { .press = true , .ascii = true , .info = { .codel = '.' , .codeu = '>' } }, // 0x34 { .press = true , .ascii = true , .info = { .codel = '/' , .codeu = '?' } }, // 0x35 { .press = true , .ascii = false, .info = { .meta = KEY_SHIFT } }, // 0x36 NO_KEY, // 0x37 { .press = true , .ascii = false, .info = { .meta = KEY_ALT } }, // 0x38 { .press = true , .ascii = true , .info = { .codel = ' ' , .codeu = ' ' } }, // 0x39 { .press = true , .ascii = false, .info = { .meta = KEY_CAPS } }, // 0x3A NO_KEY, // 0x3B NO_KEY, // 0x3C NO_KEY, // 0x3D NO_KEY, // 0x3E NO_KEY, // 0x3F NO_KEY, // 0x40 NO_KEY, // 0x41 NO_KEY, // 0x42 NO_KEY, // 0x43 NO_KEY, // 0x44 NO_KEY, // 0x45 NO_KEY, // 0x46 NO_KEY, // 0x47 NO_KEY, // 0x48 NO_KEY, // 0x49 NO_KEY, // 0x4A NO_KEY, // 0x4B NO_KEY, // 0x4C NO_KEY, // 0x4D NO_KEY, // 0x4E NO_KEY, // 0x4F NO_KEY, // 0x50 NO_KEY, // 0x51 NO_KEY, // 0x52 NO_KEY, // 0x53 NO_KEY, // 0x54 NO_KEY, // 0x55 NO_KEY, // 0x56 NO_KEY, // 0x57 NO_KEY, // 0x58 NO_KEY, // 0x59 NO_KEY, // 0x5A NO_KEY, // 0x5B NO_KEY, // 0x5C NO_KEY, // 0x5D NO_KEY, // 0x5E NO_KEY, // 0x5F NO_KEY, // 0x60 NO_KEY, // 0x61 NO_KEY, // 0x62 NO_KEY, // 0x63 NO_KEY, // 0x64 NO_KEY, // 0x65 NO_KEY, // 0x66 NO_KEY, // 0x67 NO_KEY, // 0x68 NO_KEY, // 0x69 NO_KEY, // 0x6A NO_KEY, // 0x6B NO_KEY, // 0x6C NO_KEY, // 0x6D NO_KEY, // 0x6E NO_KEY, // 0x6F NO_KEY, // 0x70 NO_KEY, // 0x71 NO_KEY, // 0x72 NO_KEY, // 0x73 NO_KEY, // 0x74 NO_KEY, // 0x75 NO_KEY, // 0x76 NO_KEY, // 0x77 NO_KEY, // 0x78 NO_KEY, // 0x79 NO_KEY, // 0x7A NO_KEY, // 0x7B NO_KEY, // 0x7C NO_KEY, // 0x7D NO_KEY, // 0x7E NO_KEY, // 0x7F NO_KEY, // 0x80 { .press = false, .ascii = false, .info = { .meta = KEY_ESC } }, // 0x81 { .press = false, .ascii = true , .info = { .codel = '1' , .codeu = '!' 
} }, // 0x82 { .press = false, .ascii = true , .info = { .codel = '2' , .codeu = '@' } }, // 0x83 { .press = false, .ascii = true , .info = { .codel = '3' , .codeu = '#' } }, // 0x84 { .press = false, .ascii = true , .info = { .codel = '4' , .codeu = '$' } }, // 0x85 { .press = false, .ascii = true , .info = { .codel = '5' , .codeu = '%' } }, // 0x86 { .press = false, .ascii = true , .info = { .codel = '6' , .codeu = '^' } }, // 0x87 { .press = false, .ascii = true , .info = { .codel = '7' , .codeu = '&' } }, // 0x88 { .press = false, .ascii = true , .info = { .codel = '8' , .codeu = '*' } }, // 0x89 { .press = false, .ascii = true , .info = { .codel = '9' , .codeu = '(' } }, // 0x8A { .press = false, .ascii = true , .info = { .codel = '0' , .codeu = ')' } }, // 0x8B { .press = false, .ascii = true , .info = { .codel = '-' , .codeu = '_' } }, // 0x8C { .press = false, .ascii = true , .info = { .codel = '=' , .codeu = '+' } }, // 0x8D { .press = false, .ascii = false, .info = { .meta = KEY_BACK } }, // 0x8E { .press = false, .ascii = false, .info = { .meta = KEY_TAB } }, // 0x8F { .press = false, .ascii = true , .info = { .codel = 'q' , .codeu = 'Q' } }, // 0x90 { .press = false, .ascii = true , .info = { .codel = 'w' , .codeu = 'W' } }, // 0x91 { .press = false, .ascii = true , .info = { .codel = 'e' , .codeu = 'E' } }, // 0x92 { .press = false, .ascii = true , .info = { .codel = 'r' , .codeu = 'R' } }, // 0x93 { .press = false, .ascii = true , .info = { .codel = 't' , .codeu = 'T' } }, // 0x94 { .press = false, .ascii = true , .info = { .codel = 'y' , .codeu = 'Y' } }, // 0x95 { .press = false, .ascii = true , .info = { .codel = 'u' , .codeu = 'U' } }, // 0x96 { .press = false, .ascii = true , .info = { .codel = 'i' , .codeu = 'I' } }, // 0x97 { .press = false, .ascii = true , .info = { .codel = 'o' , .codeu = 'O' } }, // 0x98 { .press = false, .ascii = true , .info = { .codel = 'p' , .codeu = 'P' } }, // 0x99 { .press = false, .ascii = true , .info = { .codel = '[' , .codeu = '{' } }, // 0x9A { .press = false, .ascii = true , .info = { .codel = ']' , .codeu = '}' } }, // 0x9B { .press = false, .ascii = false, .info = { .meta = KEY_ENTER } }, // 0x9C { .press = false, .ascii = false, .info = { .meta = KEY_CTRL } }, // 0x9D { .press = false, .ascii = true , .info = { .codel = 'a' , .codeu = 'A' } }, // 0x9E { .press = false, .ascii = true , .info = { .codel = 's' , .codeu = 'S' } }, // 0x9F { .press = false, .ascii = true , .info = { .codel = 'd' , .codeu = 'D' } }, // 0xA0 { .press = false, .ascii = true , .info = { .codel = 'f' , .codeu = 'F' } }, // 0xA1 { .press = false, .ascii = true , .info = { .codel = 'g' , .codeu = 'G' } }, // 0xA2 { .press = false, .ascii = true , .info = { .codel = 'h' , .codeu = 'H' } }, // 0xA3 { .press = false, .ascii = true , .info = { .codel = 'j' , .codeu = 'J' } }, // 0xA4 { .press = false, .ascii = true , .info = { .codel = 'k' , .codeu = 'K' } }, // 0xA5 { .press = false, .ascii = true , .info = { .codel = 'l' , .codeu = 'L' } }, // 0xA6 { .press = false, .ascii = true , .info = { .codel = ';' , .codeu = ':' } }, // 0xA7 { .press = false, .ascii = true , .info = { .codel = '\'', .codeu = '"' } }, // 0xA8 { .press = false, .ascii = true , .info = { .codel = '`' , .codeu = '~' } }, // 0xA9 { .press = false, .ascii = false, .info = { .meta = KEY_SHIFT } }, // 0xAA { .press = false, .ascii = true , .info = { .codel = '\\', .codeu = '|' } }, // 0xAB { .press = false, .ascii = true , .info = { .codel = 'z' , .codeu = 'Z' } }, // 0xAC { .press = false, .ascii 
= true , .info = { .codel = 'x' , .codeu = 'X' } }, // 0xAD { .press = false, .ascii = true , .info = { .codel = 'c' , .codeu = 'C' } }, // 0xAE { .press = false, .ascii = true , .info = { .codel = 'v' , .codeu = 'V' } }, // 0xAF { .press = false, .ascii = true , .info = { .codel = 'b' , .codeu = 'B' } }, // 0xB0 { .press = false, .ascii = true , .info = { .codel = 'n' , .codeu = 'N' } }, // 0xB1 { .press = false, .ascii = true , .info = { .codel = 'm' , .codeu = 'M' } }, // 0xB2 { .press = false, .ascii = true , .info = { .codel = ',' , .codeu = '<' } }, // 0xB3 { .press = false, .ascii = true , .info = { .codel = '.' , .codeu = '>' } }, // 0xB4 { .press = false, .ascii = true , .info = { .codel = '/' , .codeu = '?' } }, // 0xB5 { .press = false, .ascii = false, .info = { .meta = KEY_SHIFT } }, // 0xB6 NO_KEY, // 0xB7 { .press = false, .ascii = false, .info = { .meta = KEY_ALT } }, // 0xB8 { .press = false, .ascii = true , .info = { .codel = ' ' , .codeu = ' ' } }, // 0xB9 { .press = false, .ascii = false, .info = { .meta = KEY_CAPS } }, // 0xBA NO_KEY, // 0xBB NO_KEY, // 0xBC NO_KEY, // 0xBD NO_KEY, // 0xBE NO_KEY, // 0xBF NO_KEY, // 0xC0 NO_KEY, // 0xC1 NO_KEY, // 0xC2 NO_KEY, // 0xC3 NO_KEY, // 0xC4 NO_KEY, // 0xC5 NO_KEY, // 0xC6 NO_KEY, // 0xC7 NO_KEY, // 0xC8 NO_KEY, // 0xC9 NO_KEY, // 0xCA NO_KEY, // 0xCB NO_KEY, // 0xCC NO_KEY, // 0xCD NO_KEY, // 0xCE NO_KEY, // 0xCF NO_KEY, // 0xD0 NO_KEY, // 0xD1 NO_KEY, // 0xD2 NO_KEY, // 0xD3 NO_KEY, // 0xD4 NO_KEY, // 0xD5 NO_KEY, // 0xD6 NO_KEY, // 0xD7 NO_KEY, // 0xD8 NO_KEY, // 0xD9 NO_KEY, // 0xDA NO_KEY, // 0xDB NO_KEY, // 0xDC NO_KEY, // 0xDD NO_KEY, // 0xDE NO_KEY, // 0xDF }; static keyboard_key_event_t extendcode_event_map[0xE0] = { NO_KEY, // 0x00 NO_KEY, // 0x01 NO_KEY, // 0x02 NO_KEY, // 0x03 NO_KEY, // 0x04 NO_KEY, // 0x05 NO_KEY, // 0x06 NO_KEY, // 0x07 NO_KEY, // 0x08 NO_KEY, // 0x09 NO_KEY, // 0x0A NO_KEY, // 0x0B NO_KEY, // 0x0C NO_KEY, // 0x0D NO_KEY, // 0x0E NO_KEY, // 0x0F NO_KEY, // 0x10 NO_KEY, // 0x11 NO_KEY, // 0x12 NO_KEY, // 0x13 NO_KEY, // 0x14 NO_KEY, // 0x15 NO_KEY, // 0x16 NO_KEY, // 0x17 NO_KEY, // 0x18 NO_KEY, // 0x19 NO_KEY, // 0x1A NO_KEY, // 0x1B NO_KEY, // 0x1C { .press = true , .ascii = false, .info = { .meta = KEY_CTRL } }, // 0x1D NO_KEY, // 0x1E NO_KEY, // 0x1F NO_KEY, // 0x20 NO_KEY, // 0x21 NO_KEY, // 0x22 NO_KEY, // 0x23 NO_KEY, // 0x24 NO_KEY, // 0x25 NO_KEY, // 0x26 NO_KEY, // 0x27 NO_KEY, // 0x28 NO_KEY, // 0x29 NO_KEY, // 0x2A NO_KEY, // 0x2B NO_KEY, // 0x2C NO_KEY, // 0x2D NO_KEY, // 0x2E NO_KEY, // 0x2F NO_KEY, // 0x30 NO_KEY, // 0x31 NO_KEY, // 0x32 NO_KEY, // 0x33 NO_KEY, // 0x34 NO_KEY, // 0x35 NO_KEY, // 0x36 NO_KEY, // 0x37 { .press = true , .ascii = false, .info = { .meta = KEY_ALT } }, // 0x38 NO_KEY, // 0x39 NO_KEY, // 0x3A NO_KEY, // 0x3B NO_KEY, // 0x3C NO_KEY, // 0x3D NO_KEY, // 0x3E NO_KEY, // 0x3F NO_KEY, // 0x40 NO_KEY, // 0x41 NO_KEY, // 0x42 NO_KEY, // 0x43 NO_KEY, // 0x44 NO_KEY, // 0x45 NO_KEY, // 0x46 { .press = true , .ascii = false, .info = { .meta = KEY_HOME } }, // 0x47 { .press = true , .ascii = false, .info = { .meta = KEY_UP } }, // 0x48 { .press = true , .ascii = false, .info = { .meta = KEY_PGUP } }, // 0x49 NO_KEY, // 0x4A { .press = true , .ascii = false, .info = { .meta = KEY_LEFT } }, // 0x4B NO_KEY, // 0x4C { .press = true , .ascii = false, .info = { .meta = KEY_RIGHT } }, // 0x4D NO_KEY, // 0x4E { .press = true , .ascii = false, .info = { .meta = KEY_END } }, // 0x4F { .press = true , .ascii = false, .info = { .meta = KEY_DOWN } }, // 0x50 { .press = true , 
.ascii = false, .info = { .meta = KEY_PGDN } }, // 0x51 { .press = true , .ascii = false, .info = { .meta = KEY_INS } }, // 0x52 { .press = true , .ascii = false, .info = { .meta = KEY_DEL } }, // 0x53 NO_KEY, // 0x54 NO_KEY, // 0x55 NO_KEY, // 0x56 NO_KEY, // 0x57 NO_KEY, // 0x58 NO_KEY, // 0x59 NO_KEY, // 0x5A NO_KEY, // 0x5B NO_KEY, // 0x5C NO_KEY, // 0x5D NO_KEY, // 0x5E NO_KEY, // 0x5F NO_KEY, // 0x60 NO_KEY, // 0x61 NO_KEY, // 0x62 NO_KEY, // 0x63 NO_KEY, // 0x64 NO_KEY, // 0x65 NO_KEY, // 0x66 NO_KEY, // 0x67 NO_KEY, // 0x68 NO_KEY, // 0x69 NO_KEY, // 0x6A NO_KEY, // 0x6B NO_KEY, // 0x6C NO_KEY, // 0x6D NO_KEY, // 0x6E NO_KEY, // 0x6F NO_KEY, // 0x70 NO_KEY, // 0x71 NO_KEY, // 0x72 NO_KEY, // 0x73 NO_KEY, // 0x74 NO_KEY, // 0x75 NO_KEY, // 0x76 NO_KEY, // 0x77 NO_KEY, // 0x78 NO_KEY, // 0x79 NO_KEY, // 0x7A NO_KEY, // 0x7B NO_KEY, // 0x7C NO_KEY, // 0x7D NO_KEY, // 0x7E NO_KEY, // 0x7F NO_KEY, // 0x80 NO_KEY, // 0x81 NO_KEY, // 0x82 NO_KEY, // 0x83 NO_KEY, // 0x84 NO_KEY, // 0x85 NO_KEY, // 0x86 NO_KEY, // 0x87 NO_KEY, // 0x88 NO_KEY, // 0x89 NO_KEY, // 0x8A NO_KEY, // 0x8B NO_KEY, // 0x8C NO_KEY, // 0x8D NO_KEY, // 0x8E NO_KEY, // 0x8F NO_KEY, // 0x90 NO_KEY, // 0x91 NO_KEY, // 0x92 NO_KEY, // 0x93 NO_KEY, // 0x94 NO_KEY, // 0x95 NO_KEY, // 0x96 NO_KEY, // 0x97 NO_KEY, // 0x98 NO_KEY, // 0x99 NO_KEY, // 0x9A NO_KEY, // 0x9B NO_KEY, // 0x9C { .press = false, .ascii = false, .info = { .meta = KEY_CTRL } }, // 0x9D NO_KEY, // 0xA0 NO_KEY, // 0xA1 NO_KEY, // 0xA2 NO_KEY, // 0xA3 NO_KEY, // 0xA4 NO_KEY, // 0xA5 NO_KEY, // 0xA6 NO_KEY, // 0xA7 NO_KEY, // 0xA8 NO_KEY, // 0xA9 NO_KEY, // 0xAA NO_KEY, // 0xAB NO_KEY, // 0xAC NO_KEY, // 0xAD NO_KEY, // 0xAE NO_KEY, // 0xAF NO_KEY, // 0xB0 NO_KEY, // 0xB1 NO_KEY, // 0xB2 NO_KEY, // 0xB3 NO_KEY, // 0xB4 NO_KEY, // 0xB5 NO_KEY, // 0xB6 NO_KEY, // 0xB7 { .press = false, .ascii = false, .info = { .meta = KEY_ALT } }, // 0xB8 NO_KEY, // 0xB9 NO_KEY, // 0xBA NO_KEY, // 0xBB NO_KEY, // 0xBC NO_KEY, // 0xBD NO_KEY, // 0xBE NO_KEY, // 0xBF NO_KEY, // 0xC0 NO_KEY, // 0xC1 NO_KEY, // 0xC2 NO_KEY, // 0xC3 NO_KEY, // 0xC4 NO_KEY, // 0xC5 NO_KEY, // 0xC6 { .press = false, .ascii = false, .info = { .meta = KEY_HOME } }, // 0xC7 { .press = false, .ascii = false, .info = { .meta = KEY_UP } }, // 0xC8 { .press = false, .ascii = false, .info = { .meta = KEY_PGUP } }, // 0xC9 NO_KEY, // 0xCA { .press = false, .ascii = false, .info = { .meta = KEY_LEFT } }, // 0xCB NO_KEY, // 0xCC { .press = false, .ascii = false, .info = { .meta = KEY_RIGHT } }, // 0xCD NO_KEY, // 0xCE { .press = false, .ascii = false, .info = { .meta = KEY_END } }, // 0xCF { .press = false, .ascii = false, .info = { .meta = KEY_DOWN } }, // 0xD0 { .press = false, .ascii = false, .info = { .meta = KEY_PGDN } }, // 0xD1 { .press = false, .ascii = false, .info = { .meta = KEY_INS } }, // 0xD2 { .press = false, .ascii = false, .info = { .meta = KEY_DEL } }, // 0xD3 NO_KEY, // 0xD4 NO_KEY, // 0xD5 NO_KEY, // 0xD6 NO_KEY, // 0xD7 NO_KEY, // 0xD8 NO_KEY, // 0xD9 NO_KEY, // 0xDA NO_KEY, // 0xDB NO_KEY, // 0xDC NO_KEY, // 0xDD NO_KEY, // 0xDE NO_KEY, // 0xDF }; /** A circular buffer for recording the input string from keyboard. */ #define INPUT_BUF_SIZE 256 static char input_buf[INPUT_BUF_SIZE]; /** * These two numbers grow indefinitely. `loc % INPUT_BUF_SIZE` is the * actual index in the circular buffer. */ static size_t input_put_loc = 0; // Place to record the next char. static size_t input_get_loc = 0; // Start of the first unfetched char. /** Upper case triggers, both on means lower case. 
*/ static bool shift_held = false; static bool capslock_on = false; /** If not NULL, that process is listening on keyboard events. */ static process_t *listener_proc = NULL; static spinlock_t keyboard_lock; /** * Keyboard interrupt handler registered for IRQ #1. * Serves keyboard input requests. Interrupts should have been disabled * automatically since this is an interrupt gate. * * Currently only supports lower cased ASCII characters, upper case by * holding SHIFT or activating CAPSLOCK, and newline. Assumes that at * most one process could be listening on keyboard input at the same time. */ static void keyboard_interrupt_handler(interrupt_state_t *state) { (void) state; /** Unused. */ keyboard_key_event_t event = NO_KEY; /** * Read our the event's scancode. Translate the scancode into a key * event, following the scancode set 1 mappings. */ uint8_t scancode = inb(0x60); if (scancode < 0xE0) event = scancode_event_map[scancode]; else if (scancode == 0xE0) { /** Is a key in extended set. */ uint8_t extendcode = inb(0x60); if (extendcode < 0xE0) event = extendcode_event_map[extendcode]; } // if (event.press && event.ascii) // printf("%c", event.info.codel); spinlock_acquire(&keyboard_lock); /** * React only if no overwriting could happen and if a process is * listening on keyboard input. Record the char to the circular buffer, * unblock it when buffer is full or when an ENTER press happens. * Interactively displays the character. */ if (input_put_loc - input_get_loc < INPUT_BUF_SIZE && listener_proc != NULL && listener_proc->state == BLOCKED && listener_proc->block_on == ON_KBDIN) { bool is_enter = !event.ascii && event.info.meta == KEY_ENTER; bool is_back = !event.ascii && event.info.meta == KEY_BACK; bool is_shift = !event.ascii && event.info.meta == KEY_SHIFT; bool is_caps = !event.ascii && event.info.meta == KEY_CAPS; if (!shift_held && event.press && is_shift) shift_held = true; else if (shift_held && !event.press && is_shift) shift_held = false; capslock_on = (event.press && is_caps) ? !capslock_on : capslock_on; bool upper_case = (shift_held != capslock_on); if (event.press && (event.ascii || is_enter)) { char c = !event.ascii ? '\n' : upper_case ? event.info.codeu : event.info.codel; input_buf[(input_put_loc++) % INPUT_BUF_SIZE] = c; printf("%c", c); } else if (event.press && is_back) { if (input_put_loc > input_get_loc) { input_put_loc--; spinlock_acquire(&terminal_lock); terminal_erase(); spinlock_release(&terminal_lock); } } if ((event.press && is_enter) || input_put_loc >= input_get_loc + INPUT_BUF_SIZE) { spinlock_acquire(&ptable_lock); process_unblock(listener_proc); spinlock_release(&ptable_lock); } } spinlock_release(&keyboard_lock); } /** Initialize the PS/2 keyboard device. */ void keyboard_init() { memset(input_buf, 0, sizeof(char) * INPUT_BUF_SIZE); input_put_loc = 0; input_get_loc = 0; shift_held = false; capslock_on = false; listener_proc = NULL; spinlock_init(&keyboard_lock, "keyboard_lock"); /** Register keyboard interrupt ISR handler. */ isr_register(INT_NO_KEYBOARD, &keyboard_interrupt_handler); } /** * Listen on keyboard characters, interpret as an input string, and write the * string into the given buffer. Returns the length of the string actually * fetched, or -1 on errors. * * The listening terminates on any of the following cases: * - A total of `len - 1` bytes have been fetched; * - Got a newline symbol. 
*/ int32_t keyboard_getstr(char *buf, size_t len) { assert(buf != NULL); assert(len > 0); spinlock_acquire(&keyboard_lock); if (listener_proc != NULL) { warn("keyboard_getstr: there is already a keyboard listener"); spinlock_release(&keyboard_lock); return -1; } process_t *proc = running_proc(); listener_proc = proc; input_get_loc = input_put_loc; size_t left = len; while (left > 1) { /** Wait until there are unhandled chars. */ while (input_get_loc == input_put_loc) { if (proc->killed) { spinlock_release(&keyboard_lock); return -1; } spinlock_acquire(&ptable_lock); spinlock_release(&keyboard_lock); process_block(ON_KBDIN); spinlock_release(&ptable_lock); spinlock_acquire(&keyboard_lock); } /** Fetch the next unhandled char. */ char c = input_buf[(input_get_loc++) % INPUT_BUF_SIZE]; buf[len - left] = c; left--; if (c == '\n') /** Newline triggers early break. */ break; } /** Fill a null-terminator to finish the string. */ size_t fetched = len - left; buf[fetched] = '\0'; /** Clear the listener. */ listener_proc = NULL; spinlock_release(&keyboard_lock); return fetched; }
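Two pieces of logic in the handler above are easy to check in isolation: the "counters grow forever, index modulo buffer size" scheme, and the rule that the letter case flips when exactly one of SHIFT / CAPSLOCK is active (`shift_held != capslock_on`). This host-runnable sketch (not repo code) demonstrates both with made-up counter values.

/* Host-runnable sketch of the circular-buffer indexing and case rule. */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define INPUT_BUF_SIZE 256

int main(void)
{
    char buf[INPUT_BUF_SIZE];
    size_t put_loc = 300, get_loc = 298;        /* counters grow indefinitely   */

    buf[(put_loc++) % INPUT_BUF_SIZE] = 'h';    /* lands in slot 300 % 256 = 44 */
    printf("unread chars: %zu\n", put_loc - get_loc);

    for (int shift = 0; shift <= 1; shift++)
        for (int caps = 0; caps <= 1; caps++) {
            bool upper = (shift != caps);       /* XOR of the two toggles */
            printf("shift=%d caps=%d -> %s\n", shift, caps,
                   upper ? "upper" : "lower");
        }
    return 0;
}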
josehu07/hux-kernel
src/memory/gdt.c
/** * Global descriptor table (GDT) related. */ #include <stdint.h> #include <stddef.h> #include "gdt.h" #include "../common/debug.h" #include "../process/process.h" /** * The GDT table. We maintain 6 entries: * 0. a null entry; * 1. kernel mode code segment; * 2. kernel mode data segment; * 3. user mode code segment; * 4. user mode data segment; * 5. task state segment for user mode execution. */ static gdt_entry_t gdt[NUM_SEGMENTS]; /** GDTR address register. */ static gdt_register_t gdtr; /** * Setup one GDT entry. * Here, BASE and LIMIT are in their complete version, ACCESS represents * the access byte, and FLAGS has the 4-bit granularity flags field in its * higer 4 bits. */ static void gdt_set_entry(int idx, uint32_t base, uint32_t limit, uint8_t access, uint8_t flags) { gdt[idx].limit_lo = (uint16_t) (limit & 0xFFFF); gdt[idx].base_lo = (uint16_t) (base & 0xFFFF); gdt[idx].base_mi = (uint8_t) ((base >> 16) & 0xFF); gdt[idx].access = (uint8_t) access; gdt[idx].limit_hi_flags = (uint8_t) ((limit >> 16) & 0x0F); gdt[idx].limit_hi_flags |= (uint8_t) (flags & 0xF0); gdt[idx].base_hi = (uint8_t) ((base >> 24) & 0xFF); } /** Extern our load routine written in ASM `gdt-load.s`. */ extern void gdt_load(uint32_t gdtr_ptr, uint32_t data_selector_offset, uint32_t code_selector_offset); /** * Initialize the global descriptor table (GDT) by setting up the 5 entries * of GDT, setting the GDTR register to point to our GDT address, and then * (through assembly `lgdt` instruction) load our GDT. */ void gdt_init() { /** * First, see https://wiki.osdev.org/Global_Descriptor_Table for a * detailed anatomy of Access Byte and Flags fields. * * Access Byte - * - Pr = 1: present, must be 1 for valid selectors * - Privl = ?: ring level, 0 for kernel and 3 for user mode * - S = 1: should be 1 for all non-system segments * - Ex = ?: executable, 1 for code and 0 for data segment * - DC = * - Direction bit for data selectors, = 0: segment spans up * - Conforming bit for code selectors, = 0: can only be executed * from ring level set * in `Privl` field * - RW = * - Readable bit for code selectors, = 1: allow reading * - Writable bit for data selectors, = 1: allow writing * - Ac = 0: access bit, CPU sets it to 1 when accessing it * Hence, the four values used below. * * Flags - * - Gr = 1: using page granularity * - Sz = 1: in 32-bit protected mode * Hence, 0b1100 -> 0xC for all these four segments. */ gdt_set_entry(SEGMENT_UNUSED, 0u, 0u, 0u, 0u); /** 0-th entry unused. */ gdt_set_entry(SEGMENT_KCODE, 0u, 0xFFFFF, 0x9A, 0xC0); gdt_set_entry(SEGMENT_KDATA, 0u, 0xFFFFF, 0x92, 0xC0); gdt_set_entry(SEGMENT_UCODE, 0u, 0xFFFFF, 0xFA, 0xC0); gdt_set_entry(SEGMENT_UDATA, 0u, 0xFFFFF, 0xF2, 0xC0); /** Setup the GDTR register value. */ gdtr.boundary = (sizeof(gdt_entry_t) * NUM_SEGMENTS) - 1; gdtr.base = (uint32_t) &gdt; /** * Load the GDT. * Passing pointer to `gdtr` as unsigned integer. Each GDT entry takes 8 * bytes, therefore kernel data selector is at 0x10 and kernel code * selector is at 0x08. */ gdt_load((uint32_t) &gdtr, SEGMENT_KDATA << 3, SEGMENT_KCODE << 3); } /** * Set up TSS for a process to be switched, so that the CPU will be able * to jump to its kernel stack when a system call happens. * Check out https://wiki.osdev.org/Task_State_Segment for details. * * Must be called with `cli` pushed explicitly. 
*/ void gdt_switch_tss(tss_t *tss, process_t *proc) { assert(proc != NULL); assert(proc->pgdir != NULL); assert(proc->kstack != 0); /** * Task state segment (TSS) has: * * Access Byte - * - Pr = 1: present * - Privl = 0: kernel privilege * - S = 0: it is a system segment * - Ex = 1: executable * - DC = 0: conforming * - RW = 0: readable code * - Ac = 1: accessed * Hence, 0x89. */ gdt_set_entry(5, (uint32_t) tss, (uint32_t) (sizeof(tss_t) - 1), 0x89, 0x00); /** Fill in task state information. */ tss->ss0 = SEGMENT_KDATA << 3; /** Kernel data segment. */ tss->esp0 = proc->kstack + KSTACK_SIZE; /** Top of kernel stack. */ tss->iopb = sizeof(tss_t); /** Forbids e.g. inb/outb from user space. */ tss->ebp = 0; /** Ensure EBP is 0 on switch, for stack backtracing. */ /** * Load task segment register. Segment selectors need to be shifted * to the left by 3, because the lower 3 bits are TI & RPL flags. */ uint16_t tss_seg_reg = SEGMENT_TSS << 3; asm volatile ( "ltr %0" : : "r" (tss_seg_reg) ); }
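To make the access-byte and selector comments above concrete, here is a host-runnable sketch (not repo code) that decodes the access bytes used in `gdt_init()` and shows the "selector = segment index << 3" convention used when loading segment registers.

/* Host-runnable sketch decoding GDT access bytes and selector values. */
#include <stdio.h>
#include <stdint.h>

static void decode_access(uint8_t a)
{
    printf("%#04x: Pr=%u Privl=%u S=%u Ex=%u DC=%u RW=%u Ac=%u\n",
           a, (a >> 7) & 1, (a >> 5) & 3, (a >> 4) & 1,
           (a >> 3) & 1, (a >> 2) & 1, (a >> 1) & 1, a & 1);
}

int main(void)
{
    decode_access(0x9A);   /* kernel code: ring 0, executable, readable */
    decode_access(0xF2);   /* user data:   ring 3, writable             */
    printf("kernel code selector: %#x\n", 1 << 3);   /* SEGMENT_KCODE << 3 */
    printf("kernel data selector: %#x\n", 2 << 3);   /* SEGMENT_KDATA << 3 */
    return 0;
}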
josehu07/hux-kernel
src/interrupt/idt.c
/** * Interrupt descriptor table (IDT) related. */ #include <stdint.h> #include "idt.h" #include "syscall.h" #include "../common/string.h" #include "../common/port.h" #include "../memory/gdt.h" /** * The IDT table. Should contain 256 gate entries. * - 0 - 31: reserved by x86 CPU for various exceptions * - 32 - 255: free for our OS kernel to define */ static idt_gate_t idt[NUM_GATE_ENTRIES]; /** IDTR address register. */ static idt_register_t idtr; /** * Setup one IDT gate entry. * Here, BASE is in its complete version, SELECTOR represents the selector * field, and FLAGS represents the 8-bit flags field. */ static void idt_set_gate(int idx, uint32_t base, uint16_t selector, uint8_t flags) { idt[idx].base_lo = (uint16_t) (base & 0xFFFF); idt[idx].selector = (uint16_t) selector; idt[idx].zero = (uint8_t) 0; idt[idx].flags = (uint8_t) flags; idt[idx].base_hi = (uint16_t) ((base >> 16) & 0xFFFF); } /** Extern our load routine written in ASM `idt-load.s`. */ extern void idt_load(uint32_t idtr_ptr); /** Extern our trap ISR handlers written in ASM `isr-stub.s`. */ extern void isr0 (void); extern void isr1 (void); extern void isr2 (void); extern void isr3 (void); extern void isr4 (void); extern void isr5 (void); extern void isr6 (void); extern void isr7 (void); extern void isr8 (void); extern void isr9 (void); extern void isr10(void); extern void isr11(void); extern void isr12(void); extern void isr13(void); extern void isr14(void); extern void isr15(void); extern void isr16(void); extern void isr17(void); extern void isr18(void); extern void isr19(void); extern void isr20(void); extern void isr21(void); extern void isr22(void); extern void isr23(void); extern void isr24(void); extern void isr25(void); extern void isr26(void); extern void isr27(void); extern void isr28(void); extern void isr29(void); extern void isr30(void); extern void isr31(void); /** Extern our PIC IRQ handlers written in ASM `irq-stub.s`. */ extern void irq0 (void); extern void irq1 (void); extern void irq2 (void); extern void irq3 (void); extern void irq4 (void); extern void irq5 (void); extern void irq6 (void); extern void irq7 (void); extern void irq8 (void); extern void irq9 (void); extern void irq10(void); extern void irq11(void); extern void irq12(void); extern void irq13(void); extern void irq14(void); extern void irq15(void); /** Extern the syscall trap gate handler. */ extern void syscall_handler(void); /** * Initialize the interrupt descriptor table (IDT) by setting up gate * entries of IDT, setting the IDTR register to point to our IDT address, * and then (through assembly `lidt` instruction) load our IDT. */ void idt_init() { /** * Remap PIC cascade mode external interrupt numbers. * * I/O ports: * - Master PIC: command port `0x20`, data port `0x21` * - Slave PIC: command port `0xA0`, data port `0xA1` * * These PIC initialization commands are called initialization words * (ICWs) and the order of 4 ICWs must be correct. */ outb(0x20, 0x11); /** Initialize master PIC in cascade mode. */ outb(0xA0, 0x11); /** Initialize slave PIC in cascade mode. */ outb(0x21, 0x20); /** Master PIC mapping offset = 0x20. */ outb(0xA1, 0x28); /** Slave PIC mapping offset = 0x28. */ outb(0x21, 0x04); /** Tell master PIC that slave PIC at IRQ # 2. */ outb(0xA1, 0x02); /** Tell slave PIC its cascade identity at 2. */ outb(0x21, 0x01); /** Set master PIC in 8086/88 mode. */ outb(0xA1, 0x01); /** Set slave PIC in 8086/88 mode. */ /** Pin masking. */ outb(0x21, 0x0); /** Set masking of master PIC. 
*/ outb(0xA1, 0x0); /** Set masking of slave PIC. */ /** * First, see https://wiki.osdev.org/IDT for a detailed anatomy of * flags field. * * Flags - * - P = ?: present, 0 for inactive entries and 1 for valid ones * - DPL = ?: ring level, specifies which privilege level should the * calling segment at least have * - S = ?: 0 for interrupt and trap gates * - Type = * - 0x5: 32-bit task gate * - 0x6: 16-bit interrupt gate * - 0x7: 16-bit trap gate * - 0xE: 32-bit interrupt gate * - 0xF: 32-bit trap gate * Hence, all interrupt gates have flag field 0x8E and all trap gates * have flag field 0x8F for now. The difference between trap gates and * interrupt gates is that interrupt gates automatically disable * interrupts upon entry and restores upon `iret` instruction (which * restores the saved EFLAGS). Trap gates do not do this. * * Selector = 0x08: pointing to kernel code segment. * * Unused entries and field all default to 0, so memset first. */ memset(idt, 0, sizeof(idt_gate_t) * 256); idt_set_gate(0 , (uint32_t) isr0 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(1 , (uint32_t) isr1 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(2 , (uint32_t) isr2 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(3 , (uint32_t) isr3 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(4 , (uint32_t) isr4 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(5 , (uint32_t) isr5 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(6 , (uint32_t) isr6 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(7 , (uint32_t) isr7 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(8 , (uint32_t) isr8 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(9 , (uint32_t) isr9 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(10, (uint32_t) isr10, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(11, (uint32_t) isr11, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(12, (uint32_t) isr12, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(13, (uint32_t) isr13, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(14, (uint32_t) isr14, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(15, (uint32_t) isr15, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(16, (uint32_t) isr16, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(17, (uint32_t) isr17, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(18, (uint32_t) isr18, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(19, (uint32_t) isr19, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(20, (uint32_t) isr20, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(21, (uint32_t) isr21, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(22, (uint32_t) isr22, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(23, (uint32_t) isr23, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(24, (uint32_t) isr24, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(25, (uint32_t) isr25, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(26, (uint32_t) isr26, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(27, (uint32_t) isr27, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(28, (uint32_t) isr28, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(29, (uint32_t) isr29, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(30, (uint32_t) isr30, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(31, (uint32_t) isr31, SEGMENT_KCODE << 3, 0x8E); /** These are for PIC IRQs (remapped). 
*/ idt_set_gate(32, (uint32_t) irq0 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(33, (uint32_t) irq1 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(34, (uint32_t) irq2 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(35, (uint32_t) irq3 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(36, (uint32_t) irq4 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(37, (uint32_t) irq5 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(38, (uint32_t) irq6 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(39, (uint32_t) irq7 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(40, (uint32_t) irq8 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(41, (uint32_t) irq9 , SEGMENT_KCODE << 3, 0x8E); idt_set_gate(42, (uint32_t) irq10, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(43, (uint32_t) irq11, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(44, (uint32_t) irq12, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(45, (uint32_t) irq13, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(46, (uint32_t) irq14, SEGMENT_KCODE << 3, 0x8E); idt_set_gate(47, (uint32_t) irq15, SEGMENT_KCODE << 3, 0x8E); /** * Register user syscall trap gate. The flag here is different in * two fields: * - DPL: user process is in privilege ring 3 instead of 0 * - Type: syscall gate is normally registered as a "trap gate" * instead of "interrupt gate"; trap gates do not disable * interrupts automatically upon entry */ idt_set_gate(INT_NO_SYSCALL, (uint32_t) syscall_handler, SEGMENT_KCODE << 3, 0xEF); /** Setup the IDTR register value. */ idtr.boundary = (sizeof(idt_gate_t) * NUM_GATE_ENTRIES) - 1; idtr.base = (uint32_t) &idt; /** * Load the IDT. * Passing pointer to `idtr` as unsigned integer. */ idt_load((uint32_t) &idtr); }
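The difference between the 0x8E flag used for the exception/IRQ gates and the 0xEF flag used for the syscall gate is just two fields: DPL (0 vs 3) and gate type (interrupt vs trap). A host-runnable sketch (not repo code) that decodes both bytes:

/* Host-runnable sketch decoding the IDT gate flag bytes 0x8E and 0xEF. */
#include <stdio.h>
#include <stdint.h>

static void decode_gate(uint8_t f)
{
    printf("%#04x: P=%u DPL=%u S=%u Type=%#x\n",
           f, (f >> 7) & 1, (f >> 5) & 3, (f >> 4) & 1, f & 0xF);
}

int main(void)
{
    decode_gate(0x8E);   /* P=1 DPL=0 Type=0xE -> ring-0, 32-bit interrupt gate */
    decode_gate(0xEF);   /* P=1 DPL=3 Type=0xF -> ring-3, 32-bit trap gate      */
    return 0;
}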
josehu07/hux-kernel
src/device/keyboard.h
/** * PS/2 keyboard input support. */ #ifndef KEYBOARD_H #define KEYBOARD_H #include <stdint.h> #include <stdbool.h> #include <stddef.h> /** A partial set of special keys on US QWERTY keyboard. */ enum keyboard_meta_key { KEY_NULL, // Dummy placeholder for empty key KEY_ESC, // Escape KEY_BACK, // Backspace KEY_TAB, // Tab KEY_ENTER, // Enter KEY_CTRL, // Both ctrls KEY_SHIFT, // Both shifts KEY_ALT, // Both alts KEY_CAPS, // Capslock KEY_HOME, // Home KEY_END, // End KEY_UP, // Cursor up KEY_DOWN, // Cursor down KEY_LEFT, // Cursor left KEY_RIGHT, // Cursor right KEY_PGUP, // Page up KEY_PGDN, // Page down KEY_INS, // Insert KEY_DEL, // Delete }; typedef enum keyboard_meta_key keyboard_meta_key_t; /** Holds info for a keyboard key. */ struct keyboard_key_info { keyboard_meta_key_t meta; /** Special meta key. */ char codel; /** ASCII byte code - lower case. */ char codeu; /** ASCII byte code - upper case. */ }; typedef struct keyboard_key_info keyboard_key_info_t; /** Struct for a keyboard event. */ struct keyboard_key_event { bool press; /** False if is a release event. */ bool ascii; /** True if is ASCII character, otherwise special. */ keyboard_key_info_t info; }; typedef struct keyboard_key_event keyboard_key_event_t; void keyboard_init(); int32_t keyboard_getstr(char *buf, size_t len); #endif
josehu07/hux-kernel
src/device/sysdev.c
/** * Syscalls related to communication with external devices other than * the VGA terminal display. */ #include <stdint.h> #include "sysdev.h" #include "timer.h" #include "keyboard.h" #include "../common/spinlock.h" #include "../interrupt/syscall.h" /** int32_t uptime(void); */ int32_t syscall_uptime(void) { spinlock_acquire(&timer_tick_lock); uint32_t curr_tick = timer_tick; spinlock_release(&timer_tick_lock); return (int32_t) (curr_tick * 1000 / TIMER_FREQ_HZ); } /** int32_t kbdstr(char *buf, uint32_t len); */ int32_t syscall_kbdstr(void) { char *buf; uint32_t len; if (!sysarg_get_uint(1, &len)) return SYS_FAIL_RC; if (!sysarg_get_mem(0, &buf, len)) return SYS_FAIL_RC; return (int32_t) (keyboard_getstr(buf, len)); }
josehu07/hux-kernel
src/memory/kheap.h
<reponame>josehu07/hux-kernel /** * Kernel Heap Memory "Next-Fit" Allocator. */ #ifndef KHEAP_H #define KHEAP_H #include <stdint.h> #include <stdbool.h> #include "slabs.h" /** The region between `kheap_curr` and slab allocators is free heap. */ #define KHEAP_MAX PAGE_SLAB_MIN /** Random magic number to protect against memory overruns. */ #define KHEAP_MAGIC 0xFBCA0739 /** * The free list is embedded inside the heap. Every allocated object (i.e., * memory chunk returned to caller of `kalloc()`) is prefixed with a free-list * header structure. * * See https://pages.cs.wisc.edu/~remzi/OSTEP/vm-freespace.pdf, figure 17.3 * ~ figure 17.7. The only difference is that our `magic` field is separate * from the `next` field and the magic number has weaker protection against * buffer overruns. */ struct free_list_node_header { size_t size; bool free; struct free_list_node_header *next; uint32_t magic; }; typedef struct free_list_node_header fl_header_t; /** Pointer arithmetics helper macros. */ #define HEADER_TO_OBJECT(header) ((header) + sizeof(fl_header_t)) #define OBJECT_TO_HEADER(object) ((object) - sizeof(fl_header_t)) void kheap_init(); uint32_t kalloc(size_t size); void kfree(void *addr); #endif
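Every object handed out by the allocator is preceded by a `fl_header_t`, and the two macros above move between the header address and the object address (both handled as integers, like `kalloc()`/`kfree()` do). This host-runnable sketch (not repo code) acts that arithmetic out on a plain byte buffer instead of the kernel heap.

/* Host-runnable sketch of the kheap header <-> object address arithmetic. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

#define KHEAP_MAGIC 0xFBCA0739

typedef struct free_list_node_header {
    size_t size;
    bool free;
    struct free_list_node_header *next;
    uint32_t magic;
} fl_header_t;

#define HEADER_TO_OBJECT(header) ((header) + sizeof(fl_header_t))
#define OBJECT_TO_HEADER(object) ((object) - sizeof(fl_header_t))

static uint8_t fake_heap[256];

int main(void)
{
    uintptr_t header_addr = (uintptr_t) fake_heap;
    fl_header_t *header = (fl_header_t *) header_addr;
    header->size = 64;                   /* pretend a 64-byte object follows */
    header->free = false;
    header->next = NULL;
    header->magic = KHEAP_MAGIC;

    uintptr_t object_addr = HEADER_TO_OBJECT(header_addr);   /* what kalloc returns */
    fl_header_t *check = (fl_header_t *) OBJECT_TO_HEADER(object_addr);
    printf("magic ok: %s, size: %zu\n",
           check->magic == KHEAP_MAGIC ? "yes" : "no", check->size);
    return 0;
}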
josehu07/hux-kernel
src/memory/gdt.h
/** * Global descriptor table (GDT) related. */ #ifndef GDT_H #define GDT_H #include <stdint.h> #include "../process/process.h" #include "../process/scheduler.h" /** * GDT entry format. * Check out https://wiki.osdev.org/Global_Descriptor_Table * for detailed anatomy of fields. */ struct gdt_entry { uint16_t limit_lo; /** Limit 0:15. */ uint16_t base_lo; /** Base 0:15. */ uint8_t base_mi; /** Base 16:23. */ uint8_t access; /** Access Byte. */ uint8_t limit_hi_flags; /** Limit 16:19 | Flags. */ uint8_t base_hi; /** Base 24:31. */ } __attribute__((packed)); typedef struct gdt_entry gdt_entry_t; /** * 48-bit GDTR address register format. * Used for loading the GDT table with `lgdt` instruction. */ struct gdt_register { uint16_t boundary; /** Boundary = length in bytes - 1. */ uint32_t base; /** GDT base address. */ } __attribute__((packed)); typedef struct gdt_register gdt_register_t; /** List of segments registered in GDT. */ #define SEGMENT_UNUSED 0x0 #define SEGMENT_KCODE 0x1 #define SEGMENT_KDATA 0x2 #define SEGMENT_UCODE 0x3 #define SEGMENT_UDATA 0x4 #define SEGMENT_TSS 0x5 #define NUM_SEGMENTS 6 void gdt_init(); void gdt_switch_tss(tss_t *tss, process_t *proc); #endif
josehu07/hux-kernel
src/kernel.c
<gh_stars>10-100 /** * The Hux kernel entry point. */ /** Check correct cross compiling. */ #if !defined(__i386__) #error "The Hux kernel needs to be compiled with an 'ix86-elf' compiler" #endif #include "boot/multiboot.h" #include "boot/elf.h" #include "common/printf.h" #include "common/string.h" #include "common/debug.h" #include "display/terminal.h" #include "display/vga.h" #include "interrupt/idt.h" #include "memory/gdt.h" #include "memory/paging.h" #include "memory/slabs.h" #include "memory/kheap.h" #include "process/process.h" #include "process/scheduler.h" #include "device/timer.h" #include "device/keyboard.h" #include "device/idedisk.h" #include "filesys/block.h" #include "filesys/vsfs.h" /** Displaying initialization progress message. */ static inline void _init_message(char *msg) { printf("["); cprintf(VGA_COLOR_BLUE, "INIT"); printf("] %s...", msg); } static inline void _init_message_ok(void) { cprintf(VGA_COLOR_GREEN, " OK\n"); } /** The main function that `boot.s` jumps to. */ void kernel_main(unsigned long magic, unsigned long addr) { /** Initialize VGA text-mode terminal support. */ terminal_init(); _init_message("setting up VGA terminal display"); _init_message_ok(); /** Double check the multiboot magic number. */ if (magic != MULTIBOOT_BOOTLOADER_MAGIC) error("invalid bootloader magic: %#x", magic); /** Get pointer to multiboot info. */ multiboot_info_t *mbi = (multiboot_info_t *) addr; /** Initialize debugging utilities. */ _init_message("initializing debugging utilities"); debug_init(mbi); _init_message_ok(); /** Initialize global descriptor table (GDT). */ _init_message("setting up global descriptor table (GDT)"); gdt_init(); _init_message_ok(); /** Initialize interrupt descriptor table (IDT). */ _init_message("setting up interrupt descriptor table (IDT)"); idt_init(); _init_message_ok(); /** Initialize PIT timer at 100 Hz frequency. */ _init_message("kicking off the PIT timer & interrupts"); timer_init(); _init_message_ok(); info("timer frequency is set to %dHz", TIMER_FREQ_HZ); /** Initialize PS/2 keyboard support. */ _init_message("initializing PS/2 keyboard support"); keyboard_init(); _init_message_ok(); /** Initialize paging and switch to use paging. */ _init_message("setting up virtual memory using paging"); paging_init(); _init_message_ok(); info("supporting physical memory size: %3dMiB", NUM_FRAMES * 4 / 1024); info("reserving memory for the kernel: %3dMiB", KMEM_MAX / 1024 / 1024); /** Initialize the kernel heap allocators. */ _init_message("initializing kernel heap memory allocators"); page_slab_init(); kheap_init(); _init_message_ok(); info("kernel page SLAB list starts at %p", PAGE_SLAB_MIN); info("kernel flexible heap starts at %p", kheap_curr); /** Initialize CPU state, process structures, and the `init` process. */ _init_message("initializing CPU state & process structures"); cpu_init(); process_init(); _init_message_ok(); info("maximum number of processes: %d", MAX_PROCS); /** Initialize IDE hard disk storage device. */ _init_message("initializing IDE hard disk device driver"); idedisk_init(); _init_message_ok(); /** Initialize the VSFS file system from disk. */ _init_message("initializing VSFS file system from disk"); filesys_init(); initproc_init(); _init_message_ok(); info("file system block size: %u KiB", BLOCK_SIZE); info("file system image has %u blocks", superblock.fs_blocks); /** Executes `sti`, CPU starts taking in interrupts. */ asm volatile ( "sti" ); /** * Jump into the scheduler. 
For now, it will pick up the only ready * process which is `init` and context switch to it, then never * switching back. */ terminal_clear(); scheduler(); error("CPU leaves the scheduler loop, should not happen"); }
josehu07/hux-kernel
user/lib/debug.h
/** * Common debugging message macros. */ #ifndef DEBUG_H #define DEBUG_H #include "printf.h" /** Panicking macro. */ #define panic(fmt, args...) do { \ cprintf(VGA_COLOR_MAGENTA, "PANIC: " fmt "\n", \ ##args); \ exit(); \ } while (0) /** Assertion macro. */ #define assert(condition) do { \ if (!(condition)) { \ panic("assertion failed @ function '%s'," \ " file '%s': line %d", \ __FUNCTION__, __FILE__, __LINE__); \ } \ } while (0) /** Error prompting macro. */ #define error(fmt, args...) do { \ cprintf(VGA_COLOR_RED, "ERROR: " fmt "\n", \ ##args); \ panic("error occurred @ function '%s'," \ " file '%s': line %d", \ __FUNCTION__, __FILE__, __LINE__); \ } while (0) /** Warning prompting macro. */ #define warn(fmt, args...) do { \ cprintf(VGA_COLOR_MAGENTA, "WARN: " fmt "\n", \ ##args); \ } while (0) /** Info prompting macro. */ #define info(fmt, args...) do { \ cprintf(VGA_COLOR_CYAN, "INFO: " fmt "\n", \ ##args); \ } while (0) #endif
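A small usage sketch of the macros above from a user program's point of view; the helper function and the values it checks are made up purely for illustration.

/** Illustrative usage of the user-side debug macros. */
#include <stdint.h>
#include "debug.h"

static void report_fd(int8_t fd)
{
    assert(fd >= -1);                           /* panics (and exits) if violated */
    if (fd < 0)
        warn("could not open file, fd = %d", fd);
    else
        info("using file descriptor %d", fd);
}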
josehu07/hux-kernel
user/put.c
<gh_stars>10-100 /** * Command line utility - put string into file. * * Due to limited shell cmdline parsing capability, strings containing * whitespaces are not supported yet. */ #include <stdint.h> #include <stdbool.h> #include <stddef.h> #include "lib/syscall.h" #include "lib/printf.h" #include "lib/debug.h" #include "lib/string.h" #define MAX_STR_LEN 256 static int8_t _open_writable_file(char *path, bool overwrite) { /** If path is not regular writable file, fail. */ int8_t fd = open(path, OPEN_WR); if (fd < 0) { warn("put: cannot open path '%s' for write", path); return -1; } file_stat_t stat; if (fstat(fd, &stat) != 0) { warn("put: cannot get stat of '%s'", path); close(fd); return -1; } if (stat.type != INODE_TYPE_FILE) { warn("put: path '%s' is not regular file", path); close(fd); return -1; } /** If not overwriting, seek to current file end. */ if (!overwrite) { int ret = seek(fd, stat.size); if (ret != 0) { warn("put: cannot seek to offset %lu", stat.size); close(fd); return -1; } } return fd; } static void _file_put_str(char *path, char *str, size_t len, bool overwrite, bool newline) { int8_t fd = _open_writable_file(path, overwrite); if (fd < 0) return; size_t bytes_written = write(fd, str, len); if (bytes_written != len) { warn("put: bytes written %lu != given length %lu", bytes_written, len); close(fd); return; } /** If putting newline after string. */ if (newline) { bytes_written = write(fd, "\n", 1); if (bytes_written != 1) warn("put: newline written %lu != 1", bytes_written); } close(fd); } static void _print_help_exit(char *me) { printf("Usage: %s [-h] [-o] [-e] file str\n", me); exit(); } void main(int argc, char *argv[]) { if (argc < 2 || strncmp(argv[1], "-h", 2) == 0) _print_help_exit(argv[0]); int argi = 1; bool newline = true, overwrite = false; while (true) { if (strncmp(argv[argi], "-e", 2) == 0) { argi++; newline = false; } else if (strncmp(argv[argi], "-o", 2) == 0) { argi++; overwrite = true; } else break; } char *path, *str; if (argc - argi != 2) _print_help_exit(argv[0]); path = argv[argi++]; str = argv[argi++]; size_t len = strnlen(str, MAX_STR_LEN); if (len == MAX_STR_LEN) warn("put: str exceeds max length %lu", MAX_STR_LEN); _file_put_str(path, str, len, overwrite, newline); exit(); }
josehu07/hux-kernel
src/memory/slabs.h
/** * Simple SLAB allocators for fixed-granularity kernel objects. */ #ifndef SLABS_H #define SLABS_H #include <stdint.h> #include "paging.h" /** Reserve kheap top 4MiB for the page slabs. */ #define PAGE_SLAB_MAX KMEM_MAX #define PAGE_SLAB_MIN (KMEM_MAX - 0x00400000) /** Node of a SLAB free-list. */ struct slab_node { struct slab_node *next; }; typedef struct slab_node slab_node_t; void page_slab_init(); uint32_t salloc_page(); void sfree_page(void *addr); #endif
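slabs.c is not included in this dump, so the following is only a sketch of the push/pop free-list idea that `slab_node_t` implies; the real `salloc_page()` / `sfree_page()` may differ.

/** Free-list sketch only -- not the actual slabs.c implementation. */
static slab_node_t *page_slab_freelist = NULL;   /* assumed list head */

static uint32_t salloc_page_sketch(void)
{
    if (page_slab_freelist == NULL)
        return 0;                                /* no free slab page left */
    slab_node_t *node = page_slab_freelist;
    page_slab_freelist = node->next;             /* pop the head node */
    return (uint32_t) node;                      /* node lives at the page start */
}

static void sfree_page_sketch(void *addr)
{
    slab_node_t *node = (slab_node_t *) addr;
    node->next = page_slab_freelist;             /* push back onto the free-list */
    page_slab_freelist = node;
}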
josehu07/hux-kernel
user/shell.c
<reponame>josehu07/hux-kernel /** * Basic command line shell. */ #include <stdint.h> #include <stdbool.h> #include <stddef.h> #include "lib/syscall.h" #include "lib/printf.h" #include "lib/debug.h" #include "lib/string.h" #include "lib/types.h" #define CWD_BUF_SIZE 256 #define LINE_BUF_SIZE 256 #define MAX_EXEC_ARGS 32 #define ENV_PATH "/" /** A fancy welcome logo in ASCII. */ static void _shell_welcome_logo(void) { cprintf(VGA_COLOR_LIGHT_BLUE, "\n" " /--/ /--/ \n" " / / / / \n" "Welcome to / /---/ / /--/ /--/ /--/ /--/ OS!\n" " / /---/ / / / / / | |-/ / \n" " / / / / / /---/ / / /-| | \n" " /--/ /--/ /---------/ /--/ |--| \n" "\n"); } /** Compose & print the prompt. */ static void _print_prompt(void) { char username[5] = "hush"; cprintf(VGA_COLOR_GREEN, "%s", username); cprintf(VGA_COLOR_DARK_GREY, ":"); char cwd[CWD_BUF_SIZE]; memset(cwd, 0, CWD_BUF_SIZE); if (getcwd(cwd, CWD_BUF_SIZE - MAX_FILENAME - 1) != 0) error("shell: failed to get cwd"); size_t ret_len = strlen(cwd); if (cwd[ret_len - 1] != '/') { cwd[ret_len] = '/'; cwd[ret_len + 1] = '\0'; } cprintf(VGA_COLOR_CYAN, "%s", cwd); cprintf(VGA_COLOR_DARK_GREY, "$ "); } /** Built-in cmd `cd`: change directory. */ static void _change_cwd(char *path) { if (path == NULL) /** empty path, go to "/". */ path = "/"; if (chdir(path) != 0) warn("shell: cd to path '%s' failed", path); } /** Fork + exec external command executable. */ static void _fork_exec(char *path, char **argv) { int8_t pid = fork(0); if (pid < 0) { warn("shell: failed to fork child process"); return; } if (pid == 0) { /** Child. */ /** * Try the executable under current working directory if found. * Otherwise, fall through to try the one under ENV_PATH (which * is the root directory "/"). */ if (open(path, OPEN_RD) < 0) { /** Not found. */ char full[CWD_BUF_SIZE]; snprintf(full, CWD_BUF_SIZE, "%s/%s", ENV_PATH, path); exec(full, argv); } else exec(path, argv); warn("shell: failed to exec '%s'", path); exit(); } else { /** Parent shell. */ int8_t pid_w = wait(); if (pid_w != pid) warn("shell: waited pid %d does not equal %d", pid_w, pid); } } /** * Parse a command line. Parse the tokens according to whitespaces, * interpret the first one as the executable filename, and together * with the rest as the argument list. */ static int _parse_tokens(char *line, char **argv) { size_t argi; size_t pos = 0; for (argi = 0; argi < MAX_EXEC_ARGS - 1; ++argi) { while (isspace(line[pos])) pos++; if (line[pos] == '\0') { argv[argi] = NULL; return argi; } argv[argi] = &line[pos]; while(!isspace(line[pos])) pos++; if (line[pos] != '\0') line[pos++] = '\0'; } argv[argi] = NULL; return argi; } /** * Handle a command line. * - If is a built-in command, handle directly; * - Else, fork a child process to run the command executable. */ static void _handle_cmdline(char *line) { char *argv[MAX_EXEC_ARGS]; int argc = _parse_tokens(line, argv); if (argc <= 0) return; else if (argc >= MAX_EXEC_ARGS) warn("shell: line exceeds max num of args %d", MAX_EXEC_ARGS); if (strncmp(argv[0], "cd", 2) == 0) _change_cwd(argv[1]); /** Could be NULL. */ else _fork_exec(argv[0], argv); } void main(int argc, char *argv[]) { (void) argc; // Unused. (void) argv; _shell_welcome_logo(); char cmd_buf[LINE_BUF_SIZE]; memset(cmd_buf, 0, LINE_BUF_SIZE); while (1) { _print_prompt(); if (kbdstr(cmd_buf, LINE_BUF_SIZE) < 0) error("shell: failed to get keyboard string"); else _handle_cmdline(cmd_buf); memset(cmd_buf, 0, LINE_BUF_SIZE); } exit(); }
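A quick illustration (not in the original shell.c) of what `_parse_tokens()` produces; it would have to live inside shell.c since the helpers are static, and the trailing blank in the buffer keeps the example independent of how the user lib's `isspace()` treats the terminating '\0'.

/** Illustrative only -- place inside shell.c if actually compiled. */
static void _parse_tokens_example(void)
{
    char line[] = "cat  notes.txt ";   /* note the trailing blank */
    char *argv[MAX_EXEC_ARGS];
    int argc = _parse_tokens(line, argv);

    assert(argc == 2);                              /* "cat" and "notes.txt"            */
    assert(strncmp(argv[0], "cat", 4) == 0);        /* tokens are NUL-terminated in place */
    assert(strncmp(argv[1], "notes.txt", 10) == 0);
    assert(argv[2] == NULL);                        /* argv list ends with NULL          */
}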
josehu07/hux-kernel
src/memory/sysmem.h
/** * Syscalls related to user memory allocation. */ #ifndef SYSMEM_H #define SYSMEM_H #include <stdint.h> int32_t syscall_setheap(); #endif
josehu07/hux-kernel
user/lib/syscall.h
<filename>user/lib/syscall.h<gh_stars>10-100 /** * User program system calls library. * * For more documentation on syscall functions signature, please see: * https://github.com/josehu07/hux-kernel/wiki/16.-Essential-System-Calls, and * https://github.com/josehu07/hux-kernel/wiki/22.-File‐Related-Syscalls */ #ifndef SYSCALL_H #define SYSCALL_H #include <stdint.h> /** Color code for `tprint()` is in `printf.h`. */ /** Flags for `open()`. */ #define OPEN_RD 0x1 #define OPEN_WR 0x2 /** Flags for `create()`. */ #define CREATE_FILE 0x1 #define CREATE_DIR 0x2 /** Struct & type code for `fstat()`. */ #define INODE_TYPE_EMPTY 0 #define INODE_TYPE_FILE 1 #define INODE_TYPE_DIR 2 struct file_stat { uint32_t inumber; uint32_t type; uint32_t size; }; typedef struct file_stat file_stat_t; /** Struct of a directory entry, see `vsfs.h`. */ #define DENTRY_SIZE 128 #define MAX_FILENAME 100 struct dentry { uint32_t valid; uint32_t inumber; char filename[DENTRY_SIZE - 8]; } __attribute__((packed)); typedef struct dentry dentry_t; /** * Externed from ASM `syscall.s`. * * Be sure that all arguments & returns values are 32-bit values, since * Hux parses syscall arguments by simply getting 32-bit values on stack. */ extern int32_t getpid(); extern int32_t fork(uint32_t timeslice); extern void exit(); extern int32_t sleep(uint32_t millisecs); extern int32_t wait(); extern int32_t kill(int32_t pid); extern int32_t uptime(); extern int32_t tprint(uint32_t color, char *str); extern int32_t kbdstr(char *buf, uint32_t len); extern int32_t setheap(uint32_t new_top); extern int32_t open(char *path, uint32_t mode); extern int32_t close(int32_t fd); extern int32_t create(char *path, uint32_t mode); extern int32_t remove(char *path); extern int32_t read(int32_t fd, char *dst, uint32_t len); extern int32_t write(int32_t fd, char *src, uint32_t len); extern int32_t chdir(char *path); extern int32_t getcwd(char *buf, uint32_t limit); extern int32_t exec(char *path, char **argv); extern int32_t fstat(int32_t fd, file_stat_t *stat); extern int32_t seek(int32_t fd, uint32_t offset); #endif
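A small user-program sketch exercising a few of the calls declared above; the '/README' path is just an example and may not exist in a given file-system image.

/** Illustrative user program (not part of the library itself). */
#include <stdint.h>
#include "lib/syscall.h"
#include "lib/printf.h"
#include "lib/debug.h"

void main(int argc, char *argv[])
{
    (void) argc; (void) argv;

    printf("running as pid %d, uptime %d ticks\n", getpid(), uptime());

    int32_t fd = open("/README", OPEN_RD);    /* path chosen only for the example */
    if (fd < 0)
        warn("demo: cannot open '/README' for read");
    else {
        file_stat_t stat;
        if (fstat(fd, &stat) == 0)
            printf("inode %u, type %u, size %u bytes\n",
                   stat.inumber, stat.type, stat.size);
        close(fd);
    }

    sleep(1000);    /* sleep roughly one second */
    exit();
}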
josehu07/hux-kernel
user/cat.c
/** * Command line utility - dump file content. */ #include <stdint.h> #include <stdbool.h> #include <stddef.h> #include "lib/syscall.h" #include "lib/printf.h" #include "lib/debug.h" #include "lib/string.h" #define DUMP_BUF_LEN 128 static char dump_buf[DUMP_BUF_LEN]; static void _dump_file(char *path) { /** If path is not regular readable file, fail. */ int8_t fd = open(path, OPEN_RD); if (fd < 0) { warn("cat: cannot open path '%s' for read", path); return; } /** Loop to dump all file content. */ size_t bytes_read; while ((bytes_read = read(fd, dump_buf, DUMP_BUF_LEN - 1)) > 0) { assert(bytes_read <= DUMP_BUF_LEN - 1); dump_buf[bytes_read] = '\0'; printf("%s", dump_buf); } close(fd); } static void _print_help_exit(char *me) { printf("Usage: %s [-h] file\n", me); exit(); } void main(int argc, char *argv[]) { if (argc < 2 || strncmp(argv[1], "-h", 2) == 0) _print_help_exit(argv[0]); if (argc != 2) _print_help_exit(argv[0]); char *path = argv[1]; _dump_file(path); exit(); }
josehu07/hux-kernel
src/process/sysproc.c
/** * Syscalls related to process state & operations. */ #include <stdint.h> #include "sysproc.h" #include "../common/printf.h" #include "../common/debug.h" #include "../device/timer.h" #include "../interrupt/syscall.h" #include "../process/process.h" #include "../process/scheduler.h" /** int32_t getpid(void); */ int32_t syscall_getpid(void) { return running_proc()->pid; } /** int32_t fork(uint32_t timeslice); */ int32_t syscall_fork(void) { uint32_t timeslice; if (!sysarg_get_uint(0, &timeslice)) return SYS_FAIL_RC; if (timeslice > 16) { warn("fork: timeslice value cannot be larger than 16"); return SYS_FAIL_RC; } if (timeslice == 0) return process_fork(running_proc()->timeslice); return process_fork(timeslice); } /** void exit(void); */ int32_t syscall_exit(void) { process_exit(); return 0; /** Not reached. */ } /** int32_t sleep(uint32_t millisecs); */ int32_t syscall_sleep(void) { uint32_t millisecs; if (!sysarg_get_uint(0, &millisecs)) return SYS_FAIL_RC; uint32_t sleep_ticks = millisecs * TIMER_FREQ_HZ / 1000; process_sleep(sleep_ticks); return 0; } /** int32_t wait(void); */ int32_t syscall_wait(void) { return process_wait(); } /** int32_t kill(int32_t pid); */ int32_t syscall_kill(void) { int32_t pid; if (!sysarg_get_int(0, &pid)) return SYS_FAIL_RC; if (pid < 0) return SYS_FAIL_RC; return process_kill(pid); }
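To make the argument-fetching pattern used by these handlers concrete, here is a hypothetical handler that is not part of Hux; only `sysarg_get_uint()`, `SYS_FAIL_RC`, and the PCB's `timeslice` field (used by `syscall_fork()` above) are taken from the surrounding code.

/** Hypothetical handler, for illustration only: a "nice(timeslice)"-style syscall. */
int32_t syscall_nice_example(void)
{
    uint32_t timeslice;
    if (!sysarg_get_uint(0, &timeslice))     /* fetch the first 32-bit argument */
        return SYS_FAIL_RC;
    if (timeslice == 0 || timeslice > 16)    /* same bound that fork() enforces */
        return SYS_FAIL_RC;
    running_proc()->timeslice = timeslice;   /* field used by syscall_fork() above */
    return 0;
}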
josehu07/hux-kernel
src/device/idedisk.c
/** * Parallel ATA (IDE) hard disk driver. * Assumes only port I/O (PIO) mode without DMA for now. */ #include <stdint.h> #include <stddef.h> #include <stdbool.h> #include "idedisk.h" #include "../common/port.h" #include "../common/debug.h" #include "../common/string.h" #include "../common/spinlock.h" #include "../interrupt/isr.h" #include "../filesys/block.h" #include "../process/process.h" #include "../process/scheduler.h" /** Data returned by the IDENTIFY command during initialization. */ static uint16_t ide_identify_data[256]; /** IDE pending requests software queue. */ static block_request_t *ide_queue_head = NULL; static block_request_t *ide_queue_tail = NULL; static spinlock_t ide_lock; /** * Wait for IDE disk on primary bus to become ready. Returns false on errors * or device faults, otherwise true. */ static bool _ide_wait_ready(void) { uint8_t status; do { /** Read from alternative status so it won't affect interrupts. */ status = inb(IDE_PORT_R_ALT_STATUS); } while ((status & (IDE_STATUS_BSY | IDE_STATUS_RDY)) != IDE_STATUS_RDY); if ((status & (IDE_STATUS_DF | IDE_STATUS_ERR)) != 0) return false; return true; } /** * Start a request to IDE disk. * Must be called with interrupts off. */ static void _ide_start_req(block_request_t *req) { assert(req != NULL); // if (req->block_no >= FILESYS_SIZE) // error("idedisk: request block number exceeds file system size"); uint8_t sectors_per_block = BLOCK_SIZE / IDE_SECTOR_SIZE; uint32_t sector_no = req->block_no * sectors_per_block; /** Wait for disk to be in ready state. */ _ide_wait_ready(); outb(IDE_PORT_RW_SECTORS, sectors_per_block); /** Number of sectors. */ outb(IDE_PORT_RW_LBA_LO, sector_no & 0xFF); /** LBA address - low bits. */ outb(IDE_PORT_RW_LBA_MID, (sector_no >> 8) & 0xFF); /** LBA address - mid bits. */ outb(IDE_PORT_RW_LBA_HI, (sector_no >> 16) & 0xFF); /** LBA address - high bits. */ outb(IDE_PORT_RW_SELECT, ide_select_entry(true, 0, sector_no)); /** LBA bits 24-27. */ /** If dirty, kick off a write with data, otherwise kick off a read. */ if (req->dirty) { outb(IDE_PORT_W_COMMAND, (sectors_per_block == 1) ? IDE_CMD_WRITE : IDE_CMD_WRITE_MULTIPLE); /** Must be a stream in 32-bit dwords, can't be in 8-bit bytes. */ outsl(IDE_PORT_RW_DATA, req->data, BLOCK_SIZE / sizeof(uint32_t)); } else { outb(IDE_PORT_W_COMMAND, (sectors_per_block == 1) ? IDE_CMD_READ : IDE_CMD_READ_MULTIPLE); } } /** Poll until an IDE request has been served. */ static void _ide_poll_req(block_request_t *req) { /** If is a read, get data now. */ if (!req->dirty) { if (_ide_wait_ready()) { /** Must be a stream in 32-bit dwords, can't be in 8-bit bytes. */ insl(IDE_PORT_RW_DATA, req->data, BLOCK_SIZE / sizeof(uint32_t)); req->valid = true; } } else { if (_ide_wait_ready()) req->dirty = false; } } /** IDE disk interrupt handler registered for IRQ # 14. */ static void idedisk_interrupt_handler(interrupt_state_t *state) { (void) state; /** Unused. */ spinlock_acquire(&ide_lock); /** Head of queue is the active request currently on the fly. */ block_request_t *req = ide_queue_head; if (req == NULL) { spinlock_release(&ide_lock); return; } ide_queue_head = ide_queue_head->next; /** * This "poll" should finish immediately, as the interrupt indicates * that the disk must have been ready. */ _ide_poll_req(req); /** Wake up the process waiting on this request. 
*/ spinlock_acquire(&ptable_lock); for (process_t *proc = ptable; proc < &ptable[MAX_PROCS]; ++proc) { if (proc->state == BLOCKED && proc->block_on == ON_IDEDISK && proc->wait_req == req) { process_unblock(proc); } } spinlock_release(&ptable_lock); /** If more requests in queue, start the disk on the next one. */ if (ide_queue_head != NULL) _ide_start_req(ide_queue_head); else ide_queue_tail = NULL; spinlock_release(&ide_lock); } /** * Initialize a single IDE disk 0 on the default primary bus. Registers the * IDE request interrupt ISR handler. */ void idedisk_init(void) { ide_queue_head = NULL; ide_queue_tail = NULL; spinlock_init(&ide_lock, "ide_lock"); /** Register IDE disk interrupt ISR handler. */ isr_register(INT_NO_IDEDISK, &idedisk_interrupt_handler); /** Select disk 0 on primary bus and wait for it to be ready */ outb(IDE_PORT_RW_SELECT, ide_select_entry(true, 0, 0)); _ide_wait_ready(); outb(IDE_PORT_W_CONTROL, 0); /** Ensure interrupts on. */ /** * Detect that disk 0 on the primary ATA bus is there and is a PATA * (IDE) device. Utilzies the IDENTIFY command. */ outb(IDE_PORT_RW_SECTORS, 0); outb(IDE_PORT_RW_LBA_LO, 0); outb(IDE_PORT_RW_LBA_MID, 0); outb(IDE_PORT_RW_LBA_HI, 0); outb(IDE_PORT_W_COMMAND, IDE_CMD_IDENTIFY); uint8_t status = inb(IDE_PORT_R_ALT_STATUS); if (status == 0) error("idedisk_init: drive does not exist on primary bus"); do { status = inb(IDE_PORT_R_ALT_STATUS); if (inb(IDE_PORT_RW_LBA_MID) != 0 || inb(IDE_PORT_RW_LBA_HI) != 0) error("idedisk_init: drive on primary bus is not PATA"); } while ((status & (IDE_STATUS_BSY)) != 0 || (status & (IDE_STATUS_DRQ | IDE_STATUS_ERR)) == 0); if ((status & (IDE_STATUS_ERR)) != 0) error("idedisk_init: error returned from the IDENTIFY command"); /** Must be a stream in 32-bit dwords. */ memset(ide_identify_data, 0, 256 * sizeof(uint16_t)); insl(IDE_PORT_RW_DATA, ide_identify_data, 256 * sizeof(uint16_t) / sizeof(uint32_t)); } /** * Start and wait for a block request to complete. If request is dirty, * write to disk, clear dirty, and set valid. Else if request is not * valid, read from disk into data and set valid. Returns true on success * and false if error appears in IDE port communications. */ bool idedisk_do_req(block_request_t *req) { process_t *proc = running_proc(); if (req->valid && !req->dirty) error("idedisk_do_req: request valid and not dirty, nothing to do"); if (!req->valid && req->dirty) error("idedisk_do_req: caught a dirty request that is not valid"); spinlock_acquire(&ide_lock); /** Append to IDE pending requests queue. */ req->next = NULL; if (ide_queue_tail != NULL) ide_queue_tail->next = req; else ide_queue_head = req; ide_queue_tail = req; /** Start he disk device if it was idle. */ if (ide_queue_head == req) _ide_start_req(req); /** Wait for this request to have been served. */ spinlock_acquire(&ptable_lock); spinlock_release(&ide_lock); proc->wait_req = req; process_block(ON_IDEDISK); proc->wait_req = NULL; spinlock_release(&ptable_lock); spinlock_acquire(&ide_lock); /** * Could be re=scheduld when an IDE interrupt comes saying that this * request has been served. If valid is not set at this time, it means * error occurred. */ if (!req->valid || req->dirty) { warn("idedisk_do_req: error occurred in IDE disk request"); spinlock_release(&ide_lock); return false; } spinlock_release(&ide_lock); return true; } /** Do request in polling mode, used only at file system initialization. 
*/ bool idedisk_do_req_at_boot(block_request_t *req) { if (req->valid && !req->dirty) error("idedisk_do_req_at_boot: request valid and not dirty, nothing to do"); if (!req->valid && req->dirty) error("idedisk_do_req_at_boot: caught a dirty request that is not valid"); _ide_start_req(req); _ide_poll_req(req); if (!req->valid || req->dirty) { warn("idedisk_do_req_at_boot: error occurred in IDE disk request"); return false; } return true; }
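An illustrative synchronous-read wrapper around `idedisk_do_req()`; `block_request_t` is defined in block.h, which is not in this dump, so the field initialization (including treating `data` as an in-struct buffer) is an assumption based only on how the driver code above touches the struct.

/** Sketch only: read one block through the driver from process context. */
static bool read_block_example(uint32_t block_no, char *dst)
{
    block_request_t req;
    req.valid = false;            /* nothing cached yet -- this is a read */
    req.dirty = false;
    req.block_no = block_no;
    req.next = NULL;

    if (!idedisk_do_req(&req))    /* blocks the calling process until served */
        return false;
    memcpy(dst, req.data, BLOCK_SIZE);   /* `data` assumed to be a BLOCK_SIZE buffer */
    return true;
}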
josehu07/hux-kernel
src/interrupt/idt.h
/** * Interrupt descriptor table (IDT) related. */ #ifndef IDT_H #define IDT_H #include <stdint.h> /** * IDT gate entry format. * Check out https://wiki.osdev.org/IDT for detailed anatomy * of fields. */ struct idt_gate { uint16_t base_lo; /** Base 0:15. */ uint16_t selector; /** Segment selector. */ uint8_t zero; /** Unused. */ uint8_t flags; /** Type & attribute flags. */ uint16_t base_hi; /** Base 16:31. */ } __attribute__((packed)); typedef struct idt_gate idt_gate_t; /** * 48-bit IDTR address register format. * Used for loading the IDT with `lidt` instruction. */ struct idt_register { uint16_t boundary; /** Boundary = length in bytes - 1. */ uint32_t base; /** IDT base address. */ } __attribute__((packed)); typedef struct idt_register idt_register_t; /** Length of IDT. */ #define NUM_GATE_ENTRIES 256 void idt_init(); #endif
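Analogous to the GDT case, a sketch of how a handler address would be split into an `idt_gate_t`; the real setter lives in idt.c (not shown here), so the helper name and example flag value are illustrative.

/** Illustrative sketch (not from idt.c): fill one IDT gate. */
static inline void idt_gate_pack(idt_gate_t *gate, uint32_t handler,
                                 uint16_t selector, uint8_t flags)
{
    gate->base_lo  = (uint16_t) (handler & 0xFFFF);
    gate->selector = selector;               /* e.g. the kernel code segment selector */
    gate->zero     = 0;
    gate->flags    = flags;                  /* e.g. 0x8E = present, ring-0, 32-bit interrupt gate */
    gate->base_hi  = (uint16_t) ((handler >> 16) & 0xFFFF);
}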
josehu07/hux-kernel
src/device/idedisk.h
/** * Parallel ATA (IDE) hard disk driver. * Assumes only port I/O (PIO) mode without DMA for now. */ #ifndef IDEDISK_H #define IDEDISK_H #include <stdint.h> #include <stdbool.h> #include "../filesys/block.h" /** Hard disk sector size is 512 bytes. */ #define IDE_SECTOR_SIZE 512 /** * Default I/O ports that are mapped to device registers of the IDE disk * on the primary bus (with I/O base = 0x1F0). * See https://wiki.osdev.org/ATA_PIO_Mode#Registers. */ #define IDE_PORT_IO_BASE 0x1F0 #define IDE_PORT_RW_DATA (IDE_PORT_IO_BASE + 0) #define IDE_PORT_R_ERROR (IDE_PORT_IO_BASE + 1) #define IDE_PORT_W_FEATURES (IDE_PORT_IO_BASE + 1) #define IDE_PORT_RW_SECTORS (IDE_PORT_IO_BASE + 2) #define IDE_PORT_RW_LBA_LO (IDE_PORT_IO_BASE + 3) #define IDE_PORT_RW_LBA_MID (IDE_PORT_IO_BASE + 4) #define IDE_PORT_RW_LBA_HI (IDE_PORT_IO_BASE + 5) #define IDE_PORT_RW_SELECT (IDE_PORT_IO_BASE + 6) #define IDE_PORT_R_STATUS (IDE_PORT_IO_BASE + 7) #define IDE_PORT_W_COMMAND (IDE_PORT_IO_BASE + 7) #define IDE_PORT_CTRL_BASE 0x3F6 #define IDE_PORT_R_ALT_STATUS (IDE_PORT_CTRL_BASE + 0) #define IDE_PORT_W_CONTROL (IDE_PORT_CTRL_BASE + 0) #define IDE_PORT_R_DRIVE_ADDR (IDE_PORT_CTRL_BASE + 1) /** * IDE error register flags (from PORT_R_ERROR). * See https://wiki.osdev.org/ATA_PIO_Mode#Error_Register. */ #define IDE_ERROR_AMNF (1 << 0) #define IDE_ERROR_TKZNF (1 << 1) #define IDE_ERROR_ABRT (1 << 2) #define IDE_ERROR_MCR (1 << 3) #define IDE_ERROR_IDNF (1 << 4) #define IDE_ERROR_MC (1 << 5) #define IDE_ERROR_UNC (1 << 6) #define IDE_ERROR_BBK (1 << 7) /** * IDE status register flags (from PORT_R_STATUS / PORT_R_ALT_STATUS). * See https://wiki.osdev.org/ATA_PIO_Mode#Status_Register_.28I.2FO_base_.2B_7.29. */ #define IDE_STATUS_ERR (1 << 0) #define IDE_STATUS_DRQ (1 << 3) #define IDE_STATUS_SRV (1 << 4) #define IDE_STATUS_DF (1 << 5) #define IDE_STATUS_RDY (1 << 6) #define IDE_STATUS_BSY (1 << 7) /** * IDE command codes (to PORT_W_COMMAND). * See https://wiki.osdev.org/ATA_Command_Matrix. */ #define IDE_CMD_READ 0x20 #define IDE_CMD_WRITE 0x30 #define IDE_CMD_READ_MULTIPLE 0xC4 #define IDE_CMD_WRITE_MULTIPLE 0xC5 #define IDE_CMD_IDENTIFY 0xEC /** * IDE drive/head register (PORT_RW_SELECT) value. * See https://wiki.osdev.org/ATA_PIO_Mode#Drive_.2F_Head_Register_.28I.2FO_base_.2B_6.29. */ #define IDE_SELECT_DRV (1 << 4) #define IDE_SELECT_LBA (1 << 6) static inline uint8_t ide_select_entry(bool use_lba, uint8_t drive, uint32_t sector_no) { uint8_t reg = 0xA0; if (use_lba) /** Useing LBA addressing mode. */ reg |= IDE_SELECT_LBA; if (drive != 0) /** Can only be 0 or 1 on one bus. */ reg |= IDE_SELECT_DRV; reg |= (sector_no >> 24) & 0x0F; /** LBA address, bits 24-27. */ return reg; } void idedisk_init(); bool idedisk_do_req(block_request_t *req); bool idedisk_do_req_at_boot(block_request_t *req); #endif
josehu07/hux-kernel
src/common/intstate.h
/** * Interrupt enable/disable routines. Mimics xv6. */ #ifndef INTSTATE_H #define INTSTATE_H #include <stdbool.h> bool interrupt_enabled(void); void cli_push(void); void cli_pop(void); #endif
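Since the header says these mimic xv6's pushcli/popcli, a typical nested usage pattern looks like the sketch below; the shared counter is just an example.

/** Usage sketch: cli_push()/cli_pop() nest, so helpers can disable interrupts safely. */
static void update_shared_counter(volatile uint32_t *counter)
{
    cli_push();        /* disable interrupts; remembers whether they were on   */
    (*counter)++;      /* short critical section w.r.t. interrupt handlers     */
    cli_pop();         /* re-enables interrupts only at the outermost pop      */
}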
josehu07/hux-kernel
src/boot/elf.h
/** * ELF 32-bit format related structures. * See http://www.cs.cmu.edu/afs/cs/academic/class/15213-f00/docs/elf.pdf */ #ifndef ELF_H #define ELF_H #include <stdint.h> /** ELF file header. */ struct elf_file_header { uint32_t magic; /** In little-endian on x86. */ uint8_t ident[12]; /** Rest of `e_ident`. */ uint16_t type; uint16_t machine; uint32_t version; uint32_t entry; uint32_t phoff; uint32_t shoff; uint32_t flags; uint16_t ehsize; uint16_t phentsize; uint16_t phnum; uint16_t shentsize; uint16_t shnum; uint16_t shstrndx; } __attribute__((packed)); typedef struct elf_file_header elf_file_header_t; /** * ELF magic number 0x7F'E''L''F' in little endian. * See https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html#elfid */ #define ELF_MAGIC 0x464C457F /** ELF program header. */ struct elf_program_header { uint32_t type; uint32_t offset; uint32_t vaddr; uint32_t paddr; uint32_t filesz; uint32_t memsz; uint32_t flags; uint32_t align; } __attribute__((packed)); typedef struct elf_program_header elf_program_header_t; /** ELF program header flags. */ #define ELF_PROG_FLAG_EXEC 0x1 #define ELF_PROG_FLAG_WRITE 0x2 #define ELF_PROG_FLAG_READ 0x4 /** ELF program header types. */ #define ELF_PROG_TYPE_LOAD 0x1 /** ELF section header. */ struct elf_section_header { uint32_t name; uint32_t type; uint32_t flags; uint32_t addr; uint32_t offset; uint32_t size; uint32_t link; uint32_t info; uint32_t addralign; uint32_t entsize; } __attribute__((packed)); typedef struct elf_section_header elf_section_header_t; /** ELF symbol. */ struct elf_symbol { uint32_t name; uint32_t value; uint32_t size; uint8_t info; uint8_t other; uint16_t shndx; } __attribute__((packed)); typedef struct elf_symbol elf_symbol_t; /** * For getting the type of a symbol table entry. Function type code == 0x2. * See https://docs.oracle.com/cd/E23824_01/html/819-0690/chapter6-79797.html#chapter6-tbl-21 */ #define ELF_SYM_TYPE(info) ((info) & 0xf) #define ELF_SYM_TYPE_FUNC 0x2 #endif
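A short sketch of walking the program headers with the structs above, roughly what a loader such as `exec_program()` would do; the function itself is illustrative and assumes the whole image is already mapped in memory (and that stdbool.h is available).

/** Illustrative only: count loadable segments in an in-memory ELF image. */
static bool elf_count_load_segments(const char *image, uint32_t *count)
{
    const elf_file_header_t *ehdr = (const elf_file_header_t *) image;
    if (ehdr->magic != ELF_MAGIC)
        return false;                               /* not an ELF binary */

    *count = 0;
    const elf_program_header_t *phdr =
        (const elf_program_header_t *) (image + ehdr->phoff);
    for (uint16_t i = 0; i < ehdr->phnum; ++i) {
        if (phdr[i].type == ELF_PROG_TYPE_LOAD)     /* loadable segment */
            (*count)++;
    }
    return true;
}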
josehu07/hux-kernel
user/ls.c
<reponame>josehu07/hux-kernel /** * Command line utility - list directory. */ #include <stdint.h> #include <stdbool.h> #include <stddef.h> #include "lib/syscall.h" #include "lib/printf.h" #include "lib/debug.h" #include "lib/string.h" #define CONCAT_BUF_SIZE 300 static char * _get_filename(char *path) { int32_t idx; for (idx = strlen(path) - 1; idx >= 0; --idx) { if (path[idx] == '/') return &path[idx + 1]; } return path; } static void _print_file_stat(char *filename, file_stat_t *stat) { bool is_dir = stat->type == INODE_TYPE_DIR; printf("%5u %s %8u ", stat->inumber, is_dir ? "D" : "F", stat->size); cprintf(is_dir ? VGA_COLOR_LIGHT_BLUE : VGA_COLOR_LIGHT_GREY, "%s\n", filename); } static void _list_directory(char *path) { int8_t fd = open(path, OPEN_RD); if (fd < 0) { warn("ls: cannot open path '%s'", path); return; } file_stat_t stat; if (fstat(fd, &stat) != 0) { warn("ls: cannot get stat of '%s'", path); close(fd); return; } /** Listing on a regular file. */ if (stat.type == INODE_TYPE_FILE) { _print_file_stat(_get_filename(path), &stat); close(fd); return; } /** * Listing a directory, then read out its content, interpret as an * array of directory entries. */ char concat_buf[CONCAT_BUF_SIZE]; strncpy(concat_buf, path, CONCAT_BUF_SIZE - 2); if (CONCAT_BUF_SIZE - 1 - strlen(concat_buf) < MAX_FILENAME) { warn("ls: path '%s' too long", path); close(fd); return; } char *name_buf = &concat_buf[strlen(concat_buf)]; *(name_buf++) = '/'; *name_buf = '\0'; dentry_t dentry; while (read(fd, (char *) &dentry, sizeof(dentry_t)) == sizeof(dentry_t)) { if (dentry.valid != 1) continue; strncpy(name_buf, dentry.filename, MAX_FILENAME); int8_t inner_fd = open(concat_buf, OPEN_RD); if (inner_fd < 0) { warn("ls: cannot open path '%s'", concat_buf); close(fd); return; } file_stat_t inner_stat; if (fstat(inner_fd, &inner_stat) != 0) { warn("ls: cannot get stat of '%s'", concat_buf); close(inner_fd); close(fd); return; } _print_file_stat(dentry.filename, &inner_stat); close(inner_fd); } close(fd); } static void _print_help_exit(char *me) { printf("Usage: %s [-h] [path [paths]]\n", me); exit(); } void main(int argc, char *argv[]) { if (argc < 2) { _list_directory("."); exit(); } if (strncmp(argv[1], "-h", 2) == 0) _print_help_exit(argv[0]); if (argc == 2) { _list_directory(argv[1]); exit(); } /** If multiple directories, display as multiple sections. */ for (int i = 1; i < argc; ++i) { printf("%s:\n", argv[i]); _list_directory(argv[i]); if (i < argc - 1) printf("\n"); } exit(); }
josehu07/hux-kernel
src/device/sysdev.h
<gh_stars>10-100 /** * Syscalls related to communication with external devices other than * the VGA terminal display. */ #ifndef SYSDEV_H #define SYSDEV_H #include <stdint.h> int32_t syscall_uptime(); int32_t syscall_kbdstr(); #endif
josehu07/hux-kernel
src/common/parklock.h
/** * Lock implementation that blocks the calling process on `acquire()` if * the lock is locked. Can only be used under process context. */ #ifndef PARKLOCK_H #define PARKLOCK_H #include <stdint.h> #include <stdbool.h> #include "spinlock.h" /** Parking lock structure. */ struct parklock { bool locked; /** True if locked, changes must be protected. */ spinlock_t lock; /** Internal spinlock that protects `locked`. */ int8_t holder_pid; /** Holder process's PID. */ const char *name; /** Lock name for debugging. */ }; typedef struct parklock parklock_t; void parklock_acquire(parklock_t *lock); void parklock_release(parklock_t *lock); bool parklock_holding(parklock_t *lock); void parklock_init(parklock_t *lock, const char *name); #endif
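A usage sketch of the parking lock from process context; the guarded value and the lock name are illustrative.

/** Usage sketch: guard a shared value with a parking lock. */
static parklock_t demo_lock;    /* call parklock_init(&demo_lock, "demo_lock") once at boot */

static void demo_update(uint32_t *shared_value)
{
    parklock_acquire(&demo_lock);    /* blocks (parks) the caller if already held */
    *shared_value += 1;
    parklock_release(&demo_lock);
}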
josehu07/hux-kernel
src/filesys/vsfs.c
/** * Very simple file system (VSFS) data structures & Layout. * * This part borrows code heavily from xv6. */ #include <stdint.h> #include <stdbool.h> #include <stddef.h> #include "vsfs.h" #include "block.h" #include "file.h" #include "sysfile.h" #include "exec.h" #include "../common/debug.h" #include "../common/string.h" #include "../common/bitmap.h" #include "../common/spinlock.h" #include "../common/parklock.h" #include "../memory/kheap.h" #include "../process/process.h" #include "../process/scheduler.h" /** In-memory runtime copy of the FS meta-data structures. */ superblock_t superblock; bitmap_t inode_bitmap; bitmap_t data_bitmap; /** * Allocates a free file descriptor of the caller process. Returns -1 * if no free fd in this process. */ static int8_t _alloc_process_fd(file_t *file) { process_t *proc = running_proc(); for (int8_t fd = 0; fd < MAX_FILES_PER_PROC; ++fd) { if (proc->files[fd] == NULL) { proc->files[fd] = file; return fd; } } return -1; } /** Find out what's the `file_t` for given FD for current process. */ static file_t * _find_process_file(int8_t fd) { process_t *proc = running_proc(); if (fd < 0 || fd >= MAX_FILES_PER_PROC || proc->files[fd] == NULL) return NULL; return proc->files[fd]; } /** * Look for a filename in a directory. Returns a got inode on success, or * NULL if not found. If found, sets *ENTRY_OFFSET to byte offset of the * entry. * Must be called with lock on DIR_INODE held. */ static mem_inode_t * _dir_find(mem_inode_t *dir_inode, char *filename, uint32_t *entry_offset) { assert(dir_inode->d_inode.type == INODE_TYPE_DIR); /** Search for the filename in this directory. */ dentry_t dentry; for (size_t offset = 0; offset < dir_inode->d_inode.size; offset += sizeof(dentry_t)) { if (inode_read(dir_inode, (char *) &dentry, offset, sizeof(dentry_t)) != sizeof(dentry_t)) { warn("dir_find: failed to read at offset %u", offset); return NULL; } if (dentry.valid == 0) continue; /** If matches, get the inode. */ if (strncmp(dentry.filename, filename, MAX_FILENAME) == 0) { if (entry_offset != NULL) *entry_offset = offset; return inode_get(dentry.inumber); } } return NULL; } /** * Add a new directory entry. * Must be called with lock on DIR_INODE held. */ static bool _dir_add(mem_inode_t *dir_inode, char *filename, uint32_t inumber) { /** The name must not be present. */ mem_inode_t *file_inode = _dir_find(dir_inode, filename, NULL); if (file_inode != NULL) { warn("dir_add: file '%s' already exists", filename); inode_put(file_inode); return false; } /** Look for an emtpy directory entry. */ dentry_t dentry; uint32_t offset; for (offset = 0; offset < dir_inode->d_inode.size; offset += sizeof(dentry_t)) { if (inode_read(dir_inode, (char *) &dentry, offset, sizeof(dentry_t)) != sizeof(dentry_t)) { warn("dir_add: failed to read at offset %u", offset); return false; } if (dentry.valid == 0) break; } /** Add into this empty slot. */ memset(&dentry, 0, sizeof(dentry_t)); strncpy(dentry.filename, filename, MAX_FILENAME); dentry.inumber = inumber; dentry.valid = 1; if (inode_write(dir_inode, (char *) &dentry, offset, sizeof(dentry_t)) != sizeof(dentry_t)) { warn("dir_add: failed to write at offset %u", offset); return false; } return true; } /** * Returns true if the directory is empty. Since directory size grows * in Hux and never gets recycled until removed, need to loop over all * allocated dentry slots to check if all are now unused. * Must be called with lock on DIR_INODE held. 
*/ static bool _dir_empty(mem_inode_t *dir_inode) { dentry_t dentry; for (size_t offset = 2 * sizeof(dentry_t); /** Skip '.' and '..' */ offset < dir_inode->d_inode.size; offset += sizeof(dentry_t)) { if (inode_read(dir_inode, (char *) &dentry, offset, sizeof(dentry_t)) != sizeof(dentry_t)) { warn("dir_empty: failed to read at offset %u", offset); return false; } if (dentry.valid != 0) return false; } return true; } /** * Get filename for child inode of INUMBER. * Must be called with lock on DIR_INODE held. */ static size_t _dir_filename(mem_inode_t *dir_inode, uint32_t inumber, char *buf, size_t limit) { dentry_t dentry; for (size_t offset = 2 * sizeof(dentry_t); /** Skip '.' and '..' */ offset < dir_inode->d_inode.size; offset += sizeof(dentry_t)) { if (inode_read(dir_inode, (char *) &dentry, offset, sizeof(dentry_t)) != sizeof(dentry_t)) { warn("dir_filename: failed to read at offset %u", offset); return limit; } if (dentry.valid == 0) continue; if (dentry.inumber == inumber) { size_t len = limit - 1; if (len < strlen(dentry.filename)) return limit; else if (len > strlen(dentry.filename)) len = strlen(dentry.filename); strncpy(buf, dentry.filename, len); return len; } } warn("dir_filename: child inumber %u not found", inumber); return limit; } /** * Copy the next path element into ELEM, and returns the rest path with * leading slashes removed. * E.g., _parse_elem("aaa///bb/c") sets ELEM to "aaa" and returns a pointer * to "bb/c". If there are no elements, return NULL. */ static char * _parse_elem(char *path, char *elem) { while (*path == '/') path++; if (*path == '\0') return NULL; char *elem_start = path; while (*path != '/' && *path != '\0') path++; size_t elem_len = path - elem_start; if (elem_len > MAX_FILENAME - 1) elem_len = MAX_FILENAME - 1; memcpy(elem, elem_start, elem_len); elem[elem_len] = '\0'; while (*path == '/') path++; return path; } /** * Search the directory tree and get the inode for a path name. * Returns NULL if not found. * * If STOP_AT_PARENT is set, returns the inode for the parent * directory and writes the final path element into FILENAME, * which must be at least MAX_FILENAME in size. */ static mem_inode_t * _path_to_inode(char *path, bool stop_at_parent, char *filename) { mem_inode_t *inode; /** Starting point. */ if (*path == '/') inode = inode_get(ROOT_INUMBER); else { inode = running_proc()->cwd; inode_ref(inode); } if (inode == NULL) { warn("path_lookup: failed to get starting point of %s", path); return NULL; } /** For each path element, go into that directory. */ do { path = _parse_elem(path, filename); if (path == NULL) break; inode_lock(inode); if (inode->d_inode.type != INODE_TYPE_DIR) { inode_unlock(inode); inode_put(inode); return NULL; } if (stop_at_parent && *path == '\0') { inode_unlock(inode); return inode; /** Stopping one-level early. */ } mem_inode_t *next = _dir_find(inode, filename, NULL); if (next == NULL) { inode_unlock(inode); inode_put(inode); return NULL; } inode_unlock(inode); inode_put(inode); inode = next; } while (path != NULL); if (stop_at_parent) { inode_put(inode); /** Path does not contain parent. */ return NULL; } return inode; } /** Wrappers for path lookup. */ static mem_inode_t * _path_lookup(char *path) { char filename[MAX_FILENAME]; return _path_to_inode(path, false, filename); } static mem_inode_t * _path_lookup_parent(char *path, char *filename) { return _path_to_inode(path, true, filename); } /** * Open a file for the caller process. Returns the file descriptor (>= 0) * on success, and -1 on failure. 
*/ int8_t filesys_open(char *path, uint32_t mode) { mem_inode_t *inode = _path_lookup(path); if (inode == NULL) return -1; inode_lock(inode); if (inode->d_inode.type == INODE_TYPE_DIR && mode != OPEN_RD) { inode_unlock(inode); inode_put(inode); return -1; } file_t *file = file_get(); if (file == NULL) { warn("open: failed to allocate open file structure, reached max?"); inode_unlock(inode); // Maybe use goto. inode_put(inode); return -1; } int8_t fd = _alloc_process_fd(file); if (fd < 0) { warn("open: failed to allocate file descriptor, reached max?"); file_put(file); inode_unlock(inode); // Maybe use goto. inode_put(inode); return -1; } inode_unlock(inode); file->inode = inode; file->readable = (mode & OPEN_RD) != 0; file->writable = (mode & OPEN_WR) != 0; file->offset = 0; return fd; } /** Closes an open file, actually closing if reference count drops to 0. */ bool filesys_close(int8_t fd) { file_t *file = _find_process_file(fd); if (file == NULL) { warn("close: cannot find file for fd %d", fd); return false; } running_proc()->files[fd] = NULL; file_put(file); return true; } /** * Create a file or directory at the given path name. Returns true on * success and false on failures. */ bool filesys_create(char *path, uint32_t mode) { char filename[MAX_FILENAME]; mem_inode_t *parent_inode = _path_lookup_parent(path, filename); if (parent_inode == NULL) { warn("create: cannot find parent directory of '%s'", path); return false; } inode_lock(parent_inode); mem_inode_t *file_inode = _dir_find(parent_inode, filename, NULL); if (file_inode != NULL) { warn("create: file '%s' already exists", path); inode_unlock(parent_inode); inode_put(parent_inode); inode_put(file_inode); return false; } uint32_t type = (mode & CREATE_FILE) ? INODE_TYPE_FILE : INODE_TYPE_DIR; file_inode = inode_alloc(type); if (file_inode == NULL) { warn("create: failed to allocate inode on disk, out of space?"); return false; } inode_lock(file_inode); /** Create '.' and '..' entries for new directory. */ if (type == INODE_TYPE_DIR) { if (!_dir_add(file_inode, ".", file_inode->inumber) || !_dir_add(file_inode, "..", parent_inode->inumber)) { warn("create: failed to create '.' or '..' entries"); inode_free(file_inode); return false; } } /** Put file into parent directory. */ if (!_dir_add(parent_inode, filename, file_inode->inumber)) { warn("create: failed to put '%s' into its parent directory", path); inode_free(file_inode); return false; } inode_unlock(parent_inode); inode_put(parent_inode); inode_unlock(file_inode); inode_put(file_inode); return true; } /** * Remove a file or directory from the file system. If is removing a * directory, the directory must be empty. */ bool filesys_remove(char *path) { char filename[MAX_FILENAME]; mem_inode_t *parent_inode = _path_lookup_parent(path, filename); if (parent_inode == NULL) { warn("remove: cannot find parent directory of '%s'", path); return false; } inode_lock(parent_inode); /** Cannot remove '.' or '..'. */ if (strncmp(filename, ".", MAX_FILENAME) == 0 || strncmp(filename, "..", MAX_FILENAME) == 0) { warn("remove: cannot remove '.' or '..' entries"); inode_unlock(parent_inode); // Maybe use goto. inode_put(parent_inode); return false; } uint32_t offset; mem_inode_t *file_inode = _dir_find(parent_inode, filename, &offset); if (file_inode == NULL) { warn("remove: cannot find file '%s'", path); inode_unlock(parent_inode); // Maybe use goto. inode_put(parent_inode); return false; } inode_lock(file_inode); /** Cannot remove a non-empty directory. 
*/ if (file_inode->d_inode.type == INODE_TYPE_DIR && !_dir_empty(file_inode)) { warn("remove: cannot remove non-empty directory '%s'", path); inode_unlock(file_inode); // Maybe use goto. inode_put(file_inode); inode_unlock(parent_inode); // Maybe use goto. inode_put(parent_inode); return false; } /** Write zeros into the corresponding entry in parent directory. */ dentry_t dentry; memset(&dentry, 0, sizeof(dentry_t)); if (inode_write(parent_inode, (char *) &dentry, offset, sizeof(dentry_t)) != sizeof(dentry_t)) { warn("remove: failed to write at offset %u", offset); inode_unlock(file_inode); // Maybe use goto. inode_put(file_inode); inode_unlock(parent_inode); // Maybe use goto. inode_put(parent_inode); return false; } inode_unlock(parent_inode); inode_put(parent_inode); /** Erase its metadata on disk. */ inode_free(file_inode); inode_unlock(file_inode); inode_put(file_inode); return true; } /** Read from current offset of file into user buffer. */ int32_t filesys_read(int8_t fd, char *dst, size_t len) { file_t *file = _find_process_file(fd); if (file == NULL) { warn("read: cannot find file for fd %d", fd); return -1; } if (!file->readable) { warn("read: file for fd %d is not readable", fd); return -1; } inode_lock(file->inode); size_t bytes_read = inode_read(file->inode, dst, file->offset, len); if (bytes_read > 0) /** Update file offset. */ file->offset += bytes_read; inode_unlock(file->inode); return bytes_read; } /** Write from user buffer into current offset of file. */ int32_t filesys_write(int8_t fd, char *src, size_t len) { file_t *file = _find_process_file(fd); if (file == NULL) { warn("read: cannot find file for fd %d", fd); return -1; } if (!file->writable) { warn("write: file for fd %d is not writable", fd); return -1; } inode_lock(file->inode); size_t bytes_written = inode_write(file->inode, src, file->offset, len); if (bytes_written > 0) /** Update file offset. */ file->offset += bytes_written; inode_unlock(file->inode); return bytes_written; } /** Change the current working directory (cwd) of caller process. */ bool filesys_chdir(char *path) { mem_inode_t *inode = _path_lookup(path); if (inode == NULL) { warn("chdir: target path '%s' does not exist", path); return false; } inode_lock(inode); if (inode->d_inode.type != INODE_TYPE_DIR) { warn("chdir: target path '%s' is not a directory", path); inode_unlock(inode); inode_put(inode); return false; } inode_unlock(inode); /** Put the old cwd and keep the new one. */ process_t *proc = running_proc(); inode_put(proc->cwd); proc->cwd = inode; return true; } /** Get an absolute string path of current working directory. */ static size_t _recurse_abs_path(mem_inode_t *inode, char *buf, size_t limit) { if (inode->inumber == ROOT_INUMBER) { buf[0] = '/'; return 1; } inode_lock(inode); /** Check the parent directory. */ mem_inode_t *parent_inode = _dir_find(inode, "..", NULL); if (parent_inode == NULL) { warn("abs_path: failed to get parent inode of %u", inode->inumber); inode_unlock(inode); return limit; } inode_unlock(inode); /** If parent is root, stop recursion.. 
*/ if (parent_inode->inumber == ROOT_INUMBER) { buf[0] = '/'; inode_lock(parent_inode); size_t written = _dir_filename(parent_inode, inode->inumber, &buf[1], limit - 1); inode_unlock(parent_inode); inode_put(parent_inode); return 1 + written; } size_t curr = _recurse_abs_path(parent_inode, buf, limit); if (curr >= limit - 1) return limit; inode_lock(parent_inode); size_t written = _dir_filename(parent_inode, inode->inumber, &buf[curr], limit - curr); inode_unlock(parent_inode); inode_put(parent_inode); return curr + written; } bool filesys_getcwd(char *buf, size_t limit) { mem_inode_t *inode = running_proc()->cwd; inode_ref(inode); size_t written = _recurse_abs_path(inode, buf, limit); if (written >= limit) return false; else buf[limit - 1] = '\0'; inode_put(inode); return true; } /** Wrapper over `exec_program()`. */ bool filesys_exec(char *path, char **argv) { mem_inode_t *inode = _path_lookup(path); if (inode == NULL) { warn("exec: failed to lookup path '%s'", path); return false; } char *filename = &path[strlen(path) - 1]; while (*filename != '/' && filename != path) filename--; return exec_program(inode, filename, argv); } /** Get metadata information about an open file. */ bool filesys_fstat(int8_t fd, file_stat_t *stat) { file_t *file = _find_process_file(fd); if (file == NULL) { warn("fstat: cannot find file for fd %d", fd); return false; } file_stat(file, stat); return true; } /** Seek to absolute file offset. */ bool filesys_seek(int8_t fd, size_t offset) { file_t *file = _find_process_file(fd); if (file == NULL) { warn("seek: cannot find file for fd %d", fd); return false; } inode_lock(file->inode); size_t filesize = file->inode->d_inode.size; inode_unlock(file->inode); if (offset > filesize) { warn("seek: offset %lu beyond filesize %lu", offset, filesize); return false; } file->offset = offset; return true; } /** Flush the in-memory modified bitmap block to disk. */ bool inode_bitmap_update(uint32_t slot_no) { return block_write((char *) &(inode_bitmap.bits[slot_no]), superblock.inode_bitmap_start * BLOCK_SIZE + slot_no, 1); } bool data_bitmap_update(uint32_t slot_no) { return block_write((char *) &(data_bitmap.bits[slot_no]), superblock.data_bitmap_start * BLOCK_SIZE + slot_no, 1); } /** * Initialize the file system by reading out the image from the * IDE disk and parse according to VSFS layout. */ void filesys_init(void) { /** Block 0 must be the superblock. */ if (!block_read_at_boot((char *) &superblock, 0, sizeof(superblock_t))) error("filesys_init: failed to read superblock from disk"); /** * Currently the VSFS layout is hard-defined, so just do asserts here * to ensure that the mkfs script followed the expected layout. In real * systems, mkfs should have the flexibility to generate FS image as * long as the layout is valid, and we read out the actual layout here. */ assert(superblock.fs_blocks == 262144); assert(superblock.inode_bitmap_start == 1); assert(superblock.inode_bitmap_blocks == 6); assert(superblock.data_bitmap_start == 7); assert(superblock.data_bitmap_blocks == 32); assert(superblock.inode_start == 39); assert(superblock.inode_blocks == 6105); assert(superblock.data_start == 6144); assert(superblock.data_blocks == 256000); /** Read in the two bitmaps into memory. 
*/ uint32_t num_inodes = superblock.inode_blocks * (BLOCK_SIZE / INODE_SIZE); uint8_t *inode_bits = (uint8_t *) kalloc(num_inodes / 8); bitmap_init(&inode_bitmap, inode_bits, num_inodes); if (!block_read_at_boot((char *) inode_bitmap.bits, superblock.inode_bitmap_start * BLOCK_SIZE, num_inodes / 8)) { error("filesys_init: failed to read inode bitmap from disk"); } uint32_t num_dblocks = superblock.data_blocks; uint8_t *data_bits = (uint8_t *) kalloc(num_dblocks / 8); bitmap_init(&data_bitmap, data_bits, num_dblocks); if (!block_read_at_boot((char *) data_bitmap.bits, superblock.data_bitmap_start * BLOCK_SIZE, num_dblocks / 8)) { error("filesys_init: failed to read data bitmap from disk"); } /** Fill open file table and inode table with empty slots. */ for (size_t i = 0; i < MAX_OPEN_FILES; ++i) { ftable[i].ref_cnt = 0; /** Indicates UNUSED. */ ftable[i].readable = false; ftable[i].writable = false; ftable[i].inode = NULL; ftable[i].offset = 0; } spinlock_init(&ftable_lock, "ftable_lock"); for (size_t i = 0; i < MAX_MEM_INODES; ++i) { icache[i].ref_cnt = 0; /** Indicates UNUSED. */ icache[i].inumber = 0; parklock_init(&(icache[i].lock), "inode's parklock"); } spinlock_init(&icache_lock, "icache_lock"); }
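To make the path-walking behavior of `_parse_elem()` above concrete, a small illustrative loop follows; it would need to live inside vsfs.c, since the helper is static.

/** Illustrative only: _parse_elem() yields "usr", "bin", then "ls" here. */
static void _parse_elem_example(void)
{
    char elem[MAX_FILENAME];
    char *rest = "usr//bin/ls";
    while ((rest = _parse_elem(rest, elem)) != NULL)
        info("path element: %s", elem);   /* repeated slashes are skipped */
}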
josehu07/hux-kernel
src/common/port.c
/** * Common I/O ports inline assembly utilities (in At&T syntax). */ #include "port.h" /** Output 8 bits to an I/O port. */ inline void outb(uint16_t port, uint8_t val) { asm volatile ( "outb %0, %1" : : "a" (val), "d" (port) ); } /** Output 16 bits to an I/O port. */ inline void outw(uint16_t port, uint16_t val) { asm volatile ( "outw %0, %1" : : "a" (val), "d" (port) ); } /** Output 32 bits to an I/O port. */ inline void outl(uint16_t port, uint32_t val) { asm volatile ( "outl %0, %1" : : "a" (val), "d" (port) ); } /** Output CNT 32-bit dwords from the buffer at ADDR to an I/O port. */ inline void outsl(uint16_t port, const void *addr, uint32_t cnt) { asm volatile ( "rep outsl" : "+S" (addr), "+c" (cnt) : "d" (port) ); } /** Input 8 bits from an I/O port. */ inline uint8_t inb(uint16_t port) { uint8_t ret; asm volatile ( "inb %1, %0" : "=a" (ret) : "d" (port) ); return ret; } /** Input 16 bits from an I/O port. */ inline uint16_t inw(uint16_t port) { uint16_t ret; asm volatile ( "inw %1, %0" : "=a" (ret) : "d" (port) ); return ret; } /** Input 32 bits from an I/O port. */ inline uint32_t inl(uint16_t port) { uint32_t ret; asm volatile ( "inl %1, %0" : "=a" (ret) : "d" (port) ); return ret; } /** Input CNT 32-bit dwords into the buffer at ADDR. */ inline void insl(uint16_t port, void *addr, uint32_t cnt) { asm volatile ( "rep insl" : "+D" (addr), "+c" (cnt) : "d" (port) : "memory" ); }
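A usage sketch of the helpers above: reading back the VGA hardware cursor through the same 0x3D4/0x3D5 index/data ports that terminal.c writes to.

/** Usage sketch: read the VGA hardware cursor position with the port helpers. */
static uint16_t vga_read_cursor_position(void)
{
    uint16_t pos;
    outb(0x3D4, 0x0F);                      /* select "cursor location low" register  */
    pos  = inb(0x3D5);
    outb(0x3D4, 0x0E);                      /* select "cursor location high" register */
    pos |= ((uint16_t) inb(0x3D5)) << 8;
    return pos;                             /* equals row * 80 + column */
}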
josehu07/hux-kernel
src/device/timer.c
<gh_stars>10-100 /** * Programmable interval timer (PIT) in square wave generator mode to * serve as the system timer. */ #include <stdint.h> #include "timer.h" #include "../common/port.h" #include "../common/printf.h" #include "../common/debug.h" #include "../common/spinlock.h" #include "../interrupt/isr.h" #include "../process/process.h" #include "../process/scheduler.h" /** Global counter of timer ticks elapsed since boot. */ uint32_t timer_tick = 0; spinlock_t timer_tick_lock; /** * Timer interrupt handler registered for IRQ # 0. * Mostly borrowed from xv6's `trap()` code. Interrupts should have been * disabled automatically since this is an interrupt gate. */ static void timer_interrupt_handler(interrupt_state_t *state) { (void) state; /** Unused. */ spinlock_acquire(&timer_tick_lock); /** Increment global timer tick. */ timer_tick++; // printf("."); /** * Check all sleeping processes, set them back to READY if desired * ticks have passed. */ spinlock_acquire(&ptable_lock); for (process_t *proc = ptable; proc < &ptable[MAX_PROCS]; ++proc) { if (proc->state == BLOCKED && proc->block_on == ON_SLEEP && timer_tick >= proc->target_tick) { proc->target_tick = 0; process_unblock(proc); } } spinlock_release(&ptable_lock); spinlock_release(&timer_tick_lock); process_t *proc = running_proc(); bool user_context = (state->cs & 0x3) == 3 /** DPL field is 3. */ && proc != NULL; /** * If we are now in the process's context and it is set to be killed, * exit the process right now. */ if (user_context && proc->killed) process_exit(); /** * If we are in a process and the process is in RUNNING state, yield * to the scheduler to force a new scheduling decision. Could happen * to a provess in kernel context (during a syscall) as well. */ if (proc != NULL && proc->state == RUNNING) { spinlock_acquire(&ptable_lock); proc->state = READY; yield_to_scheduler(); spinlock_release(&ptable_lock); } /** Re-check if we get killed since the yield. */ if (user_context && proc->killed) process_exit(); } /** * Initialize the PIT timer. Registers timer interrupt ISR handler, sets * PIT to run in mode 3 with given frequency in Hz. */ void timer_init(void) { timer_tick = 0; spinlock_init(&timer_tick_lock, "timer_tick_lock"); /** Register timer interrupt ISR handler. */ isr_register(INT_NO_TIMER, &timer_interrupt_handler); /** * Calculate the frequency divisor needed to run with the given * frequency. Divisor = base frequencty / desired frequency. */ uint16_t divisor = 1193182 / TIMER_FREQ_HZ; outb(0x43, 0x36); /** Run in mode 3. */ /** Sends frequency divisor, in lo | hi order. */ outb(0x40, (uint8_t) (divisor & 0xFF)); outb(0x40, (uint8_t) ((divisor >> 8) & 0xFF)); }
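Working through the divisor arithmetic above for the 100 Hz rate that kernel.c configures:

/**
 * divisor = 1193182 / 100 = 11931 (integer division), so the PIT actually
 * fires at 1193182 / 11931 ~= 100.007 Hz, i.e. roughly one tick every 10 ms.
 * This is also why `sleep(millisecs)` in sysproc.c converts with
 * millisecs * TIMER_FREQ_HZ / 1000 ticks.
 */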
josehu07/hux-kernel
src/common/string.c
<gh_stars>10-100 /** * Common string utilities. */ #include <stddef.h> #include "string.h" /** * Copies the byte C into the first COUNT bytes pointed to by DST. * Returns a copy of the pointer DST. */ void * memset(void *dst, unsigned char c, size_t count) { unsigned char *dst_copy = (unsigned char *) dst; while (count-- > 0) *dst_copy++ = c; return dst; } /** * Copies COUNT bytes from where SRC points to into where DST points to. * Assumes no overlapping between these two regions. * Returns a copy of the pointer DST. */ void * memcpy(void *dst, const void *src, size_t count) { unsigned char *dst_copy = (unsigned char *) dst; unsigned char *src_copy = (unsigned char *) src; while (count-- > 0) *dst_copy++ = *src_copy++; return dst; } /** * Copies COUNT bytes from where SRC points to into where DST points to. * The copy is like relayed by an internal buffer, so it is OK if these * two memory regions overlap. * Returns a copy of the pointer DST. */ void * memmove(void *dst, const void *src, size_t count) { unsigned long int dstp = (long int) dst; unsigned long int srcp = (long int) src; if (dstp - srcp >= count) /** Unsigned compare. */ dst = memcpy(dst, src, count); else { /** SRC region overlaps start of DST, do reversed order. */ unsigned char *dst_copy = ((unsigned char *) dst) + count; unsigned char *src_copy = ((unsigned char *) src) + count; while (count-- > 0) *dst_copy-- = *src_copy--; } return dst; } /** * Compare two memory regions byte-wise. Returns zero if they are equal. * Returns <0 if the first unequal byte has a lower unsigned value in * PTR1, and >0 if higher. */ int memcmp(const void *ptr1, const void *ptr2, size_t count) { const char *ptr1_cast = (const char *) ptr1; const char *ptr2_cast = (const char *) ptr2; char b1 = 0, b2 = 0; while (count-- > 0) { b1 = *ptr1_cast++; b2 = *ptr2_cast++; if (b1 != b2) return ((int) b1) - ((int) b2); } return ((int) b1) - ((int) b2); } /** Length of the string (excluding the terminating '\0'). */ size_t strlen(const char *str) { size_t len = 0; while (str[len]) len++; return len; } /** * Length of the string (excluding the terminating '\0'). * If string STR does not terminate before reaching COUNT chars, returns * COUNT. */ size_t strnlen(const char *str, size_t count) { size_t len = 0; while (str[len] && count > 0) { len++; count--; } return len; } /** * Compare two strings, returning less than, equal to or greater than zero * if STR1 is lexicographically less than, equal to or greater than S2. * Limited to upto COUNT chars. */ int strncmp(const char *str1, const char *str2, size_t count) { char c1 = '\0', c2 = '\0'; while (count-- > 0) { c1 = *str1++; c2 = *str2++; if (c1 == '\0' || c1 != c2) return ((int) c1) - ((int) c2); } return ((int) c1) - ((int) c2); } /** * Copy string SRC to DST. Assume DST is large enough. * Limited to upto COUNT chars. Adds implicit null terminator even if * COUNT is smaller than actual length of SRC. */ char * strncpy(char *dst, const char *src, size_t count) { size_t size = strnlen(src, count); if (size != count) memset(dst + size, '\0', count - size); dst[size] = '\0'; return memcpy(dst, src, size); } /** * Concatenate string DST with SRC. Assume DST is large enough. * Returns a copy of the pointer DST. * Limited to upto COUNT chars. */ char * strncat(char *dst, const char *src, size_t count) { char *s = dst; dst += strlen(dst); size_t size = strnlen(src, count); dst[size] = '\0'; memcpy(dst, src, size); return s; }
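A few illustrative sanity checks for the utilities above; not part of string.c, and they assume the kernel's `assert` from ../common/debug.h is pulled in.

/** Illustrative sanity checks for the string utilities. */
static void string_utils_example(void)
{
    char buf[16];

    memset(buf, 0, sizeof(buf));
    strncpy(buf, "hux", sizeof(buf) - 1);                 /* buf now holds "hux" */
    strncat(buf, "-kernel", sizeof(buf) - 1 - strlen(buf));

    assert(strlen(buf) == 10);                            /* "hux-kernel" */
    assert(strncmp(buf, "hux-kernel", 11) == 0);
    assert(strnlen("hux", 2) == 2);                       /* capped at COUNT */
}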
josehu07/hux-kernel
src/process/layout.h
<filename>src/process/layout.h /** * Virtual address space layout of a user process. * * Each process has an address space of size 1GiB, so the valid virtual * addresses a process could issue range from `0x00000000` to `0x40000000`. * * - The kernel's virtual address space of size 512MiB is mapped to the * bottom-most pages (`0x00000000` to `0x20000000`) * * - The ELF binary (`.text` + `.data` + `.bss` sections) starts from * `0x20000000` (and takes the size of at most 1MiB) * * - The stack begins at the top-most page (`0x40000000`), grows downwards * * - The region in-between is usable by the process heap, growing upwards */ #ifndef LAYOUT_H #define LAYOUT_H /** Virtual address space size: 1GiB. */ #define USER_MAX 0x40000000 /** * The lower-half maps the kernel for simplicity (in contrast to * typical higher-half design). Application uses the higher-half * starting from this address. */ #define USER_BASE 0x20000000 /** * Hux allows user executable to take up at most 1MiB space, starting * at USER_BASE and ending no higher than HEAP_BASE. */ #define HEAP_BASE (USER_BASE + 0x00100000) /** Max stack size limit is 4MiB. */ #define STACK_MIN (USER_MAX - 0x00400000) #endif
josehu07/hux-kernel
src/display/terminal.c
<filename>src/display/terminal.c<gh_stars>10-100 /** * Terminal display control. * * All functions called outside must have `terminal_lock` held already * (`printf()` does that). */ #include <stddef.h> #include <stdint.h> #include "terminal.h" #include "vga.h" #include "../common/string.h" #include "../common/port.h" #include "../common/debug.h" #include "../common/spinlock.h" static uint16_t * const VGA_MEMORY = (uint16_t *) 0xB8000; static const size_t VGA_WIDTH = 80; static const size_t VGA_HEIGHT = 25; /** * Default to black background + light grey foreground. * Foreground color can be customized with '*_color' functions. */ const vga_color_t TERMINAL_DEFAULT_COLOR_BG = VGA_COLOR_BLACK; const vga_color_t TERMINAL_DEFAULT_COLOR_FG = VGA_COLOR_LIGHT_GREY; static uint16_t *terminal_buf; static size_t terminal_row; /** Records current logical cursor pos. */ static size_t terminal_col; spinlock_t terminal_lock; /** Enable physical cursor and set thickness to 2. */ static void _enable_cursor() { outb(0x3D4, 0x0A); outb(0x3D5, (inb(0x3D5) & 0xC0) | 14); /** Start at scanline 14. */ outb(0x3D4, 0x0B); outb(0x3D5, (inb(0x3D5) & 0xE0) | 15); /** End at scanline 15. */ } /** Update the actual cursor position on screen. */ static void _update_cursor() { size_t idx = terminal_row * VGA_WIDTH + terminal_col; outb(0x3D4, 0x0F); outb(0x3D5, (uint8_t) (idx & 0xFF)); outb(0x3D4, 0x0E); outb(0x3D5, (uint8_t) ((idx >> 8) & 0xFF)); } /** * Scroll one line up, by replacing line 0-23 with the line below, and clear * the last line. */ static void _scroll_line() { for (size_t y = 0; y < VGA_HEIGHT; ++y) { for (size_t x = 0; x < VGA_WIDTH; ++x) { size_t idx = y * VGA_WIDTH + x; if (y < VGA_HEIGHT - 1) terminal_buf[idx] = terminal_buf[idx + VGA_WIDTH]; else /** Clear the last line. */ terminal_buf[idx] = vga_entry(TERMINAL_DEFAULT_COLOR_BG, TERMINAL_DEFAULT_COLOR_FG, ' '); } } } /** * Put a character at current cursor position with specified foreground color, * then update the logical cursor position. Should consider special symbols. */ static void _putchar_color(char c, vga_color_t fg) { switch (c) { case '\b': /** Backspace. */ if (terminal_col > 0) terminal_col--; break; case '\t': /** Horizontal tab. */ terminal_col += 4; terminal_col -= terminal_col % 4; if (terminal_col == VGA_WIDTH) terminal_col -= 4; break; case '\n': /** Newline (w/ carriage return). */ terminal_row++; terminal_col = 0; break; case '\r': /** Carriage return. */ terminal_col = 0; break; default: ; /** Displayable character. */ size_t idx = terminal_row * VGA_WIDTH + terminal_col; terminal_buf[idx] = vga_entry(TERMINAL_DEFAULT_COLOR_BG, fg, c); if (++terminal_col == VGA_WIDTH) { terminal_row++; terminal_col = 0; } } /** When going beyond the bottom line, scroll up one line. */ if (terminal_row == VGA_HEIGHT) { _scroll_line(); terminal_row--; } } /** Initialize terminal display. */ void terminal_init(void) { terminal_buf = VGA_MEMORY; terminal_row = 0; terminal_col = 0; spinlock_init(&terminal_lock, "terminal_lock"); _enable_cursor(); terminal_clear(); } /** Write a sequence of data. */ void terminal_write(const char *data, size_t size) { terminal_write_color(data, size, TERMINAL_DEFAULT_COLOR_FG); } /** Write a sequence of data with specified foreground color. */ void terminal_write_color(const char *data, size_t size, vga_color_t fg) { for (size_t i = 0; i < size; ++i) _putchar_color(data[i], fg); _update_cursor(); } /** Erase (backspace) a character. 
*/ void terminal_erase(void) { if (terminal_col > 0) terminal_col--; else if (terminal_row > 0) { terminal_row--; terminal_col = VGA_WIDTH - 1; } size_t idx = terminal_row * VGA_WIDTH + terminal_col; terminal_buf[idx] = vga_entry(TERMINAL_DEFAULT_COLOR_BG, TERMINAL_DEFAULT_COLOR_FG, ' '); _update_cursor(); } /** Clear the terminal window by flushing spaces. */ void terminal_clear(void) { for (size_t y = 0; y < VGA_HEIGHT; ++y) { for (size_t x = 0; x < VGA_WIDTH; ++x) { size_t idx = y * VGA_WIDTH + x; terminal_buf[idx] = vga_entry(TERMINAL_DEFAULT_COLOR_BG, TERMINAL_DEFAULT_COLOR_FG, ' '); } } terminal_row = 0; terminal_col = 0; _update_cursor(); }
josehu07/hux-kernel
src/filesys/sysfile.h
/**
 * Syscalls related to file system operations.
 */

#ifndef SYSFILE_H
#define SYSFILE_H

#include <stdint.h>

/** Flags for `open()`. */
#define OPEN_RD 0x1
#define OPEN_WR 0x2

/** Flags for `create()`. */
#define CREATE_FILE 0x1
#define CREATE_DIR 0x2

/** For the `fstat()` syscall. */
struct file_stat {
    uint32_t inumber;
    uint32_t type;
    uint32_t size;
};
typedef struct file_stat file_stat_t;

int32_t syscall_open();
int32_t syscall_close();
int32_t syscall_create();
int32_t syscall_remove();
int32_t syscall_read();
int32_t syscall_write();
int32_t syscall_chdir();
int32_t syscall_getcwd();
int32_t syscall_exec();
int32_t syscall_fstat();
int32_t syscall_seek();

#endif
josehu07/hux-kernel
src/process/process.c
<gh_stars>10-100 /** * Providing the abstraction of processes. */ #include <stddef.h> #include <stdint.h> #include <stdbool.h> #include "process.h" #include "layout.h" #include "../common/debug.h" #include "../common/string.h" #include "../common/spinlock.h" #include "../device/timer.h" #include "../interrupt/isr.h" #include "../memory/gdt.h" #include "../memory/slabs.h" #include "../filesys/file.h" /** Process table - list of PCB slots. */ process_t ptable[MAX_PROCS]; spinlock_t ptable_lock; /** Pointing to the `init` process. */ process_t *initproc; /** Next available PID value, incrementing overtime. */ static int8_t next_pid = 1; /** Extern the `return_from_trap` snippet from ASM `isr-stub.s`. */ extern void return_from_trap(void); /** * Any new process "returns" to here, which in turn returns to the * return-from-trap part of `isr_handler_stub`, entering user mode * execution. */ static void _new_process_entry(void) { /** Release the lock held in the scheduler context. */ spinlock_release(&ptable_lock); /** * The trap state on kernel stack, which will get popped, has EFLAGS * register value of 0x202 which means interrupts will be enabled once * enters user mode execution. See `yield_to_scheduler()` in * `scheduler.c` for more. */ } /** * Find an UNUSED slot in the ptable and put it into INITIAL state. If * all slots are in use, return NULL. */ static process_t * _alloc_new_process(void) { process_t *proc; bool found = false; spinlock_acquire(&ptable_lock); for (proc = ptable; proc < &ptable[MAX_PROCS]; ++proc) { if (proc->state == UNUSED) { found = true; break; } } if (!found) { warn("new_process: process table is full, no free slot"); spinlock_release(&ptable_lock); return NULL; } /** Allocate kernel stack. */ proc->kstack = salloc_page(); if (proc->kstack == 0) { warn("new_process: failed to allocate kernel stack page"); return NULL; } uint32_t sp = proc->kstack + KSTACK_SIZE; /** Make proper setups for the new process. */ proc->state = INITIAL; proc->block_on = NOTHING; proc->pid = next_pid++; proc->target_tick = 0; proc->wait_req = NULL; proc->wait_lock = NULL; for (size_t i = 0; i < MAX_FILES_PER_PROC; ++i) proc->files[i] = NULL; spinlock_release(&ptable_lock); /** * Leave room for the trap state. The initial context will be pushed * right below this trap state, with return address EIP pointing to * `_new_process_entry()` which returns to the return-from-trap part * of `isr_handler_stub`. * * In this way, the new process, after context switched to by the * scheduler, automatically jumps into user mode execution. */ sp -= sizeof(interrupt_state_t); proc->trap_state = (interrupt_state_t *) sp; memset(proc->trap_state, 0, sizeof(interrupt_state_t)); sp -= sizeof(uint32_t); *(uint32_t *) sp = (uint32_t) return_from_trap; sp -= sizeof(process_context_t); proc->context = (process_context_t *) sp; memset(proc->context, 0, sizeof(process_context_t)); proc->context->eip = (uint32_t) _new_process_entry; return proc; } /** Fill the ptable with UNUSED entries. */ void process_init(void) { spinlock_init(&ptable_lock, "ptable_lock"); for (size_t i = 0; i < MAX_PROCS; ++i) ptable[i].state = UNUSED; next_pid = 1; } /** * Initialize the `init` process - put it in READY state in the process * table so the scheduler can pick it up. */ void initproc_init(void) { /** Get the embedded binary of `init.s`. 
*/ extern char _binary___user_init_start[]; extern char _binary___user_init_end[]; char *elf_curr = (char *) _binary___user_init_start; char *elf_end = (char *) _binary___user_init_end; /** Get a slot in the ptable. */ process_t *proc = _alloc_new_process(); assert(proc != NULL); strncpy(proc->name, "init", sizeof(proc->name) - 1); proc->parent = NULL; /** * Set up page tables and pre-map necessary pages: * - kernel mapped to lower 512MiB * - program ELF binary follows * - top-most stack page */ proc->pgdir = (pde_t *) salloc_page(); assert(proc->pgdir != NULL); memset(proc->pgdir, 0, sizeof(pde_t) * PDES_PER_PAGE); uint32_t vaddr_btm = 0; /** Kernel-mapped. */ while (vaddr_btm < PHYS_MAX) { pte_t *pte = paging_walk_pgdir(proc->pgdir, vaddr_btm, true); assert(pte != NULL); paging_map_kpage(pte, vaddr_btm); vaddr_btm += PAGE_SIZE; } uint32_t vaddr_elf = USER_BASE; /** ELF binary. */ while (elf_curr < elf_end) { pte_t *pte = paging_walk_pgdir(proc->pgdir, vaddr_elf, true); assert(pte != NULL); uint32_t paddr = paging_map_upage(pte, true); assert(paddr != 0); /** Copy ELF content in. */ memcpy((char *) paddr, elf_curr, elf_curr + PAGE_SIZE > elf_end ? elf_end - elf_curr : PAGE_SIZE); vaddr_elf += PAGE_SIZE; elf_curr += PAGE_SIZE; } while (vaddr_elf < HEAP_BASE) { /** Rest of ELF region. */ pte_t *pte = paging_walk_pgdir(proc->pgdir, vaddr_elf, true); assert(pte != NULL); uint32_t paddr = paging_map_upage(pte, true); assert(paddr != 0); vaddr_elf += PAGE_SIZE; } uint32_t vaddr_top = USER_MAX - PAGE_SIZE; /** Top stack page. */ pte_t *pte_top = paging_walk_pgdir(proc->pgdir, vaddr_top, true); assert(pte_top != NULL); uint32_t paddr_top = paging_map_upage(pte_top, true); assert(paddr_top != 0); memset((char *) paddr_top, 0, PAGE_SIZE); /** Set up the trap state for returning to user mode. */ proc->trap_state->cs = (SEGMENT_UCODE << 3) | 0x3; /** DPL_USER. */ proc->trap_state->ds = (SEGMENT_UDATA << 3) | 0x3; /** DPL_USER. */ proc->trap_state->ss = proc->trap_state->ds; proc->trap_state->eflags = 0x00000202; /** Interrupt enable. */ proc->trap_state->esp = USER_MAX - 4; /** GCC might push an FP. */ proc->trap_state->eip = USER_BASE; /** Beginning of ELF binary. */ proc->stack_low = vaddr_top; proc->heap_high = HEAP_BASE; proc->timeslice = 1; /** Initially at root directory '/'. */ proc->cwd = inode_get_at_boot(ROOT_INUMBER); if (proc->cwd == NULL) error("initproc_init: failed to get inode of root directory"); /** Set process state to READY so the scheduler can pick it up. */ initproc = proc; proc->killed = false; /** Needed to hold lock because the assignment might not be atomic. */ spinlock_acquire(&ptable_lock); proc->state = READY; spinlock_release(&ptable_lock); } /** * Fork a new process that is a duplicate of the caller process. Caller * is the parent process and the new one is the child process. Scheduling * timeslice length of the new process must be an integer in [1, 16]. * * Returns child pid in parent, 0 in child, and -1 if failed, just like * UNIX fork(). */ int8_t process_fork(uint8_t timeslice) { if (timeslice < 1 || timeslice > 16) return -1; process_t *parent = running_proc(); /** Get a slot in the ptable. */ process_t *child = _alloc_new_process(); if (child == NULL) { warn("fork: failed to allocate new child process"); return -1; } /** * Create the new process's page directory, and then copy over the * parent process page directory, mapping all mapped-pages for the * child process to physical frames along the way. 
*/ child->pgdir = (pde_t *) salloc_page(); if (child->pgdir == NULL) { warn("fork: cannot allocate level-1 directory, out of kheap memory?"); sfree_page((char *) child->kstack); // Maybe use goto. child->kstack = 0; child->pid = 0; child->state = UNUSED; return -1; } memset(child->pgdir, 0, sizeof(pde_t) * PDES_PER_PAGE); uint32_t vaddr_btm = 0; /** Kernel-mapped. */ while (vaddr_btm < PHYS_MAX) { pte_t *pte = paging_walk_pgdir(child->pgdir, vaddr_btm, true); if (pte == NULL) { warn("fork: cannot allocate level-2 table, out of kheap memory?"); paging_destroy_pgdir(child->pgdir); // Maybe use goto. child->pgdir = NULL; sfree_page((char *) child->kstack); child->kstack = 0; child->pid = 0; child->state = UNUSED; return -1; } paging_map_kpage(pte, vaddr_btm); vaddr_btm += PAGE_SIZE; } if (!paging_copy_range(child->pgdir, parent->pgdir, USER_BASE, parent->heap_high) || !paging_copy_range(child->pgdir, parent->pgdir, parent->stack_low, USER_MAX)) { warn("fork: failed to copy parent memory state over to child"); paging_unmap_range(child->pgdir, USER_BASE, parent->heap_high); paging_unmap_range(child->pgdir, parent->stack_low, USER_MAX); paging_destroy_pgdir(child->pgdir); // Maybe use goto. child->pgdir = NULL; sfree_page((char *) child->kstack); child->kstack = 0; child->pid = 0; child->state = UNUSED; return -1; } child->stack_low = parent->stack_low; child->heap_high = parent->heap_high; child->timeslice = timeslice; /** Child shares the same set of current open files with parent. */ for (size_t i = 0; i < MAX_FILES_PER_PROC; ++i) { if (parent->files[i] != NULL) { child->files[i] = parent->files[i]; file_ref(parent->files[i]); } } /** Child inherits parent's working directory. */ child->cwd = parent->cwd; inode_ref(parent->cwd); /** * Copy the trap state of parent to the child. Child should resume * execution at the same where place where parent is at right after * `fork()` call. */ memcpy(child->trap_state, parent->trap_state, sizeof(interrupt_state_t)); child->trap_state->eax = 0; /** `fork()` returns 0 in child. */ child->parent = parent; strncpy(child->name, parent->name, sizeof(parent->name)); child->killed = false; int8_t child_pid = child->pid; /** Needed to hold lock because the assignment might not be atomic. */ spinlock_acquire(&ptable_lock); child->state = READY; spinlock_release(&ptable_lock); return child_pid; /** Returns child pid in parent. */ } /** * Block the running process on the given reason. * * Must be called with `ptable_lock` held, and exactly only `ptable_lock` * held (so the CPU's `cli_dpeth` is 1. A common pattern would be like: * * spinlock_acquire(&ptable_lock); * spinlock_release(&something_lock); * * proc->wait_something = ...; * process_block(ON_SOMETHING); * proc->wait_something = NULL; * * spinlock_release(&ptable_lock); * spinlock_acquire(&something_lock); */ inline void process_block(process_block_on_t reason) { assert(spinlock_locked(&ptable_lock)); process_t *proc = running_proc(); proc->block_on = reason; proc->state = BLOCKED; /** Must yield with `ptable_lock` held. */ yield_to_scheduler(); } /** * Unblock a process by setting it to READY state and clear the reason. * Must be called with `ptable_lock` held. */ inline void process_unblock(process_t *proc) { assert(spinlock_locked(&ptable_lock)); proc->block_on = NOTHING; proc->state = READY; } /** Terminate a process. */ void process_exit(void) { process_t *proc = running_proc(); assert(proc != initproc); /** Close all open files. 
*/ for (size_t i = 0; i < MAX_FILES_PER_PROC; ++i) { if (proc->files[i] != NULL) { file_put(proc->files[i]); proc->files[i] = NULL; } } inode_put(proc->cwd); proc->cwd = NULL; spinlock_acquire(&ptable_lock); /** Parent might be blocking due to waiting. */ if (proc->parent->state == BLOCKED && proc->parent->block_on == ON_WAIT) process_unblock(proc->parent); /** * A process must have waited all its children before calling `exit()` * itself. Any children still in the process table becomes a: * - "Orphan" process, if it is still running. Pass it to the `init` * process for later "reaping"; * - "Zombie" process, if it has terminated. In this case, besides * passing to `init`, we should immediately wake up init as well. * * The `init` process should be executing an infinite loop of `wait()`. */ for (process_t *child = ptable; child < &ptable[MAX_PROCS]; ++child) { if (child->parent == proc) { child->parent = initproc; if (child->state == TERMINATED) process_unblock(initproc); } } /** Go back to scheduler context. */ proc->state = TERMINATED; yield_to_scheduler(); error("exit: process gets re-scheduled after termination"); } /** Sleep for specified number of timer ticks. */ void process_sleep(uint32_t sleep_ticks) { process_t *proc = running_proc(); spinlock_acquire(&timer_tick_lock); uint32_t curr_tick = timer_tick; spinlock_release(&timer_tick_lock); uint32_t target_tick = curr_tick + sleep_ticks; proc->target_tick = target_tick; spinlock_acquire(&ptable_lock); process_block(ON_SLEEP); spinlock_release(&ptable_lock); /** Could be re-scheduled only if `timer_tick` passed `target_tick`. */ } /** * Wait for any child process's exit. A child process could have already * exited and becomes a zombie - in this case, it won't block and will * just return the first zombie child it sees in ptable. * * Cleaning up of children ptable entries is done in `wait()` as well. * * Returns the pid of the waited child, or -1 if don't have kids. */ int8_t process_wait(void) { process_t *proc = running_proc(); uint32_t child_pid; spinlock_acquire(&ptable_lock); while (1) { bool have_kids = false; for (process_t *child = ptable; child < &ptable[MAX_PROCS]; ++child) { if (child->parent != proc) continue; have_kids = true; if (child->state == TERMINATED) { /** Found one, clean up its state. */ child_pid = child->pid; sfree_page((char *) child->kstack); child->kstack = 0; paging_unmap_range(child->pgdir, USER_BASE, child->heap_high); paging_unmap_range(child->pgdir, child->stack_low, USER_MAX); paging_destroy_pgdir(child->pgdir); child->pid = 0; child->parent = NULL; child->name[0] = '\0'; child->state = UNUSED; spinlock_release(&ptable_lock); return child_pid; } } /** Dont' have children. */ if (!have_kids || proc->killed) { spinlock_release(&ptable_lock); return -1; } /** * Otherwise, some child process is still running. Block until * a child wakes me up at its exit. */ process_block(ON_WAIT); /** Could be re-scheduled after being woken up. */ } } /** Force to kill a process by pid. Returns -1 if given pid not found. */ int8_t process_kill(int8_t pid) { spinlock_acquire(&ptable_lock); for (process_t *proc = ptable; proc < &ptable[MAX_PROCS]; ++proc) { if (proc->pid == pid) { proc->killed = true; /** Wake it up in case it is blocking on anything. */ if (proc->state == BLOCKED) process_unblock(proc); spinlock_release(&ptable_lock); return 0; } } spinlock_release(&ptable_lock); return -1; }
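As a rough illustration of how `_alloc_new_process()` carves the new kernel stack (trap state on top, then the `return_from_trap` address, then the initial context), here is a host-side sketch; `KSTACK_SIZE` and the struct sizes are placeholder assumptions, not the real values from `process.h`/`isr.h`.

#include <stdio.h>
#include <stdint.h>

/* Illustrative sizes only -- the real interrupt_state_t / process_context_t
 * layouts live in isr.h and process.h. */
#define KSTACK_SIZE       4096u
#define TRAP_STATE_SIZE     76u   /* assumed size of interrupt_state_t  */
#define CONTEXT_SIZE        20u   /* assumed size of process_context_t  */

int main(void) {
    uint32_t kstack = 0x00800000u;        /* pretend salloc_page() result */
    uint32_t sp = kstack + KSTACK_SIZE;   /* stack grows downwards        */

    sp -= TRAP_STATE_SIZE;                /* room for the trap state      */
    uint32_t trap_state = sp;
    sp -= sizeof(uint32_t);               /* return_from_trap address     */
    uint32_t ret_addr = sp;
    sp -= CONTEXT_SIZE;                   /* context, eip -> entry func   */
    uint32_t context = sp;

    printf("kstack top:  %#010x\n", kstack + KSTACK_SIZE);
    printf("trap state:  %#010x\n", trap_state);
    printf("ret address: %#010x\n", ret_addr);
    printf("context:     %#010x\n", context);
    return 0;
}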
josehu07/hux-kernel
user/rm.c
/** * Command line utility - remove file or directory. */ #include <stdint.h> #include <stdbool.h> #include <stddef.h> #include "lib/syscall.h" #include "lib/printf.h" #include "lib/debug.h" #include "lib/string.h" static bool _dir_is_empty(int8_t fd) { /** Empty dir only has '.' and '..'. */ size_t count = 0; dentry_t dentry; while (read(fd, (char *) &dentry, sizeof(dentry_t)) == sizeof(dentry_t)) { if (dentry.valid == 1) count++; } return count <= 2; } static void _remove_file(char *path, bool allow_dir) { /** If path does not exist, fail. */ int8_t fd = open(path, OPEN_RD); if (fd < 0) { warn("rm: cannot open path '%s'", path); return; } file_stat_t stat; if (fstat(fd, &stat) != 0) { warn("rm: cannot get stat of '%s'", path); close(fd); return; } if (stat.type == INODE_TYPE_DIR) { /** If path is dir but dir not allowed, fail. */ if (!allow_dir) { warn("rm: path '%s' is directory", path); close(fd); return; } /** If path is non-empty dir, fail. */ if (!_dir_is_empty(fd)) { warn("rm: directory '%s' is not empty", path); close(fd); return; } } close(fd); int ret = remove(path); if (ret != 0) warn("rm: remove '%s' failed", path); } static void _print_help_exit(char *me) { printf("Usage: %s [-h] [-r] path\n", me); exit(); } void main(int argc, char *argv[]) { if (argc < 2 || strncmp(argv[1], "-h", 2) == 0) _print_help_exit(argv[0]); bool allow_dir = false; if (strncmp(argv[1], "-r", 2) == 0) allow_dir = true; char *path; if (allow_dir) { if (argc != 3) _print_help_exit(argv[0]); path = argv[2]; } else { if (argc != 2) _print_help_exit(argv[0]); path = argv[1]; } _remove_file(path, allow_dir); exit(); }
josehu07/hux-kernel
src/filesys/file.c
<gh_stars>10-100 /** * In-memory structures and operations on open files. */ #include <stdint.h> #include <stdbool.h> #include <stddef.h> #include "file.h" #include "block.h" #include "vsfs.h" #include "sysfile.h" #include "../common/debug.h" #include "../common/string.h" #include "../common/spinlock.h" #include "../common/parklock.h" /** Open inode cache - list of in-memory inode structures. */ mem_inode_t icache[MAX_MEM_INODES]; spinlock_t icache_lock; /** Open file table - list of open file structures. */ file_t ftable[MAX_OPEN_FILES]; spinlock_t ftable_lock; /** For debug printing the state of the two tables. */ __attribute__((unused)) static void _print_icache_state(void) { spinlock_acquire(&icache_lock); info("Inode cache state:"); for (mem_inode_t *inode = icache; inode < &icache[MAX_MEM_INODES]; ++inode) { if (inode->ref_cnt == 0) continue; printf(" inode { inum: %u, ref_cnt: %d, size: %u, dir: %b }\n", inode->inumber, inode->ref_cnt, inode->d_inode.size, inode->d_inode.type == INODE_TYPE_DIR ? 1 : 0); } printf(" end\n"); spinlock_release(&icache_lock); } __attribute__((unused)) static void _print_ftable_state(void) { spinlock_acquire(&ftable_lock); info("Open file table state:"); for (file_t *file = ftable; file < &ftable[MAX_OPEN_FILES]; ++file) { if (file->ref_cnt == 0) continue; printf(" file -> inum %u { ref_cnt: %d, offset: %u, r: %b, w: %b }\n", file->inode->inumber, file->ref_cnt, file->offset, file->readable, file->writable); } printf(" end\n"); spinlock_release(&ftable_lock); } /** Lock/Unlock a mem_ionde's private fields. */ void inode_lock(mem_inode_t *m_inode) { parklock_acquire(&(m_inode->lock)); } void inode_unlock(mem_inode_t *m_inode) { parklock_release(&(m_inode->lock)); } /** * Get inode for given inode number. If that inode has been in memory, * increment its ref count and return. Otherwise, read from the disk into * an empty inode cache slot. */ static mem_inode_t * _inode_get(uint32_t inumber, bool boot) { assert(inumber < (superblock.inode_blocks * (BLOCK_SIZE / INODE_SIZE))); mem_inode_t *m_inode = NULL; spinlock_acquire(&icache_lock); /** Search icache to see if it has been in memory. */ mem_inode_t *empty_slot = NULL; for (m_inode = icache; m_inode < &icache[MAX_MEM_INODES]; ++m_inode) { if (m_inode->ref_cnt > 0 && m_inode->inumber == inumber) { m_inode->ref_cnt++; spinlock_release(&icache_lock); return m_inode; } if (empty_slot == NULL && m_inode->ref_cnt == 0) empty_slot = m_inode; /** Remember empty slot seen. */ } if (empty_slot == NULL) { warn("inode_get: no empty mem_inode slot"); spinlock_release(&icache_lock); return NULL; } m_inode = empty_slot; m_inode->inumber = inumber; m_inode->ref_cnt = 1; spinlock_release(&icache_lock); /** Lock the inode and read from disk. */ inode_lock(m_inode); bool success = boot ? block_read_at_boot((char *) &(m_inode->d_inode), DISK_ADDR_INODE(inumber), sizeof(inode_t)) : block_read((char *) &(m_inode->d_inode), DISK_ADDR_INODE(inumber), sizeof(inode_t)); if (!success) { warn("inode_get: failed to read inode %u from disk", inumber); inode_unlock(m_inode); return NULL; } inode_unlock(m_inode); // _print_icache_state(); return m_inode; } mem_inode_t * inode_get(uint32_t inumber) { return _inode_get(inumber, false); } mem_inode_t * inode_get_at_boot(uint32_t inumber) { return _inode_get(inumber, true); } /** Increment reference to an already-got inode. 
*/ void inode_ref(mem_inode_t *m_inode) { spinlock_acquire(&icache_lock); assert(m_inode->ref_cnt > 0); m_inode->ref_cnt++; spinlock_release(&icache_lock); } /** * Put down a reference to an inode. If the reference count goes to * zero, this icache slot becomes empty. */ void inode_put(mem_inode_t *m_inode) { spinlock_acquire(&icache_lock); assert(!parklock_holding(&(m_inode->lock))); assert(m_inode->ref_cnt > 0); m_inode->ref_cnt--; spinlock_release(&icache_lock); } /** Flush an in-memory modified inode to disk. */ static bool _flush_inode(mem_inode_t *m_inode) { return block_write((char *) &(m_inode->d_inode), DISK_ADDR_INODE(m_inode->inumber), sizeof(inode_t)); } /** Allocate an inode structure on disk (and gets into memory). */ mem_inode_t * inode_alloc(uint32_t type) { /** Get a free slot according to bitmap. */ uint32_t inumber = bitmap_alloc(&inode_bitmap); if (inumber == inode_bitmap.slots) { warn("inode_alloc: no free inode slot left"); return NULL; } inode_t d_inode; memset(&d_inode, 0, sizeof(inode_t)); d_inode.type = type; /** Persist to disk: bitmap first, then the inode. */ if (!inode_bitmap_update(inumber)) { warn("inode_alloc: failed to persist inode bitmap"); bitmap_clear(&inode_bitmap, inumber); return NULL; } if (!block_write((char *) &d_inode, DISK_ADDR_INODE(inumber), sizeof(inode_t))) { warn("inode_alloc: failed to persist inode %u", inumber); bitmap_clear(&inode_bitmap, inumber); inode_bitmap_update(inumber); /** Ignores error. */ return NULL; } return inode_get(inumber); } /** * Free an on-disk inode structure (removing a file). Avoids calling * `_walk_inode_index()` repeatedly. * Must be called with lock on M_INODE held. */ void inode_free(mem_inode_t *m_inode) { m_inode->d_inode.size = 0; m_inode->d_inode.type = 0; /** Direct. */ for (size_t idx0 = 0; idx0 < NUM_DIRECT; ++idx0) { if (m_inode->d_inode.data0[idx0] != 0) { block_free(m_inode->d_inode.data0[idx0]); m_inode->d_inode.data0[idx0] = 0; } } /** Singly-indirect. */ for (size_t idx0 = 0; idx0 < NUM_INDIRECT1; ++idx0) { uint32_t ib1_addr = m_inode->d_inode.data1[idx0]; if (ib1_addr != 0) { uint32_t ib1[UINT32_PB]; if (block_read((char *) ib1, ib1_addr, BLOCK_SIZE)) { for (size_t idx1 = 0; idx1 < UINT32_PB; ++idx1) { if (ib1[idx1] != 0) block_free(ib1[idx1]); } } block_free(ib1_addr); m_inode->d_inode.data1[idx0] = 0; } } /** Doubly-indirect. */ for (size_t idx0 = 0; idx0 < NUM_INDIRECT2; ++idx0) { uint32_t ib1_addr = m_inode->d_inode.data2[idx0]; if (ib1_addr != 0) { uint32_t ib1[UINT32_PB]; if (block_read((char *) ib1, ib1_addr, BLOCK_SIZE)) { for (size_t idx1 = 0; idx1 < UINT32_PB; ++idx1) { uint32_t ib2_addr = ib1[idx1]; if (ib2_addr != 0) { uint32_t ib2[UINT32_PB]; if (block_read((char *) ib2, ib2_addr, BLOCK_SIZE)) { for (size_t idx2 = 0; idx2 < UINT32_PB; ++idx2) { if (ib2[idx2] != 0) block_free(ib2[idx2]); } } block_free(ib1[idx1]); } } } block_free(ib1_addr); m_inode->d_inode.data2[idx0] = 0; } } _flush_inode(m_inode); bitmap_clear(&inode_bitmap, m_inode->inumber); inode_bitmap_update(m_inode->inumber); /** Ignores error. */ } /** * Walk the indexing array to get block number for the n-th block. * Allocates the block if was not allocated. Returns address 0 * (which is invalid for a data block) on failures. */ static uint32_t _walk_inode_index(mem_inode_t *m_inode, uint32_t idx) { /** Direct. */ if (idx < NUM_DIRECT) { if (m_inode->d_inode.data0[idx] == 0) m_inode->d_inode.data0[idx] = block_alloc(); return m_inode->d_inode.data0[idx]; } /** Singly-indirect. 
*/ idx -= NUM_DIRECT; if (idx < NUM_INDIRECT1 * UINT32_PB) { size_t idx0 = idx / UINT32_PB; size_t idx1 = idx % UINT32_PB; /** Load indirect1 block. */ uint32_t ib1_addr = m_inode->d_inode.data1[idx0]; if (ib1_addr == 0) { ib1_addr = block_alloc(); if (ib1_addr == 0) return 0; m_inode->d_inode.data1[idx0] = ib1_addr; } uint32_t ib1[UINT32_PB]; if (!block_read((char *) ib1, ib1_addr, BLOCK_SIZE)) return 0; /** Index in the indirect1 block. */ if (ib1[idx1] == 0) { ib1[idx1] = block_alloc(); if (ib1[idx1] == 0) return 0; if (!block_write((char *) ib1, ib1_addr, BLOCK_SIZE)) return 0; } return ib1[idx1]; } /** Doubly indirect. */ idx -= NUM_INDIRECT1 * UINT32_PB; if (idx < NUM_INDIRECT2 * UINT32_PB*UINT32_PB) { size_t idx0 = idx / (UINT32_PB*UINT32_PB); size_t idx1 = (idx % (UINT32_PB*UINT32_PB)) / UINT32_PB; size_t idx2 = idx % UINT32_PB; /** Load indirect1 block. */ uint32_t ib1_addr = m_inode->d_inode.data2[idx0]; if (ib1_addr == 0) { ib1_addr = block_alloc(); if (ib1_addr == 0) return 0; m_inode->d_inode.data2[idx0] = ib1_addr; } uint32_t ib1[UINT32_PB]; if (!block_read((char *) ib1, ib1_addr, BLOCK_SIZE)) return 0; /** Load indirect2 block. */ uint32_t ib2_addr = ib1[idx1]; if (ib2_addr == 0) { ib2_addr = block_alloc(); if (ib2_addr == 0) return 0; ib1[idx1] = ib2_addr; if (!block_write((char *) ib1, ib1_addr, BLOCK_SIZE)) return 0; } uint32_t ib2[UINT32_PB]; if (!block_read((char *) ib2, ib2_addr, BLOCK_SIZE)) return 0; /** Index in the indirect2 block. */ if (ib2[idx2] == 0) { ib2[idx2] = block_alloc(); if (ib2[idx2] == 0) return 0; if (!block_write((char *) ib2, ib2_addr, BLOCK_SIZE)) return 0; } return ib2[idx2]; } warn("walk_inode_index: index %u is out of range", idx); return 0; } /** * Read data at logical offset from inode. Returns the number of bytes * actually read. * Must with lock on M_INODE held. */ size_t inode_read(mem_inode_t *m_inode, char *dst, uint32_t offset, size_t len) { if (offset > m_inode->d_inode.size) return 0; if (offset + len > m_inode->d_inode.size) len = m_inode->d_inode.size - offset; uint32_t bytes_read = 0; while (len > bytes_read) { uint32_t bytes_left = len - bytes_read; uint32_t start_offset = offset + bytes_read; uint32_t req_offset = ADDR_BLOCK_OFFSET(start_offset); uint32_t next_offset = ADDR_BLOCK_ROUND_DN(start_offset) + BLOCK_SIZE; uint32_t effective = next_offset - start_offset; if (bytes_left < effective) effective = bytes_left; uint32_t block_addr = _walk_inode_index(m_inode, start_offset / BLOCK_SIZE); if (block_addr == 0) { warn("inode_read: failed to walk inode index on offset %u", start_offset); return bytes_read; } if (!block_read(dst + bytes_read, block_addr + req_offset, effective)) { warn("inode_read: failed to read disk address %p", block_addr); return bytes_read; } bytes_read += effective; } return bytes_read; } /** * Write data at logical offset of inode. Returns the number of bytes * actually written. Will extend the inode if the write exceeds current * file size. * Must with lock on M_INODE held. 
*/ size_t inode_write(mem_inode_t *m_inode, char *src, uint32_t offset, size_t len) { if (offset > m_inode->d_inode.size) return 0; uint32_t bytes_written = 0; while (len > bytes_written) { uint32_t bytes_left = len - bytes_written; uint32_t start_offset = offset + bytes_written; uint32_t req_offset = ADDR_BLOCK_OFFSET(start_offset); uint32_t next_offset = ADDR_BLOCK_ROUND_DN(start_offset) + BLOCK_SIZE; uint32_t effective = next_offset - start_offset; if (bytes_left < effective) effective = bytes_left; uint32_t block_addr = _walk_inode_index(m_inode, start_offset / BLOCK_SIZE); if (block_addr == 0) { warn("inode_write: failed to walk inode index on offset %u", start_offset); return bytes_written; } if (!block_write(src + bytes_written, block_addr + req_offset, effective)) { warn("inode_write: failed to write block address %p", block_addr); return bytes_written; } bytes_written += effective; } /** Update inode size if extended. */ if (offset + len > m_inode->d_inode.size) { m_inode->d_inode.size = offset + len; _flush_inode(m_inode); } return bytes_written; } /** Allocate a slot in the opne file table. Returns NULL on failure. */ file_t * file_get(void) { spinlock_acquire(&ftable_lock); for (file_t *file = ftable; file < &ftable[MAX_OPEN_FILES]; ++file) { if (file->ref_cnt == 0) { file->ref_cnt = 1; spinlock_release(&ftable_lock); return file; } } spinlock_release(&ftable_lock); return NULL; } /** Increment reference to an already-got file. */ void file_ref(file_t *file) { spinlock_acquire(&ftable_lock); assert(file->ref_cnt > 0); file->ref_cnt++; spinlock_release(&ftable_lock); } /** * Put down a reference to an open file. If the reference count goes to * zero, actually closes this file and this ftable slot becomes empty. */ void file_put(file_t *file) { mem_inode_t *inode; /** Decrement reference count. */ spinlock_acquire(&ftable_lock); assert(file->ref_cnt > 0); file->ref_cnt--; if (file->ref_cnt > 0) { /** Do nothing if still referenced. */ spinlock_release(&ftable_lock); return; } inode = file->inode; /** Remember inode for putting. */ spinlock_release(&ftable_lock); /** Actually closing, put inode. */ inode_put(inode); } /** Get metadata information of a file. */ void file_stat(file_t *file, file_stat_t *stat) { inode_lock(file->inode); stat->inumber = file->inode->inumber; stat->type = file->inode->d_inode.type; stat->size = file->inode->d_inode.size; inode_unlock(file->inode); }
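To clarify the indexing math in `_walk_inode_index()`, here is a host-side sketch that classifies a file block index into the direct, singly-indirect, or doubly-indirect level; `NUM_DIRECT`, `NUM_INDIRECT1`, `NUM_INDIRECT2`, and `UINT32_PB` are assumed sample values, the real ones live in `vsfs.h`.

#include <stdio.h>
#include <stddef.h>

/* Assumed parameters for illustration only. */
#define NUM_DIRECT     16
#define NUM_INDIRECT1   8
#define NUM_INDIRECT2   1
#define UINT32_PB     256   /* uint32_t entries per block, i.e. BLOCK_SIZE / 4 */

/* Mirror the branching in _walk_inode_index(): report which level a
 * file-block index lands in and the per-level indices used. */
static void classify(size_t idx) {
    if (idx < NUM_DIRECT) {
        printf("block %6zu -> direct[%zu]\n", idx, idx);
        return;
    }
    idx -= NUM_DIRECT;
    if (idx < (size_t) NUM_INDIRECT1 * UINT32_PB) {
        printf("block %6zu -> indirect1[%zu][%zu]\n",
               idx + NUM_DIRECT, idx / UINT32_PB, idx % UINT32_PB);
        return;
    }
    idx -= (size_t) NUM_INDIRECT1 * UINT32_PB;
    printf("block %6zu -> indirect2[%zu][%zu][%zu]\n",
           idx + NUM_DIRECT + (size_t) NUM_INDIRECT1 * UINT32_PB,
           idx / (UINT32_PB * UINT32_PB),
           (idx % (UINT32_PB * UINT32_PB)) / UINT32_PB,
           idx % UINT32_PB);
}

int main(void) {
    size_t samples[] = {0, 15, 16, 300, 2064, 5000};
    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i)
        classify(samples[i]);
    return 0;
}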
josehu07/hux-kernel
src/filesys/exec.c
/** * Implementation of the `exec()` syscall on ELF-32 file. */ #include <stdint.h> #include <stddef.h> #include <stdbool.h> #include "exec.h" #include "file.h" #include "vsfs.h" #include "../boot/elf.h" #include "../common/debug.h" #include "../common/string.h" #include "../memory/paging.h" #include "../memory/slabs.h" #include "../process/process.h" #include "../process/scheduler.h" #include "../process/layout.h" /** * Refresh page table, load an executable ELF program at given inode, and * start execution at the beginning of its text section. ARGV is an array * of strings (`char *`s) where the last one must be NULL, indicating * the end of argument array. * * The syscall does not actually return on success, since the process * should have jumped to the newly loaded code after returning from this * trap frame. Returns false on failures. */ bool exec_program(mem_inode_t *inode, char *filename, char **argv) { process_t *proc = running_proc(); pde_t *pgdir = NULL; inode_lock(inode); /** Read in ELF header, sanity check magic number. */ elf_file_header_t elf_header; if (inode_read(inode, (char *) &elf_header, 0, sizeof(elf_file_header_t)) != sizeof(elf_file_header_t)) { warn("exec: failed to read ELF file header"); goto fail; } if (elf_header.magic != ELF_MAGIC) { warn("exec: ELF header magic number mismatch"); goto fail; } /** * Mimicks `initproc_init()` in `process.c`. Sets up a brand-new page table * and pre-maps necessary pages: * - kernel mapped to lower 512MiB * - program ELF binary follows * - top-most stack page * * Need to set up a brand-new copy of page table because if there are any * errors that occur during the process, we can gracefully return an error * to the caller process instead of breaking it. */ pgdir = (pde_t *) salloc_page(); if (pgdir == NULL) { warn("exec: failed to allocate new page directory"); goto fail; } memset(pgdir, 0, sizeof(pde_t) * PDES_PER_PAGE); uint32_t vaddr_btm = 0; /** Kernel-mapped. */ while (vaddr_btm < PHYS_MAX) { pte_t *pte = paging_walk_pgdir(pgdir, vaddr_btm, true); if (pte == NULL) goto fail; paging_map_kpage(pte, vaddr_btm); vaddr_btm += PAGE_SIZE; } elf_program_header_t prog_header; /** ELF binary. */ uint32_t vaddr_elf_max = USER_BASE; for (size_t idx = 0; idx < elf_header.phnum; ++idx) { /** Read in this program header. */ size_t offset = elf_header.phoff + idx * sizeof(elf_program_header_t); if (inode_read(inode, (char *) &prog_header, offset, sizeof(elf_program_header_t)) != sizeof(elf_program_header_t)) { goto fail; } if (prog_header.type != ELF_PROG_TYPE_LOAD) continue; if (prog_header.memsz < prog_header.filesz) goto fail; /** Read in program segment described by this header. */ uint32_t vaddr_curr = prog_header.vaddr; uint32_t vaddr_end = prog_header.vaddr + prog_header.memsz; uint32_t elf_curr = prog_header.offset; uint32_t elf_end = prog_header.offset + prog_header.filesz; while (vaddr_curr < vaddr_end) { size_t effective_v = PAGE_SIZE - ADDR_PAGE_OFFSET(vaddr_curr); if (effective_v > vaddr_end - vaddr_curr) effective_v = vaddr_end - vaddr_curr; size_t effective_e = effective_v; if (effective_e > elf_end - elf_curr) effective_e = elf_end - elf_curr; if (vaddr_curr < USER_BASE) { vaddr_curr += effective_v; elf_curr += effective_e; continue; } pte_t *pte = paging_walk_pgdir(pgdir, vaddr_curr, true); if (pte == NULL) goto fail; uint32_t paddr = pte->present == 0 ? 
paging_map_upage(pte, true) : ENTRY_FRAME_ADDR(*pte); if (paddr == 0) goto fail; uint32_t paddr_curr = paddr + ADDR_PAGE_OFFSET(vaddr_curr); if (effective_e > 0) { if (inode_read(inode, (char *) paddr_curr, elf_curr, effective_e) != effective_e) { goto fail; } elf_curr += effective_e; } vaddr_curr += effective_v; } if (vaddr_curr > vaddr_elf_max) vaddr_elf_max = ADDR_PAGE_ROUND_UP(vaddr_curr); } inode_unlock(inode); inode_put(inode); inode = NULL; while (vaddr_elf_max < HEAP_BASE) { /** Rest of ELF region. */ pte_t *pte = paging_walk_pgdir(pgdir, vaddr_elf_max, true); if (pte == NULL) goto fail; uint32_t paddr = paging_map_upage(pte, true); if (paddr == 0) goto fail; vaddr_elf_max += PAGE_SIZE; } uint32_t vaddr_top = USER_MAX - PAGE_SIZE; /** Top stack page. */ pte_t *pte_top = paging_walk_pgdir(pgdir, vaddr_top, true); if (pte_top == NULL) goto fail; uint32_t paddr_top = paging_map_upage(pte_top, true); if (paddr_top == 0) goto fail; memset((char *) paddr_top, 0, PAGE_SIZE); /** * Push argument strings to the stack, then push the argv list * pointing to those strings, followed by `argv`, `argc`. */ uint32_t sp = USER_MAX; uint32_t ustack[3 + MAX_EXEC_ARGS + 1]; size_t argc = 0; for (argc = 0; argv[argc] != NULL; ++argc) { if (argc >= MAX_EXEC_ARGS) goto fail; sp = sp - (strlen(argv[argc]) + 1); sp &= 0xFFFFFFFC; /** Align to 32-bit words. */ memcpy((char *) (paddr_top + PAGE_SIZE - (USER_MAX - sp)), argv[argc], strlen(argv[argc]) + 1); ustack[3 + argc] = sp; } ustack[3 + argc] = 0; /** End of argv list. */ ustack[2] = sp - (argc + 1) * 4; /** `argv` */ ustack[1] = argc; /** `argc` */ ustack[0] = 0x0000DEAD; /** Fake return address. */ sp -= (3 + argc + 1) * 4; memcpy((char *) (paddr_top + PAGE_SIZE - (USER_MAX - sp)), ustack, (3 + argc + 1) * 4); /** Change process name. */ strncpy(proc->name, filename, strlen(filename)); /** Switch to the new page directory, discarding old state. */ pde_t *old_pgdir = proc->pgdir; uint32_t old_heap_high = proc->heap_high; uint32_t old_stack_low = proc->stack_low; proc->pgdir = pgdir; proc->stack_low = vaddr_top; proc->heap_high = HEAP_BASE; proc->trap_state->esp = sp; proc->trap_state->eip = elf_header.entry; /** `main()` function. */ paging_switch_pgdir(proc->pgdir); paging_unmap_range(old_pgdir, USER_BASE, old_heap_high); paging_unmap_range(old_pgdir, old_stack_low, USER_MAX); paging_destroy_pgdir(old_pgdir); return true; fail: if (pgdir != NULL) { paging_unmap_range(pgdir, USER_BASE, HEAP_BASE); paging_unmap_range(pgdir, USER_MAX - PAGE_SIZE, USER_MAX); paging_destroy_pgdir(pgdir); } if (inode != NULL) { inode_unlock(inode); inode_put(inode); } return false; }
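The user-stack setup in `exec_program()` is easiest to see with concrete numbers; below is a host-side walk-through of the same `ustack` arithmetic for a two-argument command line (the argument strings and the max-args bound of 8 here are just examples).

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define USER_MAX 0x40000000u

/* Strings are pushed first (word-aligned), then the argv pointer list,
 * followed by argv, argc, and a fake return address. */
int main(void) {
    const char *argv[] = {"cat", "notes.txt", NULL};
    uint32_t sp = USER_MAX;
    uint32_t ustack[3 + 8 + 1];   /* room for up to 8 args in this sketch */
    size_t argc;

    for (argc = 0; argv[argc] != NULL; ++argc) {
        sp -= (uint32_t) strlen(argv[argc]) + 1;
        sp &= 0xFFFFFFFCu;                        /* align to 32-bit words */
        ustack[3 + argc] = sp;
        printf("argv[%zu] string at %#010x\n", argc, sp);
    }
    ustack[3 + argc] = 0;                         /* end of argv list      */
    ustack[2] = sp - (uint32_t) (argc + 1) * 4;   /* where argv[] will sit */
    ustack[1] = (uint32_t) argc;                  /* argc                  */
    ustack[0] = 0x0000DEAD;                       /* fake return address   */
    sp -= (uint32_t) (3 + argc + 1) * 4;

    printf("argc = %u, argv = %#010x, initial esp = %#010x\n",
           ustack[1], ustack[2], sp);
    return 0;
}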
josehu07/hux-kernel
src/display/vga.h
/** * VGA text mode specifications. */ #ifndef VGA_H #define VGA_H #include <stdint.h> /** Hardcoded 4-bit color codes. */ enum vga_color { VGA_COLOR_BLACK = 0, VGA_COLOR_BLUE = 1, VGA_COLOR_GREEN = 2, VGA_COLOR_CYAN = 3, VGA_COLOR_RED = 4, VGA_COLOR_MAGENTA = 5, VGA_COLOR_BROWN = 6, VGA_COLOR_LIGHT_GREY = 7, VGA_COLOR_DARK_GREY = 8, VGA_COLOR_LIGHT_BLUE = 9, VGA_COLOR_LIGHT_GREEN = 10, VGA_COLOR_LIGHT_CYAN = 11, VGA_COLOR_LIGHT_RED = 12, VGA_COLOR_LIGHT_MAGENTA = 13, VGA_COLOR_LIGHT_BROWN = 14, VGA_COLOR_WHITE = 15, }; typedef enum vga_color vga_color_t; /** * VGA entry composer. * A VGA entry = [4bits bg | 4bits fg | 8bits content]. */ static inline uint16_t vga_entry(vga_color_t bg, vga_color_t fg, unsigned char c) { return (uint16_t) c | (uint16_t) fg << 8 | (uint16_t) bg << 12; } #endif
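A quick host-side demonstration of the entry layout described above: character byte in the low 8 bits, foreground color in bits 8-11, background color in bits 12-15.

#include <stdio.h>
#include <stdint.h>

/* Reproduce the entry layout: [4 bits bg | 4 bits fg | 8 bits character]. */
static uint16_t vga_entry(uint8_t bg, uint8_t fg, unsigned char c) {
    return (uint16_t) c | (uint16_t) fg << 8 | (uint16_t) bg << 12;
}

int main(void) {
    /* 'A' (0x41) on black background (0) with light grey foreground (7). */
    uint16_t entry = vga_entry(0, 7, 'A');
    printf("entry = 0x%04X\n", entry);   /* prints 0x0741 */
    return 0;
}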
josehu07/hux-kernel
src/interrupt/isr.h
/** * Interrupt service routines (ISR) handler implementation. */ #ifndef ISR_H #define ISR_H #include <stdint.h> /** * Interrupt state specification, which will be followed by `isr-stub.s` * before calling `isr_handler` below. */ struct interrupt_state { uint32_t ds; uint32_t edi, esi, ebp, useless, ebx, edx, ecx, eax; uint32_t int_no, err_code; uint32_t eip, cs, eflags, esp, ss; } __attribute__((packed)); typedef struct interrupt_state interrupt_state_t; /** Allow other parts to register an ISR. */ typedef void (*isr_t)(interrupt_state_t *); void isr_register(uint8_t int_no, isr_t handler); /** * List of known interrupt numbers in this system. Other parts of the kernel * should refer to these macro names instead of using plain numbers. * - 0 - 31 are ISRs for CPU-generated exceptions, processor-defined, * see https://wiki.osdev.org/Interrupt_Vector_Table * - 32 - 47 are mapped as custom device IRQs, so ISR 32 means IRQ 0, etc. * - 64 i.e. 0x40 is chosen as our syscall trap gate */ #define INT_NO_DIV_BY_ZERO 0 /** Divide by zero. */ // 1 /** Reserved. */ #define INT_NO_NMI 2 /** Non maskable interrupt (NMI). */ #define INT_NO_BREAKPOINT 3 /** Breakpoint. */ #define INT_NO_OVERFLOW 4 /** Overflow. */ #define INT_NO_BOUNDS 5 /** Bounds range exceeded. */ #define INT_NO_ILLEGAL_OP 6 /** Illegal opcode. */ #define INT_NO_DEVICE_NA 7 /** Device not available. */ #define INT_NO_DOUBLE_FAULT 8 /** Double fault. */ // 9 /** No longer used. */ #define INT_NO_INVALID_TSS 10 /** Invalid task state segment (TSS). */ #define INT_NO_SEGMENT_NP 11 /** Segment not present. */ #define INT_NO_STACK_SEG 12 /** Stack segment fault. */ #define INI_NO_PROTECTION 13 /** General protection fault. */ #define INT_NO_PAGE_FAULT 14 /** Page fault. */ // 15 /** Reserved. */ #define INT_NO_FPU_ERROR 16 /** Floating-point unit (FPU) error. */ #define INT_NO_ALIGNMENT 17 /** Alignment check */ #define INT_NO_MACHINE 18 /** Machine check. */ #define INT_NO_SIMD_FP 19 /** SIMD floating-point error. */ // 20 - 31 /** Reserved. */ #define IRQ_BASE_NO 32 #define INT_NO_TIMER (IRQ_BASE_NO + 0) #define INT_NO_KEYBOARD (IRQ_BASE_NO + 1) #define INT_NO_IDEDISK (IRQ_BASE_NO + 14) /** INT_NO_SYSCALL is 64, defined in `syscall.h`. */ #endif
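For readers unfamiliar with the registration pattern, here is a standalone mock of an `isr_t` handler table; it only illustrates the function-pointer dispatch idea, not the kernel's actual table in `isr.c`.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Mock interrupt state and handler table mimicking the isr_t pattern above. */
typedef struct { uint32_t int_no, err_code; } interrupt_state_t;
typedef void (*isr_t)(interrupt_state_t *);

#define NUM_VECTORS 256
static isr_t handlers[NUM_VECTORS];

static void isr_register(uint8_t int_no, isr_t handler) {
    handlers[int_no] = handler;
}

static void timer_handler(interrupt_state_t *state) {
    printf("tick! (vector %u)\n", state->int_no);
}

int main(void) {
    interrupt_state_t fake = { .int_no = 32, .err_code = 0 };   /* IRQ 0 */
    isr_register(32, timer_handler);
    if (handlers[fake.int_no] != NULL)
        handlers[fake.int_no](&fake);   /* dispatch as isr_handler would */
    return 0;
}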
josehu07/hux-kernel
src/display/terminal.h
/** * Terminal display control. */ #ifndef TERMINAL_H #define TERMINAL_H #include <stddef.h> #include "vga.h" #include "../common/spinlock.h" /** Extern to `printf.c` and other places of calling `terminal_`. */ extern spinlock_t terminal_lock; /** * Default to black background + light grey foreground. * Foreground color can be customized with '*_color' functions. */ extern const vga_color_t TERMINAL_DEFAULT_COLOR_BG; extern const vga_color_t TERMINAL_DEFAULT_COLOR_FG; void terminal_init(); void terminal_write(const char *data, size_t size); void terminal_write_color(const char *data, size_t size, vga_color_t fg); void terminal_erase(); void terminal_clear(); #endif
josehu07/hux-kernel
src/memory/paging.c
<reponame>josehu07/hux-kernel<gh_stars>10-100 /** * Setting up and switching to paging mode. */ #include <stdint.h> #include <stddef.h> #include <stdbool.h> #include "paging.h" #include "../common/debug.h" #include "../common/string.h" #include "../common/spinlock.h" #include "../common/bitmap.h" #include "../interrupt/isr.h" #include "../memory/slabs.h" #include "../process/process.h" #include "../process/scheduler.h" #include "../process/layout.h" /** Kernel heap bottom address - should be above `elf_sections_end`. */ uint32_t kheap_curr; /** kernel's identity-mapping page directory. */ pde_t *kernel_pgdir; /** Allocated at paging init. */ /** Bitmap indicating free/used frames. */ static bitmap_t frame_bitmap; /** * Auxiliary function for allocating (page-aligned) chunks of memory in the * kernel heap region that never gets freed. * * Should only be used to allocate the kernel's page directory/tables and * the frames bitmap and other things before our actual heap allocation * algorithm setup. */ static uint32_t _kalloc_temp(size_t size, bool page_align) { /** If `page_align` is set, return an aligned address. */ if (page_align && !ADDR_PAGE_ALIGNED(kheap_curr)) kheap_curr = ADDR_PAGE_ROUND_UP(kheap_curr); /** If exceeds the 8MiB kernel memory boundary, panic. */ if (kheap_curr + size > KMEM_MAX) error("_kalloc_temp: kernel memory exceeds boundary"); uint32_t temp = kheap_curr; kheap_curr += size; return temp; } /** * Helper that allocates a level-2 table. Returns NULL if running out of * kernel heap. */ static pte_t * _paging_alloc_pgtab(pde_t *pde, bool boot) { pte_t *pgtab = NULL; if (boot) pgtab = (pte_t *) _kalloc_temp(sizeof(pte_t) * PTES_PER_PAGE, true); else pgtab = (pte_t *) salloc_page(); if (pgtab == NULL) return NULL; memset(pgtab, 0, sizeof(pte_t) * PTES_PER_PAGE); pde->present = 1; pde->writable = 1; pde->user = 1; /** Just allow user access on all PDEs. */ pde->frame = ADDR_PAGE_NUMBER((uint32_t) pgtab); return pgtab; } static pte_t * paging_alloc_pgtab(pde_t *pde) { return _paging_alloc_pgtab(pde, false); } static pte_t * paging_alloc_pgtab_at_boot(pde_t *pde) { return _paging_alloc_pgtab(pde, true); } /** * Walk a 2-level page table for a virtual address to locate its PTE. * If `alloc` is true, then when a level-2 table is needed but not * allocated yet, will perform the allocation. */ static pte_t * _paging_walk_pgdir(pde_t *pgdir, uint32_t vaddr, bool alloc, bool boot) { size_t pde_idx = ADDR_PDE_INDEX(vaddr); size_t pte_idx = ADDR_PTE_INDEX(vaddr); /** If already has the level-2 table, return the correct PTE. */ if (pgdir[pde_idx].present != 0) { pte_t *pgtab = (pte_t *) ENTRY_FRAME_ADDR(pgdir[pde_idx]); return &pgtab[pte_idx]; } /** * Else, the level-2 table is not allocated yet. Do the allocation if * the alloc argument is set, otherwise return a NULL. */ if (!alloc) return NULL; pte_t *pgtab = boot ? paging_alloc_pgtab_at_boot(&pgdir[pde_idx]) : paging_alloc_pgtab(&pgdir[pde_idx]); if (pgtab == NULL) { warn("walk_pgdir: cannot alloc pgtab, out of kheap memory?"); return NULL; } return &pgtab[pte_idx]; } pte_t * paging_walk_pgdir(pde_t *pgdir, uint32_t vaddr, bool alloc) { return _paging_walk_pgdir(pgdir, vaddr, alloc, false); } pte_t * paging_walk_pgdir_at_boot(pde_t *pgdir, uint32_t vaddr, bool alloc) { return _paging_walk_pgdir(pgdir, vaddr, alloc, true); } /** Dealloc all the kernal heap pages used in a user page directory. 
*/ void paging_destroy_pgdir(pde_t *pgdir) { for (size_t pde_idx = 0; pde_idx < PDES_PER_PAGE; ++pde_idx) { if (pgdir[pde_idx].present == 1) { pte_t *pgtab = (pte_t *) ENTRY_FRAME_ADDR(pgdir[pde_idx]); sfree_page(pgtab); } } /** Free the level-1 directory as well. */ sfree_page(pgdir); } /** * Find a free frame and map a user page (given by a pointer to its PTE) * into physical memory. Returns the physical address allocated, or 0 if * memory allocation failed. */ uint32_t paging_map_upage(pte_t *pte, bool writable) { if (pte->present == 1) { error("map_upage: page re-mapping detected"); return 0; } uint32_t frame_num = bitmap_alloc(&frame_bitmap); if (frame_num == NUM_FRAMES) return 0; pte->present = 1; pte->writable = writable ? 1 : 0; pte->user = 1; pte->frame = frame_num; return ENTRY_FRAME_ADDR(*pte); } /** Map a lower-half kernel page to the user PTE. */ void paging_map_kpage(pte_t *pte, uint32_t paddr) { if (pte->present == 1) { error("map_kpage: page re-mapping detected"); return; } uint32_t frame_num = ADDR_PAGE_NUMBER(paddr); pte->present = 1; pte->writable = 0; pte->user = 0; /** User cannot access kernel-mapped pages. */ pte->frame = frame_num; } /** * Unmap all the mapped pages within a virtual address range in a user * page directory. Avoids calling `walk_pgdir()` repeatedly. */ void paging_unmap_range(pde_t *pgdir, uint32_t va_start, uint32_t va_end) { size_t pde_idx = ADDR_PDE_INDEX(va_start); size_t pte_idx = ADDR_PTE_INDEX(va_start); size_t pde_end = ADDR_PDE_INDEX(ADDR_PAGE_ROUND_UP(va_end)); size_t pte_end = ADDR_PTE_INDEX(ADDR_PAGE_ROUND_UP(va_end)); pte_t *pgtab = (pte_t *) ENTRY_FRAME_ADDR(pgdir[pde_idx]); while (pde_idx < pde_end || (pde_idx == pde_end && pte_idx < pte_end)) { /** * If end of current level-2 table, or current level-2 table not * allocated, go to the next PDE. */ if (pte_idx >= PTES_PER_PAGE || pgdir[pde_idx].present == 0) { pde_idx++; pte_idx = 0; pgtab = (pte_t *) ENTRY_FRAME_ADDR(pgdir[pde_idx]); continue; } if (pgtab[pte_idx].present == 1) { bitmap_clear(&frame_bitmap, pgtab[pte_idx].frame); pgtab[pte_idx].present = 0; pgtab[pte_idx].writable = 0; pgtab[pte_idx].frame = 0; } pte_idx++; } } /** * Copy all the mapped page within a virtual address range from a page * directory to another process's page directory, allocating frames for * the new process on the way. Returns false if memory allocation failed. */ bool paging_copy_range(pde_t *dstdir, pde_t *srcdir, uint32_t va_start, uint32_t va_end) { size_t pde_idx = ADDR_PDE_INDEX(va_start); size_t pte_idx = ADDR_PTE_INDEX(va_start); size_t pde_end = ADDR_PDE_INDEX(ADDR_PAGE_ROUND_UP(va_end)); size_t pte_end = ADDR_PTE_INDEX(ADDR_PAGE_ROUND_UP(va_end)); pte_t *srctab = (pte_t *) ENTRY_FRAME_ADDR(srcdir[pde_idx]); pte_t *dsttab; while (pde_idx < pde_end || (pde_idx == pde_end && pte_idx < pte_end)) { /** * If end of current level-2 table, or current level-2 table not * allocated, go to the next PDE. */ if (pte_idx >= PTES_PER_PAGE || srcdir[pde_idx].present == 0) { pde_idx++; pte_idx = 0; srctab = (pte_t *) ENTRY_FRAME_ADDR(srcdir[pde_idx]); continue; } /** * If new page directory does not have this level-2 table yet, * allocate one for it on kernel heap. */ if (dstdir[pde_idx].present == 0) { dsttab = paging_alloc_pgtab(&dstdir[pde_idx]); if (dsttab == NULL) { warn("copy_range: cannot alloc pgtab, out of kheap memory?"); paging_unmap_range(dstdir, va_start, va_end); return false; } } dsttab = (pte_t *) ENTRY_FRAME_ADDR(dstdir[pde_idx]); /** Map destination frame, and copy source frame content. 
*/ if (srctab[pte_idx].present == 1) { uint32_t paddr = paging_map_upage(&dsttab[pte_idx], srctab[pte_idx].writable); if (paddr == 0) { warn("copy_range: cannot map page, out of physical memory?"); paging_unmap_range(dstdir, va_start, va_end); return false; } memcpy((char *) paddr, (char *) ENTRY_FRAME_ADDR(srctab[pte_idx]), PAGE_SIZE); } pte_idx++; } return true; } /** Switch the current page directory to the given one. */ inline void paging_switch_pgdir(pde_t *pgdir) { assert(pgdir != NULL); asm volatile ( "movl %0, %%cr3" : : "r" (pgdir) ); } /** * Page fault (ISR # 14) handler. * Interrupts should have been disabled since this is an interrupt gate. */ static void page_fault_handler(interrupt_state_t *state) { /** The CR2 register holds the faulty address. */ uint32_t faulty_addr; asm ( "movl %%cr2, %0" : "=r" (faulty_addr) : ); /** * Analyze the least significant 3 bits of error code to see what * triggered this page fault: * - bit 0: page present -> 1, otherwise 0 * - bit 1: is a write operation -> 1, read -> 0 * - bit 2: is from user mode -> 1, kernel -> 0 * * See https://wiki.osdev.org/Paging for more. */ bool present = state->err_code & 0x1; bool write = state->err_code & 0x2; bool user = state->err_code & 0x4; process_t *proc = running_proc(); /** * If is a valid stack growth page fault (within stack size limit * and not meeting the heap upper boundary), then allocate and map * the new pages. */ if (!present && user && faulty_addr < proc->stack_low && faulty_addr >= STACK_MIN && faulty_addr >= proc->heap_high) { uint32_t old_btm = ADDR_PAGE_ROUND_DN(proc->stack_low); uint32_t new_btm = ADDR_PAGE_ROUND_DN(faulty_addr); uint32_t vaddr; for (vaddr = new_btm; vaddr < old_btm; vaddr += PAGE_SIZE) { pte_t *pte = paging_walk_pgdir(proc->pgdir, vaddr, true); if (pte == NULL) { warn("page_fault: cannot walk pgdir, out of kheap memory?"); break; } uint32_t paddr = paging_map_upage(pte, true); if (paddr == 0) { warn("page_fault: cannot map new page, out of memory?"); break; } memset((char *) paddr, 0, PAGE_SIZE); } if (vaddr < old_btm) { warn("page_fault: stack growth to %p failed", new_btm); process_exit(); } else proc->stack_low = new_btm; return; } /** Other page faults are considered truly harmful. */ info("Caught page fault {\n" " faulty addr = %p\n" " present: %d\n" " write: %d\n" " user: %d\n" "} not handled!", faulty_addr, present, write, user); process_exit(); } /** Initialize paging and switch to use paging. */ void paging_init(void) { /** Kernel heap starts above all ELF sections. */ kheap_curr = ADDR_PAGE_ROUND_UP((uint32_t) elf_sections_end); /** * The frame bitmap also needs space, so allocate space for it in * our kernel heap. Clear it to zeros. */ uint8_t *frame_bits = (uint8_t *) _kalloc_temp(NUM_FRAMES / 8, false); bitmap_init(&frame_bitmap, frame_bits, NUM_FRAMES); /** * Allocate the one-page space for the kernel's page directory in * the kernel heap. All pages of page directory/tables must be * page-aligned. */ kernel_pgdir = (pde_t *) _kalloc_temp(sizeof(pde_t) * PDES_PER_PAGE, true); memset(kernel_pgdir, 0, sizeof(pde_t) * PDES_PER_PAGE); /** * Identity-map the kernel's virtual address space to the physical * memory. This means we need to map all the allowed kernel physical * frames (from 0 -> KMEM_MAX) as its identity virtual address in * the kernel page table, and reserve this entire physical memory region. * * Assumes that `bitmap_alloc()` behaves sequentially. 
     */
    uint32_t addr = 0;
    while (addr < KMEM_MAX) {
        uint32_t frame_num = bitmap_alloc(&frame_bitmap);
        assert(frame_num < NUM_FRAMES);

        pte_t *pte = paging_walk_pgdir_at_boot(kernel_pgdir, addr, true);
        assert(pte != NULL);

        /** Update the bits in this PTE. */
        pte->present = 1;
        pte->writable = 0;      /** Has no effect. */
        pte->user = 0;
        pte->frame = frame_num;

        addr += PAGE_SIZE;
    }

    /**
     * Also map the rest of physical memory into the scheduler page table,
     * so it could access any physical address directly.
     */
    while (addr < PHYS_MAX) {
        pte_t *pte = paging_walk_pgdir_at_boot(kernel_pgdir, addr, true);
        assert(pte != NULL);

        /** Update the bits in this PTE. */
        pte->present = 1;
        pte->writable = 0;      /** Has no effect. */
        pte->user = 0;
        pte->frame = ADDR_PAGE_NUMBER(addr);

        addr += PAGE_SIZE;
    }

    /**
     * Register the page fault handler. This action must be done before
     * we do the actual switch towards using paging.
     */
    isr_register(INT_NO_PAGE_FAULT, &page_fault_handler);

    /** Load the address of kernel page directory into CR3. */
    paging_switch_pgdir(kernel_pgdir);

    /**
     * Enable paging by setting the two proper bits of CR0:
     *   - PG bit (31): enable paging
     *   - PE bit (0): enable protected mode
     *
     * We are not setting the WP bit, so the read/write bit of any PTE just
     * controls whether the page is user writable - in kernel privilege any
     * page can be written.
     */
    uint32_t cr0;
    asm volatile ( "movl %%cr0, %0" : "=r" (cr0) : );
    cr0 |= 0x80000001;
    asm volatile ( "movl %0, %%cr0" : : "r" (cr0) );
}
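The `ADDR_PDE_INDEX`/`ADDR_PTE_INDEX` macros used throughout this file are defined in `paging.h` and not shown here; assuming the standard x86 10/10/12 split for 4KiB pages, a host-side sketch of the decomposition looks like this.

#include <stdio.h>
#include <stdint.h>

/* Assumed 2-level split: 10-bit PDE index, 10-bit PTE index, 12-bit offset. */
static void decompose(uint32_t vaddr) {
    uint32_t pde = vaddr >> 22;
    uint32_t pte = (vaddr >> 12) & 0x3FF;
    uint32_t off = vaddr & 0xFFF;
    printf("vaddr %#010x -> pde %4u, pte %4u, offset %#05x\n",
           vaddr, pde, pte, off);
}

int main(void) {
    decompose(0x00000000);   /* first kernel page            */
    decompose(0x20000000);   /* USER_BASE, start of user ELF */
    decompose(0x3FFFFFFC);   /* near the top of user stack   */
    return 0;
}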
josehu07/hux-kernel
user/lib/printf.h
/** * Formatted printing user library. * * Format specifier := %[special][width][.precision][length]<type>. Only * limited features are provided (documented in the wiki page). * * The final string CANNOT exceed 1024 bytes for every single invocation. */ #ifndef PRINTF_H #define PRINTF_H #include <stdint.h> #include <stddef.h> /** Hardcoded 4-bit VGA color codes. */ enum vga_color { VGA_COLOR_BLACK = 0, VGA_COLOR_BLUE = 1, VGA_COLOR_GREEN = 2, VGA_COLOR_CYAN = 3, VGA_COLOR_RED = 4, VGA_COLOR_MAGENTA = 5, VGA_COLOR_BROWN = 6, VGA_COLOR_LIGHT_GREY = 7, VGA_COLOR_DARK_GREY = 8, VGA_COLOR_LIGHT_BLUE = 9, VGA_COLOR_LIGHT_GREEN = 10, VGA_COLOR_LIGHT_CYAN = 11, VGA_COLOR_LIGHT_RED = 12, VGA_COLOR_LIGHT_MAGENTA = 13, VGA_COLOR_LIGHT_BROWN = 14, VGA_COLOR_WHITE = 15, }; typedef enum vga_color vga_color_t; /** Default character color is light grey. */ #define PRINTF_DEFAULT_COLOR VGA_COLOR_LIGHT_GREY void printf(const char *fmt, ...); void cprintf(vga_color_t fg, const char *fmt, ...); void snprintf(char *buf, size_t count, const char *fmt, ...); #endif
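A hypothetical user program showing how these three entry points might be used together; it assumes the `getpid()`, `uptime()`, and `exit()` syscall wrappers seen elsewhere under `user/`.

#include "lib/printf.h"
#include "lib/syscall.h"

void main(int argc, char *argv[]) {
    (void) argc;
    (void) argv;

    char buf[32];

    printf("hello from pid %d\n", getpid());
    cprintf(VGA_COLOR_LIGHT_GREEN, "colored %s line\n", "status");

    /* snprintf() fills a caller-provided buffer, capped at `count` bytes. */
    snprintf(buf, sizeof(buf), "uptime = %d ms", uptime());
    printf("%s\n", buf);

    exit();
}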
josehu07/hux-kernel
src/display/sysdisp.c
<gh_stars>10-100 /** * Syscalls related to terminal printing. */ #include <stdint.h> #include "sysdisp.h" #include "vga.h" #include "../common/printf.h" #include "../interrupt/syscall.h" /** void tprint(uint32_t color, char *str); */ int32_t syscall_tprint(void) { uint32_t color; char *str; if (!sysarg_get_uint(0, &color)) return SYS_FAIL_RC; if (color > 15) return SYS_FAIL_RC; if (sysarg_get_str(1, &str) < 0) return SYS_FAIL_RC; cprintf((vga_color_t) color, "%s", str); return 0; }
josehu07/hux-kernel
user/tests/proctest.c
<gh_stars>10-100
/**
 * User test program - process operations.
 */

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

#include "../lib/debug.h"
#include "../lib/printf.h"
#include "../lib/syscall.h"

void main(int argc, char *argv[]) {
    (void) argc;    // Unused.
    (void) argv;

    int32_t mypid = getpid();
    printf(" Parent: parent gets pid - %d\n", mypid);
    sleep(2000);

    cprintf(VGA_COLOR_LIGHT_GREEN, "\n Round 1 --\n");

    printf(" Parent: forking child 1\n");
    int32_t pid1 = fork(0);
    if (pid1 < 0) {
        cprintf(VGA_COLOR_RED, " Parent: fork failed\n");
        exit();
    }

    if (pid1 == 0) {    // Child.
        printf(" Child1: entering an infinite loop\n");
        while (true)
            sleep(5000);
    } else {            // Parent.
        printf(" Parent: child 1 has pid - %d\n", pid1);
        sleep(1500);
        printf(" Parent: slept 1.5 secs, going to kill child 1\n");
        kill(pid1);
        wait();
        printf(" Parent: killed child 1\n");
    }

    cprintf(VGA_COLOR_LIGHT_GREEN, "\n Round 2 --\n");

    printf(" Current uptime: %d ms\n", uptime());
    printf(" Going to sleep for 2000 ms...\n");
    sleep(2000);
    printf(" Current uptime: %d ms\n", uptime());

    cprintf(VGA_COLOR_LIGHT_GREEN, "\n Round 3 --\n");

    printf(" Parent: forking child 2\n");
    int32_t pid2 = fork(0);
    if (pid2 < 0) {
        cprintf(VGA_COLOR_RED, " Parent: fork failed\n");
        exit();
    }

    if (pid2 == 0) {    // Child.
        printf(" Child2: going to sleep 2 secs\n");
        sleep(2000);
        exit();
    } else {            // Parent.
        printf(" Parent: child 2 has pid - %d\n", pid2);
        wait();
        printf(" Parent: waited child 2\n");
    }

    cprintf(VGA_COLOR_GREEN, "\n Cases done!\n");
    exit();
}
josehu07/hux-kernel
user/mk.c
<reponame>josehu07/hux-kernel /** * Command line utility - create file or directory. */ #include <stdint.h> #include <stdbool.h> #include <stddef.h> #include "lib/syscall.h" #include "lib/printf.h" #include "lib/debug.h" #include "lib/string.h" static void _create_file(char *path, bool is_dir) { /** If path exists, fail. */ int8_t fd = open(path, OPEN_RD); if (fd >= 0) { warn("mk: path '%s' exists", path); close(fd); return; } int ret = create(path, is_dir ? CREATE_DIR : CREATE_FILE); if (ret != 0) warn("mk: create '%s' failed", path); } static void _print_help_exit(char *me) { printf("Usage: %s [-h] [-r] path\n", me); exit(); } void main(int argc, char *argv[]) { if (argc < 2 || strncmp(argv[1], "-h", 2) == 0) _print_help_exit(argv[0]); bool is_dir = false; if (strncmp(argv[1], "-r", 2) == 0) is_dir = true; char *path; if (is_dir) { if (argc != 3) _print_help_exit(argv[0]); path = argv[2]; } else { if (argc != 2) _print_help_exit(argv[0]); path = argv[1]; } _create_file(path, is_dir); exit(); }
josehu07/hux-kernel
src/memory/slabs.c
/** * Simple SLAB allocators for fixed-granularity kernel objects. */ #include <stdint.h> #include <stddef.h> #include "slabs.h" #include "paging.h" #include "../common/debug.h" #include "../common/string.h" #include "../common/spinlock.h" /** Page-granularity SLAB free-list. */ static uint32_t page_slab_btm; static uint32_t page_slab_top; static slab_node_t *page_slab_freelist; static spinlock_t page_slab_lock; /** * Internal generic SLAB allocator. Give it a pointer to the pointer to * the first node of any initialized fixed-granularity free-list. * * There is no data integrity checks on magics. Returns 0 on failures. * Must be called with the corresponding SLAB's lock held. */ static uint32_t _salloc_internal(slab_node_t **freelist) { if (freelist == NULL) { warn("salloc: given free-list pointer is NULL"); return 0; } slab_node_t *node = *freelist; /** No slab is free, time to panic. */ if (node == NULL) { warn("salloc: free-list %p has no free slabs", freelist); return 0; } *freelist = node->next; return (uint32_t) node; } /** Wrappers for differnet granularities. */ uint32_t salloc_page(void) { spinlock_acquire(&page_slab_lock); uint32_t addr = _salloc_internal(&page_slab_freelist); spinlock_release(&page_slab_lock); return addr; } /** * Internal generic SLAB deallocator. Assumes the address is valid and * properly-aligned to the granularity. * * Must be called with the corresponding SLAB's lock held. */ static void _sfree_internal(slab_node_t **freelist, void *addr) { slab_node_t *node = (slab_node_t *) addr; /** Simply insert to the head of free-list. */ node->next = *freelist; *freelist = node; } /** Wrapper for different granularities. */ void sfree_page(void *addr) { if ((uint32_t) addr < page_slab_btm || (uint32_t) addr >= page_slab_top) { warn("sfree_page: object %p is out of page slab range", addr); return; } if ((uint32_t) addr % PAGE_SIZE != 0) { warn("sfree_page: object %p is not page-aligned", addr); return; } /** Fill with zero bytes to catch dangling pointers use. */ memset((char *) addr, 0, PAGE_SIZE); spinlock_acquire(&page_slab_lock); _sfree_internal(&page_slab_freelist, addr); spinlock_release(&page_slab_lock); } /** Initializers for SLAB allocators. */ void page_slab_init(void) { page_slab_btm = PAGE_SLAB_MIN; page_slab_top = PAGE_SLAB_MAX; page_slab_freelist = NULL; for (uint32_t addr = page_slab_btm; addr < page_slab_top; addr += PAGE_SIZE) { sfree_page((char *) addr); } spinlock_init(&page_slab_lock, "page_slab_lock"); }
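The core trick above is the intrusive free-list: a free slab stores the `next` pointer inside its own (unused) memory, so no extra bookkeeping space is needed. A miniature host-side version of `_salloc_internal()`/`_sfree_internal()` (without locking or the zero-fill on free) looks like this.

#include <stdio.h>
#include <stdlib.h>

typedef struct slab_node {
    struct slab_node *next;
} slab_node_t;

static slab_node_t *freelist = NULL;

/* cf. _sfree_internal(): push onto the head of the free-list. */
static void slab_free(void *addr) {
    slab_node_t *node = (slab_node_t *) addr;
    node->next = freelist;
    freelist = node;
}

/* cf. _salloc_internal(): pop the head of the free-list, or NULL if empty. */
static void *slab_alloc(void) {
    slab_node_t *node = freelist;
    if (node == NULL)
        return NULL;
    freelist = node->next;
    return node;
}

int main(void) {
    enum { SLAB_SIZE = 64, NUM_SLABS = 4 };
    char *pool = malloc(SLAB_SIZE * NUM_SLABS);

    for (int i = 0; i < NUM_SLABS; ++i)        /* seed the free-list */
        slab_free(pool + i * SLAB_SIZE);

    void *a = slab_alloc();
    void *b = slab_alloc();
    printf("allocated %p then %p\n", a, b);

    slab_free(a);
    printf("freed %p, next alloc returns %p (LIFO)\n", a, slab_alloc());

    free(pool);
    return 0;
}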
josehu07/hux-kernel
src/common/spinlock.c
/**
 * Spinlock implementation (synonym to `cli_push()`/`cli_pop()` pairs
 * in single-CPU Hux).
 */

#include <stdint.h>
#include <stdbool.h>

#include "spinlock.h"
#include "intstate.h"

#include "debug.h"


/** Returns true if the lock is currently locked. */
bool
spinlock_locked(spinlock_t *lock)
{
    cli_push();
    bool locked = (lock->locked == 1);
    cli_pop();
    return locked;
}


/** x86 atomic XCHG instruction wrapper. */
static inline uint32_t
_xchgl(volatile uint32_t *ptr, uint32_t new_val)
{
    uint32_t old_val;
    asm volatile (
        "lock; xchgl %0, %1"
        : "+m" (*ptr), "=a" (old_val)
        : "1" (new_val)
    );
    return old_val;
}

/**
 * Loops until the lock is acquired.
 *
 * Should succeed immediately in Hux since we only have one CPU and any
 * process must not yield when holding a spinlock (which may cause
 * another process that gets scheduled to deadlock on spinning on the
 * lock). Hence, it basically serves as `cli_push()` for now.
 */
void
spinlock_acquire(spinlock_t *lock)
{
    cli_push();

    if (spinlock_locked(lock))
        error("spinlock_acquire: lock %s is already locked", lock->name);

    /** Spins until XCHG gets old value of "unlocked". */
    while (_xchgl(&(lock->locked), 1) != 0) {}

    /** Memory barrier, no loads/stores could cross this point. */
    __sync_synchronize();
}

/** Release the lock. */
void
spinlock_release(spinlock_t *lock)
{
    if (!spinlock_locked(lock))
        error("spinlock_release: lock %s is not locked", lock->name);

    /** Memory barrier, no loads/stores could cross this point. */
    __sync_synchronize();

    /** Atomically assign to 0 (C statement could be non-atomic). */
    asm volatile ( "movl $0, %0" : "+m" (lock->locked) : );

    cli_pop();
}


/** Initialize the spinlock. */
void
spinlock_init(spinlock_t *lock, const char *name)
{
    lock->name = name;
    lock->locked = 0;
}
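A hedged sketch of the acquire/release discipline these functions are meant to support (not part of the repository); the counter and its lock are hypothetical, only the spinlock_* calls come from the file above:

/* Illustrative sketch only: guard a piece of shared kernel state. */
static spinlock_t counter_lock;     /* hypothetical lock */
static uint32_t shared_counter;     /* hypothetical shared state */

void counter_init(void)
{
    spinlock_init(&counter_lock, "counter_lock");
}

void counter_bump(void)
{
    spinlock_acquire(&counter_lock);    /* disables interrupts via cli_push() */
    shared_counter++;
    spinlock_release(&counter_lock);    /* re-enables them via cli_pop() */
}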
josehu07/hux-kernel
src/filesys/sysfile.c
/**
 * Syscalls related to file system state & operations.
 */

#include <stdint.h>
#include <stdbool.h>

#include "sysfile.h"
#include "vsfs.h"
#include "file.h"
#include "exec.h"

#include "../common/debug.h"
#include "../common/string.h"

#include "../interrupt/syscall.h"


/** int32_t open(char *path, uint32_t mode); */
int32_t
syscall_open(void)
{
    char *path;
    uint32_t mode;

    if (sysarg_get_str(0, &path) <= 0)
        return SYS_FAIL_RC;
    if (!sysarg_get_uint(1, &mode))
        return SYS_FAIL_RC;
    if ((mode & (OPEN_RD | OPEN_WR)) == 0) {
        warn("open: mode is neither readable nor writable");
        return SYS_FAIL_RC;
    }

    return filesys_open(path, mode);
}

/** int32_t close(int32_t fd); */
int32_t
syscall_close(void)
{
    int32_t fd;

    if (!sysarg_get_int(0, &fd))
        return SYS_FAIL_RC;
    if (fd < 0 || fd >= MAX_FILES_PER_PROC)
        return SYS_FAIL_RC;

    if (!filesys_close(fd))
        return SYS_FAIL_RC;
    return 0;
}

/** int32_t create(char *path, uint32_t mode); */
int32_t
syscall_create(void)
{
    char *path;
    uint32_t mode;

    if (sysarg_get_str(0, &path) <= 0)
        return SYS_FAIL_RC;
    if (!sysarg_get_uint(1, &mode))
        return SYS_FAIL_RC;
    if ((mode & (CREATE_FILE | CREATE_DIR)) == 0) {
        warn("create: mode is neither file nor directory");
        return SYS_FAIL_RC;
    }
    if ((mode & CREATE_FILE) != 0 && (mode & CREATE_DIR) != 0) {
        warn("create: mode is both file and directory");
        return SYS_FAIL_RC;
    }

    if (!filesys_create(path, mode))
        return SYS_FAIL_RC;
    return 0;
}

/** int32_t remove(char *path); */
int32_t
syscall_remove(void)
{
    char *path;

    if (sysarg_get_str(0, &path) <= 0)
        return SYS_FAIL_RC;

    if (!filesys_remove(path))
        return SYS_FAIL_RC;
    return 0;
}

/** int32_t read(int32_t fd, char *dst, uint32_t len); */
int32_t
syscall_read(void)
{
    int32_t fd;
    char *dst;
    uint32_t len;

    if (!sysarg_get_int(0, &fd))
        return SYS_FAIL_RC;
    if (fd < 0 || fd >= MAX_FILES_PER_PROC)
        return SYS_FAIL_RC;
    if (!sysarg_get_uint(2, &len))
        return SYS_FAIL_RC;
    if (!sysarg_get_mem(1, &dst, len))
        return SYS_FAIL_RC;

    return filesys_read(fd, dst, len);
}

/** int32_t write(int32_t fd, char *src, uint32_t len); */
int32_t
syscall_write(void)
{
    int32_t fd;
    char *src;
    uint32_t len;

    if (!sysarg_get_int(0, &fd))
        return SYS_FAIL_RC;
    if (fd < 0 || fd >= MAX_FILES_PER_PROC)
        return SYS_FAIL_RC;
    if (!sysarg_get_uint(2, &len))
        return SYS_FAIL_RC;
    if (!sysarg_get_mem(1, &src, len))
        return SYS_FAIL_RC;

    return filesys_write(fd, src, len);
}

/** int32_t chdir(char *path); */
int32_t
syscall_chdir(void)
{
    char *path;

    if (sysarg_get_str(0, &path) <= 0)
        return SYS_FAIL_RC;

    if (!filesys_chdir(path))
        return SYS_FAIL_RC;
    return 0;
}

/** int32_t getcwd(char *buf, uint32_t limit); */
int32_t
syscall_getcwd(void)
{
    char *buf;
    uint32_t limit;

    if (!sysarg_get_uint(1, &limit))
        return SYS_FAIL_RC;
    if (limit < 2)
        return SYS_FAIL_RC;
    if (!sysarg_get_mem(0, &buf, limit))
        return SYS_FAIL_RC;

    if (!filesys_getcwd(buf, limit))
        return SYS_FAIL_RC;
    return 0;
}

/** int32_t exec(char *path, char **argv); */
int32_t
syscall_exec(void)
{
    char *path;
    uint32_t uargv;

    if (!sysarg_get_str(0, &path))
        return SYS_FAIL_RC;
    if (!sysarg_get_uint(1, &uargv))
        return SYS_FAIL_RC;

    char *argv[MAX_EXEC_ARGS];
    memset(argv, 0, MAX_EXEC_ARGS * sizeof(char *));

    for (size_t argc = 0; argc < MAX_EXEC_ARGS; ++argc) {
        uint32_t uarg;
        if (!sysarg_addr_uint(uargv + 4 * argc, &uarg))
            return SYS_FAIL_RC;

        if (uarg == 0) {    /** Reached end of list. */
            argv[argc] = 0;

            if (!filesys_exec(path, argv))
                return SYS_FAIL_RC;
            return 0;
        }

        if (sysarg_addr_str(uarg, &argv[argc]) < 0)
            return SYS_FAIL_RC;
    }

    return SYS_FAIL_RC;
}

/** int32_t fstat(int32_t fd, file_stat_t *stat); */
int32_t
syscall_fstat(void)
{
    int32_t fd;
    file_stat_t *stat;

    if (!sysarg_get_int(0, &fd))
        return SYS_FAIL_RC;
    if (!sysarg_get_mem(1, (char **) &stat, sizeof(file_stat_t)))
        return SYS_FAIL_RC;

    if (!filesys_fstat(fd, stat))
        return SYS_FAIL_RC;
    return 0;
}

/** int32_t seek(int32_t fd, uint32_t offset); */
int32_t
syscall_seek(void)
{
    int32_t fd;
    uint32_t offset;

    if (!sysarg_get_int(0, &fd))
        return SYS_FAIL_RC;
    if (!sysarg_get_uint(1, &offset))
        return SYS_FAIL_RC;

    if (!filesys_seek(fd, offset))
        return SYS_FAIL_RC;
    return 0;
}
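From user space these handlers are reached through the wrappers in lib/syscall.h (the same header user/mk.c uses for open/close/create); a minimal read-a-file sketch, assuming a `read` wrapper matching the prototype documented above and a small local buffer:

/* Illustrative user-space sketch only, not part of the repository. */
void dump_file(char *path)
{
    int8_t fd = open(path, OPEN_RD);        /* dispatches to syscall_open() */
    if (fd < 0)
        return;

    char buf[128];
    int32_t got = read(fd, buf, sizeof(buf) - 1);   /* syscall_read() */
    if (got > 0) {
        buf[got] = '\0';
        printf("%s", buf);
    }

    close(fd);                              /* syscall_close() */
}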
josehu07/hux-kernel
src/filesys/block.h
/**
 * Block-level I/O general request layer.
 */

#ifndef BLOCK_H
#define BLOCK_H

#include <stdint.h>
#include <stdbool.h>


/** All block requests are of size 1024 bytes. */
#define BLOCK_SIZE 1024

/** Helper macros on addresses and block alignments. */
#define ADDR_BLOCK_OFFSET(addr)   ((addr) & 0x000003FF)
#define ADDR_BLOCK_NUMBER(addr)   ((addr) >> 10)
#define ADDR_BLOCK_ALIGNED(addr)  (ADDR_BLOCK_OFFSET(addr) == 0)

#define ADDR_BLOCK_ROUND_DN(addr) ((addr) & 0xFFFFFC00)
#define ADDR_BLOCK_ROUND_UP(addr) (ADDR_BLOCK_ROUND_DN((addr) + 0x000003FF))


/**
 * Block device request buffer.
 *   - valid && dirty:   waiting to be written to disk
 *   - !valid && !dirty: waiting to be read from disk
 *   - valid && !dirty:  normal buffer with valid data
 *   - !valid && dirty:  cannot happen
 */
struct block_request {
    bool valid;
    bool dirty;
    struct block_request *next;     /** Next in device queue. */
    uint32_t block_no;              /** Block index on disk. */
    uint8_t data[BLOCK_SIZE];
};
typedef struct block_request block_request_t;


bool block_read(char *dst, uint32_t disk_addr, uint32_t len);
bool block_read_at_boot(char *dst, uint32_t disk_addr, uint32_t len);
bool block_write(char *src, uint32_t disk_addr, uint32_t len);

uint32_t block_alloc();
void block_free(uint32_t disk_addr);


#endif
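A small worked example of the alignment macros above (not part of the repository); the numeric results follow directly from BLOCK_SIZE being 1024 (offset mask 0x3FF, block number = addr >> 10):

/* Illustrative sketch only. */
#include <stdint.h>
#include "block.h"

void block_macro_example(void)
{
    uint32_t addr = 0x00012345;

    uint32_t off = ADDR_BLOCK_OFFSET(addr);     /* 0x345:   byte offset within the block */
    uint32_t num = ADDR_BLOCK_NUMBER(addr);     /* 0x48:    block index 72 on disk       */
    uint32_t dn  = ADDR_BLOCK_ROUND_DN(addr);   /* 0x12000: start address of block 72    */
    uint32_t up  = ADDR_BLOCK_ROUND_UP(addr);   /* 0x12400: start address of block 73    */

    (void) off; (void) num; (void) dn; (void) up;   /* silence unused-variable warnings */
}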
josehu07/hux-kernel
src/process/process.h
/**
 * Providing the abstraction of processes.
 */

#ifndef PROCESS_H
#define PROCESS_H

#include <stdint.h>
#include <stdbool.h>

#include "../common/spinlock.h"
#include "../common/parklock.h"

#include "../interrupt/isr.h"

#include "../memory/paging.h"

#include "../filesys/block.h"
#include "../filesys/file.h"


/** Max number of processes at any time. */
#define MAX_PROCS 32

/** Each process has a kernel stack of one page. */
#define KSTACK_SIZE PAGE_SIZE


/**
 * Process context registers defined to be saved across switches.
 *
 * PC is the last member (hence the last value on the stack that is not
 * explicitly popped in `context_switch()`), because it is then used as
 * the return address of the snippet.
 */
struct process_context {
    uint32_t edi;
    uint32_t esi;
    uint32_t ebx;
    uint32_t ebp;   /** Frame pointer. */
    uint32_t eip;   /** Instruction pointer (PC). */
} __attribute__((packed));
typedef struct process_context process_context_t;


/** Process state. */
enum process_block_on {
    NOTHING,
    ON_SLEEP,
    ON_WAIT,
    ON_KBDIN,
    ON_IDEDISK,
    ON_LOCK
};
typedef enum process_block_on process_block_on_t;

enum process_state {
    UNUSED,     /** Indicates PCB slot unused. */
    INITIAL,
    READY,
    RUNNING,
    BLOCKED,
    TERMINATED
};
typedef enum process_state process_state_t;

/** Process control block (PCB). */
struct process {
    char name[16];                      /** Process name. */
    int8_t pid;                         /** Process ID. */
    process_context_t *context;         /** Registers context. */
    process_state_t state;              /** Process state. */
    process_block_on_t block_on;        /** If state is BLOCKED, the reason. */
    pde_t *pgdir;                       /** Process page directory. */
    uint32_t kstack;                    /** Bottom of its kernel stack. */
    interrupt_state_t *trap_state;      /** Trap state of latest trap. */
    uint32_t stack_low;                 /** Current bottom of stack pages. */
    uint32_t heap_high;                 /** Current top of heap pages. */
    struct process *parent;             /** Parent process. */
    bool killed;                        /** True if should exit. */
    uint8_t timeslice;                  /** Timeslice length for scheduling. */
    uint32_t target_tick;               /** Target wake up timer tick. */
    block_request_t *wait_req;          /** Waiting on this block request. */
    parklock_t *wait_lock;              /** Waiting on this parking lock. */
    file_t *files[MAX_FILES_PER_PROC];  /** File descriptor -> open file. */
    mem_inode_t *cwd;                   /** Current working directory. */
};
typedef struct process process_t;


/** Extern the process table to the scheduler. */
extern process_t ptable[];
extern spinlock_t ptable_lock;

extern process_t *initproc;


void process_init();
void initproc_init();

void process_block(process_block_on_t reason);
void process_unblock(process_t *proc);

int8_t process_fork(uint8_t timeslice);
void process_exit();
void process_sleep(uint32_t sleep_ticks);
int8_t process_wait();
int8_t process_kill(int8_t pid);


#endif
josehu07/hux-kernel
src/display/sysdisp.h
/**
 * Syscalls related to terminal printing.
 */

#ifndef SYSDISP_H
#define SYSDISP_H

#include <stdint.h>


int32_t syscall_tprint();


#endif