//
// Created by bjliuyong on 2021/7/15.
//

#ifndef JVM_BYTES_H
#define JVM_BYTES_H


#include <cstring>

#include "memory/allocation.h"
#include "utilities/globalDefinitions.h"


class Bytes : AllStatic {
// Helper function for swap_u8
    static inline u8   swap_u8_base(u4 x, u4 y);        // compiler-dependent implementation
public:
    // Returns true if the byte ordering used by Java is different from the native byte ordering
    // of the underlying machine. For example, this is true for Intel x86, but false for Solaris
    // on Sparc.
    static inline bool is_Java_byte_ordering_different(){ return true; }


    // Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
    // (no special code is needed since x86 CPUs can access unaligned data)
    static inline u2   get_native_u2(address p)         { return *(u2*)p; }
    static inline u4   get_native_u4(address p)         { return *(u4*)p; }
    static inline u8   get_native_u8(address p)         { return *(u8*)p; }

    static inline void put_native_u2(address p, u2 x)   { *(u2*)p = x; }
    static inline void put_native_u4(address p, u4 x)   { *(u4*)p = x; }
    static inline void put_native_u8(address p, u8 x)   { *(u8*)p = x; }


    // Efficient reading and writing of unaligned unsigned data in Java
    // byte ordering (i.e. big-endian ordering). Byte-order reversal is
    // needed since x86 CPUs use little-endian format.
    static inline u2   get_Java_u2(address p)           { return swap_u2(get_native_u2(p)); }
    static inline u4   get_Java_u4(address p)           { return swap_u4(get_native_u4(p)); }
    static inline u8   get_Java_u8(address p)           { return swap_u8(get_native_u8(p)); }

    static inline void put_Java_u2(address p, u2 x)     { put_native_u2(p, swap_u2(x)); }
    static inline void put_Java_u4(address p, u4 x)     { put_native_u4(p, swap_u4(x)); }
    static inline void put_Java_u8(address p, u8 x)     { put_native_u8(p, swap_u8(x)); }


    // Efficient swapping of byte ordering
    static inline u2   swap_u2(u2 x);                   // compiler-dependent implementation
    static inline u4   swap_u4(u4 x);                   // compiler-dependent implementation
    static inline u8   swap_u8(u8 x);

};

// Byte-swaps a 16-bit value, e.g. 0x1234 -> 0x3412.
// NOTE: 'inline' is required here -- this is an out-of-class definition in a
// header (declared inline in the class), so a non-inline definition would
// violate the ODR as soon as the header is included from a second
// translation unit.
inline u2 Bytes::swap_u2(u2 x) {
    // Compiler-dependent implementation: gcc/clang intrinsic. Unlike the
    // previous 'asm volatile' sequence it can be constant-folded and CSE'd
    // by the compiler, and is not tied to 32-bit x86.
    return __builtin_bswap16(x);
}

// Byte-swaps a 32-bit value, e.g. 0x12345678 -> 0x78563412.
inline u4 Bytes::swap_u4(u4 x) {
    // Compiler-dependent implementation: gcc/clang intrinsic; emits a single
    // 'bswap' on x86, identical to the previous inline asm, but without the
    // 'volatile' that blocked constant folding and common-subexpression
    // elimination, and without pinning the code to x86.
    return __builtin_bswap32(x);
}

// Helper function for swap_u8: byte-swaps each 32-bit half and joins them
// with the halves exchanged, yielding a full 64-bit byte reversal.
inline u8   Bytes::swap_u8_base(u4 x, u4 y) {
    const u8 upper = swap_u4(x);   // first half, reversed, becomes bits 63..32
    const u8 lower = swap_u4(y);   // second half, reversed, becomes bits 31..0
    return (upper << 32) | lower;
}

// Byte-swaps a 64-bit value by swapping each 32-bit half and exchanging the
// halves.
inline u8 Bytes::swap_u8(u8 x) {
    // Extract the halves arithmetically instead of via '*(u4*)&x' pointer
    // punning: the arithmetic form produces the same result on the
    // little-endian x86 target, but is well-defined (no strict-aliasing
    // violation) and independent of host byte order.
    return swap_u8_base((u4)x, (u4)(x >> 32));
}

#endif //JVM_BYTES_H
