// Author: 8891689
// https://github.com/8891689
#include "aes_maes.h"
#include <string.h>
#include <stdio.h>

/*
 * One AES-128 key-schedule step: combine the previous round key (temp1)
 * with the _mm_aeskeygenassist_si128 result (temp2) and return the next
 * round key.  Lane 3 of temp2 holds RotWord(SubWord(w3)) ^ rcon; it is
 * broadcast to all lanes, while temp1 is turned into its running (prefix)
 * XOR across the four 32-bit lanes.
 */
static inline __m128i AES_128_ASSIST(__m128i temp1, __m128i temp2) {
    __m128i keygen = _mm_shuffle_epi32(temp2, 0xff);  /* broadcast lane 3 */
    __m128i shifted = _mm_slli_si128(temp1, 4);
    temp1 = _mm_xor_si128(temp1, shifted);            /* w ^= w << 32   */
    shifted = _mm_slli_si128(shifted, 4);
    temp1 = _mm_xor_si128(temp1, shifted);            /* ... ^ w << 64  */
    shifted = _mm_slli_si128(shifted, 4);
    temp1 = _mm_xor_si128(temp1, shifted);            /* ... ^ w << 96  */
    return _mm_xor_si128(temp1, keygen);
}

/*
 * First half of an AES-256 key-schedule step: updates *temp1 (the "even"
 * round key) in place.  *temp2 is the _mm_aeskeygenassist_si128 result;
 * its lane 3 (RotWord(SubWord(w7)) ^ rcon) is broadcast and folded in.
 * Note: *temp2 is clobbered by the broadcast.
 */
static inline void AES_256_ASSIST1(__m128i* temp1, __m128i * temp2) {
    *temp2 = _mm_shuffle_epi32(*temp2, 0xff);
    /* Three xor-shift steps yield the running XOR of the four lanes. */
    *temp1 = _mm_xor_si128(*temp1, _mm_slli_si128(*temp1, 4));
    *temp1 = _mm_xor_si128(*temp1, _mm_slli_si128(*temp1, 4));
    *temp1 = _mm_xor_si128(*temp1, _mm_slli_si128(*temp1, 4));
    *temp1 = _mm_xor_si128(*temp1, *temp2);
}

/*
 * Second half of an AES-256 key-schedule step: updates *temp3 (the "odd"
 * round key) in place from *temp1.  keygenassist with rcon 0 puts
 * SubWord(w3 of *temp1) in lane 2 (no RotWord, no rcon), which is exactly
 * what the Nk=8 schedule needs for words at index 4 mod 8.
 */
static inline void AES_256_ASSIST2(__m128i* temp1, __m128i * temp3) {
    __m128i subword =
        _mm_shuffle_epi32(_mm_aeskeygenassist_si128(*temp1, 0x00), 0xaa);
    /* Running XOR of *temp3's four lanes, then fold in the S-box word. */
    *temp3 = _mm_xor_si128(*temp3, _mm_slli_si128(*temp3, 4));
    *temp3 = _mm_xor_si128(*temp3, _mm_slli_si128(*temp3, 4));
    *temp3 = _mm_xor_si128(*temp3, _mm_slli_si128(*temp3, 4));
    *temp3 = _mm_xor_si128(*temp3, subword);
}

/*
 * AES_192_ASSIST: one AES-192 (Nk = 6) key-schedule step, per Intel's
 * AES-NI white paper.  On entry temp1 holds schedule words w[i..i+3],
 * temp3 holds w[i+4..i+5] in its low lanes, and temp2 is the
 * aeskeygenassist result for the current round constant.  On exit temp1
 * and temp3 hold the next six schedule words.
 */
static inline void AES_192_ASSIST(__m128i *temp1, __m128i *temp2, __m128i *temp3) {
    __m128i temp4;
    /* For Nk=6 the RotWord/SubWord/rcon value lives in lane 1 (derived
     * from w[i+5] in temp3's lane 1), not lane 3 as in the 128/256 cases. */
    *temp2 = _mm_shuffle_epi32(*temp2, 0x55);
    temp4  = _mm_slli_si128(*temp1, 0x4);
    *temp1 = _mm_xor_si128(*temp1, temp4);
    temp4  = _mm_slli_si128(temp4, 0x4);
    *temp1 = _mm_xor_si128(*temp1, temp4);
    temp4  = _mm_slli_si128(temp4, 0x4);
    *temp1 = _mm_xor_si128(*temp1, temp4);
    *temp1 = _mm_xor_si128(*temp1, *temp2);
    *temp2 = _mm_shuffle_epi32(*temp1, 0xff);
    temp4  = _mm_slli_si128(*temp3, 0x4);
    *temp3 = _mm_xor_si128(*temp3, temp4);
    *temp3 = _mm_xor_si128(*temp3, *temp2);
}

/* Pack 64-bit halves: { a[63:0], b[63:0] }. */
static inline __m128i aes_pack_lo_lo(__m128i a, __m128i b) {
    return _mm_castpd_si128(_mm_shuffle_pd(_mm_castsi128_pd(a), _mm_castsi128_pd(b), 0));
}

/* Pack 64-bit halves: { a[127:64], b[63:0] }. */
static inline __m128i aes_pack_hi_lo(__m128i a, __m128i b) {
    return _mm_castpd_si128(_mm_shuffle_pd(_mm_castsi128_pd(a), _mm_castsi128_pd(b), 1));
}

/*
 * Expands `key` into Nr + 1 128-bit round keys in `round_keys`
 * (11/13/15 entries for AES-128/192/256).
 *
 * mode == AES_KEY_SCHEDULE_DECRYPT additionally converts the middle keys
 * with aesimc for use with _mm_aesdec_si128 (Equivalent Inverse Cipher);
 * keys 0 and Nr are intentionally left untransformed.
 *
 * Returns 0 on success, -1 for an unsupported key size.
 *
 * FIX(review): the previous 192-bit path reused the 256-bit assist
 * helpers (taking the rcon word from lane 3 instead of lane 1) and
 * overwrote round_keys[1], so it produced a self-consistent but
 * non-FIPS-197 schedule.  It now follows Intel's published AES-192
 * expansion, packing the 64-bit key halves with _mm_shuffle_pd.
 */
int AES_KeyExpansion(const uint8_t *key, __m128i *round_keys, AES_KeySize Nk_enum, AES_KeySchedule_Mode mode) {
    int Nr = AES_Nr_Rounds(Nk_enum);
    __m128i temp1, temp2, temp3;

    if (Nk_enum == AES_KEY_128) {
        /* Standard rcon sequence 0x01 .. 0x36 (FIPS-197). */
        temp1 = _mm_loadu_si128((const __m128i*)key);
        round_keys[0] = temp1;
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x01); round_keys[1] = AES_128_ASSIST(temp1, temp2);
        temp1 = round_keys[1]; temp2 = _mm_aeskeygenassist_si128(temp1, 0x02); round_keys[2] = AES_128_ASSIST(temp1, temp2);
        temp1 = round_keys[2]; temp2 = _mm_aeskeygenassist_si128(temp1, 0x04); round_keys[3] = AES_128_ASSIST(temp1, temp2);
        temp1 = round_keys[3]; temp2 = _mm_aeskeygenassist_si128(temp1, 0x08); round_keys[4] = AES_128_ASSIST(temp1, temp2);
        temp1 = round_keys[4]; temp2 = _mm_aeskeygenassist_si128(temp1, 0x10); round_keys[5] = AES_128_ASSIST(temp1, temp2);
        temp1 = round_keys[5]; temp2 = _mm_aeskeygenassist_si128(temp1, 0x20); round_keys[6] = AES_128_ASSIST(temp1, temp2);
        temp1 = round_keys[6]; temp2 = _mm_aeskeygenassist_si128(temp1, 0x40); round_keys[7] = AES_128_ASSIST(temp1, temp2);
        temp1 = round_keys[7]; temp2 = _mm_aeskeygenassist_si128(temp1, 0x80); round_keys[8] = AES_128_ASSIST(temp1, temp2);
        temp1 = round_keys[8]; temp2 = _mm_aeskeygenassist_si128(temp1, 0x1b); round_keys[9] = AES_128_ASSIST(temp1, temp2);
        temp1 = round_keys[9]; temp2 = _mm_aeskeygenassist_si128(temp1, 0x36); round_keys[10] = AES_128_ASSIST(temp1, temp2);
    } else if (Nk_enum == AES_KEY_192) {
        /* Load the upper 8 key bytes via a zero-padded buffer so we never
         * read past the end of the caller's 24-byte key. */
        uint8_t high_qword[16];
        memset(high_qword, 0, sizeof high_qword);
        memcpy(high_qword, key + 16, 8);
        temp1 = _mm_loadu_si128((const __m128i*)key);
        temp3 = _mm_loadu_si128((const __m128i*)high_qword);
        round_keys[0] = temp1;
        round_keys[1] = temp3;   /* low half valid; high half filled below */
        temp2 = _mm_aeskeygenassist_si128(temp3, 0x01);
        AES_192_ASSIST(&temp1, &temp2, &temp3);
        round_keys[1] = aes_pack_lo_lo(round_keys[1], temp1);
        round_keys[2] = aes_pack_hi_lo(temp1, temp3);
        temp2 = _mm_aeskeygenassist_si128(temp3, 0x02);
        AES_192_ASSIST(&temp1, &temp2, &temp3);
        round_keys[3] = temp1;
        round_keys[4] = temp3;
        temp2 = _mm_aeskeygenassist_si128(temp3, 0x04);
        AES_192_ASSIST(&temp1, &temp2, &temp3);
        round_keys[4] = aes_pack_lo_lo(round_keys[4], temp1);
        round_keys[5] = aes_pack_hi_lo(temp1, temp3);
        temp2 = _mm_aeskeygenassist_si128(temp3, 0x08);
        AES_192_ASSIST(&temp1, &temp2, &temp3);
        round_keys[6] = temp1;
        round_keys[7] = temp3;
        temp2 = _mm_aeskeygenassist_si128(temp3, 0x10);
        AES_192_ASSIST(&temp1, &temp2, &temp3);
        round_keys[7] = aes_pack_lo_lo(round_keys[7], temp1);
        round_keys[8] = aes_pack_hi_lo(temp1, temp3);
        temp2 = _mm_aeskeygenassist_si128(temp3, 0x20);
        AES_192_ASSIST(&temp1, &temp2, &temp3);
        round_keys[9] = temp1;
        round_keys[10] = temp3;
        temp2 = _mm_aeskeygenassist_si128(temp3, 0x40);
        AES_192_ASSIST(&temp1, &temp2, &temp3);
        round_keys[10] = aes_pack_lo_lo(round_keys[10], temp1);
        round_keys[11] = aes_pack_hi_lo(temp1, temp3);
        temp2 = _mm_aeskeygenassist_si128(temp3, 0x80);
        AES_192_ASSIST(&temp1, &temp2, &temp3);
        round_keys[12] = temp1;
    } else if (Nk_enum == AES_KEY_256) {
        /* Two 128-bit halves alternate: ASSIST1 derives the even keys
         * (with rcon), ASSIST2 the odd keys (SubWord only). */
        temp1 = _mm_loadu_si128((const __m128i*)key);
        temp3 = _mm_loadu_si128((const __m128i*)(key + 16));
        round_keys[0] = temp1;
        round_keys[1] = temp3;
        temp2 = _mm_aeskeygenassist_si128(temp3, 0x01); AES_256_ASSIST1(&temp1, &temp2); round_keys[2] = temp1;
        AES_256_ASSIST2(&temp1, &temp3); round_keys[3] = temp3;
        temp2 = _mm_aeskeygenassist_si128(temp3, 0x02); AES_256_ASSIST1(&temp1, &temp2); round_keys[4] = temp1;
        AES_256_ASSIST2(&temp1, &temp3); round_keys[5] = temp3;
        temp2 = _mm_aeskeygenassist_si128(temp3, 0x04); AES_256_ASSIST1(&temp1, &temp2); round_keys[6] = temp1;
        AES_256_ASSIST2(&temp1, &temp3); round_keys[7] = temp3;
        temp2 = _mm_aeskeygenassist_si128(temp3, 0x08); AES_256_ASSIST1(&temp1, &temp2); round_keys[8] = temp1;
        AES_256_ASSIST2(&temp1, &temp3); round_keys[9] = temp3;
        temp2 = _mm_aeskeygenassist_si128(temp3, 0x10); AES_256_ASSIST1(&temp1, &temp2); round_keys[10] = temp1;
        AES_256_ASSIST2(&temp1, &temp3); round_keys[11] = temp3;
        temp2 = _mm_aeskeygenassist_si128(temp3, 0x20); AES_256_ASSIST1(&temp1, &temp2); round_keys[12] = temp1;
        AES_256_ASSIST2(&temp1, &temp3); round_keys[13] = temp3;
        temp2 = _mm_aeskeygenassist_si128(temp3, 0x40); AES_256_ASSIST1(&temp1, &temp2); round_keys[14] = temp1;
    } else {
        return -1;
    }

    if (mode == AES_KEY_SCHEDULE_DECRYPT) {
        /* Equivalent Inverse Cipher: InvMixColumns the middle keys so the
         * decryptor can use aesdec with keys in descending order. */
        for (int i = 1; i < Nr; i++) {
            round_keys[i] = _mm_aesimc_si128(round_keys[i]);
        }
    }
    return 0;
}

/* Print a 16-byte AES state as space-separated lowercase hex, newline-terminated. */
void AES_PrintState(const uint8_t *state) {
    const uint8_t *end = state + AES_BLOCK_SIZE;
    for (const uint8_t *p = state; p < end; ++p) {
        printf("%02x ", *p);
    }
    printf("\n");
}

/*
 * Encrypt one 16-byte block with AES-NI.  round_keys must hold
 * num_rounds + 1 encryption-order keys; in-place use (output == input)
 * is safe since the block is read fully before it is written.
 */
void AES_Encrypt_Block(uint8_t *output_block, const uint8_t *input_block, const __m128i *round_keys, int num_rounds) {
    __m128i state = _mm_loadu_si128((const __m128i*)input_block);
    state = _mm_xor_si128(state, round_keys[0]);      /* initial whitening */
    int round = 1;
    while (round < num_rounds) {
        state = _mm_aesenc_si128(state, round_keys[round]);
        ++round;
    }
    state = _mm_aesenclast_si128(state, round_keys[num_rounds]);
    _mm_storeu_si128((__m128i*)output_block, state);
}

/*
 * Decrypt one 16-byte block with AES-NI (Equivalent Inverse Cipher):
 * round_keys is the encryption-order schedule with its middle keys
 * aesimc-transformed (see AES_KeyExpansion with AES_KEY_SCHEDULE_DECRYPT),
 * consumed here from index num_rounds down to 0.
 */
void AES_Decrypt_Block(uint8_t *output_block, const uint8_t *input_block, const __m128i *round_keys, int num_rounds) {
    __m128i state = _mm_loadu_si128((const __m128i*)input_block);
    state = _mm_xor_si128(state, round_keys[num_rounds]);
    int round = num_rounds;
    while (--round > 0) {
        state = _mm_aesdec_si128(state, round_keys[round]);
    }
    state = _mm_aesdeclast_si128(state, round_keys[0]);
    _mm_storeu_si128((__m128i*)output_block, state);
}


/*
 * Encrypt `num_blocks` consecutive 16-byte blocks in place.  Each block is
 * processed independently (ECB-style); any chaining mode must be layered
 * by the caller.  round_keys must hold num_rounds + 1 encryption-order
 * keys.  The main loop works on 16 blocks per iteration so the 16
 * in-flight aesenc chains hide the instruction's latency.
 */
__attribute__((target("aes,avx2")))
void AES_Encrypt_AVX2(uint8_t *blocks, size_t num_blocks, const __m128i *round_keys, int num_rounds) {
    const size_t block_bytes = AES_BLOCK_SIZE;
    /* One main-loop pass covers 16 blocks = 256 bytes. */
    const size_t stride_bytes = 16 * block_bytes;

    size_t main_iterations = num_blocks / 16;
    size_t i;

    for (i = 0; i < main_iterations; ++i) {
        uint8_t* current_block_ptr = blocks + i * stride_bytes;

        /* Prefetch the next 256-byte batch (4 cache lines) while this one
         * is being encrypted. */
        if (i + 1 < main_iterations) {
            uint8_t* prefetch_addr_base = blocks + (i + 1) * stride_bytes;
            _mm_prefetch((const char*)(prefetch_addr_base + 0 * 64), _MM_HINT_T0);
            _mm_prefetch((const char*)(prefetch_addr_base + 1 * 64), _MM_HINT_T0);
            _mm_prefetch((const char*)(prefetch_addr_base + 2 * 64), _MM_HINT_T0);
            _mm_prefetch((const char*)(prefetch_addr_base + 3 * 64), _MM_HINT_T0);
        }

        /* Load 16 independent blocks (unaligned loads: caller's buffer has
         * no alignment requirement). */
        __m128i b0 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 0 * block_bytes));
        __m128i b1 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 1 * block_bytes));
        __m128i b2 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 2 * block_bytes));
        __m128i b3 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 3 * block_bytes));
        __m128i b4 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 4 * block_bytes));
        __m128i b5 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 5 * block_bytes));
        __m128i b6 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 6 * block_bytes));
        __m128i b7 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 7 * block_bytes));
        __m128i b8 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 8 * block_bytes));
        __m128i b9 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 9 * block_bytes));
        __m128i b10 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 10 * block_bytes));
        __m128i b11 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 11 * block_bytes));
        __m128i b12 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 12 * block_bytes));
        __m128i b13 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 13 * block_bytes));
        __m128i b14 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 14 * block_bytes));
        __m128i b15 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 15 * block_bytes));

        /* Round 0: whitening XOR with the first round key. */
        b0 = _mm_xor_si128(b0, round_keys[0]); b1 = _mm_xor_si128(b1, round_keys[0]); b2 = _mm_xor_si128(b2, round_keys[0]); b3 = _mm_xor_si128(b3, round_keys[0]); b4 = _mm_xor_si128(b4, round_keys[0]); b5 = _mm_xor_si128(b5, round_keys[0]); b6 = _mm_xor_si128(b6, round_keys[0]); b7 = _mm_xor_si128(b7, round_keys[0]); b8 = _mm_xor_si128(b8, round_keys[0]); b9 = _mm_xor_si128(b9, round_keys[0]); b10 = _mm_xor_si128(b10, round_keys[0]); b11 = _mm_xor_si128(b11, round_keys[0]); b12 = _mm_xor_si128(b12, round_keys[0]); b13 = _mm_xor_si128(b13, round_keys[0]); b14 = _mm_xor_si128(b14, round_keys[0]); b15 = _mm_xor_si128(b15, round_keys[0]);

        /* Middle rounds 1 .. num_rounds-1, unrolled four rounds at a time. */
        int r_idx;
        for (r_idx = 1; r_idx <= num_rounds - 4; r_idx += 4) {
            b0 = _mm_aesenc_si128(b0, round_keys[r_idx]); b1 = _mm_aesenc_si128(b1, round_keys[r_idx]); b2 = _mm_aesenc_si128(b2, round_keys[r_idx]); b3 = _mm_aesenc_si128(b3, round_keys[r_idx]); b4 = _mm_aesenc_si128(b4, round_keys[r_idx]); b5 = _mm_aesenc_si128(b5, round_keys[r_idx]); b6 = _mm_aesenc_si128(b6, round_keys[r_idx]); b7 = _mm_aesenc_si128(b7, round_keys[r_idx]); b8 = _mm_aesenc_si128(b8, round_keys[r_idx]); b9 = _mm_aesenc_si128(b9, round_keys[r_idx]); b10 = _mm_aesenc_si128(b10, round_keys[r_idx]); b11 = _mm_aesenc_si128(b11, round_keys[r_idx]); b12 = _mm_aesenc_si128(b12, round_keys[r_idx]); b13 = _mm_aesenc_si128(b13, round_keys[r_idx]); b14 = _mm_aesenc_si128(b14, round_keys[r_idx]); b15 = _mm_aesenc_si128(b15, round_keys[r_idx]);
            b0 = _mm_aesenc_si128(b0, round_keys[r_idx+1]); b1 = _mm_aesenc_si128(b1, round_keys[r_idx+1]); b2 = _mm_aesenc_si128(b2, round_keys[r_idx+1]); b3 = _mm_aesenc_si128(b3, round_keys[r_idx+1]); b4 = _mm_aesenc_si128(b4, round_keys[r_idx+1]); b5 = _mm_aesenc_si128(b5, round_keys[r_idx+1]); b6 = _mm_aesenc_si128(b6, round_keys[r_idx+1]); b7 = _mm_aesenc_si128(b7, round_keys[r_idx+1]); b8 = _mm_aesenc_si128(b8, round_keys[r_idx+1]); b9 = _mm_aesenc_si128(b9, round_keys[r_idx+1]); b10 = _mm_aesenc_si128(b10, round_keys[r_idx+1]); b11 = _mm_aesenc_si128(b11, round_keys[r_idx+1]); b12 = _mm_aesenc_si128(b12, round_keys[r_idx+1]); b13 = _mm_aesenc_si128(b13, round_keys[r_idx+1]); b14 = _mm_aesenc_si128(b14, round_keys[r_idx+1]); b15 = _mm_aesenc_si128(b15, round_keys[r_idx+1]);
            b0 = _mm_aesenc_si128(b0, round_keys[r_idx+2]); b1 = _mm_aesenc_si128(b1, round_keys[r_idx+2]); b2 = _mm_aesenc_si128(b2, round_keys[r_idx+2]); b3 = _mm_aesenc_si128(b3, round_keys[r_idx+2]); b4 = _mm_aesenc_si128(b4, round_keys[r_idx+2]); b5 = _mm_aesenc_si128(b5, round_keys[r_idx+2]); b6 = _mm_aesenc_si128(b6, round_keys[r_idx+2]); b7 = _mm_aesenc_si128(b7, round_keys[r_idx+2]); b8 = _mm_aesenc_si128(b8, round_keys[r_idx+2]); b9 = _mm_aesenc_si128(b9, round_keys[r_idx+2]); b10 = _mm_aesenc_si128(b10, round_keys[r_idx+2]); b11 = _mm_aesenc_si128(b11, round_keys[r_idx+2]); b12 = _mm_aesenc_si128(b12, round_keys[r_idx+2]); b13 = _mm_aesenc_si128(b13, round_keys[r_idx+2]); b14 = _mm_aesenc_si128(b14, round_keys[r_idx+2]); b15 = _mm_aesenc_si128(b15, round_keys[r_idx+2]);
            b0 = _mm_aesenc_si128(b0, round_keys[r_idx+3]); b1 = _mm_aesenc_si128(b1, round_keys[r_idx+3]); b2 = _mm_aesenc_si128(b2, round_keys[r_idx+3]); b3 = _mm_aesenc_si128(b3, round_keys[r_idx+3]); b4 = _mm_aesenc_si128(b4, round_keys[r_idx+3]); b5 = _mm_aesenc_si128(b5, round_keys[r_idx+3]); b6 = _mm_aesenc_si128(b6, round_keys[r_idx+3]); b7 = _mm_aesenc_si128(b7, round_keys[r_idx+3]); b8 = _mm_aesenc_si128(b8, round_keys[r_idx+3]); b9 = _mm_aesenc_si128(b9, round_keys[r_idx+3]); b10 = _mm_aesenc_si128(b10, round_keys[r_idx+3]); b11 = _mm_aesenc_si128(b11, round_keys[r_idx+3]); b12 = _mm_aesenc_si128(b12, round_keys[r_idx+3]); b13 = _mm_aesenc_si128(b13, round_keys[r_idx+3]); b14 = _mm_aesenc_si128(b14, round_keys[r_idx+3]); b15 = _mm_aesenc_si128(b15, round_keys[r_idx+3]);
        }

        /* Leftover middle rounds when num_rounds - 1 is not a multiple of 4
         * (1 round for AES-128, 3 for AES-192, 1 for AES-256). */
        for (; r_idx < num_rounds; ++r_idx) {
            b0 = _mm_aesenc_si128(b0, round_keys[r_idx]); b1 = _mm_aesenc_si128(b1, round_keys[r_idx]); b2 = _mm_aesenc_si128(b2, round_keys[r_idx]); b3 = _mm_aesenc_si128(b3, round_keys[r_idx]); b4 = _mm_aesenc_si128(b4, round_keys[r_idx]); b5 = _mm_aesenc_si128(b5, round_keys[r_idx]); b6 = _mm_aesenc_si128(b6, round_keys[r_idx]); b7 = _mm_aesenc_si128(b7, round_keys[r_idx]); b8 = _mm_aesenc_si128(b8, round_keys[r_idx]); b9 = _mm_aesenc_si128(b9, round_keys[r_idx]); b10 = _mm_aesenc_si128(b10, round_keys[r_idx]); b11 = _mm_aesenc_si128(b11, round_keys[r_idx]); b12 = _mm_aesenc_si128(b12, round_keys[r_idx]); b13 = _mm_aesenc_si128(b13, round_keys[r_idx]); b14 = _mm_aesenc_si128(b14, round_keys[r_idx]); b15 = _mm_aesenc_si128(b15, round_keys[r_idx]);
        }

        /* Final round (no MixColumns). */
        b0 = _mm_aesenclast_si128(b0, round_keys[num_rounds]); b1 = _mm_aesenclast_si128(b1, round_keys[num_rounds]); b2 = _mm_aesenclast_si128(b2, round_keys[num_rounds]); b3 = _mm_aesenclast_si128(b3, round_keys[num_rounds]); b4 = _mm_aesenclast_si128(b4, round_keys[num_rounds]); b5 = _mm_aesenclast_si128(b5, round_keys[num_rounds]); b6 = _mm_aesenclast_si128(b6, round_keys[num_rounds]); b7 = _mm_aesenclast_si128(b7, round_keys[num_rounds]); b8 = _mm_aesenclast_si128(b8, round_keys[num_rounds]); b9 = _mm_aesenclast_si128(b9, round_keys[num_rounds]); b10 = _mm_aesenclast_si128(b10, round_keys[num_rounds]); b11 = _mm_aesenclast_si128(b11, round_keys[num_rounds]); b12 = _mm_aesenclast_si128(b12, round_keys[num_rounds]); b13 = _mm_aesenclast_si128(b13, round_keys[num_rounds]); b14 = _mm_aesenclast_si128(b14, round_keys[num_rounds]); b15 = _mm_aesenclast_si128(b15, round_keys[num_rounds]);

        /* Store ciphertext back over the plaintext. */
        _mm_storeu_si128((__m128i*)(current_block_ptr + 0 * block_bytes), b0);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 1 * block_bytes), b1);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 2 * block_bytes), b2);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 3 * block_bytes), b3);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 4 * block_bytes), b4);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 5 * block_bytes), b5);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 6 * block_bytes), b6);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 7 * block_bytes), b7);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 8 * block_bytes), b8);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 9 * block_bytes), b9);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 10 * block_bytes), b10);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 11 * block_bytes), b11);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 12 * block_bytes), b12);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 13 * block_bytes), b13);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 14 * block_bytes), b14);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 15 * block_bytes), b15);
    }

    /* Tail: up to 15 blocks remain after the 16-wide main loop. */
    size_t remaining_start_offset = main_iterations * stride_bytes;
    size_t remaining_blocks_count = num_blocks % 16;
    uint8_t *remaining_ptr = blocks + remaining_start_offset;
    size_t k_rem = 0;

    /* Encrypt 8 blocks at once when the tail is large enough. */
    if (remaining_blocks_count >= 8) {
        __m128i rb0 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 0 * block_bytes));
        __m128i rb1 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 1 * block_bytes));
        __m128i rb2 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 2 * block_bytes));
        __m128i rb3 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 3 * block_bytes));
        __m128i rb4 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 4 * block_bytes));
        __m128i rb5 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 5 * block_bytes));
        __m128i rb6 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 6 * block_bytes));
        __m128i rb7 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 7 * block_bytes));
        rb0 = _mm_xor_si128(rb0, round_keys[0]); rb1 = _mm_xor_si128(rb1, round_keys[0]); rb2 = _mm_xor_si128(rb2, round_keys[0]); rb3 = _mm_xor_si128(rb3, round_keys[0]); rb4 = _mm_xor_si128(rb4, round_keys[0]); rb5 = _mm_xor_si128(rb5, round_keys[0]); rb6 = _mm_xor_si128(rb6, round_keys[0]); rb7 = _mm_xor_si128(rb7, round_keys[0]);
        for (int r_rem_loop = 1; r_rem_loop < num_rounds; ++r_rem_loop) {
            rb0 = _mm_aesenc_si128(rb0, round_keys[r_rem_loop]); rb1 = _mm_aesenc_si128(rb1, round_keys[r_rem_loop]); rb2 = _mm_aesenc_si128(rb2, round_keys[r_rem_loop]); rb3 = _mm_aesenc_si128(rb3, round_keys[r_rem_loop]); rb4 = _mm_aesenc_si128(rb4, round_keys[r_rem_loop]); rb5 = _mm_aesenc_si128(rb5, round_keys[r_rem_loop]); rb6 = _mm_aesenc_si128(rb6, round_keys[r_rem_loop]); rb7 = _mm_aesenc_si128(rb7, round_keys[r_rem_loop]);
        }
        rb0 = _mm_aesenclast_si128(rb0, round_keys[num_rounds]); rb1 = _mm_aesenclast_si128(rb1, round_keys[num_rounds]); rb2 = _mm_aesenclast_si128(rb2, round_keys[num_rounds]); rb3 = _mm_aesenclast_si128(rb3, round_keys[num_rounds]); rb4 = _mm_aesenclast_si128(rb4, round_keys[num_rounds]); rb5 = _mm_aesenclast_si128(rb5, round_keys[num_rounds]); rb6 = _mm_aesenclast_si128(rb6, round_keys[num_rounds]); rb7 = _mm_aesenclast_si128(rb7, round_keys[num_rounds]);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 0 * block_bytes), rb0);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 1 * block_bytes), rb1);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 2 * block_bytes), rb2);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 3 * block_bytes), rb3);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 4 * block_bytes), rb4);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 5 * block_bytes), rb5);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 6 * block_bytes), rb6);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 7 * block_bytes), rb7);
        k_rem = 8;
    }

    /* Final 0-7 blocks, one at a time. */
    for ( ; k_rem < remaining_blocks_count; ++k_rem) {
        uint8_t *current_single_block = remaining_ptr + k_rem * block_bytes;
        AES_Encrypt_Block(current_single_block, current_single_block, round_keys, num_rounds);
    }
}

/*
 * Decrypt `num_blocks` consecutive 16-byte blocks in place (mirror of
 * AES_Encrypt_AVX2).  Uses the Equivalent Inverse Cipher: round_keys is
 * consumed from index num_rounds down to 0, and its middle keys must be
 * aesimc-transformed (i.e. a schedule built by AES_KeyExpansion with
 * AES_KEY_SCHEDULE_DECRYPT).  Blocks are processed independently
 * (ECB-style); chaining must be layered by the caller.
 */
__attribute__((target("aes,avx2")))
void AES_Decrypt_AVX2(uint8_t *blocks, size_t num_blocks, const __m128i *round_keys, int num_rounds) {
    const size_t block_bytes = AES_BLOCK_SIZE;
    /* One main-loop pass covers 16 blocks = 256 bytes. */
    const size_t stride_bytes = 16 * block_bytes;
    const int last_key_idx = num_rounds;

    size_t main_iterations = num_blocks / 16;
    size_t i;

    for (i = 0; i < main_iterations; ++i) {
        uint8_t* current_block_ptr = blocks + i * stride_bytes;

        /* Prefetch the next 256-byte batch (4 cache lines). */
        if (i + 1 < main_iterations) {
            uint8_t* prefetch_addr_base = blocks + (i + 1) * stride_bytes;
            _mm_prefetch((const char*)(prefetch_addr_base + 0 * 64), _MM_HINT_T0);
            _mm_prefetch((const char*)(prefetch_addr_base + 1 * 64), _MM_HINT_T0);
            _mm_prefetch((const char*)(prefetch_addr_base + 2 * 64), _MM_HINT_T0);
            _mm_prefetch((const char*)(prefetch_addr_base + 3 * 64), _MM_HINT_T0);
        }

        /* Load 16 independent ciphertext blocks. */
        __m128i b0 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 0 * block_bytes));
        __m128i b1 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 1 * block_bytes));
        __m128i b2 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 2 * block_bytes));
        __m128i b3 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 3 * block_bytes));
        __m128i b4 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 4 * block_bytes));
        __m128i b5 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 5 * block_bytes));
        __m128i b6 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 6 * block_bytes));
        __m128i b7 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 7 * block_bytes));
        __m128i b8 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 8 * block_bytes));
        __m128i b9 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 9 * block_bytes));
        __m128i b10 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 10 * block_bytes));
        __m128i b11 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 11 * block_bytes));
        __m128i b12 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 12 * block_bytes));
        __m128i b13 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 13 * block_bytes));
        __m128i b14 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 14 * block_bytes));
        __m128i b15 = _mm_loadu_si128((const __m128i*)(current_block_ptr + 15 * block_bytes));

        /* Initial XOR with the last round key. */
        b0 = _mm_xor_si128(b0, round_keys[last_key_idx]); b1 = _mm_xor_si128(b1, round_keys[last_key_idx]); b2 = _mm_xor_si128(b2, round_keys[last_key_idx]); b3 = _mm_xor_si128(b3, round_keys[last_key_idx]); b4 = _mm_xor_si128(b4, round_keys[last_key_idx]); b5 = _mm_xor_si128(b5, round_keys[last_key_idx]); b6 = _mm_xor_si128(b6, round_keys[last_key_idx]); b7 = _mm_xor_si128(b7, round_keys[last_key_idx]); b8 = _mm_xor_si128(b8, round_keys[last_key_idx]); b9 = _mm_xor_si128(b9, round_keys[last_key_idx]); b10 = _mm_xor_si128(b10, round_keys[last_key_idx]); b11 = _mm_xor_si128(b11, round_keys[last_key_idx]); b12 = _mm_xor_si128(b12, round_keys[last_key_idx]); b13 = _mm_xor_si128(b13, round_keys[last_key_idx]); b14 = _mm_xor_si128(b14, round_keys[last_key_idx]); b15 = _mm_xor_si128(b15, round_keys[last_key_idx]);

        /* Middle rounds num_rounds-1 .. 1, descending, unrolled four at a time. */
        int r_idx;
        for (r_idx = num_rounds - 1; r_idx >= 4; r_idx -= 4) {
            b0 = _mm_aesdec_si128(b0, round_keys[r_idx]); b1 = _mm_aesdec_si128(b1, round_keys[r_idx]); b2 = _mm_aesdec_si128(b2, round_keys[r_idx]); b3 = _mm_aesdec_si128(b3, round_keys[r_idx]); b4 = _mm_aesdec_si128(b4, round_keys[r_idx]); b5 = _mm_aesdec_si128(b5, round_keys[r_idx]); b6 = _mm_aesdec_si128(b6, round_keys[r_idx]); b7 = _mm_aesdec_si128(b7, round_keys[r_idx]); b8 = _mm_aesdec_si128(b8, round_keys[r_idx]); b9 = _mm_aesdec_si128(b9, round_keys[r_idx]); b10 = _mm_aesdec_si128(b10, round_keys[r_idx]); b11 = _mm_aesdec_si128(b11, round_keys[r_idx]); b12 = _mm_aesdec_si128(b12, round_keys[r_idx]); b13 = _mm_aesdec_si128(b13, round_keys[r_idx]); b14 = _mm_aesdec_si128(b14, round_keys[r_idx]); b15 = _mm_aesdec_si128(b15, round_keys[r_idx]);
            b0 = _mm_aesdec_si128(b0, round_keys[r_idx-1]); b1 = _mm_aesdec_si128(b1, round_keys[r_idx-1]); b2 = _mm_aesdec_si128(b2, round_keys[r_idx-1]); b3 = _mm_aesdec_si128(b3, round_keys[r_idx-1]); b4 = _mm_aesdec_si128(b4, round_keys[r_idx-1]); b5 = _mm_aesdec_si128(b5, round_keys[r_idx-1]); b6 = _mm_aesdec_si128(b6, round_keys[r_idx-1]); b7 = _mm_aesdec_si128(b7, round_keys[r_idx-1]); b8 = _mm_aesdec_si128(b8, round_keys[r_idx-1]); b9 = _mm_aesdec_si128(b9, round_keys[r_idx-1]); b10 = _mm_aesdec_si128(b10, round_keys[r_idx-1]); b11 = _mm_aesdec_si128(b11, round_keys[r_idx-1]); b12 = _mm_aesdec_si128(b12, round_keys[r_idx-1]); b13 = _mm_aesdec_si128(b13, round_keys[r_idx-1]); b14 = _mm_aesdec_si128(b14, round_keys[r_idx-1]); b15 = _mm_aesdec_si128(b15, round_keys[r_idx-1]);
            b0 = _mm_aesdec_si128(b0, round_keys[r_idx-2]); b1 = _mm_aesdec_si128(b1, round_keys[r_idx-2]); b2 = _mm_aesdec_si128(b2, round_keys[r_idx-2]); b3 = _mm_aesdec_si128(b3, round_keys[r_idx-2]); b4 = _mm_aesdec_si128(b4, round_keys[r_idx-2]); b5 = _mm_aesdec_si128(b5, round_keys[r_idx-2]); b6 = _mm_aesdec_si128(b6, round_keys[r_idx-2]); b7 = _mm_aesdec_si128(b7, round_keys[r_idx-2]); b8 = _mm_aesdec_si128(b8, round_keys[r_idx-2]); b9 = _mm_aesdec_si128(b9, round_keys[r_idx-2]); b10 = _mm_aesdec_si128(b10, round_keys[r_idx-2]); b11 = _mm_aesdec_si128(b11, round_keys[r_idx-2]); b12 = _mm_aesdec_si128(b12, round_keys[r_idx-2]); b13 = _mm_aesdec_si128(b13, round_keys[r_idx-2]); b14 = _mm_aesdec_si128(b14, round_keys[r_idx-2]); b15 = _mm_aesdec_si128(b15, round_keys[r_idx-2]);
            b0 = _mm_aesdec_si128(b0, round_keys[r_idx-3]); b1 = _mm_aesdec_si128(b1, round_keys[r_idx-3]); b2 = _mm_aesdec_si128(b2, round_keys[r_idx-3]); b3 = _mm_aesdec_si128(b3, round_keys[r_idx-3]); b4 = _mm_aesdec_si128(b4, round_keys[r_idx-3]); b5 = _mm_aesdec_si128(b5, round_keys[r_idx-3]); b6 = _mm_aesdec_si128(b6, round_keys[r_idx-3]); b7 = _mm_aesdec_si128(b7, round_keys[r_idx-3]); b8 = _mm_aesdec_si128(b8, round_keys[r_idx-3]); b9 = _mm_aesdec_si128(b9, round_keys[r_idx-3]); b10 = _mm_aesdec_si128(b10, round_keys[r_idx-3]); b11 = _mm_aesdec_si128(b11, round_keys[r_idx-3]); b12 = _mm_aesdec_si128(b12, round_keys[r_idx-3]); b13 = _mm_aesdec_si128(b13, round_keys[r_idx-3]); b14 = _mm_aesdec_si128(b14, round_keys[r_idx-3]); b15 = _mm_aesdec_si128(b15, round_keys[r_idx-3]);
        }

        /* Leftover middle rounds (keys r_idx .. 1). */
        for (; r_idx >= 1; --r_idx) {
            b0 = _mm_aesdec_si128(b0, round_keys[r_idx]); b1 = _mm_aesdec_si128(b1, round_keys[r_idx]); b2 = _mm_aesdec_si128(b2, round_keys[r_idx]); b3 = _mm_aesdec_si128(b3, round_keys[r_idx]); b4 = _mm_aesdec_si128(b4, round_keys[r_idx]); b5 = _mm_aesdec_si128(b5, round_keys[r_idx]); b6 = _mm_aesdec_si128(b6, round_keys[r_idx]); b7 = _mm_aesdec_si128(b7, round_keys[r_idx]); b8 = _mm_aesdec_si128(b8, round_keys[r_idx]); b9 = _mm_aesdec_si128(b9, round_keys[r_idx]); b10 = _mm_aesdec_si128(b10, round_keys[r_idx]); b11 = _mm_aesdec_si128(b11, round_keys[r_idx]); b12 = _mm_aesdec_si128(b12, round_keys[r_idx]); b13 = _mm_aesdec_si128(b13, round_keys[r_idx]); b14 = _mm_aesdec_si128(b14, round_keys[r_idx]); b15 = _mm_aesdec_si128(b15, round_keys[r_idx]);
        }

        /* Final round with the original key 0 (not aesimc-transformed). */
        b0 = _mm_aesdeclast_si128(b0, round_keys[0]); b1 = _mm_aesdeclast_si128(b1, round_keys[0]); b2 = _mm_aesdeclast_si128(b2, round_keys[0]); b3 = _mm_aesdeclast_si128(b3, round_keys[0]); b4 = _mm_aesdeclast_si128(b4, round_keys[0]); b5 = _mm_aesdeclast_si128(b5, round_keys[0]); b6 = _mm_aesdeclast_si128(b6, round_keys[0]); b7 = _mm_aesdeclast_si128(b7, round_keys[0]); b8 = _mm_aesdeclast_si128(b8, round_keys[0]); b9 = _mm_aesdeclast_si128(b9, round_keys[0]); b10 = _mm_aesdeclast_si128(b10, round_keys[0]); b11 = _mm_aesdeclast_si128(b11, round_keys[0]); b12 = _mm_aesdeclast_si128(b12, round_keys[0]); b13 = _mm_aesdeclast_si128(b13, round_keys[0]); b14 = _mm_aesdeclast_si128(b14, round_keys[0]); b15 = _mm_aesdeclast_si128(b15, round_keys[0]);

        /* Store plaintext back over the ciphertext. */
        _mm_storeu_si128((__m128i*)(current_block_ptr + 0 * block_bytes), b0);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 1 * block_bytes), b1);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 2 * block_bytes), b2);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 3 * block_bytes), b3);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 4 * block_bytes), b4);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 5 * block_bytes), b5);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 6 * block_bytes), b6);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 7 * block_bytes), b7);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 8 * block_bytes), b8);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 9 * block_bytes), b9);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 10 * block_bytes), b10);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 11 * block_bytes), b11);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 12 * block_bytes), b12);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 13 * block_bytes), b13);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 14 * block_bytes), b14);
        _mm_storeu_si128((__m128i*)(current_block_ptr + 15 * block_bytes), b15);
    }

    /* Tail: up to 15 blocks remain after the 16-wide main loop. */
    size_t remaining_start_offset = main_iterations * stride_bytes;
    size_t remaining_blocks_count = num_blocks % 16;
    uint8_t *remaining_ptr = blocks + remaining_start_offset;
    size_t k_rem = 0;

    /* Decrypt 8 blocks at once when the tail is large enough. */
    if (remaining_blocks_count >= 8) {
        __m128i rb0 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 0 * block_bytes));
        __m128i rb1 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 1 * block_bytes));
        __m128i rb2 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 2 * block_bytes));
        __m128i rb3 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 3 * block_bytes));
        __m128i rb4 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 4 * block_bytes));
        __m128i rb5 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 5 * block_bytes));
        __m128i rb6 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 6 * block_bytes));
        __m128i rb7 = _mm_loadu_si128((const __m128i*)(remaining_ptr + 7 * block_bytes));
        rb0 = _mm_xor_si128(rb0, round_keys[last_key_idx]); rb1 = _mm_xor_si128(rb1, round_keys[last_key_idx]); rb2 = _mm_xor_si128(rb2, round_keys[last_key_idx]); rb3 = _mm_xor_si128(rb3, round_keys[last_key_idx]); rb4 = _mm_xor_si128(rb4, round_keys[last_key_idx]); rb5 = _mm_xor_si128(rb5, round_keys[last_key_idx]); rb6 = _mm_xor_si128(rb6, round_keys[last_key_idx]); rb7 = _mm_xor_si128(rb7, round_keys[last_key_idx]);
        for (int r_rem_loop = num_rounds - 1; r_rem_loop >= 1; --r_rem_loop) {
            rb0 = _mm_aesdec_si128(rb0, round_keys[r_rem_loop]); rb1 = _mm_aesdec_si128(rb1, round_keys[r_rem_loop]); rb2 = _mm_aesdec_si128(rb2, round_keys[r_rem_loop]); rb3 = _mm_aesdec_si128(rb3, round_keys[r_rem_loop]); rb4 = _mm_aesdec_si128(rb4, round_keys[r_rem_loop]); rb5 = _mm_aesdec_si128(rb5, round_keys[r_rem_loop]); rb6 = _mm_aesdec_si128(rb6, round_keys[r_rem_loop]); rb7 = _mm_aesdec_si128(rb7, round_keys[r_rem_loop]);
        }
        rb0 = _mm_aesdeclast_si128(rb0, round_keys[0]); rb1 = _mm_aesdeclast_si128(rb1, round_keys[0]); rb2 = _mm_aesdeclast_si128(rb2, round_keys[0]); rb3 = _mm_aesdeclast_si128(rb3, round_keys[0]); rb4 = _mm_aesdeclast_si128(rb4, round_keys[0]); rb5 = _mm_aesdeclast_si128(rb5, round_keys[0]); rb6 = _mm_aesdeclast_si128(rb6, round_keys[0]); rb7 = _mm_aesdeclast_si128(rb7, round_keys[0]);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 0 * block_bytes), rb0);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 1 * block_bytes), rb1);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 2 * block_bytes), rb2);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 3 * block_bytes), rb3);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 4 * block_bytes), rb4);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 5 * block_bytes), rb5);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 6 * block_bytes), rb6);
        _mm_storeu_si128((__m128i*)(remaining_ptr + 7 * block_bytes), rb7);
        k_rem = 8;
    }

    /* Final 0-7 blocks, one at a time. */
    for ( ; k_rem < remaining_blocks_count; ++k_rem) {
        uint8_t *current_single_block = remaining_ptr + k_rem * block_bytes;
        AES_Decrypt_Block(current_single_block, current_single_block, round_keys, num_rounds);
    }
}
