#include "FLOW_core/flow_core_pch.h"
#include <stdlib.h>    // for _rotl

namespace CORE
{
//=============================================================================

	/*!
	 * Block read - if your platform needs to do endian-swapping or can only
	 * handle aligned reads, do the conversion here
	 */
	/*!
	 * Read one 32-bit block of the key.
	 * Centralised so platforms that need endian-swapping or aligned-only
	 * loads can adapt this single routine. On x86 a plain dereference works.
	 */
	inline u32_t getblock( const u32_t* p, int_t i )
	{
		return *( p + i );
	}

//=============================================================================

	/*!
	 * Finalization mix - force all bits of a hash block to avalanche
	 * avalanches all bits to within 0.25% bias
	 */
	/*!
	 * Finalization mix - forces every input bit to avalanche across the
	 * whole hash (bias within 0.25%). Three xor-shift/multiply rounds,
	 * written here with each round fused into a single expression.
	 */
	inline hashid_t fmix32 ( hashid_t h )
	{
		h = ( h ^ ( h >> 16 ) ) * 0x85ebca6b;
		h = ( h ^ ( h >> 13 ) ) * 0xc2b2ae35;
		return h ^ ( h >> 16 );
	}

//=============================================================================

	/*!
	 * Block mix - folds one 32-bit key block k1 into the hash state h1,
	 * then evolves the round constants c1/c2 so each block is mixed with
	 * fresh multipliers.
	 *
	 * NOTE(review): the rotate was previously the MSVC-only _rotl()
	 * intrinsic; replaced with the equivalent portable shift-or form.
	 * Assumes hashid_t is an unsigned 32-bit type, as implied by the
	 * u32_t blocks fed in by MurmurHash3_x86_32 — confirm against the
	 * typedef if hashid_t ever changes width.
	 */
	inline void bmix32 ( hashid_t& h1, hashid_t& k1, hashid_t& c1, hashid_t& c2 )
	{
		k1 *= c1;
		k1  = ( k1 << 11 ) | ( k1 >> 21 );	// rotl(k1, 11), portable
		k1 *= c2;
		h1 ^= k1;

		h1 = h1*3+0x52dce729;

		// derive the next round's constants
		c1 = c1*5+0x7b7d159c;
		c2 = c2*5+0x6bce6396;
	}

//=============================================================================

	/*!
	 * MurmurHash3 (x86, 32-bit variant) of an arbitrary byte buffer.
	 *
	 * @param ap_key  pointer to the data to hash
	 * @param len     length of the data in bytes
	 * @param seed    seed value xor-folded into the initial state
	 * @return        32-bit hash of the buffer
	 */
	hashid_t MurmurHash3_x86_32( const void* ap_key, int_t len, hashid_t seed )
	{
		const u8_t* p_bytes = ( const u8_t* )ap_key;
		const int_t nblocks = len / 4;

		hashid_t h1 = 0x971e137b ^ seed;

		hashid_t c1 = 0x95543787;
		hashid_t c2 = 0x2ad7eb25;

		//----------
		// body: consume the key four bytes at a time

		const u32_t* blocks = ( const u32_t* )p_bytes;

		for( int_t i = 0; i < nblocks; ++i )
		{
			u32_t k1 = getblock( blocks, i );
			bmix32( h1, k1, c1, c2 );
		}

		//----------
		// tail: fold in the 0-3 bytes left over after the block loop
		// (cases deliberately fall through to accumulate the bytes)

		const u8_t* tail = p_bytes + nblocks * 4;

		hashid_t k1 = 0;

		switch( len & 3 )
		{
		case 3: k1 ^= tail[ 2 ] << 16;
		case 2: k1 ^= tail[ 1 ] << 8;
		case 1: k1 ^= tail[ 0 ];
			bmix32( h1, k1, c1, c2 );
		}

		//----------
		// finalization: mix in the length, then avalanche

		h1 ^= len;

		return fmix32( h1 );
	}

} //namespace CORE