package exp

import (
	"bytes"
	"cluster-cache/common"
	"encoding/binary"
	"golang.org/x/sys/unix"
	"sync"
	"sync/atomic"
)

// FastMap is a sharded hash map storing key/value records off-heap.
// The common case (no hash collision) is served from cache by the 64-bit
// hash alone; keys displaced by a collision are demoted to mem, keyed by
// their full key string.
type FastMap struct {
	mem   []map[string]uint64 // key -> value (heap locator of the value bytes; collision overflow)
	cache []map[uint64]uint64 // hash -> (id,pos) heap locator of the full record
	locks []*sync.RWMutex     // one lock per bucket; guards mem[i] and cache[i]
	size  uint64              // number of buckets (len of mem/cache/locks)
	heap  *OffHeap            // shared off-heap backing store for all buckets
	hash  common.Hash         // 64-bit hash function for keys
}

// defaultSegmentSize is the size of each mmap'ed off-heap segment (1 GiB).
const defaultSegmentSize = 1 * 1024 * 1024 * 1024

// NewFastMap creates a FastMap with size buckets, each guarded by its own
// RWMutex, all sharing one off-heap store and the given hash function.
func NewFastMap(size int, hash common.Hash) *FastMap {
	m := &FastMap{
		mem:   make([]map[string]uint64, size),
		cache: make([]map[uint64]uint64, size),
		locks: make([]*sync.RWMutex, size),
		size:  uint64(size),
		heap:  NewOffHeap(defaultSegmentSize),
		hash:  hash,
	}
	for i := range m.mem {
		m.mem[i] = map[string]uint64{}
		m.cache[i] = map[uint64]uint64{}
		m.locks[i] = &sync.RWMutex{}
	}
	return m
}

// Add inserts or overwrites the value stored under key.
//
// Heap record layout (after OffHeap's own 2-byte length prefix):
//
//	lenKey(2) | key | lenValue(2) | value
//
// On a hash collision the resident cache entry is demoted into the
// per-bucket mem map (keyed by its full key, pointing at its value bytes);
// on a same-key overwrite the old heap record is cleared.
func (k *FastMap) Add(key, value []byte) {
	hash := k.hash.Sum64(key)
	bucket := hash % k.size
	k.locks[bucket].Lock()
	defer k.locks[bucket].Unlock()

	if val, ok := k.cache[bucket][hash]; ok {
		buf := k.heap.Read(val)
		pos := 0
		lenKey := binary.LittleEndian.Uint16(buf)
		pos += 2
		// lenKey == 0 means the record was cleared; nothing to preserve.
		if lenKey != 0 {
			oldKey := buf[pos:(pos + int(lenKey))]
			if !bytes.Equal(key, oldKey) {
				// Hash collision: move the resident entry to mem, pointing
				// directly at its lenValue field inside the heap record.
				pos += int(lenKey)
				k.mem[bucket][string(oldKey)] = k.heap.Offset(val, uint64(pos))
			} else {
				// Same key: the old record is superseded, clear it.
				k.heap.clear(val)
			}
		}
	}

	// lenKey(2) + key + lenValue(2) + value
	size := 4 + len(key) + len(value)
	seq := k.heap.Allocate(uint64(size))
	if seq == 0 {
		// Allocate rejects records larger than 65535 bytes; writing through
		// the zero locator would index the nil sentinel segment and panic
		// with an opaque runtime error, so fail loudly here instead.
		panic("FastMap: key/value entry exceeds 65535 bytes")
	}
	buf := make([]byte, size)
	pos := 0
	binary.LittleEndian.PutUint16(buf[pos:], uint16(len(key)))
	pos += 2
	copy(buf[pos:], key)
	pos += len(key)

	binary.LittleEndian.PutUint16(buf[pos:], uint16(len(value)))
	pos += 2
	copy(buf[pos:], value)
	k.heap.Write(seq, buf)
	k.cache[bucket][hash] = seq

	// The key now lives in the cache; drop any stale copy a past collision
	// may have left in mem, otherwise a later collision that evicts this
	// key's cache slot would let Query resurrect the outdated value.
	delete(k.mem[bucket], string(key))
}

// Query returns a copy of the value stored under key, or nil when the key
// is absent. It first probes the hash-indexed cache and, on a miss or a
// key mismatch (collision), falls back to the per-bucket mem map.
func (k *FastMap) Query(key []byte) []byte {
	h := k.hash.Sum64(key)
	bucket := h % k.size
	k.locks[bucket].RLock()
	defer k.locks[bucket].RUnlock()

	if seq, ok := k.cache[bucket][h]; ok {
		rec := k.heap.Read(seq)
		keyLen := int(binary.LittleEndian.Uint16(rec))
		if keyLen != 0 && bytes.Equal(key, rec[2:2+keyLen]) {
			valPos := 2 + keyLen
			valLen := int(binary.LittleEndian.Uint16(rec[valPos:]))
			valPos += 2
			return rec[valPos : valPos+valLen]
		}
	}
	if seq, ok := k.mem[bucket][common.BytesToString(key)]; ok {
		return k.heap.Read(seq)
	}
	return nil
}

// Delete removes key from the map: it clears the heap record and cache
// slot when the cached entry actually holds this key, and unconditionally
// drops any collision-overflow entry from the mem map.
func (k *FastMap) Delete(key []byte) {
	h := k.hash.Sum64(key)
	bucket := h % k.size
	k.locks[bucket].Lock()
	defer k.locks[bucket].Unlock()

	if seq, ok := k.cache[bucket][h]; ok {
		rec := k.heap.Read(seq)
		keyLen := int(binary.LittleEndian.Uint16(rec))
		if keyLen != 0 && bytes.Equal(key, rec[2:2+keyLen]) {
			k.heap.clear(seq)
			delete(k.cache[bucket], h)
		}
	}
	delete(k.mem[bucket], string(key))
}

// OffHeap is an append-only bump allocator over mmap'ed anonymous memory
// segments. Locators returned by Allocate pack the segment id in the high
// 32 bits and the byte offset within that segment in the low 32 bits.
// Records are never reclaimed; clear only zeroes a record's length prefix.
type OffHeap struct {
	sync.RWMutex
	id          uint64   // index into data of the segment currently being filled
	pos         uint64   // bump pointer: next free byte offset within segment id
	data        [][]byte // mmap'ed segments; index 0 is a nil sentinel so locator 0 is invalid
	segmentSize uint64   // capacity in bytes of every segment
}

// intBits is the bit position where the segment id is packed into a
// locator; intMask extracts the 32-bit byte offset from the low half.
const intBits = uint64(32)
const intMask = (1 << intBits) - 1

// NewOffHeap builds an off-heap store whose segments are size bytes each.
// It panics when size does not fit a 32-bit locator offset, or when the
// initial segment cannot be mapped.
func NewOffHeap(size int) *OffHeap {
	if size >= intMask {
		panic("too big cache")
	}
	h := &OffHeap{
		// Index 0 stays nil so that locator 0 never points at real data.
		data:        make([][]byte, 1, 1024),
		segmentSize: uint64(size),
	}
	if err := h.NewFile(); err != nil {
		panic(err)
	}
	return h
}

// Allocate reserves size bytes (plus the 2-byte record length prefix that
// Write emits) and returns a locator packing the segment id in the high
// 32 bits and the byte offset in the low 32 bits. It returns 0 — an
// invalid locator, since segment 0 is a nil sentinel — when size exceeds
// the 65535-byte record limit.
//
// The whole allocation runs under the heap lock. The previous lock-free
// fast path read id non-atomically and, worse, never reset pos after
// rolling over to a fresh segment, so offsets grew past segmentSize and
// Write indexed out of range of the new segment.
func (o *OffHeap) Allocate(size uint64) uint64 {
	if size > 65535 {
		return 0
	}
	// Account for the 2-byte length prefix written by Write.
	size += 2

	o.Lock()
	defer o.Unlock()

	end := atomic.AddUint64(&o.pos, size)
	if end >= o.segmentSize-4 {
		// Current segment is full: map a fresh one and restart the bump
		// pointer at its beginning.
		if err := o.NewFile(); err != nil {
			panic(err)
		}
		atomic.StoreUint64(&o.pos, size)
		end = size
	}
	return (end - size) | (o.id << intBits)
}

// Write stores value at the location named by seq: a 2-byte little-endian
// length followed by the value bytes.
func (o *OffHeap) Write(seq uint64, value []byte) {
	segment := o.data[seq>>intBits]
	offset := seq & intMask
	binary.LittleEndian.PutUint16(segment[offset:], uint16(len(value)))
	copy(segment[offset+2:], value)
}

// clear marks the record at seq as deleted by zeroing its length prefix;
// the record's bytes are not reclaimed.
func (o *OffHeap) clear(seq uint64) {
	segment := o.data[seq>>intBits]
	binary.LittleEndian.PutUint16(segment[seq&intMask:], 0)
}

// Read returns a copy of the record stored at seq, sized by its 2-byte
// length prefix.
func (o *OffHeap) Read(seq uint64) []byte {
	segment := o.data[seq>>intBits]
	offset := seq & intMask
	n := binary.LittleEndian.Uint16(segment[offset:])
	out := make([]byte, n)
	copy(out, segment[offset+2:])
	return out
}

// Offset derives a new locator pointing offset bytes into the record at
// seq (skipping the record's 2-byte length prefix), within the same
// segment.
func (o *OffHeap) Offset(seq uint64, offset uint64) uint64 {
	id := seq >> intBits
	return ((seq & intMask) + offset + 2) | (id << intBits)
}

// NewFile maps a fresh anonymous memory segment of segmentSize bytes and
// makes it the current allocation target. The segment id is advanced only
// after the mapping succeeds: previously id was bumped first, so a failed
// mmap left it pointing at a segment that was never appended, and a later
// access would index out of range.
func (o *OffHeap) NewFile() error {
	data, err := unix.Mmap(-1, 0, int(o.segmentSize), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_ANON|unix.MAP_PRIVATE)
	if err != nil {
		return err
	}
	o.data = append(o.data, data)
	o.id++
	return nil
}
