package nsfs

import (
	"GLibHac/fssystem"
	"GLibHac/tools"
	"encoding/binary"
	"fmt"
	"github.com/pkg/errors"
	"math"
)

// CompressedStorageNodeSize is the bucket-tree node size, in bytes, used
// by compressed storage (0x4000 = 16 KiB per node).
const CompressedStorageNodeSize = 0x4000

// CompressedStorageEntrySize is the on-disk size of one Entry record
// (0x16 = 22 bytes: two int64 offsets, a type byte, a level byte, and a
// uint32 physical size — see NewEntry).
const CompressedStorageEntrySize = 0x16

// CompressionType identifies how a chunk's physical data is encoded.
type CompressionType byte

// Chunk encodings in on-disk value order (0..3).
const (
	CompressionTypeNone    = CompressionType(iota) // stored uncompressed
	CompressionTypeZeroed                          // sparse; reads back as zeros
	CompressionTypeLz4                             // LZ4 block compression
	CompressionTypeUnknown                         // present in the format, not handled by Read
)

// compressionTypeNames maps the known on-disk values to display names.
var compressionTypeNames = [...]string{"None", "Zeroed", "Lz4", "Unknown"}

// String returns a human-readable name for the compression type, or a
// "CompressionType(n)" placeholder for values outside the known range.
func (c CompressionType) String() string {
	if int(c) < len(compressionTypeNames) {
		return compressionTypeNames[c]
	}
	return fmt.Sprintf("CompressionType(%d)", c)
}

// Entry describes one chunk in the compressed-storage mapping: where the
// chunk lives in the virtual (decompressed) stream, where its physical
// bytes start, how it is encoded, and how large the physical data is.
type Entry struct {
	VirtualOffset  int64
	PhysicalOffset int64
	CompressionType
	CompressionLevel byte
	PhysicalSize     uint32
}

// NewEntry decodes a single 22-byte (CompressedStorageEntrySize) on-disk
// entry record. All multi-byte fields are little-endian.
func NewEntry(data []byte) *Entry {
	e := &Entry{}
	e.VirtualOffset = int64(binary.LittleEndian.Uint64(data[0:8]))
	e.PhysicalOffset = int64(binary.LittleEndian.Uint64(data[8:16]))
	e.CompressionType = CompressionType(data[16])
	e.CompressionLevel = data[17]
	e.PhysicalSize = binary.LittleEndian.Uint32(data[18:22])
	return e
}

// CompressedStorage presents a transparently-decompressed view over
// chunked, possibly-compressed data: Read resolves virtual offsets to
// physical chunks via the bucket tree and decodes each chunk according
// to its CompressionType.
type CompressedStorage struct {
	fssystem.IStorageAbstract
	// bucketTree maps virtual (decompressed) offsets to Entry records
	// describing each physical chunk.
	bucketTree  *BucketTree
	// dataStorage holds the physical chunk data referenced by the entries.
	dataStorage fssystem.IStorage
}

// Read fills destination with decompressed data starting at the given
// virtual offset. The requested range is resolved to physical chunks via
// the bucket tree; each chunk is read and decoded according to its
// compression type. On success it returns the number of bytes written
// (always len(destination), since the range is validated up front).
func (i *CompressedStorage) Read(offset int64, destination []byte) (int, error) {
	// Validate that the requested range lies within the virtual storage.
	offsets := i.bucketTree.GetOffsets()
	if !offsets.IsInclude(offset, int64(len(destination))) {
		return 0, errors.Errorf("out of range offset: %d", offset)
	}

	// Locate the entry containing the starting offset.
	visitor := NewBucketTreeVisitor(i.bucketTree, offsets)
	if err := visitor.Find(offset); err != nil {
		return 0, err
	}
	entryOffset := visitor.Entry.VirtualOffset
	if entryOffset < 0 || !offsets.IsIncludeOffset(entryOffset) {
		return 0, errors.Errorf("invalid entry offset in compressed storage: %d", entryOffset)
	}

	// Operate chunk by chunk until the requested range is satisfied.
	currentOffset := offset
	endOffset := offset + int64(len(destination))

	// Scratch buffers reused across chunks, grown on demand.
	var workBufferEnc []byte // compressed (physical) chunk data
	var workBufferDec []byte // decompressed (virtual) chunk data

	for currentOffset < endOffset {
		currentEntry := visitor.Entry

		// The entry must start at or before the position we are reading.
		currentEntryOffset := currentEntry.VirtualOffset
		if currentEntryOffset > currentOffset {
			return 0, errors.Errorf("entry offset %d is past the current read offset in compressed storage", currentEntryOffset)
		}

		// Determine where this entry ends: the start of the next entry,
		// or the end of the virtual storage for the final entry.
		var nextEntryOffset int64
		if visitor.CanMoveNext() {
			if err := visitor.MoveNext(); err != nil {
				return 0, err
			}
			nextEntryOffset = visitor.Entry.VirtualOffset
			if !offsets.IsIncludeOffset(nextEntryOffset) {
				return 0, errors.Errorf("next entry offset %d is out of range in compressed storage", nextEntryOffset)
			}
		} else {
			nextEntryOffset = offsets.EndOffset
		}
		if currentOffset >= nextEntryOffset {
			return 0, errors.Errorf("next entry offset %d does not advance past the current offset in compressed storage", nextEntryOffset)
		}

		// Portion of this entry we still need, and how much of it fits.
		dataOffsetInEntry := currentOffset - currentEntryOffset
		currentEntrySize := nextEntryOffset - currentEntryOffset
		remainingSize := endOffset - currentOffset
		// math.Min via float64 is exact for sizes below 2^53; chunk sizes
		// are far smaller than that in practice.
		toWriteSize := int64(math.Min(float64(remainingSize), float64(currentEntrySize-dataOffsetInEntry)))

		switch currentEntry.CompressionType {
		case CompressionTypeLz4:
			workBufferEnc = ensureBufferSize(workBufferEnc, int(currentEntry.PhysicalSize))
			workBufferDec = ensureBufferSize(workBufferDec, int(currentEntrySize))

			// Slice the scratch buffers to the exact chunk sizes so stale
			// bytes from a previous, larger chunk are never fed to (or
			// produced by) the decompressor.
			encBuffer := workBufferEnc[:currentEntry.PhysicalSize]
			decBuffer := workBufferDec[:currentEntrySize]

			if n, err := i.dataStorage.Read(currentEntry.PhysicalOffset, encBuffer); err != nil {
				return n, err
			}
			if err := tools.Lz4Decompress(encBuffer, decBuffer); err != nil {
				return 0, err
			}
			copy(destination[currentOffset-offset:], decBuffer[dataOffsetInEntry:dataOffsetInEntry+toWriteSize])
		case CompressionTypeNone:
			// Uncompressed chunk: read straight into the destination.
			if n, err := i.dataStorage.Read(currentEntry.PhysicalOffset+dataOffsetInEntry, destination[currentOffset-offset:currentOffset-offset+toWriteSize]); err != nil {
				return n, err
			}
		case CompressionTypeZeroed:
			// Sparse chunk: no physical backing, just zero-fill.
			for x := int64(0); x < toWriteSize; x++ {
				destination[currentOffset-offset+x] = 0
			}
		default:
			// Previously unknown types were silently skipped, leaving
			// whatever bytes were already in destination — fail loudly.
			return 0, errors.Errorf("unsupported compression type %d in compressed storage", currentEntry.CompressionType)
		}
		currentOffset += toWriteSize
	}
	return len(destination), nil
}

// ensureBufferSize returns buffer when it already holds at least
// requiredSize bytes; otherwise it allocates a fresh slice of exactly
// requiredSize. Existing contents are NOT preserved across reallocation.
func ensureBufferSize(buffer []byte, requiredSize int) []byte {
	if buffer != nil && len(buffer) >= requiredSize {
		return buffer
	}
	return make([]byte, requiredSize)
}

// Size returns the virtual (decompressed) size of the storage.
//
// This is the bucket tree's end offset rather than the size of the
// physical data storage: Read validates offsets against the tree's
// virtual range, so reporting the (smaller) compressed physical size
// would understate — and disagree with — the addressable range.
func (i *CompressedStorage) Size() int64 {
	return i.bucketTree.GetOffsets().EndOffset
}

// NewCompressedStorage builds a CompressedStorage whose chunk table is
// parsed from nodeStorage/entryStorage and whose physical chunk data
// lives in dataStorage. bucketTreeEntryCount is the number of Entry
// records in the table.
func NewCompressedStorage(dataStorage, nodeStorage fssystem.IStorage, entryStorage fssystem.IStorage, bucketTreeEntryCount int32) (*CompressedStorage, error) {
	// NOTE(review): NodeHeaderSize is passed where a node size might be
	// expected — confirm NewBucketTree's third parameter really is the
	// header size and not CompressedStorageNodeSize.
	tree, err := NewBucketTree(nodeStorage, entryStorage, NodeHeaderSize, CompressedStorageEntrySize, bucketTreeEntryCount)
	if err != nil {
		return nil, err
	}

	storage := &CompressedStorage{
		bucketTree:  tree,
		dataStorage: dataStorage,
	}
	// Wire the abstract storage callbacks to this instance's methods.
	storage.ReadFunc = storage.Read
	storage.SizeFunc = storage.Size
	return storage, nil
}

func QueryEntryStorageSize(entryCount int32) int64 {
	return QueryNodeStorageSizeFull(CompressedStorageNodeSize, CompressedStorageEntrySize, entryCount)
}

// QueryNodeStorageSizeFull reports the total storage size occupied by
// the entry sets of a bucket tree: one node of nodeSize bytes per entry
// set. Returns 0 when there are no entries.
func QueryNodeStorageSizeFull(nodeSize, entrySize, entryCount int32) int64 {
	if entryCount <= 0 {
		return 0
	}
	// Widen BEFORE multiplying: the original multiplied two int32 values
	// and only then converted, which can overflow for very large trees.
	return int64(GetEntrySetCount(nodeSize, entrySize, entryCount)) * int64(nodeSize)
}

// QueryNodeStorageSize reports the storage size, in bytes, required for
// the index nodes (one L1 node plus any L2 nodes) of a compressed-storage
// bucket tree holding entryCount entries. Returns 0 when there are no
// entries.
func QueryNodeStorageSize(entryCount int32) (int64, error) {
	if entryCount <= 0 {
		return 0, nil
	}
	l2Count, err := GetNodeL2Count(CompressedStorageNodeSize, CompressedStorageEntrySize, entryCount)
	if err != nil {
		return 0, err
	}
	// Widen BEFORE multiplying to avoid int32 overflow for large trees.
	return int64(1+l2Count) * int64(CompressedStorageNodeSize), nil
}

// GetNodeL2Count computes how many second-level (L2) index nodes a
// bucket tree needs to address all of its entry sets. Returns 0 when the
// single L1 node can reference every entry set directly.
func GetNodeL2Count(nodeSize, entrySize, entryCount int32) (int32, error) {
	offsetCountPerNode := GetOffsetCount(nodeSize)
	entrySetCount := GetEntrySetCount(nodeSize, entrySize, entryCount)

	// Every entry-set offset fits in the L1 node: no L2 layer required.
	if entrySetCount <= offsetCountPerNode {
		return 0, nil
	}

	nodeL2Count := int32(tools.DivideUp(int64(entrySetCount), int64(offsetCountPerNode)))
	// The L1 node must be able to index every L2 node, i.e. the invariant
	// is nodeL2Count <= offsetCountPerNode. The original condition was
	// inverted and rejected every valid tree that reached this point
	// (cf. LibHac's Abort.DoAbortUnless(nodeL2Count <= offsetCountPerNode)).
	if nodeL2Count > offsetCountPerNode {
		return 0, errors.Errorf("NodeL2Count %d is out of range, must be at most %d", nodeL2Count, offsetCountPerNode)
	}
	return int32(tools.DivideUp(int64(entrySetCount-(offsetCountPerNode-(nodeL2Count-1))), int64(offsetCountPerNode))), nil
}

// GetOffsetCount reports how many 8-byte offsets fit in a bucket-tree
// node after its header.
func GetOffsetCount(nodeSize int32) int32 {
	const offsetSize = 8 // each stored offset occupies one int64
	return (nodeSize - NodeHeaderSize) / offsetSize
}
