package dedup

import (
	"hash/crc32"
	"math"
)

// FixedSizeChunker splits data into constant-size blocks.
type FixedSizeChunker struct {
	blockSize int64 // size of every chunk except possibly the last
}

// NewFixedSizeChunker returns a chunker that produces blocks of blockSize bytes.
func NewFixedSizeChunker(blockSize int64) *FixedSizeChunker {
	return &FixedSizeChunker{blockSize: blockSize}
}

// GetName returns the name of the chunker.
func (c *FixedSizeChunker) GetName() string {
	return "fixed"
}

// Chunk splits data into fixed-size chunks. Every chunk is blockSize bytes
// except possibly the last, which holds the remainder. Chunk data aliases
// the input slice (no copy is made). Returns nil for empty input.
func (c *FixedSizeChunker) Chunk(data []byte) []Chunk {
	dataLen := int64(len(data))
	if dataLen == 0 {
		return nil
	}

	blockSize := c.blockSize
	if blockSize <= 0 {
		// Guard: a non-positive block size would make the loop below never
		// advance (chunkSize 0 => offset += 0 forever). Degrade to a single
		// chunk instead of hanging.
		return []Chunk{{Data: data, Offset: 0, Size: dataLen}}
	}

	// Pre-size the result: ceil(dataLen / blockSize) chunks.
	chunks := make([]Chunk, 0, (dataLen+blockSize-1)/blockSize)
	for offset := int64(0); offset < dataLen; offset += blockSize {
		end := offset + blockSize
		if end > dataLen {
			end = dataLen // final partial chunk
		}
		chunks = append(chunks, Chunk{
			Data:   data[offset:end],
			Offset: offset,
			Size:   end - offset,
		})
	}

	return chunks
}

// RabinChunker implements Rabin fingerprinting-based chunking.
type RabinChunker struct {
	minChunkSize int64  // lower bound on chunk size
	maxChunkSize int64  // upper bound on chunk size
	avgChunkSize int64  // target average chunk size
	windowSize   int    // rolling-window length in bytes
	polynomial   uint64 // irreducible polynomial used for fingerprinting
}

// NewRabinChunker creates a new Rabin chunker with the given size bounds.
func NewRabinChunker(minChunkSize, maxChunkSize, avgChunkSize int64) *RabinChunker {
	c := &RabinChunker{
		minChunkSize: minChunkSize,
		maxChunkSize: maxChunkSize,
		avgChunkSize: avgChunkSize,
	}
	c.windowSize = 48               // typical window size for Rabin fingerprinting
	c.polynomial = 0x3DA3358B4DC173 // a commonly used irreducible polynomial
	return c
}

// GetName returns the name of the chunker.
func (c *RabinChunker) GetName() string {
	return "rabin"
}

// Chunk splits data into variable-size chunks using Rabin fingerprinting.
// Data shorter than minChunkSize is returned as a single chunk. Chunk data
// aliases the input slice.
func (c *RabinChunker) Chunk(data []byte) []Chunk {
	total := int64(len(data))
	if total < c.minChunkSize {
		// Too small to split; hand back everything as one chunk.
		return []Chunk{{Data: data, Offset: 0, Size: total}}
	}

	var chunks []Chunk
	for start := int64(0); start < total; {
		end := c.findChunkBoundary(data, start, total)
		chunks = append(chunks, Chunk{
			Data:   data[start:end],
			Offset: start,
			Size:   end - start,
		})
		start = end
	}
	return chunks
}

// findChunkBoundary scans forward from start for the next chunk boundary.
// It maintains a rolling Rabin fingerprint over a windowSize-byte window and
// declares a boundary at the first position pos (at least minChunkSize past
// start) whose fingerprint satisfies isBoundary. If no boundary is found
// before start+maxChunkSize (or the end of data), that cap is returned.
func (c *RabinChunker) findChunkBoundary(data []byte, start, dataLen int64) int64 {
	// Hard upper limit for this chunk's end.
	maxEnd := start + c.maxChunkSize
	if maxEnd > dataLen {
		maxEnd = dataLen
	}

	// No boundary may be declared before this position.
	minEnd := start + c.minChunkSize
	if minEnd > dataLen {
		minEnd = dataLen
	}

	// Calculate initial fingerprint for the first window [start, windowEnd).
	windowEnd := start + int64(c.windowSize)
	if windowEnd > dataLen {
		windowEnd = dataLen
	}
	fingerprint := c.calculateFingerprint(data[start:windowEnd])

	// Look for a chunk boundary. Invariant: at each pos the fingerprint
	// covers the window ending at pos, i.e. data[pos-windowSize : pos].
	for pos := windowEnd; pos < maxEnd; pos++ {
		// On the first iteration (pos == windowEnd) the initial fingerprint
		// already covers the window, so no update is needed.
		if pos > windowEnd {
			oldByte := data[pos-int64(c.windowSize)-1] // oldest byte leaving the window
			newByte := data[pos-1]                     // newest byte entering the window
			fingerprint = c.updateFingerprint(fingerprint, oldByte, newByte)
		}

		// Check if this is a chunk boundary (never before minEnd).
		if pos >= minEnd && c.isBoundary(fingerprint) {
			return pos
		}
	}

	// No boundary found within constraints, return max end
	return maxEnd
}

// calculateFingerprint computes the Rabin fingerprint of data: the bytes are
// treated as coefficients of a polynomial over GF(2), reduced modulo
// x^64 + c.polynomial.
func (c *RabinChunker) calculateFingerprint(data []byte) uint64 {
	var fingerprint uint64
	for _, b := range data {
		fingerprint = c.shiftAdd(fingerprint, b)
	}
	return fingerprint
}

// shiftAdd multiplies fingerprint by x (one left shift) and adds b, reducing
// modulo x^64 + c.polynomial. The bit shifted out of position 63 is the x^64
// term, which is replaced by the polynomial's remainder. The previous code
// tested bit 63 *after* shifting, so the overflow bit was silently discarded
// and (since the 54-bit polynomial cannot clear bit 63) the value was never
// actually reduced.
func (c *RabinChunker) shiftAdd(fingerprint uint64, b byte) uint64 {
	top := fingerprint >> 63
	fingerprint = fingerprint<<1 ^ uint64(b)
	if top != 0 {
		fingerprint ^= c.polynomial
	}
	return fingerprint
}

// updateFingerprint slides the fingerprint forward one byte: the oldest
// byte's contribution, oldByte * x^(windowSize-1) mod P, is removed (XOR in
// GF(2)), then the window is shifted and newByte appended. This makes the
// rolling update agree exactly with recomputing calculateFingerprint over
// the slid window — the previous plain "oldByte << (windowSize-1)" removal
// ignored modular reduction, so the fingerprint depended on the chunk's
// starting offset rather than on window content alone, defeating
// content-defined chunking.
//
// NOTE: the x^(windowSize-1) factor is recomputed per call (O(windowSize));
// a precomputed 256-entry table would make this O(1) but requires a new
// struct field.
func (c *RabinChunker) updateFingerprint(fingerprint uint64, oldByte, newByte byte) uint64 {
	// oldByte * x^(windowSize-1) mod (x^64 + polynomial).
	old := uint64(oldByte)
	for i := 0; i < c.windowSize-1; i++ {
		old = c.shiftAdd(old, 0)
	}
	fingerprint ^= old
	return c.shiftAdd(fingerprint, newByte)
}

// isBoundary reports whether fingerprint marks a chunk boundary. The mask
// keeps k = floor(log2(avgChunkSize)) low bits, so a uniformly distributed
// fingerprint fires with probability 1/2^k, yielding chunks of roughly the
// desired average size. math.Ilogb returns the binary exponent exactly,
// avoiding the previous float Pow/Log2 round-trip, which produced a
// non-power-of-two mask for non-power-of-two averages and an undefined
// float-to-uint conversion for avgChunkSize <= 0.
func (c *RabinChunker) isBoundary(fingerprint uint64) bool {
	if c.avgChunkSize <= 1 {
		return true // degenerate average: every position is a boundary
	}
	mask := uint64(1)<<uint(math.Ilogb(float64(c.avgChunkSize))) - 1
	return fingerprint&mask == 0
}

// BuzHashChunker implements BuzHash-based chunking
type BuzHashChunker struct {
	minChunkSize int64
	maxChunkSize int64
	avgChunkSize int64
	windowSize   int
	table        [256]uint64
}

// NewBuzHashChunker creates a new BuzHash chunker
func NewBuzHashChunker(minChunkSize, maxChunkSize, avgChunkSize int64) *BuzHashChunker {
	chunker := &BuzHashChunker{
		minChunkSize: minChunkSize,
		maxChunkSize: maxChunkSize,
		avgChunkSize: avgChunkSize,
		windowSize:   64, // Typical window size for BuzHash
	}

	// Initialize random table for BuzHash
	chunker.initTable()
	return chunker
}

// GetName returns the name of the chunker
func (c *BuzHashChunker) GetName() string {
	return "buzhash"
}

// Chunk splits data into variable-size chunks using BuzHash. Data shorter
// than minChunkSize is returned as a single chunk. Chunk data aliases the
// input slice.
func (c *BuzHashChunker) Chunk(data []byte) []Chunk {
	total := int64(len(data))
	if total < c.minChunkSize {
		// Too small to split; hand back everything as one chunk.
		return []Chunk{{Data: data, Offset: 0, Size: total}}
	}

	var chunks []Chunk
	for start := int64(0); start < total; {
		end := c.findBuzHashBoundary(data, start, total)
		chunks = append(chunks, Chunk{
			Data:   data[start:end],
			Offset: start,
			Size:   end - start,
		})
		start = end
	}
	return chunks
}

// initTable fills the 256-entry randomization table from a fixed-seed linear
// congruential generator, so the table — and therefore chunk boundaries —
// are reproducible across runs.
func (c *BuzHashChunker) initTable() {
	state := uint64(0x1234567890ABCDEF)
	for i := 0; i < len(c.table); i++ {
		state = state*1103515245 + 12345
		c.table[i] = state
	}
}

// findBuzHashBoundary scans forward from start for the next chunk boundary.
// It maintains a rolling BuzHash over a windowSize-byte window and declares
// a boundary at the first position pos (at least minChunkSize past start)
// whose hash satisfies isBuzHashBoundary. If no boundary is found before
// start+maxChunkSize (or the end of data), that cap is returned.
func (c *BuzHashChunker) findBuzHashBoundary(data []byte, start, dataLen int64) int64 {
	// Hard upper limit for this chunk's end.
	maxEnd := start + c.maxChunkSize
	if maxEnd > dataLen {
		maxEnd = dataLen
	}

	// No boundary may be declared before this position.
	minEnd := start + c.minChunkSize
	if minEnd > dataLen {
		minEnd = dataLen
	}

	// Calculate initial BuzHash for the first window [start, windowEnd).
	windowEnd := start + int64(c.windowSize)
	if windowEnd > dataLen {
		windowEnd = dataLen
	}
	hash := c.calculateBuzHash(data[start:windowEnd])

	// Look for a chunk boundary. Invariant: at each pos the hash covers the
	// window ending at pos, i.e. data[pos-windowSize : pos].
	for pos := windowEnd; pos < maxEnd; pos++ {
		// On the first iteration (pos == windowEnd) the initial hash already
		// covers the window, so no update is needed.
		if pos > windowEnd {
			oldByte := data[pos-int64(c.windowSize)-1] // oldest byte leaving the window
			newByte := data[pos-1]                     // newest byte entering the window
			hash = c.updateBuzHash(hash, oldByte, newByte)
		}

		// Check if this is a chunk boundary (never before minEnd).
		if pos >= minEnd && c.isBuzHashBoundary(hash) {
			return pos
		}
	}

	// No boundary found within constraints, return max end
	return maxEnd
}

// calculateBuzHash computes the BuzHash of at most windowSize bytes of data:
// for each byte, rotate the hash left by one and XOR in the byte's table
// entry. This fixes two defects in the previous version: the loop bound was
// checked *after* hashing, so windowSize+1 bytes were consumed, and a
// position-dependent term (table[i%256]) was mixed in that updateBuzHash
// could never reproduce, so rolling updates did not match a fresh
// computation and boundaries were not content-defined.
func (c *BuzHashChunker) calculateBuzHash(data []byte) uint64 {
	if c.windowSize > 0 && len(data) > c.windowSize {
		data = data[:c.windowSize] // hash only the window's worth of bytes
	}
	var hash uint64
	for _, b := range data {
		hash = (hash<<1 | hash>>63) ^ c.table[b]
	}
	return hash
}

// updateBuzHash slides the hash one byte forward. In BuzHash the oldest
// byte's table entry has been rotated windowSize-1 times since insertion,
// plus one more rotation this step, so it is cancelled by XORing its table
// entry rotated left by windowSize (mod 64; with the default windowSize of
// 64 the rotation is the identity). The previous code XORed the un-rotated
// entry before rotating, which removed a value that was never in the hash,
// again breaking the rolling property.
func (c *BuzHashChunker) updateBuzHash(hash uint64, oldByte, newByte byte) uint64 {
	// Rotate the whole window hash once for the new byte.
	hash = hash<<1 | hash>>63

	// Cancel the oldest byte's (now fully rotated) contribution.
	old := c.table[oldByte]
	if k := uint(c.windowSize) % 64; k != 0 {
		old = old<<k | old>>(64-k)
	}
	hash ^= old

	// Mix in the newest byte.
	hash ^= c.table[newByte]

	return hash
}

// isBuzHashBoundary reports whether hash marks a chunk boundary. The mask
// keeps k = floor(log2(avgChunkSize)) low bits, so a uniformly distributed
// hash fires with probability 1/2^k, yielding chunks of roughly the desired
// average size. math.Ilogb returns the binary exponent exactly, avoiding the
// previous float Pow/Log2 round-trip, which produced a non-power-of-two mask
// for non-power-of-two averages and an undefined float-to-uint conversion
// for avgChunkSize <= 0.
func (c *BuzHashChunker) isBuzHashBoundary(hash uint64) bool {
	if c.avgChunkSize <= 1 {
		return true // degenerate average: every position is a boundary
	}
	mask := uint64(1)<<uint(math.Ilogb(float64(c.avgChunkSize))) - 1
	return hash&mask == 0
}

// ContentDefinedChunker implements content-defined chunking using a rolling hash.
type ContentDefinedChunker struct {
	minChunkSize int64  // lower bound on chunk size
	maxChunkSize int64  // upper bound on chunk size
	avgChunkSize int64  // target average chunk size
	windowSize   int    // rolling-window length in bytes
	seed         uint32 // seed for the rolling hash
}

// NewContentDefinedChunker creates a new content-defined chunker with the
// given size bounds.
func NewContentDefinedChunker(minChunkSize, maxChunkSize, avgChunkSize int64) *ContentDefinedChunker {
	c := &ContentDefinedChunker{
		minChunkSize: minChunkSize,
		maxChunkSize: maxChunkSize,
		avgChunkSize: avgChunkSize,
	}
	c.windowSize = 64
	c.seed = 0x12345678
	return c
}

// GetName returns the name of the chunker.
func (c *ContentDefinedChunker) GetName() string {
	return "content-defined"
}

// Chunk splits data into content-defined chunks. Data shorter than
// minChunkSize is returned as a single chunk. Chunk data aliases the input
// slice.
func (c *ContentDefinedChunker) Chunk(data []byte) []Chunk {
	total := int64(len(data))
	if total < c.minChunkSize {
		// Too small to split; hand back everything as one chunk.
		return []Chunk{{Data: data, Offset: 0, Size: total}}
	}

	var chunks []Chunk
	for start := int64(0); start < total; {
		end := c.findContentBoundary(data, start, total)
		chunks = append(chunks, Chunk{
			Data:   data[start:end],
			Offset: start,
			Size:   end - start,
		})
		start = end
	}
	return chunks
}

// findContentBoundary scans forward from start for a content-defined
// boundary using a rolling hash over a windowSize-byte window. A boundary is
// declared at the first position pos (at least minChunkSize past start)
// whose window hash satisfies IsBoundary; if none is found before
// start+maxChunkSize (or the end of data), that cap is returned.
func (c *ContentDefinedChunker) findContentBoundary(data []byte, start, dataLen int64) int64 {
	// Hard upper limit for this chunk's end.
	maxEnd := start + c.maxChunkSize
	if maxEnd > dataLen {
		maxEnd = dataLen
	}

	// No boundary may be declared before this position.
	minEnd := start + c.minChunkSize
	if minEnd > dataLen {
		minEnd = dataLen
	}

	rollingHash := NewRollingHash(c.windowSize, c.seed)

	// Prime the hash with the initial window [start, windowEnd).
	windowEnd := start + int64(c.windowSize)
	if windowEnd > dataLen {
		windowEnd = dataLen
	}
	for i := start; i < windowEnd; i++ {
		rollingHash.Update(data[i])
	}

	// Invariant: at each candidate pos the hash covers the window ending at
	// pos, i.e. data[pos-windowSize : pos] — the same convention as the
	// Rabin and BuzHash chunkers. The previous code rolled in data[pos] and
	// rolled out data[pos-windowSize], which skipped data[windowEnd] and
	// evicted a byte that was not the oldest in the window, so the hash did
	// not correspond to any contiguous window of the input.
	for pos := windowEnd; pos < maxEnd; pos++ {
		if pos > windowEnd {
			rollingHash.Roll(data[pos-1-int64(c.windowSize)], data[pos-1])
		}

		if pos >= minEnd && rollingHash.IsBoundary(c.avgChunkSize) {
			return pos
		}
	}

	// No boundary found within constraints, return max end.
	return maxEnd
}

// RollingHash maintains a CRC32-based hash over a sliding window of bytes
// for content-defined chunking. The hash depends only on the current window
// contents (and the seed), never on how the window was reached — the
// property content-defined chunking requires. The previous implementation
// tried to "remove" a byte by XORing out its single-byte CRC, but CRC32 is
// not a per-byte XOR accumulator, so the hash accumulated the entire input
// history instead of tracking the window.
type RollingHash struct {
	window     []byte // last windowSize bytes seen, oldest first
	hash       uint32 // CRC32 of window, seeded with seed
	windowSize int    // maximum window length
	seed       uint32 // initial CRC state; also the hash of an empty window
}

// NewRollingHash creates a new rolling hash with the given window size and seed.
func NewRollingHash(windowSize int, seed uint32) *RollingHash {
	return &RollingHash{
		window:     make([]byte, 0, windowSize),
		hash:       seed,
		windowSize: windowSize,
		seed:       seed,
	}
}

// recompute refreshes the hash from the current window contents. CRC32
// cannot be rolled incrementally by XOR, so the window (at most windowSize
// bytes) is rehashed; this keeps each operation O(windowSize) but exact.
func (r *RollingHash) recompute() {
	r.hash = crc32.Update(r.seed, crc32.IEEETable, r.window)
}

// Update appends a byte to the window, evicting the oldest byte once the
// window is full, and refreshes the hash.
func (r *RollingHash) Update(b byte) {
	if r.windowSize > 0 && len(r.window) >= r.windowSize {
		// Window full: shift left in place and overwrite the last slot,
		// reusing the backing array instead of reslicing + reallocating.
		copy(r.window, r.window[1:])
		r.window[len(r.window)-1] = b
	} else {
		r.window = append(r.window, b)
	}
	r.recompute()
}

// Roll slides the window forward by one byte: the oldest byte is dropped and
// newByte becomes the most recent. oldByte is accepted for interface
// compatibility; the window itself tracks the byte actually evicted. The
// previous version panicked on an empty window; now the byte is simply
// appended in that case.
func (r *RollingHash) Roll(oldByte, newByte byte) {
	if len(r.window) == 0 {
		r.window = append(r.window, newByte)
	} else {
		copy(r.window, r.window[1:])
		r.window[len(r.window)-1] = newByte
	}
	r.recompute()
}

// IsBoundary reports whether the current hash marks a chunk boundary. The
// mask keeps k = floor(log2(avgChunkSize)) low bits, so a boundary fires
// with probability ~1/2^k, giving chunks of roughly the average size.
// math.Ilogb gives the exponent exactly, avoiding the previous float
// Pow/Log2 round-trip (non-power-of-two mask for non-power-of-two averages;
// undefined conversion for avgChunkSize <= 0).
func (r *RollingHash) IsBoundary(avgChunkSize int64) bool {
	if avgChunkSize <= 1 {
		return true // degenerate average: every position is a boundary
	}
	mask := uint32(1)<<uint(math.Ilogb(float64(avgChunkSize))) - 1
	return r.hash&mask == 0
}

// GetHash returns the current hash value.
func (r *RollingHash) GetHash() uint32 {
	return r.hash
}