package reader

import (
	"fmt"
	"log"
	"mig/pkg/chunk"
	"mig/pkg/data"
	"mig/pkg/types"
	"sync"
	"sync/atomic"
)

// DefaultReader reads an entire database in fixed-size chunks, computing a
// digest per chunk, and assembles the results into an IntegrityInfo.
type DefaultReader struct {
	Database   data.Database // source database to read and digest
	ChunkSize  int64         // number of rows per chunk; must be > 0
	MaxWorkers int           // number of concurrent chunk-reading goroutines
}

// readDatabase reads the database in chunks of chunkSize rows using up to
// maxWorkers concurrent goroutines, computes a digest per chunk, and returns
// one DigestNode per chunk with IDs of the form "<identifier>_<chunkNo>".
//
// If any chunk fails to read, the first error encountered is returned and the
// partial results are discarded. A maxWorkers value below 1 is treated as 1.
func readDatabase(database data.Database, chunkSize int64, maxWorkers int) ([]*types.DigestNode, error) {
	// Guard against division by zero below.
	if chunkSize <= 0 {
		return nil, fmt.Errorf("invalid chunk size: %d", chunkSize)
	}
	// With zero workers nothing would ever be read; clamp to one.
	if maxWorkers < 1 {
		maxWorkers = 1
	}

	totalRows, err := database.Count()
	if err != nil {
		return nil, err
	}
	log.Printf("total rows: %d, chunk size: %d", totalRows, chunkSize)
	totalChunks := totalRows / chunkSize
	if totalRows%chunkSize > 0 {
		totalChunks++ // final partial chunk
	}

	// chunkNo is a shared atomic cursor; each worker claims the next chunk
	// index by incrementing it. Starts at -1 so the first claim yields 0.
	chunkNo := int64(-1)
	wg := sync.WaitGroup{}
	digests := make([]*types.DigestNode, totalChunks)
	// mtx guards firstErr; each digests slot is written by exactly one worker.
	mtx := sync.Mutex{}
	var firstErr error
	for i := 0; i < maxWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				currentNo := atomic.AddInt64(&chunkNo, 1)
				if currentNo >= totalChunks {
					return
				}

				ck, workerErr := database.ReadChunk(currentNo, chunkSize)
				if workerErr != nil {
					// Record the first failure; keep draining the remaining
					// chunk numbers so all workers terminate promptly.
					mtx.Lock()
					if firstErr == nil {
						firstErr = fmt.Errorf("reading chunk %d: %w", currentNo, workerErr)
					}
					mtx.Unlock()
					continue
				}

				mtx.Lock()
				digests[currentNo] = ck.Digest()
				mtx.Unlock()
			}
		}()
	}
	wg.Wait()
	// Propagate failures instead of returning a slice with nil entries,
	// which would panic in the ID-assignment loop below.
	if firstErr != nil {
		return nil, firstErr
	}
	for chunkNo, digestNode := range digests {
		digestNode.ID = fmt.Sprintf("%s_%d", database.Identifier(), chunkNo)
	}
	return digests, nil
}

// Read digests the configured database chunk by chunk and wraps the resulting
// digest nodes in an IntegrityInfo appropriate for the requested algorithm.
// It returns an error if the database cannot be read or the algorithm is
// unknown.
func (r *DefaultReader) Read(algorithm types.Algorithm) (*types.IntegrityInfo, error) {
	digests, err := readDatabase(r.Database, r.ChunkSize, r.MaxWorkers)
	if err != nil {
		// Wrap with this layer's context; the caller decides whether to log.
		return nil, fmt.Errorf("reading database: %w", err)
	}
	switch algorithm {
	case types.AlgorithmDigest, types.AlgorithmDigestV2:
		return types.NewDigestIntegrityInfo(digests), nil
	case types.AlgorithmBloomFilter:
		return types.NewBloomFilterIntegrityInfo(digests), nil
	case types.AlgorithmCommon:
		return &types.IntegrityInfo{DigestNodes: digests}, nil
	default:
		return nil, fmt.Errorf("unknown algorithm: %v", algorithm)
	}
}

// NewDefaultReader builds a DefaultReader over database that reads chunkSize
// rows at a time with at most maxWorkers concurrent readers.
func NewDefaultReader(database data.Database, chunkSize int64, maxWorkers int) *DefaultReader {
	reader := DefaultReader{
		Database:   database,
		ChunkSize:  chunkSize,
		MaxWorkers: maxWorkers,
	}
	return &reader
}
