package dup

import (
	"github.com/raspi/dirscanner"
	"log"
	"math"
	"os"
	"runtime"
	"time"
)

const (
	// MEBIBYTE is the read-chunk size in bytes (1 MiB) used for the
	// partial-content comparison passes; Run requires it to be a power
	// of two (checked via isPowerOfTwo).
	MEBIBYTE = 1048576
	// GIGABYTE = 1073741824
)

// getFilterFunc builds the predicate handed to the directory scanner.
// Only non-empty regular files pass; empty files and anything with a
// type bit set (directory, symlink, device, socket, ...) are skipped.
func getFilterFunc() dirscanner.FileValidatorFunction {
	return func(info dirscanner.FileInformation) bool {
		// Non-zero size AND no os.ModeType bits == a regular file
		// with content — exactly the files worth deduplicating.
		return info.Size != 0 && info.Mode&os.ModeType == 0
	}
}

// KeepFile identifies the best candidate to keep within a group of
// duplicate files, selected in GetDuplicateList.
type KeepFile struct {
	Priority uint8  // keep-priority; derived from directory scan order in Run
	INode    uint64 // filesystem inode number, used as a tie-breaker
}

// Run scans the given directories for duplicate files and appends every
// removable copy (all but one file per duplicate group) to removeFiles.
// Directories listed earlier in dirs receive a higher keep-priority, so
// when a duplicate group spans directories the copy from the
// earliest-listed directory is the one kept. When actuallyRemove is
// true the duplicates are also deleted from disk with os.Remove.
//
// Returns the first error encountered; on error the scan is abandoned
// and removeFiles may be partially filled.
func Run(dirs []string, removeFiles *[]FileInfo, actuallyRemove bool) error {
	readSize := int64(MEBIBYTE)

	// Partial-content passes read fixed-size chunks; the chunk size must
	// be a power of two. This guards against someone changing MEBIBYTE.
	if !isPowerOfTwo(uint64(readSize)) {
		log.Fatalf(`readSize (%v) is not power of two`, readSize)
	}

	// Validate every directory up front so we fail before doing any work.
	for _, dir := range dirs {
		_, err := isDirectory(dir)
		if err != nil {
			return err
		}
	}

	if actuallyRemove {
		log.Printf("ACTUALLY DELETING FILES, PRESS \"CTRL+C\" TO ABORT\n")
	} else {
		log.Printf("just scanning not actually deleting files\n")
	}

	// Ticker for periodic progress stats.
	ticker := time.NewTicker(time.Second * 1)
	defer ticker.Stop()
	now := time.Now()
	workerCount := runtime.NumCPU()

	dupes := New(ticker, &now, workerCount)

	filterFunc := getFilterFunc()

	// Look-up table for inodes: hard links share an inode, and a file
	// must not be reported as a duplicate of itself.
	seenInodes := map[uint64]bool{}

	// Keep-priority for files found in the current directory; earlier
	// directories get higher priority.
	//
	// BUG FIX: this was previously declared (and thus re-initialized to
	// MaxUint8) inside the directory loop, which made the `prior--` at
	// the bottom of the loop a no-op — every directory ended up with the
	// same priority and GetDuplicateList could not prefer earlier dirs.
	prior := uint8(math.MaxUint8)

	// First get a recursive file listing.
	for _, dir := range dirs {
		scanner := dirscanner.New()

		err := scanner.Init(workerCount*2, filterFunc)
		if err != nil {
			return err
		}

		err = scanner.ScanDirectory(dir)
		if err != nil {
			return err
		}

		fileCount := 0 // files accepted from this directory (progress stat)

	scallop:
		for {
			select {

			case <-scanner.Finished: // Finished getting file list
				break scallop

			case e, ok := <-scanner.Errors: // Scanner reported an error; abort
				if ok {
					return e
				}

			case <-scanner.Information: // Drain worker progress information

			case <-ticker.C: // Place to display periodic progress stats

			case res, ok := <-scanner.Results:
				if ok {
					fileCount++

					// Register each inode only once so hard links are
					// not treated as duplicate content.
					if !seenInodes[res.Identifier] {
						seenInodes[res.Identifier] = true
						err := dupes.AddFile(newFileInfo(prior, res))
						if err != nil {
							return err
						}
					}
				}
			}
		}

		err = scanner.Close()
		if err != nil {
			return err
		}

		// The next directory in the list gets a lower keep-priority.
		prior--
	} // End of recursive scan

	// Progressively narrow the candidate set: drop unique sizes, then
	// files differing in their first readSize bytes, then in their last
	// readSize bytes, and finally hash whatever remains.
	dupes.ReportStats()
	dupes.RemoveFileOrphans()
	dupes.ReportStats()
	dupes.RemoveBasedOnBytes(readSize, READ_FIRST)
	dupes.ReportStats()
	dupes.RemoveBasedOnBytes(readSize, READ_LAST)
	dupes.ReportStats()

	deletedCount := uint64(0)
	deletedSize := uint64(0)

	hashed := dupes.HashDuplicates(readSize)
	dupes.Reset()

	for _, group := range GetDuplicateList(hashed) {
		for idx, f := range group {
			// Index 0 holds the file selected to keep.
			if idx == 0 {
				continue
			}

			deletedSize += f.Size
			deletedCount++

			*removeFiles = append(*removeFiles, f)

			if actuallyRemove {
				err := os.Remove(f.Path)
				if err != nil {
					return err
				}
			}
		}
	}

	return nil
}

// GetDuplicateList flattens the hash->size->files map produced by the
// hashing pass into groups of identical files. In each returned group
// the file to KEEP is at index 0, followed by the discardable copies.
// The kept file is the one with the highest Priority (i.e. from the
// earliest-scanned directory); ties are broken by the lowest inode.
func GetDuplicateList(m map[string]map[uint64][]FileInfo) (dupes [][]FileInfo) {
	for _, bySize := range m {
		for _, files := range bySize {

			// Find the best candidate to keep.
			//
			// BUG FIX: the previous condition was
			//   file.INode < best.INode && file.Priority >= best.Priority
			// which is order-dependent: a higher-priority file seen after
			// a lower-inode, lower-priority file could never win, because
			// its inode was not smaller. Priority must be compared first,
			// with inode only as a tie-breaker.
			best := KeepFile{
				Priority: 0,
				INode:    math.MaxUint64,
			}

			for _, file := range files {
				if file.Priority > best.Priority ||
					(file.Priority == best.Priority && file.INode < best.INode) {
					best.INode = file.INode
					best.Priority = file.Priority
				}
			}

			// Partition into the kept file and the discards.
			var keep FileInfo
			var discard []FileInfo

			for _, file := range files {
				if file.INode == best.INode {
					keep = file
				} else {
					discard = append(discard, file)
				}
			}

			// Kept file first, discards after.
			selected := make([]FileInfo, 0, len(files))
			selected = append(selected, keep)
			selected = append(selected, discard...)

			dupes = append(dupes, selected)
		}
	}

	return dupes
}
