package utils

import (
	"fmt"
	"os"
	"path/filepath"
	"crypto/md5"
	"io/ioutil"
	"sync"
)

// Unlike the fully parallel version — which spawns one goroutine per file and
// can therefore exceed the memory the system can provide when a directory
// tree is large — this version bounds the number of goroutines.
//
// The work is split into 3 stages: first walk the files, then calculate
// digests with a fixed worker pool, and finally collect the digests.

func walkFiles(done <-chan struct{}, root string) (<-chan string, <-chan error) {
	paths := make(chan string)
	errc := make(chan error, 1)

	go func ()  {
		defer close(paths)
		errc <- filepath.Walk(root, func (path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if !info.Mode().IsRegular() {
				return nil
			}
			select {
			case paths <- path:
			case <-done:
				return fmt.Errorf("walk cancelled")
			}

			return nil
		})
	}()

	return paths, errc
}

// digester is a stage-2 worker: it reads file paths from paths, computes the
// MD5 digest of each file's contents, and sends a result on c. It returns when
// paths is closed (walk finished) or when done is closed (pipeline cancelled).
// Read errors are not handled here; they travel downstream inside the result.
func digester(done <-chan struct{}, paths <-chan string, c chan<- result) {
	for path := range paths {
		content, readErr := ioutil.ReadFile(path)
		r := result{path, md5.Sum(content), readErr}
		select {
		case <-done:
			return
		case c <- r:
		}
	}
}

func collectDigest(done <-chan struct{}, res)

// Tmd5UseBoundedGoroutine walks the file tree rooted at root and returns a
// map from file path to the MD5 sum of that file's contents, or the first
// error encountered (either a read error from a digester or an error from the
// walk itself). Unlike a fully parallel version, it bounds the digesting work
// to a fixed pool of goroutines so a huge tree cannot exhaust memory.
//
// Fix: the original body ended without returning, which is a Go compile error
// ("missing return"); the success path now returns (m, nil).
func Tmd5UseBoundedGoroutine(root string) (map[string][md5.Size]byte, error) {
	done := make(chan struct{})
	// Closing done on every exit path unblocks the walker and the digesters
	// if we bail out early on an error.
	defer close(done)

	// Stage 1: walk the tree, emitting paths of regular files.
	paths, errc := walkFiles(done, root)

	// Stage 2: a fixed pool of digesters consumes paths and emits results on c.
	c := make(chan result)
	var wg sync.WaitGroup
	const numDigesters = 20
	wg.Add(numDigesters)
	for i := 0; i < numDigesters; i++ {
		go func() {
			digester(done, paths, c)
			wg.Done()
		}()
	}

	// Close c once every digester has finished, so the range below terminates.
	go func() {
		wg.Wait()
		close(c)
	}()

	// Stage 3: collect results, failing fast on the first read error.
	m := make(map[string][md5.Size]byte)
	for r := range c {
		if r.err != nil {
			return nil, r.err
		}
		m[r.path] = r.sum
	}

	// Surface any error from the tree walk itself. errc is buffered, so the
	// walker never blocks on this send even when we returned early above.
	if err := <-errc; err != nil {
		return nil, err
	}
	return m, nil
}