package idx

import (
	"context"
	"enode/store"
	"fmt"
	"golang.org/x/sync/errgroup"
	"io"
	fs2 "io/fs"
	"log"
	"os"
	"path/filepath"
	"strings"
	"sync/atomic"
	"time"
)

// LargeFile is a piece store that packs pieces into shared index/data
// files under dataDir, caching open FileStore handles in a pool and
// periodically cleaning up expired data in a background goroutine.
type LargeFile struct {
	dataDir           string        // root directory holding the index/data files
	cleanupTime       time.Duration // interval between cleanup passes; <= 0 disables the monitor
	cleanupMaxProcess int           // concurrency limit for cleanup workers (errgroup limit)
	cleaning          uint32        // atomic flag: 1 while a cleanup pass is in progress
	pool              *pool         // cache of open FileStore handles, keyed by index-file path
	cancelMonitor     func()        // cancels the Monitor goroutine; set to nil by Close
}

// readCloser wraps a piece data reader handed out by GetPiece so that
// closing it releases the associated entry in the FileStore pool.
// NOTE(review): as written, Close releases only the pool entry and does
// not close the wrapped reader r — confirm the pool closes it.
type readCloser struct {
	r    io.ReadCloser // underlying piece data reader
	key  string        // key passed to pool.Close on Close
	pool *pool         // pool that owns the FileStore this reader came from
}

// Read forwards directly to the wrapped reader.
func (r *readCloser) Read(b []byte) (int, error) {
	n, err := r.r.Read(b)
	return n, err
}

// Close releases everything this reader holds: it closes the underlying
// data reader (the original leaked it, releasing only the pool entry)
// and then releases the pool entry the reader was obtained from.
// The first error encountered wins.
func (r *readCloser) Close() error {
	var err error
	// Nil-guard: a readCloser built from a failed GetReader may carry a
	// nil inner reader.
	if r.r != nil {
		err = r.r.Close()
	}
	if poolErr := r.pool.Close(r.key); err == nil {
		err = poolErr
	}
	return err
}

// NewLargeFile builds a LargeFile store rooted at dataDir and starts its
// background cleanup monitor. cleanupTime is the interval between cleanup
// passes (<= 0 disables them), cleanupMaxProcess bounds cleanup
// concurrency, and poolCap caps the number of cached FileStore handles.
func NewLargeFile(dataDir string, cleanupTime time.Duration, cleanupMaxProcess int, poolCap int) *LargeFile {
	p := &pool{
		fs:      make(map[string]*FileStore),
		cap:     poolCap,
		factory: NewFileStore,
	}
	lf := &LargeFile{
		dataDir:           dataDir,
		cleanupTime:       cleanupTime,
		cleanupMaxProcess: cleanupMaxProcess,
		pool:              p,
	}
	ctx, cancel := context.WithCancel(context.Background())
	lf.cancelMonitor = cancel
	go lf.Monitor(ctx)
	return lf
}

// PutPiece streams the piece identified by key from r into the index
// file selected by getIdxFileKey, recording the size and expiry from
// metaInfo, and returns the content hash computed by the store.
func (f *LargeFile) PutPiece(key string, metaInfo *store.PieceMetaInfo, r io.Reader) (hash []byte, err error) {
	// The pool is keyed by the index-file path, so Get and Close must use
	// the same key. The original released with the raw piece key and never
	// freed the entry it had opened.
	idxKey := f.getIdxFileKey(key)
	fs, err := f.pool.Get(idxKey)
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = f.pool.Close(idxKey)
	}()
	return fs.PutFrom(key, r, metaInfo.Size, metaInfo.ExpireTime)
}

// GetPiece returns a reader for the piece identified by key along with
// its stored hash and size. The caller must Close the returned reader;
// doing so releases the pool entry for the underlying index file.
func (f *LargeFile) GetPiece(key string) (r io.ReadCloser, hash []byte, size int64, err error) {
	// The pool is keyed by the index-file path; the readCloser must carry
	// that same key so its Close releases the entry opened here. The
	// original stored the raw piece key, leaking the pool entry.
	idxKey := f.getIdxFileKey(key)
	fs, err := f.pool.Get(idxKey)
	if err != nil {
		return
	}
	var inner io.ReadCloser
	inner, hash, size, err = fs.GetReader(key)
	if err != nil {
		// Release the pool entry rather than handing back a broken reader
		// (the original wrapped a possibly-nil reader on error).
		_ = f.pool.Close(idxKey)
		return nil, nil, 0, err
	}
	r = &readCloser{r: inner, pool: f.pool, key: idxKey}
	return
}

// DelPiece removes the piece identified by key from its index file.
func (f *LargeFile) DelPiece(key string) (err error) {
	// Use the index-file key for both Get and Close; the original closed
	// with the raw piece key and never released the opened pool entry.
	idxKey := f.getIdxFileKey(key)
	fs, err := f.pool.Get(idxKey)
	if err != nil {
		return
	}
	defer func() {
		_ = f.pool.Close(idxKey)
	}()
	return fs.Delete(key)
}

// Mode reports the storage mode identifier for this store.
func (f *LargeFile) Mode() string {
	mode := store.ModeLargeFile
	return mode
}

// getIdxFileKey maps a piece id to the path of the index file that holds
// it: the id is hashed by calIdxFile and joined under dataDir.
func (f *LargeFile) getIdxFileKey(pieceId string) string {
	name := calIdxFile([]byte(pieceId))
	return filepath.Join(f.dataDir, name)
}

// Close stops the background cleanup monitor. Subsequent calls are
// no-ops; Close always returns nil.
func (f *LargeFile) Close() error {
	cancel := f.cancelMonitor
	if cancel == nil {
		return nil
	}
	f.cancelMonitor = nil
	cancel()
	return nil
}

// Monitor runs cleanup passes every cleanupTime until ctx is cancelled.
// A non-positive cleanupTime disables monitoring entirely. The ticker is
// reset after each pass so the full interval elapses between the end of
// one pass and the start of the next.
func (f *LargeFile) Monitor(ctx context.Context) {
	if f.cleanupTime <= 0 {
		return
	}
	ticker := time.NewTicker(f.cleanupTime)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			fmt.Println("开始清理")
			_ = f.cleanupMonitor()
			ticker.Reset(f.cleanupTime)
		case <-ctx.Done():
			return
		}
	}
}

func (f *LargeFile) cleanupMonitor() error {
	if atomic.CompareAndSwapUint32(&f.cleaning, 0, 1) {
		defer atomic.CompareAndSwapUint32(&f.cleaning, 1, 0)
		idxKeys := f.getDatFiles()
		group := errgroup.Group{}
		group.SetLimit(f.cleanupMaxProcess)
		for _, k := range idxKeys {
			func(k string) {
				group.Go(func() (err error) {
					defer func() {
						fmt.Println("清理文件", k, err)
					}()
					fs, err := f.pool.Get(k)
					if err != nil {
						return err
					}
					err = fs.CleanUp()
					return err
				})
			}(k)
		}
		return group.Wait()
	}
	return nil
}

// getDatFiles walks dataDir and returns the pool keys of every data file
// found (the file path with its ".dat" suffix stripped). Leftover cleanup
// files encountered along the way are deleted and skipped. Per-entry walk
// errors are ignored so one bad entry does not abort the whole scan; a
// top-level walk error is only logged.
func (f *LargeFile) getDatFiles() []string {
	keys := make([]string, 0)
	walkErr := filepath.Walk(f.dataDir, func(path string, info fs2.FileInfo, err error) error {
		switch {
		case err != nil, path == f.dataDir, info.IsDir():
			return nil
		case isCleanFile(path):
			// Stale cleanup artifact: remove it and do not index it.
			_ = os.Remove(path)
			return nil
		case isDatFile(path):
			keys = append(keys, strings.TrimSuffix(path, ".dat"))
		}
		return nil
	})
	if walkErr != nil {
		log.Println("store.getIdxFiles err", walkErr)
	}
	return keys
}
