package sfs

import (
	"context"
	"github.com/chicken-team-outside/chicken_transmission/utils"
	"go.uber.org/zap"
	"io"
	"io/fs"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"time"
)

// SeekerCloser is the read handle handed out by a BlockCache: a seekable
// reader that the caller owns and must Close when finished.
type SeekerCloser interface {
	io.ReadSeeker
	io.Closer
}

// BlockCache stores opaque blocks of bytes under string keys.
type BlockCache interface {
	// Put stores exactly size bytes read from reader under key; ctx can
	// cancel the copy.
	Put(ctx context.Context, key string, reader io.Reader, size int64) error
	// Get opens a previously stored block for reading and seeking.
	Get(key string) (SeekerCloser, error)
}

// BlockInfo is the in-memory index record for one cached file on disk.
type BlockInfo struct {
	key   string // cache key; also the file name inside the cache directory
	mtime int64  // modification time in Unix seconds, used for oldest-first eviction
	size  int64  // file size in bytes
}

// BlockCacheImpl is a disk-backed BlockCache with a best-effort size cap:
// each block lives as one file in cacheDir, an index of blocks is kept in
// blockMap, and a background goroutine (autoClean) evicts the oldest files
// once the tracked total crosses maxCacheSize.
type BlockCacheImpl struct {
	cacheDir       string             // directory holding one file per cached block
	maxCacheSize   int64              // soft size limit in bytes; guarded by cachedSizeLock
	cachedSizeLock sync.Mutex         // guards maxCacheSize and cacheSize
	cacheSize      int64              // current total size of cached files in bytes
	cacheLock      sync.RWMutex       // Delete takes the write lock, Put the read lock
	ctx            context.Context    // lifetime of the autoClean goroutine
	cancel         context.CancelFunc // stops autoClean; invoked by Close
	blockMap       sync.Map           // key (string) -> *BlockInfo index of cached blocks
	notifyChannel  chan struct{}      // capacity-1 wake-up signal for autoClean
}

// NewBlockCacheImpl creates a disk-backed block cache rooted at cacheDir,
// creating the directory if it does not exist. Pre-existing cache files are
// re-indexed, leftover temporary files from a previous run are removed, and
// a background eviction goroutine is started. maxCacheSize is the soft size
// limit in bytes (<= 0 disables eviction).
func NewBlockCacheImpl(cacheDir string, maxCacheSize int64) (*BlockCacheImpl, error) {
	stat, err := os.Stat(cacheDir)
	if err != nil {
		if os.IsNotExist(err) {
			// Fix: os.ModeDir carries zero permission bits, so MkdirAll with
			// it creates an inaccessible directory. Use a conventional 0o755.
			err = os.MkdirAll(cacheDir, 0o755)
		}
	} else if !stat.IsDir() {
		// The path exists but is not a directory.
		err = fs.ErrInvalid
	}
	if err != nil {
		return nil, err
	}
	cache := &BlockCacheImpl{
		notifyChannel: make(chan struct{}, 1),
		maxCacheSize:  maxCacheSize,
		cacheDir:      cacheDir,
	}
	cache.ctx, cache.cancel = context.WithCancel(context.Background())
	// Scan pre-existing files before the cleaner goroutine starts, so the
	// startup size accounting cannot race with eviction.
	cache.cleanTmpFile()
	go cache.autoClean()
	// A zero delta still runs the limit check: trigger an eviction pass
	// immediately if the recovered state already exceeds the limit.
	cache.updateCachedSize(0)
	return cache, nil
}

// cleanTmpFile performs the one-time startup scan of the cache directory:
// leftover "*_block_tmp" temporaries from a crashed Put are deleted, and
// every other regular file is registered in blockMap and counted toward
// cacheSize.
func (b *BlockCacheImpl) cleanTmpFile() {
	entries, err := os.ReadDir(b.cacheDir)
	if err != nil {
		zap.L().Error("failed to read cache dir", zap.Error(err), zap.String("cacheDir", b.cacheDir))
		return
	}
	var total int64
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		if strings.HasSuffix(entry.Name(), "_block_tmp") {
			// Orphaned temporary from an interrupted Put — remove it.
			tmpFilePath := filepath.Join(b.cacheDir, entry.Name())
			if err := os.Remove(tmpFilePath); err != nil {
				zap.L().Warn("Unable to remove temporary file", zap.Error(err), zap.String("tmp_file", tmpFilePath))
			}
			continue
		}
		fileInfo, err := entry.Info()
		if err != nil {
			zap.L().Warn("Unable to get info", zap.Error(err), zap.String("name", entry.Name()), zap.String("path", b.cacheDir))
			continue
		}
		total += fileInfo.Size()
		b.blockMap.Store(entry.Name(), &BlockInfo{mtime: fileInfo.ModTime().Unix(), size: fileInfo.Size(), key: fileInfo.Name()})
	}
	// Fix: mutate cacheSize under cachedSizeLock instead of a bare += so
	// the scan stays safe even if the cleaner goroutine is already running.
	b.cachedSizeLock.Lock()
	b.cacheSize += total
	b.cachedSizeLock.Unlock()
}

// SetMaxCacheSize replaces the cache size limit and immediately re-checks
// whether an eviction pass should be signalled under the new limit.
func (b *BlockCacheImpl) SetMaxCacheSize(size int64) {
	b.cachedSizeLock.Lock()
	b.maxCacheSize = size
	// Unlock before updateCachedSize, which takes the same mutex.
	b.cachedSizeLock.Unlock()
	// A zero delta only runs the over-limit check.
	b.updateCachedSize(0)
}

// GetMaxCacheSize reports the configured soft size limit in bytes.
func (b *BlockCacheImpl) GetMaxCacheSize() int64 {
	b.cachedSizeLock.Lock()
	limit := b.maxCacheSize
	b.cachedSizeLock.Unlock()
	return limit
}

// GetCacheSize reports the tracked total size of cached files in bytes.
func (b *BlockCacheImpl) GetCacheSize() int64 {
	b.cachedSizeLock.Lock()
	current := b.cacheSize
	b.cachedSizeLock.Unlock()
	return current
}

// GetCacheDirectory reports the directory that holds the cached block files.
// cacheDir is immutable after construction, so no locking is needed here.
func (b *BlockCacheImpl) GetCacheDirectory() string {
	return b.cacheDir
}

// updateCachedSize adjusts the tracked on-disk total by delta bytes and,
// when a positive limit is configured and has been reached, nudges the
// cleaner goroutine with a non-blocking send on notifyChannel.
func (b *BlockCacheImpl) updateCachedSize(delta int64) {
	b.cachedSizeLock.Lock()
	defer b.cachedSizeLock.Unlock()
	b.cacheSize += delta
	if b.maxCacheSize <= 0 || b.cacheSize < b.maxCacheSize {
		return
	}
	select {
	case b.notifyChannel <- struct{}{}: // wake the cleaner
	default: // a wake-up is already pending; dropping this one is fine
	}
}

// getKeepSize returns the target size the eviction loop should shrink the
// cache down to.
func (b *BlockCacheImpl) getKeepSize() int64 {
	limit := b.GetMaxCacheSize()
	if b.GetCacheSize() < limit {
		// Still under the limit: nothing needs reclaiming.
		return limit
	}
	// Over the limit: large caches free a fixed amount, small ones free half.
	// NOTE(review): 100*1024 frees only 100 KiB from a >200 MiB cache, which
	// triggers near-constant eviction passes — possibly 100 MiB was intended;
	// confirm with the author.
	if limit > 200*1024*1024 {
		return limit - 100*1024
	}
	return limit / 2
}

// autoClean is the background eviction loop. It sleeps until either the
// cache context is cancelled (Close) or updateCachedSize signals that the
// size limit was reached, then deletes the oldest blocks until the cache
// is back under the target returned by getKeepSize.
func (b *BlockCacheImpl) autoClean() {
	for {
		select {
		case <-b.ctx.Done():
			return
		case <-b.notifyChannel:
		}
		// Snapshot every indexed block and order oldest-first by mtime so
		// eviction approximates least-recently-written.
		var candidates []*BlockInfo
		b.blockMap.Range(func(_, value any) bool {
			candidates = append(candidates, value.(*BlockInfo))
			return true
		})
		sort.Slice(candidates, func(i, j int) bool {
			return candidates[i].mtime < candidates[j].mtime
		})
		target := b.getKeepSize()
		for _, block := range candidates {
			if b.GetCacheSize() < target {
				break
			}
			if err := b.Delete(block.key); err != nil {
				zap.L().Warn("failed to delete cache key", zap.Error(err), zap.String("key", block.key))
			}
		}
	}
}

// Put streams reader into a temporary file inside the cache directory and,
// once exactly size bytes have been written, atomically renames it to the
// cache entry named key and records it in the index. It returns
// io.ErrUnexpectedEOF if reader yields a different number of bytes than
// size, and the underlying error for any copy/close/rename failure. The
// read lock only excludes concurrent Delete; concurrent Puts may proceed.
func (b *BlockCacheImpl) Put(ctx context.Context, key string, reader io.Reader, size int64) error {
	b.cacheLock.RLock()
	defer b.cacheLock.RUnlock()
	tempFile, err := os.CreateTemp(b.cacheDir, "*_block_tmp")
	if err != nil {
		return err
	}
	tmpFilePath := tempFile.Name()
	// removeTmp best-effort deletes the temporary file on failure paths.
	removeTmp := func() {
		if rmErr := os.Remove(tmpFilePath); rmErr != nil {
			zap.L().Warn("Failed to remove cache file", zap.Error(rmErr), zap.String("cache_path", tmpFilePath))
		}
	}
	written, err := utils.CopyBuffer(ctx, tempFile, reader, make([]byte, 65535))
	if err != nil {
		// Fix: close and delete the partial temp file instead of leaking
		// both the handle and the on-disk file until the next restart.
		_ = tempFile.Close()
		removeTmp()
		return err
	}
	if err = tempFile.Close(); err != nil {
		removeTmp()
		return err
	}
	if written != size {
		removeTmp()
		return io.ErrUnexpectedEOF
	}
	if err = os.Rename(tmpFilePath, filepath.Join(b.cacheDir, key)); err != nil {
		// Fix: drop the temp copy on every rename failure, not only EEXIST
		// (e.g. another writer published key first on a platform where
		// rename does not overwrite).
		removeTmp()
		return err
	}
	b.blockMap.Store(key, &BlockInfo{
		mtime: time.Now().Unix(),
		size:  size,
		key:   key,
	})
	b.updateCachedSize(size)
	return nil
}

// Get opens the cached block stored under key for reading and seeking.
// The caller owns the returned handle and must Close it. On error the
// returned SeekerCloser is nil.
func (b *BlockCacheImpl) Get(key string) (SeekerCloser, error) {
	f, err := os.Open(filepath.Join(b.cacheDir, key))
	if err != nil {
		// Fix: return a literal nil interface. Returning os.Open's result
		// directly would box a typed-nil *os.File into SeekerCloser, giving
		// callers a non-nil interface that panics on use.
		return nil, err
	}
	return f, nil
}

// Delete removes the cached block named key from disk, drops it from the
// in-memory index, and subtracts its size from the tracked total. A key
// whose file is already gone is treated as successfully deleted. The write
// lock excludes concurrent Put and Delete calls.
func (b *BlockCacheImpl) Delete(key string) error {
	b.cacheLock.Lock()
	defer b.cacheLock.Unlock()
	path := filepath.Join(b.cacheDir, key)
	stat, err := os.Stat(path)
	if err != nil {
		if os.IsNotExist(err) {
			// Already gone — also drop any stale index entry for it.
			b.blockMap.Delete(key)
			return nil
		}
		return err
	}
	if err = os.Remove(path); err != nil {
		return err
	}
	// Fix: remove the index entry as well; previously blockMap grew without
	// bound and autoClean kept re-processing records for deleted files.
	b.blockMap.Delete(key)
	b.updateCachedSize(-stat.Size())
	return nil
}

// CleanAll evicts every block currently tracked by the cache, logging (but
// not aborting on) individual deletion failures.
func (b *BlockCacheImpl) CleanAll() {
	b.blockMap.Range(func(key, _ any) bool {
		name := key.(string)
		if err := b.Delete(name); err != nil {
			zap.L().Warn("failed to delete cache", zap.String("key", name), zap.Error(err))
		}
		return true
	})
}

// Close stops the background eviction goroutine by cancelling its context.
// It does not delete any cached files from disk.
func (b *BlockCacheImpl) Close() {
	b.cancel()
}
