package tiny_file

import (
	"context"
	"errors"
	"fmt"
	"io"
	"math"
	"os"
	"path"
	"sync"
	"time"

	"github.com/google/uuid"
	"github.com/zeebo/errs"
	"go.uber.org/zap"
	"gorm.io/gorm"
)

// ErrFileStore is the error class for all errors produced by the tiny-file store.
var ErrFileStore = errs.Class("tinyFile")

// maxCleanup is the dead-record ratio (deleted-or-expired / total) above which
// a large file is compacted; see ThroughAndCleanup.
var maxCleanup = 0.1

// FileStore packs many small pieces into per-day "large files" on disk and
// keeps the index of each piece (file, offset, expiry, checksum) in SQLite.
type FileStore struct {
	h           *sqliteHandle // index database handle (sqlite.db under storeDir)
	storeDir    string        // root directory holding the large files and sqlite.db
	StoreFile   string        // name of the currently active (today's) large file
	fileWriting largeFile     // open write handle for StoreFile
	fileLock    sync.Mutex    // guards rotation of StoreFile/fileWriting in setupStoreFile
	log         *zap.Logger
}

// NewFileStore builds a FileStore rooted at root and opens (and migrates)
// the sqlite index database stored at root/sqlite.db.
func NewFileStore(ctx context.Context, log *zap.Logger, root string) *FileStore {
	handle := NewSqliteHandle(path.Join(root, "sqlite.db"), log)
	// NOTE(review): any result of Migrator is discarded here — confirm a
	// failed migration cannot pass silently.
	handle.Migrator(ctx)

	return &FileStore{
		h:        handle,
		log:      log,
		storeDir: root,
	}
}

// Init opens (creating it if needed) today's large file for writing and
// records it as the active store file. Call once before PutPiece.
func (f *FileStore) Init() error {
	// Capture the day name once so the on-disk path and the recorded
	// StoreFile cannot disagree across a midnight rollover (the original
	// called Today() twice).
	today := Today()
	out, err := os.OpenFile(path.Join(f.storeDir, today), os.O_WRONLY|os.O_CREATE, os.ModePerm)
	if err != nil {
		return err
	}
	f.StoreFile = today
	f.fileWriting = largeFile{out: out}
	return nil
}

// setupStoreFile rotates the active store file when the date has changed:
// it opens today's file, records its name, and closes the previous handle.
// Safe for concurrent callers via fileLock; a no-op when already current.
func (f *FileStore) setupStoreFile() error {
	f.fileLock.Lock()
	defer f.fileLock.Unlock()
	// Capture the day name once: the original called Today() three times,
	// so the compare, the path, and the recorded name could disagree if
	// the day rolled over mid-function.
	today := Today()
	if f.StoreFile == today {
		return nil
	}
	out, err := os.OpenFile(path.Join(f.storeDir, today), os.O_WRONLY|os.O_CREATE, os.ModePerm)
	if err != nil {
		return err
	}
	f.StoreFile = today
	// Close the previous day's handle before swapping in the new one.
	f.fileWriting.close()
	f.fileWriting = largeFile{out: out}
	return nil
}
// PutPiece stores size bytes read from r under pieceId. expire is a unix
// timestamp: 0 means "never" (mapped to math.MaxInt so expiries compare
// uniformly); any other value must be at least five minutes in the future.
// It reserves space in the current large file, records the index row in
// SQLite, then writes the block header and payload at the reserved offset.
func (f *FileStore) PutPiece(ctx context.Context, pieceId uuid.UUID, expire int,
	size int, r io.Reader, md5Sum [16]byte) (err error) {
	// BUG FIX: the original tested `f.setupStoreFile() != nil` but returned
	// the still-nil named `err`, silently swallowing the setup failure.
	if err = f.setupStoreFile(); err != nil {
		return err
	}
	// 0 means never expire; use the maximum timestamp for uniform comparison.
	if expire == 0 {
		expire = math.MaxInt
	}
	// Minimum validity is five minutes from now. Validated BEFORE
	// PreAllocation so an invalid request does not leak reserved space.
	if expire <= int(time.Now().Unix())+300 {
		return errors.New("过期时间错误")
	}
	// Reserve payload + block-header space in the current large file.
	space := size + tinyBlockSize
	blockInfo, err := f.h.PreAllocation(f.StoreFile, space)
	if err != nil {
		return err
	}

	// Record the index row in the database.
	idx := Idx{}
	idx.PieceId = pieceId
	idx.Space = space
	idx.Tmp = 0
	idx.File = f.StoreFile
	idx.Expire = expire
	idx.Md5 = md5Sum[:]
	idx.Pos = blockInfo.LastPos

	err = f.h.Insert(idx)
	if err != nil {
		return err
	}

	// Write the block header and payload into the large file at the
	// pre-allocated offset.
	block := tinyBlock{
		space:   uint32(idx.Space),
		size:    uint32(size),
		pieceId: pieceId,
		expire:  uint32(expire),
		sum:     md5Sum,
	}
	return f.fileWriting.write(r, block, idx.Pos)
}

// GetPiece returns the stored payload for pieceId, or an error when the
// record is missing, marked deleted, or expired.
func (f *FileStore) GetPiece(ctx context.Context, pieceId uuid.UUID) ([]byte, error) {
	record, lookupErr := f.h.Get(pieceId)
	if lookupErr != nil {
		return nil, lookupErr
	}
	if record.ExpiredOrDel() {
		return nil, ErrFileStore.New("record is del or expired")
	}
	src, openErr := os.OpenFile(path.Join(f.storeDir, record.File), os.O_RDONLY, os.ModePerm)
	if openErr != nil {
		return nil, openErr
	}
	defer src.Close()
	// Read the block back from the offset recorded in the index.
	_, payload, readErr := f.fileWriting.read(src, record.Pos)
	return payload, readErr
}

// DelPiece removes the piece identified by pieceId from the index only;
// the on-disk bytes are reclaimed later by the cleanup pass.
func (f *FileStore) DelPiece(ctx context.Context, pieceId uuid.UUID) (err error) {
	return f.h.Del(pieceId)
}

// Mode reports the storage-mode identifier of this backend.
func (f *FileStore) Mode() string {
	return "tiny file"
}

// CleanupLargeFile compacts one large file: it streams every block out of
// fileName, copies the still-live ones into fileName+".tmp", rewrites the
// index inside a transaction, and finally renames the temp file over the
// original. It returns the number of blocks dropped (deleted, expired, or
// missing from the index). On any failure it rolls back: provisional index
// rows and the temp file are deleted.
func (f *FileStore) CleanupLargeFile(ctx context.Context, fileName string) (int, error) {
	l := largeFile{}

	newFileName := fileName + ".tmp"
	largeTempFile, err := os.OpenFile(path.Join(f.storeDir, newFileName), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return 0, err
	}
	l2 := largeFile{out: largeTempFile}
	lastPos := int64(0) // next write offset in the temp file
	cleanupCount := 0   // blocks dropped
	total := 0          // live blocks copied into the temp file

	// Walk every block of the source file; the callback decides copy vs drop.
	ret := l.through(path.Join(f.storeDir, fileName), func(startPos int64, reader io.Reader, tiny *tinyBlock) (err error) {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		idx, err := f.h.Get(tiny.pieceId)
		if err != nil {
			// No index record: the block is orphaned — count it as cleaned.
			if errors.Is(err, gorm.ErrRecordNotFound) {
				cleanupCount++
				return nil
			}
			return err
		}
		if !idx.ExpiredOrDel() {
			// Copy header and payload into the temp large file.
			err = l2.write(reader, *tiny, lastPos)
			if err != nil {
				return fmt.Errorf("write largefile err: %s", err)
			}
			// Insert a provisional (Tmp=1) index row. File is the ORIGINAL
			// name on purpose: the temp file is renamed over it at the end.
			err = f.h.Insert(Idx{
				PieceId: tiny.pieceId,
				File:    fileName,
				Pos:     lastPos,
				Space:   int(tiny.space),
				Expire:  int(tiny.expire),
				Md5:     tiny.sum[:],
				Del:     0,
				Tmp:     1,
			})
			if err != nil {
				return fmt.Errorf("insert idx failed: %s", err)
			}
			total++
			lastPos += int64(tiny.space)
		} else {
			cleanupCount++
		}
		return nil
	})
	// NOTE(review): this logs at Error level even when Close succeeds
	// (zap.Error(nil)) — likely meant to be conditional on the close error.
	f.log.Error("tmpFile close", zap.String("file", newFileName), zap.Error(largeTempFile.Close()))
	var txErr error
	if ret == nil {
		txErr = f.h.Transaction(func(tx *gorm.DB) (err error) {
			// Delete the original (tmp=0) index rows for this file.
			err = tx.Where("file = ? and tmp = 0", fileName).Delete(&Idx{}).Error
			if err != nil {
				return
			}
			// Promote the provisional rows to permanent ones.
			err = tx.Model(&Idx{}).Where("file = ? and tmp = 1", fileName).Update("tmp", 0).Error
			if err != nil {
				return
			}
			// Refresh the large-file bookkeeping row.
			err = tx.Model(&LargeFileInfo{}).Where("file=?", fileName).Updates(map[string]any{
				"last_cleanup": time.Now().Unix(),
				"last_pos":     lastPos,
				"count":        total,
			}).Error
			if err != nil {
				return
			}
			// Finally replace the old large file with the compacted one.
			return os.Rename(path.Join(f.storeDir, newFileName), path.Join(f.storeDir, fileName))
		})
	}
	// On any error, roll back the provisional state.
	if ret != nil || txErr != nil {
		if ret != nil {
			f.log.Error("CleanupLargeFile through() error", zap.Error(ret))
		}
		if txErr != nil {
			f.log.Error("CleanupLargeFile Transaction() error", zap.Error(txErr))
		}
		// Drop all provisional (tmp=1) index rows.
		_err := f.h.DelIdxTmp()
		if _err != nil {
			f.log.Error("CleanupLargeFile DelIdxTmp error", zap.Error(_err))
		}
		_err = os.Remove(path.Join(f.storeDir, newFileName))
		if _err != nil {
			f.log.Error("CleanupLargeFile remove large file error error", zap.String("file", path.Join(f.storeDir, newFileName)), zap.Error(_err))
		}
		// NOTE(review): when no block was copied this removes the ORIGINAL
		// large file even though the failure may be transient — confirm this
		// cannot lose live data still referenced by the index.
		if total == 0 {
			os.Remove(path.Join(f.storeDir, fileName))
		}
		return cleanupCount, errs.Combine(ret, txErr)
	}

	return cleanupCount, ret
}

// ThroughAndCleanup scans every known large file and compacts those whose
// dead-record fraction (deleted or expired) has reached maxCleanup.
// It stops early when ctx is cancelled.
func (f *FileStore) ThroughAndCleanup(ctx context.Context) error {
	largeFiles, err := f.h.GetLargeFileInfos()
	if err != nil {
		return err
	}
	for _, lar := range largeFiles {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		// Count the records that are still live for this file.
		total, countErr := f.h.GetIdxCountWithoutDelOrExpired(lar.File)
		if countErr != nil {
			f.log.Error("ThroughAndCleanup.GetIdxCountWithoutDelOrExpired error", zap.String("file", lar.File), zap.Error(countErr))
			continue
		}
		// BUG FIX: with Count==0 the ratio below is 0/0 (NaN), which fails
		// the `< maxCleanup` test and made every pass "clean" an empty file.
		// Nothing to reclaim — skip it.
		if lar.Count == 0 {
			continue
		}
		// Skip unless the dead fraction reaches the cleanup threshold.
		// (Count < total means the bookkeeping is stale; clean to resync.)
		if lar.Count >= total && float64(lar.Count-total)/float64(lar.Count) < maxCleanup {
			continue
		}
		f.log.Info("start cleanup file", zap.String("file", lar.File), zap.Int64("count", lar.Count), zap.Int64("total", total))
		cleanupCount, errCleanUp := f.CleanupLargeFile(ctx, lar.File)
		if errCleanUp != nil {
			f.log.Info("end cleanup file err", zap.String("file", lar.File), zap.Int("cleanupCount", cleanupCount), zap.Error(errCleanUp))
		} else {
			f.log.Info("end cleanup file", zap.String("file", lar.File), zap.Int("cleanupCount", cleanupCount))
		}
		// Pace the scan, but wake immediately on cancellation instead of
		// sleeping through it (the original used an uninterruptible Sleep).
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Millisecond * 500):
		}
	}
	return nil
}

// Run drives the periodic maintenance loop: once every 24 hours it triggers
// ThroughAndCleanup, logging (not propagating) its errors, until ctx is
// cancelled — at which point the context's error is returned.
func (f *FileStore) Run(ctx context.Context) error {
	ticker := time.NewTicker(24 * time.Hour)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			cleanupErr := f.ThroughAndCleanup(ctx)
			if cleanupErr != nil {
				f.log.Error("ThroughAndCleanup err", zap.Error(cleanupErr))
			}
		}
	}
}
