// Package flatstore is the storage engine for appendable, relatively large files, e.g. 1MB.
package flatstore

import (
	"errors"
	"math/rand"
	"os"
	"strconv"
	"syscall"
	"time"
)

const (
	// SubdirectoryLimit is the number of data subdirectories under the data
	// directory; files are spread across them (see GenerateFile).
	SubdirectoryLimit      = 365
	// SingleDirDataFileLimit is the maximum number of data files that may be
	// placed in a single subdirectory.
	SingleDirDataFileLimit = 1000

	// FileOpenOpt is the default flag set for opening data files.
	// NOTE(review): currently unused — callers pass explicit flags to os.OpenFile.
	FileOpenOpt = os.O_RDWR | os.O_CREATE | os.O_APPEND
)

// Store is a flat storage engine that keeps appendable data files laid out
// as dataDir/<subdir>/<file>, with open handles cached in memory.
type Store struct {
	dataDir      string           // root directory holding all subdirectories and data files
	filePointers map[int]*os.File // open handles keyed by subdir*SingleDirDataFileLimit + file
}

// NewStore starts the store engine rooted at datadir and verifies its
// subdirectory layout.
//
// Intended deployment is one system disk (metadata and logging) plus one
// data disk. The sync flag is currently unused by the visible code —
// presumably it will control synchronous flushing; confirm before relying on it.
func NewStore(datadir string, sync bool) (s *Store, e error) {
	s = new(Store)
	s.dataDir = datadir
	// Initialize the handle map up front: GenerateFile and CreateAndAppend
	// write into it, and writing to a nil map panics.
	s.filePointers = make(map[int]*os.File)

	// Create and check all subdirectories.
	if !s.createAndCheckSubdirectories() {
		return nil, errors.New("CheckSubdirError")
	}

	return s, nil
}

// createAndCheckSubdirectories ensures every data subdirectory
// 0..SubdirectoryLimit-1 exists under the data directory, then fsyncs the
// parent directory so the newly created entries are durable on disk.
// It reports whether all subdirectories exist and the parent was synced.
func (s *Store) createAndCheckSubdirectories() bool {
	for i := 0; i < SubdirectoryLimit; i++ {
		if err := os.MkdirAll(s.dataDir+"/"+strconv.Itoa(i), 0755); err != nil {
			return false
		}
	}

	// Fsync the parent directory so the directory entries survive a crash.
	dir, err := os.Open(s.dataDir)
	if err != nil {
		return false
	}
	defer dir.Close()
	return dir.Sync() == nil
}

// StatFreeSpace returns the number of free bytes on the filesystem holding
// the data directory, or 0 if the statfs call fails.
//
// NOTE(review): this uses Bfree (free blocks, including root-reserved ones);
// Bavail would be the space usable by unprivileged processes — confirm
// which is intended.
func (s *Store) StatFreeSpace() int64 {
	var stat syscall.Statfs_t
	if err := syscall.Statfs(s.dataDir, &stat); err != nil {
		return 0
	}

	// Cast both fields explicitly: their integer types differ across
	// platforms (e.g. Bsize is int64 on Linux but uint32 on Darwin), so the
	// untyped product would not compile everywhere.
	return int64(stat.Bfree) * int64(stat.Bsize)
}

// GenerateFile creates a new randomly named data file in a random
// subdirectory, registers its open handle in the store, and returns the
// (subdir, file) pair identifying it.
//
// A name collision (O_EXCL hit an existing file) triggers a retry with
// fresh random numbers, up to SubdirectoryLimit attempts. Any other open
// error (missing subdirectory, permissions, ...) aborts immediately since
// retrying cannot fix it.
func (s *Store) GenerateFile() (subdir, file int, err error) {
	// NOTE(review): re-seeding the global source on every call is the legacy
	// pattern and can repeat seeds under rapid calls; seeding once at process
	// start would be preferable.
	rand.Seed(time.Now().UTC().UnixNano())

	var fp *os.File
	for attempt := 1; ; attempt++ {
		if attempt > SubdirectoryLimit {
			return 0, 0, errors.New("GenerateFileError")
		}

		subdir = rand.Intn(SubdirectoryLimit)
		file = rand.Intn(SingleDirDataFileLimit)
		fp, err = os.OpenFile(s.dataDir+"/"+strconv.Itoa(subdir)+"/"+strconv.Itoa(file), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
		if err == nil {
			break
		}
		// Only a name collision is worth retrying.
		if !os.IsExist(err) {
			return 0, 0, err
		}
	}

	// Lazily initialize the handle map so a Store not built via NewStore
	// does not panic on a nil-map write.
	if s.filePointers == nil {
		s.filePointers = make(map[int]*os.File)
	}
	s.filePointers[subdir*SingleDirDataFileLimit+file] = fp
	return subdir, file, nil
}

// CreateAndAppend creates the data file identified by (subdir, file),
// registers its open handle in the store, and writes data to it.
// It fails if the file already exists (O_EXCL) or the write fails.
func (s *Store) CreateAndAppend(subdir, file int, data []byte) error {
	fp, err := os.OpenFile(s.dataDir+"/"+strconv.Itoa(subdir)+"/"+strconv.Itoa(file), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
	if err != nil {
		return err
	}

	// Lazily initialize the handle map so a Store not built via NewStore
	// does not panic on a nil-map write.
	if s.filePointers == nil {
		s.filePointers = make(map[int]*os.File)
	}
	s.filePointers[subdir*SingleDirDataFileLimit+file] = fp

	_, err = fp.Write(data)
	return err
}

// Append writes data to the end of an already-registered data file
// identified by (subdir, file). The handle must have been opened earlier
// via GenerateFile or CreateAndAppend; otherwise FileMissingError is returned.
func (s *Store) Append(subdir, file int, data []byte) error {
	handle, found := s.filePointers[subdir*SingleDirDataFileLimit+file]
	if !found {
		return errors.New("FileMissingError")
	}

	_, writeErr := handle.Write(data)
	return writeErr
}

// Fsync flushes the registered data file identified by (subdir, file) to
// stable storage, or returns FileMissingError if no handle is registered.
func (s *Store) Fsync(subdir, file int) error {
	key := subdir*SingleDirDataFileLimit + file
	if handle, found := s.filePointers[key]; found {
		return handle.Sync()
	}
	return errors.New("FileMissingError")
}

// Read reads length bytes at offset from the registered data file
// identified by (subdir, file). If data is non-nil and exactly length
// bytes long it is used as the destination buffer; otherwise a fresh
// buffer is allocated. On success the filled buffer is returned; on
// failure nbuf is nil.
func (s *Store) Read(subdir, file int, offset, length int64, data []byte) (nbuf []byte, err error) {
	index := subdir*SingleDirDataFileLimit + file
	fp, ok := s.filePointers[index]
	if !ok {
		return nil, errors.New("FileMissingError")
	}

	if data == nil || int64(len(data)) != length {
		data = make([]byte, length)
	}

	n, err := fp.ReadAt(data, offset)
	// Per the io.ReaderAt contract, a full read (n == len(p)) may still
	// return err == io.EOF; treat any complete read as success.
	if int64(n) != length {
		return nil, err
	}
	return data, nil
}

// Delete removes a data file, closing and unregistering its cached handle
// first.
//
// NOTE(review): the original comment says tiny files should merely be
// marked deleted in a metadata db; that path is not implemented here —
// every file is removed directly.
func (s *Store) Delete(subdir, file int) error {
	index := subdir*SingleDirDataFileLimit + file
	// Close the handle before unlinking: dropping it from the map without
	// closing leaks the descriptor, and on Unix an open descriptor keeps the
	// unlinked file's disk space pinned.
	if fp, ok := s.filePointers[index]; ok {
		fp.Close()
	}
	delete(s.filePointers, index)
	return os.Remove(s.dataDir + "/" + strconv.Itoa(subdir) + "/" + strconv.Itoa(file))
}
