// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nbs

import (
	"bytes"
	"context"
	"errors"
	"io"
	"time"

	"golang.org/x/sync/errgroup"

	"github.com/dolthub/dolt/go/store/blobstore"
	"github.com/dolthub/dolt/go/store/chunks"
	"github.com/dolthub/dolt/go/store/hash"
)

const (
	// Blob-name extensions for the two-part table layout: chunk records and
	// the table tail (index+footer) are stored as separate sub-objects and
	// later concatenated into the final table blob (see Persist/ConjoinAll).
	tableRecordsExt = ".records"
	tableTailExt    = ".tail"
)

// blobstorePersister persists and conjoins NBS tables against a
// blobstore.Blobstore backend.
type blobstorePersister struct {
	bs        blobstore.Blobstore // backing blob storage
	q         MemoryQuotaProvider // memory quota for loading table indexes
	blockSize uint64              // block size handed to table readers
}

var _ tablePersister = &blobstorePersister{}
var _ tableFilePersister = &blobstorePersister{}

// Persist makes the contents of mt durable. Chunks already present in
// |haver| may be dropped in the process.
//
// The table is uploaded as two blobs — chunk records and the tail
// (index+footer) — which are then concatenated into the final table blob.
// The sub-objects are left in place so a later ConjoinAll can reuse the
// records blob without re-splitting the table.
func (bsp *blobstorePersister) Persist(ctx context.Context, mt *memTable, haver chunkReader, keeper keeperF, stats *Stats) (chunkSource, gcBehavior, error) {
	address, data, splitOffset, chunkCount, gcb, err := mt.write(haver, keeper, stats)
	if err != nil {
		return emptyChunkSource{}, gcBehavior_Continue, err
	}
	if gcb != gcBehavior_Continue {
		return emptyChunkSource{}, gcb, nil
	}
	if chunkCount == 0 {
		// nothing survived |haver| filtering; nothing to persist
		return emptyChunkSource{}, gcBehavior_Continue, nil
	}
	name := address.String()

	// persist this table in two parts to facilitate later conjoins
	records, tail := data[:splitOffset], data[splitOffset:]

	recordsName := name + tableRecordsExt
	tailName := name + tableTailExt

	// first write table records and tail (index+footer) as separate blobs,
	// concurrently
	eg, ectx := errgroup.WithContext(ctx)
	eg.Go(func() error {
		_, err := bsp.bs.Put(ectx, recordsName, int64(len(records)), bytes.NewBuffer(records))
		return err
	})
	eg.Go(func() error {
		_, err := bsp.bs.Put(ectx, tailName, int64(len(tail)), bytes.NewBuffer(tail))
		return err
	})
	if err = eg.Wait(); err != nil {
		// return emptyChunkSource{}, consistent with every other error path here
		return emptyChunkSource{}, gcBehavior_Continue, err
	}

	// then concatenate into a final blob
	if _, err = bsp.bs.Concatenate(ctx, name, []string{recordsName, tailName}); err != nil {
		return emptyChunkSource{}, gcBehavior_Continue, err
	}
	// |data| already holds the index in memory, so build the reader from it
	// rather than re-fetching the tail from the blobstore
	rdr := &bsTableReaderAt{key: name, bs: bsp.bs}
	src, err := newReaderFromIndexData(ctx, bsp.q, data, address, rdr, bsp.blockSize)
	if err != nil {
		return emptyChunkSource{}, gcBehavior_Continue, err
	}
	return src, gcBehavior_Continue, nil
}

// ConjoinAll implements tablePersister. It concatenates the chunk records of
// |sources| into one new blob and appends a freshly merged index+footer tail.
func (bsp *blobstorePersister) ConjoinAll(ctx context.Context, sources chunkSources, stats *Stats) (chunkSource, cleanupFunc, error) {
	plan, err := planRangeCopyConjoin(ctx, sources, bsp.q, stats)
	if err != nil {
		return nil, nil, err
	}
	defer plan.closer()

	if plan.chunkCount == 0 {
		return emptyChunkSource{}, nil, nil
	}

	name := plan.name.String() + plan.suffix

	// conjoin must contiguously append the chunk records of |sources|, but the raw content
	// of each source contains a chunk index in the tail. Blobstore does not expose a range
	// copy (GCP Storage limitation), so we must create sub-objects from each source that
	// contain only chunk records. We make an effort to store these sub-objects on Persist(),
	// but we will create them in getRecordsSubObject if necessary.

	// exactly one records sub-object per planned source
	conjoinees := make([]string, 0, len(plan.sources.sws))
	for _, src := range plan.sources.sws {
		sub, err := bsp.getRecordsSubObject(ctx, src.source)
		if err != nil {
			return nil, nil, err
		}
		conjoinees = append(conjoinees, sub)
	}

	// first concatenate all the sub-objects to create a composite sub-object
	recordsName := name + tableRecordsExt
	tailName := name + tableTailExt
	if _, err = bsp.bs.Concatenate(ctx, recordsName, conjoinees); err != nil {
		return nil, nil, err
	}
	if _, err = blobstore.PutBytes(ctx, bsp.bs, tailName, plan.mergedIndex); err != nil {
		return nil, nil, err
	}
	// then concatenate into a final blob
	if _, err = bsp.bs.Concatenate(ctx, name, []string{recordsName, tailName}); err != nil {
		// return nil, consistent with the other error paths above
		return nil, nil, err
	}

	// open a chunk source over the newly conjoined blob
	var cs chunkSource
	if plan.suffix == ArchiveFileSuffix {
		cs, err = newBSArchiveChunkSource(ctx, bsp.bs, plan.name, bsp.q, stats)
	} else {
		cs, err = newBSTableChunkSource(ctx, bsp.bs, plan.name, plan.chunkCount, bsp.q, stats)
	}

	return cs, func() {}, err
}

// getRecordsSubObject returns the name of the chunk-records sub-object for
// |cs|, creating it from the full table/archive blob if it does not already
// exist (Persist and CopyTableFile normally write it ahead of time).
func (bsp *blobstorePersister) getRecordsSubObject(ctx context.Context, cs chunkSource) (name string, err error) {
	suffix := cs.suffix()
	name = cs.hash().String() + suffix + tableRecordsExt

	// fast path: the sub-object may have been written on Persist()
	exists, err := bsp.bs.Exists(ctx, name)
	if err != nil {
		return "", err
	}
	if exists {
		return name, nil
	}

	// otherwise create the sub-object from the full blob
	// (requires a round-trip for remote blobstores)
	switch suffix {
	case ArchiveFileSuffix:
		err = bsp.hotCreateArchiveRecords(ctx, cs)
	default:
		err = bsp.hotCreateTableRecords(ctx, cs)
	}

	return name, err
}

// hotCreateTableRecords writes the chunk-records sub-object for a noms table
// source by copying everything preceding the table tail (index+footer) out of
// the full table blob. Requires a read+write round-trip for remote blobstores.
//
// The result is a named return so the deferred Close can propagate its error;
// the original assigned to a local err inside the defer, which had no effect
// on the returned value and silently dropped Close failures.
func (bsp *blobstorePersister) hotCreateTableRecords(ctx context.Context, cs chunkSource) (err error) {
	cnt, err := cs.count()
	if err != nil {
		return err
	}
	// the records span is everything before the tail
	off := tableTailOffset(cs.currentSize(), cnt)
	l := int64(off)
	rng := blobstore.NewBlobRange(0, l)

	rdr, _, _, err := bsp.bs.Get(ctx, cs.hash().String(), rng)
	if err != nil {
		return err
	}
	defer func() {
		// propagate Close errors, but never mask an earlier Put error
		if cerr := rdr.Close(); cerr != nil && err == nil {
			err = cerr
		}
	}()

	_, err = bsp.bs.Put(ctx, cs.hash().String()+tableRecordsExt, l, rdr)
	return err
}

// hotCreateArchiveRecords writes the chunk-records sub-object for an archive
// source by copying the archive's data span (the bytes before the index and
// footer) out of the full archive blob.
func (bsp *blobstorePersister) hotCreateArchiveRecords(ctx context.Context, cs chunkSource) (err error) {
	arch, ok := cs.(archiveChunkSource)
	if !ok {
		return errors.New("runtime error: hotCreateArchiveRecords expected archiveChunkSource")
	}

	dataLen := int64(arch.aRdr.footer.dataSpan().length)
	rng := blobstore.NewBlobRange(0, dataLen)

	rdr, _, _, err := bsp.bs.Get(ctx, arch.hash().String()+ArchiveFileSuffix, rng)
	if err != nil {
		return err
	}
	defer func() {
		// propagate Close errors via the named return, but never mask an
		// earlier Put error (previously the Close error was discarded)
		if cerr := rdr.Close(); cerr != nil && err == nil {
			err = cerr
		}
	}()

	key := arch.hash().String() + ArchiveFileSuffix + tableRecordsExt
	_, err = bsp.bs.Put(ctx, key, dataLen, rdr)
	return err
}

// Open a table named |name|, containing |chunkCount| chunks. If no noms
// table blob exists under that name, fall back to opening it as an archive.
func (bsp *blobstorePersister) Open(ctx context.Context, name hash.Hash, chunkCount uint32, stats *Stats) (chunkSource, error) {
	src, err := newBSTableChunkSource(ctx, bsp.bs, name, chunkCount, bsp.q, stats)
	if err == nil {
		return src, nil
	}
	if !blobstore.IsNotFoundError(err) {
		return nil, err
	}
	// no table blob by this name; it may be stored in archive format
	return newBSArchiveChunkSource(ctx, bsp.bs, name, bsp.q, stats)
}

// Exists reports whether a blob named |name| is present in the backing
// blobstore. The chunk count and stats arguments are unused by this backend.
func (bsp *blobstorePersister) Exists(ctx context.Context, name string, _ uint32, _ *Stats) (bool, error) {
	ok, err := bsp.bs.Exists(ctx, name)
	return ok, err
}

// PruneTableFiles implements tablePersister. It is a no-op for the
// blobstore backend.
func (bsp *blobstorePersister) PruneTableFiles(ctx context.Context, keeper func() []hash.Hash, t time.Time) error {
	return nil
}

// Close implements tablePersister. The persister holds no resources of its
// own to release.
func (bsp *blobstorePersister) Close() error {
	return nil
}

// AccessMode implements tablePersister; blobstore-backed storage is always
// shared-access.
func (bsp *blobstorePersister) AccessMode() chunks.ExclusiveAccessMode {
	return chunks.ExclusiveAccessMode_Shared
}

// Path implements tableFilePersister. Blobstore tables have no local
// filesystem path, so this returns the empty string.
func (bsp *blobstorePersister) Path() string {
	return ""
}

// CopyTableFile uploads a table file of |fileSz| bytes read from |r|. When
// |splitOffset| is non-zero it marks the boundary between chunk records and
// the tail (index+footer): the two parts are uploaded as separate sub-objects
// (concurrently when |r| supports random access) and then concatenated into
// the final blob, mirroring the layout produced by Persist. Restructured with
// early returns instead of the original else-after-return nesting; behavior
// is unchanged.
func (bsp *blobstorePersister) CopyTableFile(ctx context.Context, r io.Reader, name string, fileSz uint64, splitOffset uint64) error {
	if splitOffset == 0 {
		// no split offset, just copy the whole table. We will create the records object on demand if needed.
		_, err := bsp.bs.Put(ctx, name, int64(fileSz), r)
		return err
	}

	lr := io.LimitReader(r, int64(splitOffset))
	indexLen := int64(fileSz - splitOffset)

	recordsName := name + tableRecordsExt
	tailName := name + tableTailExt

	// check if we can Put concurrently
	if rr, ok := r.(io.ReaderAt); ok {
		// on the push path, we expect to Put concurrently
		// see BufferedFileByteSink in byte_sink.go
		eg, ectx := errgroup.WithContext(ctx)
		eg.Go(func() error {
			srdr := io.NewSectionReader(rr, int64(splitOffset), indexLen)
			_, err := bsp.bs.Put(ectx, tailName, indexLen, srdr)
			return err
		})
		eg.Go(func() error {
			_, err := bsp.bs.Put(ectx, recordsName, int64(splitOffset), lr)
			return err
		})
		if err := eg.Wait(); err != nil {
			return err
		}
	} else {
		// sequentially write chunk records then tail
		if _, err := bsp.bs.Put(ctx, recordsName, int64(splitOffset), lr); err != nil {
			return err
		}
		if _, err := bsp.bs.Put(ctx, tailName, indexLen, r); err != nil {
			return err
		}
	}

	// finally concatenate into the complete table
	_, err := bsp.bs.Concatenate(ctx, name, []string{recordsName, tailName})
	return err
}

// bsTableReaderAt adapts a single blob in a blobstore to the tableReaderAt
// interface, serving reads via ranged Gets against |key|.
type bsTableReaderAt struct {
	bs  blobstore.Blobstore // backing blob storage
	key string              // name of the blob to read
}

// Close is a no-op; each read opens and closes its own blobstore stream, so
// the reader itself holds nothing to release.
func (bsTRA *bsTableReaderAt) Close() error {
	return nil
}

// clone returns the receiver itself: the struct's fields are never mutated
// after construction, so a shared instance is safe to hand out.
func (bsTRA *bsTableReaderAt) clone() (tableReaderAt, error) {
	return bsTRA, nil
}

// Reader returns a ReadCloser streaming the entire blob from the blobstore.
func (bsTRA *bsTableReaderAt) Reader(ctx context.Context) (io.ReadCloser, error) {
	rc, _, _, err := bsTRA.bs.Get(ctx, bsTRA.key, blobstore.AllRange)
	return rc, err
}

// ReadAtWithStats is the bsTableReaderAt implementation of the tableReaderAt
// interface. It issues a ranged Get for len(p) bytes at |off| and reads the
// stream into p.
//
// NOTE(review): on EOF before p is full, the byte count is returned with a
// nil error (unlike io.ReaderAt's contract) — existing callers may rely on
// this, so it is preserved. Fixed here: a mid-stream read error now returns
// the bytes read so far (including the failing Read's n, per the io.Reader
// contract) instead of 0.
func (bsTRA *bsTableReaderAt) ReadAtWithStats(ctx context.Context, p []byte, off int64, stats *Stats) (int, error) {
	br := blobstore.NewBlobRange(off, int64(len(p)))
	rc, _, _, err := bsTRA.bs.Get(ctx, bsTRA.key, br)
	if err != nil {
		return 0, err
	}
	defer rc.Close()

	totalRead := 0
	for totalRead < len(p) {
		n, err := rc.Read(p[totalRead:])
		// count bytes before inspecting err: Read may return n > 0 with an error
		totalRead += n

		if err == io.EOF {
			break
		}
		if err != nil {
			return totalRead, err
		}
	}

	return totalRead, nil
}

// newBSArchiveChunkSource opens an archive stored in |bs| under
// |name|+ArchiveFileSuffix: it fetches the fixed-size footer from the end of
// the blob and constructs an archive reader from it.
func newBSArchiveChunkSource(ctx context.Context, bs blobstore.Blobstore, name hash.Hash, q MemoryQuotaProvider, stats *Stats) (cs chunkSource, err error) {
	key := name.String() + ArchiveFileSuffix

	// negative offset: range addresses the last archiveFooterSize bytes
	rc, blobSz, _, err := bs.Get(ctx, key, blobstore.NewBlobRange(-int64(archiveFooterSize), 0))
	if err != nil {
		return nil, err
	}
	defer rc.Close()

	footer := make([]byte, archiveFooterSize)
	if _, err = io.ReadFull(rc, footer); err != nil {
		return nil, err
	}

	aRdr, err := newArchiveReaderFromFooter(ctx, &bsTableReaderAt{key: key, bs: bs}, name, blobSz, footer, q, stats)
	if err != nil {
		return emptyChunkSource{}, err
	}
	return archiveChunkSource{aRdr, ""}, nil
}

// newBSTableChunkSource opens a noms table blob named |name| in |bs|,
// loading its index from the tail of the blob and wrapping the result in a
// table reader.
func newBSTableChunkSource(ctx context.Context, bs blobstore.Blobstore, name hash.Hash, chunkCount uint32, q MemoryQuotaProvider, stats *Stats) (cs chunkSource, err error) {
	index, err := loadTableIndex(ctx, stats, chunkCount, q, func(p []byte) error {
		// fetch the last len(p) bytes of the blob (negative offset is
		// end-relative), which hold the table index and footer
		rc, _, _, err := bs.Get(ctx, name.String(), blobstore.NewBlobRange(-int64(len(p)), 0))
		if err != nil {
			return err
		}
		defer rc.Close()

		_, err = io.ReadFull(rc, p)
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	// sanity check: the parsed index must agree with the expected count
	if chunkCount != index.chunkCount() {
		return nil, errors.New("unexpected chunk count")
	}

	tr, err := newTableReader(ctx, index, &bsTableReaderAt{key: name.String(), bs: bs}, s3BlockSize)
	if err != nil {
		// release the index's quota-tracked memory on failure; Close error
		// is deliberately ignored in favor of the reader-construction error
		_ = index.Close()
		return nil, err
	}
	return &chunkSourceAdapter{tr, name}, nil
}

// tableTailOffset returns the byte offset at which a table's tail (chunk
// index plus footer) begins, given the table's total |size| and chunk |count|.
func tableTailOffset(size uint64, count uint32) uint64 {
	tailLen := indexSize(count) + footerSize
	return size - tailLen
}
