package graphsplit

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"github.com/filecoin-project/go-fil-markets/stores"
	"github.com/ipfs/go-blockservice"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-cidutil"
	"github.com/ipfs/go-datastore"
	dss "github.com/ipfs/go-datastore/sync"
	bstore "github.com/ipfs/go-ipfs-blockstore"
	chunker "github.com/ipfs/go-ipfs-chunker"
	offline "github.com/ipfs/go-ipfs-exchange-offline"
	files "github.com/ipfs/go-ipfs-files"
	format "github.com/ipfs/go-ipld-format"
	"github.com/ipfs/go-merkledag"
	dag "github.com/ipfs/go-merkledag"
	"github.com/ipfs/go-unixfs"
	"github.com/ipfs/go-unixfs/importer/balanced"
	ihelper "github.com/ipfs/go-unixfs/importer/helpers"
	"github.com/ipld/go-car"
	"github.com/mitchellh/go-homedir"
	mh "github.com/multiformats/go-multihash"
	"golang.org/x/xerrors"
	"gorm.io/gorm"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/filecoin-project/go-fil-markets/shared"
	ipld "github.com/ipfs/go-ipld-format"
	ipldprime "github.com/ipld/go-ipld-prime"
	basicnode "github.com/ipld/go-ipld-prime/node/basic"
	"github.com/ipld/go-ipld-prime/traversal/selector"
	"github.com/ipld/go-ipld-prime/traversal/selector/builder"
)

// Db is the gorm database handle used by save to persist FileBaseInfo rows.
// NOTE(review): it must be initialized by the caller before any graph is
// built — a nil Db will panic inside save; confirm initialization elsewhere.
var Db *gorm.DB

// DAG layout parameters for the CIDv0 import path (BuildFileNode).
const UnixfsLinksPerLevel = 1 << 10   // max links per intermediate unixfs node
const UnixfsChunkSize uint64 = 1 << 20 // 1 MiB leaf chunk size

// DefaultHashFunction is the multihash code used by unixFSCidBuilder.
var DefaultHashFunction = uint64(mh.BLAKE2B_MIN + 31)

// DAG layout parameters for the CIDv1 / filestore import path (buildUnixFS).
const BuildUnixfsLinksPerLevel = 1024
const BuildUnixfsChunkSize uint64 = 1 << 20

// MaxTraversalLinks bounds link traversal when writing the selective CAR in
// BuildIpldGraph2.
var MaxTraversalLinks uint64 = 32 * (1 << 20)

// counter is not referenced in this file — presumably used elsewhere in the
// package; verify before removing.
var counter uint64

// wg tracks in-flight BuildIpldGraph2 goroutines: callers Add before spawning,
// the goroutine calls Done in its deferred cleanup.
var wg = &sync.WaitGroup{}

// Finfo describes one input file, or a byte range of one when a large file is
// split across several car slices.
type Finfo struct {
	Path      string      // path on disk as discovered by GetFileList*
	Name      string      // base name of the file
	Info      os.FileInfo // stat result captured at discovery time
	SeekStart int64       // first byte of the slice; 0 means start of file
	SeekEnd   int64       // last byte of the slice (inclusive); 0 means end of file
}

// fsNode is the JSON-serializable description of one node (file or directory)
// in the built unixfs tree; FSBuilder.Build returns the root of this tree.
type fsNode struct {
	Name string   // link name (empty for the root)
	Hash string   // CID of the node
	Size uint64   // unixfs file size / link size
	Link []fsNode // children; populated only for directories
}

// FSBuilder walks a unixfs DAG from a root ProtoNode (resolving children via
// the DAG service) and renders it as an fsNode tree.
type FSBuilder struct {
	root *dag.ProtoNode
	ds   ipld.DAGService
}
// FileBaseInfo is the gorm model persisted (via the package-level Db) for
// every generated car file; rows are written by save.
type FileBaseInfo struct {
	ID           uint   `gorm:"primarykey"`
	PayloadCID   string `json:"payload_cid" gorm:"column:payload_cid"`
	FileName     string `json:"file_name" gorm:"column:file_name"`
	PieceCID     string `json:"piece_cid" gorm:"column:piece_cid"`
	DataCID      string `json:"data_cid" gorm:"column:data_cid"`
	FileSize     uint64 `json:"file_size" gorm:"column:file_size"` // NOTE(review): save() fills this with the piece size, not the file size — confirm intended
	SendTimes    uint32 `json:"send_times" gorm:"column:send_times"`
	SuccessTimes uint32 `json:"success_times" gorm:"column:success_times"`
}

// TableName tells gorm which table FileBaseInfo maps to.
func (FileBaseInfo) TableName() string {
	return "t_file_base_info"
}

// NewFSBuilder constructs an FSBuilder over the given root node and DAG
// service.
func NewFSBuilder(root *dag.ProtoNode, ds ipld.DAGService) *FSBuilder {
	return &FSBuilder{root: root, ds: ds}
}

// Build renders the builder's root as an fsNode tree: a single node for a
// plain file, or the node plus one child per link for a directory.
func (b *FSBuilder) Build() (*fsNode, error) {
	fsn, err := unixfs.FSNodeFromBytes(b.root.Data())
	if err != nil {
		return nil, xerrors.Errorf("input dag is not a unixfs node: %s", err)
	}

	out := &fsNode{
		Hash: b.root.Cid().String(),
		Size: fsn.FileSize(),
		Link: []fsNode{},
	}
	// Plain files have no children to descend into.
	if !fsn.IsDir() {
		return out, nil
	}
	for _, lnk := range b.root.Links() {
		child, err := b.getNodeByLink(lnk)
		if err != nil {
			return nil, err
		}
		out.Link = append(out.Link, child)
	}
	return out, nil
}

// getNodeByLink resolves a single DAG link into an fsNode, recursing into
// children when the linked node is a unixfs directory.  Fetch and parse
// failures are logged and also propagated through the named err result.
func (b *FSBuilder) getNodeByLink(ln *format.Link) (fn fsNode, err error) {
	ctx := context.Background()
	fn = fsNode{
		Name: ln.Name,
		Hash: ln.Cid.String(),
		Size: ln.Size,
	}
	nd, err := b.ds.Get(ctx, ln.Cid)
	if err != nil {
		log.Warn(err)
		return
	}

	// Only ProtoNodes carry the unixfs metadata we need to inspect.
	nnd, ok := nd.(*dag.ProtoNode)
	if !ok {
		err = xerrors.Errorf("failed to transformed to dag.ProtoNode")
		return
	}
	fsn, err := unixfs.FSNodeFromBytes(nnd.Data())
	if err != nil {
		log.Warnf("input dag is not a unixfs node: %s", err)
		return
	}
	// Files are leaves; only directories need their links expanded.
	if !fsn.IsDir() {
		return
	}
	for _, ln := range nnd.Links() {
		node, err := b.getNodeByLink(ln)
		if err != nil {
			return node, err
		}
		fn.Link = append(fn.Link, node)
	}
	return
}

// BuildIpldGraph builds the unixfs DAG for fileList and reports the result
// through cb: OnError on failure, otherwise OnSuccess with the root node, the
// graph name and the JSON description of the file tree.
func BuildIpldGraph(ctx context.Context, fileList []Finfo, graphName, parentPath, carDir string, parallel int, cb GraphBuildCallback) {
	root, detail, err := buildIpldGraph(ctx, fileList, parentPath, carDir, parallel)
	if err != nil {
		cb.OnError(err)
		return
	}
	cb.OnSuccess(root, graphName, detail)
}

// buildIpldGraph assembles every file in fileList into one unixfs DAG rooted
// at a synthetic directory, mirroring the on-disk layout below parentPath.
// It returns the root node and a JSON rendering of the resulting file tree.
//
// parallel bounds the number of concurrent BuildFileNode workers (capped at
// the CPU count).  Unlike the original version, any failure to build a file
// node or to link nodes is returned as an error rather than being logged and
// later surfacing as a panic on a missing map entry.
func buildIpldGraph(ctx context.Context, fileList []Finfo, parentPath, carDir string, parallel int) (ipld.Node, string, error) {
	bs2 := bstore.NewBlockstore(dss.MutexWrap(datastore.NewMapDatastore()))
	dagServ := merkledag.NewDAGService(blockservice.New(bs2, offline.Exchange(bs2)))

	cidBuilder, err := merkledag.PrefixForCidVersion(0)
	if err != nil {
		return nil, "", err
	}
	fileNodeMap := make(map[string]*dag.ProtoNode)
	dirNodeMap := make(map[string]*dag.ProtoNode)

	rootNode := unixfs.EmptyDirNode()
	rootNode.SetCidBuilder(cidBuilder)
	const rootKey = "root"
	dirNodeMap[rootKey] = rootNode

	fmt.Println("************ start to build ipld **************")

	// Phase 1: build one file node per input, with at most `parallel` workers.
	cpun := runtime.NumCPU()
	if parallel > cpun {
		parallel = cpun
	}
	pchan := make(chan struct{}, parallel)
	wg := sync.WaitGroup{}
	lock := sync.Mutex{}
	var firstErr error
	setErr := func(e error) {
		lock.Lock()
		if firstErr == nil {
			firstErr = e
		}
		lock.Unlock()
	}
	for _, item := range fileList {
		wg.Add(1)
		go func(item Finfo) {
			defer func() {
				<-pchan
				wg.Done()
			}()
			pchan <- struct{}{}
			fileNode, err := BuildFileNode(item, dagServ, cidBuilder)
			if err != nil {
				log.Warn(err)
				setErr(err)
				return
			}
			fn, ok := fileNode.(*dag.ProtoNode)
			if !ok {
				e := xerrors.Errorf("file node should be *dag.ProtoNode")
				log.Warn(e)
				setErr(e)
				return
			}
			lock.Lock()
			fileNodeMap[item.Path] = fn
			lock.Unlock()
			fmt.Println(item.Path)
		}(item)
	}
	wg.Wait()
	if firstErr != nil {
		return nil, "", xerrors.Errorf("building file nodes: %w", firstErr)
	}

	// Phase 2: build the directory tree, linking each file node under its
	// path components relative to parentPath.
	for _, item := range fileList {
		dirStr := path.Dir(item.Path)
		parentPath = path.Clean(parentPath)
		// When the parent path equals the target path, the target is itself a
		// file and has no directory component inside the graph.
		if parentPath == path.Clean(item.Path) {
			dirStr = ""
		} else if parentPath != "" && strings.HasPrefix(dirStr, parentPath) {
			dirStr = dirStr[len(parentPath):]
		}
		dirStr = strings.TrimPrefix(dirStr, "/")
		var dirList []string
		if dirStr != "" {
			dirList = strings.Split(dirStr, "/")
		}
		fileNode, ok := fileNodeMap[item.Path]
		if !ok {
			// Defensive: should be unreachable now that worker errors abort.
			return nil, "", xerrors.Errorf("unexpected, missing file node for %s", item.Path)
		}
		if len(dirList) == 0 {
			if err := dirNodeMap[rootKey].AddNodeLink(item.Name, fileNode); err != nil {
				return nil, "", err
			}
			continue
		}
		// Walk the directory chain from deepest to shallowest, creating
		// directory nodes on demand and (re)linking each into its parent.
		for i := len(dirList) - 1; i >= 0; i-- {
			dir := dirList[i]
			dirKey := getDirKey(dirList, i)
			dirNode, ok := dirNodeMap[dirKey]
			if !ok {
				dirNode = unixfs.EmptyDirNode()
				dirNode.SetCidBuilder(cidBuilder)
				dirNodeMap[dirKey] = dirNode
			}
			// Attach the file to its nearest (deepest) parent directory.
			if i == len(dirList)-1 {
				if err := dirNode.AddNodeLink(item.Name, fileNode); err != nil {
					return nil, "", err
				}
			}
			parentKey := rootKey
			if i > 0 {
				parentKey = getDirKey(dirList, i-1)
			}
			parentNode, ok := dirNodeMap[parentKey]
			if !ok {
				parentNode = unixfs.EmptyDirNode()
				parentNode.SetCidBuilder(cidBuilder)
				dirNodeMap[parentKey] = parentNode
			}
			if isLinked(parentNode, dir) {
				parentNode, err = parentNode.UpdateNodeLink(dir, dirNode)
				if err != nil {
					return nil, "", err
				}
				dirNodeMap[parentKey] = parentNode
			} else {
				if err := parentNode.AddNodeLink(dir, dirNode); err != nil {
					return nil, "", err
				}
			}
		}
	}

	// Persist every directory node into the DAG service.
	for _, node := range dirNodeMap {
		if err := dagServ.Add(ctx, node); err != nil {
			return nil, "", err
		}
	}

	rootNode = dirNodeMap[rootKey]

	fsBuilder := NewFSBuilder(rootNode, dagServ)
	fsn, err := fsBuilder.Build()
	if err != nil {
		return nil, "", err
	}
	fsNodeBytes, err := json.Marshal(fsn)
	if err != nil {
		return nil, "", err
	}
	fmt.Println("++++++++++++ finished to build ipld +++++++++++++")
	return rootNode, string(fsNodeBytes), nil
}

// allSelector returns an ipld-prime selector node that recursively matches
// every node reachable from the root, with no recursion depth limit.
func allSelector() ipldprime.Node {
	ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any)
	spec := ssb.ExploreRecursive(
		selector.RecursionLimitNone(),
		ssb.ExploreAll(ssb.ExploreRecursiveEdge()),
	)
	return spec.Node()
}

// getDirKey returns the map key identifying the directory at depth i: the
// path components dirList[0..i] joined with ".".  Replaces the original
// quadratic string concatenation loop with strings.Join.
func getDirKey(dirList []string, i int) string {
	return strings.Join(dirList[:i+1], ".")
}

// isLinked reports whether node already carries a child link named name.
func isLinked(node *dag.ProtoNode, name string) bool {
	for _, l := range node.Links() {
		if l.Name == name {
			return true
		}
	}
	return false
}

type fileSlice struct {
	r        *os.File
	offset   int64
	start    int64
	end      int64
	fileSize int64
}

func (fs *fileSlice) Read(p []byte) (n int, err error) {
	if fs.end == 0 {
		fs.end = fs.fileSize - 1
	}
	if fs.offset == 0 && fs.start > 0 {
		_, err = fs.r.Seek(fs.start, 0)
		if err != nil {
			log.Warn(err)
			return 0, err
		}
		fs.offset = fs.start
	}
	//fmt.Printf("offset: %d, end: %d, start: %d, size: %d\n", fs.offset, fs.end, fs.start, fs.fileSize)
	if fs.end-fs.offset+1 == 0 {
		return 0, io.EOF
	}
	if fs.end-fs.offset+1 < 0 {
		return 0, xerrors.Errorf("read data out bound of the slice")
	}
	plen := len(p)
	leftLen := fs.end - fs.offset + 1
	if leftLen > int64(plen) {
		n, err = fs.r.Read(p)
		if err != nil {
			log.Warn(err)
			return
		}
		//fmt.Printf("read num: %d\n", n)
		fs.offset += int64(n)
		return
	}
	b := make([]byte, leftLen)
	n, err = fs.r.Read(b)
	if err != nil {
		return
	}
	//fmt.Printf("read num: %d\n", n)
	fs.offset += int64(n)

	return copy(p, b), io.EOF
}

// BuildFileNode chunks the file described by item — or just the byte range
// [SeekStart, SeekEnd] when one is set — and lays it out as a balanced unixfs
// DAG in bufDs, returning the resulting root node.
func BuildFileNode(item Finfo, bufDs ipld.DAGService, cidBuilder cid.Builder) (node ipld.Node, err error) {
	f, err := os.Open(item.Path)
	if err != nil {
		return nil, err
	}
	// The original leaked the file handle; ensure it is always released.
	defer f.Close() //nolint:errcheck

	var r io.Reader = f
	// A non-zero seek range means this Finfo describes a slice of the file.
	if item.SeekStart > 0 || item.SeekEnd > 0 {
		r = &fileSlice{
			r:        f,
			start:    item.SeekStart,
			end:      item.SeekEnd,
			fileSize: item.Info.Size(),
		}
	}

	params := ihelper.DagBuilderParams{
		Maxlinks:   UnixfsLinksPerLevel,
		RawLeaves:  false,
		CidBuilder: cidBuilder,
		Dagserv:    bufDs,
		NoCopy:     false,
	}
	db, err := params.New(chunker.NewSizeSplitter(r, int64(UnixfsChunkSize)))
	if err != nil {
		return nil, err
	}
	return balanced.Layout(db)
}

// GenGraphName returns the car file name for slice sliceCount (zero-based)
// out of sliceTotal.  A single-slice graph keeps the bare name; multi-slice
// graphs encode the total and the 1-based part number.
func GenGraphName(graphName string, sliceCount, sliceTotal int) string {
	if sliceTotal == 1 {
		return graphName + ".car"
	}
	return fmt.Sprintf("%s-total-%d-part-%d.car", graphName, sliceTotal, sliceCount+1)
}

// GetGraphCount returns how many car slices of sliceSize bytes are needed to
// hold all files under args (0 when there is no data).  It panics on
// filesystem errors, matching the CLI-oriented callers of this helper.
func GetGraphCount(args []string, sliceSize int64) int {
	list, err := GetFileList(args)
	if err != nil {
		panic(err)
	}
	var totalSize int64
	for _, p := range list {
		finfo, err := os.Stat(p)
		if err != nil {
			panic(err)
		}
		totalSize += finfo.Size()
	}
	if totalSize == 0 {
		return 0
	}
	// Ceiling division.  The original (totalSize/sliceSize)+1 over-counted by
	// one slice whenever totalSize was an exact multiple of sliceSize.
	return int((totalSize + sliceSize - 1) / sliceSize)
}

// GetFileListAsync walks args recursively and streams a Finfo for every
// regular file found on the returned channel.  Entries whose name starts with
// "." (hidden files/directories) are skipped.  The channel is closed when the
// walk finishes; on a filesystem error the walk aborts early and the error is
// only logged.
func GetFileListAsync(args []string) chan Finfo {
	fichan := make(chan Finfo)
	go func() {
		defer close(fichan)
		for _, p := range args {
			finfo, err := os.Stat(p)
			if err != nil {
				log.Warn(err)
				return
			}
			// Skip hidden files and directories.
			if strings.HasPrefix(finfo.Name(), ".") {
				continue
			}
			if !finfo.IsDir() {
				fichan <- Finfo{
					Path: p,
					Name: finfo.Name(),
					Info: finfo,
				}
				continue
			}
			entries, err := os.ReadDir(p)
			if err != nil {
				log.Warn(err)
				return
			}
			children := make([]string, 0, len(entries))
			for _, e := range entries {
				children = append(children, fmt.Sprintf("%s/%s", p, e.Name()))
			}
			// Recurse and forward everything the child walk produces.  (The
			// original re-checked a stale err here — dead code, removed.)
			for item := range GetFileListAsync(children) {
				fichan <- item
			}
		}
	}()
	return fichan
}

// GetFileList recursively expands args into the list of regular file paths
// beneath them.  Entries whose name starts with "." (hidden files and
// directories) are skipped.  Returns the first filesystem error encountered.
func GetFileList(args []string) ([]string, error) {
	fileList := make([]string, 0)
	for _, p := range args {
		finfo, err := os.Stat(p)
		if err != nil {
			return nil, err
		}
		// Skip hidden files and directories.
		if strings.HasPrefix(finfo.Name(), ".") {
			continue
		}
		if !finfo.IsDir() {
			fileList = append(fileList, p)
			continue
		}
		// os.ReadDir replaces the deprecated ioutil.ReadDir; both return
		// entries sorted by name, and only Name() is used here.
		entries, err := os.ReadDir(p)
		if err != nil {
			return nil, err
		}
		children := make([]string, 0, len(entries))
		for _, e := range entries {
			children = append(children, fmt.Sprintf("%s/%s", p, e.Name()))
		}
		sub, err := GetFileList(children)
		if err != nil {
			return nil, err
		}
		fileList = append(fileList, sub...)
	}
	return fileList, nil
}

// PieceInfo is one row of a piece-info CSV export.
type PieceInfo struct {
	PayloadCid string `csv:"payload_cid"`
	Filename   string `csv:"filename"`
	PieceCid   string `csv:"piece_cid"`
	PieceSize  uint64 `csv:"piece_size"`
}

// Manifest is one row of a manifest CSV export (payload CID to file name).
type Manifest struct {
	PayloadCid string `csv:"payload_cid"`
	Filename   string `csv:"filename"`
}

// GenGraphName2 returns the car file name for a single-file graph.
func GenGraphName2(name string) string {
	return name + ".car"
}

// BuildIpldGraph2 builds the unixfs DAG for a single file, imports it through
// a temporary CARv2 filestore to obtain the data root CID, writes a dense
// deterministic CAR via a selective traversal, and records the result
// (manifest.csv + database) through save.
//
// It is meant to run as a goroutine: the deferred block releases a slot on
// fileChan and marks the package-level WaitGroup done, and panics are
// recovered (and now logged) so one bad file cannot kill the whole batch.
func BuildIpldGraph2(ctx context.Context, fileInfo Finfo, graphName, parentPath, carDir string, parallel int, fileChan chan Finfo) {
	defer func() {
		if e := recover(); e != nil {
			log.Errorf("BuildIpldGraph2 recovered from panic: %v", e)
		}
		<-fileChan
		wg.Done()
	}()
	node, _, err := buildIpldGraph(ctx, []Finfo{fileInfo}, parentPath, carDir, parallel)
	if err != nil {
		log.Errorf("BuildIpldGraph2 failed,error: %v", err)
		return
	}

	// Stage 1: import the file into a temporary CARv2 filestore to compute
	// the data root CID.
	id := shared.NewTimeCounter().Next()
	carPathTemp, err := AllocateCAR(id, carDir)
	if err != nil {
		log.Errorf("failed to allocate temp car file: %v", err)
		return
	}
	defer os.Remove(carPathTemp)

	root, err := createUnixFSFilestore(context.Background(), fileInfo.Path, carPathTemp)
	if err != nil {
		// Original logged root before this check, printing an undefined CID on
		// failure; log only on success now.
		log.Errorf("failed to create unixfs filestore: %v", err)
		return
	}
	log.Infof("fileInfo.Path==:%v,dataCid==:%v,carPath==:%v", fileInfo.Path, root, carPathTemp)

	// Open the positional reference CAR as a read-only filestore.
	fs, err := stores.ReadOnlyFilestore(carPathTemp)
	if err != nil {
		log.Errorf("failed to open filestore: %v", err)
		return
	}
	defer fs.Close() //nolint:errcheck

	// Stage 2: build a dense deterministic CAR (dense = containing filled
	// leaves) by traversing everything under the root.
	ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any)
	allSelector := ssb.ExploreRecursive(
		selector.RecursionLimitNone(),
		ssb.ExploreAll(ssb.ExploreRecursiveEdge())).Node()
	sc := car.NewSelectiveCar(ctx,
		fs,
		[]car.Dag{{Root: root, Selector: allSelector}},
		car.MaxTraversalLinks(MaxTraversalLinks),
	)
	// The original discarded this error and went straight to os.Create.
	carPath, err := AllocateCAR(shared.NewTimeCounter().Next(), carDir)
	if err != nil {
		log.Errorf("failed to allocate car file: %v", err)
		return
	}
	f, err := os.Create(carPath)
	if err != nil {
		log.Errorf("failed to create car file: %v", err)
		return
	}
	if err = sc.Write(f); err != nil {
		// log.Errorf does not unwrap %w; use %v.  Abort instead of saving a
		// partial CAR (the original fell through to save).
		log.Errorf("failed to write CAR to output file: %v", err)
		f.Close()          //nolint:errcheck
		os.Remove(carPath) //nolint:errcheck
		return
	}
	if err := f.Close(); err != nil {
		log.Errorf("failed to close car file: %v", err)
	}

	save(node, graphName, carDir, carPath, root.String(), parentPath)
}

// save records a completed graph: it computes the piece commitment (commP) of
// the CAR at carfilepath, appends a row to manifest.csv inside carDir, and
// persists a FileBaseInfo row through the package-level gorm handle Db.  The
// CAR file is always deleted afterwards.  Filesystem/commP failures abort the
// process via log.Fatal, matching the CLI nature of this tool.
func save(node ipld.Node, graphName, carDir, carfilepath, root, parentPath string) {
	defer func() {
		os.Remove(carfilepath)
	}()

	commpStartTime := time.Now()
	cpRes, err := CalcCommP(context.TODO(), carfilepath)
	if err != nil {
		log.Fatal(err)
	}
	log.Infof("calculation of pieceCID completed, time elapsed: %s", time.Since(commpStartTime))

	// Append this node's info to manifest.csv, writing the header first when
	// the file does not exist yet.
	manifestPath := path.Join(carDir, "manifest.csv")
	_, err = os.Stat(manifestPath)
	if err != nil && !os.IsNotExist(err) {
		log.Fatal(err)
	}
	isCreateAction := err != nil && os.IsNotExist(err)
	f, err := os.OpenFile(manifestPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if isCreateAction {
		// NOTE(review): "playload_cid" is a typo but is kept verbatim because
		// downstream consumers may already parse this exact header.
		if _, err := f.Write([]byte("playload_cid,DataCid,filename,piece_cid,piece_size\n")); err != nil {
			log.Fatal(err)
		}
	}
	// Prefix the graph name with the last component of parentPath.  The
	// original sliced at strings.LastIndex unconditionally, which panicked
	// with a negative index whenever parentPath contained no "/".
	if idx := strings.LastIndex(parentPath, "/"); idx >= 0 {
		graphName = parentPath[idx:] + "/" + graphName
	}
	if _, err := f.Write([]byte(fmt.Sprintf("%s,%s,%s,%s,%d\n", node.Cid(), root, graphName, cpRes.Root.String(), cpRes.Size))); err != nil {
		log.Fatal(err)
	}

	// Persist to the database; the original ignored gorm's result entirely.
	info := &FileBaseInfo{
		PayloadCID: node.Cid().String(),
		FileName:   graphName,
		PieceCID:   cpRes.Root.String(),
		DataCID:    root,
		FileSize:   uint64(cpRes.Size),
	}
	if res := Db.Save(info); res.Error != nil {
		log.Errorf("failed to save file info to db: %v", res.Error)
	}
	log.Infof("save sqllite info==%+v", info)
}
// buildUnixFS chunks reader into a balanced unixfs DAG (CIDv1, raw leaves)
// stored in the given blockstore, and returns the root CID.  When filestore
// is true the DAG builder runs in no-copy mode, storing positional references
// instead of leaf data.
func buildUnixFS(ctx context.Context, reader io.Reader, into bstore.Blockstore, filestore bool) (cid.Cid, error) {
	b, err := unixFSCidBuilder()
	if err != nil {
		return cid.Undef, err
	}

	bsvc := blockservice.New(into, offline.Exchange(into))
	dags := merkledag.NewDAGService(bsvc)
	// Buffer writes to the DAG service; flushed by Commit below.
	bufdag := ipld.NewBufferedDAG(ctx, dags)

	params := ihelper.DagBuilderParams{
		Maxlinks:   BuildUnixfsLinksPerLevel,
		RawLeaves:  true,
		CidBuilder: b,
		Dagserv:    bufdag,
		NoCopy:     filestore,
	}

	db, err := params.New(chunker.NewSizeSplitter(reader, int64(BuildUnixfsChunkSize)))
	if err != nil {
		return cid.Undef, err
	}
	nd, err := balanced.Layout(db)
	if err != nil {
		return cid.Undef, err
	}

	// Flush all buffered nodes before reporting success.
	if err := bufdag.Commit(); err != nil {
		return cid.Undef, err
	}
	return nd.Cid(), nil
}
// unixFSCidBuilder returns the CID builder used by the filestore import path:
// CIDv1 with the package's DefaultHashFunction, inlining blocks of up to 126
// bytes as identity CIDs.
func unixFSCidBuilder() (cid.Builder, error) {
	prefix, err := merkledag.PrefixForCidVersion(1)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize UnixFS CID Builder: %w", err)
	}
	prefix.MhType = DefaultHashFunction
	return cidutil.InlineBuilder{
		Builder: prefix,
		Limit:   126,
	}, nil
}
// createUnixFSFilestore imports the file at srcPath into a CARv2 filestore at
// dstPath (positional references only, no leaf data) and returns the root CID
// of the resulting UnixFS DAG.  The import is done twice: once into a
// throwaway store to learn the root CID, then again into the final store,
// because the CARv2 writer needs the root up front.
func createUnixFSFilestore(ctx context.Context, srcPath string, dstPath string) (cid.Cid, error) {
	// This method uses a two-phase approach with a staging CAR blockstore and
	// a final CAR blockstore.
	//
	// This is necessary because of https://github.com/ipld/go-car/issues/196
	//
	// TODO: do we need to chunk twice? Isn't the first output already in the
	//  right order? Can't we just copy the CAR file and replace the header?

	src, err := os.Open(srcPath)
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to open input file: %w", err)
	}
	defer src.Close() //nolint:errcheck

	stat, err := src.Stat()
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to stat file :%w", err)
	}

	// Wrap the file so the filestore can record path+offset references.
	file, err := files.NewReaderPathFile(srcPath, src, stat)
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to create reader path file: %w", err)
	}

	f, err := ioutil.TempFile("", "")
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to create temp file: %w", err)
	}
	_ = f.Close() // close; we only want the path.

	tmp := f.Name()
	defer os.Remove(tmp) //nolint:errcheck

	// Step 1. Compute the UnixFS DAG and write it to a CARv2 file to get
	// the root CID of the DAG.
	fstore, err := stores.ReadWriteFilestore(tmp)
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to create temporary filestore: %w", err)
	}

	finalRoot1, err := buildUnixFS(ctx, file, fstore, true)
	//log.Infof("finalRoot1:%v", finalRoot1)
	if err != nil {
		_ = fstore.Close()
		return cid.Undef, xerrors.Errorf("failed to import file to store to compute root: %w", err)
	}

	if err := fstore.Close(); err != nil {
		return cid.Undef, xerrors.Errorf("failed to finalize car filestore: %w", err)
	}

	// Step 2. We now have the root of the UnixFS DAG, and we can write the
	// final CAR for real under `dst`.
	bs, err := stores.ReadWriteFilestore(dstPath, finalRoot1)
	if err != nil {
		return cid.Undef, xerrors.Errorf("failed to create a carv2 read/write filestore: %w", err)
	}

	// rewind file to the beginning.
	if _, err := src.Seek(0, 0); err != nil {
		return cid.Undef, xerrors.Errorf("failed to rewind file: %w", err)
	}

	finalRoot2, err := buildUnixFS(ctx, file, bs, true)
	if err != nil {
		_ = bs.Close()
		return cid.Undef, xerrors.Errorf("failed to create UnixFS DAG with carv2 blockstore: %w", err)
	}

	if err := bs.Close(); err != nil {
		return cid.Undef, xerrors.Errorf("failed to finalize car blockstore: %w", err)
	}

	// Both passes must agree, otherwise the import was not deterministic.
	if finalRoot1 != finalRoot2 {
		return cid.Undef, xerrors.New("roots do not match")
	}

	return finalRoot1, nil
}

// AllocateCAR pre-creates an empty file named "<id>.car" inside carfilepath
// and returns its full path; the file is closed before returning so callers
// can reopen it however they like.
func AllocateCAR(id uint64, carfilepath string) (string, error) {
	// Use a local name instead of a named result so the `path` stdlib package
	// is not shadowed inside this function.
	carPath := filepath.Join(carfilepath, fmt.Sprintf("%d.car", id))
	file, err := os.Create(carPath)
	if err != nil {
		return "", xerrors.Errorf("failed to create car file for import: %w", err)
	}
	if cerr := file.Close(); cerr != nil {
		return "", xerrors.Errorf("failed to close temp file: %w", cerr)
	}
	return carPath, nil
}
// move relocates a file from `from` to `to` by shelling out to mv, which
// handles cross-device moves and metadata far better than a hand-rolled copy.
// Both paths are homedir-expanded and must share the same base name; the
// destination's parent directory is the actual mv target.
func move(from, to string) error {
	from, err := homedir.Expand(from)
	if err != nil {
		return xerrors.Errorf("move: expanding from: %w", err)
	}

	to, err = homedir.Expand(to)
	if err != nil {
		return xerrors.Errorf("move: expanding to: %w", err)
	}

	if filepath.Base(from) != filepath.Base(to) {
		return xerrors.Errorf("move: base names must match ('%s' != '%s')", filepath.Base(from), filepath.Base(to))
	}

	toDir := filepath.Dir(to)

	// `mv` has decades of experience in moving files quickly; don't pretend we
	//  can do better

	var errOut bytes.Buffer

	var cmd *exec.Cmd
	if runtime.GOOS == "darwin" {
		// macOS mv lacks the -t flag; ensure the target dir exists and use the
		// positional form.
		if err := os.MkdirAll(toDir, 0777); err != nil {
			// Wrap with %w (the original used %s, losing the error chain).
			return xerrors.Errorf("failed exec MkdirAll: %w", err)
		}

		cmd = exec.Command("/usr/bin/env", "mv", from, toDir) // nolint
	} else {
		cmd = exec.Command("/usr/bin/env", "mv", "-t", toDir, from) // nolint
	}

	cmd.Stderr = &errOut
	if err := cmd.Run(); err != nil {
		return xerrors.Errorf("exec mv (stderr: %s): %w", strings.TrimSpace(errOut.String()), err)
	}

	return nil
}
