/*
 * Copyright Hundsun Technologies Inc. All Rights Reserved.
 */

package impl

import (
	"errors"
	"sort"
	"strconv"
	"sync"
	"time"

	"github.com/golang/protobuf/proto"
	"hundsun.com/hsl/hschain/common/log/logging"
	"hundsun.com/hsl/hschain/common/types"
	pbcom "hundsun.com/hsl/hschain/protos/common"
	pb "hundsun.com/hsl/hschain/protos/execute"
	"hundsun.com/hsl/hschain/protos/ledger"
	ptypes "hundsun.com/hsl/hschain/protos/types"
)

const (
	// MaxAccpetBlock is the maximum number of blocks buffered for
	// processing. (Name typo "Accpet" kept for compatibility with callers.)
	MaxAccpetBlock int32 = 1024
	// MaxLocalCacheCount is the maximum number of executed-block results
	// kept in the local cache; processing pauses above this threshold and
	// waits for commit to drain it.
	MaxLocalCacheCount int = 50
	// MaxTxDupCheckBlock is the duplicate-check window: if a transaction
	// does not appear in the previous 100 blocks it is considered unique.
	MaxTxDupCheckBlock uint64 = 100
	// DefaultWorkerExecTimeout is the default worker execution timeout
	// (presumably seconds — the timeout field is multiplied by time.Second
	// in sendGroupsToWorker; confirm).
	DefaultWorkerExecTimeout = 50
	// DefaultStateHashCache is the maximum number of cached state hashes.
	DefaultStateHashCache = 100
	// DefaultBatchKVCount limits how many key/values one batch write holds.
	DefaultBatchKVCount = 1000
)

var (
	// ErrContextExit is returned when the chain context has been cancelled.
	ErrContextExit = errors.New("ErrContextExit")
	// ErrFindWorker is returned when no live (heartbeating) worker exists.
	ErrFindWorker = errors.New("ErrFindWorker")
)

// Config boss config: tuning knobs for batch persistence and worker
// execution.
type Config struct {
	BatchKVCount              int  // number of key/values written per batch
	BatchBlockExecResultCount int  // number of block exec results written per batch
	EnableSaveExecutedTx      bool // whether to persist already-executed transactions
	WorkerExecTimeout         int  // worker execution timeout (presumably seconds; see sendGroupsToWorker)
}

// TxHistoryMap is a bidirectional index between transaction hashes and the
// block numbers that contain them. Presumably used for duplicate-transaction
// detection (see MaxTxDupCheckBlock) — the type is not exercised in this
// file chunk.
type TxHistoryMap struct {
	hash2BlockNum  map[string]uint64   // tx hash -> containing block number
	blockNum2Hashs map[uint64][]string // block number -> tx hashes in that block
}

// LocationTx pairs a transaction with an index — presumably its position
// within a block; the type is not exercised in this file chunk.
type LocationTx struct {
	index int
	tx    *ledger.Transaction
}

// HeartbeatMgr tracks worker liveness via per-worker missed-heartbeat
// counters. All access is serialized through an internal mutex.
type HeartbeatMgr struct {
	hbInfo map[int]int // map[workerId]count
	mt     *sync.Mutex
}

// NewHeartbeatMgr creates an empty heartbeat manager.
func NewHeartbeatMgr() *HeartbeatMgr {
	mgr := &HeartbeatMgr{}
	mgr.hbInfo = make(map[int]int)
	mgr.mt = new(sync.Mutex)
	return mgr
}

// Set records the heartbeat counter for a worker (registering it if new).
func (hm *HeartbeatMgr) Set(workID, count int) {
	hm.mt.Lock()
	hm.hbInfo[workID] = count
	hm.mt.Unlock()
}

// Size returns the number of workers currently considered alive.
func (hm *HeartbeatMgr) Size() int {
	hm.mt.Lock()
	n := len(hm.hbInfo)
	hm.mt.Unlock()
	return n
}

// CheckHeartbeat ages every worker's counter by one. A worker whose counter
// exceeds 3 has missed more than three consecutive heartbeats; its
// connection is considered broken and it is removed.
func (hm *HeartbeatMgr) CheckHeartbeat() {
	hm.mt.Lock()
	defer hm.mt.Unlock()
	for id, missed := range hm.hbInfo {
		next := missed + 1
		if next > 3 {
			// More than three missed heartbeats: drop the connection info.
			delete(hm.hbInfo, id)
			continue
		}
		hm.hbInfo[id] = next
	}
}

// workerMonitor runs worker monitoring: every 2 seconds it ages the
// heartbeat counters so that workers which stopped sending heartbeats are
// eventually dropped. It returns when the chain context is cancelled.
func (chain *BlockChain) workerMonitor() {
	// Check liveness every 2 seconds.
	t := time.NewTicker(time.Second * 2)
	// Fix: stop the ticker on exit so its resources are released
	// (previously it kept firing after the goroutine returned).
	defer t.Stop()
	for {
		select {
		case <-chain.ctx.Done():
			return
		case <-t.C:
			chain.heartbeatMgr.CheckHeartbeat()
		}
	}
}

// processTxGroups consumes blocks and their transaction groups from
// txGroupsCh and drives their execution until the context is cancelled.
func (chain *BlockChain) processTxGroups() {
	log := chain.log
	for {
		// Backpressure: when the local result cache exceeds
		// MaxLocalCacheCount, pause and wait for commit to drain it.
		if chain.getExecResultCacheSize() > MaxLocalCacheCount {
			// Fix: stay responsive to cancellation while waiting — the
			// previous unconditional sleep could delay shutdown
			// indefinitely while the cache stayed full.
			select {
			case <-chain.ctx.Done():
				log.Infof("processTxGroups exit...")
				return
			case <-time.After(time.Millisecond * 20):
			}
			continue
		}
		select {
		case <-chain.ctx.Done():
			log.Infof("processTxGroups exit...")
			return
		case blockTxGroups := <-chain.txGroupsCh:
			// Pull out the block and its transaction groups.
			groups := blockTxGroups.TxGroups
			block := blockTxGroups.Block
			mqLog := log.WithField(logging.LogFieldGoroutine, "ProcessTxGroups").WithField(logging.LogFieldBlockNumber, block.GetHeader().GetNumber())
			start := time.Now()
			mqLog.Infof("start Process block groups is %d ", len(groups))
			if len(groups) == 0 { // no transactions to execute
				chain.procEmptyBlock(block)
			} else {
				chain.blockProcTxGroup(block, groups)
			}
			mqLog.Infof("end Process block groups is %d time is %v", len(groups), time.Since(start))
		}
	}
}

// procEmptyBlock handles a block that carries no executable transactions:
// it still produces a (transaction-less) execution result, computes the
// resulting state hash, and caches the executed block.
func (chain *BlockChain) procEmptyBlock(block *ledger.Block) {
	if block == nil || block.GetHeader() == nil {
		return
	}
	log := chain.log
	header := block.GetHeader()
	execRst := &ledger.BlockExecResult{BlockNumber: header.GetNumber()}
	// Fold duplicate-transaction results into the (empty) exec result.
	chain.checkBlockExecResult(block, execRst)
	execRst.StateHash = chain.StateDataSetMemory(execRst)
	chain.setExecResultCache(&ledger.ExecutedBlock{
		Block:           &ledger.Block{Header: header, Transactions: nil},
		BlockExecResult: execRst,
	})
	log.Infof("end setLocalCacheExecResult block %d executed", header.Number)
}

// blockProcTxGroup executes a block's transaction groups, blocking until
// the execution result is saved or the chain context is cancelled.
//
// Flow: dispatch the groups to workers; when a worker reports a new address
// the groups are re-grouped — non-conflicting txs are accepted and
// accumulated, conflicting ones are dispatched again on top of the accepted
// results — until no conflicts remain, then the merged result is saved.
func (chain *BlockChain) blockProcTxGroup(block *ledger.Block, txGp []*pb.TransactionGroup) {
	log := chain.log
	header := block.GetHeader()
	// Collect previous blocks' execution results from the local cache.
	preBlockExecResults := make([]*ledger.BlockExecResult, 0)
	s := time.Now()
	execdblks := chain.getExecResultsCache()
	for _, eblk := range execdblks {
		if eblk.GetBlockExecResult() != nil {
			preBlockExecResults = append(preBlockExecResults, eblk.BlockExecResult)
		}
	}
	log.Debugf("getLocalCacheExecResults block num is %d , cost time is %v, len is %d", header.Number, time.Since(s), len(execdblks))
	glAcceptedTxs := make([]*pb.GroupedTransaction, 0) // txs accepted across all regroup rounds
	glPendingGrps := make([]*pb.TransactionGroup, 0)   // groups left pending by regrouping
	groups := txGp
	curPreBlkExecRsts := preBlockExecResults
	start := time.Now()
	defer func() {
		log.Debugf("end blockProcTxGroup block num is %d, group count is %d, cost time %v \n",
			header.Number, len(groups), time.Since(start))
	}()
	log.Debugf("start blockProcTxGroup block num is %d ", header.Number)

	for {
		// Non-blocking cancellation check between rounds.
		select {
		case <-chain.ctx.Done():
			log.Infof("blockProcTxGroup exit...")
			return
		default:
			break
		}

		needReGp, execGps, err := chain.blockDispatch(groups, header, curPreBlkExecRsts)
		if err != nil {
			if err == ErrContextExit {
				return
			}
			// Transient failure: back off briefly and retry the dispatch.
			time.Sleep(time.Millisecond * 100)
			continue
		}

		if needReGp {
			// Re-group the just-executed groups together with everything
			// still pending from earlier rounds.
			var reGps []*pb.TransactionGroup
			reGps = append(reGps, execGps...)
			reGps = append(reGps, glPendingGrps...)
			hasConflict, newGroups, acceptedTxs, newPendingGrps := regroup(reGps)
			log.Debugf("need regroup block num is %d conflct status %v", header.Number, hasConflict)
			if !hasConflict {
				chain.saveExecResult(execGps, glPendingGrps, glAcceptedTxs, block)
				return
			}
			groups = newGroups
			glPendingGrps = newPendingGrps
			glAcceptedTxs = append(glAcceptedTxs, acceptedTxs...)
			// Refresh preBlockExecResults for the next round.
			curPreBlkExecRsts = preBlockExecResults
			// The next round must be computed on top of the already
			// accepted transactions, so fold them into a synthetic
			// exec result for this block.
			blockExecRst := &ledger.BlockExecResult{
				BlockNumber:            header.Number,
				TransactionExecResults: make([]*ledger.TransactionExecResult, 0),
			}
			for _, tx := range glAcceptedTxs {
				blockExecRst.TransactionExecResults = append(blockExecRst.TransactionExecResults, tx.TxExecRst)
			}
			curPreBlkExecRsts = append(curPreBlkExecRsts, blockExecRst)
			continue
		}
		chain.saveExecResult(execGps, glPendingGrps, glAcceptedTxs, block)
		return
	}
}

// blockDispatch dispatches the groups to workers and blocks until every
// group has been executed (retrying groups whose worker timed out, and
// retrying the whole dispatch while no worker is available) or the chain
// context is cancelled, in which case ErrContextExit is returned.
//
// Returns whether a regroup is needed and the executed groups.
func (chain *BlockChain) blockDispatch(groups []*pb.TransactionGroup, header *ledger.BlockHeader, preBlockExecResults []*ledger.BlockExecResult,
) (needReGp bool, execGps []*pb.TransactionGroup, err error) {
	log := chain.log
	glExecGps := make([]*pb.TransactionGroup, 0)
	glNeedReGp := false
	txcount := 0
	for _, gps := range groups {
		txcount += len(gps.Txs)
	}
	// Flatten the cached block results into the state overlay that is sent
	// along to the workers.
	preExecState := GenExecStateDatas(preBlockExecResults, chain.log)
	// Determine the block number preceding the cached results (or, with an
	// empty cache, preceding this block) so the matching state hash can be
	// attached; block 0 has no predecessor.
	preNum := uint64(0)
	if len(preBlockExecResults) > 0 {
		if preBlockExecResults[0].BlockNumber > 0 {
			preNum = preBlockExecResults[0].BlockNumber - 1
		}
	} else {
		if header.Number > 0 {
			preNum = header.Number - 1
		}
	}
	preExecState.PreNubmer = preNum // field-name typo comes from the proto definition
	preExecState.PreStateHash = chain.GetStateHash4Cache(preNum)
	start := time.Now()
	defer func() {
		log.Debugf("end blockDispatch block num is %d, group count is %d, groupsTxCount %d, cost time %v",
			header.Number, len(groups), txcount, time.Since(start))
	}()
	log.Debugf("start blockDispatch block num is %d ", header.Number)
	print := 0 // throttles the "no worker" debug log to once per 50 retries
	for {
		// Non-blocking cancellation check between dispatch rounds.
		select {
		case <-chain.ctx.Done():
			log.Infof("blockDispatch exit...")
			return false, nil, ErrContextExit
		default:
			break
		}
		needReGp, execGps, timeoutGrps, err := chain.dispatch(groups, header, preExecState)
		if err != nil {
			// No worker available: sleep 100ms and try again.
			time.Sleep(time.Millisecond * 100)
			print++
			if print == 50 {
				log.Debugf("blockDispatch %d dispatch is error %v ", header.Number, err)
				print = 0
			}
			continue
		}
		glNeedReGp = glNeedReGp || needReGp
		glExecGps = append(glExecGps, execGps...)
		if len(timeoutGrps) > 0 { // retry only the groups whose worker timed out
			groups = timeoutGrps
			continue
		}
		return glNeedReGp, glExecGps, nil
	}
}

// GenExecStateDatas flattens []*ledger.BlockExecResult into ExecStateDatas:
// every transaction result's key/values are merged into one map — a later
// write to the same key overwrites the earlier one — and then emitted as a
// deduplicated KV list.
//
// NOTE(review): the output order follows Go map iteration and is therefore
// nondeterministic across calls/processes. Confirm consumers treat the KV
// list as an unordered overlay; any order-sensitive use (e.g. hashing)
// would need a sort here.
func GenExecStateDatas(preBlockExecResults []*ledger.BlockExecResult, log logging.Log) *pb.ExecStateDatas {
	// for test time
	st := time.Now()
	num := 0 // total kvs seen, before deduplication (for the log line)
	filterMp := make(map[string][]byte)
	for _, preBlkRes := range preBlockExecResults {
		for _, txRes := range preBlkRes.GetTransactionExecResults() {
			for _, kv := range txRes.GetKvs() {
				filterMp[string(kv.Key)] = kv.Value
				num++
			}
		}
	}
	stateDatas := &pb.ExecStateDatas{}
	for k, v := range filterMp {
		stateDatas.Kvs = append(stateDatas.Kvs, &pbcom.KeyValue{Key: []byte(k), Value: v})
	}
	log.Debugf("GenExecStateDatas cost time is %v kv num %d", time.Since(st), num)
	return stateDatas
}

// dispatch hands every worker its share of the transaction groups and
// waits for all of their execution results.
func (chain *BlockChain) dispatch(groups []*pb.TransactionGroup, header *ledger.BlockHeader, preBlockExecResults *pb.ExecStateDatas,
) (needReGp bool, execGps, timeoutGps []*pb.TransactionGroup, err error) {
	log := chain.log
	if len(groups) <= 0 {
		return false, nil, nil, nil
	}
	// Partition the groups into per-worker task lists.
	taskMap, assignErr := chain.assignTaskToWorker(groups)
	if assignErr != nil {
		return false, nil, nil, assignErr
	}
	// Fan the tasks out to the workers and gather the replies.
	var timedOut []int32
	needReGp, execGps, timedOut = chain.sendTaskToWorkers(taskMap, header, preBlockExecResults)
	if len(timedOut) != 0 {
		log.Infof("groups executed timeout worker id is %v", timedOut)
		// Collect the groups whose worker never answered so the caller
		// can redispatch them.
		for _, workerID := range timedOut {
			timeoutGps = append(timeoutGps, taskMap[workerID]...)
		}
	}
	return needReGp, execGps, timeoutGps, nil
}

// assignTaskToWorker distributes the transaction groups over the workers
// that currently report heartbeats.
//
// Strategy: the first 2/3 of the groups are split evenly across the workers
// up front; the remaining 1/3 is split evenly again and appended, so that
// faster workers can pick up extra chunks.
//
// NOTE(review): the concatenated chunk list can contain up to 2*workerNum
// entries, producing map keys workerNum..2*workerNum-1 even though only
// workerNum workers have heartbeats; likewise the single-worker case always
// uses id 0 regardless of the actual heartbeat ids. Confirm these keys are
// logical task indexes rather than real worker ids (sendGroupsToWorker
// notes the MQ layer cannot address a specific worker anyway).
func (chain *BlockChain) assignTaskToWorker(groups []*pb.TransactionGroup) (map[int32][]*pb.TransactionGroup, error) {
	// Number of workers currently reporting heartbeats.
	workerNum := chain.heartbeatMgr.Size()
	wkTxGpMp := make(map[int32][]*pb.TransactionGroup)
	if workerNum <= 0 {
		return wkTxGpMp, ErrFindWorker
	} else if workerNum == 1 {
		// 1. Single worker: it receives every group.
		wkTxGpMp[0] = groups
		return wkTxGpMp, nil
	}
	groupSize := len(groups)
	// 1. Each worker is first assigned its share of 2/3 of the groups.
	groupSize1 := groupSize * 2 / 3
	multg1 := splitGroups(groups[:groupSize1], workerNum)
	// 2. The remaining 1/3 is split evenly again so that workers which
	// finish early can process more.
	multg2 := splitGroups(groups[groupSize1:], workerNum)
	if len(multg2) > 0 {
		multg1 = append(multg1, multg2...)
	}

	for id, g := range multg1 {
		wkTxGpMp[int32(id)] = g
	}
	return wkTxGpMp, nil
}

// splitGroups partitions groups into at most worknum consecutive chunks of
// near-equal size. Order is preserved: concatenating the returned chunks
// yields groups. Returns nil for empty input.
//
// Fixes over the previous version:
//   - no empty/nil chunks are produced when len(groups) < worknum
//     (previously each such worker received an empty task slice);
//   - the remainder is spread one-per-chunk over the leading chunks instead
//     of all landing on the final chunk, which could previously receive
//     almost twice the average load.
func splitGroups(groups []*pb.TransactionGroup, worknum int) [][]*pb.TransactionGroup {
	if len(groups) == 0 {
		return nil
	}
	if worknum <= 1 {
		// A single worker takes everything.
		return [][]*pb.TransactionGroup{groups}
	}
	groupSize := len(groups)
	avr := groupSize / worknum
	rem := groupSize % worknum
	var multGroups [][]*pb.TransactionGroup
	start := 0
	for i := 0; i < worknum && start < groupSize; i++ {
		size := avr
		if i < rem { // spread the remainder over the first chunks
			size++
		}
		if size == 0 {
			// Remaining chunks would all be empty; stop here.
			break
		}
		chunk := make([]*pb.TransactionGroup, size)
		copy(chunk, groups[start:start+size])
		multGroups = append(multGroups, chunk)
		start += size
	}
	return multGroups
}

// SendToWorkFunc is the signature of the hook used to send transaction
// groups to a worker; it mirrors (*BlockChain).sendGroupsToWorker.
type SendToWorkFunc func(workerId int32, sendGroups []*pb.TransactionGroup, currentBlockHeader *ledger.BlockHeader,
	preBlockExecResults *pb.ExecStateDatas) (*pb.TxGroupsReply, error)

// SendToWorkFn, when non-nil, overrides the default MQ-based sender in
// sendTaskToWorkers (e.g. for testing).
var SendToWorkFn SendToWorkFunc

// sendTaskToWorkers sends each worker its assigned transaction groups in
// parallel and collects all replies.
//
// Returns:
//   - needReGp: true if any worker reported a new address (regroup needed)
//   - execGps: the successfully executed groups from all workers
//   - timeoutWrkers: ids of workers that returned no groups; the caller
//     treats them as timed out and redispatches their groups.
func (chain *BlockChain) sendTaskToWorkers(wkTxGpMp map[int32][]*pb.TransactionGroup,
	blkHeder *ledger.BlockHeader, preBlkExecRsts *pb.ExecStateDatas) (needReGp bool, execGps []*pb.TransactionGroup, timeoutWrkers []int32) {
	log := chain.log
	workerCount := len(wkTxGpMp)
	recvChan := make(chan *pb.TxGroupsReply, workerCount)
	for workerID, txGp := range wkTxGpMp {
		go func(id int32, gps []*pb.TransactionGroup) {
			var txGpRep *pb.TxGroupsReply
			var err error
			// SendToWorkFn is an injectable hook (e.g. for tests); fall
			// back to the MQ-based sender when it is not set.
			if SendToWorkFn != nil {
				txGpRep, err = SendToWorkFn(id, gps, blkHeder, preBlkExecRsts)
			} else {
				txGpRep, err = chain.sendGroupsToWorker(id, gps, blkHeder, preBlkExecRsts)
			}
			if err != nil {
				log.Errorf("sendGroupsToWorker exec workerId %d, group count %d fail %v", id, len(gps), err)
			}
			// Fix: guard against a nil reply (possible from an injected
			// SendToWorkFn). A nil reply previously caused a nil-pointer
			// dereference in the collection loop; substitute an empty reply
			// so the worker is treated as timed out and its groups are
			// redispatched.
			if txGpRep == nil {
				txGpRep = &pb.TxGroupsReply{WorkerId: id}
			}
			recvChan <- txGpRep
		}(workerID, txGp)
	}
	for i := 0; i < workerCount; i++ {
		txGp := <-recvChan
		if len(txGp.Groups) == 0 {
			// No executed groups came back: consider this worker timed out.
			timeoutWrkers = append(timeoutWrkers, txGp.WorkerId)
		} else {
			execGps = append(execGps, txGp.Groups...)
		}
		// Any worker observing a new address forces a regroup.
		if !needReGp && txGp.HasNewAddr {
			needReGp = true
		}
	}
	return needReGp, execGps, timeoutWrkers
}

// saveExecResult merges the execution results of the executed groups, the
// pending groups, and the transactions accepted during regroup rounds into
// one BlockExecResult for the block, restores block order, computes the
// state hash, and caches the executed block.
func (chain *BlockChain) saveExecResult(groups, pendingGrps []*pb.TransactionGroup, acceptedTxs []*pb.GroupedTransaction, block *ledger.Block) {
	log := chain.log
	header := block.GetHeader()
	// Write the execution results into the cache:
	// first build the block execution result.
	blockExecRst := ledger.BlockExecResult{
		BlockNumber:            header.Number,
		TransactionExecResults: make([]*ledger.TransactionExecResult, 0)}
	// Merge the transaction exec results of the executed and pending groups.
	for _, egrp := range groups {
		for _, etx := range egrp.Txs {
			blockExecRst.TransactionExecResults = append(blockExecRst.TransactionExecResults, etx.GetTxExecRst())
		}
	}
	for _, pgrp := range pendingGrps {
		for _, ptx := range pgrp.Txs {
			blockExecRst.TransactionExecResults = append(blockExecRst.TransactionExecResults, ptx.GetTxExecRst())
		}
	}
	// Also fold in the transactions accepted during earlier regroup rounds.
	for _, tx := range acceptedTxs {
		blockExecRst.TransactionExecResults = append(blockExecRst.TransactionExecResults, tx.GetTxExecRst())
	}
	// Restore in-block order: sort by each transaction's index in the block.
	sort.Slice(blockExecRst.TransactionExecResults, func(i, j int) bool {
		return blockExecRst.TransactionExecResults[i].GetReceipt().GetTransactionIndex() < blockExecRst.TransactionExecResults[j].GetReceipt().GetTransactionIndex()
	})
	// Merge duplicate transactions into the block execution result.
	chain.checkBlockExecResult(block, &blockExecRst)
	statehash := chain.StateDataSetMemory(&blockExecRst)
	blockExecRst.StateHash = statehash
	eblk := &ledger.ExecutedBlock{
		Block:           &ledger.Block{Header: header, Transactions: block.GetTransactions()},
		BlockExecResult: &blockExecRst,
	}
	chain.setExecResultCache(eblk)
	log.Infof("end saveExecResult block %d executed", header.Number)
}

// sendGroupsToWorker sends transaction groups to a worker for execution and
// waits for the reply. Note: the MQ layer cannot currently target a
// specific worker on send — only the reply identifies which worker actually
// executed the groups.
//
// The reply object is returned (with BlockNumber/WorkerId populated) even
// on failure so the caller can attribute the failure to this worker.
func (chain *BlockChain) sendGroupsToWorker(workerID int32, sendGroups []*pb.TransactionGroup,
	currentBlockHeader *ledger.BlockHeader, preBlockExecResults *pb.ExecStateDatas) (*pb.TxGroupsReply, error) {

	blkNum := currentBlockHeader.GetNumber()
	mqLog := chain.log.WithField(logging.LogFieldGoroutine, "SendGroupsToWorker").WithField(logging.LogFieldBlockNumber, blkNum)
	mqLog.Infof("Send groups size %d to worker %d", len(sendGroups), workerID)
	txGroupsReq := &pb.TxGroupsRequest{
		BlockNumber:        blkNum,
		WorkerId:           workerID,
		Groups:             sendGroups,
		CurrentBlockHeader: currentBlockHeader,
		PreStateDatas:      preBlockExecResults,
	}
	rep := &pb.TxGroupsReply{
		BlockNumber: blkNum,
		WorkerId:    workerID,
	}
	// Fix: the marshal error was previously discarded with a blank
	// identifier; a failed marshal would have sent an empty payload.
	value, err := proto.Marshal(txGroupsReq)
	if err != nil {
		mqLog.Errorf("Marshal request for worker %d , error is %s", workerID, err.Error())
		return rep, err
	}
	msg := chain.workerClient.NewMessage(types.WorkerModule, "worker", types.EventExecTxGroup, value)
	err = chain.workerClient.Send(msg, true)
	if err != nil {
		mqLog.Errorf("send msg to  worker %d , error is %s", workerID, err.Error())
		return rep, err
	}
	res, err := chain.workerClient.WaitTimeout(msg, time.Duration(chain.WorkerExecTimeout)*time.Second)
	if err != nil {
		mqLog.Errorf("WaitTimeout msg to  worker %d , error is %s", workerID, err.Error())
		return rep, err
	}
	// Fix: use a checked type assertion — the previous bare assertion
	// panicked if the MQ delivered an unexpected payload type.
	v, ok := res.Data.(*ptypes.Reply)
	if !ok || v == nil || !v.IsOk {
		mqLog.Errorf(" worker %d , value is nil", workerID)
		// Fix: previously this path returned a nil error, silently
		// presenting an empty reply as success; the caller now sees an
		// explicit error (its retry behavior is unchanged — an empty
		// Groups list is already treated as a timeout).
		return rep, errors.New("worker reply is nil or not ok")
	}
	err = proto.Unmarshal(v.Msg, rep)
	if err != nil {
		mqLog.Errorf("worker %d ,Unmarshal error is %s", workerID, err.Error())
		return rep, err
	}
	// Only the reply tells us which worker actually executed the groups.
	mqLog.Infof("Receive worker %d successful executed result", rep.WorkerId)
	return rep, nil
}

// setExecResultCache caches a locally executed block and its transactions,
// keyed by the block number in decimal-string form. Entries without an
// execution result are silently ignored.
func (chain *BlockChain) setExecResultCache(eblk *ledger.ExecutedBlock) {
	if eblk == nil {
		return
	}
	rst := eblk.GetBlockExecResult()
	if rst == nil {
		return
	}
	key := strconv.FormatUint(rst.GetBlockNumber(), 10)
	chain.execRstCache.Push(key, eblk)
}

// getExecResultsCache returns every executed block currently held in the
// local cache (entries of unexpected type are skipped).
func (chain *BlockChain) getExecResultsCache() []*ledger.ExecutedBlock {
	var cached []*ledger.ExecutedBlock
	collect := func(value interface{}) bool {
		eblk, ok := value.(*ledger.ExecutedBlock)
		if ok {
			cached = append(cached, eblk)
		}
		return true // keep walking
	}
	chain.execRstCache.Walk(collect)
	return cached
}

// getExecResultCache looks up a single executed block by number in the
// local cache; returns types.ErrNotFound when it is absent or of an
// unexpected type.
func (chain *BlockChain) getExecResultCache(blkNum uint64) (*ledger.ExecutedBlock, error) {
	key := strconv.FormatUint(blkNum, 10)
	item, err := chain.execRstCache.GetItem(key)
	if err != nil {
		return nil, types.ErrNotFound
	}
	eblk, ok := item.(*ledger.ExecutedBlock)
	if !ok {
		return nil, types.ErrNotFound
	}
	return eblk, nil
}

// delExecResultCache removes the execution result for the given block
// number from the local cache.
func (chain *BlockChain) delExecResultCache(blkNum uint64) {
	key := strconv.FormatUint(blkNum, 10)
	chain.execRstCache.Remove(key)
}

// getExecResultCacheSize returns the current number of cached executed
// blocks; used as the backpressure signal in processTxGroups.
func (chain *BlockChain) getExecResultCacheSize() int {
	return chain.execRstCache.Size()
}

// LoadStateHashCache bulk-loads state hashes for the block range
// [end-count+1, end] into the state-hash cache, clamping the lower bound
// at block 0.
func (chain *BlockChain) LoadStateHashCache(end int64, count int) {
	first := end - int64(count) + 1
	if first < 0 {
		first = 0
	}
	for num := first; num <= end; num++ {
		hash := chain.getStateHash(uint64(num))
		chain.stateHashCache.Add(uint64(num), hash)
	}
}

// getStateHash fetches the state hash of an executed block: first from the
// in-memory executed-block cache, then from the persistent block store.
// Returns an empty slice when neither source has it.
func (chain *BlockChain) getStateHash(blkNum uint64) []byte {
	// Fast path: the block may still sit in the local exec-result cache.
	if execBlk, err := chain.getExecResultCache(blkNum); err == nil {
		return execBlk.GetBlockExecResult().GetStateHash()
	}
	// Slow path: read the persisted execution result.
	res, err := chain.blockStore.GetBlockExecResult(blkNum)
	if err != nil {
		return []byte{}
	}
	return res.GetStateHash()
}

// AddStateHash2Cache caches the state hash for the given block height.
func (chain *BlockChain) AddStateHash2Cache(height uint64, hash []byte) {
	chain.stateHashCache.Add(height, hash)
}

// GetStateHash4Cache returns the state hash for a height, preferring the
// cache and falling back to getStateHash (cache/store lookup). Returns an
// empty slice when nothing is found.
// TODO: multi-version hash retrieval still needs rework for the
// consensus-rollback case.
func (chain *BlockChain) GetStateHash4Cache(height uint64) []byte {
	if v, ok := chain.stateHashCache.Get(height); ok {
		return v.([]byte)
	}
	value := chain.getStateHash(height)
	if len(value) == 0 {
		return []byte{}
	}
	return value
}

// StateDataSetMemory converts the execution result's key/value data into
// state data: it assembles a StateSet on top of the previous block's state
// hash, applies it to the in-memory state DB, caches the resulting hash for
// this block number, and returns it.
func (chain *BlockChain) StateDataSetMemory(blkRst *ledger.BlockExecResult) []byte {
	// Compute the state-DB update for this block.
	blockNum := blkRst.GetBlockNumber()
	mqLog := chain.log.WithField(logging.LogFieldBlockNumber, blockNum)

	// Block 0 (genesis) has no predecessor.
	preBlockNum := uint64(0)
	if blockNum > 0 {
		preBlockNum = blockNum - 1
	}
	set := &pbcom.StateSet{
		PreStateHash: chain.GetStateHash4Cache(preBlockNum),
		Number:       blockNum,
	}
	// Collect every transaction's written key/values in tx order.
	for _, txr := range blkRst.GetTransactionExecResults() {
		if txr.GetKvs() != nil {
			set.KVs = append(set.KVs, txr.GetKvs()...)
		}
	}
	// Apply the state set in memory and record the resulting hash.
	start := time.Now()
	stateHash, err := chain.stateDBHandle.SetMemory(set, true)
	if err != nil {
		mqLog.Error("StateDataToSetMemory", "SetMemory fail", err)
	}
	// NOTE(review): on SetMemory failure the (likely zero-value) stateHash
	// is still cached and returned — confirm downstream tolerates this.
	chain.AddStateHash2Cache(blockNum, stateHash)
	mqLog.Debug("StateDataToSetMemory cost time = ", time.Since(start))
	return stateHash
}

// StateDataCommit persists the state data identified by statehash to the
// database. A types.ErrNotFound from the state DB is treated as success
// (nothing to commit).
func (chain *BlockChain) StateDataCommit(statehash []byte) error {
	if err := chain.stateDBHandle.Commit(statehash); err != types.ErrNotFound {
		return err
	}
	return nil
}
