// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"encoding/json"
	"fmt"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/tracker"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)

// requestTracker is a singleton tracker for eth/66 and newer request times.
// Pending entries time out after 5 minutes if no matching response arrives.
var requestTracker = tracker.New(ProtocolName, 5*time.Minute)

// handleGetBlockHeaders serves a remote request for a batch of block headers.
func handleGetBlockHeaders(backend Backend, msg Decoder, peer *Peer) error {
	// Decode the (potentially complex) header query.
	var req GetBlockHeadersPacket
	if err := msg.Decode(&req); err != nil {
		return err
	}
	headers := ServiceGetBlockHeadersQuery(backend.Chain(), req.GetBlockHeadersRequest, peer)
	return peer.ReplyBlockHeadersRLP(req.RequestId, headers)
}

// ServiceGetBlockHeadersQuery assembles the response to a header query. It is
// exposed to allow external packages to test protocol behavior.
func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue {
	if query.Amount == 0 {
		return nil
	}
	// A zero skip means the requested headers form a contiguous segment,
	// which has a dedicated fast path; everything else takes the generic one.
	if query.Skip == 0 {
		return serviceContiguousBlockHeaderQuery(chain, query)
	}
	return serviceNonContiguousBlockHeaderQuery(chain, query, peer)
}

// serviceNonContiguousBlockHeaderQuery answers a header query whose results
// are not adjacent on the chain (non-zero skip), stepping header-by-header
// either by hash or by number, forwards or backwards.
func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue {
	// A non-empty origin hash means the query anchors on a hash rather than
	// on a block number.
	hashMode := query.Origin.Hash != (common.Hash{})
	first := true
	// Cap on how far GetAncestor may walk off the canonical chain.
	maxNonCanonical := uint64(100)

	// Gather headers until the fetch or network limits is reached
	var (
		bytes   common.StorageSize // accumulated response size
		headers []rlp.RawValue     // encoded headers collected so far
		unknown bool               // set once the next header cannot be resolved
		lookups int                // total lookups performed, bounds serving cost
	)

	for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit &&
		len(headers) < maxHeadersServe && lookups < 2*maxHeadersServe {
		lookups++
		// Retrieve the next header satisfying the query
		var origin *types.Header

		if hashMode {
			if first {
				first = false

				// The first lookup resolves the anchor hash and pins the
				// corresponding number for subsequent traversal steps.
				origin = chain.GetHeaderByHash(query.Origin.Hash)
				if origin != nil {
					query.Origin.Number = origin.Number.Uint64()
				}
			} else {
				origin = chain.GetHeader(query.Origin.Hash, query.Origin.Number)
			}
		} else {
			origin = chain.GetHeaderByNumber(query.Origin.Number)
		}

		if origin == nil {
			break
		}

		if rlpData, err := rlp.EncodeToBytes(origin); err != nil {
			// Failing to encode a locally stored header is unrecoverable.
			log.Crit("Unable to encode our own headers", "err", err)
		} else {
			headers = append(headers, rlp.RawValue(rlpData))
			bytes += common.StorageSize(len(rlpData))
		}
		// Advance to the next header of the query
		switch {
		case hashMode && query.Reverse:
			// Hash based traversal towards the genesis block
			ancestor := query.Skip + 1
			if ancestor == 0 {
				// Skip was MaxUint64, the +1 overflowed: bail out.
				unknown = true
			} else {
				query.Origin.Hash, query.Origin.Number = chain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
				unknown = (query.Origin.Hash == common.Hash{})
			}
		case hashMode && !query.Reverse:
			// Hash based traversal towards the leaf block
			var (
				current = origin.Number.Uint64()
				next    = current + query.Skip + 1
			)

			if next <= current {
				// The skip overflowed the next block number: log the
				// offender as a likely attack attempt and stop serving.
				infos, _ := json.MarshalIndent(peer.Peer.Info(), "", "  ")
				peer.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)

				unknown = true
			} else {
				if header := chain.GetHeaderByNumber(next); header != nil {
					nextHash := header.Hash()
					// Only follow the jump if the candidate really has the
					// current origin as its ancestor at the expected depth.
					expOldHash, _ := chain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)

					if expOldHash == query.Origin.Hash {
						query.Origin.Hash, query.Origin.Number = nextHash, next
					} else {
						unknown = true
					}
				} else {
					unknown = true
				}
			}
		case query.Reverse:
			// Number based traversal towards the genesis block
			current := query.Origin.Number
			ancestor := current - (query.Skip + 1)
			if ancestor >= current { // check for underflow
				unknown = true
			} else {
				query.Origin.Number = ancestor
			}

		case !query.Reverse:
			// Number based traversal towards the leaf block
			current := query.Origin.Number
			next := current + query.Skip + 1
			if next <= current { // check for overflow
				unknown = true
			} else {
				query.Origin.Number = next
			}
		}
	}

	return headers
}

// serviceContiguousBlockHeaderQuery answers a header query with skip == 0,
// i.e. the requested headers form one contiguous chain segment.
func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest) []rlp.RawValue {
	// Clamp the request to the per-response serving limit.
	count := query.Amount
	if count > maxHeadersServe {
		count = maxHeadersServe
	}

	if query.Origin.Hash == (common.Hash{}) {
		// Number mode, just return the canon chain segment. The backend
		// delivers in [N, N-1, N-2..] descending order, so we need to
		// accommodate for that.
		from := query.Origin.Number
		if !query.Reverse {
			// Ascending request: fetch starting from the highest number and
			// flip the order below.
			from = from + count - 1
		}

		headers := chain.GetHeadersFrom(from, count)

		if !query.Reverse {
			// Reverse the backend's descending order into ascending.
			for i, j := 0, len(headers)-1; i < j; i, j = i+1, j-1 {
				headers[i], headers[j] = headers[j], headers[i]
			}
		}

		return headers
	}
	// Hash mode.
	var (
		headers []rlp.RawValue
		hash    = query.Origin.Hash
		header  = chain.GetHeaderByHash(hash)
	)

	if header != nil {
		rlpData, _ := rlp.EncodeToBytes(header)
		headers = append(headers, rlpData)
	} else {
		// We don't even have the origin header
		return headers
	}

	num := header.Number.Uint64()
	if !query.Reverse {
		// Theoretically, we are tasked to deliver header by hash H, and onwards.
		// However, if H is not canon, we will be unable to deliver any descendants of
		// H.
		if canonHash := chain.GetCanonicalHash(num); canonHash != hash {
			// Not canon, we can't deliver descendants
			return headers
		}

		// The origin itself is already in the response, so fetch only the
		// remaining count-1 descendants (descending) and flip to ascending.
		descendants := chain.GetHeadersFrom(num+count-1, count-1)
		for i, j := 0, len(descendants)-1; i < j; i, j = i+1, j-1 {
			descendants[i], descendants[j] = descendants[j], descendants[i]
		}

		headers = append(headers, descendants...)

		return headers
	}
	{ // Last mode: deliver ancestors of H
		// Walk parent hashes backwards; works off-canon too.
		for i := uint64(1); header != nil && i < count; i++ {
			header = chain.GetHeaderByHash(header.ParentHash)
			if header == nil {
				break
			}

			rlpData, _ := rlp.EncodeToBytes(header)
			headers = append(headers, rlpData)
		}

		return headers
	}
}

// handleGetBlockBodies serves a remote request for a batch of block bodies.
func handleGetBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
	var req GetBlockBodiesPacket
	if err := msg.Decode(&req); err != nil {
		return err
	}
	bodies := ServiceGetBlockBodiesQuery(backend.Chain(), req.GetBlockBodiesRequest)
	return peer.ReplyBlockBodiesRLP(req.RequestId, bodies)
}

// ServiceGetBlockBodiesQuery assembles the response to a body query. It is
// exposed to allow external packages to test protocol behavior.
func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesRequest) []rlp.RawValue {
	// Accumulate bodies until one of the serving limits is reached.
	var (
		total  int // accumulated response size in bytes
		bodies []rlp.RawValue
	)
	for i, hash := range query {
		if total >= softResponseLimit || len(bodies) >= maxBodiesServe || i >= 2*maxBodiesServe {
			break
		}
		// Unknown bodies are silently skipped.
		data := chain.GetBodyRLP(hash)
		if len(data) == 0 {
			continue
		}
		bodies = append(bodies, data)
		total += len(data)
	}
	return bodies
}

// handleGetReceipts68 serves a remote eth/68 receipt query.
func handleGetReceipts68(backend Backend, msg Decoder, peer *Peer) error {
	var req GetReceiptsPacket
	if err := msg.Decode(&req); err != nil {
		return err
	}
	receipts := ServiceGetReceiptsQuery68(backend.Chain(), req.GetReceiptsRequest)
	return peer.ReplyReceiptsRLP(req.RequestId, receipts)
}

// handleGetReceipts69 serves a remote eth/69 receipt query (no bloom filters).
func handleGetReceipts69(backend Backend, msg Decoder, peer *Peer) error {
	var req GetReceiptsPacket
	if err := msg.Decode(&req); err != nil {
		return err
	}
	receipts := ServiceGetReceiptsQuery69(backend.Chain(), req.GetReceiptsRequest)
	return peer.ReplyReceiptsRLP(req.RequestId, receipts)
}

// ServiceGetReceiptsQuery68 assembles the response to a receipt query. It is
// exposed to allow external packages to test protocol behavior.
func ServiceGetReceiptsQuery68(chain *core.BlockChain, query GetReceiptsRequest) []rlp.RawValue {
	// Gather state data until the fetch or network limits is reached
	var (
		bytes    int            // accumulated response size
		receipts []rlp.RawValue // per-block encoded receipt lists
	)

	for lookups, hash := range query {
		if bytes >= softResponseLimit || len(receipts) >= maxReceiptsServe ||
			lookups >= 2*maxReceiptsServe {
			break
		}
		// Retrieve the requested block's receipts
		results := chain.GetReceiptsRLP(hash)
		if results == nil {
			// Only serve an empty entry when our own header confirms the
			// block genuinely has no receipts; otherwise skip this hash.
			if header := chain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
				continue
			}
		} else {
			// The body is needed by the storage-to-network conversion below
			// (presumably to restore fields dropped by the storage encoding,
			// e.g. tx types — confirm against blockReceiptsToNetwork68).
			body := chain.GetBodyRLP(hash)
			if body == nil {
				continue
			}
			var err error
			results, err = blockReceiptsToNetwork68(results, body)
			if err != nil {
				log.Error("Error in block receipts conversion", "hash", hash, "err", err)
				continue
			}
		}
		receipts = append(receipts, results)
		bytes += len(results)
	}
	return receipts
}

// ServiceGetReceiptsQuery69 assembles the response to a receipt query.
// It does not send the bloom filters for the receipts.
// Depending on the Madhugiri hardfork, state-sync (bor) receipts are either
// already merged with the block receipts (post-fork) or must be fetched and
// merged in separately (pre-fork).
func ServiceGetReceiptsQuery69(chain *core.BlockChain, query GetReceiptsRequest) []rlp.RawValue {
	// Gather state data until the fetch or network limits is reached
	var (
		bytes    int            // accumulated response size
		receipts []rlp.RawValue // per-block encoded receipt lists
	)
	borCfg := chain.Config().Bor
	for lookups, hash := range query {
		if bytes >= softResponseLimit || len(receipts) >= maxReceiptsServe ||
			lookups >= 2*maxReceiptsServe {
			break
		}

		// Resolve the block number; it decides which hardfork path applies.
		number := rawdb.ReadHeaderNumber(chain.DB(), hash)
		if number == nil {
			continue
		}

		// If we're past the Madhugiri hardfork, state-sync receipts (if present) are stored
		// with normal block receipts so no special handling needed.
		if borCfg != nil && borCfg.IsMadhugiri(big.NewInt(int64(*number))) {
			allReceipts := chain.GetReceiptsRLP(hash)
			if allReceipts == nil {
				// Only serve an empty entry when our header confirms the
				// block truly has no receipts; otherwise skip this hash.
				if header := chain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
					continue
				}
			}
			body := chain.GetBodyRLP(hash)
			if body == nil {
				continue
			}
			// Noop as no special handling is needed
			isStateSyncReceipt := func(index int) bool {
				return false
			}
			results, err := blockReceiptsToNetwork69(allReceipts, body, isStateSyncReceipt)
			if err != nil {
				log.Error("Error in block receipts conversion", "hash", hash, "err", err)
				continue
			}

			receipts = append(receipts, results)
			bytes += len(results)
			continue
		}

		// Before Madhugiri hardfork, we need to fetch state-sync receipts separately along with fetching
		// block receipts. Upon fetching, decode them, merge them into a single unit and re-encode
		// the final list to be sent over p2p.
		normalReceipts := chain.GetReceiptsRLP(hash)
		var normalReceiptsDecoded []*types.ReceiptForStorage
		if normalReceipts != nil {
			if err := rlp.DecodeBytes(normalReceipts, &normalReceiptsDecoded); err != nil {
				log.Error("Failed to decode normal receipts", "err", err)
				continue
			}
		}

		// Fetch state-sync transaction receipt (if any)
		borReceipt := chain.GetBorReceiptRLPByHash(hash)
		var borReceiptDecoded types.ReceiptForStorage
		if borReceipt != nil {
			if err := rlp.DecodeBytes(borReceipt, &borReceiptDecoded); err != nil {
				log.Error("Failed to decode state-sync receipt", "err", err)
				continue
			}
		}

		// Check if receipts are nil due to non existence or something else
		if normalReceipts == nil && borReceipt == nil {
			// Don't append empty receipt data for this block if either the local header is nil
			// or the receipt root of header denotes existence of receipt (i.e. is not empty hash)
			if header := chain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
				continue
			}
		}

		// Track existence of bor receipts for encoding
		var isBorReceiptPresent bool

		// We atleast have some non-nil data for this block. Combine the receipts for encoding.
		var blockReceipts []*types.ReceiptForStorage = make([]*types.ReceiptForStorage, 0)
		if normalReceipts != nil {
			blockReceipts = append(blockReceipts, normalReceiptsDecoded...)
		}
		if borReceipt != nil {
			isBorReceiptPresent = true
			blockReceipts = append(blockReceipts, &borReceiptDecoded)
		}

		// isStateSyncReceipt denotes whether a receipt belongs to state-sync transaction or not
		isStateSyncReceipt := func(index int) bool {
			// If bor receipt is present, it will always be at the end of list
			if isBorReceiptPresent && index == len(blockReceipts)-1 {
				return true
			}
			return false
		}

		// Encode the final list and convert to network format
		encodedBlockReceipts, err := rlp.EncodeToBytes(blockReceipts)
		if err != nil {
			continue
		}
		body := chain.GetBodyRLP(hash)
		if body == nil {
			continue
		}

		results, err := blockReceiptsToNetwork69(encodedBlockReceipts, body, isStateSyncReceipt)
		if err != nil {
			log.Error("Error in block receipts conversion", "hash", hash, "err", err)
			continue
		}

		receipts = append(receipts, results)
		bytes += len(results)
	}

	return receipts
}

// handleNewBlockhashes ingests a batch of block announcements from a peer.
func handleNewBlockhashes(backend Backend, msg Decoder, peer *Peer) error {
	ann := new(NewBlockHashesPacket)
	if err := msg.Decode(ann); err != nil {
		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
	}
	// Remember that the remote node already knows these blocks.
	for _, announced := range *ann {
		peer.markBlock(announced.Hash)
	}
	// Hand the whole batch to the backend for retrieval scheduling.
	return backend.Handle(peer, ann)
}

// handleNewBlock processes a block propagated by a remote peer: it decodes
// the announcement, sanity checks the embedded block, stamps it with arrival
// metadata and hands it to the backend for import scheduling.
func handleNewBlock(backend Backend, msg Decoder, peer *Peer) error {
	// Retrieve and decode the propagated block
	ann := new(NewBlockPacket)
	if err := msg.Decode(ann); err != nil {
		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
	}

	if err := ann.sanityCheck(); err != nil {
		return err
	}
	// Verify the advertised uncle and transaction root against the body to
	// catch malformed propagations early.
	if hash := types.CalcUncleHash(ann.Block.Uncles()); hash != ann.Block.UncleHash() {
		log.Warn("Propagated block has invalid uncles", "have", hash, "exp", ann.Block.UncleHash())
		return nil // TODO(karalabe): return error eventually, but wait a few releases
	}

	if hash := types.DeriveSha(ann.Block.Transactions(), trie.NewStackTrie(nil)); hash != ann.Block.TxHash() {
		log.Warn("Propagated block has invalid body", "have", hash, "exp", ann.Block.TxHash())
		return nil // TODO(karalabe): return error eventually, but wait a few releases
	}
	// Stamp the block with its arrival metadata. Capture msg.Time() once so
	// ReceivedAt and AnnouncedAt are guaranteed to carry the same timestamp
	// (the original called it twice for the two fields).
	msgTime := msg.Time()
	ann.Block.ReceivedAt = msgTime
	ann.Block.ReceivedFrom = peer
	ann.Block.AnnouncedAt = &msgTime

	// Mark the peer as owning the block
	peer.markBlock(ann.Block.Hash())

	return backend.Handle(peer, ann)
}

// handleBlockHeaders delivers a header response to its pending request.
func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error {
	res := new(BlockHeadersPacket)
	if err := msg.Decode(res); err != nil {
		return err
	}
	// Lazily computed header hashes, consumed by the dispatcher to validate
	// the response against the original request.
	metadata := func() interface{} {
		hashes := make([]common.Hash, 0, len(res.BlockHeadersRequest))
		for _, header := range res.BlockHeadersRequest {
			hashes = append(hashes, header.Hash())
		}
		return hashes
	}
	return peer.dispatchResponse(&Response{
		id:   res.RequestId,
		code: BlockHeadersMsg,
		Res:  &res.BlockHeadersRequest,
	}, metadata)
}

// handleBlockBodies delivers a block body response to its pending request.
func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
	res := new(BlockBodiesPacket)
	if err := msg.Decode(res); err != nil {
		return err
	}
	// Lazily derives per-body transaction/uncle/withdrawal hashes for the
	// dispatcher to validate the response against the original request.
	metadata := func() interface{} {
		n := len(res.BlockBodiesResponse)
		var (
			txsHashes        = make([]common.Hash, n)
			uncleHashes      = make([]common.Hash, n)
			withdrawalHashes = make([]common.Hash, n)
		)
		hasher := trie.NewStackTrie(nil)
		for i := range res.BlockBodiesResponse {
			body := &res.BlockBodiesResponse[i]
			txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher)
			uncleHashes[i] = types.CalcUncleHash(body.Uncles)
			if body.Withdrawals != nil {
				withdrawalHashes[i] = types.DeriveSha(types.Withdrawals(body.Withdrawals), hasher)
			}
		}
		return [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}
	}
	return peer.dispatchResponse(&Response{
		id:   res.RequestId,
		code: BlockBodiesMsg,
		Res:  &res.BlockBodiesResponse,
	}, metadata)
}

// handleReceipts delivers a receipts response (generic over the protocol
// version's receipt list type) to the request dispatcher.
func handleReceipts[L ReceiptsList](backend Backend, msg Decoder, peer *Peer) error {
	// A batch of receipts arrived to one of our previous requests
	res := new(ReceiptsPacket[L])
	if err := msg.Decode(res); err != nil {
		return err
	}

	// Assign temporary hashing buffer to each list item, the same buffer is shared
	// between all receipt list instances.
	buffers := new(receiptListBuffers)
	for i := range res.List {
		res.List[i].setBuffers(buffers)
	}

	// The `metadata` function below was used earlier to calculate `ReceiptHash` which is further
	// used to validate against `header.ReceiptHash`. By default, state-sync receipts (which are
	// appended at the end of list for a block) are excluded from the `ReceiptHash` calculation.
	// After the Madhugiri hardfork, they should be included in the calculation. We don't have
	// access to block number here so we can't determine whether to exclude or not. Instead, just
	// ignore the `metadata` function and pass on the whole receipt list as is. The receipt queue
	// handler which has access to block number will take care of the exclusion if needed.
	metadata := func() interface{} {
		return nil
	}

	// Assign the decoded receipt list to the result of `Response` packet.
	return peer.dispatchResponse(&Response{
		id:   res.RequestId,
		code: ReceiptsMsg,
		Res:  &res.List,
	}, metadata)
}

// EncodeReceiptsAndPrepareHasher encodes a list of receipts to the storage format (does not
// include TxType field). It also returns a function which calculates `ReceiptHash` of a receipt list
// based on whether we've crossed the Madhugiri hardfork or not.
func EncodeReceiptsAndPrepareHasher(packet interface{}, borCfg *params.BorConfig) (ReceiptsRLPResponse, func(int, *big.Int) common.Hash) {
	// Dispatch on the concrete receipt list type; add new cases here when a
	// future protocol version introduces another list type.
	switch list := packet.(type) {
	case []*ReceiptList68:
		return encodeReceiptsAndPrepareHasher(list, borCfg)
	case *[]*ReceiptList68:
		return encodeReceiptsAndPrepareHasher(*list, borCfg)
	case []*ReceiptList69:
		return encodeReceiptsAndPrepareHasher(list, borCfg)
	case *[]*ReceiptList69:
		return encodeReceiptsAndPrepareHasher(*list, borCfg)
	default:
		// This shouldn't happen unless there's a bug in identifying type of receipt list
		// or there's a new type which isn't handled here.
		log.Debug("EncodeReceiptsAndPrepareHasher: unsupported receipt list type", "type", fmt.Sprintf("%T", packet))
		return nil, nil
	}
}

// encodeReceiptsAndPrepareHasher is the generic core shared by all receipt
// list types: it storage-encodes every list and returns a closure computing
// the receipt-trie hash for a given list index and block number.
func encodeReceiptsAndPrepareHasher[L ReceiptsList](receipts []L, borCfg *params.BorConfig) (ReceiptsRLPResponse, func(int, *big.Int) common.Hash) {
	encoded := make(ReceiptsRLPResponse, len(receipts))
	for i := range receipts {
		encoded[i] = receipts[i].EncodeForStorage()
	}

	hasher := trie.NewStackTrie(nil)
	hashAt := func(index int, number *big.Int) common.Hash {
		// Before the Madhugiri hardfork the state-sync receipt is not part
		// of the receipt hash, so drop it from the list prior to hashing.
		if !borCfg.IsMadhugiri(number) {
			receipts[index].ExcludeStateSyncReceipt()
		}
		return types.DeriveSha(receipts[index], hasher)
	}

	return encoded, hashAt
}

// handleNewPooledTransactionHashes ingests transaction announcements.
func handleNewPooledTransactionHashes(backend Backend, msg Decoder, peer *Peer) error {
	// Drop announcements outright while we can't accept transactions anyway.
	if !backend.AcceptTxs() {
		return nil
	}
	ann := new(NewPooledTransactionHashesPacket)
	if err := msg.Decode(ann); err != nil {
		return err
	}
	// The three announcement fields must line up one-to-one.
	if len(ann.Hashes) != len(ann.Types) || len(ann.Hashes) != len(ann.Sizes) {
		return fmt.Errorf("NewPooledTransactionHashes: invalid len of fields in %v %v %v", len(ann.Hashes), len(ann.Types), len(ann.Sizes))
	}
	// Note the remote as knowing each announced transaction, then let the
	// backend schedule retrieval of the unknown ones.
	for _, h := range ann.Hashes {
		peer.markTransaction(h)
	}
	return backend.Handle(peer, ann)
}

// handleGetPooledTransactions serves a remote request for pooled transactions.
func handleGetPooledTransactions(backend Backend, msg Decoder, peer *Peer) error {
	var req GetPooledTransactionsPacket
	if err := msg.Decode(&req); err != nil {
		return err
	}
	hashes, txs := answerGetPooledTransactions(backend, req.GetPooledTransactionsRequest)
	return peer.ReplyPooledTransactionsRLP(req.RequestId, hashes, txs)
}

// answerGetPooledTransactions collects the RLP encodings of the requested
// transactions from the local pool, up to the soft response size limit.
func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsRequest) ([]common.Hash, []rlp.RawValue) {
	var (
		size   int // accumulated response size in bytes
		hashes []common.Hash
		txs    []rlp.RawValue
	)
	pool := backend.TxPool()
	for _, hash := range query {
		if size >= softResponseLimit {
			break
		}
		// Transactions unknown to our pool are silently skipped.
		encoded := pool.GetRLP(hash)
		if len(encoded) == 0 {
			continue
		}
		hashes = append(hashes, hash)
		txs = append(txs, encoded)
		size += len(encoded)
	}
	return hashes, txs
}

// handleTransactions ingests a batch of directly propagated transactions.
func handleTransactions(backend Backend, msg Decoder, peer *Peer) error {
	// Ignore propagations while we can't accept transactions anyway.
	if !backend.AcceptTxs() {
		return nil
	}
	var txs TransactionsPacket
	if err := msg.Decode(&txs); err != nil {
		return err
	}
	// Validate each entry and note the peer as knowing it.
	for i := range txs {
		tx := txs[i]
		if tx == nil {
			return fmt.Errorf("Transactions: transaction %d is nil", i)
		}
		peer.markTransaction(tx.Hash())
	}
	return backend.Handle(peer, &txs)
}

// handlePooledTransactions delivers a pooled-transactions response.
func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error {
	// Ignore responses while we can't accept transactions anyway.
	if !backend.AcceptTxs() {
		return nil
	}
	var txs PooledTransactionsPacket
	if err := msg.Decode(&txs); err != nil {
		return err
	}
	// Validate each entry and note the peer as knowing it.
	for i := range txs.PooledTransactionsResponse {
		tx := txs.PooledTransactionsResponse[i]
		if tx == nil {
			return fmt.Errorf("PooledTransactions: transaction %d is nil", i)
		}
		peer.markTransaction(tx.Hash())
	}
	// Close out the pending request this response fulfils.
	requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId)

	return backend.Handle(peer, &txs.PooledTransactionsResponse)
}

// handleBlockRangeUpdate records the latest block range advertised by a peer.
func handleBlockRangeUpdate(backend Backend, msg Decoder, peer *Peer) error {
	update := new(BlockRangeUpdatePacket)
	if err := msg.Decode(update); err != nil {
		return err
	}
	if err := update.Validate(); err != nil {
		return err
	}
	// No processing yet; simply remember the advertised range on the peer.
	peer.lastRange.Store(update)
	return nil
}
