package app

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"math/rand"
	"sort"
	"time"

	"github.com/ledgerwatch/erigon/cmd/utils"
	"github.com/ledgerwatch/erigon/core"
	"github.com/ledgerwatch/erigon/core/state"
	"github.com/ledgerwatch/erigon/core/types"
	"github.com/ledgerwatch/erigon/eth"
	"github.com/ledgerwatch/erigon/ethdb/pebble"
	"github.com/ledgerwatch/erigon/turbo/debug"
	turboNode "github.com/ledgerwatch/erigon/turbo/node"
	"github.com/ledgerwatch/log/v3"
	"github.com/urfave/cli/v2"
)

// MakeSlimArchiveDB opens (or creates) the slim-archive pebble database
// rooted at path and returns the handle.
func MakeSlimArchiveDB(path string) (*pebble.Database, error) {
	// NOTE(review): sizing constants passed straight to pebble.New —
	// presumably cache size and handle count; confirm against pebble.New.
	const (
		cacheSize   = 200000
		handleCount = 1000000
	)
	log.Info("open slim archive", "path", path)
	return pebble.New(path, cacheSize, handleCount, "", false)
}

// testSampleReplay samples blocks in [start, end] (by gas used, at the
// configured interval), replays them against the selected archive
// (slim or erigon) with the configured concurrency, and logs aggregate
// throughput via reportResult.
//
// Returns an error for missing/invalid flags instead of panicking, so
// the CLI can surface a readable message.
func testSampleReplay(ctx *cli.Context) error {
	state.EnabledExpensive = true
	logger, err := debug.Setup(ctx, true /* root logger */)
	if err != nil {
		return err
	}

	if !ctx.IsSet(utils.DataDirFlag.Name) {
		return fmt.Errorf("required flag %s not set", utils.DataDirFlag.Name)
	}

	nodeCfg := turboNode.NewNodConfigUrfave(ctx, logger)
	ethCfg := turboNode.NewEthConfigUrfave(ctx, nodeCfg, logger)

	stack := makeConfigNode(nodeCfg, logger)
	defer stack.Close()

	ethereum, err := eth.New(stack, ethCfg, logger)
	if err != nil {
		return err
	}

	if !ctx.IsSet(ReplayStartFlag.Name) {
		return fmt.Errorf("required flag %s not set", ReplayStartFlag.Name)
	}
	start := ctx.Int(ReplayStartFlag.Name)

	if !ctx.IsSet(ReplayEndFlag.Name) {
		return fmt.Errorf("required flag %s not set", ReplayEndFlag.Name)
	}
	end := ctx.Int(ReplayEndFlag.Name)

	interval := ctx.Int(IntervalFlag.Name)
	archiveType := ctx.String(ArchiveTypeFlag.Name)
	switch archiveType {
	case "slim":
		log.Info("use slim archive")
		archive, err := MakeSlimArchiveDB(stack.ResolvePath("archive"))
		if err != nil {
			return err
		}
		ethereum.SetSlimArchive(archive)
	case "erigon":
		log.Info("use erigon archive")
	default:
		return fmt.Errorf("invalid archive type %q", archiveType)
	}

	blocks, err := ethereum.SampleMidBlockTaskByGasUsed(uint64(start), uint64(end), uint64(interval))
	if err != nil {
		return err
	}

	log.Info("start sample replay", "start", start, "end", end, "interval", interval, "concurrency", ctx.Int(ConcurrencyFlag.Name))
	result, err := ethereum.ProcessBlocks(blocks, ctx.Int(ConcurrencyFlag.Name), false)
	if err != nil {
		return err
	}
	reportResult(result)
	return nil
}

// testTxReplayCost samples blocks in [start, end] (by gas used), replays
// them with TestBlockProcessV2 to collect per-transaction prepare/process
// costs, logs an overhead summary, and saves the raw results to a JSON
// file via reportAndSaveResult.
//
// Fixes: removed a duplicated error check after sampling; replaced panics
// on missing flags with returned errors; the archive-type flag is read once.
func testTxReplayCost(ctx *cli.Context) error {
	state.EnabledExpensive = true
	logger, err := debug.Setup(ctx, true /* root logger */)
	if err != nil {
		return err
	}

	if !ctx.IsSet(utils.DataDirFlag.Name) {
		return fmt.Errorf("required flag %s not set", utils.DataDirFlag.Name)
	}

	nodeCfg := turboNode.NewNodConfigUrfave(ctx, logger)
	ethCfg := turboNode.NewEthConfigUrfave(ctx, nodeCfg, logger)

	stack := makeConfigNode(nodeCfg, logger)
	defer stack.Close()

	ethereum, err := eth.New(stack, ethCfg, logger)
	if err != nil {
		return err
	}

	if !ctx.IsSet(ReplayStartFlag.Name) {
		return fmt.Errorf("required flag %s not set", ReplayStartFlag.Name)
	}
	start := ctx.Int(ReplayStartFlag.Name)

	if !ctx.IsSet(ReplayEndFlag.Name) {
		return fmt.Errorf("required flag %s not set", ReplayEndFlag.Name)
	}
	end := ctx.Int(ReplayEndFlag.Name)

	concurrency := ctx.Int(ConcurrencyFlag.Name)
	interval := ctx.Int(IntervalFlag.Name)
	archiveType := ctx.String(ArchiveTypeFlag.Name)
	switch archiveType {
	case "slim":
		slimPath := ctx.String(SlimArchivePathFlag.Name)
		log.Info("use slim archive", "path", slimPath)
		archive, err := MakeSlimArchiveDB(slimPath)
		if err != nil {
			return err
		}
		ethereum.SetSlimArchive(archive)
	case "erigon":
		log.Info("use erigon archive")
	default:
		return fmt.Errorf("invalid archive type %q", archiveType)
	}

	blocks, err := ethereum.SampleMidBlockTaskByGasUsed(uint64(start), uint64(end), uint64(interval))
	if err != nil {
		return err
	}

	log.Info("test block tx replay v2 ####")

	// maxIndex caps the per-index overhead report at the 50th-percentile
	// transaction count, where most indices still have samples.
	maxIndex := core.TxCount50Percentile(blocks)
	log.Info("dataset", "blocks", len(blocks), "50-p index", maxIndex, "max tx count", core.MaxTxCnt(blocks))

	replayRes, err := ethereum.TestBlockProcessV2(blocks, concurrency, archiveType == "slim")
	if err != nil {
		log.Error(err.Error())
		return err
	}

	return reportAndSaveResult(replayRes, core.MaxTxCnt(blocks), archiveType)
}

// reportResult logs aggregate throughput metrics for a finished batch
// replay run. A nil result (e.g. a run that produced nothing) is ignored.
func reportResult(b *core.BatchBlockTaskResult) {
	if b == nil {
		return
	}
	secs := b.ProcessTime.Seconds()
	log.Info("batch result", "block cnt", b.BlockCnt, "tx cnt", b.TxCnt, "cost", b.ProcessTime,
		"state read time", b.StateReadTime, "state read cnt", b.StateReadCnt,
		"blocks/s", float64(b.BlockCnt)/secs,
		"txs/s", float64(b.TxCnt)/secs,
		// Divide in float space: the old integer b.GasUsed/1000000
		// truncated to whole megagas before converting, losing precision.
		"mgas/s", float64(b.GasUsed)/1e6/secs,
		"avg state read delay", float64(b.StateReadTime.Milliseconds())/float64(b.StateReadCnt))
}
// reportOverheadResult logs, for each transaction index below maxIndex
// that has at least one sample, the average prepare and process cost and
// their ratio.
//
// Fixes: the original computed PrepareCosts[i]/ProcessCosts[i] as integer
// time.Duration division, which truncates (1ms/2ms == 0) and panics with a
// divide-by-zero when ProcessCosts[i] is 0. Both divisions now happen in
// float space, with a zero guard.
func reportOverheadResult(result *core.TxLevelCostLookUp, maxIndex int) {
	for i, cnt := range result.TxIndexCnt {
		if i >= maxIndex {
			break
		}
		if cnt == 0 {
			continue
		}
		ratio := 0.0
		if result.ProcessCosts[i] > 0 {
			ratio = float64(result.PrepareCosts[i]) / float64(result.ProcessCosts[i])
		}
		log.Info("overhead result", "index", i,
			"avg prepare (ms)", float64(result.PrepareCosts[i])/float64(cnt)/float64(time.Millisecond),
			"avg process (ms)", float64(result.ProcessCosts[i])/float64(cnt)/float64(time.Millisecond),
			"avg prepare/process", ratio,
		)
	}
}
// reportAndSaveResult sorts the per-block replay costs by block number,
// folds every transaction's metrics into a TxLevelCostLookUp (reported via
// reportOverheadResult), and persists the full result set as JSON to
// "block-tx-replay-erigon-<archiveType>.json" in the working directory.
func reportAndSaveResult(results []*core.BlockTxReplayCost, maxIndex int, archiveType string) error {
	sort.SliceStable(results, func(a, b int) bool {
		return results[a].Number < results[b].Number
	})

	lookup := core.NewTxLevelCostLookUp(maxIndex)
	for _, blockRes := range results {
		for txIdx, metrics := range blockRes.Txs {
			lookup.Add(txIdx, metrics[0], metrics[1])
		}
		blockRes.FormatJSON()
	}
	reportOverheadResult(lookup, maxIndex)

	encoded, err := json.Marshal(results)
	if err != nil {
		return err
	}
	outName := fmt.Sprintf("block-tx-replay-erigon-%s.json", archiveType)
	return ioutil.WriteFile(outName, encoded, 0644)
}

// testMultiThread samples blocks in [start, end] (by gas used) and runs
// the shuffled-transaction replay benchmark across a ladder of
// concurrency levels via testMultiThreadRun.
//
// Returns an error for missing/invalid flags instead of panicking, so
// the CLI can surface a readable message.
func testMultiThread(ctx *cli.Context) error {
	state.EnabledExpensive = true
	logger, err := debug.Setup(ctx, true /* root logger */)
	if err != nil {
		return err
	}

	if !ctx.IsSet(utils.DataDirFlag.Name) {
		return fmt.Errorf("required flag %s not set", utils.DataDirFlag.Name)
	}

	nodeCfg := turboNode.NewNodConfigUrfave(ctx, logger)
	ethCfg := turboNode.NewEthConfigUrfave(ctx, nodeCfg, logger)

	stack := makeConfigNode(nodeCfg, logger)
	defer stack.Close()

	ethereum, err := eth.New(stack, ethCfg, logger)
	if err != nil {
		return err
	}

	if !ctx.IsSet(ReplayStartFlag.Name) {
		return fmt.Errorf("required flag %s not set", ReplayStartFlag.Name)
	}
	start := ctx.Int(ReplayStartFlag.Name)

	if !ctx.IsSet(ReplayEndFlag.Name) {
		return fmt.Errorf("required flag %s not set", ReplayEndFlag.Name)
	}
	end := ctx.Int(ReplayEndFlag.Name)

	interval := ctx.Int(IntervalFlag.Name)
	archiveType := ctx.String(ArchiveTypeFlag.Name)
	switch archiveType {
	case "slim":
		log.Info("use slim archive")
		archive, err := MakeSlimArchiveDB(stack.ResolvePath("archive"))
		if err != nil {
			return err
		}
		ethereum.SetSlimArchive(archive)
	case "erigon":
		log.Info("use erigon archive")
	default:
		return fmt.Errorf("invalid archive type %q", archiveType)
	}

	blocks, err := ethereum.SampleMidBlockTaskByGasUsed(uint64(start), uint64(end), uint64(interval))
	if err != nil {
		return err
	}
	// The leading duplicate 1 looks like a deliberate warm-up run —
	// NOTE(review): confirm before removing it.
	return testMultiThreadRun(ethereum, blocks, []int{1, 1, 2, 4, 8, 16, 32, 64, 128})
}

// testMultiThreadRun replays the transactions of blocks, freshly shuffled
// per run, once for every requested concurrency level, logging the
// transaction throughput of each run. Returns the first replay error.
//
// Fix: the original read res.TxCnt BEFORE checking err, so a failing
// TestTxProcess call (which may return a nil result) caused a nil-pointer
// dereference. The error is now checked before res is used.
func testMultiThreadRun(e *eth.Ethereum, blocks types.Blocks, concurrencies []int) error {
	log.Info("test multithread")
	for _, concurrency := range concurrencies {
		log.Info(fmt.Sprintf("%v thread(s)", concurrency))

		t0 := time.Now()
		res, err := e.TestTxProcess(ShuffleTxs(blocks), concurrency)
		cost := time.Since(t0)
		if err != nil {
			log.Error(err.Error())
			return err
		}
		log.Info(fmt.Sprintf("%v thread(s) end", concurrency), "txs/s", float64(res.TxCnt)/cost.Seconds())
	}
	return nil
}

// ShuffleTxs flattens every transaction of blocks into a per-transaction
// replay task (block + index within the block) and returns the tasks in
// uniformly random order.
func ShuffleTxs(blocks types.Blocks) []*core.TransactionTask {
	log.Info("starting shuffle txs")
	total := 0
	for _, b := range blocks {
		total += b.Transactions().Len()
	}
	tasks := make([]*core.TransactionTask, 0, total)
	for _, b := range blocks {
		for idx := range b.Transactions() {
			tasks = append(tasks, &core.TransactionTask{
				Block:   b,
				TxIndex: idx,
			})
		}
	}
	rand.Seed(time.Now().Unix())
	// Fisher-Yates via the standard library instead of the hand-rolled
	// swap loop with an explicit temp variable.
	rand.Shuffle(len(tasks), func(i, j int) {
		tasks[i], tasks[j] = tasks[j], tasks[i]
	})
	return tasks
}
