package commands

import (
	"context"
	"encoding/csv"
	"errors"
	"fmt"
	"os"
	"sort"
	"strconv"
	"time"

	jsoniter "github.com/json-iterator/go"
	"github.com/ledgerwatch/erigon-lib/common"
	"github.com/ledgerwatch/erigon-lib/common/cmp"
	"github.com/ledgerwatch/erigon/core"
	"github.com/ledgerwatch/erigon/core/state"
	"github.com/ledgerwatch/erigon/core/types"
	"github.com/ledgerwatch/erigon/core/vm"
	"github.com/ledgerwatch/erigon/core/vm/evmtypes"
	"github.com/ledgerwatch/erigon/rpc"
	"github.com/ledgerwatch/erigon/turbo/transactions"
	"github.com/ledgerwatch/log/v3"
	"github.com/schollz/progressbar/v3"
)

// batchSize is the number of blocks covered by one replay batch in
// ReplayExperiment.
var batchSize = 1000000

// GetBlockByNumber returns the block for the given RPC block number, reading
// it through a short-lived read-only transaction.
func (api *PrivateDebugAPIImpl) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) {
	tx, err := api.db.BeginRo(ctx)
	if err != nil {
		// Check the error BEFORE deferring Rollback: the original deferred
		// first, which would dereference a nil tx when BeginRo fails.
		return nil, err
	}
	defer tx.Rollback()
	return api.blockByRPCNumber(ctx, number, tx)
}

// blocksByRange fetches blocks in [start, end) using maxWorkers concurrent
// fetchers. Block 0 is never fetched (start is bumped to 1).
// NOTE(review): results are appended in completion order, so the returned
// slice is NOT sorted by block number; callers needing an order must sort.
func (api *PrivateDebugAPIImpl) blocksByRange(start, end uint64, maxWorkers int) (blocks types.Blocks, err error) {
	if start == 0 {
		start = 1
	}
	if end <= start {
		// Guard: a non-positive task count would panic in make(chan, n).
		return types.Blocks{}, nil
	}
	totalTasks := int(end - start)

	taskCh := make(chan uint64, totalTasks)
	go func() {
		for i := start; i < end; i++ {
			taskCh <- i
		}
		close(taskCh)
	}()

	resCh := make(chan *types.Block, totalTasks)

	fetchWorker := func() {
		for task := range taskCh {
			blk, fetchErr := api.GetBlockByNumber(context.Background(), rpc.BlockNumber(task))
			if blk == nil {
				// Report the actual failing block number and underlying error;
				// the original printed `start` here, hiding the real block,
				// and dropped the fetch error entirely.
				panic(fmt.Errorf("block %v not found: %v", task, fetchErr))
			}
			resCh <- blk
		}
	}

	for i := 0; i < maxWorkers; i++ {
		go fetchWorker()
	}

	blocks = make(types.Blocks, 0, totalTasks)
	for i := 0; i < totalTasks; i++ {
		blocks = append(blocks, <-resCh)
	}
	return
}

// TestSampleMidBlockIsStable re-runs the gas-used median sampling testCount
// times and verifies every run selects exactly the same block numbers as the
// first run, returning an error on the first divergence.
func (api *PrivateDebugAPIImpl) TestSampleMidBlockIsStable(ctx context.Context, start, end, interval, testCount uint64) error {
	lastBlocks, err := api.SampleMidBlockTaskByGasUsed(start, end, interval)
	if err != nil {
		log.Error(err.Error())
		return err
	}
	for i := uint64(0); i < testCount; i++ {
		sampledBlocks, err := api.SampleMidBlockTaskByGasUsed(start, end, interval)
		if err != nil {
			log.Error(err.Error())
			return err
		}
		// Compare lengths before indexing: the original indexed lastBlocks[j]
		// unconditionally and could panic if a run returned more blocks.
		if len(sampledBlocks) != len(lastBlocks) {
			log.Error("sampled block count not stable", "last", len(lastBlocks), "current", len(sampledBlocks))
			return fmt.Errorf("sampled block count not stable")
		}
		for j := range sampledBlocks {
			if sampledBlocks[j].NumberU64() != lastBlocks[j].NumberU64() {
				log.Error("sampled block number not stable", "last", lastBlocks[j].NumberU64(), "current", sampledBlocks[j].NumberU64())
				return fmt.Errorf("sampled block number not stable")
			}
		}
	}
	log.Info("sampled block number stable")
	return nil
}

// SampleMidBlockTaskByGasUsed samples one block per interval in [start, end):
// each interval's blocks are ordered by gas used (ties broken by block
// number) and the median is kept. interval == 1 returns every block in the
// range unsampled.
func (api *PrivateDebugAPIImpl) SampleMidBlockTaskByGasUsed(start, end, interval uint64) (types.Blocks, error) {
	log.Info("sample blocks by mid gas used", "start", start, "end", end, "interval", interval)
	if interval < 1 {
		return nil, errors.New("interval must > 0")
	} else if interval == 1 {
		return api.blocksByRange(start, end, 100)
	}
	bar := progressbar.Default(int64((end - start) / interval))
	sampled := make(types.Blocks, 0)
	t0 := time.Now()
	for i := start; i < end; i += interval {
		bar.Add(1) //nolint:errcheck // best-effort progress display
		// Clamp the batch to `end` so the trailing batch never fetches blocks
		// outside the requested range.
		blocks, err := api.blocksByRange(i, cmp.Min(i+interval, end), 100)
		if err != nil {
			return nil, err
		}
		if len(blocks) == 0 {
			continue
		}
		sort.SliceStable(blocks, func(a, b int) bool {
			if blocks[a].GasUsed() != blocks[b].GasUsed() {
				return blocks[a].GasUsed() < blocks[b].GasUsed()
			}
			return blocks[a].NumberU64() < blocks[b].NumberU64()
		})
		// Use the actual batch length for the median index: `interval/2` is
		// out of range for partial batches (first batch when start==0 bumps
		// to 1, or the clamped trailing batch).
		sampled = append(sampled, blocks[len(blocks)/2])
	}
	log.Info("sample fetch mid block by gas used finished", "cost", time.Since(t0), "total", len(sampled))
	return sampled, nil
}

// ReplayExperiment replays gas-used median samples of
// [startBlockNum, endBlockNum) in batches of batchSize blocks, accumulating
// per-batch results and reporting batch and total metrics.
// NOTE(review): `stream` is currently unused; kept for interface stability.
func (api *PrivateDebugAPIImpl) ReplayExperiment(ctx context.Context, startBlockNum uint64, endBlockNum uint64, interval uint64, stream *jsoniter.Stream) error {
	cost := time.Duration(0)
	totalRes := &core.BatchBlockTaskResult{TxLevelCostLookUp: &core.TxLevelCostLookUp{}}
	maxIdx := 0
	for i := startBlockNum; i < endBlockNum; i += uint64(batchSize) {
		// Clamp so the last batch does not sample blocks past endBlockNum.
		batchEnd := cmp.Min(i+uint64(batchSize), endBlockNum)
		log.Info("replay starts", "from", i, "to", batchEnd)
		blocks, err := api.SampleMidBlockTaskByGasUsed(i, batchEnd, interval)
		if err != nil {
			return err
		}
		t0 := time.Now()
		res, err := api.sampleReplayOnePass(blocks)
		cost += time.Since(t0)
		if err != nil {
			log.Error(err.Error())
			return err
		}
		reportResult(res)
		totalRes.Add(res)
		totalRes.TxLevelCostLookUp.AddTxCostLookUp(res.TxLevelCostLookUp)
		maxIdx = cmp.Max(maxIdx, core.TxCount50Percentile(blocks))
	}
	log.Info("total replay finished", "process cost", cost)
	reportResult(totalRes)
	return reportTxnLevelRes(totalRes, make(types.Blocks, 0), maxIdx)
}

// reportResult logs aggregate replay throughput metrics for one batch.
// NOTE(review): rates are NaN/Inf when ProcessTime or StateReadCnt is zero;
// acceptable for log output.
func reportResult(b *core.BatchBlockTaskResult) {
	log.Info("batch result", "block cnt", b.BlockCnt, "tx cnt", b.TxCnt, "cost", b.ProcessTime,
		"state read time", b.StateReadTime, "state read cnt", b.StateReadCnt,
		"blocks/s", float64(b.BlockCnt)/b.ProcessTime.Seconds(),
		"txs/s", float64(b.TxCnt)/b.ProcessTime.Seconds(),
		// Convert to float BEFORE dividing: the original integer division
		// b.GasUsed/1000000 truncated away sub-megagas remainders.
		"mgas/s", float64(b.GasUsed)/1e6/b.ProcessTime.Seconds(),
		"avg state read delay", float64(b.StateReadTime.Milliseconds())/float64(b.StateReadCnt))
}

// sampleReplayOnePass replays every block in `blocks` (currently with a
// single worker) and aggregates the per-block metrics into one
// BatchBlockTaskResult. Returns the first worker error encountered.
func (api *PrivateDebugAPIImpl) sampleReplayOnePass(blocks types.Blocks) (*core.BatchBlockTaskResult, error) {
	concurrency := 1
	taskCh := make(chan *types.Block, len(blocks))
	go func() {
		for _, block := range blocks {
			taskCh <- block
		}
		close(taskCh)
	}()

	resCh := make(chan *core.BlockTaskResult, len(blocks))

	for i := 0; i < concurrency; i++ {
		go func(id int) {
			api.blockWorker(id, taskCh, resCh)
		}(i)
	}

	result := &core.BatchBlockTaskResult{
		BlockCnt: len(blocks),
	}
	result.TxLevelCostLookUp = core.NewTxLevelCostLookUp(core.MaxTxCnt(blocks))

	bar := progressbar.Default(int64(len(blocks)))
	for i := 0; i < len(blocks); i++ {
		bar.Add(1) //nolint:errcheck // best-effort progress display
		ret := <-resCh
		if ret.Err != nil {
			// Was `return nil, err` with a never-assigned local err, which
			// returned (nil, nil) and silently hid worker failures.
			return nil, ret.Err
		}
		result.TxCnt += ret.Block.Transactions().Len()
		result.GasUsed += ret.Block.GasUsed()
		result.ProcessTime += ret.ProcessTime
		result.ExecTime += ret.ExecTime
		result.StateReadTime += ret.StateReadTime
		result.StateReadCnt += ret.StateReadCnt

		for j := range ret.DetailTxMetrics {
			result.TxLevelCostLookUp.Add(j, ret.DetailTxMetrics[j].PrepareCost, ret.DetailTxMetrics[j].ProcessTime)
		}
	}

	return result, nil
}

// blockWorker re-executes each block received on taskCh against historical
// state and sends exactly one result per block on resultCh (carrying Err on
// failure). `id` identifies the worker for debugging.
func (api *PrivateDebugAPIImpl) blockWorker(id int, taskCh <-chan *types.Block, resultCh chan<- *core.BlockTaskResult) {
	state.EnabledExpensive = true

	engine := api.engine()

	for block := range taskCh {
		tx, err := api.db.BeginRo(context.Background())
		if err != nil {
			log.Error(err.Error())
			// Report the failure so the consumer is not left waiting for a
			// result that will never arrive (the original just returned).
			resultCh <- &core.BlockTaskResult{Err: err}
			return
		}

		chainConfig, err := api.chainConfig(tx)
		if err != nil {
			// Was silently ignored; a nil chainConfig would panic below.
			log.Error(err.Error())
			resultCh <- &core.BlockTaskResult{Err: err}
			tx.Rollback()
			continue
		}

		detailTxMetrics := make([]*core.DetailTxMetrics, len(block.Transactions()))

		pstart := time.Now()
		_, blockCtx, _, ibs, _, err := transactions.ComputeTxEnv(context.Background(), engine, block, chainConfig, api._blockReader, tx, 0, api.historyV3(tx))
		if err != nil {
			log.Error(err.Error())
			resultCh <- &core.BlockTaskResult{Err: err}
			tx.Rollback()
			continue
		}
		var (
			signer  = types.MakeSigner(chainConfig, block.NumberU64(), block.Time())
			rules   = chainConfig.Rules(block.NumberU64(), block.Time())
			txns    = block.Transactions()
			usedGas = uint64(0)
		)
		for idx, txn := range txns {
			// Generate the next state snapshot fast without tracing.
			txStart := time.Now()

			ibs.SetTxContext(txn.Hash(), block.Hash(), idx)
			// NOTE(review): the AsMessage error is dropped, as in the
			// original; a failed conversion yields a zero message. Consider
			// handling it explicitly.
			msg, _ := txn.AsMessage(*signer, block.BaseFee(), rules)

			if msg.FeeCap().IsZero() && engine != nil {
				syscall := func(contract common.Address, data []byte) ([]byte, error) {
					return core.SysCallContract(contract, data, chainConfig, ibs, block.Header(), engine, true /* constCall */)
				}
				msg.SetIsFree(engine.IsServiceTransaction(msg.From(), syscall))
			}

			txCtx := evmtypes.TxContext{
				TxHash:   txn.Hash(),
				Origin:   msg.From(),
				GasPrice: msg.GasPrice(),
			}

			vmenv := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vm.Config{})
			execResult, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()).AddDataGas(msg.DataGas()), true /* refunds */, false /* gasBailout */)
			if err != nil {
				resultCh <- &core.BlockTaskResult{Err: err}
				tx.Rollback() // was leaked on this early-return path
				return
			}

			usedGas += execResult.UsedGas

			detailTxMetrics[idx] = &core.DetailTxMetrics{
				Index:       idx,
				PrepareCost: txStart.Sub(pstart),
				ProcessTime: time.Since(txStart),
			}

			// Finalize the state so any modifications are written to the trie.
			// Only delete empty objects if EIP-158/161 (a.k.a. Spurious
			// Dragon) is in effect.
			if err := ibs.FinalizeTx(rules, state.NewNoopWriter()); err != nil {
				resultCh <- &core.BlockTaskResult{Err: err}
				tx.Rollback() // was leaked on this early-return path
				return
			}
		}
		ptime := time.Since(pstart)
		tx.Rollback()
		if usedGas != block.GasUsed() {
			resultCh <- &core.BlockTaskResult{Err: fmt.Errorf("unmatched gasUsed, block: %v, origin: %v, result: %v", block.NumberU64(), block.GasUsed(), usedGas)}
		} else {
			resultCh <- &core.BlockTaskResult{
				Block: block,
				ProcessCost: core.ProcessCost{
					ProcessTime:   ptime,
					ExecTime:      ptime - (ibs.AccountReads + ibs.StorageReads + ibs.CodeReads + ibs.IncarReads),
					StateReadCnt:  uint64(ibs.AccountReadCnt + ibs.StorageReadCnt + ibs.CodeReadCnt + ibs.IncarReadCnt),
					StateReadTime: ibs.AccountReads + ibs.StorageReads + ibs.CodeReads + ibs.IncarReads,
				},
				DetailTxMetrics: detailTxMetrics,
			}
		}
	}
}

// reportOverheadResult logs average prepare/process costs per transaction
// index, up to (but not including) maxIndex. Indexes with no samples are
// skipped.
func reportOverheadResult(result *core.TxLevelCostLookUp, maxIndex int) {
	for i, cnt := range result.TxIndexCnt {
		if i >= maxIndex {
			break
		}
		if cnt == 0 {
			continue
		}
		prepare := result.PrepareCosts[i]
		process := result.ProcessCosts[i]
		// Guard the ratio: the original divided Durations directly, which
		// panics (integer division by zero) when process is 0 and truncates
		// the ratio to a whole number otherwise.
		ratio := float64(0)
		if process > 0 {
			ratio = float64(prepare) / float64(process)
		}
		log.Info("overhead result", "index", i,
			"avg prepare (ms)", float64(prepare)/float64(cnt)/float64(time.Millisecond),
			"avg process (ms)", float64(process)/float64(cnt)/float64(time.Millisecond),
			"avg prepare/process", ratio,
		)
	}
}

// reportTxnLevelRes writes per-tx-index cost aggregates to
// ./tx-level-replay.csv. When `blocks` is non-empty, maxIndex is recomputed
// as the 50th-percentile transaction count of those blocks.
func reportTxnLevelRes(blockRes *core.BatchBlockTaskResult, blocks types.Blocks, maxIndex int) error {
	if len(blocks) != 0 {
		maxIndex = core.TxCount50Percentile(blocks)
	}
	csvRecords := [][]string{{"index", "prepare (ms)", "prepare/process", "speed up"}}
	var totalCosts time.Duration
	for i, cnt := range blockRes.TxLevelCostLookUp.TxIndexCnt {
		if i >= maxIndex {
			break
		}
		if cnt == 0 {
			continue
		}
		totalCosts += blockRes.TxLevelCostLookUp.TotalCosts[i]

		prepare := blockRes.TxLevelCostLookUp.PrepareCosts[i]
		process := blockRes.TxLevelCostLookUp.ProcessCosts[i]
		// Guard the ratio: the original divided Durations directly, which
		// panics (integer division by zero) when process is 0 and truncates
		// the ratio to a whole number otherwise.
		ratio := float64(0)
		if process > 0 {
			ratio = float64(prepare) / float64(process)
		}
		csvRecords = append(csvRecords, []string{
			strconv.Itoa(i),
			formatFloat64(float64(prepare) / float64(cnt) / float64(time.Millisecond)),
			formatFloat64(ratio),
			formatFloat64(0),
		})
	}
	log.Info("tx-level process", "total speed up", float64(0))
	return writeTxlvlProcessToCSV("./tx-level-replay.csv", csvRecords)
}

func writeTxlvlProcessToCSV(path string, records [][]string) error {
	File, err := os.OpenFile(path, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0666)
	if err != nil {
		log.Error(err.Error())
	}
	defer File.Close()

	WriterCsv := csv.NewWriter(File)

	for _, record := range records {
		err = WriterCsv.Write(record)
		if err != nil {
			log.Error(err.Error())
			return err
		}
	}
	WriterCsv.Flush()
	log.Info("write to csv done")
	return nil
}

// formatFloat64 renders v using Go's default %v formatting for floats.
func formatFloat64(v float64) string {
	return fmt.Sprint(v)
}
