package app

import (
	"bufio"
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"
	"time"

	libcommon "github.com/ledgerwatch/erigon-lib/common"
	"github.com/ledgerwatch/erigon-lib/common/hexutility"
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon/cmd/utils"
	"github.com/ledgerwatch/erigon/cmd/utils/flags"
	"github.com/ledgerwatch/erigon/core/state"
	"github.com/ledgerwatch/erigon/core/systemcontracts"
	"github.com/ledgerwatch/erigon/core/types/accounts"
	"github.com/ledgerwatch/erigon/node"
	"github.com/ledgerwatch/erigon/params/networkname"
	"github.com/ledgerwatch/erigon/turbo/debug"
	turboNode "github.com/ledgerwatch/erigon/turbo/node"
	"github.com/ledgerwatch/log/v3"
	"github.com/shirou/gopsutil/v3/mem"
	"github.com/shirou/gopsutil/v3/process"
	"github.com/urfave/cli/v2"
)

var (
	// ReplayStartFlag and ReplayEndFlag bound the block range used by the
	// replay-based experiment subcommands.
	ReplayStartFlag = &cli.IntFlag{
		Name: "replay.start",
	}
	ReplayEndFlag = &cli.IntFlag{
		Name: "replay.end",
	}
	// ConcurrencyFlag is the number of parallel workers (default 1).
	ConcurrencyFlag = &cli.IntFlag{
		Name:  "concurrency",
		Value: 1,
	}
	// IntervalFlag is an interval parameter consumed by the replay
	// experiments (units depend on the experiment; default 100).
	IntervalFlag = &cli.IntFlag{
		Name:  "interval",
		Value: 100,
	}
	// ArchiveTypeFlag selects the archive backend ("erigon" by default).
	ArchiveTypeFlag = &cli.StringFlag{
		Name:  "archive.type",
		Value: "erigon",
	}
	// SlimArchivePathFlag optionally points at a slim-archive location;
	// empty means unset.
	SlimArchivePathFlag = &cli.StringFlag{
		Name:  "slim.archive.path",
		Value: "",
	}
)

// experimentCommand bundles ad-hoc benchmarking subcommands: random key
// access against the chain DB (single- and multi-threaded) and several
// block/tx replay experiments. All subcommands share --datadir and --chain.
var experimentCommand = cli.Command{
	Name: "experiment",
	Flags: []cli.Flag{
		&utils.DataDirFlag,
		&utils.ChainFlag,
	},
	Subcommands: []*cli.Command{
		{
			Action: MigrateFlags(testRandomKeyAccess),
			Name:   "testRandomKeyAccess",
			Flags: []cli.Flag{
				&cli.StringFlag{
					Name: "test.file.path",
				},
				&cli.StringFlag{
					Name: "table",
				},
			},
		},
		{
			Action: MigrateFlags(testRandomKeyAccessConcurrent),
			Name:   "testRandomKeyAccessConcurrent",
			Flags: []cli.Flag{
				&cli.StringFlag{
					Name: "test.file.path",
				},
				&cli.StringFlag{
					Name: "tables",
				},
				&cli.IntFlag{
					Name:  "concurrency",
					Value: 1,
				},
				&cli.BoolFlag{
					Name:  "sanitycheck",
					Value: false,
				},
			},
		},
		{
			Action: MigrateFlags(testSampleReplay),
			Name:   "testSampleReplay",
			Flags: []cli.Flag{
				ReplayStartFlag,
				ReplayEndFlag,
				ConcurrencyFlag,
				IntervalFlag,
				ArchiveTypeFlag,
			},
		},
		{
			Action: MigrateFlags(testTxReplayCost),
			Name:   "testTxReplayCost",
			Flags: []cli.Flag{
				ReplayStartFlag,
				ReplayEndFlag,
				ConcurrencyFlag,
				IntervalFlag,
				ArchiveTypeFlag,
				SlimArchivePathFlag,
			},
		},
		{
			Action: MigrateFlags(testMultiThread),
			Name:   "multithread-replay-block",
			Flags: flags.Merge([]cli.Flag{
				ReplayStartFlag,
				ReplayEndFlag,
				IntervalFlag,
				ArchiveTypeFlag,
			}),
		},
	},
}

// testRandomKeyAccess replays a pre-generated key file against the chain
// database and measures random-access throughput for one table ("account",
// "storage" or "code"), along with process CPU, RSS and disk-read stats.
//
// Required flags: --datadir and --test.file.path. The key file is the path
// concatenated with the table name; each record is a fixed-width key whose
// trailing 4 bytes (account/storage only) are a big-endian block number
// selecting the historical state to read at.
func testRandomKeyAccess(ctx *cli.Context) error {
	var err error
	var logger log.Logger
	if logger, err = debug.Setup(ctx, true /* root logger */); err != nil {
		return err
	}

	if !ctx.IsSet(utils.DataDirFlag.Name) || !ctx.IsSet("test.file.path") {
		panic("params not set")
	}

	nodeCfg := turboNode.NewNodConfigUrfave(ctx, logger)

	stack := makeConfigNode(nodeCfg, logger)
	defer stack.Close()

	chainKv, err := node.OpenDatabase(stack.Config(), kv.ChainDB, logger)
	if err != nil {
		return err
	}

	// Key width per table: address (20) or address+slot (20+32), plus a
	// 4-byte block number; code keys are a bare 32-byte code hash.
	table := ctx.String("table")
	var keyLen int
	switch table {
	case "account":
		keyLen = 20 + 4
	case "storage":
		keyLen = 20 + 32 + 4
	case "code":
		keyLen = 32
	default:
		panic("invalid table")
	}
	file, err := os.Open(ctx.String("test.file.path") + table)
	if err != nil {
		return err
	}
	// Close via defer so the file is released on every return path, not
	// only on the happy path.
	defer file.Close()

	monitorQuit := make(chan struct{})
	processInfo, err := process.NewProcess(int32(os.Getpid()))
	if err != nil {
		return err
	}
	sysMem, _ := mem.VirtualMemory()
	totalPerc := float64(0)
	totalSample := 0
	lastTime := time.Now()
	lastIORead, err := ProcessDiskRead(uint64(processInfo.Pid))
	if err != nil {
		return err
	}
	initTime, initIORead := lastTime, lastIORead
	// Background sampler: every 5s log CPU, RSS and read throughput. On a
	// sampling error we log and skip the tick instead of returning, so the
	// final `monitorQuit <- struct{}{}` send can never deadlock.
	go func() {
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-monitorQuit:
				return
			case <-ticker.C:
				memInfo, err := processInfo.MemoryInfo()
				if err != nil {
					log.Error("get process mem", "err", err)
					continue
				}
				cpuPerc, err := processInfo.CPUPercent()
				if err != nil {
					log.Error("get process cpu", "err", err)
					continue
				}
				nowIoRead, _ := ProcessDiskRead(uint64(processInfo.Pid))
				nowTime := time.Now()
				log.Info("sys report", "cpu", cpuPerc, "res", memInfo.RSS, "percent", float64(memInfo.RSS)/float64(sysMem.Total)*100,
					"IO read MB/s", float64(nowIoRead-lastIORead)/nowTime.Sub(lastTime).Seconds()/1024/1024)
				totalPerc += float64(memInfo.RSS) / float64(sysMem.Total) * 100
				totalSample += 1
				lastTime, lastIORead = nowTime, nowIoRead
				// NOTE: no extra Sleep here — the ticker already paces the
				// loop; the previous extra 5s sleep doubled the interval.
			}
		}
	}()

	id := make([]byte, keyLen)
	kCnt := 0
	log.Info("access start", "table", table)

	initMem, _ := processInfo.MemoryInfo()

	var acc *accounts.Account
	var val, code []byte
	start := time.Now()
	for {
		// io.ReadFull guarantees a full key or an error; a bare Read may
		// legally return fewer bytes than requested before EOF.
		_, err := io.ReadFull(file, id)
		if err == io.EOF {
			break
		} else if err == io.ErrUnexpectedEOF {
			return fmt.Errorf("invalid key")
		} else if err != nil {
			return err
		}
		kCnt++
		number := binary.BigEndian.Uint32(id[len(id)-4:])
		tx, err := chainKv.BeginRo(context.Background())
		if err != nil {
			return err
		}
		reader := state.NewPlainState(tx, uint64(number), systemcontracts.SystemContractCodeLookup[networkname.MainnetChainName])
		switch table {
		case "account":
			acc, err = readAccountFromHistory(reader, libcommon.BytesToAddress(id[:20]))
		case "storage":
			val, err = readStorageFromHistory(reader, libcommon.BytesToAddress(id[:20]), libcommon.BytesToHash(id[20:52]))
		case "code":
			code, err = readCodeFromHistory(reader, libcommon.BytesToHash(id))
		}
		if err != nil {
			// Release the read transaction before bailing out.
			tx.Rollback()
			return err
		}
		if kCnt%100000 == 0 {
			logs := []interface{}{"table", table, "current prefix", hexutility.Encode(id[:4]), "rand number", number}
			switch table {
			case "account":
				if acc != nil {
					logs = append(logs, "bal", acc.Balance.ToBig(), "nonce", acc.Nonce)
				}
			case "storage":
				logs = append(logs, "val", hexutility.Encode(val))
			case "code":
				logs = append(logs, "code", hexutility.Encode(code))
			}
			log.Info("access report", logs...)
		}
		tx.Rollback()
	}
	finalMem, _ := processInfo.MemoryInfo()
	monitorQuit <- struct{}{}
	lastIORead, _ = ProcessDiskRead(uint64(processInfo.Pid))

	cost := time.Since(start)
	log.Info("access finish", "table", table, "cnt", kCnt, "cost", cost, "speed", float64(kCnt)/cost.Seconds())
	// Guard against a run that finished before the first monitor sample.
	avgPerc := float64(0)
	if totalSample > 0 {
		avgPerc = totalPerc / float64(totalSample)
	}
	log.Info("util report", "init mem", initMem.RSS, "final mem", finalMem.RSS,
		"avg percent", avgPerc, "avg read IO MB/s", float64(lastIORead-initIORead)/time.Since(initTime).Seconds()/1024/1024)
	return nil
}

// testRandomKeyAccessConcurrent runs the random-key-access benchmark with a
// configurable number of workers for one or more tables (comma-separated in
// --tables). Keys are loaded fully into memory first, optionally checked for
// duplicates (--sanitycheck), then replayed via TestRandomAccess.
func testRandomKeyAccessConcurrent(ctx *cli.Context) error {
	var err error
	var logger log.Logger
	if logger, err = debug.Setup(ctx, true /* root logger */); err != nil {
		return err
	}

	if !ctx.IsSet(utils.DataDirFlag.Name) || !ctx.IsSet("test.file.path") {
		panic("params not set")
	}

	nodeCfg := turboNode.NewNodConfigUrfave(ctx, logger)

	stack := makeConfigNode(nodeCfg, logger)
	defer stack.Close()

	chainKv, err := node.OpenDatabase(stack.Config(), kv.ChainDB, logger)
	if err != nil {
		return err
	}

	tables := strings.Split(ctx.String("tables"), ",")
	fmt.Println(tables)
	for _, table := range tables {
		// Key width per table: address / address+slot plus a 4-byte block
		// number; code keys are a bare 32-byte code hash.
		var keyLen int
		switch table {
		case "account":
			keyLen = 20 + 4
		case "storage":
			keyLen = 20 + 32 + 4
		case "code":
			keyLen = 32
		default:
			panic("invalid table")
		}
		keys, err := loadTestKeys(ctx.String("test.file.path")+table, keyLen)
		if err != nil {
			return err
		}

		// Optional duplicate-key check before the benchmark run.
		if ctx.Bool("sanitycheck") {
			ids := make(map[string]struct{}, len(keys))
			for _, k := range keys {
				if _, ok := ids[string(k)]; ok {
					log.Error("dup key")
					return nil
				}
				ids[string(k)] = struct{}{}
			}
			log.Info("collect done", "count", len(ids))
		}

		report, err := TestRandomAccess(chainKv, keys, ctx.Int("concurrency"), table)
		if err != nil {
			return err
		}
		fmt.Printf("table: %v \n %+v \n", table, report)
		log.Info("finish random access", "total", report.Cnt, "cost (s)", report.Cost.Seconds(),
			"throughput (/s)", float64(report.Cnt)/report.Cost.Seconds(), "avg res (GB)", float64(report.AvgMemRes)/1024/1024/1024,
			"max res (GB)", float64(report.PeakMemRes)/1024/1024/1024, "disk read (GB)", float64(report.DiskRead)/1024/1024/1024)
	}
	return nil
}

// loadTestKeys reads the fixed-width key records (keyLen bytes each) from the
// file at path and returns one copied key per record. The file is closed on
// every return path.
func loadTestKeys(path string, keyLen int) ([][]byte, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	id := make([]byte, keyLen)
	keys := make([][]byte, 0)
	for {
		// io.ReadFull guarantees a full record or an error; a bare Read may
		// legally return fewer bytes than requested before EOF.
		_, err := io.ReadFull(file, id)
		if err == io.EOF {
			break
		} else if err == io.ErrUnexpectedEOF {
			return nil, fmt.Errorf("invalid key")
		} else if err != nil {
			return nil, err
		}
		// Copy out: `id` is reused as the read buffer on every iteration.
		keys = append(keys, append([]byte{}, id...))
		if len(keys)%10000000 == 0 {
			log.Info("loading keys...", "prefix", hexutility.Encode(id[:4]))
		}
	}
	return keys, nil
}

// RandomAccessReport aggregates the outcome of one TestRandomAccess run:
// how many keys were read, how long the run took, and the process
// utilization sampled while it ran.
type RandomAccessReport struct {
	Cnt  uint64        // number of keys replayed
	Cost time.Duration // wall-clock duration of the access loop

	SystemUtil // CPU / memory / disk-read statistics sampled during the run
}

// TestRandomAccess replays every key in keys against db with `concurrency`
// worker goroutines and returns throughput plus system-utilization figures.
// tpy selects the table semantics ("account", "storage" or "code"). It
// returns an error if concurrency < 1 or if any single read fails.
func TestRandomAccess(db kv.RwDB, keys [][]byte, concurrency int, tpy string) (RandomAccessReport, error) {
	log.Info("start random access", "total", len(keys), "concurrency", concurrency, "type", tpy)
	if concurrency < 1 {
		return RandomAccessReport{}, errors.New("invalid concurrency")
	}

	// Buffered to hold the whole workload so the feeder never blocks.
	tasks := make(chan []byte, len(keys))
	go func() {
		for _, k := range keys {
			tasks <- k
		}
		close(tasks)
	}()

	report := RandomAccessReport{
		Cnt: uint64(len(keys)),
	}
	// Size by len(keys), not len(tasks): len() of a channel is only its
	// current backlog (racing with the feeder above), so the old code could
	// under-size the result buffer.
	res := make(chan error, len(keys))
	for i := 0; i < concurrency; i++ {
		go randomAccessWorkder(db, tasks, res, tpy)
	}
	monitorQuit := make(chan struct{})
	go SampleSystemState(monitorQuit, 3, &report.SystemUtil)
	start := time.Now()
	lastReport := time.Now()
	for i := 0; i < len(keys); i++ {
		if err := <-res; err != nil {
			// Stop the sampler before bailing out so the goroutine does
			// not run (and tick) forever.
			monitorQuit <- struct{}{}
			return RandomAccessReport{}, err
		}
		if i%10000 == 0 && time.Since(lastReport) > 30*time.Second {
			log.Info("round verify done", "current", i)
			lastReport = time.Now()
		}
	}
	monitorQuit <- struct{}{}
	report.Cost = time.Since(start)
	// Give the sampler a moment to write its final figures into the report.
	time.Sleep(time.Second)
	return report, nil
}

// randomAccessWorkder consumes keys from tasks, performs one historical read
// per key against db, and reports each read's outcome on res. Exactly one
// value is sent to res per task — including for an unknown tpy, which
// previously sent nothing and would deadlock the collector in
// TestRandomAccess.
func randomAccessWorkder(db kv.RwDB, tasks <-chan []byte, res chan<- error, tpy string) {
	for id := range tasks {
		// The trailing 4 bytes of every key encode the block number at
		// which the historical state is read.
		number := binary.BigEndian.Uint32(id[len(id)-4:])
		tx, err := db.BeginRo(context.Background())
		if err != nil {
			panic(err)
		}
		reader := state.NewPlainState(tx, uint64(number), systemcontracts.SystemContractCodeLookup[networkname.MainnetChainName])
		switch tpy {
		case "account":
			_, err = readAccountFromHistory(reader, libcommon.BytesToAddress(id[:20]))
		case "storage":
			_, err = readStorageFromHistory(reader, libcommon.BytesToAddress(id[:20]), libcommon.BytesToHash(id[20:52]))
		case "code":
			_, err = readCodeFromHistory(reader, libcommon.BytesToHash(id))
		default:
			err = fmt.Errorf("unknown table type %q", tpy)
		}
		tx.Rollback()
		res <- err
	}
}

// readAccountFromHistory reads the account record for address from the
// historical state view wrapped by reader; result semantics (including a nil
// account for a missing address) are those of PlainState.ReadAccountData.
func readAccountFromHistory(reader *state.PlainState, address libcommon.Address) (*accounts.Account, error) {
	return reader.ReadAccountData(address)
}

// readStorageFromHistory reads one storage slot of address at the historical
// state wrapped by reader. The account is fetched first because the storage
// lookup needs its incarnation; a missing account yields (nil, nil).
func readStorageFromHistory(reader *state.PlainState, address libcommon.Address, key libcommon.Hash) ([]byte, error) {
	acc, err := reader.ReadAccountData(address)
	if err != nil {
		return nil, err
	}
	if acc == nil {
		return nil, nil
	}
	return reader.ReadAccountStorage(address, acc.Incarnation, &key)
}

// readCodeFromHistory looks up contract bytecode by code hash. A zero
// address and incarnation are passed — presumably the code lookup keys on
// the hash alone; confirm against PlainState.ReadAccountCode if that ever
// changes.
func readCodeFromHistory(reader *state.PlainState, codeHash libcommon.Hash) ([]byte, error) {
	// acc, err := reader.ReadAccountData(address)
	// if acc == nil || err != nil {
	// return err
	// }
	return reader.ReadAccountCode(libcommon.Address{}, 0, codeHash)
}

// ProcessDiskRead returns the cumulative number of bytes the process with
// the given pid has read from the storage layer, parsed from the
// "read_bytes" field of /proc/<pid>/io (Linux only).
func ProcessDiskRead(pid uint64) (uint64, error) {
	fd, err := os.Open("/proc/" + strconv.FormatUint(pid, 10) + "/io")
	if err != nil {
		return 0, err
	}
	defer fd.Close()

	scanner := bufio.NewScanner(fd)
	for scanner.Scan() {
		ln := scanner.Text()
		// Line format: "read_bytes: <n>". Match the prefix rather than any
		// substring so only the intended field can match.
		if strings.HasPrefix(ln, "read_bytes:") {
			fields := strings.Fields(ln)
			if len(fields) < 2 {
				return 0, fmt.Errorf("malformed io line: %q", ln)
			}
			return strconv.ParseUint(fields[1], 10, 64)
		}
	}
	// Surface scanner failures instead of silently reporting "not found".
	if err := scanner.Err(); err != nil {
		return 0, err
	}

	return 0, errors.New("io read not found")
}

// SystemUtil holds process-level utilization figures collected by
// SampleSystemState over one measurement window.
type SystemUtil struct {
	Cost       time.Duration // total length of the sampling window
	InitMemRes uint64        // RSS in bytes when sampling started

	AvgCPU    float64 // average CPU percent across samples
	AvgMemRes uint64  // average RSS in bytes across samples

	PeakCPU    float64 // highest CPU percent observed
	PeakMemRes uint64  // highest RSS in bytes observed

	DiskRead uint64 // bytes read from disk during the window (/proc io delta)
}

// SampleSystemState samples this process's CPU, resident memory and
// cumulative disk reads every `interval` seconds, logging a report at most
// once a minute, and accumulates averages and peaks into result. It runs
// until a value arrives on quit, at which point the final figures are
// written. The caller must not read result until after the quit handshake.
func SampleSystemState(quit <-chan struct{}, interval int, result *SystemUtil) {
	start := time.Now()
	processInfo, err := process.NewProcess(int32(os.Getpid()))
	if err != nil {
		panic(err)
	}
	sysMem, _ := mem.VirtualMemory()

	// Named memInfo (not "mem") so the gopsutil `mem` package import is not
	// shadowed for the rest of the function.
	memInfo, _ := processInfo.MemoryInfo()
	result.InitMemRes = memInfo.RSS

	totalCpu := float64(0)
	totalMem := uint64(0)

	sampleCnt := 0

	initIORead, err := ProcessDiskRead(uint64(processInfo.Pid))
	if err != nil {
		panic(err)
	}
	lastReport := time.Now()
	ticker := time.NewTicker(time.Duration(interval) * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-quit:
			lastIORead, err := ProcessDiskRead(uint64(processInfo.Pid))
			if err != nil {
				panic(err)
			}
			result.Cost = time.Since(start)
			result.DiskRead = lastIORead - initIORead
			// Guard the averages: quitting before the first tick would
			// otherwise divide by zero (a runtime panic for the uint64).
			if sampleCnt > 0 {
				result.AvgCPU = totalCpu / float64(sampleCnt)
				result.AvgMemRes = totalMem / uint64(sampleCnt)
			}
			return
		case <-ticker.C:
			memnow, err := processInfo.MemoryInfo()
			if err != nil {
				panic(err)
			}
			cpunow, err := processInfo.CPUPercent()
			if err != nil {
				panic(err)
			}

			// Throttle the log line to once a minute regardless of interval.
			if time.Since(lastReport) > 60*time.Second {
				nowIoRead, _ := ProcessDiskRead(uint64(processInfo.Pid))
				log.Info("sys report", "cpu", cpunow, "res", memnow.RSS, "percent", float64(memnow.RSS)/float64(sysMem.Total)*100,
					"read MB", float64(nowIoRead-initIORead)/1024/1024)
				lastReport = time.Now()
			}

			totalCpu += cpunow
			totalMem += memnow.RSS

			if cpunow > result.PeakCPU {
				result.PeakCPU = cpunow
			}
			if memnow.RSS > result.PeakMemRes {
				result.PeakMemRes = memnow.RSS
			}
			sampleCnt += 1
		}
	}
}
