package rpc

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	platon "github.com/PlatONnetwork/PlatON-Go"
	"github.com/PlatONnetwork/PlatON-Go/core/types"
	"github.com/PlatONnetwork/PlatON-Go/ethclient"
	"go.uber.org/zap"
	"math/big"
	"net/http"
	"platon-tools/go-service/event"
	go_logger "platon-tools/go-service/go-logger"
	"platon-tools/go-service/module/gcloud_k8s"
	"platon-tools/go-service/protocol/rpc_pb"
	"regexp"
	"strconv"
	"time"
)

// MonitorNode polls a single PlatON node (reached via its k8s pod service IP)
// over websocket RPC, collects chain/sync/runtime-metrics data on a fixed
// interval and publishes the results to the event feed as NodeMonitorItem
// messages.
type MonitorNode struct {
	*ethclient.Client // embedded RPC client; assigned in Start()

	ContractsCreated int64
	TokenTransfers   int64
	EthTransfers     int64
	BlockSize        float64
	LoadTime         float64 // seconds spent on the last polling round (RPC response time)
	TotalEth         *big.Int
	CurrentBlock     *types.Block
	Sync             *platon.SyncProgress // nil when the node is not syncing — see SyncProgress docs
	LastBlockUpdate  time.Time            // wall-clock time of the last successful block fetch
	SugGasPrice      *big.Int
	PendingTx        uint
	NetworkId        *big.Int

	ctx    context.Context // child of the constructor's ctx; governs all loops
	cancel context.CancelFunc

	feed *event.Event // destination for serialized NodeMonitorItem events

	pod *gcloud_k8s.PodEle      // pod being monitored (provides SvcIP, NodeID, labels, status)
	gc  *gcloud_k8s.GCPlatonK8s // k8s accessor, refreshed each polling round

	eventCh chan gcloud_k8s.PodEle // contract-query notifications, drained by toCenterService
	delay   int64                  // polling interval in seconds
}

// NewMonitorNode builds a MonitorNode bound to the given pod. It derives a
// cancellable child context from ctx that governs the node's loops; Start
// must be called to begin monitoring. delay is the polling interval in
// seconds.
func NewMonitorNode(ctx context.Context, pod *gcloud_k8s.PodEle, gc *gcloud_k8s.GCPlatonK8s, feed *event.Event, delay int64) *MonitorNode {
	childCtx, cancelFn := context.WithCancel(ctx)

	node := &MonitorNode{
		ctx:      childCtx,
		cancel:   cancelFn,
		TotalEth: big.NewInt(0),
		feed:     feed,
		pod:      pod,
		gc:       gc,
		eventCh:  make(chan gcloud_k8s.PodEle, 20),
		delay:    delay,
	}
	return node
}

// Start dials the pod's websocket RPC endpoint and begins monitoring: the
// notification drain runs in its own goroutine while the subscribe loop
// blocks the caller until the context is cancelled. A dial failure is logged
// and returned immediately.
func (m *MonitorNode) Start() error {
	endpoint := fmt.Sprintf("ws://%s:6790", m.pod.SvcIP)

	client, err := ethclient.DialContext(m.ctx, endpoint)
	if err != nil {
		go_logger.Logger.Error("DialContext", zap.Error(err))
		return err
	}
	m.Client = client

	go m.toCenterService()
	m.subscribeLoop()
	return nil
}

// toCenterService drains contract-query notifications from eventCh until the
// node's context is cancelled, then closes the channel on exit.
//
// NOTE(review): closing eventCh on the receiver side will panic any sender
// that writes after shutdown — confirm all senders observe m.ctx before this
// returns.
func (m *MonitorNode) toCenterService() {
	defer m.cancel()
	defer close(m.eventCh)

	for {
		select {
		case <-m.ctx.Done():
			return
		case _, ok := <-m.eventCh:
			if !ok {
				// Channel was closed: return instead of busy-spinning on the
				// closed channel (the old code silently ignored this case and
				// would loop hot until ctx was cancelled).
				return
			}
			go_logger.Logger.Sugar().Debugf("收到合约查询通知")
		}
	}
}

// subscribeLoop runs runSubscribe and restarts it whenever it exits (e.g. on
// an RPC failure) until the node's context is cancelled. Each run gets its
// own child context so a failed run can be cleanly replaced.
func (m *MonitorNode) subscribeLoop() {
	defer m.cancel()

	// Pause between restarts so a persistently unreachable node does not
	// trigger a hot loop of immediately-failing runSubscribe goroutines
	// (the old code restarted with zero delay).
	const restartDelay = time.Second

	subCtx, cancel := context.WithCancel(m.ctx)
	go m.runSubscribe(subCtx, cancel)

	for {
		select {
		case <-m.ctx.Done():
			return
		case <-subCtx.Done():
			// Wait out the restart delay, but stay responsive to shutdown.
			select {
			case <-m.ctx.Done():
				return
			case <-time.After(restartDelay):
			}
			subCtx, cancel = context.WithCancel(m.ctx)
			go m.runSubscribe(subCtx, cancel)
		}
	}
}

// runSubscribe polls the node once every m.delay seconds: it fetches the
// latest block, sync progress and runtime metrics, then publishes a
// NodeMonitorItem on the event feed. It returns — cancelling subCtx via the
// deferred cancelFunc so subscribeLoop can restart it — on context
// cancellation or on the first RPC failure.
func (m *MonitorNode) runSubscribe(subCtx context.Context, cancelFunc context.CancelFunc) {
	defer cancelFunc()

	interval := time.Duration(m.delay) * time.Second

	// A Ticker fires repeatedly on its own; the old per-iteration
	// timer.Reset call was redundant and has been removed.
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-subCtx.Done():
			go_logger.Logger.Sugar().Infof("subscribe quit %s", time.Now().String())
			return
		case <-ticker.C:
			start := time.Now()
			currentBlock, err := m.Client.BlockByNumber(subCtx, nil)
			if err != nil {
				go_logger.Logger.Sugar().Warn("get last block failed", err)
				return
			}

			// SyncProgress returns (nil, nil) when the node is not syncing,
			// so m.Sync may legitimately be nil and must be guarded below —
			// the old code dereferenced it unconditionally and could panic.
			m.Sync, _ = m.Client.SyncProgress(subCtx)
			m.LastBlockUpdate = time.Now()
			m.gc.GetPods()
			m.LoadTime = time.Since(start).Seconds()

			// When not syncing, report the current head as the highest block.
			highest := currentBlock.NumberU64()
			if m.Sync != nil {
				highest = m.Sync.HighestBlock
			}

			monitorNode := event.NodeMonitorItem{
				NodeId:          m.pod.NodeID,
				NodeName:        m.pod.Label.Name,
				CurrentBlock:    currentBlock.NumberU64(),
				DiskRatio:       0.0,
				CPURatio:        0.0,
				MemoryRatio:     0.0,
				NetWorkRatio:    0.0,
				IORatio:         0.0,
				RWAmount:        0.0,
				RPCResponseTime: m.LoadTime,
				NodeStatus:      m.pod.Status,
				CurrentHigh:     highest,
			}

			// Overlay scraped runtime metrics when the endpoint is reachable.
			if metric := m.metrics(); metric != nil {
				Amount := metric["system/disk/readbytes"] + metric["system/disk/writebytes"]
				monitorNode.RWAmount = Amount
				monitorNode.DiskRatio = metric["system/disk/writebytes"]
				monitorNode.NetWorkRatio = metric["p2p/InboundTraffic.mean"]
				monitorNode.CPURatio = metric["system/cpu/sysload"]
				monitorNode.MemoryRatio = metric["system/memory/used"]
				monitorNode.IORatio = metric["system/cpu/threads"]
				monitorNode.RAmount = metric["system/disk/readbytes"]
				monitorNode.WAmount = metric["system/disk/writebytes"]
			}

			data, err := json.Marshal(monitorNode)
			if err != nil {
				// Log instead of silently dropping the event (old behavior).
				go_logger.Logger.Warn("marshal monitor item failed", zap.Error(err))
				continue
			}
			m.feed.Send(event.MonitorEvent{
				Name: rpc_pb.MsgType_NodeMonitorItem,
				Data: data,
			})
		}
	}
}

// Regular expressions used by metrics(), compiled once at package scope
// instead of on every polling round.
var (
	// metricKeyRe matches metric names such as "system/disk/readbytes".
	metricKeyRe = regexp.MustCompile(`system[/\w]+`)
	// metricValueRe matches a decimal number ("123", "123.45"). The previous
	// pattern `[\d+][.]?[\d]*` put "+" inside a character class, so it could
	// consume only ONE leading digit before the optional dot and truncated
	// values like "123.45" to "123".
	metricValueRe = regexp.MustCompile(`\d+(\.\d+)?`)
)

// metricsClient bounds each scrape so a hung node cannot stall the polling
// loop indefinitely (bare http.Get has no timeout).
var metricsClient = &http.Client{Timeout: 10 * time.Second}

// metrics scrapes the node's debug metrics endpoint and returns the parsed
// "system/..." (and matching) gauges keyed by metric name, or nil when the
// endpoint is unreachable. Lines whose value fails to parse are recorded
// as 0.
func (m *MonitorNode) metrics() map[string]float64 {
	metricsAddr := fmt.Sprintf("http://%s:6060/debug/metrics", m.pod.SvcIP)

	resp, err := metricsClient.Get(metricsAddr)
	if err != nil {
		go_logger.Logger.Warn("get metrics failed", zap.Error(err))
		return nil
	}
	defer resp.Body.Close()

	reader := bufio.NewReader(resp.Body)
	metrics := map[string]float64{}
	for {
		data, readErr := reader.ReadString('\n')

		// Parse before checking readErr: ReadString returns any data read
		// prior to the error, so a final unterminated line is not dropped
		// (the old code discarded it).
		if key := metricKeyRe.FindString(data); len(key) > 0 {
			value, parseErr := strconv.ParseFloat(metricValueRe.FindString(data), 64)
			if parseErr != nil {
				value = 0
			}
			metrics[key] = value
		}

		if readErr != nil {
			return metrics
		}
	}
}
