package main

import (
	"flag"
	"fmt"
	"log"
	"strconv"
	"sync"
	"time"
)

const (
	ClusterNum           = 4 // number of organizations (clusters) in the system
	ChannelCount         = 1 // number of Fabric channels
	Debug                = 0 // debug mode switch
	NormalStart          = 0 // whether this is a normal (clean) start
	CephConfigPath       = "/etc/ceph/ceph.conf"
	CephServerPortPrefix = "1000"      // ceph server port prefix, complete port is 1000 + clusterID
	CephPoolName         = "testbench" // name of the Ceph pool to use
	CryptoFilesPath      = "/home/czy/ssd/bc-storage/run-redundancy/crypto-config/"
	// PortPrefixTLS        = "1000"          // HTTP port, read/write objs from other clusters with TLS
	HeightStartCheck = 8 // block height of check start
)

var (
	// Command-line flags.
	ClusterID           = flag.Int("i", 0, "cluster ID")
	ClusterIPConfigPath = flag.String("c", "/home/czy/ssd/bc-storage/cluster_IP.conf", "cluster IP config path")
	ObjectSizePoint     = flag.Int("s", 4*1024*1024, "object size stored in ceph") // object size stored in ceph, default 4MB
	needCheck           = flag.Bool("n", false, "need check")

	// Derived configuration, populated by parseConfig.
	ObjectSize    int      // dereferenced value of ObjectSizePoint
	CephIPs       []string // Ceph server IPs, one per cluster
	FabricSDKIP   string   // Fabric SDK endpoint for this cluster
	FabricSDKPort string   // Fabric SDK port (base + cluster ID)
	KafkaIP       string   // Kafka endpoint for this cluster
	KafkaPort     string   // Kafka port (base + cluster ID)

	recoveringObjsMap    sync.Map  // flag of obj is recovering
	recoverStartTime     time.Time // timestamp when the current recovery started
	BlockHeightRedundunt uint64    // block height up to which redundancy has been applied
)

// parseConfig parses command-line flags and loads the per-cluster network
// configuration (Ceph, Fabric SDK and Kafka endpoints) from the cluster IP
// config file. It terminates the process with a descriptive message when the
// configuration is malformed instead of silently continuing with zero values.
func parseConfig() {
	flag.Parse()
	ObjectSize = *ObjectSizePoint
	CephIPs = ParseEnvVarArray(*ClusterIPConfigPath, "CEPH_IPs")

	// Fabric SDK endpoint: IP is selected by cluster ID, port is base + ID.
	fabricSDKIPs := ParseEnvVarArray(*ClusterIPConfigPath, "FABRIC_SDK_IPs")
	if *ClusterID < 0 || *ClusterID >= len(fabricSDKIPs) {
		log.Fatalf("cluster ID %d out of range for FABRIC_SDK_IPs (have %d entries)", *ClusterID, len(fabricSDKIPs))
	}
	FabricSDKIP = fabricSDKIPs[*ClusterID]
	fabricSDKPortBase, err := strconv.Atoi(ParseEnvVar(*ClusterIPConfigPath, "FABRIC_SDK_PORT_BASE"))
	if err != nil {
		log.Fatalf("parsing FABRIC_SDK_PORT_BASE: %v", err)
	}
	FabricSDKPort = strconv.Itoa(fabricSDKPortBase + *ClusterID)

	// Kafka endpoint: same addressing scheme as the Fabric SDK above.
	kafkaIPs := ParseEnvVarArray(*ClusterIPConfigPath, "KAFKA_IPs")
	if *ClusterID >= len(kafkaIPs) {
		log.Fatalf("cluster ID %d out of range for KAFKA_IPs (have %d entries)", *ClusterID, len(kafkaIPs))
	}
	KafkaIP = kafkaIPs[*ClusterID]
	kafkaPortBase, err := strconv.Atoi(ParseEnvVar(*ClusterIPConfigPath, "KAFKA_PORT_BASE"))
	if err != nil {
		log.Fatalf("parsing KAFKA_PORT_BASE: %v", err)
	}
	KafkaPort = strconv.Itoa(kafkaPortBase + *ClusterID)
}

func main() {
	flag.Parse()
	parseConfig()

	doRedundancy(*needCheck)
}

// doRedundancy starts the long-running services of this cluster node: the
// object-storage HTTP server, the parity-block computation loop and the
// blockchain-block consumer. When withCheck is true it additionally starts
// the recovery worker and, on cluster 0 only, the periodic checker.
// The function never returns; it blocks the main goroutine forever so the
// background goroutines keep running.
func doRedundancy(withCheck bool) {
	// Buffered so producers of recovery requests do not block on a slow consumer.
	recoverChan := make(chan Recover, 1000)
	BlockHeightRedundunt = 0 // redundant block height start from 0

	fmt.Printf("cluster [%d] start HTTPs server provide object storage service ...\n", *ClusterID)
	go OssServer(ClusterNum, &recoverChan) // start HTTP server provide object storage service
	fmt.Printf("cluster [%d] start HTTPs server provide object storage service ... done\n", *ClusterID)

	fmt.Printf("cluster [%d] start redundancy deployment ...\n", *ClusterID)
	// NOTE(review): the sleeps below appear to give each service time to come
	// up before the next one starts — consider replacing with explicit
	// readiness signaling.
	time.Sleep(5 * time.Second)
	go CalStoreParityBlock()
	fmt.Printf("cluster [%d] started download message from kafka ...\n", *ClusterID)
	time.Sleep(5 * time.Second)
	go DealBlockchainBlock()

	if withCheck {
		fmt.Printf("cluster [%d] start checking ...\n", *ClusterID)
		var checkClient ClientHandle
		checkClient.Init()
		defer checkClient.Shutdown()

		go checkClient.doRecovery(&recoverChan)
		fmt.Printf("cluster [%d] start recovering thread ...\n", *ClusterID)
		time.Sleep(5 * time.Second)
		// Only cluster 0 runs the checker to avoid duplicate checks.
		if *ClusterID == 0 {
			go checkClient.doCheck()
		}
	}

	// Block the main goroutine forever; select{} is the idiomatic form and
	// avoids the misleading WaitGroup that is never Done()'d.
	select {}
}
