package main

import (
	"encoding/json"
	"fmt"
	"strconv"

	sarama "github.com/Shopify/sarama"
)

// Block is a parsed block fetched from the blockchain: the block's ID
// plus every object extracted from it.
type Block struct {
	BlockId uint64   // block ID (assigned from the source block / offset)
	Objs    []Object // parsed objects: object OID, owning org ID, owning block ID
}

// Parse extracts every object of a blockchain block into bb, recording
// each object's OID, its owning organization ID and its block ID, while
// filtering out redundancy (parity) blocks — IDs whose bytes [2:4] are "@@".
//
// An ID has the form "<org digit>_<oid>". IDs that are too short to carry
// an OID or whose leading byte is not a digit are skipped instead of
// panicking / being mis-attributed to org 0 (previous behavior).
//
// It returns true when at least one object was parsed, false otherwise.
func (bb *Block) Parse(b *ObjIDsInBlock) bool {
	fmt.Printf("cluster [%d] -> break down block [%d]\n", *ClusterID, b.BlockID)
	bb.BlockId = b.BlockID
	count := 0
	for _, o := range b.IDs {
		if len(o) < 3 {
			continue // malformed ID: too short to hold "<digit>_" plus an OID
		}
		if len(o) >= 4 && o[2:4] == "@@" {
			continue // redundancy (parity) block: filtered out
		}
		org, err := strconv.Atoi(string(o[0])) // leading digit is the owning org ID
		if err != nil {
			continue // malformed ID: no numeric org prefix
		}
		bb.Objs = append(bb.Objs, Object{
			Oid:     []byte(o[2:]), // strip the two-byte "<digit>_" prefix
			From:    int8(org),
			BlockId: bb.BlockId,
		})
		count++
	}
	return count > 0
}

// ParseWithParityBlock extracts every object of a blockchain block into
// bb — redundancy (parity) blocks included — recording each object's OID,
// its owning organization ID and its block ID.
//
// It returns true when the block contained at least one object, false
// when it was empty.
func (bb *Block) ParseWithParityBlock(b *ObjIDsInBlock) bool {
	fmt.Printf("cluster [%d] -> break down block [%d]\n", *ClusterID, b.BlockID)
	bb.BlockId = b.BlockID
	parsed := 0
	for _, id := range b.IDs {
		// Leading char encodes the org ID; a non-digit yields 0 here
		// (error deliberately ignored to keep parity IDs included).
		org, _ := strconv.Atoi(string(id[0]))
		obj := Object{
			Oid:     []byte(id[2:]), // first two bytes are "<digit>_"
			From:    int8(org),
			BlockId: bb.BlockId,
		}
		bb.Objs = append(bb.Objs, obj)
		parsed++
	}
	return parsed > 0
}

// DealBlockchainBlock processes all object info of a block obtained via
// Kafka and determines the data layout. A zero NormalStart means a
// non-normal start: replay from the most recent block; otherwise read
// blocks from Kafka directly.
func DealBlockchainBlock() {
	switch NormalStart {
	case 0:
		DealLastBlock()
	default:
		GetBlockFromKafka()
	}
}

// Deprecated: superseded by DealLastBlock.
//
// GetBlockFromKafka consumes raw block messages from the "block" topic
// (partition 0, hard-coded offset 100) and unpacks each block's object
// IDs into a local Block value. NOTE(review): the populated Block is
// discarded at the end of each loop iteration — nothing consumes it,
// consistent with this function's deprecated status. The loop never
// terminates; the process must be killed to stop it.
func GetBlockFromKafka() {
	fmt.Printf("cluster [%d] -> get block data from kafka\n", *ClusterID)
	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true
	config.Version = sarama.V0_11_0_2

	i := uint64(0) // locally-assigned block ID, incremented per decoded message
	// consumer
	consumer, err := sarama.NewConsumer([]string{KafkaIPPort()}, config)
	if err != nil {
		fmt.Printf("consumer_test create consumer error %s\n", err.Error())
		return
	}

	defer consumer.Close()

	// NOTE(review): offset 100 is hard-coded; the trailing comment suggests
	// sarama.OffsetNewest was the intended value — confirm before reuse.
	partition_consumer, err := consumer.ConsumePartition("block", 0, 100) //"block", 0, sarama.OffsetNewest
	if err != nil {
		fmt.Printf("try create partition_consumer error %s\n", err.Error())
		return
	}
	defer partition_consumer.Close()

	for {
		select {
		case msg := <-partition_consumer.Messages():
			b := &ObjIDsInBlock{}
			var bb Block
			var t Object
			err = json.Unmarshal(msg.Value, b)
			if err != nil {
				fmt.Println("非法区块信息")
			} else {
				bb.BlockId = i
				i++
				fmt.Printf("cluster [%d] -> getted block [%d] data\n", *ClusterID, bb.BlockId)
				for _, o := range b.IDs {
					t.Oid = []byte(o)
					// NOTE(review): org derived from the raw byte mod 4 here,
					// unlike Parse which uses strconv.Atoi on the digit char.
					t.From = int8(t.Oid[0]) % 4
					t.BlockId = uint64(b.BlockID)
					bb.Objs = append(bb.Objs, t)
				}

			}
		case err := <-partition_consumer.Errors():
			fmt.Printf("err :%s\n", err.Error())
		}
	}

}

// DealLastBlock starts processing object info from the most recent block.
// It consumes block messages from the "block" topic beginning at the
// newest offset, parses each block (filtering parity blocks), generates a
// 5-group data layout for it and publishes the layout to "QuintetQueue".
// The consume loop runs until the process exits.
//
// Fixes over the previous version: the producer-creation error path passed
// KafkaIPPort() as the Printf format string; the sarama.NewClient error was
// never checked (nil-pointer deref on GetOffset); the client was never
// closed; GetOffset/Marshal errors were silently dropped.
func DealLastBlock() {
	fmt.Printf("cluster [%d] -> init 5-group kafka queue ...\n", *ClusterID)
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true
	config.Version = sarama.V0_11_0_2

	// Producer for the generated data layouts.
	producer, err := sarama.NewAsyncProducer([]string{KafkaIPPort()}, config)
	if err != nil {
		fmt.Printf("cluster [%d] -> create producer error: %s\n", *ClusterID, err.Error())
		return
	}
	defer producer.AsyncClose()

	// Reused message envelope; only Value changes per block.
	msgs := &sarama.ProducerMessage{
		Topic:     "QuintetQueue",
		Partition: 0,
	}

	fmt.Printf("cluster [%d] -> init 5-group kafka queue ... done\n", *ClusterID)

	configc := sarama.NewConfig()
	configc.Consumer.Return.Errors = true
	configc.Version = sarama.V0_11_0_2

	// Consumer for raw blocks.
	consumer, err := sarama.NewConsumer([]string{KafkaIPPort()}, configc)
	if err != nil {
		fmt.Printf("cluster [%d] -> create consumer error %s\n", *ClusterID, err.Error())
		return
	}
	defer consumer.Close()

	client, err := sarama.NewClient([]string{KafkaIPPort()}, configc)
	if err != nil { // previously unchecked: a failure here nil-dereferenced below
		fmt.Printf("cluster [%d] -> create client error %s\n", *ClusterID, err.Error())
		return
	}
	defer client.Close()

	// OffsetNewest (-1) = start at the latest block; OffsetOldest (-2) = the first.
	off, err := client.GetOffset("block", 0, sarama.OffsetNewest)
	if err != nil {
		fmt.Printf("cluster [%d] -> get offset error %s\n", *ClusterID, err.Error())
		return
	}
	fmt.Printf("cluster [%d] -> do redundancy at block kafka, kafka IP&port: %s, offset: %d\n", *ClusterID, KafkaIPPort(), off)

	partition_consumer, err := consumer.ConsumePartition("block", 0, off)
	if err != nil {
		fmt.Printf("try create partition_consumer error %s\n", err.Error())
		return
	}
	defer partition_consumer.Close()

	for {
		select {
		case msg := <-partition_consumer.Messages():
			b := &ObjIDsInBlock{}
			var bb Block
			if err := json.Unmarshal(msg.Value, b); err != nil {
				fmt.Printf("cluster [%d] -> block is invalid, do nothing\n", *ClusterID)
				continue
			}
			if !bb.Parse(b) {
				// Empty block: just advance the redundancy height marker.
				fmt.Printf("cluster [%d] -> block [%d] is empty, do nothing\n", *ClusterID, bb.BlockId)
				BlockHeightRedundunt = bb.BlockId
				continue
			}
			q := GenerateDataLayout(bb)
			fmt.Printf("cluster [%d] -> uploading 5-group of block [%d] to kafka\n", *ClusterID, q.Quin[0].BlockId)
			value, err := json.Marshal(q)
			if err != nil {
				fmt.Printf("cluster [%d] -> marshal 5-group error %s\n", *ClusterID, err.Error())
				continue
			}
			msgs.Value = sarama.ByteEncoder(value)
			producer.Input() <- msgs // enqueue the generated layout
			select {
			case suc := <-producer.Successes():
				fmt.Printf("cluster [%d] -> uploaded 5-group of block [%d] to kafka, offset=%d\n", *ClusterID, q.Quin[0].BlockId, suc.Offset)
			case fail := <-producer.Errors():
				fmt.Printf("err: %s\n", fail.Err.Error())
			}
		case err := <-partition_consumer.Errors():
			fmt.Printf("err :%s\n", err.Error())
		}
	}
}
