package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"math/rand"
	"strconv"
	"time"

	sarama "github.com/Shopify/sarama"
)

// Block is the in-memory representation of one block pulled from Kafka.
type Block struct {
	BlockId  int           // sequential block number assigned by the consumer
	transses []Transaction // transactions contained in the block (unexported, not serialized)
	Offset64 int64         // offset of this block's message within the Kafka partition
}

// Set fills bb.transses from the raw ID strings carried by b, skipping
// redundancy entries. Each ID is formatted "<digit>_<hash>" where the
// leading digit (0-3) is the source id; IDs whose bytes [2:4] equal "_4"
// are redundancy blocks and are ignored. Returns false when no usable
// transaction was extracted (i.e. the block is effectively empty).
func (bb *Block) Set(b Block2) bool {
	fmt.Println("获取区块信息 ", bb.BlockId)
	added := 0
	for _, id := range b.IDs {
		// TODO: change the slice to [0:2] before the final release.
		if id[2:4] == "_4" {
			// redundancy block — log and skip
			fmt.Println("冗余块", id)
			continue
		}
		var t Transaction
		t.Hash = []byte(id[2:]) // strip the "<digit>_" prefix
		// leading char is the source id (0-3); a parse failure leaves it
		// at 0, preserving the original best-effort behavior
		from, _ := strconv.Atoi(string(id[0]))
		t.From = int8(from)
		t.BlockId = int(b.Count)
		bb.transses = append(bb.transses, t)
		added++
	}
	return added > 0
}

// Block2 is the JSON wire format of a block as published to Kafka:
// a transaction count plus the raw transaction ID strings.
type Block2 struct {
	Count uint16   `json:"count"`
	IDs   []string `json:"ids"`
}

// GetBlock dispatches to the appropriate block source depending on the
// startup mode: a NormalStart of 0 replays from the latest recovered
// position, anything else consumes the live Kafka block stream.
func GetBlock() {
	switch NormalStart {
	case 0:
		GetLatestBlock()
	default:
		GetBlockFromKafka()
	}
}

// GetBlockFromKafka consumes messages from the "block" topic (partition 0)
// forever, decoding each message into a Block2 and rebuilding an in-memory
// Block from it.
//
// NOTE(review): the start offset is hard-coded to 100; the trailing comment
// suggests sarama.OffsetNewest was intended — confirm before release.
// NOTE(review): the rebuilt Block bb goes out of scope at the end of each
// message case — nothing downstream receives it; presumably a stub. Verify.
func GetBlockFromKafka() {
	fmt.Println("开始获取区块信息")
	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true
	config.Version = sarama.V0_11_0_2
	i := 0 // local sequential id assigned to each successfully decoded block
	// consumer
	consumer, err := sarama.NewConsumer([]string{KafkaIpAddress}, config)
	if err != nil {
		fmt.Printf("consumer_test create consumer error %s\n", err.Error())
		return
	}

	defer consumer.Close()

	partition_consumer, err := consumer.ConsumePartition("block", 0, 100) //"block", 0, sarama.OffsetNewest
	if err != nil {
		fmt.Printf("try create partition_consumer error %s\n", err.Error())
		return
	}
	defer partition_consumer.Close()

	// infinite receive loop: no cancellation path, runs until process exit
	for {
		select {
		case msg := <-partition_consumer.Messages():
			b := Block2{}
			var bb Block
			var t Transaction
			err = json.Unmarshal(msg.Value, &b)
			if err != nil {
				fmt.Println("非法区块信息")
			} else {
				bb.BlockId = i
				i++
				fmt.Println("获取区块信息 ", bb.BlockId)
				for _, o := range b.IDs {
					// unlike Block.Set, the full ID string becomes the hash
					// and From is derived from its first byte modulo 4
					t.Hash = []byte(o)
					t.From = int8(t.Hash[0]) % 4
					t.BlockId = int(b.Count)
					bb.transses = append(bb.transses, t)
				}

			}
		case err := <-partition_consumer.Errors():
			fmt.Printf("err :%s\n", err.Error())
		}
	}

}

// GenerateBlocks builds a block holding 10 pseudo-random transactions for
// testing. Each transaction gets a random source id in [0,4) and a SHA-256
// digest of an ever-growing random byte string (the payload accumulates one
// byte per iteration and the full prefix is re-hashed each time, so every
// digest differs). The populated block (BlockId = blockid) is returned.
func GenerateBlocks(blockid int) (block Block) {
	sha := sha256.New()
	rand.Seed(time.Now().UnixNano())
	var data []byte
	var trans Transaction
	for i := 0; i < 10; i++ {
		// random source/organization id, 0-3
		trans.From = int8(rand.Intn(4))

		// extend the payload and hash the cumulative bytes;
		// hash.Hash.Write is documented to never return an error, so the
		// old error branch (which returned a half-built block) is dropped
		data = append(data, byte(rand.Uint64()))
		sha.Write(data)
		trans.Hash = sha.Sum(nil)
		trans.BlockId = blockid
		block.transses = append(block.transses, trans)
	}
	if Debug == 1 {
		for _, tr := range block.transses {
			fmt.Println(tr.BlockId, tr.From, tr.Hash)
		}
	}
	block.BlockId = blockid
	return block
}

// GetLatestBlock restarts the pipeline from a recovered position: it
// determines a resume offset in the "block" topic, then consumes blocks
// from there, converting each non-empty block into a quintet message and
// publishing it to the "TQuintetLine" topic. Runs forever once consuming
// starts; returns early only on setup failure.
func GetLatestBlock() {
	fmt.Println("producer启动")
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true
	config.Version = sarama.V0_11_0_2

	// async producer for the quintet output topic
	producer, err := sarama.NewAsyncProducer([]string{ClusterIp(Cluster) + KafkaPort}, config)
	if err != nil {
		// fixed: the broker address was previously passed as the Printf
		// format string, discarding both the message and the error
		fmt.Printf("%s producer_test create producer error :%s\n", ClusterIp(Cluster)+KafkaPort, err.Error())
		return
	}
	defer producer.AsyncClose()

	// reusable message shell; only Value changes per send
	msgs := &sarama.ProducerMessage{
		Topic: "TQuintetLine",
		//Key:   sarama.StringEncoder("go_test"),
		Partition: 0,
	}

	fmt.Println("load latest block")

	configc := sarama.NewConfig()
	configc.Consumer.Return.Errors = true
	configc.Version = sarama.V0_11_0_2

	// consumer for the block input topic
	consumer, err := sarama.NewConsumer([]string{KafkaIpAddress}, configc)
	if err != nil {
		fmt.Printf("consumer_test create consumer error %s\n", err.Error())
		return
	}
	defer consumer.Close()

	// client used only for offset lookups; fixed: its error was ignored
	// and it was never closed
	client, err := sarama.NewClient([]string{KafkaIpAddress}, configc)
	if err != nil {
		fmt.Printf("create client error %s\n", err.Error())
		return
	}
	defer client.Close()

	var off int64
	if 1 == 1 { // debug toggle: derive progress from the block queue itself
		off, _ = client.GetOffset("block", 0, -2) // -1 newest, -2 oldest
		off = off + 20
		fmt.Println("进度", off)
	} else { // otherwise resume from the last uploaded quintet
		offset, _ := client.GetOffset("TQuintetLine", 0, -1)
		quintet_consumer, _ := consumer.ConsumePartition("TQuintetLine", 0, offset-1)
		m := <-quintet_consumer.Messages()
		d := TQuintet{}
		err := json.Unmarshal(m.Value, &d)
		if err != nil {
			fmt.Println("重启时读取最新五元组失败")
			return
		}
		fmt.Println("最新五元组offset", d.Offset)
		quintet_consumer.Close()
		off = d.Offset - 40 // test-only offset shift; TODO remove the -40
	}

	partition_consumer, err := consumer.ConsumePartition("block", 0, off) //"block", 0, sarama.OffsetNewest

	if err != nil {
		fmt.Printf("try create partition_consumer error %s\n", err.Error())
		return
	}
	defer partition_consumer.Close()
	i := 0
	for {
		select {
		case msg := <-partition_consumer.Messages():
			b := Block2{}
			var bb Block
			err = json.Unmarshal(msg.Value, &b)
			if err != nil {
				fmt.Println("非法区块信息")
			} else {
				bb.BlockId = int(msg.Offset)
				bb.Offset64 = msg.Offset
				i++
				f := bb.Set(b)
				if f {
					q := GenerateDateDistribute(bb)
					q.Offset = bb.Offset64

					fmt.Println("上传五元组 ", q.Quin[0].BlockId) //, string(q.Quin[0].Trans[0].Hash))
					value, _ := json.Marshal(q)
					msgs.Value = sarama.ByteEncoder(value)
					producer.Input() <- msgs

					// block until the broker acks (or rejects) this message
					// before consuming the next block
					select {
					case suc := <-producer.Successes():
						fmt.Printf("offset: %d,  timestamp: %s \n", suc.Offset, suc.Timestamp.String())
					case fail := <-producer.Errors():
						fmt.Printf("err: %s\n", fail.Err.Error())
					}
				} else {
					fmt.Println("空区块")
				}

			}
		case err := <-partition_consumer.Errors():
			fmt.Printf("err :%s\n", err.Error())
		}
	}

}
