package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"strconv"

	sarama "github.com/Shopify/sarama"
)

// Block is the in-memory view of one block message consumed from Kafka.
type Block struct {
	BlockId  int           // block number (set from the Kafka message offset in Check)
	transses []Transaction // transactions rebuilt from the block's ID list by Set
	Offset64 int64         // offset of this block within the Kafka "block" topic
}

// Set rebuilds bb.transses from the transaction IDs carried in b, skipping
// redundant blocks (IDs whose tag is "_4") and malformed IDs. offset is the
// Kafka offset recorded on every appended transaction's BlockId. It reports
// whether at least one transaction was appended.
func (bb *Block) Set(b Block2, offset int) bool {
	log.Println("cluster", Cluster, ":", "获取区块信息 ", bb.BlockId)
	count := 0
	for _, o := range b.IDs {
		// TODO: change the slice to o[0:2] for the final version.
		if o[2:4] == "_4" {
			// Redundant block: log and skip.
			log.Println("cluster", Cluster, ":", "冗余块", o)
			continue
		}
		// The first two characters are the origin prefix "0_".."3_".
		from, err := strconv.Atoi(string(o[0]))
		if err != nil {
			// Previously this error was discarded, silently recording
			// From=0 for malformed IDs; skip the entry instead.
			log.Println("cluster", Cluster, ":", "冗余块", o)
			continue
		}
		bb.transses = append(bb.transses, Transaction{
			Hash:    []byte(o[2:]),
			From:    int8(from),
			BlockId: offset,
		})
		count++
	}
	return count > 0
}

// Block2 is the JSON wire format of a block message as stored in Kafka.
type Block2 struct {
	Count uint16   `json:"count"` // presumably the number of IDs — not read in this file, confirm against producer
	IDs   []string `json:"ids"`   // transaction IDs, "<origin>_<hash>" with a one-digit origin prefix
}

// Check spot-checks the block at Kafka offset `block`: it consumes the block
// message, rebuilds its transaction list, then for every transaction fetches
// a Merkle proof over TLS, verifies it, and compares the proof root against
// the hash returned by the hash service. cli is the shared TLS HTTP client
// used for the proof requests. Failures are logged; the function returns
// early on consumer setup errors or an invalid/empty block.
func Check(cli *http.Client, block int) {
	log.Println("cluster", Cluster, ":", "producer启动")
	log.Println("cluster", Cluster, ":", "load latest block")

	// NOTE(review): the original also built an unused producer config and a
	// sarama.Client (unchecked error, never closed) that fed only a dead
	// `if 1 == 0` branch; both removed.
	configc := sarama.NewConfig()
	configc.Consumer.Return.Errors = true
	configc.Version = sarama.V0_11_0_2

	consumer, err := sarama.NewConsumer([]string{KafkaIpAddress}, configc)
	if err != nil {
		// log.Printf instead of log.Println: the message uses printf verbs.
		log.Printf("cluster %v : consumer_test create consumer error %s\n", Cluster, err.Error())
		return
	}
	defer consumer.Close()

	// Consume exactly the requested offset of partition 0 on topic "block".
	partitionConsumer, err := consumer.ConsumePartition("block", 0, int64(block))
	if err != nil {
		log.Printf("cluster %v : try create partition_consumer error %s\n", Cluster, err.Error())
		return
	}
	defer partitionConsumer.Close()

	msg := <-partitionConsumer.Messages()

	var b Block2
	if err := json.Unmarshal(msg.Value, &b); err != nil {
		log.Println("cluster", Cluster, ":", "非法区块信息")
		return
	}

	var bb Block
	bb.BlockId = int(msg.Offset)
	bb.Offset64 = msg.Offset
	if !bb.Set(b, int(msg.Offset)) {
		log.Println("cluster", Cluster, ":", "空区块")
		return
	}

	var count [4]int // transactions per origin cluster (From 0..3)
	for _, v := range bb.transses {
		// Guard the index: an out-of-range From previously panicked here.
		if v.From >= 0 && int(v.From) < len(count) {
			count[v.From]++
		}

		data, err := GetMerkleRootFromTLS(v, 0, cli)
		if err != nil || data == nil {
			// Fetch errors were previously swallowed by an empty branch;
			// treat them the same as missing data: a failed spot check.
			log.Println("cluster", Cluster, ":", string(v.Hash), "抽查失败")
			continue
		}

		var proof MerklrProof
		// Check the unmarshal error (was ignored) and guard against an
		// empty proof, which previously panicked on the index below.
		if err := json.Unmarshal(data, &proof); err != nil || len(proof.Proof) == 0 {
			log.Println("cluster", Cluster, ":", string(v.Hash), "抽查失败")
			continue
		}
		proof.Fork = []int{1, 32, 32}
		root := proof.Proof[len(proof.Proof)-1][0]
		log.Println("cluster", Cluster, ":", "校验证明", bb.BlockId, string(v.Hash), root, proof.Check(0))

		// Cross-check the proof root against the hash service. Computing
		// the result per iteration fixes the original bug where a stale
		// `output` from an earlier iteration masked failures.
		ok := bytes.Equal(root, GethashFromHttp(v))
		log.Println("cluster", Cluster, ":", "对比证明", bb.BlockId, string(v.Hash), ok)
		if !ok {
			log.Println("cluster", Cluster, ":", string(v.Hash), "抽查失败")
		}
	}
	fmt.Println(count)
}
