package main

import (
	"fmt"
	"os"
	"strconv"
	"time"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

// main produces NN throttled messages to a fixed Kafka topic/partition
// on a local broker and reports the achieved throughput.
//
// Usage: producer [partition]
//
//	partition — target partition number (defaults to 0).
func main() {
	fmt.Println(os.Args)

	// Target partition comes from the first CLI argument; default 0.
	partitionNum := 0
	if len(os.Args) > 1 {
		n, err := strconv.Atoi(os.Args[1])
		if err != nil {
			fmt.Fprintf(os.Stderr, "invalid partition %q: %v\n", os.Args[1], err)
			os.Exit(1)
		}
		partitionNum = n
	}

	p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "127.0.0.1:9092"})
	if err != nil {
		panic(err)
	}
	defer p.Close()

	// Drain the delivery-report channel so the producer's internal event
	// queue cannot fill up and stall Produce; log only failed deliveries.
	go func() {
		for e := range p.Events() {
			if m, ok := e.(*kafka.Message); ok && m.TopicPartition.Error != nil {
				fmt.Printf("Delivery failed: %v\n", m.TopicPartition)
			}
		}
	}()

	t1 := time.Now()
	const NN = 1000000
	fmt.Println("partitionNum", partitionNum)

	topic := "test-partitioned1"
	// Payload is constant per run; build it once outside the loop.
	payload := []byte("msg " + strconv.Itoa(partitionNum))

	// Produce exactly NN messages (the original `ii <= NN` sent NN+1,
	// skewing the qps figure computed below).
	for ii := 0; ii < NN; ii++ {
		err := p.Produce(&kafka.Message{
			TopicPartition: kafka.TopicPartition{
				Topic:     &topic,
				Partition: int32(partitionNum),
			},
			Value: payload,
		}, nil)
		if err != nil {
			// Typically ErrQueueFull; report rather than drop silently.
			fmt.Printf("Produce failed: %v\n", err)
		}
		// NOTE(review): 100 ms/message throttles this to ~10 msg/s, so a
		// full run of NN messages takes >24 h — presumably an intentional
		// rate limit; confirm before removing.
		time.Sleep(100 * time.Millisecond)
	}

	ts := time.Since(t1).Seconds()
	fmt.Println("qps", float64(NN)/ts)
	fmt.Println("ts", ts)

	// Wait (up to 15 s) for outstanding deliveries before Close; the old
	// extra 5 s sleep after Flush was redundant.
	p.Flush(15 * 1000)
}
