package main

import (
	"context"
	"fmt"
	"github.com/segmentio/kafka-go"
	"io"
	"log"
	"os"
	"strconv"
	"strings"
	"time"
)

// Broker address and topic/consumer-group identifiers used by every
// function in this demo.
var serverip = "106.120.201.126:19359"
var topic = "yyn_test"
var groupId = "main_consume_history"
var parition = 0 // NOTE(review): typo for "partition"; currently unreferenced

//var LOC, _ = time.LoadLocation("Asia/Shanghai")
// Time window replayed by the consumers, interpreted in the local zone.
// Parse errors are deliberately ignored; the literals are well-formed.
var startTime, _ = time.ParseInLocation("2006-01-02 15:04:05", "2023-04-27 08:24:12", time.Local)
var endTime, _ = time.ParseInLocation("2006-01-02 15:04:05", "2023-04-27 08:24:20", time.Local)

var batchSize = 1048576 // 1MB
// Local file that consumed message values are written to.
var filePath = "D:\\qiwenshare\\e1.txt"

// main replays historical messages from the topic into a local file.
// Uncomment send() to produce test data instead.
func main() {
	//send()
	consume2()
}

// consume2 reads messages from the configured topic starting at startTime
// and writes each message value (CRLF-terminated) to filePath, stopping when
// a message's timestamp passes endTime or the partition's high-water mark is
// reached.
func consume2() {
	// The original checked os.Stat with an inverted condition (err != nil
	// means the file does NOT exist), so an existing file was never
	// truncated and stale bytes could survive past the new content.
	// O_CREATE|O_TRUNC handles both cases correctly in one call.
	file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0777)
	if err != nil {
		log.Println("open output file:", err)
		return
	}
	defer file.Close()

	reader := getKafkaReader(serverip, topic, groupId)
	defer reader.Close()

	// Position the reader at the first message at or after startTime.
	if err := reader.SetOffsetAt(context.Background(), startTime); err != nil {
		log.Println("set offset:", err)
		return
	}

	fmt.Println("start consuming ... !!" + strconv.Itoa(int(reader.Offset())))
	if reader.Offset() < 0 {
		fmt.Println("没有数据")
		return
	}

	for {
		m, err := reader.FetchMessage(context.Background())
		if err != nil {
			// log.Fatalln would exit the process immediately, making the
			// following break unreachable and skipping the deferred
			// reader/file cleanup; log and break instead.
			log.Println(err)
			break
		}
		if err := reader.CommitMessages(context.Background(), m); err != nil {
			log.Println("commit offset:", err)
		}
		if m.Time.After(endTime) {
			log.Println("end")
			break
		}

		fmt.Printf("message at topic:%v partition:%v offset:%v	%s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value))
		if _, err = io.WriteString(file, string(m.Value)+"\r\n"); err != nil {
			log.Println("写入出错")
			return
		}
		// Stop once the last currently-available message has been consumed.
		if m.Offset+1 == m.HighWaterMark {
			log.Println("已到最后一条")
			break
		}
	}
}
// getKafkaReader builds a partition reader for the given comma-separated
// broker list and topic. The groupID argument is intentionally not applied:
// the reader is pinned to partition 0 starting at the earliest offset, and
// kafka-go does not allow combining a consumer group with an explicit
// partition.
func getKafkaReader(kafkaURL, topic, groupID string) *kafka.Reader {
	_ = groupID // kept for call-site compatibility; see note above
	cfg := kafka.ReaderConfig{
		Brokers:     strings.Split(kafkaURL, ","),
		Topic:       topic,
		Partition:   0,
		MaxWait:     time.Second,
		StartOffset: kafka.FirstOffset,
	}
	return kafka.NewReader(cfg)
}

// consumeByTime consumes messages between startTime and endTime using a
// consumer-group reader and writes each message value to filePath.
func consumeByTime() {
	// Create or truncate the output file so every run starts from empty;
	// the original Stat/Remove/Create dance then opened with plain O_RDWR
	// and ignored the open error.
	file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0777)
	if err != nil {
		log.Println("open output file:", err)
		return
	}
	defer file.Close()

	r := kafka.NewReader(kafka.ReaderConfig{
		Brokers:   []string{serverip},
		Topic:     topic,
		GroupID:   groupId,
		Partition: 0,
		MinBytes:  batchSize,
		MaxBytes:  batchSize,
		// How often consumed offsets are committed back to the group.
		CommitInterval:        time.Second,
		WatchPartitionChanges: false,
		SessionTimeout:        time.Second * 30,
	})

	// NOTE(review): kafka-go rejects SetOffsetAt on a group reader (GroupID
	// is set above); the original silently discarded that error. Surface it
	// so the misconfiguration is visible — confirm which mode is intended.
	if err := r.SetOffsetAt(context.Background(), startTime); err != nil {
		log.Println("set offset:", err)
	}

	var seq = 1
	for {
		m, err := r.ReadMessage(context.Background())
		if err != nil {
			log.Println("read message:", err)
			break
		}
		if m.Time.After(endTime) {
			break
		}
		fmt.Printf("message at offset %d: %s = %s\n", m.Offset, string(m.Key), string(m.Value))
		fmt.Println(seq)
		seq++
		// BUG FIX: the original wrote the four literal characters `\r\n`
		// ("\\r\\n") instead of an actual CRLF line ending (compare the
		// sibling consume2, which uses "\r\n").
		if _, err := io.WriteString(file, string(m.Value)+"\r\n"); err != nil {
			log.Println("write:", err)
			return
		}
	}

	if err := r.Close(); err != nil {
		log.Fatal("failed to close reader:", err)
	}
}
// send2 publishes 20 numbered test messages under a shared key through a
// kafka.Writer, then closes the writer.
func send2() {
	writer := &kafka.Writer{
		Addr:     kafka.TCP(serverip),
		Topic:    topic,
		Balancer: &kafka.LeastBytes{},
	}

	for n := 0; n < 20; n++ {
		msg := kafka.Message{
			Key:   []byte("Key-A"),
			Value: []byte(strconv.Itoa(n+1) + "yuyanan!"),
		}
		if err := writer.WriteMessages(context.Background(), msg); err != nil {
			log.Fatal("failed to write messages:", err)
		}
		fmt.Println(n)
	}

	if err := writer.Close(); err != nil {
		log.Fatal("failed to close writer:", err)
	}
}

// send dials the leader for partition 0 of the configured topic and writes
// 20 numbered test messages over the raw connection.
func send() {
	partition := 0

	conn, err := kafka.DialLeader(context.Background(), "tcp", serverip, topic, partition)
	if err != nil {
		log.Fatal("failed to dial leader:", err)
	}

	// Give the whole batch a 10s deadline; the original discarded this error.
	if err := conn.SetWriteDeadline(time.Now().Add(10 * time.Second)); err != nil {
		log.Fatal("failed to set write deadline:", err)
	}

	for i := 0; i < 20; i++ {
		_, err = conn.WriteMessages(
			kafka.Message{Value: []byte(strconv.Itoa(i+1) + "yuyanan!")},
		)
		if err != nil {
			log.Fatal("failed to write messages:", err)
		}
		fmt.Println(i)
	}

	// Original message said "writer" (copied from send2), but this closes
	// the leader connection.
	if err := conn.Close(); err != nil {
		log.Fatal("failed to close connection:", err)
	}
}

// connTest dials the broker and prints every partition it reports — a quick
// connectivity smoke test.
func connTest() {
	conn, err := kafka.Dial("tcp", serverip)
	if err != nil {
		panic(err.Error())
	}
	defer conn.Close()

	parts, err := conn.ReadPartitions()
	if err != nil {
		panic(err.Error())
	}

	for i, part := range parts {
		fmt.Println(i)
		fmt.Println(part)
	}
}
