package log

import (
	"fmt"
	"github.com/Shopify/sarama"
	"log"
	"os"
	"os/signal"
	"sync"
)

// LoggerKafka is a logger that echoes messages to stdout and forwards
// them to a Kafka topic via a background sender goroutine.
type LoggerKafka struct {
	// pool carries messages from Log to the Sender goroutine.
	// It is created by Init with a buffer of 1, so Log blocks once
	// the buffer is full and the sender is not draining it.
	pool chan string
}

// Init allocates the message channel and launches the background Sender
// goroutine that delivers queued messages to the given Kafka host/port/topic.
// It must be called before Log.
func (logger *LoggerKafka) Init(host string, port string, topic string) {
	// Buffer of 1: Log blocks only when a previous message is still queued.
	pool := make(chan string, 1)
	logger.pool = pool
	go logger.Sender(pool, host, port, topic)
}

// Log writes message to stdout and enqueues it for asynchronous delivery
// to Kafka. It blocks if the channel buffer is full and the Sender
// goroutine is not currently draining it.
func (logger *LoggerKafka) Log(message string) {
	// Echo locally first so the message is visible even if Kafka lags.
	fmt.Fprintln(os.Stdout, message)
	logger.pool <- message
}

// Sender consumes messages from pool and publishes them asynchronously to
// the given Kafka topic at host:port. It runs until SIGINT is received,
// then shuts the producer down gracefully and logs delivery statistics.
//
// NOTE(review): once Sender returns (on error or shutdown), nothing drains
// pool anymore, so subsequent Log calls may block forever — confirm callers
// stop logging after interrupt.
func (logger *LoggerKafka) Sender(pool chan string, host string, port string, topic string) {
	fmt.Println("func Sender()")

	config := sarama.NewConfig()
	// Request delivery acks so Successes() yields one event per message.
	config.Producer.Return.Successes = true
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	fmt.Println("finished config.")

	client, err := sarama.NewClient([]string{host + ":" + port}, config)
	if err != nil {
		// BUG FIX: the original registered `defer client.Close()` before this
		// check (nil-pointer panic on failure) and then kept running with a
		// nil client. Bail out instead.
		log.Printf("creating kafka client: %v", err)
		return
	}
	defer client.Close()
	fmt.Println("finished client.")

	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		// BUG FIX: the original only printed the error and continued with a
		// nil producer, panicking on the first use below.
		log.Printf("creating kafka producer: %v", err)
		return
	}
	fmt.Println("finished producer.")

	// Trap SIGINT to trigger a graceful shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	var (
		wg                          sync.WaitGroup
		enqueued, successes, errors int
	)

	// Count acknowledged deliveries; Successes() is closed by AsyncClose.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for range producer.Successes() {
			successes++
		}
	}()

	// Count and log failed deliveries; Errors() is closed by AsyncClose.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for err := range producer.Errors() {
			log.Println(err)
			errors++
		}
	}()

ProducerLoop:
	for {
		fmt.Println("ProducerLoop")
		// NOTE(review): this receive blocks until a message arrives, so an
		// interrupt is only noticed while a message is pending below —
		// consider selecting over pool and signals here as well.
		message := <-pool
		fmt.Println(message)
		sender := &sarama.ProducerMessage{Topic: topic, Value: sarama.StringEncoder(message)}
		select {
		case producer.Input() <- sender:
			enqueued++

		case <-signals:
			producer.AsyncClose() // Trigger a shutdown of the producer.
			break ProducerLoop
		}
	}

	// Wait for the success/error counters to drain their channels.
	wg.Wait()

	log.Printf("Successfully produced: %d; errors: %d\n", successes, errors)
	_ = enqueued // retained for parity with the original counters
}

