package main

import (
    "../common"
    "container/list"
    "fmt"
    "github.com/Shopify/sarama"
    "strings"
    "syscall"
    "time"
    "strconv"
)

// Sender drains DataPoints from a channel and produces them to a Kafka
// topic, keeping delivery statistics and a bounded in-memory retry queue
// for points whose production failed.
type Sender struct {
	kafkaAddr              []string                 // Kafka broker addresses to dial
	topic                  string                   // destination topic for every message
	counter                *Counter                 // sink for periodic SenderStatics snapshots
	DataPointCh            chan common.DataPoint    // upstream source of points to send
	cache                  *common.CacheManager     // every fresh point is mirrored into this cache
	stopping               chan interface{}         // NOTE(review): declared but not used in this file — confirm lifecycle elsewhere
	stopped                chan interface{}         // NOTE(review): declared but not used in this file — confirm lifecycle elsewhere
	producer               sarama.AsyncProducer     // live Kafka producer; replaced on ReConnect
	summary                *SenderStatics           // statistics for the current ~10s window
	ticker                 *time.Ticker             // 10s ticker driving summary rotation in Send
	retryMessageQueue      *list.List               // FIFO of points whose send failed, re-fetched first
	reconnectCount         int                      // number of reconnects performed so far
	partitionCount         int32                    // hard-coded partition count (13) used as modulus in send
	retryMessageQueueSize  int                      // retry-queue length that triggers a reconnect
	errorReconnectTimes    int                      // reconnect budget before Run gives up
	errorReconnectInterval int                      // seconds slept before each reconnect
}

// NewSender builds a Sender wired to the given channel, counter and cache.
// Tuning knobs left at their zero value in conf fall back to built-in
// defaults: retry queue size 200, reconnect budget 20, reconnect
// interval 5 seconds.
func NewSender(conf *Config, ch chan common.DataPoint, counter *Counter, cache *common.CacheManager) *Sender {
	// orDefault substitutes def for an unset (zero) configuration value.
	orDefault := func(v, def int) int {
		if v == 0 {
			return def
		}
		return v
	}
	return &Sender{
		kafkaAddr:              conf.KafkaAddr,
		topic:                  conf.KafkaTopic,
		DataPointCh:            ch,
		counter:                counter,
		cache:                  cache,
		stopping:               make(chan interface{}),
		stopped:                make(chan interface{}),
		summary:                &SenderStatics{FromTimestamp: time.Now().Unix()},
		ticker:                 time.NewTicker(10 * time.Second),
		retryMessageQueue:      list.New(),
		partitionCount:         13,
		retryMessageQueueSize:  orDefault(conf.RetryMessageQueueSize, 200),
		errorReconnectTimes:    orDefault(conf.ErrorReconnectTimes, 20),
		errorReconnectInterval: orDefault(conf.ErrorReconnectInterval, 5),
	}
}

// tail extracts the trailing numeric suffix from a name of the form
// "prefix.a_b_c_N": it looks at the segment after the first dot, requires
// it to split into exactly four underscore-separated fields, and parses
// the last field as an integer. Any name not matching that shape (or with
// a non-numeric last field) yields 0.
func tail(name string) int32 {
	segments := strings.Split(name, ".")
	if len(segments) < 2 {
		return 0
	}
	fields := strings.Split(segments[1], "_")
	if len(fields) != 4 {
		return 0
	}
	n, err := strconv.Atoi(fields[3])
	if err != nil {
		return 0
	}
	return int32(n)
}

// ReConnect tears down the current producer (if any) and dials a fresh
// AsyncProducer against s.kafkaAddr, using the manual partitioner because
// send() picks the partition explicitly per message.
//
// On a dial failure the old producer has already been closed, s.producer
// is nil, and the error is returned to the caller; on success s.producer
// holds the new connection and nil is returned.
func (s *Sender) ReConnect() error {
	kafkaConfig := sarama.NewConfig()
	kafkaConfig.Producer.Partitioner = sarama.NewManualPartitioner
	if s.producer != nil {
		// BUG FIX: this used log.Fatal, which terminates the whole
		// process in the middle of a reconnect attempt and defeats the
		// retry mechanism in Run. A failed close of a producer we are
		// discarding anyway is only worth logging.
		if err := s.producer.Close(); err != nil {
			log.Error("closing old kafka producer: ", err)
		}
	}
	s.producer = nil
	producer, err := sarama.NewAsyncProducer(s.kafkaAddr, kafkaConfig)
	if err != nil {
		return err
	}
	s.producer = producer
	return nil
}

// send produces one data point asynchronously. The target partition is
// the metric name's numeric suffix (see tail) modulo the fixed partition
// count, so points with the same suffix always land on the same
// partition. dp is passed as the message Value directly — presumably
// common.DataPoint implements sarama.Encoder; verify in the common package.
//
// The select blocks until EITHER the message is accepted onto the
// producer's input channel (success counted) OR a delivery error from a
// PREVIOUS message becomes available on the error channel. In the error
// case the current dp has NOT been enqueued; the caller (Run) is
// responsible for pushing it onto the retry queue.
func (s *Sender) send(dp common.DataPoint) error {
	pid := tail(dp.Name) % s.partitionCount
	select {
	case s.producer.Input() <- &sarama.ProducerMessage{Topic: s.topic, Partition: pid, Value: dp}:
		s.summary.MessageInc()
		return nil
	case err := <-s.producer.Errors():
		s.summary.ErrorInc()
		return err
	}
}

// Send delivers dp to Kafka via the internal producer, first giving the
// statistics window a chance to rotate if the 10-second ticker has fired.
func (s *Sender) Send(dp common.DataPoint) error {
	s.maybeRotateSummary()
	return s.send(dp)
}

// maybeRotateSummary hands the current statistics window to the counter
// and starts a fresh one when the ticker has fired. The receive is
// non-blocking (default case), so this returns immediately between ticks.
func (s *Sender) maybeRotateSummary() {
	select {
	case <-s.ticker.C:
		s.counter.OnSender(s.summary)
		s.summary = &SenderStatics{FromTimestamp: time.Now().Unix()}
	default:
	}
}

// GetDataPoint returns the next point to produce. Previously failed
// points waiting in the retry queue take priority over fresh points from
// the channel; fresh points are additionally mirrored into the cache
// before being returned. A non-nil error means the upstream channel has
// been closed and no more points will ever arrive.
func (s *Sender) GetDataPoint() (dp common.DataPoint, err error) {
	if s.retryMessageQueue.Len() > 0 {
		// BUG FIX: the old message formatted `err`, which is always nil
		// here; log the queue depth instead, which is actually useful.
		log.Warningf("fetch datapoint from failed queue, pending: %d", s.retryMessageQueue.Len())
		node := s.retryMessageQueue.Front()
		s.retryMessageQueue.Remove(node)
		dp = node.Value.(common.DataPoint)
		return
	}
	data, ok := <-s.DataPointCh
	if !ok {
		// BUG FIX: return immediately on a closed channel. The old code
		// fell through and pushed the zero-value DataPoint into the
		// cache channel and handed it to the caller alongside the error.
		err = fmt.Errorf("server datapoint channel closed")
		return
	}
	s.cache.DataChan() <- data
	dp = data
	return
}

// Run is the sender's main loop: connect to Kafka, then repeatedly fetch
// a data point (retry queue first, then the channel) and produce it.
// Failed sends are parked on the retry queue; once that queue exceeds its
// limit the Kafka connection is re-established after a short sleep. The
// loop stops — and asks the whole process to shut down via SIGTERM — when
// the input channel closes or the reconnect budget is exhausted.
func (s *Sender) Run() {
	if err := s.ReConnect(); err != nil {
		log.Error("kafka error: ", err)
		syscall.Kill(syscall.Getpid(), syscall.SIGTERM)
		return
	}
	defer func() {
		// Guard against a nil producer left behind by a failed mid-run
		// reconnect, and don't log.Fatal during cleanup (it would skip
		// any other deferred work).
		if s.producer == nil {
			return
		}
		if err := s.producer.Close(); err != nil {
			log.Error("closing kafka producer: ", err)
		}
	}()
	for {
		dp, err := s.GetDataPoint()
		if err != nil || s.reconnectCount > s.errorReconnectTimes {
			// BUG FIX: this used log.Fatalf, which exits the process
			// immediately — the SIGTERM and the deferred producer close
			// below were unreachable. Log at error level and fall
			// through to the orderly shutdown path.
			log.Error("failed to fetch datapoint: ", err)
			syscall.Kill(syscall.Getpid(), syscall.SIGTERM)
			break
		}
		if err := s.Send(dp); err != nil {
			// BUG FIX: this used log.Fatalf too, so the PushBack below
			// never ran and the retry queue could never fill — the whole
			// retry/reconnect mechanism was dead code. Warn and park the
			// point for retry instead.
			log.Warningf("Failed to produce message: %v", err)
			s.retryMessageQueue.PushBack(dp)
		}
		if s.retryMessageQueue.Len() > s.retryMessageQueueSize {
			log.Warningf("Sleep %d s and reconnect to kafka...", s.errorReconnectInterval)
			time.Sleep(time.Duration(s.errorReconnectInterval) * time.Second)
			// BUG FIX: the reconnect error was silently discarded.
			if err := s.ReConnect(); err != nil {
				log.Error("kafka reconnect failed: ", err)
			}
			// NOTE(review): reconnectCount is never reset on a
			// successful send, so the budget is lifetime-wide — confirm
			// that is intended.
			s.reconnectCount++
		}
	}
}
