package repeater

import (
    "../common"
    "fmt"
    "github.com/Shopify/sarama"
    "time"
    "strings"
    "strconv"
)

// KafkaSender consumes one Kafka topic/partition via sarama, batches parsed
// data points into DataPointPacks, and exposes them on an output channel.
// Lifecycle: NewKafkaSender -> SetController -> Init -> Start (goroutine) -> Stop.
type KafkaSender struct {
    // output carries completed batches to the downstream consumer (see Output()).
    output            chan *DataPointPack
    // closing is closed by the owner to ask the Start loop to exit.
    closing           chan interface{}
    // stopped is closed by Start when its loop has fully exited.
    stopped           chan interface{}
    // rc is the shared controller; set via SetController before Init.
    rc                *RepeaterController
    // workingOffset is the offset of the last message seen by Start.
    workingOffset     int64
    // commitedOffset is the last offset acknowledged via Commit. [sic: "commited"]
    commitedOffset    int64
    consumer          sarama.Consumer
    topic             string
    partition         int32
    // servers is the broker list parsed from the "servers" config entry.
    servers           []string
    name              string
    // batchSize / timeWindowsSize gate when a pack is flushed (see IsReday).
    batchSize         int
    timeWindowsSize   int64
    partitionConsumer sarama.PartitionConsumer
}

// NewKafkaSender builds a sender for the given topic and partition from the
// config map.
//
// Required keys: "name" and "servers" (comma-separated broker list).
// Optional keys: "batch_size" and "time_window_size", both defaulting to 20;
// non-numeric values are silently ignored and the default is kept.
//
// The returned sender is not connected yet: SetController must be called,
// then Init, before Start.
func NewKafkaSender(cfg map[string]string, topic string, partition int32) (*KafkaSender, error) {
    name, ok := cfg["name"]
    if !ok {
        return nil, fmt.Errorf("Config Missing [ name ]")
    }
    serverstr, ok := cfg["servers"]
    if !ok {
        return nil, fmt.Errorf("Config Missing [ servers ]")
    }
    batchSize := 20
    if size, ok := cfg["batch_size"]; ok {
        if s, err := strconv.Atoi(size); err == nil {
            batchSize = s
        }
    }
    timeWindowsSize := 20
    if size, ok := cfg["time_window_size"]; ok {
        if s, err := strconv.Atoi(size); err == nil {
            timeWindowsSize = s
        }
    }
    servers := strings.Split(serverstr, ",")
    return &KafkaSender{
        topic: topic,
        // BUG FIX: the partition argument was previously dropped on the
        // floor; Init would later overwrite it from the controller, but the
        // constructor should honor what the caller asked for.
        partition:       partition,
        name:            name,
        servers:         servers,
        batchSize:       batchSize,
        timeWindowsSize: int64(timeWindowsSize),
        closing:         make(chan interface{}),
        stopped:         make(chan interface{}),
        output:          make(chan *DataPointPack),
    }, nil
}

//func (rs *KafkaSender) SetOffset(offset int64) {
//    rs.workingOffset = offset
//}
// Init connects to the Kafka brokers and opens a partition consumer starting
// at the controller's initial offset. If that offset has already been purged
// from the broker (ErrOffsetOutOfRange), it retries from the oldest available
// offset instead.
//
// SetController must have been called first — rc is dereferenced here for the
// initial offset and the partition.
func (rs *KafkaSender) Init() (err error) {
    rs.workingOffset = rs.rc.initialOffset
    rs.partition = rs.rc.Partition
    cfg := sarama.NewConfig()
    // BUG FIX: this was `time.Duration(3)`, i.e. 3 *nanoseconds*, which
    // effectively disabled the retry backoff and hammered the broker on
    // fetch errors. 3 seconds matches the apparent intent.
    cfg.Consumer.Retry.Backoff = 3 * time.Second
    cfg.Consumer.Fetch.Min = 100
    cfg.Consumer.Fetch.Max = 20000
    cfg.Consumer.Offsets.Initial = sarama.OffsetOldest
    // Errors are returned on PartitionConsumer.Errors(); Start drains them.
    cfg.Consumer.Return.Errors = true
    // 300*time.Second is already a time.Duration — no conversion needed.
    cfg.Consumer.MaxWaitTime = 300 * time.Second
    cfg.ClientID = fmt.Sprintf("%s.%s.%d", rs.name, rs.topic, rs.partition)
    rs.consumer, err = sarama.NewConsumer(rs.servers, cfg)
    if err != nil {
        return
    }
    rs.partitionConsumer, err = rs.consumer.ConsumePartition(rs.topic, rs.partition, rs.workingOffset)
    if err == sarama.ErrOffsetOutOfRange {
        log.Notice("offset no longer exists, trying again from the oldest offset")
        rs.partitionConsumer, err = rs.consumer.ConsumePartition(rs.topic, rs.partition, sarama.OffsetOldest)
    }
    return
}

// Start runs the consume loop: it reads messages from the partition consumer,
// parses each into a data point, and batches them into DataPointPacks that
// are flushed to the output channel whenever the batch size or time window is
// reached (see DataPointPack.IsReday). Parse failures are forwarded to the
// controller's error channel. The loop exits when rs.closing is closed or the
// message channel closes; completion is signaled by closing rs.stopped.
//
// Registers itself on the controller's WaitGroup; Stop performs the matching
// Done. Intended to run in its own goroutine after Init succeeds.
func (rs *KafkaSender) Start() {
    pkg := NewDataPointPack()
    rs.rc.wg.Add(1)
LOOP:
    for {
        select {
        case <-rs.closing:
            break LOOP
        case consumeErr := <-rs.partitionConsumer.Errors():
            // BUG FIX: Init sets Consumer.Return.Errors = true, so the
            // Errors() channel must be drained or the partition consumer can
            // stall. Forward consumer errors to the controller.
            if consumeErr != nil {
                rs.rc.errors <- consumeErr
            }
        case msg, ok := <-rs.partitionConsumer.Messages():
            // A closed channel (or nil message) means the consumer is gone —
            // typically the offset is no longer available.
            if !ok || msg == nil {
                log.Errorf("maybe offset is no longer available")
                break LOOP
            }
            offset := msg.Offset
            rs.workingOffset = offset
            if dp, err := common.ParseDataPoint(msg.Value); err != nil {
                rs.rc.errors <- err
            } else {
                pkg.AddDataPoint(dp, offset)
                if pkg.IsReday(rs.batchSize, rs.timeWindowsSize) {
                    rs.output <- pkg
                    pkg = NewDataPointPack()
                }
            }
        }
    }
    close(rs.stopped)
}

// Stop closes the partition consumer and then the underlying consumer,
// returning the first error encountered. The controller's WaitGroup is always
// marked done, even when a close fails.
func (rs *KafkaSender) Stop() error {
    defer rs.rc.wg.Done()
    err := rs.partitionConsumer.Close()
    if err == nil {
        err = rs.consumer.Close()
    }
    return err
}
// Name reports the sender's configured name (the "name" config entry).
func (rs *KafkaSender) Name() string { return rs.name }

// Output exposes the batched data-point stream as a receive-only channel.
// Packs are delivered here by the Start loop.
func (rs *KafkaSender) Output() <-chan *DataPointPack { return rs.output }
// Handle is a deliberate no-op: this component only produces packs, it does
// not consume them. The method presumably exists to satisfy a shared
// interface — NOTE(review): confirm against the interface definition.
func (rs *KafkaSender) Handle(pkg *DataPointPack) error {
    return nil
}

// SetController wires the shared controller into this sender. It must be
// called before Init, which reads the initial offset and partition from it.
// Always returns nil; the error return exists for interface compatibility.
func (rs *KafkaSender) SetController(controller *RepeaterController) error {
    rs.rc = controller
    return nil
}
// Commit records the last offset seen by the consume loop as committed and
// returns it. Always returns a nil error.
// NOTE(review): workingOffset is written by the Start goroutine with no
// synchronization visible here — confirm Commit is coordinated by the caller.
func (rs *KafkaSender) Commit() (int64, error) {
    committed := rs.workingOffset
    rs.commitedOffset = committed
    return committed, nil
}
