package services

import (
	"errors"
	"fmt"
	"github.com/Shopify/sarama"
	cluster "github.com/bsm/sarama-cluster"
	"ir/core"
	"ir/core/db"
	"ir/models/structs"
	"log"
	"os"
)

// Package-level configuration, resolved once at init time.
// KAFKA_HOST is the broker address read from application config
// (e.g. "localhost:9092" — see the commented-out constant below).
var c = core.GetConfig()
var KAFKA_HOST = c.GetString("KAFKA_HOST")

//const KAFKA_HOST = "localhost:9092"

// handleMatchOrder persists a newly arrived trade order and then either
// settles it against existing counter-orders or parks it on the queue.
//
// Flow:
//  1. Insert the order into the order book; zero affected rows is treated
//     as a persistence failure.
//  2. Find counter-orders; if any exist, settle them via DealMatchedOrders,
//     otherwise push the order onto the pending queue via PushToQueue.
//  3. On full success, emit an "order.arrive" notification.
//
// Returns a non-nil error if any step fails; the notification is only sent
// when every step succeeded.
func handleMatchOrder(newOrder *structs.Order) error {
	orderBook := newOrder.ToOrderBook()
	affected, err := db.Conn().Insert(orderBook)
	if err != nil {
		return err
	}
	if affected == 0 {
		// "入库失败" = persisting the order book entry failed.
		return errors.New("入库失败")
	}

	// Look up counter-orders that can fill this one.
	matchOrders, err := FindMatchOrders(*newOrder)
	if err != nil {
		return err
	}

	if len(matchOrders) > 0 {
		// Settle against the matched counter-orders.
		if err := DealMatchedOrders(*newOrder, matchOrders); err != nil {
			return err
		}
	} else {
		// No match yet: queue the order for later matching.
		if err := PushToQueue(*newOrder); err != nil {
			return err
		}
	}

	// Every error path returned above, so success is certain here.
	// (The original re-checked err == nil, which was always true.)
	NotifyOrder(newOrder.ID, "order.arrive")
	return nil
}

// handleCancelOrder removes the given order from the book and, when the
// removal succeeds, broadcasts an "order.cancel" notification.
func handleCancelOrder(order *structs.Order) error {
	if err := RemoveOrder(*order); err != nil {
		return err
	}
	NotifyOrder(order.ID, "order.cancel")
	return nil
}

// StartConsume runs the Kafka order-consumption loop. It blocks until the
// consumer's message channel is closed (i.e. the consumer shuts down).
//
// Each message is decoded into an order and dispatched by type:
//   - "trade":  persisted and matched via handleMatchOrder
//   - "cancel": removed via handleCancelOrder
//
// On a processing error the offset is NOT marked, so the message will be
// redelivered after a restart/rebalance; on success the offset is committed.
func StartConsume() {
	fmt.Println("KAFKA_HOST", KAFKA_HOST)
	consumer := createConsumer()

	// Ranging over Messages() terminates when the channel is closed. The
	// original single-case select kept looping on a closed channel with
	// ok == false, which degenerated into a 100% CPU busy-spin.
	for msg := range consumer.Messages() {
		fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)

		newOrder := new(structs.Order)
		// NOTE(review): FromJSON's error (if any) is not surfaced here, so a
		// malformed payload yields a zero-valued order — confirm its signature.
		newOrder.FromJSON(msg.Value)

		var err error
		switch newOrder.Type {
		case "trade":
			err = handleMatchOrder(newOrder)
		case "cancel":
			err = handleCancelOrder(newOrder)
		default:
			// "无效的订单类型" = invalid order type.
			err = errors.New("无效的订单类型")
		}

		if err != nil {
			// "处理订单失败" = order processing failed. Skip the offset mark
			// so the message is redelivered.
			log.Println("处理订单失败", err.Error())
			continue
		}

		consumer.MarkOffset(msg, "") // commit offset only after success
	}
}

//
// Create the consumer
//
// createConsumer builds a sarama-cluster consumer subscribed to the "orders"
// topic on KAFKA_HOST, consuming from the oldest offset when the group has
// no committed position. It terminates the process if the broker connection
// cannot be established.
func createConsumer() *cluster.Consumer {
	// Cluster configuration: surface consumer errors on a channel,
	// suppress rebalance notifications.
	config := cluster.NewConfig()
	config.Consumer.Return.Errors = true
	config.Group.Return.Notifications = false
	config.Consumer.Offsets.Initial = sarama.OffsetOldest

	brokers := []string{KAFKA_HOST}
	topics := []string{"orders"}
	consumer, err := cluster.NewConsumer(brokers, "test-consumer-group", topics, config)
	if err != nil {
		// Include the underlying error so the failure cause is diagnosable
		// (the original message dropped it entirely).
		log.Fatal("Unable to connect consumer to kafka cluster: ", err)
	}

	// Drain the error channel so Return.Errors = true cannot back-pressure
	// the consumer.
	go handleErrors(consumer)
	// NOTE(review): Group.Return.Notifications is false above, so this
	// goroutine's channel never receives — either enable notifications or
	// drop the goroutine; harmless as-is.
	go handleNotifications(consumer)
	return consumer
}

// handleErrors logs every error emitted on the consumer's error channel,
// returning once that channel is closed.
func handleErrors(consumer *cluster.Consumer) {
	for {
		err, ok := <-consumer.Errors()
		if !ok {
			return
		}
		log.Printf("Error: %s\n", err.Error())
	}
}

// handleNotifications logs every rebalance notification from the consumer,
// returning once the notification channel is closed.
func handleNotifications(consumer *cluster.Consumer) {
	for {
		ntf, ok := <-consumer.Notifications()
		if !ok {
			return
		}
		log.Printf("Rebalanced: %+v\n", ntf)
	}
}

//
// Create the producer
//

// CreateProducer returns an async Kafka producer pointed at KAFKA_HOST.
// RequiredAcks = WaitForAll waits for the full ISR to acknowledge each
// message; successes are not reported, so callers need only drain
// producer.Errors(). The process exits if the connection cannot be
// established.
func CreateProducer() sarama.AsyncProducer {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = false
	config.Producer.Return.Errors = true
	config.Producer.RequiredAcks = sarama.WaitForAll

	producer, err := sarama.NewAsyncProducer([]string{KAFKA_HOST}, config)
	if err != nil {
		// Include the underlying error so the failure cause is diagnosable
		// (the original message dropped it entirely).
		log.Fatal("Unable to connect producer to kafka server: ", err)
	}
	return producer
}
