package ods_kafka

import (
	"fmt"
	"strconv"
	"sync"
	"time"

	"github.com/Shopify/sarama"

	"server/global"
	"server/model/common/request"
	"server/utils"
)

// OdsKafkaService exposes CRUD, connectivity-test and consume-job
// start/stop operations for OdsKafka data sources. It holds no state of
// its own; all persistence goes through global.SysDB.
type OdsKafkaService struct {
}

// CreateOdsKafka validates and inserts a new OdsKafka record, then derives
// its Bsm identifier once the database has assigned an ID.
//
// Fix: the original ignored the Create error and fell through; we now
// return early so SetBsm never runs after a failed insert.
func (odsKafkaService *OdsKafkaService) CreateOdsKafka(odsKafka *OdsKafka) (err error) {
	// Force a zero ID so GORM performs an INSERT and assigns a fresh key.
	odsKafka.ID = 0
	odsKafka.CheckReady()
	if err = global.SysDB.Create(odsKafka).Error; err != nil {
		return err
	}
	// The ID is populated only after a successful insert; SetBsm depends on it.
	if odsKafka.ID > 0 {
		if err = odsKafka.SetBsm(); err != nil {
			return err
		}
	}
	return nil
}

// DeleteOdsKafka removes a single OdsKafka record from the database.
func (odsKafkaService *OdsKafkaService) DeleteOdsKafka(odsKafka OdsKafka) (err error) {
	return global.SysDB.Delete(&odsKafka).Error
}

// DeleteOdsKafkaByIds batch-deletes OdsKafka records by primary key.
func (odsKafkaService *OdsKafkaService) DeleteOdsKafkaByIds(ids request.IdsReq) (err error) {
	return global.SysDB.Delete(&[]OdsKafka{}, "id in ?", ids.Ids).Error
}

// UpdateOdsKafka re-validates the record, refreshes its Bsm identifier and
// persists the result.
func (odsKafkaService *OdsKafkaService) UpdateOdsKafka(odsKafka OdsKafka) (err error) {
	odsKafka.CheckReady()
	if err = odsKafka.SetBsm(); err != nil {
		return err
	}
	return global.SysDB.Save(&odsKafka).Error
}

// GetOdsKafkaById fetches a single OdsKafka record by primary key.
func (odsKafkaService *OdsKafkaService) GetOdsKafkaById(id uint) (odsKafka OdsKafka, err error) {
	err = global.SysDB.First(&odsKafka, "id = ?", id).Error
	return
}

// GetOdsKafkaInfoList returns one page of OdsKafka records plus the total
// row count, optionally filtered by a created_at range.
func (odsKafkaService *OdsKafkaService) GetOdsKafkaInfoList(info OdsKafkaSearch) (list []OdsKafka, total int64, err error) {
	db := global.SysDB.Model(&OdsKafka{})
	// Apply the date filter only when both bounds are supplied.
	if info.StartCreatedAt != nil && info.EndCreatedAt != nil {
		db = db.Where("created_at BETWEEN ? AND ?", info.StartCreatedAt, info.EndCreatedAt)
	}
	if err = db.Count(&total).Error; err != nil {
		return
	}

	// Page is 1-based; translate it into a row offset.
	limit := info.PageSize
	offset := info.PageSize * (info.Page - 1)
	err = db.Limit(limit).Offset(offset).Find(&list).Error
	return list, total, err
}

// GetAllOdsKafkas loads every OdsKafka record without pagination.
func (odsKafkaService *OdsKafkaService) GetAllOdsKafkas() (odsKafka []OdsKafka, err error) {
	err = global.SysDB.Find(&odsKafka).Error
	return
}

// GetOdsKafka loads every OdsKafka record.
// NOTE(review): duplicates GetAllOdsKafkas; kept for caller compatibility.
func (odsKafkaService *OdsKafkaService) GetOdsKafka() (odsKafka []OdsKafka, err error) {
	err = global.SysDB.Find(&odsKafka).Error
	return
}

// TestOdsKafka performs a connectivity smoke test against the configured
// Kafka cluster: it connects, lists partitions, and tries to read one
// message (either from the user-specified partition/offset or from every
// partition). It accumulates a human-readable diagnostic report that is
// returned alongside any error.
//
// Fixes over the previous revision:
//   - the sarama.Consumer is now closed on every path (it used to leak);
//   - the "all partitions" branch no longer spawns goroutines that raced on
//     report and called wg.Done without a matching wg.Add; one message per
//     partition is read synchronously with a bounded wait instead;
//   - partition IDs come from partitionList values, not slice indices;
//   - the targeted-read failure message ends with '\n' (was the typo "：n").
func (odsKafkaService *OdsKafkaService) TestOdsKafka(odsKafka *OdsKafka) (msg string, err error) {
	global.Logger.Info("test service")
	var report string
	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true

	report += fmt.Sprintf("kafka dsn:%s\n", odsKafka.Dsn)
	fmt.Printf("%v\n", report)
	consumer, err := sarama.NewConsumer([]string{odsKafka.Dsn}, config)
	if err != nil {
		report += fmt.Sprintf("连接kafka失败,err:%v\n", err)
		return report, err
	}
	// Release broker connections no matter how we leave this function.
	defer func() {
		if cerr := consumer.Close(); cerr != nil {
			global.Logger.Info("关闭consumer失败")
		}
	}()
	report += "连接kafka成功\n"
	fmt.Printf("%v\n", report)

	topic := odsKafka.Topic
	report += fmt.Sprintf("kafka topic:%s\n", topic)
	fmt.Printf("%v\n", report)
	partitionList, err := consumer.Partitions(topic)
	if err != nil {
		report += fmt.Sprintf("获取partition list失败,err:%v\n", err)
		fmt.Printf("%v\n", report)
		return report, err
	}
	report += fmt.Sprintf("获取partitions成功,list:%v\n", partitionList)
	fmt.Printf("%v\n", report)

	if odsKafka.Partition != "" {
		// Targeted mode: consume exactly one message from the given
		// partition (and optional offset) and report the result.
		report += fmt.Sprintf("获取指定partition:%v\n", odsKafka.Partition)
		fmt.Printf("%v\n", report)
		partition, err := strconv.Atoi(odsKafka.Partition)
		if err != nil {
			report += fmt.Sprintf("指定的partition不正确,err:%v\n", err)
			fmt.Printf("%v\n", report)
			return report, err
		}
		offset := sarama.OffsetOldest
		if odsKafka.Offset != "" {
			report += fmt.Sprintf("获取指定offset:%v\n", odsKafka.Offset)
			fmt.Printf("%v\n", report)
			o, err := strconv.Atoi(odsKafka.Offset)
			if err != nil {
				report += fmt.Sprintf("指定的offset不正确,err:%v\n", err)
				fmt.Printf("%v\n", report)
				return report, err
			}
			offset = int64(o)
		}
		partitionConsumer, err := consumer.ConsumePartition(topic, int32(partition), offset)
		if err != nil {
			report += fmt.Sprintf("消费指定位置失败,topic:%s,partition:%d,offset:%d\n", topic, partition, offset)
			fmt.Printf("%v\n", report)
			return report, err
		}
		report += fmt.Sprintf("消费指定位置成功,topic:%s,partition:%d,offset:%d\n", topic, partition, offset)
		fmt.Printf("%v\n", report)
		defer func() {
			if cerr := partitionConsumer.Close(); cerr != nil {
				global.Logger.Info("关闭partitionConsumer失败")
			}
		}()

		// Block until a single message or a consumer error arrives.
		select {
		case m := <-partitionConsumer.Messages():
			report += fmt.Sprintf("指定位置值成功：msg offset: %d, partition: %d, timestamp: %s, value: %s\n",
				m.Offset, m.Partition, m.Timestamp.String(), string(m.Value))
			fmt.Printf("%v\n", report)
			return report, nil
		case cerr := <-partitionConsumer.Errors():
			report += fmt.Sprintf("指定位置值失败,%v\n", cerr)
			fmt.Printf("%v\n", report)
			return report, cerr
		}
	}

	// Broadcast mode: sample at most one message from each partition.
	report += fmt.Sprintf("默认获取全部partition:%v\n", partitionList)
	fmt.Printf("%v\n", report)
	for _, partition := range partitionList {
		offset := sarama.OffsetOldest
		if odsKafka.Offset != "" {
			report += fmt.Sprintf("获取指定offset:%v\n", odsKafka.Offset)
			fmt.Printf("%v\n", report)
			o, aerr := strconv.Atoi(odsKafka.Offset)
			if aerr != nil {
				report += fmt.Sprintf("指定的offset不正确,err:%v\n", aerr)
				fmt.Printf("%v\n", report)
				return report, aerr
			}
			offset = int64(o)
		}

		pc, perr := consumer.ConsumePartition(topic, partition, offset)
		if perr != nil {
			report += fmt.Sprintf("消费失败,topic:%s,partition:%d,offset:%d\n", topic, partition, offset)
			fmt.Printf("%v\n", report)
			return report, perr
		}
		report += fmt.Sprintf("消费成功,topic:%s,partition:%d,useOffset:%d,markOffset:%v,\n", topic, partition, offset, pc.HighWaterMarkOffset())
		fmt.Printf("%v\n", report)

		// Read at most one message; an empty partition must not hang the
		// whole test, so bound the wait.
		select {
		case m := <-pc.Messages():
			report += fmt.Sprintf("Partition:%d, Offset:%d, key:%s, value:%s\n", m.Partition, m.Offset, string(m.Key), string(m.Value))
			fmt.Printf("%v\n", report)
		case cerr := <-pc.Errors():
			report += fmt.Sprintf("消费partition失败,err:%v\n", cerr)
			fmt.Printf("%v\n", report)
		case <-time.After(3 * time.Second):
			report += fmt.Sprintf("partition:%d 在超时时间内没有消息\n", partition)
			fmt.Printf("%v\n", report)
		}
		pc.AsyncClose()
	}
	return report, err
}

// StartOdsKafka starts (or resumes) background consumption for odsKafka.
//
// If a job entry already exists under the record's Bsm key, the job is
// simply re-enabled. Otherwise a consumer is created and one goroutine is
// launched per partition; the last processed offset is checkpointed to
// etcd under "kfk/<topic>/<partition>".
//
// Fixes over the previous revision:
//   - a failed utils.EtcdGet no longer dereferences the (likely nil)
//     response — previously len(kps.Kvs) could panic;
//   - the probe PartitionConsumer opened at OffsetOldest is closed before
//     re-consuming at the resolved offset (sarama refuses a second
//     consumer on the same topic/partition while one is open);
//   - partition IDs come from partitionList values, not slice indices.
//
// NOTE(review): global.JobRunning is read and written from these
// goroutines without synchronization — a data race that should be fixed
// where the map is declared (mutex or sync.Map). The sarama consumer is
// also never closed after the last goroutine exits; a shutdown path that
// calls wg.Wait and consumer.Close would fix that.
func (odsKafkaService *OdsKafkaService) StartOdsKafka(odsKafka *OdsKafka) error {
	if _, ok := global.JobRunning[odsKafka.Bsm]; ok {
		// Job already registered (possibly paused): just flag it back on.
		odsKafka.Start = true
		if err := odsKafka.Save(); err != nil {
			return err
		}
		global.JobRunning[odsKafka.Bsm] = true
		return nil
	}

	runningKey := odsKafka.Bsm
	var wg sync.WaitGroup
	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true

	consumer, err := sarama.NewConsumer([]string{odsKafka.Dsn}, config)
	if err != nil {
		return err
	}

	partitionList, err := consumer.Partitions(odsKafka.Topic)
	if err != nil {
		return err
	}

	for _, partition := range partitionList {
		offset := sarama.OffsetOldest
		key := fmt.Sprintf("kfk/%s/%d", odsKafka.Topic, partition)
		if odsKafka.Offset != "" {
			// An explicit offset on the record wins over the etcd checkpoint.
			o, err := strconv.Atoi(odsKafka.Offset)
			if err != nil {
				return err
			}
			offset = int64(o)
		} else {
			kps, err := utils.EtcdGet(global.Etcd, key)
			if err != nil {
				// Lookup failed: keep the default and do NOT touch kps.
				global.Logger.Info(fmt.Sprintf("etcd获取%s kafka分区信息失败，默认为0: %s\n", odsKafka.Topic, err.Error()))
			} else if len(kps.Kvs) == 0 {
				fmt.Printf("etcd key(%s) 没有值\n", key)
			} else {
				for _, kv := range kps.Kvs {
					offsetE, err := strconv.Atoi(string(kv.Value))
					if err != nil {
						global.Logger.Info(fmt.Sprintf("etcd获取%s kafka分区offset失败，默认为0: %s\n", odsKafka.Topic, err.Error()))
					} else {
						offset = int64(offsetE)
						global.Logger.Info(fmt.Sprintf("offset设置为etcd的值: %d\n", offsetE))
					}
				}
			}
		}

		// Probe from the oldest offset to learn the high-water mark.
		pc, err := consumer.ConsumePartition(odsKafka.Topic, partition, sarama.OffsetOldest)
		if err != nil {
			return err
		}

		if pc.HighWaterMarkOffset() > offset {
			// Close the probe first; sarama rejects two consumers on the
			// same topic/partition.
			if cerr := pc.Close(); cerr != nil {
				return cerr
			}
			pc, err = consumer.ConsumePartition(odsKafka.Topic, partition, offset)
			if err != nil {
				return err
			}
		}

		// The one-shot offset override has been applied; clear and persist.
		odsKafka.Offset = ""
		if err = odsKafka.Save(); err != nil {
			return err
		}

		wg.Add(1)
		go func(partitionConsumer sarama.PartitionConsumer, key string, offset int64) {
			defer wg.Done()
			for {
				if !global.JobRunning[runningKey] {
					delete(global.JobRunning, runningKey)
					break
				}
				select {
				case msg := <-partitionConsumer.Messages():
					// Skip messages at or below the checkpoint offset.
					if msg.Offset > offset {
						fmt.Printf("处理kafka数据\n")
						v, ok := global.JobRunning[runningKey]
						if !v || !ok {
							fmt.Printf("kafka任务退出,v:%v,ok:%v\n", v, ok)
							delete(global.JobRunning, runningKey)
							return
						}
						if err := kafkaFunc(msg.Value, *odsKafka); err != nil {
							global.Logger.Warn("kafka数据处理,TODO...")
						} else if err = utils.EtcdPut(global.Etcd, key, fmt.Sprintf("%d", msg.Offset)); err != nil {
							// Checkpoint write failed; the message will be
							// reprocessed on the next restart.
							global.Logger.Error(fmt.Sprintf("保存%s kafka消费offset失败 %s\n", odsKafka.Topic, err.Error()))
						}
					}
				case err := <-partitionConsumer.Errors():
					global.Logger.Error(fmt.Sprintf("kfk数据消费失败,key:%s,err :%s\n", key, err.Error()))
				}
			}
		}(pc, key, offset)
	}

	global.JobRunning[runningKey] = true
	odsKafka.Start = true
	if err = odsKafka.Save(); err != nil {
		return err
	}
	return nil
}

// kafkaFunc is the placeholder message handler invoked for every consumed
// Kafka payload; it currently just echoes its inputs as an error (TODO:
// real processing).
//
// Fix: the original passed a dynamically-built string as the format
// argument to fmt.Errorf, so any '%' in the payload was misinterpreted as
// a verb (go vet printf). Use an explicit format string instead.
func kafkaFunc(kfkValue []byte, odsKafka OdsKafka) error {
	return fmt.Errorf("%+v,%+v", kfkValue, odsKafka)
}

// StopOdsKafka persists the stopped state and signals the running consume
// goroutines (via the JobRunning flag) to exit.
func (odsKafkaService *OdsKafkaService) StopOdsKafka(odsKafka *OdsKafka) (err error) {
	odsKafka.Start = false
	if err = odsKafka.Save(); err != nil {
		return err
	}
	global.JobRunning[odsKafka.Bsm] = false
	return nil
}
