package kafkadeploylogic

import (
	"context"
	"errors"
	"fmt"
	"github.com/zeromicro/go-zero/core/logx"
	"strconv"
	"strings"
	"sync"
	"time"
	"yunzhan/common/models"
	utils "yunzhan/common/utils"
	"yunzhan/rpc-server/internal/svc"
	agent "yunzhan/rpc-server/pb"
)

// DeployKafkaOnZookeeperLogic handles RPC requests that deploy a Kafka
// cluster coordinated by ZooKeeper onto the local node.
type DeployKafkaOnZookeeperLogic struct {
	ctx    context.Context     // request-scoped context from the RPC call
	svcCtx *svc.ServiceContext // shared service dependencies and configuration
	logx.Logger                // embedded context-aware logger
}

// NewDeployKafkaOnZookeeperLogic constructs a DeployKafkaOnZookeeperLogic
// bound to the given request context and service context, with a
// context-aware logger attached.
func NewDeployKafkaOnZookeeperLogic(ctx context.Context, svcCtx *svc.ServiceContext) *DeployKafkaOnZookeeperLogic {
	logic := &DeployKafkaOnZookeeperLogic{
		ctx:    ctx,
		svcCtx: svcCtx,
	}
	logic.Logger = logx.WithContext(ctx)
	return logic
}

// DeployKafkaOnZookeeper validates the request, acknowledges it immediately,
// and performs the actual Kafka-on-ZooKeeper deployment asynchronously in a
// background goroutine; progress is reported via LogManager inside
// startDeployment, not through this RPC response.
func (l *DeployKafkaOnZookeeperLogic) DeployKafkaOnZookeeper(in *agent.DeployKafkaRequest) (*agent.DeployResponse, error) {
	if in.NodeInfo == nil {
		return &agent.DeployResponse{
			Code:    500,
			Message: "未配置节点信息, 无法执行kafka集群部署",
		}, errors.New("未配置节点信息, 无法执行kafka集群部署")
	}
	// BUGFIX: ConfigInfo is dereferenced below and KafkaConfig inside
	// startDeployment; without this guard a nil pointer panics the handler.
	if in.ConfigInfo == nil || in.KafkaConfig == nil {
		return &agent.DeployResponse{
			Code:    500,
			Message: "未配置组件信息, 无法执行kafka集群部署",
		}, errors.New("未配置组件信息, 无法执行kafka集群部署")
	}

	resp := &agent.DeployResponse{
		Code:    200,
		Message: fmt.Sprintf("请求已接收，正在部署 %s 到节点 %s", in.ConfigInfo.ComponentName, in.NodeInfo.Host),
	}

	// Fire-and-forget: failures are surfaced through the log pipeline.
	go l.startDeployment(in)

	return resp, nil
}
// startDeployment performs the full Kafka deployment on the local node:
// unpack the distribution, rewrite server.properties, install and start a
// systemd unit, verify it is active, record the component, then do the same
// for kafka_exporter. Any step failure logs an ERROR and aborts the rest.
// It is expected to be invoked in its own goroutine (see DeployKafkaOnZookeeper).
func (l *DeployKafkaOnZookeeperLogic) startDeployment(in *agent.DeployKafkaRequest) {

	var wg sync.WaitGroup

	wg.Add(1)

	// NOTE(review): the WaitGroup plus inner goroutine make this function
	// fully synchronous — the body could run inline. Kept as-is to avoid
	// changing the call pattern (and the file-level sync import).
	go func() {
		defer wg.Done()

		// Both managers report back to the api-server REST endpoint.
		apiBase := fmt.Sprintf("http://%s:%d",
			l.svcCtx.Config.RestServices["apiServer"].RestConf.Host,
			l.svcCtx.Config.RestServices["apiServer"].RestConf.Port)

		logManager := models.NewLogManager(100, apiBase)
		defer logManager.Close()

		flushInterval := 5 * time.Second // flush interval for batched uploads
		maxBatchSize := 100              // max records per upload batch
		retryDelay := 2 * time.Second    // delay between upload retries
		maxRetries := 3                  // max upload retries

		componentInfoManager := models.NewComponentInfoManager(
			1000,
			apiBase,
			flushInterval,
			maxBatchSize,
			retryDelay,
			maxRetries)
		defer componentInfoManager.Close()

		packageDirectory := in.ConfigInfo.PackagePath
		rtrimDir := utils.Rtrim(packageDirectory, "/")
		// ComponentName doubles as the tarball file name, e.g. "kafka_2.13-3.6.1.tgz".
		kafkaVersion := in.ConfigInfo.ComponentName

		installPath := utils.Rtrim(in.ConfigInfo.InstallPath, "/")

		baseInfo := fmt.Sprintf("安装包存放目录: %s, Kafka version: %s, Kafka 安装目录: %s", rtrimDir, kafkaVersion, installPath)
		step := 1
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", baseInfo, "Kafka", "Server")

		javaHome := "/usr/local/java"
		checkPathCommand := fmt.Sprintf(`
if [ ! -d "%s" ]; then
   sudo mkdir -p "%s"
fi
`, installPath, installPath)

		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "执行组件安装路径检查...", "Kafka", "Broker")
		output, err := utils.ExecCommand(checkPathCommand)
		if err != nil {
			message := fmt.Sprintf("检查组件安装路径异常: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Kafka", "Broker")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "检查组件安装路径完成", "Kafka", "Broker")

		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "解压安装包...", "Kafka", "Broker")
		tarCommand := fmt.Sprintf("sudo tar -xzf %s/%s -C %s", rtrimDir, kafkaVersion, installPath)
		output, err = utils.ExecCommand(tarCommand)
		if err != nil {
			message := fmt.Sprintf("解压安装包异常: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Kafka", "Broker")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "解压安装包完成", "Kafka", "Broker")

		// Strip the ".tgz" suffix to get the unpacked directory name.
		kafkaPackageName := utils.Rtrim(kafkaVersion, ".tgz")
		kafkaHome := fmt.Sprintf("%s/%s", installPath, kafkaPackageName)

		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "修改组件所属用户组...", "Kafka", "Broker")
		chownCommand := fmt.Sprintf("sudo chown -R %s:%s %s", in.NodeInfo.Username, in.NodeInfo.Username, kafkaHome)
		output, err = utils.ExecCommand(chownCommand)
		if err != nil {
			message := fmt.Sprintf("修改组件所属用户组异常: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Kafka", "Broker")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "修改组件所属用户组完成", "Kafka", "Broker")

		// Ensure the data directory exists and is owned by the service user.
		checkPathCommand = fmt.Sprintf(`
if [ ! -d "%s" ]; then
   sudo mkdir -p "%s"
fi
sudo chown -R %s:%s %s
`, in.KafkaConfig.LogDirs, in.KafkaConfig.LogDirs, in.NodeInfo.Username, in.NodeInfo.Username, in.KafkaConfig.LogDirs)
		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "检查组件数据存储目录...", "Kafka", "Broker")
		output, err = utils.ExecCommand(checkPathCommand)
		if err != nil {
			message := fmt.Sprintf("检查组件数据存储目录异常: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Kafka", "Broker")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "检查组件数据存储目录完成", "Kafka", "Broker")

		brokerId := in.KafkaConfig.BrokerId[in.NodeInfo.Host]
		serverProps := kafkaHome + "/config/server.properties"
		// setProp builds one sed command that replaces (or un-comments) a
		// single `key=value` line in server.properties.
		setProp := func(key, value string) string {
			return fmt.Sprintf("sed -i '/^\\s*#\\?\\s*%s=/c\\%s=%s' %s", key, key, value, serverProps)
		}
		edits := []string{
			setProp("broker.id", fmt.Sprintf("%d", brokerId)),
			// The listener patterns keep the "PLAINTEXT:" anchor so only
			// plaintext listener lines are rewritten.
			fmt.Sprintf("sed -i '/^\\s*#\\?\\s*listeners=PLAINTEXT:/c\\listeners=PLAINTEXT://%s:%d' %s", in.NodeInfo.Host, in.KafkaConfig.Port, serverProps),
			fmt.Sprintf("sed -i '/^\\s*#\\?\\s*advertised.listeners=PLAINTEXT:/c\\advertised.listeners=PLAINTEXT://%s:%d' %s", in.NodeInfo.Host, in.KafkaConfig.Port, serverProps),
			setProp("num.network.threads", fmt.Sprintf("%d", in.KafkaConfig.NumNetworkThreads)),
			setProp("num.io.threads", fmt.Sprintf("%d", in.KafkaConfig.NumIOThreads)),
			setProp("socket.send.buffer.bytes", fmt.Sprintf("%d", in.KafkaConfig.SocketSendBufferBytes)),
			setProp("socket.request.max.bytes", fmt.Sprintf("%d", in.KafkaConfig.SocketRequestMaxBytes)),
			setProp("log.dirs", in.KafkaConfig.LogDirs),
			setProp("num.partitions", fmt.Sprintf("%d", in.KafkaConfig.NumPartitions)),
			setProp("num.recovery.threads.per.data.dir", fmt.Sprintf("%d", in.KafkaConfig.NumRecoveryThreadsPerDataDir)),
			setProp("offsets.topic.replication.factor", fmt.Sprintf("%d", in.KafkaConfig.OffsetsTopicReplicationFactor)),
			setProp("transaction.state.log.replication.factor", fmt.Sprintf("%d", in.KafkaConfig.TransactionStateLogReplicationFactor)),
			setProp("transaction.state.log.min.isr", fmt.Sprintf("%d", in.KafkaConfig.TransactionStateLogMinISR)),
			setProp("log.flush.interval.messages", fmt.Sprintf("%d", in.KafkaConfig.LogFlushIntervalMessages)),
			setProp("log.flush.interval.ms", fmt.Sprintf("%d", in.KafkaConfig.LogFlushIntervalMs)),
			setProp("log.retention.hours", fmt.Sprintf("%d", in.KafkaConfig.LogRetentionHours)),
			// NOTE(review): log.retention.bytes is fed from LogSegmentBytes,
			// mirroring the original code — looks like a copy-paste slip;
			// confirm whether a dedicated LogRetentionBytes field exists.
			setProp("log.retention.bytes", fmt.Sprintf("%d", in.KafkaConfig.LogSegmentBytes)),
			setProp("log.segment.bytes", fmt.Sprintf("%d", in.KafkaConfig.LogSegmentBytes)),
			setProp("log.retention.check.interval.ms", fmt.Sprintf("%d", in.KafkaConfig.LogRetentionCheckIntervalMs)),
			// BUGFIX: Kafka's property names are zookeeper.connect and
			// zookeeper.connection.timeout.ms; the previous
			// "zookeeper_logic." prefix (an over-eager identifier rename)
			// matched nothing in server.properties and wrote invalid keys.
			setProp("zookeeper.connect", in.KafkaConfig.ZookeeperConnect),
			setProp("zookeeper.connection.timeout.ms", fmt.Sprintf("%d", in.KafkaConfig.ZookeeperConnectionTimeoutMs)),
			setProp("group.initial.rebalance.delay.ms", fmt.Sprintf("%d", in.KafkaConfig.GroupUnitialRebalanceDelayMs)),
		}
		configCommand := strings.Join(edits, " && ")
		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "配置server.properties文件...", "Kafka", "Broker")
		output, err = utils.ExecCommand(configCommand)
		if err != nil {
			message := fmt.Sprintf("配置 server.properties 文件异常: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Kafka", "Broker")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "配置server.properties文件完成", "Kafka", "Broker")

		// BUGFIX: the format arguments were previously supplied in the wrong
		// order — User=/Group= received javaHome while PATH/JAVA_HOME received
		// the username. Each argument is now annotated with its placeholder.
		// NOTE(review): LimitNOFILE=655355 looks like a typo (65535/655350?) —
		// preserved as-is; confirm the intended limit.
		kafkaServiceFile := fmt.Sprintf(`[Unit]
Description=Apache Kafka Server
Documentation=https://kafka.apache.org/documentation.html
After=network.target

[Service]
Type=simple
User=%s
Group=%s
Environment="PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:%s/bin"
Environment="JAVA_HOME=%s"
ExecStart=%s/bin/kafka-server-start.sh %s/config/server.properties
ExecReload=/bin/kill -s HUP $MAINPID
ExecStop=%s/bin/kafka-server-stop.sh
Restart=on-failure
LimitNOFILE=655355
LimitMEMLOCK=infinity

[Install]
WantedBy=multi-user.target`,
			in.NodeInfo.Username, // User=
			in.NodeInfo.Username, // Group=
			kafkaHome,            // PATH suffix %s/bin
			javaHome,             // JAVA_HOME
			kafkaHome,            // ExecStart script dir
			kafkaHome,            // ExecStart config dir
			kafkaHome)            // ExecStop script dir

		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建kafka.service文件...", "Kafka", "Broker")
		writeServiceFileCommand := fmt.Sprintf("echo '%s' | sudo tee /etc/systemd/system/kafka.service", kafkaServiceFile)
		output, err = utils.ExecCommand(writeServiceFileCommand)
		if err != nil {
			message := fmt.Sprintf("创建 kafka.service 文件异常: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Kafka", "Broker")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建kafka.service文件完成", "Kafka", "Broker")

		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动Kafka服务...", "Kafka", "Broker")

		reloadSystemdCommand := "sudo systemctl daemon-reload && sudo systemctl enable kafka.service --now"
		output, err = utils.ExecCommand(reloadSystemdCommand)
		if err != nil {
			message := fmt.Sprintf("启动Kafka服务异常: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Kafka", "Broker")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动Kafka服务完成", "Kafka", "Broker")
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "等待Kafka服务完全启动", "Kafka", "Broker")

		// Give systemd a moment before polling the unit state.
		time.Sleep(3 * time.Second)

		step++
		statusCommand := "sudo systemctl is-active kafka.service"
		output, err = utils.ExecCommand(statusCommand)
		if err != nil || strings.TrimSpace(output) != "active" {
			message := fmt.Sprintf("Kafka启动后未正常运行: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Kafka", "Broker")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "Kafka部署成功", "Kafka", "Broker")

		// BUGFIX: the two checks below previously tested the stale `err`
		// (always nil at this point) instead of their own error values, so
		// IP/hostname lookup failures were silently ignored.
		currentIP, currentIPErr := utils.GetCurrentInternalIP()
		if currentIPErr != nil {
			message := fmt.Sprintf("%s: 无法获取当前主机IP: %v", in.NodeInfo.Host, currentIPErr)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Kafka", "Broker")
			return
		}
		currentHost, currentHostErr := utils.GetCurrentHostname()
		if currentHostErr != nil {
			message := fmt.Sprintf("%s: 无法获取当前主机名: %v", in.NodeInfo.Host, currentHostErr)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Kafka", "Broker")
			return
		}

		kafkaInfo := &models.ComponentInfo{
			ClusterID:      in.ClusterID,
			ComponentName:  "Kafka",
			Version:        in.Version,
			NodeHost:       currentHost,
			NodeIP:         currentIP,
			ComponentRole:  "Broker",
			HomePath:       kafkaHome,
			DataStorageDir: in.KafkaConfig.LogDirs,
			Port:           strconv.Itoa(int(in.KafkaConfig.Port)),
			Status:         true,
			AdditionalInfo: fmt.Sprintf("config_file=%s/config/server.properties", kafkaHome),
		}

		componentInfoManager.SendComponentInfo(kafkaInfo)

		exporterHome := installPath + "/kafka_exporter-1.8.0.linux-amd64"

		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "部署kafka_exporter服务...", "kafka_exporter", "Server")
		// BUGFIX: the tarball directory was previously kafkaPackageName (a
		// version string such as "kafka_2.13-x.y.z", not a path); the package
		// directory is what holds the tarballs.
		// TODO(review): confirm the "/package" sub-directory against the
		// actual package layout on disk.
		tarExporterCommand := fmt.Sprintf("sudo tar -xzf %s/package/kafka_exporter-1.8.0.linux-amd64.tar.gz -C %s", rtrimDir, installPath)
		tarResult, tarErr := utils.ExecCommand(tarExporterCommand)
		if tarErr != nil {
			message := fmt.Sprintf("解压kafka exporter压缩包异常: %v, output: %s", tarErr, tarResult)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "kafka_exporter", "Server")
			return
		}

		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "解压kafka_exporter安装包完成", "kafka_exporter", "Server")

		// BUGFIX: systemd ExecStart does not interpret shell redirection, so
		// the previous "> .../kafka_exporter.log" was passed to kafka_exporter
		// as literal arguments (stdout goes to the journal instead); and
		// --web.listen-address requires host:port form, so "19308" is now
		// ":19308".
		exporterServiceFile := fmt.Sprintf(`[Unit]
Description=kafka monitor
After=local-fs.target network-online.target network.target
Wants=local-fs.target network-online.target network.target

[Service]
Type=simple
User=%s
Group=%s
ExecStart=%s/kafka_exporter \
   --kafka.server=%s:%d \
   --log.level=info \
   --web.listen-address=:19308
RestartSec=10
StartLimitInterval=0
Restart=on-failure

[Install]
WantedBy=multi-user.target`,
			in.NodeInfo.Username,
			in.NodeInfo.Username,
			exporterHome,
			in.NodeInfo.Host,
			in.KafkaConfig.Port)
		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建kafka-exporter.service文件...", "kafka_exporter", "Server")
		exporterServiceCommand := fmt.Sprintf("echo '%s' | sudo tee /etc/systemd/system/kafka-exporter.service", exporterServiceFile)
		exporterResult, exporterErr := utils.ExecCommand(exporterServiceCommand)
		if exporterErr != nil {
			message := fmt.Sprintf("创建 kafka-exporter.service 文件异常: %v, output: %s", exporterErr, exporterResult)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "kafka_exporter", "Server")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建kafka-exporter.service文件完成", "kafka_exporter", "Server")

		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动kafka_exporter服务...", "kafka_exporter", "Server")

		// BUGFIX: dropped the duplicated "sudo sudo".
		exporterStartCommand := "sudo systemctl daemon-reload && sudo systemctl enable kafka-exporter --now"
		exporterStartResult, exporterStartErr := utils.ExecCommand(exporterStartCommand)
		if exporterStartErr != nil {
			message := fmt.Sprintf("启动kafka exporter服务异常: %v, output: %s", exporterStartErr, exporterStartResult)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "kafka_exporter", "Server")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动kafka_exporter服务完成", "kafka_exporter", "Server")
		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "等待kafka_exporter服务完全启动...", "kafka_exporter", "Server")

		time.Sleep(3 * time.Second)
		step++
		statusCommand = "sudo systemctl is-active kafka-exporter.service"
		exporterStatusResult, exporterStatusErr := utils.ExecCommand(statusCommand)
		if exporterStatusErr != nil || strings.TrimSpace(exporterStatusResult) != "active" {
			message := fmt.Sprintf("kafka exporter服务启动后未正常运行: %v, output: %s", exporterStatusErr, exporterStatusResult)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "kafka_exporter", "Server")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "kafka_exporter服务部署成功", "kafka_exporter", "Server")

		exporterInfo := &models.ComponentInfo{
			ClusterID:      in.ClusterID,
			ComponentName:  "kafka_exporter",
			Version:        "1.8.0",
			NodeHost:       currentHost,
			NodeIP:         currentIP,
			ComponentRole:  "Server",
			HomePath:       exporterHome,
			DataStorageDir: "",
			Port:           "19308",
			Status:         true,
			AdditionalInfo: "",
		}

		componentInfoManager.SendComponentInfo(exporterInfo)

	}()
	wg.Wait()
}
