package hdfsdeploylogic

import (
	"context"
	"errors"
	"fmt"
	"github.com/zeromicro/go-zero/core/logx"
	"strings"
	"sync"
	"time"
	"yunzhan/common/models"
	utils "yunzhan/common/utils"
	"yunzhan/rpc-server/internal/svc"
	agent "yunzhan/rpc-server/pb"
)

// DeployHdfsHALogic carries the per-request state for deploying an HDFS
// high-availability cluster on this node: the request context, the shared
// service context (configuration, downstream clients), and a context-scoped
// logger embedded from go-zero's logx.
type DeployHdfsHALogic struct {
	ctx    context.Context     // request-scoped context
	svcCtx *svc.ServiceContext // shared service configuration and dependencies
	logx.Logger                // context-aware structured logger
}

// NewDeployHdfsHALogic constructs a DeployHdfsHALogic bound to the given
// request context and service context, with a logger scoped to ctx.
func NewDeployHdfsHALogic(ctx context.Context, svcCtx *svc.ServiceContext) *DeployHdfsHALogic {
	logic := new(DeployHdfsHALogic)
	logic.ctx = ctx
	logic.svcCtx = svcCtx
	logic.Logger = logx.WithContext(ctx)
	return logic
}

// DeployHdfsHA validates the HA deployment request, acknowledges it
// immediately, and runs the actual deployment asynchronously in a goroutine.
// The returned response only means the request was accepted; progress and
// failures are reported through the log/component managers inside
// startDeployment.
func (l *DeployHdfsHALogic) DeployHdfsHA(in *agent.DeployHadoopRequest) (*agent.DeployResponse, error) {
	if in.NodeInfo == nil {
		return &agent.DeployResponse{
			Code:    500,
			Message: "未配置节点信息, 无法执行HDFS集群部署",
		}, errors.New("未配置节点信息, 无法执行HDFS集群部署")
	}
	// ConfigInfo is dereferenced below (and throughout startDeployment);
	// guard it the same way as NodeInfo instead of panicking on nil.
	if in.ConfigInfo == nil {
		return &agent.DeployResponse{
			Code:    500,
			Message: "未配置组件信息, 无法执行HDFS集群部署",
		}, errors.New("未配置组件信息, 无法执行HDFS集群部署")
	}
	// HadoopConfig is dereferenced unconditionally by startDeployment.
	if in.HadoopConfig == nil {
		return &agent.DeployResponse{
			Code:    500,
			Message: "未配置Hadoop参数, 无法执行HDFS集群部署",
		}, errors.New("未配置Hadoop参数, 无法执行HDFS集群部署")
	}

	// Acknowledge immediately; the deployment continues in the background.
	resp := &agent.DeployResponse{
		Code:    200,
		Message: fmt.Sprintf("请求已接收，正在部署 %s 到节点 %s", in.ConfigInfo.ComponentName, in.NodeInfo.Host),
	}

	go l.startDeployment(in)

	return resp, nil
}
// hdfsSystemdUnit renders a systemd unit file that manages one HDFS daemon
// ("journalnode", "namenode", "zkfc" or "datanode") through
// "<home>/bin/hdfs --daemon start|stop <daemon>".
func hdfsSystemdUnit(description, daemon, user, javaHome, home string) string {
	return fmt.Sprintf(`[Unit]
Description=%s
After=network.target
Wants=network-online.target
After=network-online.target

[Service]
User=%s
Group=%s
Environment="JAVA_HOME=%s"
ExecStart=%s/bin/hdfs --daemon start %s
ExecStop=%s/bin/hdfs --daemon stop %s
Restart=on-failure
RestartSec=10
SuccessExitStatus=0 143
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target`, description, user, user, javaHome, home, daemon, home, daemon)
}

// startDeployment runs the complete HDFS HA deployment workflow on the current
// node: it extracts the Hadoop tarball, rewrites core-site.xml / hdfs-site.xml /
// hadoop-env.sh / workers, then installs and starts systemd units for
// JournalNode, NameNode, DFSZKFailoverController and DataNode according to the
// role lists in the request. Step-by-step progress is streamed to the API
// server via LogManager; each successfully deployed role is reported via
// ComponentInfoManager. The function blocks until the deployment finishes
// (callers invoke it in its own goroutine).
//
// Fixes over the previous version:
//   - errors from GetCurrentInternalIP/GetCurrentHostname are now actually
//     checked (the old code tested the stale `err` variable);
//   - a service only counts as started when `systemctl is-active` reports
//     exactly "active" (the old retry loop accepted any non-empty output);
//   - the hadoop-env.sh heredoc had 10 placeholders but only 8 arguments, so
//     HDFS_JOURNALNODE_USER/HDFS_ZKFC_USER were written as "%!s(MISSING)";
//     HADOOP_HOME now points at the absolute install dir;
//   - config-file paths are built from the absolute `home` instead of the bare
//     directory name (which resolved relative to the process cwd);
//   - the install-path check now lists the directory so the "already
//     extracted" test can actually match;
//   - Hadoop property keys ha.zookeeper.quorum / ha.zookeeper.session-timeout.ms /
//     hadoop.http.staticuser.user restored (a package rename had leaked
//     "_logic" into the literals).
func (l *DeployHdfsHALogic) startDeployment(in *agent.DeployHadoopRequest) {

	var wg sync.WaitGroup
	wg.Add(1)

	// NOTE(review): the goroutine + WaitGroup make this effectively synchronous;
	// kept so the deferred manager Close calls keep their original timing.
	go func() {
		defer wg.Done()

		apiServer := fmt.Sprintf("http://%s:%d",
			l.svcCtx.Config.RestServices["apiServer"].RestConf.Host,
			l.svcCtx.Config.RestServices["apiServer"].RestConf.Port)

		// LogManager streams per-step deployment logs to the API server.
		logManager := models.NewLogManager(100, apiServer)
		defer logManager.Close()

		flushInterval := 5 * time.Second // flush interval
		maxBatchSize := 100              // max batch size
		retryDelay := 2 * time.Second    // retry delay
		maxRetries := 3                  // max retry attempts
		// ComponentInfoManager reports deployed component roles to the API server.
		componentInfoManager := models.NewComponentInfoManager(1000, apiServer,
			flushInterval,
			maxBatchSize,
			retryDelay,
			maxRetries)
		defer componentInfoManager.Close()

		packageDirectory := in.ConfigInfo.PackagePath
		rtrimDir := utils.Rtrim(packageDirectory, "/")
		hadoopVersion := in.ConfigInfo.ComponentName
		installPath := utils.Rtrim(in.ConfigInfo.InstallPath, "/")
		baseInfo := fmt.Sprintf("安装包存放目录: %s, HDFS version: %s, HDFS 安装目录: %s", rtrimDir, hadoopVersion, installPath)
		step := 1
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", baseInfo, "HDFS", "Server")

		javaHome := "/usr/local/java"
		// NOTE(review): assumes utils.Rtrim strips the ".tar.gz" suffix (not a
		// character cutset) — confirm against its implementation.
		hadoopHome := utils.Rtrim(hadoopVersion, ".tar.gz")
		// Absolute Hadoop home; used for every path below.
		home := fmt.Sprintf("%s/%s", installPath, hadoopHome)

		// Ensure the install path exists, then list it so the output can tell us
		// whether the tarball was already extracted (mkdir -p alone produces no
		// output, so the old "already extracted" check could never match).
		checkPathCommand := fmt.Sprintf(`
if [ ! -d "%s" ]; then
   sudo mkdir -p "%s"
fi
ls "%s"
`, installPath, installPath, installPath)

		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "执行组件安装路径检查...", "HDFS", "Server")
		output, err := utils.ExecCommand(checkPathCommand)
		if err != nil {
			message := fmt.Sprintf("检查组件安装路径异常: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "Server")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "检查组件安装路径完成", "HDFS", "Server")

		if !strings.Contains(output, hadoopHome) {
			step++
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "解压安装包...", "HDFS", "Server")
			tarCommand := fmt.Sprintf("sudo tar -xzf %s/%s -C %s", rtrimDir, hadoopVersion, installPath)
			output, err = utils.ExecCommand(tarCommand)
			if err != nil {
				message := fmt.Sprintf("解压安装包异常: %v, output: %s", err, output)
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "Server")
				return
			}
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "解压安装包完成", "HDFS", "Server")
		} else {
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "HDFS安装目录已存在，跳过解压", "HDFS", "Server")
		}

		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "修改组件所属用户组...", "HDFS", "Server")
		chownCommand := fmt.Sprintf("sudo chown -R %s:%s %s", in.NodeInfo.Username, in.NodeInfo.Username, home)
		output, err = utils.ExecCommand(chownCommand)
		if err != nil {
			message := fmt.Sprintf("修改组件所属用户组异常: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "Server")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "修改组件所属用户组完成", "HDFS", "Server")

		// Ensure the data storage directory exists and is owned by the service user.
		checkPathCommand = fmt.Sprintf(`
if [ ! -d "%s" ]; then
   sudo mkdir -p "%s"
fi
sudo chown -R %s:%s %s
`, in.HadoopConfig.CoreSiteConfig.HadoopTmpDir,
			in.HadoopConfig.CoreSiteConfig.HadoopTmpDir,
			in.NodeInfo.Username,
			in.NodeInfo.Username,
			in.HadoopConfig.CoreSiteConfig.HadoopTmpDir)
		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "检查组件数据存储目录...", "HDFS", "Server")
		output, err = utils.ExecCommand(checkPathCommand)
		if err != nil {
			message := fmt.Sprintf("检查组件数据存储目录异常: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "Server")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "检查组件数据存储目录完成", "HDFS", "Server")

		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "加载core-site.xml配置文件...", "HDFS", "Server")
		// Build the path from the absolute home dir; the old relative path
		// depended on the process working directory.
		coreSitePath := fmt.Sprintf("%s/etc/hadoop/core-site.xml", home)
		coreConfig, err := utils.LoadConfiguration(coreSitePath)
		if err != nil {
			message := fmt.Sprintf("加载 core-site.xml 配置文件异常: %v", err)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "Server")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "加载core-site.xml配置文件完成", "HDFS", "Server")

		coreConfig.SetProperty("fs.defaultFS", in.HadoopConfig.CoreSiteConfig.FsDefaultFS, "指定默认文件系统的名称。可以是一个完全合格的 URI，例如 hdfs://namenode:port/, file:/// 等")
		coreConfig.SetProperty("hadoop.tmp.dir", in.HadoopConfig.CoreSiteConfig.HadoopTmpDir, "其他临时目录的基础目录。此参数指定临时文件的基础目录")
		// Property keys restored to the official Hadoop names; a package rename
		// had leaked "_logic" into the string literals.
		coreConfig.SetProperty("ha.zookeeper.quorum", in.HadoopConfig.CoreSiteConfig.HaZookeeperQuorum, "HDFS 高可用性（HA）使用的 ZooKeeper quorum。此参数指定 HDFS HA 功能使用的 ZooKeeper 服务器")
		coreConfig.SetProperty("ha.zookeeper.session-timeout.ms", in.HadoopConfig.CoreSiteConfig.HaZookeeperSessionTimeoutMs, "HDFS 高可用性（HA）使用的 ZooKeeper 会话超时。此参数指定 ZooKeeper 用于 HDFS HA 的会话超时时间（单位：毫秒）")
		coreConfig.SetProperty("hadoop.http.staticuser.user", in.HadoopConfig.CoreSiteConfig.HadoopHttpStaticuserUser, "HTTP Web 界面认证的静态用户。此参数指定静态用户 Web 认证方法用于 HTTP Web 界面的用户名")
		coreConfig.SetProperty("io.file.buffer.size", in.HadoopConfig.CoreSiteConfig.IoFileBufferSize, "读写文件时使用的缓冲区大小。此参数指定文件 I/O 操作的缓冲区大小")
		coreConfig.SetProperty("io.bytes.per.checksum", in.HadoopConfig.CoreSiteConfig.IoBytesPerChecksum, "每次校验的数据字节数。此参数指定数据校验时每次处理的字节数")
		coreConfig.SetProperty("io.skip.checksum.errors", in.HadoopConfig.CoreSiteConfig.IoSkipChecksumErrors, "是否跳过校验错误。此参数指定是否在数据校验过程中跳过校验错误")
		coreConfig.SetProperty("io.compression.codecs", in.HadoopConfig.CoreSiteConfig.IoCompressionCodecs, "支持的压缩编解码器列表。此参数指定 Hadoop 支持的压缩编解码器")
		coreConfig.SetProperty("io.serializations", in.HadoopConfig.CoreSiteConfig.IoSerializations, "支持的序列化类列表。此参数指定 Hadoop 支持的序列化类")
		coreConfig.SetProperty("io.seqfile.local.dir", in.HadoopConfig.CoreSiteConfig.IoSeqfileLocalDir, "本地序列文件的存储目录。此参数指定本地序列文件的存储目录")
		coreConfig.SetProperty("fs.trash.interval", in.HadoopConfig.CoreSiteConfig.FsTrashInterval, "垃圾回收站保留时间（分钟）。此参数指定垃圾回收站中文件的保留时间")
		coreConfig.SetProperty("fs.trash.checkpoint.interval", in.HadoopConfig.CoreSiteConfig.FsTrashCheckpointInterval, "垃圾回收站检查点间隔（分钟）。此参数指定垃圾回收站的检查点间隔时间")
		coreConfig.SetProperty("fs.permissions.umask-mode", in.HadoopConfig.CoreSiteConfig.FsPermissionsUmaskMode, "文件权限的 umask 模式。此参数指定文件权限的 umask 模式")
		coreConfig.SetProperty("ha.health-monitor.connect-retry-interval.ms", in.HadoopConfig.CoreSiteConfig.HaHealthMonitorConnectRetryIntervalMs, "健康监测连接重试间隔（毫秒）。此参数指定健康监测的连接重试间隔时间")
		coreConfig.SetProperty("ha.health-monitor.check-interval.ms", in.HadoopConfig.CoreSiteConfig.HaHealthMonitorCheckIntervalMs, "健康监测检查间隔（毫秒）。此参数指定健康监测的检查间隔时间")
		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "更新core-site.xml配置文件...", "HDFS", "Server")
		err = coreConfig.Save(coreSitePath)
		if err != nil {
			message := fmt.Sprintf("更新 core-site.xml 配置文件异常: %v", err)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "Server")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "更新core-site.xml配置文件完成", "HDFS", "Server")

		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "加载hdfs-site.xml配置文件...", "HDFS", "Server")
		hdfsSitePath := fmt.Sprintf("%s/etc/hadoop/hdfs-site.xml", home)
		hdfsConfig, err := utils.LoadConfiguration(hdfsSitePath)
		if err != nil {
			message := fmt.Sprintf("加载hdfs-site.xml配置文件异常: %v", err)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "Server")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "加载hdfs-site.xml配置文件完成", "HDFS", "Server")

		hdfsConfig.SetProperty("dfs.replication", in.HadoopConfig.HdfsSiteConfig.DfsReplication, "数据块的默认副本数。此参数指定数据块在 HDFS 中的默认副本数")
		hdfsConfig.SetProperty("dfs.namenode.name.dir", in.HadoopConfig.HdfsSiteConfig.DfsNamenodeNameDir, "NameNode 存储元数据的目录。此参数指定 NameNode 存储文件系统元数据的本地目录")
		hdfsConfig.SetProperty("dfs.datanode.data.dir", in.HadoopConfig.HdfsSiteConfig.DfsDatanodeDataDir, "DataNode 存储数据块的目录。此参数指定 DataNode 存储数据块的本地目录")
		hdfsConfig.SetProperty("dfs.http.address", in.HadoopConfig.HdfsSiteConfig.DfsHttpAddress, "NameNode 的 HTTP 地址。此参数指定 NameNode 的 HTTP 服务地址")
		hdfsConfig.SetProperty("dfs.permissions.enabled", in.HadoopConfig.HdfsSiteConfig.DfsPermissionsEnabled, "是否启用 HDFS 权限检查。此参数指定是否启用 HDFS 的权限检查功能")
		hdfsConfig.SetProperty("dfs.webhdfs.enabled", in.HadoopConfig.HdfsSiteConfig.DfsWebHdfsEnabled, "是否启用 WebHDFS REST API。此参数指定是否启用 WebHDFS REST API 服务")
		hdfsConfig.SetProperty("dfs.nameservices", in.HadoopConfig.HdfsSiteConfig.DfsNameServices, "HDFS 名称服务的逻辑名称。此参数指定 HDFS 名称服务的逻辑名称")
		hdfsConfig.SetProperty("dfs.ha.automatic-failover.enabled", in.HadoopConfig.HdfsSiteConfig.DfsHAAutomaticFailoverEnabled, "是否启用自动故障转移。此参数指定是否启用 HDFS 高可用性的自动故障转移功能")
		hdfsConfig.SetProperty("dfs.ha.fencing.methods", in.HadoopConfig.HdfsSiteConfig.DfsHAFencingMethods, "故障转移期间的隔离方法。此参数指定 HDFS HA 故障转移期间的隔离方法")
		hdfsConfig.SetProperty("dfs.ha.fencing.ssh.connect-timeout", in.HadoopConfig.HdfsSiteConfig.DfsHAFencingSshConnectTimeout, "SSH 隔离方法的连接超时时间。此参数指定 SSH 隔离方法的连接超时时间")
		hdfsConfig.SetProperty("dfs.ha.fencing.ssh.private-key-files", in.HadoopConfig.HdfsSiteConfig.DfsHAFencingSshPrivateKeyFiles, "SSH 隔离方法使用的私钥文件。此参数指定 SSH 隔离方法使用的私钥文件")
		hdfsConfig.SetProperty("dfs.namenode.shared.edits.dir", in.HadoopConfig.HdfsSiteConfig.DfsNamenodeSharedEditsDir, "NameNode 共享编辑日志的目录。此参数指定 NameNode 共享编辑日志的目录")
		hdfsConfig.SetProperty("dfs.blocksize", in.HadoopConfig.HdfsSiteConfig.DfsBlockSize, "HDFS 文件块的大小。此参数指定 HDFS 中文件块的大小，单位为字节")
		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "更新hdfs-site.xml配置文件...", "HDFS", "Server")
		err = hdfsConfig.Save(hdfsSitePath)
		if err != nil {
			message := fmt.Sprintf("更新 hdfs-site.xml 配置文件异常: %v", err)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "Server")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "更新hdfs-site.xml配置文件完成", "HDFS", "Server")

		// Append environment exports to hadoop-env.sh. The heredoc has exactly
		// 10 placeholders: path, JAVA_HOME, HADOOP_HOME, then seven *_USER
		// entries (the old version supplied only 5 usernames, leaving
		// HDFS_JOURNALNODE_USER/HDFS_ZKFC_USER as "%!s(MISSING)").
		// ConnectTimeout takes plain seconds per ssh_config(5), not "10s".
		hadoopEnvPath := fmt.Sprintf("%s/etc/hadoop/hadoop-env.sh", home)
		serviceUser := in.NodeInfo.Username
		appendHadoopEnvCommand := fmt.Sprintf(`
sudo bash -c 'cat <<EOF >>%s
export JAVA_HOME=%s
export HADOOP_HOME=%s
export HDFS_NAMENODE_USER=%s
export HDFS_DATANODE_USER=%s
export HDFS_SECONDARYNAMENODE_USER=%s
export YARN_RESOURCEMANAGER_USER=%s
export YARN_NODEMANAGER_USER=%s
export HDFS_JOURNALNODE_USER=%s
export HDFS_ZKFC_USER=%s
export HADOOP_SSH_OPTS="-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10"
export HDFS_NAMENODE_OPTS="-XX:+AlwaysPreTouch -Xss1m  -Xms2g -Xmx2g -XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:G1HeapRegionSize=4m -XX:InitiatingHeapOccupancyPercent=30 -XX:G1ReservePercent=15"
EOF'
`, hadoopEnvPath, javaHome, home, serviceUser, serviceUser, serviceUser, serviceUser, serviceUser, serviceUser, serviceUser)
		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "配置hadoop-env.sh文件...", "HDFS", "Server")
		output, err = utils.ExecCommand(appendHadoopEnvCommand)
		if err != nil {
			message := fmt.Sprintf("配置hadoop-env.sh文件异常: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "Server")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "配置hadoop-env.sh文件完成", "HDFS", "Server")

		// Write the DataNode host list to the workers file.
		workersFile := fmt.Sprintf("%s/etc/hadoop/workers", home)
		var workers strings.Builder
		for _, datanode := range in.HadoopConfig.DatanodeList {
			workers.WriteString(datanode)
			workers.WriteString("\n")
		}
		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "配置workers文件...", "HDFS", "Server")
		writeWorkersFileCommand := fmt.Sprintf("echo -e '%s' | sudo tee %s", workers.String(), workersFile)
		output, err = utils.ExecCommand(writeWorkersFileCommand)
		if err != nil {
			message := fmt.Sprintf("配置workers文件异常: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "Server")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "配置workers文件完成", "HDFS", "Server")

		step++
		// Check the dedicated error variables here — the old code tested the
		// stale `err` from the previous command, so these failures were ignored.
		currentIP, currentIPErr := utils.GetCurrentInternalIP()
		if currentIPErr != nil {
			message := fmt.Sprintf("%s: 无法获取当前主机IP: %v", in.NodeInfo.Host, currentIPErr)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "Server")
			return
		}
		currentHost, currentHostErr := utils.GetCurrentHostname()
		if currentHostErr != nil {
			message := fmt.Sprintf("%s: 无法获取当前主机名: %v", in.NodeInfo.Host, currentHostErr)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "Server")
			return
		}

		// ensureActive polls `systemctl is-active` for the unit; on failure it
		// retries up to `retries` times (3s apart), logging under the given
		// role. A unit only counts as started when systemctl reports exactly
		// "active" (the old retry loop accepted any non-empty output, e.g.
		// "failed"). Returns false after logging the final ERROR.
		ensureActive := func(service, role string, retries int) bool {
			statusCommand := fmt.Sprintf("sudo systemctl is-active %s", service)
			statusOutput, statusErr := utils.ExecCommand(statusCommand)
			if statusErr == nil && strings.TrimSpace(statusOutput) == "active" {
				return true
			}
			if retries > 0 {
				message := fmt.Sprintf("%s服务启动异常, 3秒后重试: %v, output: %s", role, statusErr, statusOutput)
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", role)
				for i := 0; i < retries; i++ {
					time.Sleep(3 * time.Second)
					statusOutput, statusErr = utils.ExecCommand(statusCommand)
					if statusErr == nil && strings.TrimSpace(statusOutput) == "active" {
						return true
					}
				}
			}
			message := fmt.Sprintf("%s服务启动后未正常运行: %v, output: %s", role, statusErr, statusOutput)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", role)
			return false
		}

		// reportRole records one successfully deployed HDFS role on this node.
		reportRole := func(role string) {
			componentInfoManager.SendComponentInfo(&models.ComponentInfo{
				ClusterID:      in.ClusterID,
				ComponentName:  "HDFS",
				Version:        in.Version,
				NodeHost:       currentHost,
				NodeIP:         currentIP,
				ComponentRole:  role,
				HomePath:       home,
				DataStorageDir: in.HadoopConfig.HdfsSiteConfig.DfsDatanodeDataDir,
				Port:           in.HadoopConfig.HdfsSiteConfig.DfsHttpAddress,
				Status:         true,
				AdditionalInfo: fmt.Sprintf("config_file=%s/etc/hadoop/hdfs-site.xml", home),
			})
		}

		// ---- JournalNode deployment ----
		if utils.Contains(in.HadoopConfig.JournalNodeList, currentHost) || utils.Contains(in.HadoopConfig.JournalNodeList, currentIP) {

			journalnodeServiceFile := hdfsSystemdUnit("Hadoop JournalNode Service", "journalnode", in.NodeInfo.Username, javaHome, home)

			step++
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建hadoop-journalnode.service文件...", "HDFS", "JournalNode")
			writeServiceFileCommand := fmt.Sprintf("echo '%s' | sudo tee /etc/systemd/system/hadoop-journalnode.service", journalnodeServiceFile)
			output, err = utils.ExecCommand(writeServiceFileCommand)
			if err != nil {
				message := fmt.Sprintf("创建hadoop-journalnode.service文件异常: %v, output: %s", err, output)
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "JournalNode")
				return
			}
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建hadoop-journalnode.service文件完成", "HDFS", "JournalNode")

			journalNodeStartCommand := "sudo systemctl daemon-reload && sudo systemctl enable hadoop-journalnode.service --now"
			step++
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动JournalNode服务...", "HDFS", "JournalNode")
			output, err = utils.ExecCommand(journalNodeStartCommand)
			if err != nil {
				message := fmt.Sprintf("启动JournalNode服务异常: %v, output: %s", err, output)
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "JournalNode")
				return
			}
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动JournalNode服务完成", "HDFS", "JournalNode")

			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "等待JournalNode服务完全启动", "HDFS", "JournalNode")
			time.Sleep(3 * time.Second)

			step++
			if !ensureActive("hadoop-journalnode.service", "JournalNode", 0) {
				return
			}
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "JournalNode服务部署成功", "HDFS", "JournalNode")

			reportRole("JournalNode")
		}

		// ---- NameNode / DFSZKFailoverController deployment ----
		if utils.Contains(in.HadoopConfig.NamenodeList, currentHost) || utils.Contains(in.HadoopConfig.NamenodeList, currentIP) {

			namenodeServiceFile := hdfsSystemdUnit("Hadoop NameNode Service", "namenode", in.NodeInfo.Username, javaHome, home)
			step++
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建hadoop-namenode.service文件...", "HDFS", "NameNode")
			writeServiceFileCommand := fmt.Sprintf("echo '%s' | sudo tee /etc/systemd/system/hadoop-namenode.service", namenodeServiceFile)
			output, err = utils.ExecCommand(writeServiceFileCommand)
			if err != nil {
				message := fmt.Sprintf("创建 hadoop-namenode.service 文件异常: %v, output: %s", err, output)
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "NameNode")
				return
			}
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建hadoop-namenode.service文件完成", "HDFS", "NameNode")

			zkfcServiceFile := hdfsSystemdUnit("Hadoop ZK Failover Controller Service", "zkfc", in.NodeInfo.Username, javaHome, home)
			step++
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建hadoop-zkfailovercontroller.service文件...", "HDFS", "DFSZKFailoverController")
			writeServiceFileCommand = fmt.Sprintf("echo '%s' | sudo tee /etc/systemd/system/hadoop-zkfailovercontroller.service", zkfcServiceFile)
			output, err = utils.ExecCommand(writeServiceFileCommand)
			if err != nil {
				message := fmt.Sprintf("创建hadoop-zkfailovercontroller.service文件异常: %v, output: %s", err, output)
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "DFSZKFailoverController")
				return
			}
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建hadoop-zkfailovercontroller.service文件完成", "HDFS", "DFSZKFailoverController")

			startNamenodeCommand := "sudo systemctl daemon-reload && sudo systemctl enable hadoop-namenode.service --now"
			startZkfcCommand := "sudo systemctl daemon-reload && sudo systemctl enable hadoop-zkfailovercontroller.service --now"

			if in.HadoopConfig.NamenodeList[0] == currentHost || in.HadoopConfig.NamenodeList[0] == currentIP {
				// First NameNode in the list: format HDFS and ZKFC state, then start.
				step++
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "格式化NameNode...", "HDFS", "NameNode")
				formatCommand := fmt.Sprintf("sudo %s/bin/hdfs namenode -format", home)
				output, err = utils.ExecCommand(formatCommand)
				if err != nil {
					message := fmt.Sprintf("格式化NameNode异常: %v, output: %s", err, output)
					logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "NameNode")
					return
				}
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "格式化NameNode完成", "HDFS", "NameNode")

				step++
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动NameNode服务...", "HDFS", "NameNode")
				output, err = utils.ExecCommand(startNamenodeCommand)
				if err != nil {
					message := fmt.Sprintf("启动NameNode服务异常: %v, output: %s", err, output)
					logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "NameNode")
					return
				}
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动NameNode服务完成", "HDFS", "NameNode")
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "等待NameNode服务完全启动", "HDFS", "NameNode")

				time.Sleep(3 * time.Second)
				step++
				if !ensureActive("hadoop-namenode.service", "NameNode", 3) {
					return
				}
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "NameNode服务部署成功", "HDFS", "NameNode")

				step++
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "格式化ZK Failover Controller服务...", "HDFS", "DFSZKFailoverController")
				formatZKCommand := fmt.Sprintf("sudo %s/bin/hdfs zkfc -formatZK", home)
				output, err = utils.ExecCommand(formatZKCommand)
				if err != nil {
					message := fmt.Sprintf("格式化ZK Failover Controller服务异常: %v, output: %s", err, output)
					logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "DFSZKFailoverController")
					return
				}
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "格式化ZK Failover Controller服务完成", "HDFS", "DFSZKFailoverController")

				step++
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动DFSZKFailoverController服务...", "HDFS", "DFSZKFailoverController")
				output, err = utils.ExecCommand(startZkfcCommand)
				if err != nil {
					message := fmt.Sprintf("启动DFSZKFailoverController服务异常: %v, output: %s", err, output)
					logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "DFSZKFailoverController")
					return
				}
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动DFSZKFailoverController服务完成", "HDFS", "DFSZKFailoverController")
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "等待DFSZKFailoverController服务完全启动", "HDFS", "DFSZKFailoverController")

				time.Sleep(3 * time.Second)
				step++
				if !ensureActive("hadoop-zkfailovercontroller.service", "DFSZKFailoverController", 3) {
					return
				}
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "DFSZKFailoverController服务部署成功", "HDFS", "DFSZKFailoverController")

			} else {
				// Standby NameNode: bootstrap from the active NameNode, then start.
				step++
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动Standby NameNode服务...", "HDFS", "NameNode")
				bootstrapCommand := fmt.Sprintf("sudo %s/bin/hdfs namenode -bootstrapStandby", home)
				output, err = utils.ExecCommand(bootstrapCommand)
				if err != nil {
					message := fmt.Sprintf("启动Standby NameNode服务异常: %v, output: %s", err, output)
					logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "NameNode")
					return
				}
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动Standby NameNode服务完成", "HDFS", "NameNode")

				step++
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动NameNode服务...", "HDFS", "NameNode")
				output, err = utils.ExecCommand(startNamenodeCommand)
				if err != nil {
					message := fmt.Sprintf("启动NameNode服务异常: %v, output: %s", err, output)
					logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "NameNode")
					return
				}
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动NameNode服务完成", "HDFS", "NameNode")
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "等待NameNode服务完全启动", "HDFS", "NameNode")

				time.Sleep(3 * time.Second)
				step++
				if !ensureActive("hadoop-namenode.service", "NameNode", 3) {
					return
				}
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "NameNode服务部署成功", "HDFS", "NameNode")

				step++
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动DFSZKFailoverController服务...", "HDFS", "DFSZKFailoverController")
				output, err = utils.ExecCommand(startZkfcCommand)
				if err != nil {
					message := fmt.Sprintf("启动DFSZKFailoverController服务异常: %v, output: %s", err, output)
					logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "DFSZKFailoverController")
					return
				}
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动DFSZKFailoverController服务完成", "HDFS", "DFSZKFailoverController")
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "等待DFSZKFailoverController服务完全启动", "HDFS", "DFSZKFailoverController")

				time.Sleep(3 * time.Second)
				step++
				if !ensureActive("hadoop-zkfailovercontroller.service", "DFSZKFailoverController", 3) {
					return
				}
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "DFSZKFailoverController服务部署成功", "HDFS", "DFSZKFailoverController")
			}

			reportRole("DFSZKFailoverController")
			reportRole("NameNode")
		}

		// ---- DataNode deployment ----
		if utils.Contains(in.HadoopConfig.DatanodeList, currentHost) || utils.Contains(in.HadoopConfig.DatanodeList, currentIP) {

			datanodeServiceFile := hdfsSystemdUnit("Hadoop DataNode Service", "datanode", in.NodeInfo.Username, javaHome, home)

			step++
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建hadoop-datanode.service文件...", "HDFS", "DataNode")
			writeServiceFileCommand := fmt.Sprintf("echo '%s' | sudo tee /etc/systemd/system/hadoop-datanode.service", datanodeServiceFile)
			output, err = utils.ExecCommand(writeServiceFileCommand)
			if err != nil {
				message := fmt.Sprintf("创建 hadoop-datanode.service 文件异常: %v, output: %s", err, output)
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "DataNode")
				return
			}
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建 hadoop-datanode.service 文件完成", "HDFS", "DataNode")

			step++
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动DataNode服务...", "HDFS", "DataNode")
			startDatanodeCommand := "sudo systemctl daemon-reload && sudo systemctl enable hadoop-datanode.service --now"
			output, err = utils.ExecCommand(startDatanodeCommand)
			if err != nil {
				message := fmt.Sprintf("启动DataNode服务异常: %v, output: %s", err, output)
				logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "HDFS", "DataNode")
				return
			}
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动DataNode服务完成", "HDFS", "DataNode")
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "等待DataNode服务完全启动", "HDFS", "DataNode")

			time.Sleep(3 * time.Second)
			step++
			if !ensureActive("hadoop-datanode.service", "DataNode", 3) {
				return
			}
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "DataNode服务部署成功", "HDFS", "DataNode")

			reportRole("DataNode")
		}

		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "HDFS部署成功", "HDFS", "Server")
	}()
	wg.Wait()

}
