package sparkdeploylogic

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"
	"yunzhan/common/models"
	utils "yunzhan/common/utils"

	"yunzhan/rpc-server/internal/svc"
	agent "yunzhan/rpc-server/pb"

	"github.com/zeromicro/go-zero/core/logx"
)

// DeploySparkStandaloneLogic holds the per-request state for the Spark
// standalone deployment RPC handler.
type DeploySparkStandaloneLogic struct {
	ctx    context.Context     // request-scoped context
	svcCtx *svc.ServiceContext // shared service dependencies (config, DB connection, ...)
	logx.Logger                // embedded context-aware logger
}

// NewDeploySparkStandaloneLogic constructs a DeploySparkStandaloneLogic bound
// to the given request context and shared service context.
func NewDeploySparkStandaloneLogic(ctx context.Context, svcCtx *svc.ServiceContext) *DeploySparkStandaloneLogic {
	logic := &DeploySparkStandaloneLogic{
		ctx:    ctx,
		svcCtx: svcCtx,
		Logger: logx.WithContext(ctx),
	}
	return logic
}

// totalStep is the total number of logged deployment steps, used in the
// "[Step N/totalStep]" progress messages and matching the step_number
// values written to the deployment-logs table.
const totalStep = 10

// DeploySparkStandalone accepts a standalone Spark cluster deployment request.
//
// It validates that node information is present, immediately returns an
// "accepted" (code 202) response, and performs the actual deployment
// asynchronously in a background goroutine: ensure the install directory
// exists, unpack the package, fix ownership, materialize the conf files from
// their templates, write the worker list, and apply the spark-defaults.conf
// and spark-env.sh settings. Each step's outcome is upserted into the
// deployment-logs table under the task ID "agent_<host>" so callers can poll
// progress.
//
// Returns a 500 response and a non-nil error only when NodeInfo is missing;
// otherwise the returned error is nil and failures are reported through the
// deployment-logs table.
//
// NOTE(review): the background goroutine is not tied to any context, so a
// deployment cannot be cancelled once started. Nothing here actually starts
// the Spark daemons (no sbin/start-*.sh); the final pgrep check can only
// succeed if the service is started externally — confirm intended behavior.
func (l *DeploySparkStandaloneLogic) DeploySparkStandalone(in *agent.DeploySparkRequest) (*agent.DeployResponse, error) {
	if in.NodeInfo == nil {
		return &agent.DeployResponse{
			Code:    500,
			Message: "未配置节点信息, 无法执行Spark部署",
		}, errors.New("未配置节点信息, 无法执行Spark部署")
	}
	// Reply right away; the deployment itself runs in the goroutine below.
	resp := &agent.DeployResponse{
		Code:    202,
		Message: fmt.Sprintf("请求已接收，正在部署 %s 到节点 %s", in.ConfigInfo.ComponentName, in.NodeInfo.Host),
	}
	// Task ID correlating every step record of this deployment.
	taskID := "agent_" + in.NodeInfo.Host

	// Upsert so that re-running a step replaces its previous record instead
	// of accumulating duplicates.
	insertSql := fmt.Sprintf(`
    INSERT INTO %s (task_id, node_host, component_name, step_number, step_description, status, error_message) 
    VALUES (?, ?, ?, ?, ?, ?, ?)
    ON DUPLICATE KEY UPDATE 
    node_host = VALUES(node_host), 
    component_name = VALUES(component_name), 
    step_number = VALUES(step_number), 
    step_description = VALUES(step_description), 
    status = VALUES(status), 
    error_message = VALUES(error_message)
`, l.svcCtx.Config.Mysql.Tables.DeploymentLogs)

	go func() {
		host := in.NodeInfo.Host

		// logStep upserts one step outcome for this task into the
		// deployment-logs table.
		logStep := func(step int, desc, status, errMsg string) {
			utils.LogDeploymentStep(l.svcCtx.Conn, insertSql, taskID, host, "Spark", step, desc, status, errMsg)
		}

		packageDir := utils.Rtrim(in.ConfigInfo.PackagePath, "/")
		logx.Infof("安装包存放目录: %s", packageDir)
		// ComponentName doubles as the package file name here,
		// e.g. "spark-3.x.y-bin-hadoopN.tgz".
		packageName := in.ConfigInfo.ComponentName
		logx.Infof("Spark version: %s", packageName)

		installPath := utils.Rtrim(in.ConfigInfo.InstallPath, "/")
		logx.Infof("Spark 安装目录: %s", installPath)
		logx.Infof("开始部署节点 %s", host)

		// Step 1: make sure the install directory exists.
		checkPathCommand := fmt.Sprintf(`
if [ ! -d "%s" ]; then
sudo mkdir -p "%s"
fi
`, installPath, installPath)
		logx.Infof("[Step 1/%d] %s: 执检测组件安装路径是否存在: %s", totalStep, host, checkPathCommand)
		output, err := utils.ExecCommand(checkPathCommand)
		if err != nil {
			logx.Errorf("[Step 1/%d] %s: 执检测组件安装路径失败: %v, output: %s", totalStep, host, err, output)
			logStep(1, "检测组件安装路径", "failure", fmt.Sprintf("检测组件安装路径失败: %v, output: %s", err, output))
			return
		}
		logx.Infof("[Step 1/%d] %s: 执行检测组件安装路径成功", totalStep, host)
		logStep(1, "检测组件安装路径", "success", "")

		// Step 2: unpack the package into the install directory.
		tarCommand := fmt.Sprintf("tar -xzf %s/%s -C %s", packageDir, packageName, installPath)
		output, err = utils.ExecCommand(tarCommand)
		if err != nil {
			logx.Errorf("[Step 2/%d] %s: 解压安装包失败: %v, output: %s", totalStep, host, err, output)
			logStep(2, "解压安装包", "failure", fmt.Sprintf("解压安装包失败: %v, output: %s", err, output))
			return
		}
		logx.Infof("[Step 2/%d] %s: 解压安装包成功", totalStep, host)
		logStep(2, "解压安装包", "success", "")

		// The unpacked directory is the package name without ".tgz".
		// NOTE(review): assumes utils.Rtrim trims the exact suffix; if it
		// trims a trailing character *set* (TrimRight semantics), names
		// ending in 't'/'g'/'z'/'.' would be over-trimmed — confirm.
		sparkDirName := utils.Rtrim(packageName, ".tgz")
		sparkHome := fmt.Sprintf("%s/%s", installPath, sparkDirName)

		// Step 3: give the deployment user ownership of the Spark home.
		chownCommand := fmt.Sprintf("sudo chown -R %s:%s %s", in.NodeInfo.Username, in.NodeInfo.Username, sparkHome)
		output, err = utils.ExecCommand(chownCommand)
		if err != nil {
			logx.Errorf("[Step 3/%d] %s: 修改用户组失败: %v, output: %s", totalStep, host, err, output)
			logStep(3, "修改用户组", "failure", fmt.Sprintf("修改组件所属用户组失败: %v, output: %s", err, output))
			return
		}
		logx.Infof("[Step 3/%d] %s: 修改组件所属用户组成功", totalStep, host)
		logStep(3, "修改用户组", "success", "")

		// Steps 4-7: materialize each config file from its shipped template.
		// Each entry: DB step number, DB step description, log label, and the
		// template/target file names under conf/.
		templates := []struct {
			step  int
			desc  string // step_description stored in the DB
			label string // human-readable label used in log messages
			src   string
			dst   string
		}{
			{4, "创建spark-defaults.conf", "创建spark-defaults.conf配置文件", "spark-defaults.conf.template", "spark-defaults.conf"},
			{5, "创建spark-env.sh", "创建spark-env.sh配置文件", "spark-env.sh.template", "spark-env.sh"},
			{6, "创建workers", "创建workers文件", "workers.template", "workers"},
			{7, "创建metrics.properties", "创建metrics.properties配置文件", "metrics.properties.template", "metrics.properties"},
		}
		for _, t := range templates {
			cpCommand := fmt.Sprintf("cp -r %s/conf/%s %s/conf/%s", sparkHome, t.src, sparkHome, t.dst)
			output, err = utils.ExecCommand(cpCommand)
			if err != nil {
				logx.Errorf("[Step %d/%d] %s: %s异常: %v, output: %s", t.step, totalStep, host, t.label, err, output)
				logStep(t.step, t.desc, "failure", fmt.Sprintf("%s异常: %v, output: %s", t.label, err, output))
				return
			}
			logx.Infof("[Step %d/%d] %s: %s成功", t.step, totalStep, host, t.label)
			logStep(t.step, t.desc, "success", "")
		}

		// Step 8: write the worker host list, one host per line.
		workersConfig := strings.Join(in.SparkConfig.WorkerList, "\n")
		// Single-quote the payload so embedded newlines survive the shell;
		// the previous unquoted `echo %s > file` broke for >1 worker.
		configWorkerCommand := fmt.Sprintf("printf '%%s\\n' '%s' > %s/conf/workers", workersConfig, sparkHome)
		output, err = utils.ExecCommand(configWorkerCommand)
		if err != nil {
			logx.Errorf("[Step 8/%d] %s: 配置workers异常: %v, output: %s", totalStep, host, err, output)
			logStep(8, "配置workers", "failure", fmt.Sprintf("配置workers异常: %v, output: %s", err, output))
			return
		}
		logx.Infof("[Step 8/%d] %s: 配置workers成功", totalStep, host)
		logStep(8, "配置workers", "success", "")

		// Step 9: uncomment/set the requested spark-defaults.conf entries.
		configCommand := fmt.Sprintf("sed -i 's/^# spark.master\\s*.*/spark.master  spark:\\/\\/%s/' %s/conf/spark-defaults.conf", in.SparkConfig.SparkMaster, sparkHome)
		configCommand += fmt.Sprintf(" && sed -i 's/^# spark.eventLog.enabled\\s*.*/spark.eventLog.enabled  %v/' %s/conf/spark-defaults.conf", in.SparkConfig.SparkEventLogEnabled, sparkHome)
		configCommand += fmt.Sprintf(" && sed -i 's/^# spark.eventLog.dir\\s*.*/spark.eventLog.dir  %s/' %s/conf/spark-defaults.conf", in.SparkConfig.SparkEventLogDir, sparkHome)
		configCommand += fmt.Sprintf(" && sed -i 's/^# spark.serializer\\s*.*/spark.serializer  %s/' %s/conf/spark-defaults.conf", in.SparkConfig.SparkSerializer, sparkHome)
		configCommand += fmt.Sprintf(" && sed -i 's/^# spark.driver.memory\\s*.*/spark.driver.memory  %s/' %s/conf/spark-defaults.conf", in.SparkConfig.SparkDriverMemory, sparkHome)
		output, err = utils.ExecCommand(configCommand)
		if err != nil {
			logx.Errorf("[Step 9/%d] %s: 配置spark-defaults.conf异常: %v, output: %s", totalStep, host, err, output)
			logStep(9, "配置spark-defaults.conf", "failure", fmt.Sprintf("配置spark-defaults.conf异常: %v, output: %s", err, output))
			return
		}
		logx.Infof("[Step 9/%d] %s: 配置spark-defaults.conf成功", totalStep, host)
		logStep(9, "配置spark-defaults.conf", "success", "")

		// Step 10: append the environment exports to spark-env.sh.
		confCommand := fmt.Sprintf("echo 'export HADOOP_CONF_DIR=%s' >> %s/conf/spark-env.sh", in.SparkConfig.SparkHadoopConfDir, sparkHome)
		confCommand += fmt.Sprintf(" && echo 'export JAVA_HOME=%s' >> %s/conf/spark-env.sh", in.SparkConfig.SparkJavaHome, sparkHome)
		confCommand += fmt.Sprintf(" && echo 'export SCALA_HOME=%s' >> %s/conf/spark-env.sh", in.SparkConfig.SparkScalaHome, sparkHome)
		output, err = utils.ExecCommand(confCommand)
		if err != nil {
			logx.Errorf("[Step 10/%d] %s: 配置spark-env.sh异常: %v, output: %s", totalStep, host, err, output)
			logStep(10, "配置spark-env.sh", "failure", fmt.Sprintf("配置spark-env.sh异常: %v, output: %s", err, output))
			return
		}
		logx.Infof("[Step 10/%d] %s: 配置spark-env.sh成功", totalStep, host)
		logStep(10, "配置spark-env.sh", "success", "")

		logx.Infof("%s: 等待Spark服务启动...", host)
		time.Sleep(3 * time.Second)

		// Final check: look for a running Spark process.
		// (Previously `fmt.Sprintf("pgrep -f spark && exec bash")`: Sprintf
		// with no args, and `exec bash` would replace the command shell.)
		// NOTE(review): pgrep -f may match this command's own shell because
		// its command line contains "spark" — confirm against ExecCommand.
		statusCommand := "pgrep -f spark"
		output, err = utils.ExecCommand(statusCommand)
		if err != nil || output == "" {
			logx.Errorf("[Step Final] %s: Spark启动失败: %v, Output: %s", host, err, output)
			return
		}
		logx.Infof("[Step Final] %s: Spark部署成功", host)

		// Build the deployed component's metadata once. The previous code
		// spawned an unbounded goroutine that rebuilt this struct every 15s,
		// checked a stale captured `err`, and never persisted anything.
		sparkInfo := &models.ComponentInfo{
			ComponentName:  "Spark",
			Version:        in.Version,
			ComponentRole:  "Server",
			HomePath:       sparkHome,
			DataStorageDir: "",
			Port:           "7077",
			Status:         true,
			AdditionalInfo: fmt.Sprintf("config_file=%s/conf/spark-defaults.conf", sparkHome),
		}
		// TODO(review): the call that persists sparkInfo (DB/RPC) is missing
		// in the original code — wire it up here.
		logx.Infof("%s 节点: 更新 Spark 角色信息成功 -> %s, info: %+v", host, "Server", sparkInfo)
	}()
	return resp, nil
}
