package hivedeploylogic

import (
	"context"
	"errors"
	"fmt"
	"github.com/zeromicro/go-zero/core/logx"
	"strings"
	"sync"
	"time"
	"yunzhan/common/models"
	utils "yunzhan/common/utils"
	"yunzhan/rpc-server/internal/svc"
	agent "yunzhan/rpc-server/pb"
)

// DeployHiveLogic handles Hive deployment RPC requests. One instance is
// created per request (see NewDeployHiveLogic).
type DeployHiveLogic struct {
	ctx    context.Context     // request-scoped context supplied by the RPC framework
	svcCtx *svc.ServiceContext // shared service configuration and dependencies
	logx.Logger                // embedded request-scoped logger
}

// NewDeployHiveLogic constructs a DeployHiveLogic bound to the given request
// context and shared service context, with a context-aware logger attached.
func NewDeployHiveLogic(ctx context.Context, svcCtx *svc.ServiceContext) *DeployHiveLogic {
	logic := DeployHiveLogic{
		ctx:    ctx,
		svcCtx: svcCtx,
		Logger: logx.WithContext(ctx),
	}
	return &logic
}

// DeployHive 部署Hive
func (l *DeployHiveLogic) DeployHive(in *agent.DeployHiveRequest) (*agent.DeployResponse, error) {
	if in.NodeInfo == nil {
		return &agent.DeployResponse{
			Code:    500,
			Message: "未配置节点信息, 无法执行Hive部署",
		}, errors.New("未配置节点信息, 无法执行Hive部署")
	}

	resp := &agent.DeployResponse{
		Code:    200,
		Message: fmt.Sprintf("请求已接收，正在部署 %s 到节点 %s", in.ConfigInfo.ComponentName, in.NodeInfo.Host),
	}

	go l.startDeployment(in)

	return resp, nil
}

// startDeployment performs the Hive installation on the local node. It is
// launched asynchronously from DeployHive, so nothing is returned: progress
// and failures are reported through the LogManager, and successfully deployed
// roles are registered via the ComponentInfoManager.
//
// Flow: ensure install dir -> unpack tarball -> chown -> ensure data dir ->
// resolve local IP/hostname -> append hive-env.sh -> patch hive-site.xml ->
// (if this node is listed in the request) install and start the Metastore
// and/or HiveServer2 systemd services.
//
// Note: the original wrapped the body in a WaitGroup plus a single goroutine
// followed by Wait(), which is equivalent to running it inline; the wrapper
// has been removed.
func (l *DeployHiveLogic) startDeployment(in *agent.DeployHiveRequest) {
	// Base URL of the API server that receives logs and component metadata.
	apiServer := fmt.Sprintf("http://%s:%d",
		l.svcCtx.Config.RestServices["apiServer"].RestConf.Host,
		l.svcCtx.Config.RestServices["apiServer"].RestConf.Port)

	// LogManager batches deployment logs and ships them to the API server.
	logManager := models.NewLogManager(100, apiServer)
	defer logManager.Close()

	flushInterval := 5 * time.Second // flush interval for batched component info
	maxBatchSize := 100              // maximum batch size
	retryDelay := 2 * time.Second    // delay between send retries
	maxRetries := 3                  // maximum number of retries

	// ComponentInfoManager reports deployed-component metadata upstream.
	componentInfoManager := models.NewComponentInfoManager(
		1000,
		apiServer,
		flushInterval,
		maxBatchSize,
		retryDelay,
		maxRetries)
	defer componentInfoManager.Close()

	packageDirectory := in.ConfigInfo.PackagePath
	rtrimDir := utils.Rtrim(packageDirectory, "/")
	hiveVersion := in.ConfigInfo.ComponentName // archive file name, e.g. "apache-hive-3.1.3-bin.tar.gz"
	installPath := utils.Rtrim(in.ConfigInfo.InstallPath, "/")
	baseInfo := fmt.Sprintf("安装包存放目录: %s, Hive version: %s, Hive 安装目录: %s", rtrimDir, hiveVersion, installPath)
	step := 1
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", baseInfo, "Hive", "Server")

	// Create the installation directory if it does not exist yet.
	checkPathCommand := fmt.Sprintf(`
if [ ! -d "%s" ]; then
   sudo mkdir -p "%s"
fi
`, installPath, installPath)

	step++
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "执行组件安装路径检查...", "Hive", "Server")
	output, err := utils.ExecCommand(checkPathCommand)
	if err != nil {
		message := fmt.Sprintf("检查组件安装路径异常: %v, output: %s", err, output)
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Hive", "Server")
		return
	}
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "检查组件安装路径完成", "Hive", "Server")
	step++
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "解压安装包...", "Hive", "Server")

	// Unpack the release archive into the install directory.
	tarCommand := fmt.Sprintf("sudo tar -xzf %s/%s -C %s", rtrimDir, hiveVersion, installPath)
	output, err = utils.ExecCommand(tarCommand)
	if err != nil {
		message := fmt.Sprintf("解压安装包异常: %v, output: %s", err, output)
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Hive", "Server")
		return
	}
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "解压安装包完成", "Hive", "Server")

	// Derive the extracted directory name by stripping the archive suffix.
	// strings.TrimSuffix removes exactly ".tar.gz"; the previous
	// utils.Rtrim call could over-trim if it treats the argument as a cutset.
	hivePackageName := strings.TrimSuffix(hiveVersion, ".tar.gz")
	hiveHome := fmt.Sprintf("%s/%s", installPath, hivePackageName)

	step++
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "修改组件所属用户组...", "Hive", "Server")
	chownCommand := fmt.Sprintf("sudo chown -R %s:%s %s", in.NodeInfo.Username, in.NodeInfo.Username, hiveHome)
	output, err = utils.ExecCommand(chownCommand)
	if err != nil {
		message := fmt.Sprintf("修改组件所属用户组异常: %v, output: %s", err, output)
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Hive", "Server")
		return
	}
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "修改组件所属用户组完成", "Hive", "Server")

	// Ensure the data storage directory exists and is owned by the service user.
	checkPathCommand = fmt.Sprintf(`
if [ ! -d "%s" ]; then
sudo mkdir -p "%s"
fi
sudo chown -R %s:%s %s
`, in.HiveConfig.HbaseRootDir, in.HiveConfig.HbaseRootDir, in.NodeInfo.Username, in.NodeInfo.Username, in.HiveConfig.HbaseRootDir)
	step++
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "检查组件数据存储目录...", "Hive", "Server")
	output, err = utils.ExecCommand(checkPathCommand)
	if err != nil {
		message := fmt.Sprintf("检查组件数据存储目录异常: %v, output: %s", err, output)
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Hive", "Server")
		return
	}
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "检查组件数据存储目录完成", "Hive", "Server")

	step++
	// BUG FIX: the original tested the stale `err` here and below, so
	// failures from these two lookups were silently ignored and later code
	// ran with empty host/IP values.
	currentIP, currentIPErr := utils.GetCurrentInternalIP()
	if currentIPErr != nil {
		message := fmt.Sprintf("%s: 无法获取当前主机IP: %v", in.NodeInfo.Host, currentIPErr)
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Hive", "Server")
		return
	}
	currentHost, currentHostErr := utils.GetCurrentHostname()
	if currentHostErr != nil {
		message := fmt.Sprintf("%s: 无法获取当前主机名: %v", in.NodeInfo.Host, currentHostErr)
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Hive", "Server")
		return
	}

	// Append the environment exports Hive needs to conf/hive-env.sh.
	hiveEnvPath := fmt.Sprintf("%s/conf/hive-env.sh", hiveHome)
	appendHadoopEnvCommand := fmt.Sprintf(`
sudo bash -c 'cat <<EOF >>%s
export HADOOP_HOME=%s
export HIVE_CONF_DIR=%s/conf
export HIVE_AUX_JARS_PATH=%s/lib
EOF'
`, hiveEnvPath, hiveHome, hiveHome, hiveHome)
	step++
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "配置 hive-env.sh 文件...", "Hive", "Server")
	output, err = utils.ExecCommand(appendHadoopEnvCommand)
	if err != nil {
		message := fmt.Sprintf("配置 hive-env.sh 文件异常: %v, output: %s", err, output)
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Hive", "Server")
		return
	}
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "配置 hive-env.sh 文件完成", "Hive", "Server")

	step++
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "加载 hive-site.xml 配置文件...", "Hive", "Server")
	hiveSitePath := fmt.Sprintf("%s/conf/hive-site.xml", hiveHome)
	siteConfig, err := utils.LoadConfiguration(hiveSitePath)
	if err != nil {
		// NOTE: no command output here — LoadConfiguration is a local read
		// (the original message interpolated a stale `output` value).
		message := fmt.Sprintf("加载 hive-site.xml 配置文件异常: %v", err)
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Hive", "Server")
		return
	}
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "加载 hive-site.xml 配置文件完成", "Hive", "Server")

	// Render the requested configuration into hive-site.xml.
	// BUG FIX: several ZooKeeper-related keys were garbled by an accidental
	// rename ("hive.zookeeper_logic.*"); Hive only recognizes the canonical
	// "hive.zookeeper.*" / "hive.server2.zookeeper.*" property names.
	siteConfig.SetProperty("hive.server2.active.passive.ha.enable", in.HiveConfig.HiveServer2ActivePassiveHAEnable, "在启用 Hive 交互式会话时是否启用 HiveServer2 的主动/被动高可用性")
	siteConfig.SetProperty("hive.metastore.warehouse.dir", in.HiveConfig.HiveMetastoreWarehouseDir, "Hive 数据仓库的存放位置")
	siteConfig.SetProperty("javax.jdo.option.ConnectionURL", in.HiveConfig.JavaxJdoOptionConnectionURL, "连接到Hive元数据存储URL（使用MySQL）")
	siteConfig.SetProperty("javax.jdo.option.ConnectionDriverName", in.HiveConfig.JavaxJdoOptionConnectionDriverName, "JDBC驱动的类名（使用MySQL）")
	siteConfig.SetProperty("javax.jdo.option.ConnectionUserName", in.HiveConfig.JavaxJdoOptionConnectionUserName, "连接到Hive元数据存储的用户名")
	siteConfig.SetProperty("javax.jdo.option.ConnectionPassword", in.HiveConfig.JavaxJdoOptionConnectionPassword, "连接到Hive元数据存储的密码")
	siteConfig.SetProperty("hive.server2.support.dynamic.service.discovery", in.HiveConfig.HiveServer2SupportDynamicServiceDiscovery, "HiveServer2 是否支持其客户端的动态服务发现，允许Hive Server2通过Zookeeper进行服务发现")
	siteConfig.SetProperty("hive.server2.zookeeper.namespace", in.HiveConfig.HiveServer2ZookeeperNamespace, "Hive Server2 在Zookeeper中的命名空间")
	siteConfig.SetProperty("hive.zookeeper.quorum", in.HiveConfig.HiveZookeeperQuorum, "用于与之通信的 Zookeeper 集群的地址")
	siteConfig.SetProperty("hive.cli.print.current.db", in.HiveConfig.HiveCliPrintCurrentDB, "在命令行界面打印当前数据库")
	siteConfig.SetProperty("hive.cli.print.header", in.HiveConfig.HiveCliPrintHeader, "在命令行界面打印结果的表头")
	siteConfig.SetProperty("hive.server2.thrift.bind.host", in.HiveConfig.HiveServer2ThriftBindHost, "Hive Server2 绑定的主机名或IP地址")
	siteConfig.SetProperty("hive.server2.thrift.port", in.HiveConfig.HiveServer2ThriftPort, "Hive Server2 的 Thrift 服务端口")
	// hive.metastore.uris was previously set twice with the same value; the
	// duplicate (last-write-wins) has been collapsed into this single call.
	siteConfig.SetProperty("hive.metastore.uris", in.HiveConfig.HiveMetastoreUris, "远程元存储的 Thrift URI。用于元存储客户端连接到远程元存储")
	siteConfig.SetProperty("hive.metastore.uri.selection", in.HiveConfig.HiveMetastoreUriSelection, "期望值之一 [sequential, random]。确定元存储客户端连接到远程元存储的选择机制。SEQUENTIAL 意味着将选择被指定为 hive.metastore.uris 的第一个有效元存储。RANDOM 意味着将随机选择元存储")
	siteConfig.SetProperty("hive.metastore.client.cache.max.capacity", in.HiveConfig.HiveMetastoreClientCacheMaxCapacity, "元存储客户端缓存的最大容量")
	siteConfig.SetProperty("hive.metastore.fshandler.threads", in.HiveConfig.HiveMetastoreFsHandlerThreads, "分配给元存储处理器进行文件系统操作的线程数量")
	siteConfig.SetProperty("hive.metastore.failure.retries", in.HiveConfig.HiveMetastoreFailureRetries, "在 Thrift 元存储调用失败时的重试次数")
	siteConfig.SetProperty("hive.metastore.db.type", in.HiveConfig.HiveMetastoreDbType, "[derby, oracle, mysql, mssql, postgres]。元存储使用的数据库类型")
	siteConfig.SetProperty("hive.zookeeper.client.port", in.HiveConfig.HiveZookeeperClientPort, "与 ZooKeeper 服务器通信的端口")
	siteConfig.SetProperty("hive.zookeeper.session.timeout", in.HiveConfig.HiveZookeeperSessionTimeout, "ZooKeeper 客户端的会话超时（以毫秒为单位）")
	siteConfig.SetProperty("hive.zookeeper.connection.timeout", in.HiveConfig.HiveZookeeperConnectionTimeout, "ZooKeeper 客户端的连接超时（以秒为单位）")
	siteConfig.SetProperty("hive.exec.dynamic.partition", in.HiveConfig.HiveExecDynamicPartition, "是否允许在 DML/DDL 中进行动态分区")
	siteConfig.SetProperty("hive.exec.dynamic.partition.mode", in.HiveConfig.HiveExecDynamicPartitionMode, "在严格模式下，用户必须指定至少一个静态分区")
	siteConfig.SetProperty("hive.exec.max.dynamic.partitions", in.HiveConfig.HiveExecMaxDynamicPartitions, "允许创建的动态分区的最大数量")
	siteConfig.SetProperty("hive.exec.mode.local.auto", in.HiveConfig.HiveExecModeLocalAuto, "让 Hive 自动确定是否在本地模式下运行")
	siteConfig.SetProperty("hive.metastore.port", in.HiveConfig.HiveMetastorePort, "Hive 元存储监听端口")
	siteConfig.SetProperty("hive.metastore.server.max.message.size", in.HiveConfig.HiveMetastoreServerMaxMessageSize, "HMS 将接受的最大消息大小（以字节为单位）")
	siteConfig.SetProperty("hive.metastore.server.min.threads", in.HiveConfig.HiveMetastoreServerMinThreads, "Thrift 服务器池中最少的工作线程数")
	siteConfig.SetProperty("hive.metastore.server.max.threads", in.HiveConfig.HiveMetastoreServerMaxThreads, "Thrift 服务器池中最多的工作线程数")
	siteConfig.SetProperty("hive.metastore.schema.verification", in.HiveConfig.HiveMetastoreSchemaVerification, "强制执行元存储模式版本一致性")
	siteConfig.SetProperty("hive.metastore.metrics.enabled", in.HiveConfig.HiveMetastoreMetricsEnabled, "启用元存储的指标")
	siteConfig.SetProperty("hive.metastore.event.db.notification.api.auth", in.HiveConfig.HiveMetastoreEventDBNotificationApiAuth, " 元数据是否应针对数据库通知相关 API（如 get_next_notification）进行授权。如果设置为 “true”，则只有代理设置中的超级用户才拥有权限")

	step++
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "更新 hive-site.xml 配置文件...", "Hive", "Server")
	err = siteConfig.Save(hiveSitePath)
	if err != nil {
		// NOTE: no command output here either — Save is a local write.
		message := fmt.Sprintf("更新 hive-site.xml 配置文件异常: %v", err)
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Hive", "Server")
		return
	}
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "更新 hive-site.xml 配置文件完成", "Hive", "Server")

	// Metastore deployment: only when this node appears in the metastore node
	// list (matched by hostname or internal IP).
	if utils.Contains(in.HiveConfig.MetastoreNodes, currentHost) || utils.Contains(in.HiveConfig.MetastoreNodes, currentIP) {

		metastoreServiceFile := fmt.Sprintf(`[Unit]
Description=Hive Metastore Service
Documentation=https://cwiki.apache.org/confluence/display/Hive/Home
After=network.target

[Service]
Type=simple
User=%s
Group=%s
Environment=HIVE_HOME=%s
Environment=HADOOP_CONF_DIR=%s/etc/hadoop
ExecStart=/bin/bash -c "$HIVE_HOME/bin/hive --service metastore"
StandardOutput=journal
StandardError=journal
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target`,
			in.NodeInfo.Username,
			in.NodeInfo.Username,
			hiveHome,
			hiveHome)
		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建 hive-metastore.service 文件...", "Hive", "Metastore")
		writeServiceFileCommand := fmt.Sprintf("echo '%s' | sudo tee /etc/systemd/system/hive-metastore.service", metastoreServiceFile)
		output, err = utils.ExecCommand(writeServiceFileCommand)
		if err != nil {
			message := fmt.Sprintf("创建 hive-metastore.service 文件异常: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Hive", "Metastore")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建 hive-metastore.service 文件完成", "Hive", "Metastore")

		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动Hive Metastore服务...", "Hive", "Metastore")
		reloadSystemdCommand := "sudo systemctl daemon-reload && sudo systemctl enable hive-metastore.service --now"
		output, err = utils.ExecCommand(reloadSystemdCommand)
		if err != nil {
			message := fmt.Sprintf("启动Hive Metastore服务异常: %v, output: %s", err, output)
			// Role corrected from "Server" to "Metastore" (copy-paste slip).
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Hive", "Metastore")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动Hive Metastore服务完成", "Hive", "Metastore")
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "等待Hive Metastore服务完全启动", "Hive", "Metastore")
		time.Sleep(3 * time.Second)
		step++
		statusCommand := "sudo systemctl is-active hive-metastore.service"
		output, err = utils.ExecCommand(statusCommand)
		if err != nil || strings.TrimSpace(output) != "active" {
			message := fmt.Sprintf("Hive Metastore服务启动后未正常运行: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Hive", "Metastore")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "Hive Metastore服务部署成功", "Hive", "Metastore")

		hiveInfo := &models.ComponentInfo{
			ClusterID:      in.ClusterID,
			ComponentName:  "Hive",
			Version:        in.Version,
			NodeHost:       currentHost,
			NodeIP:         currentIP,
			ComponentRole:  "Metastore",
			HomePath:       hiveHome,
			DataStorageDir: in.HiveConfig.HiveMetastoreWarehouseDir,
			Port:           in.HiveConfig.HiveServer2ThriftPort,
			Status:         true,
			AdditionalInfo: fmt.Sprintf("config_file=%s/conf/hive-site.xml", hiveHome),
		}

		componentInfoManager.SendComponentInfo(hiveInfo)

	}

	// HiveServer2 deployment: only when this node appears in the HiveServer2
	// node list (matched by hostname or internal IP).
	if utils.Contains(in.HiveConfig.HiveServer2Nodes, currentHost) || utils.Contains(in.HiveConfig.HiveServer2Nodes, currentIP) {
		serverServiceFile := fmt.Sprintf(`[Unit]
Description=Hive Server2 Service
Documentation=https://cwiki.apache.org/confluence/display/Hive/Home
After=network.target

[Service]
Type=simple
User=%s
Group=%s
Environment=HIVE_HOME=%s
Environment=HADOOP_CONF_DIR=%s/etc/hadoop
ExecStart=/bin/bash -c "$HIVE_HOME/bin/hive --service hiveserver2"
StandardOutput=journal
StandardError=journal
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
`, in.NodeInfo.Username, in.NodeInfo.Username, hiveHome, hiveHome)

		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建 hive-server2.service 文件...", "Hive", "HiveServer2")
		writeServiceFileCommand := fmt.Sprintf("echo '%s' | sudo tee /etc/systemd/system/hive-server2.service", serverServiceFile)
		output, err = utils.ExecCommand(writeServiceFileCommand)
		if err != nil {
			message := fmt.Sprintf("创建 hive-server2.service 文件异常: %v, output: %s", err, output)
			// Role corrected from "Server" to "HiveServer2" (copy-paste slip).
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Hive", "HiveServer2")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "创建 hive-server2.service 文件完成", "Hive", "HiveServer2")
		step++
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动HiveServer2服务...", "Hive", "HiveServer2")
		reloadSystemdCommand := "sudo systemctl daemon-reload && sudo systemctl enable hive-server2.service --now"
		output, err = utils.ExecCommand(reloadSystemdCommand)
		if err != nil {
			message := fmt.Sprintf("启动HiveServer2服务异常: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Hive", "HiveServer2")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "启动HiveServer2服务完成", "Hive", "HiveServer2")

		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "等待HiveServer2服务完全启动", "Hive", "HiveServer2")

		time.Sleep(3 * time.Second)
		step++
		statusCommand := "sudo systemctl is-active hive-server2.service"
		output, err = utils.ExecCommand(statusCommand)
		if err != nil || strings.TrimSpace(output) != "active" {
			message := fmt.Sprintf("Hive Server2服务启动后未正常运行: %v, output: %s", err, output)
			logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "ERROR", message, "Hive", "HiveServer2")
			return
		}
		logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "Hive HiveServer2服务部署成功", "Hive", "HiveServer2")

		hiveInfo := &models.ComponentInfo{
			ClusterID:      in.ClusterID,
			ComponentName:  "Hive",
			Version:        in.Version,
			NodeHost:       currentHost,
			NodeIP:         currentIP,
			ComponentRole:  "HiveServer2",
			HomePath:       hiveHome,
			DataStorageDir: in.HiveConfig.HiveMetastoreWarehouseDir,
			Port:           in.HiveConfig.HiveServer2ThriftPort,
			Status:         true,
			AdditionalInfo: fmt.Sprintf("config_file=%s/conf/hive-site.xml", hiveHome),
		}

		componentInfoManager.SendComponentInfo(hiveInfo)

	}
	step++
	logManager.SendLog(in.ClusterID, in.NodeInfo.Host, step, "INFO", "Hive部署成功", "Hive", "Server")

}
