# Absolute path of the directory make was started in.
# Expanded once with := so the shell is not re-run on every reference
# (recursive `=` would fork `pwd` each time ROOT_DIR is expanded).
ROOT_DIR    := $(shell pwd)
# NOTE(review): the double quotes below are part of the variable VALUES and
# are stripped by the shell at use sites (e.g. kubectl -n $(NAMESPACE)).
# Kept as-is because existing usages rely on that.
NAMESPACE   = "default"
DEPLOY_NAME = "template-single"
DOCKER_NAME = "template-single"
# Server-side path that receives the built admin front-end (see `build`).
ADMIN_RESOURCE_PATH = "/resource/public/admin/"


# Generate Go files for Controller (GoFrame CLI code generation).
.PHONY: ctrl
ctrl:
	@gf gen ctrl


# Generate Go files for DAO/DO/Entity.
# Requires the GoFrame CLI (`gf`) to be installed and on PATH.
.PHONY: dao
dao:
	@gf gen dao

# Generate Go files for Service (interfaces from logic implementations).
.PHONY: service
service:
	@gf gen service


# Build the web front-end production bundle.
# .PHONY added: without it a stray file named `pnpm_build` would silently
# make this target appear up to date.
.PHONY: pnpm_build
pnpm_build:
	@cd ../xiuxianba_web && pnpm run build

# Push the built front-end files to the test environment.
# .PHONY added so a file named `push_test` cannot shadow the target.
.PHONY: push_test
push_test:
	rsync -avz --delete ../xiuxianba_web/dist/* ubuntu@159.75.240.152:/home/web/xiuxian_v2

# Push the built front-end files to the production environment.
# .PHONY added so a file named `push_prod` cannot shadow the target.
.PHONY: push_prod
push_prod:
	rsync -avz --delete ../xiuxianba_web/dist/* ubuntu@106.55.239.19:/home/web/xiuxian_v2

# Directory for database backup files
backup=./backup
# Timestamps used in backup/dump filenames.
# Expanded ONCE with := — with recursive `=` the $(shell date ...) would be
# re-run on every expansion, so a filename written early in a recipe could
# differ from the one read later if the clock ticks over to the next minute.
current_time := $(shell date +"%Y_%m_%d-%H:%M")
current_time_str := $(shell date +"%Y_%m_%d_%H_%M")
# Production database password
# NOTE(review): credentials are stored in plaintext here and passed on the
# mysql command line (visible in process lists); consider an option file
# (~/.my.cnf) or environment variables instead.
prod_db_pw=NDsYzHE6GNNATbmPrSuz
# Local development database
dev_ip=127.0.0.1
dev_port=3308
dev_db_pw=root

#test_ip=127.0.0.1
#test_port=3308
#test_db_pw=root
# Test environment database
test_ip=159.75.240.152
test_port=3306
test_db_pw=NDsYzHE6GNNATbmPrSuz

# Scratch directory and log file for the db sync targets
TEMP_DIR=./temp/db_sync
LOG_FILE=./temp/db_sync.log

# Source database (old library)
SRC_HOST=159.75.240.152
SRC_PORT=3306
SRC_USER=root
SRC_PASS=NDsYzHE6GNNATbmPrSuz
SRC_DB=xiuxianba

# Destination database (new library)
#DEST_HOST=127.0.0.1
#DEST_PORT=3308
#DEST_USER=root
#DEST_PASS=root
#DEST_DB=xiuxianba
DEST_HOST=127.0.0.1
DEST_PORT=33306
DEST_USER=root
DEST_PASS=NDsYzHE6GNNATbmPrSuz
DEST_DB=xiuxianba


# One-time setup: copy the local user's SSH public key to the remote hosts
# ssh-copy-id ubuntu@106.55.239.19
# ssh-copy-id ubuntu@119.91.132.65

# Forward local port 8000 to 127.0.0.1:18000 on the test host.
# NOTE(review): the target name mentions xiuxian_db but this forwards port
# 18000 (to local 8000), which does not look like a DB port — confirm the
# name still matches the intent.
p_xiuxian_db->l18000:
	ssh -L 8000:127.0.0.1:18000 ubuntu@159.75.240.152

# Map the production MySQL port (3306) to local port 33306.
# Required (in a separate terminal) before running `sync` / `sync_all`.
p3306->l33306:
	ssh -L 33306:127.0.0.1:3306 ubuntu@106.55.239.19

# Map the production legacy database xiuxian_db (172.16.0.17:5432, reached
# via 106.55.239.19) to local port 15434.
p_xiuxian_db->l15434:
	ssh -L 15434:172.16.0.17:5432 ubuntu@106.55.239.19

# Map the xialou_platform database (172.16.0.9:5432, reached via
# 119.91.132.65) to local port 15433.
# (The previous comment was a stale copy of the xiuxian_db/15434 rule.)
p_xialou_platform->l15433:
	ssh -L 15433:172.16.0.9:5432 ubuntu@119.91.132.65

# 增量同步表
TABLES=hg_admin_role hg_bank_icon hg_sys_config hg_sys_cron hg_sys_cron_group hg_sys_dict_data hg_sys_dict_type
# 全量同步表
TABLES_FULL=hg_admin_menu hg_sys_provinces

# Sync test-environment changes to production (column additions/removals/
# changes on EXISTING tables still need to be handled manually).
# NOTE(review): DEST is 127.0.0.1:$(DEST_PORT) — presumably the
# `p3306->l33306` SSH tunnel must already be running; confirm.
# .PHONY added so a stray file named `sync_all` cannot shadow the target.
.PHONY: sync_all
sync_all:
	# Create tables missing from the destination
	$(MAKE) sync_table_structure
	# Incremental sync of the tables listed in TABLES
	$(MAKE) sync_table
	# Full sync of the tables listed in TABLES_FULL
	$(MAKE) sync_table_full

# Sync production data down to the test environment.
# Steps: dump production (minus log tables) -> snapshot the current test DB
# into a timestamped database -> drop & recreate the test DB -> import the
# production dump -> apply test-only data fixes.
sync:
	# ! Requires `make p3306->l33306` to be running in a separate terminal
	# Back up the production database, skipping the large log tables:
	# hg_sys_log, hg_sys_login_log, hg_sys_serve_log
	# --single-transaction avoids locking tables on the live system
	@mysqldump --single-transaction -h 127.0.0.1 -P 33306 -u root -p$(prod_db_pw) xiuxianba \
	--ignore-table=xiuxianba.hg_sys_log \
	--ignore-table=xiuxianba.hg_sys_login_log \
	--ignore-table=xiuxianba.hg_sys_serve_log > $(backup)/xxb_$(current_time).sql
	# Append the structure (no data) of the skipped log tables
	@mysqldump --single-transaction -h 127.0.0.1 -P 33306 -u root -p$(prod_db_pw) xiuxianba \
	--no-data hg_sys_log \
	--no-data hg_sys_login_log \
	--no-data hg_sys_serve_log >> $(backup)/xxb_$(current_time).sql
	# Snapshot the test environment's current data into a new timestamped DB
	@mysqldump -h $(test_ip) -P $(test_port) -u root -p$(test_db_pw) xiuxianba > $(backup)/test_xxb_$(current_time).sql
	@mysql -h $(test_ip) -P $(test_port) -u root -p$(test_db_pw) \
	-e "CREATE DATABASE xiuxianba_$(current_time_str) DEFAULT CHARACTER SET = 'utf8mb4';"
	@mysql -h $(test_ip) -P $(test_port) -u root -p$(test_db_pw) xiuxianba_$(current_time_str) < $(backup)/test_xxb_$(current_time).sql
	# Drop the old database and recreate it empty
	@mysql -h $(test_ip) -P $(test_port) -u root -p$(test_db_pw) \
	-e "DROP DATABASE IF EXISTS xiuxianba;" \
	-e "CREATE DATABASE xiuxianba DEFAULT CHARACTER SET = 'utf8mb4';"
	# Import the production data
	@mysql -h $(test_ip) -P $(test_port) -u root -p$(test_db_pw) xiuxianba < $(backup)/xxb_$(current_time).sql
	# Test-only data fixes:
	# 1. Point system-config URLs at the test domain
	# 2. Take all group-buy products offline, re-list the approved ones
	# 3. Set the monthly membership price to 0.01 yuan
	@mysql -h $(test_ip) -P $(test_port) -u root -p$(test_db_pw) xiuxianba \
	-e "UPDATE hg_sys_config SET value='https://test.xxb.xialoukeji.com' WHERE id='54';" \
	-e "UPDATE hg_sys_config SET value='wss://test.xxb.xialoukeji.com/socket' WHERE id='55';" \
	-e "UPDATE hg_sys_config SET value='https://test.xxb.xialoukeji.com/api/cash/payment/notify' WHERE id='143';" \
	-e "UPDATE hg_product SET status = 2 WHERE type = 3;" \
	-e "UPDATE hg_product SET status = 1 WHERE id IN (3457,3456,3455,3454,3453,3452,3451,3450,3449,3448,3447,3446,3445);" \
	-e "UPDATE hg_member_card SET price = 0.01 WHERE id = 1;"
	@echo "同步数据库完成"

# Incremental sync of the tables listed in TABLES.
# For each table: read MAX(id) on both sides; if the source is ahead,
# mysqldump only the rows with id > destination MAX(id) and replay them
# into the destination. Progress is appended to $(LOG_FILE).
# Fix: the inline mysql calls now single-quote the password (-p'...') the
# same way the mysqldump call already did, so a password containing shell
# metacharacters cannot break the command.
.PHONY: sync_table
sync_table:
	@echo "【开始同步】时间：$(current_time)" >> $(LOG_FILE)
	@for TABLE in $(TABLES); do \
		echo "正在同步表: $$TABLE" >> $(LOG_FILE); \
		\
		MAX_ID_SRC=$$(mysql -h$(SRC_HOST) -P$(SRC_PORT) -u$(SRC_USER) -p'$(SRC_PASS)' -Nse "SELECT MAX(id) FROM $(SRC_DB).$$TABLE;"); \
		MAX_ID_DEST=$$(mysql -h$(DEST_HOST) -P$(DEST_PORT) -u$(DEST_USER) -p'$(DEST_PASS)' -Nse "SELECT MAX(id) FROM $(DEST_DB).$$TABLE;"); \
		\
		if [ -z "$$MAX_ID_SRC" ] || [ "$$MAX_ID_SRC" = "NULL" ]; then MAX_ID_SRC=0; fi; \
		if [ -z "$$MAX_ID_DEST" ] || [ "$$MAX_ID_DEST" = "NULL" ]; then MAX_ID_DEST=0; fi; \
		\
		echo "源库最大ID: $$MAX_ID_SRC | 目标库最大ID: $$MAX_ID_DEST" >> $(LOG_FILE); \
		\
		if [ $$MAX_ID_SRC -gt $$MAX_ID_DEST ]; then \
			echo "表 $$TABLE 源库数据较新，准备同步"; \
				OUTPUT_FILE=$(TEMP_DIR)/同步表数据-$$TABLE-$(current_time).sql; \
				mysqldump \
				-h $(SRC_HOST) \
				-P $(SRC_PORT) \
				-u $(SRC_USER) \
				-p'$(SRC_PASS)' \
				--no-create-info \
				--skip-add-drop-table \
				--compact \
				--complete-insert \
				--extended-insert=FALSE \
				--where="id > $$MAX_ID_DEST" \
				$(SRC_DB) $$TABLE > $$OUTPUT_FILE; \
			\
			if [ -s "$$OUTPUT_FILE" ]; then \
				mysql -h$(DEST_HOST) -P$(DEST_PORT) -u$(DEST_USER) -p'$(DEST_PASS)' $(DEST_DB) < $$OUTPUT_FILE; \
				echo "表 $$TABLE 同步成功。" >> $(LOG_FILE); \
			else \
				echo "表 $$TABLE 中无新增数据。" >> $(LOG_FILE); \
			fi; \
		else \
			echo "表 $$TABLE 无需同步，目标库数据已是最新。"; \
		fi; \
	done
	@echo "【同步完成】时间：$(current_time)" >> $(LOG_FILE)
	@echo "" >> $(LOG_FILE)

# Create, in the destination DB, any table present in the source DB but
# missing from the destination (DDL only, no rows).
# How it works: SHOW TABLES on both sides, `comm -23` yields names that
# exist only in the source, then each missing table is dumped with
# --no-data and the collected DDL is replayed against the destination.
# NOTE(review): `comm` requires sorted input — this relies on SHOW TABLES
# returning names in sorted order; confirm this holds for the server in use.
# NOTE(review): the DDL file is opened with `>>`, so a re-run within the
# same minute appends to the previous dump file.
sync_table_structure:
	@echo "【开始同步新增表】时间：$(current_time)" >> $(LOG_FILE)
	@echo "正在获取源库和目标库表列表..." >> $(LOG_FILE)
	@mysql -h $(SRC_HOST) -P $(SRC_PORT) -u $(SRC_USER) -p'$(SRC_PASS)' -Nse "SHOW TABLES FROM $(SRC_DB);" > $(TEMP_DIR)/src_tables.txt
	@mysql -h $(DEST_HOST) -P $(DEST_PORT) -u $(DEST_USER) -p'$(DEST_PASS)' -Nse "SHOW TABLES FROM $(DEST_DB);" > $(TEMP_DIR)/dest_tables.txt
	@comm -23 $(TEMP_DIR)/src_tables.txt $(TEMP_DIR)/dest_tables.txt > $(TEMP_DIR)/missing_tables.txt
	@rm -f $(TEMP_DIR)/src_tables.txt
	@rm -f $(TEMP_DIR)/dest_tables.txt
	@if [ -s $(TEMP_DIR)/missing_tables.txt ]; then \
		echo "发现以下新增表缺失，准备同步：" >> $(LOG_FILE); \
		cat $(TEMP_DIR)/missing_tables.txt | awk '{print "- " $$1}' >> $(LOG_FILE); \
		OUTPUT_FILE=$(TEMP_DIR)/同步新增表-$(current_time).sql; \
		while read -r table; do \
			echo "导出新增表: $$table"; \
			mysqldump \
				-h $(SRC_HOST) -P $(SRC_PORT) -u $(SRC_USER) -p'$(SRC_PASS)' \
				--no-data \
				--skip-add-drop-table \
				--create-options \
				$(SRC_DB) $$table >> $$OUTPUT_FILE; \
			\
		done < $(TEMP_DIR)/missing_tables.txt; \
		if [ -s "$$OUTPUT_FILE" ]; then \
				mysql -h $(DEST_HOST) -P $(DEST_PORT) -u $(DEST_USER) -p'$(DEST_PASS)' $(DEST_DB) < $$OUTPUT_FILE; \
				echo "新增表创建成功。" >> $(LOG_FILE); \
			else \
				echo "新增表导出失败或为空。" >> $(LOG_FILE); \
			fi; \
	else \
		echo "目标库新增表完整，无需同步。" >> $(LOG_FILE); \
	fi
	@rm -f $(TEMP_DIR)/missing_tables.txt
	@echo "【同步新增表完成】时间：$(current_time)" >> $(LOG_FILE)

# Full sync of the tables listed in TABLES_FULL: create the table on the
# destination if missing, then wipe the destination rows and re-import
# everything from the source.
# Fix 1: the restore connection for the table structure previously
# authenticated with $(SRC_PASS); it now uses $(DEST_PASS) (the old code
# only worked because the two passwords happen to coincide).
# Fix 2: removed the redundant unguarded `DELETE FROM ...` that ran before
# the FOREIGN_KEY_CHECKS=0 delete — if it hit an FK constraint it aborted
# the whole statement batch, leaving the table uncleared before import.
.PHONY: sync_table_full
sync_table_full:
	@echo "【开始全量同步】时间：$(current_time)" >> $(LOG_FILE)
	@for TABLE in $(TABLES_FULL); do \
		echo "正在全量同步表: $$TABLE" >> $(LOG_FILE); \
		TABLE_EXISTS=$$(mysql -h $(DEST_HOST) -P $(DEST_PORT) -u $(DEST_USER) -p'$(DEST_PASS)' -Nse \
			"SELECT COUNT(*) FROM information_schema.TABLES WHERE TABLE_SCHEMA='$(DEST_DB)' AND TABLE_NAME='$$TABLE';"); \
		if [ "$$TABLE_EXISTS" -ne 1 ]; then \
			echo "目标库缺少表 $$TABLE，正在导出并创建表结构"; \
			OUTPUT_STRUCT=$(TEMP_DIR)/$$TABLE-$(current_time)-structure.sql; \
			mysqldump \
				-h $(SRC_HOST) -P $(SRC_PORT) -u $(SRC_USER) -p'$(SRC_PASS)' \
				--no-data \
				--skip-add-drop-table \
				--create-options \
				$(SRC_DB) $$TABLE > $$OUTPUT_STRUCT; \
			\
			if [ -s "$$OUTPUT_STRUCT" ]; then \
				mysql -h $(DEST_HOST) -P $(DEST_PORT) -u $(DEST_USER) -p'$(DEST_PASS)' $(DEST_DB) < $$OUTPUT_STRUCT; \
				echo "表结构 $$TABLE 创建成功。" >> $(LOG_FILE); \
			else \
				echo "表结构 $$TABLE 导出失败或为空。" >> $(LOG_FILE); \
			fi; \
		fi; \
		OUTPUT_DATA=$(TEMP_DIR)/全量同步表-$$TABLE-$(current_time).sql; \
		mysqldump \
			-h $(SRC_HOST) -P $(SRC_PORT) -u $(SRC_USER) -p'$(SRC_PASS)' \
			--no-create-info \
			--skip-add-drop-table \
			--compact \
			--complete-insert \
			--extended-insert=FALSE \
			$(SRC_DB) $$TABLE > $$OUTPUT_DATA; \
		if [ -s "$$OUTPUT_DATA" ]; then \
			mysql -h $(DEST_HOST) -P $(DEST_PORT) -u $(DEST_USER) -p'$(DEST_PASS)' \
				-e "SET FOREIGN_KEY_CHECKS=0; DELETE FROM $(DEST_DB).$$TABLE; SET FOREIGN_KEY_CHECKS=1;" \
				|| echo "清空目标表 $$TABLE 失败或表不存在"; \
			\
			mysql -h $(DEST_HOST) -P $(DEST_PORT) -u $(DEST_USER) -p'$(DEST_PASS)' $(DEST_DB) < $$OUTPUT_DATA; \
			echo "表 $$TABLE 数据全量同步成功。" >> $(LOG_FILE); \
		else \
			echo "表 $$TABLE 没有数据可同步。" >> $(LOG_FILE); \
		fi; \
	done
	@echo "【全量同步完成】时间：$(current_time)" >> $(LOG_FILE)


# One-shot build: build the web front-end, copy the bundle into the
# server's static resource path, then build the server with gf.
# Fix: removed the no-op `@cd ../server` line — each recipe line runs in
# its own shell, so that cd never affected the following line; `gf build`
# already runs in make's working directory (the server dir).
# Fix: mkdir -p so the recipe is robust if parent directories are missing.
.PHONY: build
build:
	@rm -rf ./$(ADMIN_RESOURCE_PATH)
	@mkdir -p ./$(ADMIN_RESOURCE_PATH)
	@cd ../web && pnpm run build && \cp -rf ./dist/*  ../server$(ADMIN_RESOURCE_PATH)
	@echo "y" | gf build

# Start all services via gf hot-reload.
# NOTE(review): despite the conventional name, `all` is NOT the default
# goal of this Makefile — the first target (`ctrl`) is.
.PHONY: all
all:
	gf run main.go --args "all"

# Start only the HTTP service (hot reload via gf).
.PHONY: http
http:
	gf run main.go --args "http"

# Start only the queue service (hot reload via gf).
.PHONY: queue
queue:
	gf run main.go --args "queue"

# Start only the cron service (hot reload via gf).
.PHONY: cron
cron:
	gf run main.go --args "cron"

# Start only the auth service (hot reload via gf).
.PHONY: auth
auth:
	gf run main.go --args "auth"

# Start the web front-end development server.
.PHONY: web
web:
	@cd ../web && pnpm run dev

# Refresh casbin permissions.
.PHONY: refresh
refresh:
	@go run main.go tools -m=casbin -a1=refresh

# Clear casbin permissions.
.PHONY: clear
clear:
	@go run main.go tools -m=casbin -a1=clear

# 运行代码质量分析工具
# https://github.com/ywanbing/golangci
#.PHONY: lint
#lint:
#	golangci-lint run
#
#.PHONY: killmain
#killmain:
#	@kill -9 $(ps -ef|grep main|grep -v grep|awk '{print $2}')

# Install/Update to the latest CLI tool.
#.PHONY: cli
#cli:
#	@set -e; \
#	wget -O gf https://github.com/gogf/gf/releases/latest/download/gf_$(shell go env GOOS)_$(shell go env GOARCH) && \
#	chmod +x gf && \
#	./gf install -y && \
#	rm ./gf

# Check and install CLI tool.
#.PHONY: cli.install
#cli.install:
#	@set -e; \
#	gf -v > /dev/null 2>&1 || if [[ "$?" -ne "0" ]]; then \
#  		echo "GoFame CLI is not installed, start proceeding auto installation..."; \
#		make cli; \
#	fi;

## Build image, deploy image and yaml to current kubectl environment and make port forward to local machine.
#.PHONY: start
#start:
#	@set -e; \
#	make image; \
#	make deploy; \
#	make port;
#
## Build docker image and commit to the repository.
## example: make image tag=v0.0.1
#.PHONY: image
#image:
#	@echo "y" | gf docker main.go -p -tn hotgo:$(tag)
#
## Deploy image and yaml to current kubectl environment.
#.PHONY: deploy
#deploy:
#	$(eval _TAG = $(if ${TAG},  ${TAG}, develop))
#	@set -e; \
#	mkdir -p $(ROOT_DIR)/temp/kustomize;\
#	cd $(ROOT_DIR)/manifest/deploy/kustomize/overlays/${_TAG};\
#	kustomize build > $(ROOT_DIR)/temp/kustomize.yaml;\
#	kubectl   apply -f $(ROOT_DIR)/temp/kustomize.yaml; \
#	kubectl   patch -n $(NAMESPACE) deployment/$(DEPLOY_NAME) -p "{\"spec\":{\"template\":{\"metadata\":{\"labels\":{\"date\":\"$(shell date +%s)\"}}}}}";
