#!/bin/bash
#
# Bootstrap OSDs for a local Ceph dev cluster: create a CRUSH host bucket
# for this machine and separate CRUSH roots/rules per device class
# (mem / pmem / nvme), then create, register, and start every osd.N
# declared in /etc/ceph/ceph.conf.
#
# Must be run from the Ceph build directory (expects ./bin/ceph*).

set -x

# dir="/mnt/yanyi/ceph/build/"
# cd ${dir}

hostname=$(hostname)

# Create a host bucket for this machine in the CRUSH map.
bin/ceph osd crush add-bucket "${hostname}" host
# Create a separate root per device class (mems/pmems/nvmes), plus a
# replicated rule per class that chooses replicas among OSDs under it.
for class in mem pmem nvme; do
	bin/ceph osd crush add-bucket "${class}s" root
	bin/ceph osd crush rule create-replicated "${class}_rule" "${class}s" osd
done
sleep 1

# Count the [osd.N] sections declared in ceph.conf; one OSD is created
# per section. (grep reads the file directly — no 'cat' pipeline needed.)
num_osds=$(grep -Po "^\[\Kosd\.\d+(?=\])" /etc/ceph/ceph.conf | wc -l)
for (( i = 0; i < num_osds; i++ )); do
	# Allocate the next free OSD id from the cluster.
	osd_id=$(bin/ceph osd create)
	mkdir -p "/data/osd.${osd_id}"

	# Initialize the OSD data directory and generate its keyring.
	bin/ceph-osd -i "${osd_id}" --mkfs --mkkey --no-mon-config

	# Register the OSD's keyring with the cluster auth database.
	bin/ceph auth add "osd.${osd_id}" osd 'allow *' mon 'allow profile osd' -i "/data/osd.${osd_id}/keyring"

	# Add the OSD (weight 1.0) under the default root.
	bin/ceph osd crush add "osd.${osd_id}" 1.0 root=default

	# Add the OSD under this machine's host bucket.
	bin/ceph osd crush add "osd.${osd_id}" 1.0 host="${hostname}"

	# Place the OSD under the root matching its device class, read from
	# the device label in its [osd.N] section.
	# NOTE: requires the device to be labeled in /etc/ceph/ceph.conf.
	# Pattern order matters: *pmem* must be tested before *mem*, since
	# "pmem" also matches the *mem* glob.
	case $(grep -Po "^\[osd\.${osd_id}\].+:\s+\K\S+" /etc/ceph/ceph.conf) in
		*pmem*)
			bin/ceph osd crush add "osd.${osd_id}" 1.0 root=pmems
			;;
		*mem*)
			bin/ceph osd crush add "osd.${osd_id}" 1.0 root=mems
			;;
		*nvme*)
			bin/ceph osd crush add "osd.${osd_id}" 1.0 root=nvmes
			;;
	esac

	# Launch the OSD daemon; brief pause so startups don't stampede.
	bin/ceph-osd -i "${osd_id}"
	sleep 1
done

