# Copy the OpenStack client keyrings to the service hosts and give the
# service accounts ownership (per the Ceph rbd-openstack guide).
ceph auth get-or-create client.glance | ssh {your-glance-api-server} sudo tee /etc/ceph/ceph.client.glance.keyring
ssh {your-glance-api-server} sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring
# FIX: use one consistent placeholder for the cinder-volume host — the
# original mixed {your-volume-server} and {your-cinder-volume-server},
# so the keyring and the chown could land on different machines.
ceph auth get-or-create client.cinder | ssh {your-cinder-volume-server} sudo tee /etc/ceph/ceph.client.cinder.keyring
ssh {your-cinder-volume-server} sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
ceph auth get-or-create client.cinder-backup | ssh {your-cinder-backup-server} sudo tee /etc/ceph/ceph.client.cinder-backup.keyring
ssh {your-cinder-backup-server} sudo chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring

# on server1 (the ceph client): install the Ceph CLI/library package,
# then create the RBD pools used by Cinder, Glance and Nova.
yum -y install ceph-common

for pool in volumes images vms; do
  ceph osd pool create "$pool" 128
done

# Push ceph.conf to every node, then create the OpenStack client users with
# the capabilities recommended by the Ceph rbd-openstack guide.
ceph-deploy config push server1 server2 server3
ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups'

# Write the local keyring files.
# FIX: 'ceph auth get-key' prints only the bare base64 key, which is NOT a
# valid keyring file; 'get-or-create' emits the full "[client.x]\nkey = ..."
# stanza, so use it for both files.
ceph auth get-or-create client.cinder >/etc/ceph/ceph.client.cinder.keyring
ceph auth get-or-create client.glance >/etc/ceph/ceph.client.glance.keyring

# Distribute the keyrings (generic template, repeated from the top of the
# file; the concrete server2 commands follow below).
ceph auth get-or-create client.glance | ssh {your-glance-api-server} sudo tee /etc/ceph/ceph.client.glance.keyring
ssh {your-glance-api-server} sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring
# FIX: one consistent placeholder for the cinder-volume host — the original
# mixed {your-volume-server} and {your-cinder-volume-server}.
ceph auth get-or-create client.cinder | ssh {your-cinder-volume-server} sudo tee /etc/ceph/ceph.client.cinder.keyring
ssh {your-cinder-volume-server} sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
ceph auth get-or-create client.cinder-backup | ssh {your-cinder-backup-server} sudo tee /etc/ceph/ceph.client.cinder-backup.keyring
ssh {your-cinder-backup-server} sudo chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring

# server2 runs cinder-backup in this deployment.
ceph auth get-or-create client.cinder-backup | ssh server2 sudo tee /etc/ceph/ceph.client.cinder-backup.keyring
ssh server2 sudo chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring

# glance client keyring: copy to every controller and hand ownership to
# the glance service account.
for Node in controller1 controller2 controller3; do
  scp /etc/ceph/ceph.client.glance.keyring $Node:/etc/ceph/
  ssh $Node sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring
done

# cinder volume client keyring -> the cinder-volume host (server2)
Node=server2
scp /etc/ceph/ceph.client.cinder.keyring "$Node":/etc/ceph/
ssh "$Node" sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring

# compute nodes: nova needs the cinder key (to attach RBD volumes) and its
# own nova-owned copy of the glance keyring.
for Node in server1 server3; do
  scp /etc/ceph/ceph.client.cinder.keyring $Node:/etc/ceph/
  ssh $Node sudo chown nova:nova /etc/ceph/ceph.client.cinder.keyring
  scp /etc/ceph/ceph.client.glance.keyring $Node:/etc/ceph/ceph.client.glance-nova.keyring
  ssh $Node sudo chown nova:nova /etc/ceph/ceph.client.glance-nova.keyring
done

# controller node: glance client settings — store images in the ceph
# 'images' pool via the rbd backend.
# (Alternative: comment out any pre-existing store settings instead:)
# sed -i 's/^stores/#&/' /etc/glance/glance-api.conf
# sed -i 's/^default_store/#&/' /etc/glance/glance-api.conf
glance_conf=/etc/glance/glance-api.conf
openstack-config --set "$glance_conf" DEFAULT show_image_direct_url true

openstack-config --set "$glance_conf" glance_store stores rbd,file
openstack-config --set "$glance_conf" glance_store default_store rbd
openstack-config --set "$glance_conf" glance_store rbd_store_pool images
openstack-config --set "$glance_conf" glance_store rbd_store_user glance
openstack-config --set "$glance_conf" glance_store rbd_store_ceph_conf /etc/ceph/ceph.conf
openstack-config --set "$glance_conf" glance_store rbd_store_chunk_size 8

systemctl restart openstack-glance-api openstack-glance-registry

# compute node

# Append an RBD client section to ceph.conf: enable librbd write-back
# caching and per-guest admin sockets / QEMU log files. The string is
# single-quoted on purpose — $cluster/$type/$id/$pid/$cctid must reach
# ceph.conf literally; Ceph expands them at runtime.
# NOTE(review): '>>' appends, so running this block twice duplicates the
# section in ceph.conf.
echo '
[client]  
rbd cache = true 
rbd cache writethrough until flush = true 
admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok  
log file = /var/log/qemu/qemu-guest-$pid.log  
rbd concurrent management ops = 20  
[client.cinder]
keyring = /etc/ceph/ceph.client.cinder.keyring
'>>/etc/ceph/ceph.conf

 
# Directories for the admin sockets and guest logs, owned by the user QEMU
# runs as so librbd can create its sockets there.
mkdir -p /var/run/ceph/guests/ /var/log/qemu/ 
chown qemu:libvirt /var/run/ceph/guests /var/log/qemu/
# Register the cinder key as a libvirt secret so QEMU can authenticate to
# Ceph. The UUID is fixed (not generated per host) so that every compute
# node registers the same secret; nova.conf's rbd_secret_uuid must match.
MyUID=4cbc9a9c-53f0-11e8-b33b-ac1f6b11963e
# FIX: 'virsh secret-set-value --base64' expects the bare base64 key, not
# the whole keyring file the original cat'ed in; 'ceph auth get-key' prints
# exactly the bare key.
Key=$(ceph auth get-key client.cinder)
cat >ceph.xml <<EOF
<secret ephemeral="no" private="no">
  <uuid>${MyUID}</uuid>
  <usage type="ceph">
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF
virsh secret-define --file ceph.xml
virsh secret-set-value --secret "$MyUID" --base64 "$Key"

# compute node: nova.conf — boot instance disks from the ceph 'vms' pool.
novafile=/etc/nova/nova.conf
openstack-config --set $novafile libvirt images_type rbd
openstack-config --set $novafile libvirt images_rbd_pool vms
openstack-config --set $novafile libvirt images_rbd_ceph_conf /etc/ceph/ceph.conf
openstack-config --set $novafile libvirt rbd_user cinder
# Must equal the UUID registered with 'virsh secret-define' on this host.
openstack-config --set $novafile libvirt rbd_secret_uuid $MyUID
openstack-config --set $novafile libvirt disk_cachemodes \"network=writeback\"
openstack-config --set $novafile libvirt live_migration_flag \"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"
# FIX: inside the [libvirt] section the option names carry no 'libvirt_'
# prefix (see the Ceph rbd-openstack guide): inject_password, inject_key,
# inject_partition. The original libvirt_inject_* keys are silently ignored.
openstack-config --set $novafile libvirt inject_password false
openstack-config --set $novafile libvirt inject_key false
openstack-config --set $novafile libvirt inject_partition -2

systemctl restart libvirtd.service openstack-nova-compute.service

# storage node: on server2 (cinder client) — add a 'ceph' RBD backend.
settingfile=/etc/cinder/cinder.conf
# FIX: append ',ceph' to enabled_backends only once — the original sed
# appended on every run, producing 'lvm,ceph,ceph,...'.
grep -q '^enabled_backends.*ceph' "$settingfile" || \
  sed -i 's/^enabled_backends.*/&,ceph/' "$settingfile"
openstack-config --set $settingfile ceph volume_driver cinder.volume.drivers.rbd.RBDDriver
openstack-config --set $settingfile ceph rbd_pool volumes
openstack-config --set $settingfile ceph rbd_ceph_conf /etc/ceph/ceph.conf
openstack-config --set $settingfile ceph rbd_flatten_volume_from_snapshot false
openstack-config --set $settingfile ceph rbd_max_clone_depth 5
openstack-config --set $settingfile ceph rbd_store_chunk_size 4
openstack-config --set $settingfile ceph rados_connect_timeout -1
openstack-config --set $settingfile ceph glance_api_version 2
openstack-config --set $settingfile ceph rbd_user cinder
# NOTE(review): $MyUID is set in the libvirt-secret step earlier in this
# file; if this section is run standalone on server2, export the same UUID
# first or the option is written empty — confirm before running.
openstack-config --set $settingfile ceph rbd_secret_uuid $MyUID
openstack-config --set $settingfile ceph volume_backend_name ceph

systemctl restart openstack-cinder-volume.service


# Test

source ./admin-openstack.sh

# FIX: the cirros "disk.img" download is a qcow2 file, but with an RBD
# backend images should be stored raw (a qcow2 image uploaded as 'raw'
# makes instance creation fail, as the original note below observed), so
# convert before uploading.
qemu-img convert -f qcow2 -O raw cirros-0.4.0-x86_64-disk.img cirros-0.4.0-x86_64-disk.raw
openstack image create "cirros2" \
  --file cirros-0.4.0-x86_64-disk.raw \
  --disk-format raw --container-format bare \
  --public

# A qcow2 image can be uploaded as-is, but creating an instance from it
# then errors out.

# If 'ceph -s' reports "HEALTH_WARN application not enabled on 1 pool(s)",
# run 'ceph health detail' — it prints the suggested remediation command.