#!/bin/bash
# Deploy a Ceph (Jewel) cluster with ceph-deploy and wire it into OpenStack
# (Glance / Cinder / Nova). Run on the deploy/admin node as root.

# Base build tools plus the Ceph Jewel el7 repository.
# BUGFIX: added -y so the script does not block on interactive yum prompts.
yum -y groupinstall "Development tools"
rpm -ivh https://download.ceph.com/rpm-jewel/el7/noarch/ceph-release-1-1.el7.noarch.rpm
yum -y install python-pip python-devel
pip install --upgrade pip
pip install ceph-deploy==1.5.39

# Pin/upgrade dependencies to silence pip warnings.
pip install python-qpid-proton==0.10 enum-compat
pip install pyudev -U
pip install django-babel -U
# BUGFIX: pip uninstall prompts for confirmation without -y.
pip uninstall -y eventlet
pip install eventlet==0.20.0
# on ceph node1
# yum install ceph-deploy -y
mkdir -p /root/ceph_cluster && cd /root/ceph_cluster
ceph-deploy new server1 server2 server3

# BUGFIX: the original opened vim (blocking the script) and then listed the
# two settings as bare lines, which bash rejects with "command not found".
# Append them non-interactively instead: ceph-deploy's generated ceph.conf
# contains only a [global] section, so appending places them under [global].
cat >> ceph.conf <<'EOF'
osd pool default size = 2
public network = 192.168.0.0/24
EOF

# Install ceph packages on all cluster nodes via ceph-deploy.
ceph-deploy install server1 server2 server3

    # NOTE(review): the indented commands below appear to be the manual
    # per-node equivalent of "ceph-deploy install" (run them on each node
    # if the deploy step fails partway) — confirm intended usage.
    yum clean all
    yum -y install epel-release
    yum -y install yum-plugin-priorities
    rpm --import https://download.ceph.com/keys/release.asc
    rpm -Uvh --replacepkgs https://download.ceph.com/rpm-jewel/el7/noarch/ceph-release-1-1.el7.noarch.rpm
    yum -y install ceph ceph-radosgw

# Deploy monitors on all three nodes, then collect the bootstrap keyrings
# needed by the following OSD step.
ceph-deploy mon create server1 server2 server3
ceph-deploy gatherkeys server1 server2 server3

# One OSD per node on /dev/sdb.
# NOTE(review): assumes /dev/sdb is a dedicated blank disk on every node —
# verify before running; the device will be repartitioned.
ceph-deploy osd create server1:/dev/sdb server2:/dev/sdb server3:/dev/sdb
# Show cluster health to confirm the mons/OSDs came up.
ceph -s

# create storage pool
# One RADOS pool per OpenStack consumer, each with its placement-group
# count (name:pg_num pairs, created in the same order as before).
for pool_spec in volumes:1024 images:128 backups:512 vms:1024; do
  ceph osd pool create "${pool_spec%%:*}" "${pool_spec##*:}"
done

# 2) On the Glance node, install python-rbd.
# BUGFIX: added -y so unattended runs do not hang on the install prompt.
yum -y install python-rbd

# 3) On the Cinder-volume and Nova-compute nodes, install ceph-common.
yum -y install ceph-common

# create ceph client for openstack
# Cephx users with least-privilege caps: cinder needs rwx on volumes/vms and
# read on images (for cloning); glance only needs rwx on images.
ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups'

# NOTE(review): "auth get-key" writes the BARE base64 key, not a real
# [client.x] keyring section, despite the *.keyring filenames. The later
# "virsh secret-set-value --base64 $(cat ...)" step depends on this
# bare-key format — keep the two in sync if either is changed.
# NOTE(review): client.cinder-backup is created above but its key is never
# saved or distributed — confirm whether cinder-backup is actually used.
ceph auth get-key client.cinder >/etc/ceph/ceph.client.cinder.keyring
ceph auth get-key client.glance >/etc/ceph/ceph.client.glance.keyring


            
#glance
# Push the glance key to the Glance node and give the glance service user
# ownership so glance-api can read it.
Node=server3
scp /etc/ceph/ceph.client.glance.keyring $Node:/etc/ceph/
ssh $Node sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring

#nova compute
# The compute node needs the cinder key (to attach RBD volumes) and the
# glance key (to clone images); both owned by the nova user there.
Node=compute1
scp /etc/ceph/ceph.client.cinder.keyring $Node:/etc/ceph/
ssh $Node sudo chown nova:nova /etc/ceph/ceph.client.cinder.keyring
scp /etc/ceph/ceph.client.glance.keyring $Node:/etc/ceph/
ssh $Node sudo chown nova:nova /etc/ceph/ceph.client.glance.keyring

#cinder storage
# The cinder-volume node gets the cinder key, owned by the cinder user.
Node=server3
scp /etc/ceph/ceph.client.cinder.keyring $Node:/etc/ceph/
ssh $Node sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring

#glance
# Switch Glance's image store from local file to Ceph RBD.
# Back up the config first (note: backup lands at glance-api.confbak2 — the
# original filename pattern is kept as-is).
cp -f /etc/glance/glance-api.conf{,bak2}
# Comment out any existing stores/default_store lines so only the values
# appended below are active.
sed -i 's/^stores/#&/' /etc/glance/glance-api.conf
sed -i 's/^default_store/#&/' /etc/glance/glance-api.conf
# BUGFIX: the section header was previously emitted commented out
# ("#[glance_store]"), so the appended keys landed in whatever section
# happened to be last in the file and could be silently ignored. Emit the
# header explicitly.
echo '[glance_store]
stores = rbd,file
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8
'>>/etc/glance/glance-api.conf

# Restart the Glance services to pick up the new store.
systemctl restart openstack-glance-api openstack-glance-registry


# Nova compute node: enable RBD client caching and per-guest admin sockets.
# The single quotes keep $cluster/$type/$id/$pid/$cctid literal so that Ceph
# expands them at runtime, not bash.
echo '
[client]  
rbd cache = true  
rbd cache writethrough until flush = true  
admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok  
log file = /var/log/qemu/qemu-guest-$pid.log  
rbd concurrent management ops = 20  

[client.cinder]
keyring = /etc/ceph/ceph.client.cinder.keyring
'>>/etc/ceph/ceph.conf

# Socket/log directories referenced above; qemu (group libvirt) must be able
# to write them.
mkdir -p /var/run/ceph/guests/ /var/log/qemu/  
chown qemu:libvirt /var/run/ceph/guests /var/log/qemu/  


# Register the Ceph client.cinder key as a libvirt secret so QEMU can open
# RBD volumes. The UUID must match rbd_secret_uuid in nova.conf/cinder.conf.
echo '
<secret ephemeral="no" private="no">    
<uuid>44b64b4c-4d2c-11e8-a1e0-ac1f6b11963e</uuid>    
<usage type="ceph">    
<name>client.cinder secret</name>    
</usage>    
</secret>  
'>secret.xml
# Copy the definition to the other nodes.
# NOTE(review): secret-define / secret-set-value below only run on THIS
# host — they must be repeated on server2 and server3 as well.
scp secret.xml server2:/root/
scp secret.xml server3:/root/
virsh secret-define --file secret.xml
# BUGFIX: quoted the command substitution (unquoted $(cat ...) is subject to
# word-splitting/globbing). The keyring file holds the bare base64 key
# (written earlier with "ceph auth get-key"), so its contents ARE the value.
virsh secret-set-value --secret 44b64b4c-4d2c-11e8-a1e0-ac1f6b11963e --base64 "$(cat /etc/ceph/ceph.client.cinder.keyring)"

# Nova libvirt settings: boot instances directly from RBD (vms pool) using
# the cinder cephx user/secret registered above.
# NOTE(review): this appends blindly — if nova.conf already contains a
# [libvirt] section, the duplicate section relies on oslo.config merging;
# verify for the deployed release. Also, live_migration_flag with
# VIR_MIGRATE_TUNNELLED is known to be problematic / deprecated on newer
# releases — confirm before relying on live migration.
echo '
[libvirt]
virt_type = kvm

images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = 44b64b4c-4d2c-11e8-a1e0-ac1f6b11963e
disk_cachemodes="network=writeback"
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
#禁用文件注入#
libvirt_inject_password = false
libvirt_inject_key = false
libvirt_inject_partition = -2
'>>/etc/nova/nova.conf

# Cinder storage node: add a Ceph RBD backend.

# Append ",ceph" to the existing enabled_backends line.
# NOTE(review): this sed is a silent no-op if enabled_backends is absent or
# commented out in cinder.conf — verify the option exists in [DEFAULT].
sed -i 's/^enabled_backends.*/&,ceph/' /etc/cinder/cinder.conf
echo '
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = cinder  
rbd_secret_uuid = 44b64b4c-4d2c-11e8-a1e0-ac1f6b11963e
'>>/etc/cinder/cinder.conf

# Restart cinder-volume so it picks up the new backend.
systemctl restart openstack-cinder-volume.service

# Verification

# On the OpenStack controller node
source ~/admin-openstack.sh

# Check that cinder-volume lists a ...@ceph backend
cinder service-list

# Upload a test image in RAW format.
# BUGFIX: the CirrOS download is a qcow2 file and was uploaded with
# --disk-format qcow2, contradicting the original "use raw" note. An
# RBD-backed Glance needs raw images so Cinder/Nova can copy-on-write
# clone them — convert before uploading.
source ~/admin-openstack.sh
wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
qemu-img convert -f qcow2 -O raw cirros-0.4.0-x86_64-disk.img cirros-0.4.0-x86_64-disk.raw
openstack image create "cirros-ceph" \
  --file cirros-0.4.0-x86_64-disk.raw \
  --disk-format raw --container-format bare \
  --public

# Confirm the upload succeeded
openstack image list

# Create a VM (cpu16 is the availability zone)
# BUGFIX: boot from the "cirros-ceph" image created above; the original
# referenced "cirros2", which is never created in this script — confirm no
# pre-existing image of that name was intended.
NET=de98a7e6-6aaf-4569-b0bf-971cfb4ffbc8
nova boot --flavor m1.nano --image cirros-ceph \
  --nic net-id=$NET \
  --security-group default --key-name mykey \
  --availability-zone cpu16 \
  kvm04

# Check the instance is ACTIVE
openstack server list

# Console URL for the instance
openstack console url show kvm04

# Create a 1 GB volume (backed by the ceph "volumes" pool)
openstack volume create --size 1 disk01
#openstack volume list
# Attach the volume to kvm04
openstack server add volume kvm04 disk01

###########################
# On the ceph admin node: confirm usage is landing in the right pools.
ceph df
# List RBD images per pool — the VM disk should appear under vms, the
# attached volume under volumes, and the uploaded image under images.
rbd -p vms ls
rbd -p volumes ls
rbd -p images ls

###########################
# References
# http://click.aliyun.com/m/16677/
# http://blog.csdn.net/watermelonbig/article/details/51116173
# http://blog.csdn.net/Tomstrong_369/article/details/53330734
# https://www.cnblogs.com/sammyliu/p/4804037.html


# Tear-down (DESTRUCTIVE — wipes the whole cluster).
# BUGFIX: the section headers below were bare prose lines that bash would
# try to execute; they are now comments. Also, the node names said
# ceph1..ceph3 while this cluster was deployed on server1..server3 —
# fixed to match (verify before running).

# Remove the ceph packages from every node
ceph-deploy purge server1 server2 server3

# Remove configuration/data and forget all keys
ceph-deploy purgedata server1 server2 server3
ceph-deploy forgetkeys

# On EVERY node, delete leftover state files
rm -rf /var/lib/ceph/osd/*
rm -rf /var/lib/ceph/mon/*
rm -rf /var/lib/ceph/mds/*
rm -rf /var/lib/ceph/bootstrap-mds/*
rm -rf /var/lib/ceph/bootstrap-osd/*
rm -rf /var/lib/ceph/bootstrap-mon/*
rm -rf /var/lib/ceph/tmp/*
rm -rf /etc/ceph/*
rm -rf /var/run/ceph/*
