Corosync与Pacemaker
======================================================
一：准备

1.1：主机时间同步

1.2：主机名与解析主机名必须一致（hostnamectl （或者 hostname）（或者 uname -n）命令 与 /etc/hosts 文件）
    172.18.21.70 node1.magedu.com node1
    172.18.21.71 node2.magedu.com node2
        
    备注：
        CentOS 7 可使用命令 hostnamectl 
        ~]# hostnamectl set-hostname node1.magedu.com
        ~]# hostnamectl set-hostname node2.magedu.com

1.3：安装 pacemaker
    ~]# yum -y install pacemaker


二：配置
2.1：主配置
        corosync配置文件：/etc/corosync/corosync.conf
            totem {}：节点间的通信方式，通信协议、加密与否、……
                interface {}：定义集群心跳及事务信息传递的接口；
            logging {}：日志系统，日志记录方式及存储位置；
            quorum {}：投票系统
            nodelist {}：节点列表

2.2：生成通信密钥（authkey，用于节点间通信加密认证）
    ~]# corosync-keygen -l
    ~]# ll
        -r-------- 1 root root  128 May 29 15:46 authkey


2.22：

三：示例





totem {
    version: 2
    cluster_name: mycluster
    crypto_cipher: aes128
    crypto_hash: sha1
    interface {
        ringnumber: 0
        bindnetaddr: 172.18.0.0
        mcastaddr: 239.55.101.1
        mcastport: 5405
        ttl: 1
    }
}
logging {
    fileline: off
    to_stderr: no
    to_logfile: yes
    logfile: /var/log/cluster/corosync.log
    to_syslog: no
    debug: off
    timestamp: on
    logger_subsys {
        subsys: QUORUM
        debug: off
    }
}
quorum {
    provider: corosync_votequorum
}
nodelist {
    node {
        ring0_addr: 172.18.100.67
        nodeid: 1
    }
    node {
        ring0_addr: 172.18.100.68
        nodeid: 2
    }
}



~]# yum install crmsh-2.1.4-1.1.x86_64.rpm pssh-2.3.1-4.2.x86_64.rpm python-pssh-2.3.1-4.2.x86_64.rpm 
~]# yum -y install nfs-utils


~]# crm
help
status [ bynode | inactive | ops | timing | failcounts ]


        cluster/       Cluster setup and management
                add            Add a new node to the cluster
                health         Cluster health check
                init           Initializes a new HA cluster
                remove         Remove a node from the cluster
                run            Execute an arbitrary command on all nodes
                start          Start cluster services
                status         Cluster status check
                stop           Stop cluster services
                wait_for_startup Wait for cluster to start

        configure/     CIB configuration
                acl_target     Define target access rights
                _test          Help for command _test
                clone          Define a clone
                colocation     Colocate resources
                commit         Commit the changes to the CIB
                default-timeouts Set timeouts for operations to minimums from the meta-data
                delete         Delete CIB objects
                edit           Edit CIB objects
                erase          Erase the CIB
                fencing_topology Node fencing order
                filter         Filter CIB objects
                graph          Generate a directed graph
                group          Define a group
                load           Import the CIB from a file
                location       A location preference
                modgroup       Modify group
                monitor        Add monitor operation to a primitive
                ms             Define a master-slave resource
                node           Define a cluster node
                op_defaults    Set resource operations defaults
                order          Order resources
                primitive      Define a resource
                property       Set a cluster property
                ptest          Show cluster actions if changes were committed
                refresh        Refresh from CIB
                _regtest       Help for command _regtest
                rename         Rename a CIB object
                role           Define role access rights
                rsc_defaults   Set resource defaults
                rsc_template   Define a resource template
                rsc_ticket     Resources ticket dependency
                rsctest        Test resources as currently configured
                save           Save the CIB to a file
                schema         Set or display current CIB RNG schema
                show           Display CIB objects
                _objects       Help for command _objects
                tag            Define resource tags
                upgrade        Upgrade the CIB to version 1.0
                user           Define user access rights
                verify         Verify the CIB with crm_verify
                xml            Raw xml
                cib            CIB shadow management
                cibstatus      CIB status management and editing
                template       Edit and import a configuration from a template

ra
list ocf heartbeat
list stonith
info ocf:heartbeat:IPaddr

list ocf heartbeat  (apache)
list systemd



configure
primitive webip ocf:heartbeat:IPaddr params ip=172.18.21.80

校验：verify 
property stonith-enabled=false

对称集群（资源可在任意节点间迁移）：
property symmetric-cluster=true    # 默认为 true：资源可运行于任意节点；设为 false 则必须为资源显式定义 location 约束


[root@node1 ~]# ip addr list
2: eno16777736: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:fb:47:47 brd ff:ff:ff:ff:ff:ff
    inet 172.18.21.70/16 brd 172.18.255.255 scope global eno16777736
       valid_lft forever preferred_lft forever
    inet 172.18.21.80/16 brd 172.18.255.255 scope global secondary eno16777736
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fefb:4747/64 scope link 
       valid_lft forever preferred_lft forever
[root@node1 ~]# 



手动迁移节点：
crm(live)resource# migrate webip node1.magedu.com


移回原来的节点（第一次启动的地方）：
crm(live)resource# unmigrate webip






sync


crm ra info systemd:nginx


crm(live)configure# primitive webserver systemd:nginx



crm(live)configure# group webservice webip webserver

crm(live)# node standby




modprobe gfs2
~]# lsmod | grep gfs2
gfs2                  580949  0 
dlm                   169682  1 gfs2


drbd：在两台机器之间建立镜像关系，进行块级别复制。仅支持两个节点。


~]# yum -y install nfs-utils
~]# mkdir /www/html -p

~]# vi /etc/exports
/www/html 172.18.0.0/16(rw,no_root_squash)

~]# systemctl start nfs.service






mount -t nfs 172.18.21.70:/www/html /var/www/html




















