I want to set up a multi-node cluster with corosync/pacemaker consisting of six nodes: WEB1 and WEB2, API1 and API2, DB1 and DB2. So far I have set up the DB nodes (with DRBD for filesystem replication, PostgreSQL, and a DB virtual IP) and the API nodes (only an API virtual IP).

Config:

node api1
node api2
node db1 \
        attributes standby="off"
node db2 \
        attributes standby="off"
primitive api_vip ocf:heartbeat:IPaddr2 \
        params ip="API_IP_HERE" nic="eth0" iflabel="apivip" \
        op monitor interval="5"
primitive drbd_pg ocf:linbit:drbd \
        params drbd_resource="pg-claster" \
        op start interval="0" timeout="240" \
        op stop interval="0" timeout="120"
primitive pg_fs ocf:heartbeat:Filesystem \
        params device="/dev/drbd0" directory="/data/pgdb" options="noatime,nodiratime" fstype="ext4" \
        op start interval="0" timeout="60" \
        op stop interval="0" timeout="120"
primitive pg_lsb lsb:postgresql \
        op monitor interval="30" timeout="60" \
        op start interval="0" timeout="60" \
        op stop interval="0" timeout="60"
primitive pg_vip ocf:heartbeat:IPaddr2 \
        params ip="DB_IP_HERE" nic="eth1" iflabel="pgvip" \
        op monitor interval="5"
group PGServer pg_fs pg_lsb pg_vip
ms ms_drbd_pg drbd_pg \
        meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
location cli-prefer-PGServer PGServer \
        rule $id="cli-prefer-rule-PGServer" inf: #uname eq db1
location cli-prefer-api_vip api_vip \
        rule $id="cli-prefer-rule-api_vip" inf: #uname eq api1
location loc-drbd_pg_01 ms_drbd_pg -inf: api1
location loc-drbd_pg_02 ms_drbd_pg -inf: api2
location loc-pg_lsb-prim_01 pg_lsb -inf: api1
location loc-pg_lsb-prim_02 pg_lsb -inf: api2
location loc-pgserver_01 PGServer -inf: api1
location loc-pgserver_02 PGServer -inf: api2
location loc_api_vip_01 api_vip 100: api1
location loc_api_vip_02 api_vip 10: api2
location loc_api_vip_03 api_vip -inf: db1
location loc_api_vip_04 api_vip -inf: db2
colocation col_pg_drbd inf: PGServer ms_drbd_pg:Master
order ord_pg inf: ms_drbd_pg:promote PGServer:start
property $id="cib-bootstrap-options" \
        dc-version="1.1.7-ee0730e13d124c3d58f00016c3376a1de5323cff" \
        cluster-infrastructure="openais" \
        expected-quorum-votes="4" \
        stonith-enabled="false" \
        no-quorum-policy="ignore" \
        default-resource-stickiness="110"

With this config, crm_mon shows me "Failed actions:":

Online: [ db1 db2 api2 api1 ]

 Master/Slave Set: ms_drbd_pg [drbd_pg]
     Masters: [ db1 ]
     Slaves: [ db2 ]
 Resource Group: PGServer
     pg_fs      (ocf::heartbeat:Filesystem):    Started db1
     pg_lsb     (lsb:postgresql):       Started db1
     pg_vip     (ocf::heartbeat:IPaddr2):       Started db1
 api_vip        (ocf::heartbeat:IPaddr2):       Started api1

Failed actions:
    pg_lsb_monitor_0 (node=api2, call=4, rc=5, status=complete): not installed
    drbd_pg:0_monitor_0 (node=api2, call=2, rc=5, status=complete): not installed
    pg_lsb_monitor_0 (node=api1, call=4, rc=5, status=complete): not installed
    drbd_pg:0_monitor_0 (node=api1, call=2, rc=5, status=complete): not installed

So, is this a normal situation, or are there other parameters that determine on which nodes a resource may run and be monitored?

1 Answer

The rc=5 ("not installed") status means the probe on those nodes could not find the resource agent or init script. You're missing the postgresql startup script on both api1 and api2, or the script has incorrect permissions or mode. Check your postgresql startup script on both nodes, then run 'crm resource cleanup drbd_pg'.
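
Note that Pacemaker probes every resource once on each node when it comes online, regardless of -inf location constraints, so if the API nodes are never meant to run PostgreSQL or DRBD, these one-shot monitor_0 failures are expected and cosmetic. A minimal sketch of the checks and cleanup, assuming a Debian-style init script location:

# On api1 and api2: confirm the LSB init script exists and is
# executable (path is an assumption; adjust for your distribution)
ls -l /etc/init.d/postgresql

# After fixing (or deciding to ignore) the missing scripts, clear the
# recorded probe failures so crm_mon stops listing them
crm resource cleanup pg_lsb
crm resource cleanup drbd_pg

As far as I know, Pacemaker 1.1.13 and later can also suppress the probes entirely via a resource-discovery=never option on the -inf location constraints, but that is not available in the 1.1.7 build shown in the question.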