GFS2 WAY: Unterschied zwischen den Versionen

Aus Xinux Wiki
Zur Navigation springen Zur Suche springen
 
(14 dazwischenliegende Versionen von 2 Benutzern werden nicht angezeigt)
Zeile 1: Zeile 1:
 +
==Patch '''/usr/lib/ocf/resource.d/heartbeat/Filesystem'''==
 +
i must change this ...
 +
if [ "$DEVICE" != "/dev/null" -a ! -b "$DEVICE" ] ; then
 +
      ocf_log err "Couldn't find device [$DEVICE]. Expected /dev/??? to exist"
 +
to
 +
if [ "$DEVICE" != "/dev/null" -a ! -b "$DEVICE" -a "$FSTYPE" != "gfs2" ] ; then
 +
      ocf_log err "Couldn't find device [$DEVICE]. Expected /dev/??? to exist"
 +
that pacemaker recover a fail node
 +
 
==[ALL]Set up dlm_controld and gfs2==
 
==[ALL]Set up dlm_controld and gfs2==
 
  node fix
 
  node fix
Zeile 94: Zeile 103:
 
         params daemon="dlm_controld" \
 
         params daemon="dlm_controld" \
 
         op monitor interval="120s"
 
         op monitor interval="120s"
primitive resDRBD ocf:linbit:drbd \
+
  primitive resGFSD ocf:pacemaker:controld \
        params drbd_resource="disk0" \
 
        operations $id="resDRBD-operations" \
 
        op monitor interval="20" role="Master" timeout="20" \
 
        op monitor interval="30" role="Slave" timeout="20"
 
primitive resGFSD ocf:pacemaker:controld \
 
 
         params daemon="gfs_controld" args="" \
 
         params daemon="gfs_controld" args="" \
 
         op monitor interval="120s"
 
         op monitor interval="120s"
ms msDRBD resDRBD \
 
        meta resource-stickines="100" notify="true" master-max="2" interleave="true"
 
 
  clone cloneDLM resDLM \
 
  clone cloneDLM resDLM \
 
         meta globally-unique="false" interleave="true"
 
         meta globally-unique="false" interleave="true"
Zeile 115: Zeile 117:
 
         stonith-enabled="false" \
 
         stonith-enabled="false" \
 
         no-quorum-policy="ignore"
 
         no-quorum-policy="ignore"
 +
primitive resDRBD ocf:linbit:drbd \
 +
        params drbd_resource="disk0" \
 +
        operations $id="resDRBD-operations" \
 +
        op monitor interval="20" role="Master" timeout="20" \
 +
        op monitor interval="30" role="Slave" timeout="20"
 +
ms msDRBD resDRBD \
 +
        meta resource-stickines="100" notify="true" master-max="2" interleave="true"
 +
colocation colDLMDRBD inf: cloneDLM msDRBD:Master
 +
order ordDRBDDLM 0: msDRBD:promote cloneDLM
 +
 +
==[ONE]check the filesystem==
 +
drbd-overview
 +
0:disk0  Connected Secondary/Secondary Inconsistent/Inconsistent C r-----
 +
 +
drbdadm secondary disk0
 +
drbdadm disconnect disk0
 +
drbdadm -- --discard-my-data connect disk0
 +
 +
==[OTHER]set to primary==
 +
#drbdadm -- --overwrite-data-of-peer primary disk0
 +
==[ONE]set to primary==
 +
#drbdadm primary disk0
  
 
==[ONE]Now we format one site with gfs2 ==
 
==[ONE]Now we format one site with gfs2 ==
   sudo mkfs.gfs2 -p lock_dlm -j4 -t pacemaker:pcmk /dev/drbd/by-res/disk0
+
   sudo mkfs.gfs2 -p lock_dlm -j2 -t pacemaker:gfs2 /dev/drbd/by-res/disk0
  
==[ALL]Set up dlm_controld and o2cb with drbd and mounting on both sites==
+
==[ALL]Set up dlm_controld and gfs2 with drbd and mounting on both sites==
 
  node fix
 
  node fix
 
  node foxy
 
  node foxy
Zeile 128: Zeile 152:
 
         params drbd_resource="disk0" \
 
         params drbd_resource="disk0" \
 
         operations $id="resDRBD-operations" \
 
         operations $id="resDRBD-operations" \
         op monitor interval="20" role="Master" timeout="20" \
+
         op monitor interval="20" role="Master" timeout="20"
        op monitor interval="30" role="Slave" timeout="20"
 
 
  primitive resGFSD ocf:pacemaker:controld \
 
  primitive resGFSD ocf:pacemaker:controld \
 
         params daemon="gfs_controld" args="" \
 
         params daemon="gfs_controld" args="" \
 
         op monitor interval="120s"
 
         op monitor interval="120s"
 
  ms msDRBD resDRBD \
 
  ms msDRBD resDRBD \
         meta resource-stickines="100" notify="true" master-max="2" interleave="true"
+
         meta resource-stickines="100" notify="true" master-max="2" interleave="true" target-role="Started"
 
  clone cloneDLM resDLM \
 
  clone cloneDLM resDLM \
 
         meta globally-unique="false" interleave="true"
 
         meta globally-unique="false" interleave="true"
 
  clone cloneGFSD resGFSD \
 
  clone cloneGFSD resGFSD \
 
         meta globally-unique="false" interleave="true" target-role="Started"
 
         meta globally-unique="false" interleave="true" target-role="Started"
 
 
  colocation colGFSDDLM inf: cloneGFSD cloneDLM
 
  colocation colGFSDDLM inf: cloneGFSD cloneDLM
 
  order ordDLMGFSD 0: cloneDLM cloneGFSD
 
  order ordDLMGFSD 0: cloneDLM cloneGFSD
   
+
  order ordGFSDFS 0: cloneGFSD cloneFS
 
  property $id="cib-bootstrap-options" \
 
  property $id="cib-bootstrap-options" \
 
         dc-version="1.1.6-9971ebba4494012a93c03b40a2c58ec0eb60f50c" \
 
         dc-version="1.1.6-9971ebba4494012a93c03b40a2c58ec0eb60f50c" \
Zeile 148: Zeile 170:
 
         stonith-enabled="false" \
 
         stonith-enabled="false" \
 
         no-quorum-policy="ignore"
 
         no-quorum-policy="ignore"
 +
 
  primitive resFS ocf:heartbeat:Filesystem \
 
  primitive resFS ocf:heartbeat:Filesystem \
 
         params device="/dev/drbd/by-res/disk0" directory="/opt" fstype="gfs2" \
 
         params device="/dev/drbd/by-res/disk0" directory="/opt" fstype="gfs2" \
Zeile 155: Zeile 178:
 
         meta interleave="true" ordered="true" target-role="Started"
 
         meta interleave="true" ordered="true" target-role="Started"
 
  colocation colFSGFSD inf: cloneFS cloneGFSD
 
  colocation colFSGFSD inf: cloneFS cloneGFSD
  order ordGFSDFS 0: cloneGFSD cloneFS
+
   
  
  

Aktuelle Version vom 19. April 2013, 08:14 Uhr

Patch /usr/lib/ocf/resource.d/heartbeat/Filesystem

We must change the following line ...

if [ "$DEVICE" != "/dev/null" -a ! -b "$DEVICE" ] ; then
     ocf_log err "Couldn't find device [$DEVICE]. Expected /dev/??? to exist"

to

if [ "$DEVICE" != "/dev/null" -a ! -b "$DEVICE" -a "$FSTYPE" != "gfs2" ] ; then
     ocf_log err "Couldn't find device [$DEVICE]. Expected /dev/??? to exist"

... so that Pacemaker can recover a failed node.

[ALL]Set up dlm_controld and gfs2

node fix
node foxy
property $id="cib-bootstrap-options" \
       dc-version="1.1.6-9971ebba4494012a93c03b40a2c58ec0eb60f50c" \
       cluster-infrastructure="cman" \
       stonith-enabled="false" \
       no-quorum-policy="ignore"
primitive resDLM ocf:pacemaker:controld \
       params daemon="dlm_controld" \
       op monitor interval="120s"
primitive resGFSD ocf:pacemaker:controld \
       params daemon="gfs_controld" args="" \
       op monitor interval="120s"
clone cloneDLM resDLM \
       meta globally-unique="false" interleave="true"
clone cloneGFSD resGFSD \
       meta globally-unique="false" interleave="true" target-role="Started"
colocation colGFSDDLM inf: cloneGFSD cloneDLM
order ordDLMGFSD 0: cloneDLM cloneGFSD

Check the cluster status:
 # crm status
============
Last updated: Sat Sep  8 18:01:58 2012
Last change: Sat Sep  8 17:59:08 2012 via cibadmin on fix
Stack: cman
Current DC: fix - partition with quorum
Version: 1.1.6-9971ebba4494012a93c03b40a2c58ec0eb60f50c
2 Nodes configured, unknown expected votes
4 Resources configured.
============

Online: [ fix foxy ]

Clone Set: cloneDLM [resDLM]
    Started: [ fix foxy ]
Clone Set: cloneGFSD [resGFSD]
    Started: [ fix foxy ]

[ALL] Configure drbd

On both nodes create file /etc/drbd.d/disk0.res containing:

resource disk0 {
         protocol C;
        net {
                cram-hmac-alg sha1;
                shared-secret "lucid";
                allow-two-primaries;
                after-sb-0pri discard-zero-changes;
                after-sb-1pri discard-secondary;
                after-sb-2pri disconnect;
    }
        startup {
                become-primary-on both;
       }
       on fix {
               device /dev/drbd0;
               disk /dev/sda3;
               address 10.168.244.161:7788;
               meta-disk internal;
       }
       on foxy {
               device /dev/drbd0;
               disk /dev/sda3;
               address 10.168.244.162:7788;
               meta-disk internal;
       }
}

Pacemaker will handle starting and stopping drbd services, so remove its init script:

sudo update-rc.d -f drbd remove

[ALL] Prepare partitions

Erasing the partition first is not always necessary. Warning: the following command destroys all data on /dev/sda3:

dd if=/dev/zero of=/dev/sda3

Create drbd resource:

sudo drbdadm create-md disk0
You should get:
Writing meta data...
initializing activity log
NOT initialized bitmap
New drbd meta data block successfully created.
success

Start drbd:

sudo service drbd start

[ALL]Set up dlm_controld and gfs2 with drbd

node fix
node foxy
primitive resDLM ocf:pacemaker:controld \
        params daemon="dlm_controld" \
        op monitor interval="120s"
 primitive resGFSD ocf:pacemaker:controld \
        params daemon="gfs_controld" args="" \
        op monitor interval="120s"
clone cloneDLM resDLM \
        meta globally-unique="false" interleave="true"
clone cloneGFSD resGFSD \
        meta globally-unique="false" interleave="true" target-role="Started"
colocation colGFSDDLM inf: cloneGFSD cloneDLM
order ordDLMGFSD 0: cloneDLM cloneGFSD
property $id="cib-bootstrap-options" \
       dc-version="1.1.6-9971ebba4494012a93c03b40a2c58ec0eb60f50c" \
       cluster-infrastructure="cman" \
       stonith-enabled="false" \
       no-quorum-policy="ignore"
primitive resDRBD ocf:linbit:drbd \
        params drbd_resource="disk0" \
        operations $id="resDRBD-operations" \
        op monitor interval="20" role="Master" timeout="20" \
        op monitor interval="30" role="Slave" timeout="20"
ms msDRBD resDRBD \
        meta resource-stickiness="100" notify="true" master-max="2" interleave="true"
colocation colDLMDRBD inf: cloneDLM msDRBD:Master
order ordDRBDDLM 0: msDRBD:promote cloneDLM

[ONE]check the filesystem

drbd-overview 
0:disk0  Connected Secondary/Secondary Inconsistent/Inconsistent C r----- 

drbdadm secondary disk0 
drbdadm disconnect disk0 
drbdadm -- --discard-my-data connect disk0

[OTHER]set to primary

#drbdadm -- --overwrite-data-of-peer primary disk0

[ONE]set to primary

#drbdadm primary disk0

[ONE]Now we format one site with gfs2

 sudo mkfs.gfs2 -p lock_dlm -j2 -t pacemaker:gfs2 /dev/drbd/by-res/disk0

[ALL]Set up dlm_controld and gfs2 with drbd and mounting on both sites

node fix
node foxy
primitive resDLM ocf:pacemaker:controld \
       params daemon="dlm_controld" \
       op monitor interval="120s"
primitive resDRBD ocf:linbit:drbd \
       params drbd_resource="disk0" \
       operations $id="resDRBD-operations" \
       op monitor interval="20" role="Master" timeout="20"
primitive resGFSD ocf:pacemaker:controld \
       params daemon="gfs_controld" args="" \
       op monitor interval="120s"
ms msDRBD resDRBD \
       meta resource-stickiness="100" notify="true" master-max="2" interleave="true" target-role="Started"
clone cloneDLM resDLM \
       meta globally-unique="false" interleave="true"
clone cloneGFSD resGFSD \
       meta globally-unique="false" interleave="true" target-role="Started"
colocation colGFSDDLM inf: cloneGFSD cloneDLM
order ordDLMGFSD 0: cloneDLM cloneGFSD
order ordGFSDFS 0: cloneGFSD cloneFS
property $id="cib-bootstrap-options" \
       dc-version="1.1.6-9971ebba4494012a93c03b40a2c58ec0eb60f50c" \
       cluster-infrastructure="cman" \
       stonith-enabled="false" \
       no-quorum-policy="ignore"

primitive resFS ocf:heartbeat:Filesystem \
       params device="/dev/drbd/by-res/disk0" directory="/opt" fstype="gfs2" \
       op monitor interval="120s" \
       meta target-role="Started"
clone cloneFS resFS \
       meta interleave="true" ordered="true" target-role="Started"
colocation colFSGFSD inf: cloneFS cloneGFSD


Check the cluster status:

#crm status
============
Last updated: Sat Sep  8 18:34:27 2012
Last change: Sat Sep  8 18:32:43 2012 via cibadmin on fix
Stack: cman
Current DC: fix - partition with quorum
Version: 1.1.6-9971ebba4494012a93c03b40a2c58ec0eb60f50c
2 Nodes configured, unknown expected votes
8 Resources configured.
============

Online: [ fix foxy ]

Clone Set: cloneDLM [resDLM]
    Started: [ fix foxy ]
Clone Set: cloneGFSD [resGFSD]
    Started: [ fix foxy ]
Master/Slave Set: msDRBD [resDRBD]
    Masters: [ fix foxy ]
Clone Set: cloneFS [resFS]
    Started: [ fix foxy ]