hvjunk
2017-May-09 15:01 UTC
[Gluster-users] gdeploy not starting all the daemons for NFS-ganesha :(
Hi there,
Given the following config file, what am I doing wrong that causes the error at the
bottom and leaves /gluster_shared_storage unmounted?
Hendrik
[root@linked-clone-of-centos-linux ~]# cat t.conf
[hosts]
10.10.10.11
10.10.10.12
10.10.10.13

[backend-setup]
devices=/dev/sdb
mountpoints=/gluster/brick1
brick_dirs=/gluster/brick1/one
pools=pool1

#Installing nfs-ganesha
[yum]
action=install
repolist=
gpgcheck=no
update=no
packages=glusterfs-ganesha

[service]
action=start
service=glusterd

[service]
action=enable
service=glusterd

#This will create a volume. Skip this section if your volume already exists
[volume]
action=create
volname=ganesha
transport=tcp
replica_count=3
arbiter_count=1
force=yes

[clients]
action=mount
volname=ganesha
hosts=10.10.10.11,10.10.10.12,10.10.10.13
fstype=glusterfs
client_mount_points=/mnt/ganesha_mnt

#Creating a high availability cluster and exporting the volume
[nfs-ganesha]
action=create-cluster
ha-name=ganesha-ha-360
cluster-nodes=10.10.10.11,10.10.10.12
vip=10.10.10.31,10.10.10.41
volname=ganesha

[nfs-ganesha]
action=export-volume
volname=ganesha

[nfs-ganesha]
action=refresh-config
volname=ganesha
[root@linked-clone-of-centos-linux ~]#
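
(Side note, and this is my assumption rather than anything set in the config above: the nfs-ganesha HA scripts seem to expect Gluster's shared storage volume to be enabled and mounted on every node, and I had assumed the [nfs-ganesha] create-cluster step would take care of that. As far as I know it is normally done cluster-wide with something like:)

  # my assumption: this cluster-wide option creates the gluster_shared_storage
  # volume and mounts it on every node under /var/run/gluster/shared_storage
  gluster volume set all cluster.enable-shared-storage enable
  # quick check that the mount actually exists on each node
  df -h /var/run/gluster/shared_storage
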
[root@linked-clone-of-centos-linux ~]# gdeploy -c t.conf -k
PLAY [gluster_servers] ****************************************************************

TASK [Clean up filesystem signature] **************************************************
skipping: [10.10.10.11] => (item=/dev/sdb)
skipping: [10.10.10.12] => (item=/dev/sdb)
skipping: [10.10.10.13] => (item=/dev/sdb)

TASK [Create Physical Volume] *********************************************************
changed: [10.10.10.13] => (item=/dev/sdb)
changed: [10.10.10.11] => (item=/dev/sdb)
changed: [10.10.10.12] => (item=/dev/sdb)

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.12 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.13 : ok=1 changed=1 unreachable=0 failed=0

PLAY [gluster_servers] ****************************************************************

TASK [Create volume group on the disks] ***********************************************
changed: [10.10.10.12] => (item={u'brick': u'/dev/sdb', u'vg': u'GLUSTER_vg1'})
changed: [10.10.10.13] => (item={u'brick': u'/dev/sdb', u'vg': u'GLUSTER_vg1'})
changed: [10.10.10.11] => (item={u'brick': u'/dev/sdb', u'vg': u'GLUSTER_vg1'})

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.12 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.13 : ok=1 changed=1 unreachable=0 failed=0

PLAY [gluster_servers] ****************************************************************

TASK [Create logical volume named metadata] *******************************************
changed: [10.10.10.12] => (item=GLUSTER_vg1)
changed: [10.10.10.13] => (item=GLUSTER_vg1)
changed: [10.10.10.11] => (item=GLUSTER_vg1)

TASK [create data LV that has a size which is a multiple of stripe width] *************
changed: [10.10.10.13] => (item={u'lv': u'GLUSTER_lv1', u'pool': u'pool1', u'vg': u'GLUSTER_vg1'})
changed: [10.10.10.11] => (item={u'lv': u'GLUSTER_lv1', u'pool': u'pool1', u'vg': u'GLUSTER_vg1'})
changed: [10.10.10.12] => (item={u'lv': u'GLUSTER_lv1', u'pool': u'pool1', u'vg': u'GLUSTER_vg1'})

TASK [Convert the logical volume] *****************************************************
changed: [10.10.10.11] => (item={u'lv': u'GLUSTER_lv1', u'pool': u'pool1', u'vg': u'GLUSTER_vg1'})
changed: [10.10.10.13] => (item={u'lv': u'GLUSTER_lv1', u'pool': u'pool1', u'vg': u'GLUSTER_vg1'})
changed: [10.10.10.12] => (item={u'lv': u'GLUSTER_lv1', u'pool': u'pool1', u'vg': u'GLUSTER_vg1'})

TASK [create stripe-aligned thin volume] **********************************************
changed: [10.10.10.13] => (item={u'lv': u'GLUSTER_lv1', u'pool': u'pool1', u'vg': u'GLUSTER_vg1'})
changed: [10.10.10.11] => (item={u'lv': u'GLUSTER_lv1', u'pool': u'pool1', u'vg': u'GLUSTER_vg1'})
changed: [10.10.10.12] => (item={u'lv': u'GLUSTER_lv1', u'pool': u'pool1', u'vg': u'GLUSTER_vg1'})

TASK [Change the attributes of the logical volume] ************************************
changed: [10.10.10.11] => (item={u'lv': u'GLUSTER_lv1', u'pool': u'pool1', u'vg': u'GLUSTER_vg1'})
changed: [10.10.10.13] => (item={u'lv': u'GLUSTER_lv1', u'pool': u'pool1', u'vg': u'GLUSTER_vg1'})
changed: [10.10.10.12] => (item={u'lv': u'GLUSTER_lv1', u'pool': u'pool1', u'vg': u'GLUSTER_vg1'})

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=5 changed=5 unreachable=0 failed=0
10.10.10.12 : ok=5 changed=5 unreachable=0 failed=0
10.10.10.13 : ok=5 changed=5 unreachable=0 failed=0
PLAY [gluster_servers] ****************************************************************

TASK [Create an xfs filesystem] *******************************************************
changed: [10.10.10.13] => (item=/dev/GLUSTER_vg1/GLUSTER_lv1)
changed: [10.10.10.12] => (item=/dev/GLUSTER_vg1/GLUSTER_lv1)
changed: [10.10.10.11] => (item=/dev/GLUSTER_vg1/GLUSTER_lv1)

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.12 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.13 : ok=1 changed=1 unreachable=0 failed=0

PLAY [gluster_servers] ****************************************************************

TASK [Create the backend disks, skips if present] *************************************
changed: [10.10.10.13] => (item={u'device': u'/dev/GLUSTER_vg1/GLUSTER_lv1', u'path': u'/gluster/brick1'})
changed: [10.10.10.12] => (item={u'device': u'/dev/GLUSTER_vg1/GLUSTER_lv1', u'path': u'/gluster/brick1'})
changed: [10.10.10.11] => (item={u'device': u'/dev/GLUSTER_vg1/GLUSTER_lv1', u'path': u'/gluster/brick1'})

TASK [Mount the volumes] **************************************************************
changed: [10.10.10.11] => (item={u'device': u'/dev/GLUSTER_vg1/GLUSTER_lv1', u'path': u'/gluster/brick1'})
changed: [10.10.10.12] => (item={u'device': u'/dev/GLUSTER_vg1/GLUSTER_lv1', u'path': u'/gluster/brick1'})
changed: [10.10.10.13] => (item={u'device': u'/dev/GLUSTER_vg1/GLUSTER_lv1', u'path': u'/gluster/brick1'})

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=2 changed=2 unreachable=0 failed=0
10.10.10.12 : ok=2 changed=2 unreachable=0 failed=0
10.10.10.13 : ok=2 changed=2 unreachable=0 failed=0

PLAY [gluster_servers] ****************************************************************

TASK [Set SELinux labels on the bricks] ***********************************************
changed: [10.10.10.11] => (item={u'device': u'/dev/GLUSTER_vg1/GLUSTER_lv1', u'path': u'/gluster/brick1'})
changed: [10.10.10.12] => (item={u'device': u'/dev/GLUSTER_vg1/GLUSTER_lv1', u'path': u'/gluster/brick1'})
changed: [10.10.10.13] => (item={u'device': u'/dev/GLUSTER_vg1/GLUSTER_lv1', u'path': u'/gluster/brick1'})

TASK [Restore the SELinux context] ****************************************************
changed: [10.10.10.11] => (item={u'device': u'/dev/GLUSTER_vg1/GLUSTER_lv1', u'path': u'/gluster/brick1'})
changed: [10.10.10.12] => (item={u'device': u'/dev/GLUSTER_vg1/GLUSTER_lv1', u'path': u'/gluster/brick1'})
changed: [10.10.10.13] => (item={u'device': u'/dev/GLUSTER_vg1/GLUSTER_lv1', u'path': u'/gluster/brick1'})

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=2 changed=2 unreachable=0 failed=0
10.10.10.12 : ok=2 changed=2 unreachable=0 failed=0
10.10.10.13 : ok=2 changed=2 unreachable=0 failed=0
PLAY [gluster_servers] ****************************************************************

TASK [Create/Enabling yum repos] ******************************************************
skipping: [10.10.10.12] => (item=)
skipping: [10.10.10.11] => (item=)
skipping: [10.10.10.13] => (item=)

TASK [Clean up the metadata] **********************************************************
skipping: [10.10.10.12]
skipping: [10.10.10.11]
skipping: [10.10.10.13]

TASK [Yum update] *********************************************************************
skipping: [10.10.10.12]
skipping: [10.10.10.11]
skipping: [10.10.10.13]

TASK [Installs/Removes a package using yum] *******************************************
changed: [10.10.10.12]
changed: [10.10.10.13]
changed: [10.10.10.11]

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.12 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.13 : ok=1 changed=1 unreachable=0 failed=0

PLAY [gluster_servers] ****************************************************************

TASK [Enable or disable services] *****************************************************
changed: [10.10.10.12] => (item=glusterd)
changed: [10.10.10.11] => (item=glusterd)
changed: [10.10.10.13] => (item=glusterd)

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.12 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.13 : ok=1 changed=1 unreachable=0 failed=0

PLAY [gluster_servers] ****************************************************************

TASK [start/stop/restart/reload services] *********************************************
changed: [10.10.10.11] => (item=glusterd)
changed: [10.10.10.12] => (item=glusterd)
changed: [10.10.10.13] => (item=glusterd)

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.12 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.13 : ok=1 changed=1 unreachable=0 failed=0

PLAY [gluster_servers] ****************************************************************

TASK [Create the brick dirs, skips if present] ****************************************
changed: [10.10.10.11] => (item=/gluster/brick1/one)
changed: [10.10.10.12] => (item=/gluster/brick1/one)
changed: [10.10.10.13] => (item=/gluster/brick1/one)

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.12 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.13 : ok=1 changed=1 unreachable=0 failed=0
PLAY [master] *************************************************************************

TASK [Creates a Trusted Storage Pool] *************************************************
changed: [10.10.10.11]

TASK [Pause for a few seconds] ********************************************************
Pausing for 5 seconds
(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)
ok: [10.10.10.11]

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=2 changed=1 unreachable=0 failed=0

PLAY [master] *************************************************************************

TASK [Creates a volume] ***************************************************************
changed: [10.10.10.11]

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=1 changed=1 unreachable=0 failed=0

PLAY [master] *************************************************************************

TASK [Starts a volume] ****************************************************************
changed: [10.10.10.11]

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=1 changed=1 unreachable=0 failed=0

PLAY [clients] ************************************************************************

TASK [Create the dir to mount the volume, skips if present] ***************************
changed: [10.10.10.13] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
changed: [10.10.10.11] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
changed: [10.10.10.12] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.12 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.13 : ok=1 changed=1 unreachable=0 failed=0

PLAY [clients] ************************************************************************

TASK [Mount the volumes, if fstype is glusterfs] **************************************
changed: [10.10.10.13] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
changed: [10.10.10.11] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
changed: [10.10.10.12] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.12 : ok=1 changed=1 unreachable=0 failed=0
10.10.10.13 : ok=1 changed=1 unreachable=0 failed=0
PLAY [clients] ************************************************************************

TASK [Gathering Facts] ****************************************************************
ok: [10.10.10.11]
ok: [10.10.10.12]
ok: [10.10.10.13]

TASK [Uncomment STATD_PORT for rpc.statd to listen on] ********************************
skipping: [10.10.10.12] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.11] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.13] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})

TASK [Uncomment LOCKD_TCPPORT for rpc.lockd to listen on] *****************************
skipping: [10.10.10.12] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.11] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.13] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})

TASK [Uncomment LOCKD_UDPPORT for rpc.lockd to listen on] *****************************
skipping: [10.10.10.11] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.12] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.13] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})

TASK [Uncomment MOUNTD_PORT for rpc.mountd to listen on] ******************************
skipping: [10.10.10.12] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.11] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.13] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})

TASK [Restart nfs service (RHEL 6 only)] **********************************************
skipping: [10.10.10.11] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.12] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.13] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})

TASK [Restart rpc-statd service] ******************************************************
skipping: [10.10.10.11] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.12] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.13] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})

TASK [Restart nfs-config service] *****************************************************
skipping: [10.10.10.11] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.12] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.13] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})

TASK [Restart nfs-mountd service] *****************************************************
skipping: [10.10.10.12] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.11] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.13] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})

TASK [Restart nfslock service (RHEL 6 & 7)] *******************************************
skipping: [10.10.10.12] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.11] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.13] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})

TASK [Mount the volumes if fstype is NFS] *********************************************
skipping: [10.10.10.12] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.11] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.13] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=1 changed=0 unreachable=0 failed=0
10.10.10.12 : ok=1 changed=0 unreachable=0 failed=0
10.10.10.13 : ok=1 changed=0 unreachable=0 failed=0

PLAY [clients] ************************************************************************

TASK [Mount the volumes, if fstype is CIFS] *******************************************
skipping: [10.10.10.12] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.11] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})
skipping: [10.10.10.13] => (item={u'mountpoint': u'/mnt/ganesha_mnt', u'fstype': u'fuse'})

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=0 changed=0 unreachable=0 failed=0
10.10.10.12 : ok=0 changed=0 unreachable=0 failed=0
10.10.10.13 : ok=0 changed=0 unreachable=0 failed=0
PLAY [master] *************************************************************************

TASK [Delete lines from Ganesha export file] ******************************************

TASK [Add lines to Ganesha export file] ***********************************************

TASK [Update lines to Ganesha export file] ********************************************

TASK [Add a block to Ganesha export file] *********************************************

TASK [Refresh NFS-Ganesha config] *****************************************************
fatal: [10.10.10.11]: FAILED! => {"changed": true, "cmd": "/usr/libexec/ganesha/ganesha-ha.sh --refresh-config \"/var/run/gluster/shared_storage/nfs-ganesha\" \"ganesha\"", "delta": "0:00:00.227074", "end": "2017-05-09 16:54:45.442219", "failed": true, "rc": 1, "start": "2017-05-09 16:54:45.215145", "stderr": "grep: /var/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf: No such file or directory\ngrep: /var/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf: No such file or directory\nError: cluster is not currently running on this node\ngrep: /var/run/gluster/shared_storage/nfs-ganesha/exports/export.ganesha.conf: No such file or directory", "stderr_lines": ["grep: /var/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf: No such file or directory", "grep: /var/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf: No such file or directory", "Error: cluster is not currently running on this node", "grep: /var/run/gluster/shared_storage/nfs-ganesha/exports/export.ganesha.conf: No such file or directory"], "stdout": "Error: refresh-config failed on localhost.", "stdout_lines": ["Error: refresh-config failed on localhost."]}
	to retry, use: --limit @/tmp/tmpacx8yi/ganesha-refresh-config.retry

PLAY RECAP ****************************************************************************
10.10.10.11 : ok=0 changed=0 unreachable=0 failed=1

Ignoring errors...
You can view the generated configuration files inside /tmp/tmpacx8yi
[root@linked-clone-of-centos-linux ~]#
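
PS: in case it helps, these are the kinds of checks I would run next on the nodes (hypothetical commands on my side, chosen from the paths in the error above; I believe the ganesha HA cluster is pacemaker/pcs based):

  mount | grep shared_storage                        # is the shared storage volume mounted at all?
  ls /var/run/gluster/shared_storage/nfs-ganesha/    # does the HA config directory exist?
  systemctl status nfs-ganesha pcsd pacemaker        # are the daemons gdeploy was supposed to start running?
  pcs status                                         # state of the ganesha-ha-360 cluster, if it was ever created
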
Soumya Koduri
2017-May-10 05:15 UTC
[Gluster-users] gdeploy not starting all the daemons for NFS-ganesha :(
CC'ing Sac, Manisha, and Arthy, who could help with troubleshooting.

Thanks,
Soumya

On 05/09/2017 08:31 PM, hvjunk wrote:
> Hi there,
>
> Given the following config file, what am I doing wrong that causes the error at the bottom and leaves /gluster_shared_storage unmounted?
> [...]