Veritas Volume Manager: Growing a disk group and expanding the filesystem
Validated by Mr Man!

Let's start off on node02 by listing the disks
[root@node02 ~]# vxdisk list
DEVICE TYPE DISK GROUP STATUS
sda auto:none - - online invalid
sdb auto:cdsdisk disk1 data1dg online clone_disk
sdc auto:cdsdisk disk2 data1dg online clone_disk
sdd auto:cdsdisk disk3 data1dg online clone_disk
sde auto:cdsdisk disk4 data1dg online clone_disk
sdf auto:cdsdisk disk5 data1dg online clone_disk
sdg auto:cdsdisk disk6 data1dg online clone_disk
sdh auto:none - - online invalid
sdi auto:cdsdisk - - online
Notice that device sdi currently has no DISK name and is not a member of the data1dg group. Now list the clone disks and check the maximum volume size the group's free space currently supports
[root@node02 ~]# vxdg -g data1dg -q listclone
sdb data1dg online clone_disk
sdc data1dg online clone_disk
sdd data1dg online clone_disk
sde data1dg online clone_disk
sdf data1dg online clone_disk
sdg data1dg online clone_disk
[root@node02 ~]# vxassist -g data1dg maxsize
Maximum volume size: 1996800 (975Mb)
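A note on units: VxVM reports lengths in 512-byte sectors, so 1996800 sectors x 512 bytes works out to exactly 975 MB, the figure shown in parentheses.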
If you hit a non-clone disk error like the one below when adding the new disk to the group, it can be cleared by setting the clone flag on the disk
[root@node02 ~]# vxdg -g data1dg adddisk disk7=sdi
VxVM vxdg ERROR V-5-1-0 Device sdi is a non-clone disk.
Adding a non-clone disk to a clone diskgroup is not allowed
[root@node02 ~]# vxdisk set sdi clone=on
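With the clone flag set, the adddisk command from above can be re-run and now succeeds (the transcript omits this retry, but the disk7 assignment in the listings below confirms it):
[root@node02 ~]# vxdg -g data1dg adddisk disk7=sdi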
[root@node02 ~]# vxassist -g data1dg maxsize
Maximum volume size: 1075638272 (525214Mb)
[root@node02 ~]# vxdg -g data1dg -q listclone
sdb data1dg online clone_disk
sdc data1dg online clone_disk
sdd data1dg online clone_disk
sde data1dg online clone_disk
sdf data1dg online clone_disk
sdg data1dg online clone_disk
sdi data1dg online clone_disk
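For reference, the flag can be reversed later with vxdisk set sdi clone=off should you ever need the disk treated as a non-clone again.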
List the disks again to verify that sdi has been added as disk7
[root@node02 ~]# vxdisk list
DEVICE TYPE DISK GROUP STATUS
sda auto:none - - online invalid
sdb auto:cdsdisk disk1 data1dg online clone_disk
sdc auto:cdsdisk disk2 data1dg online clone_disk
sdd auto:cdsdisk disk3 data1dg online clone_disk
sde auto:cdsdisk disk4 data1dg online clone_disk
sdf auto:cdsdisk disk5 data1dg online clone_disk
sdg auto:cdsdisk disk6 data1dg online clone_disk
sdh auto:none - - online invalid
sdi auto:cdsdisk disk7 data1dg online clone_disk
Inspect the free space available in the disk group
[root@node02 ~]# vxdg free
GROUP DISK DEVICE TAG OFFSET LENGTH FLAGS
data1dg disk6 sdg sdg 803209200 1997200 -
data1dg disk7 sdi sdi 0 1073642208 -
[root@node02 ~]# vxassist -g data1dg maxsize
Maximum volume size: 1075638272 (525214Mb)
[root@node02 ~]# vxprint -htqg data1dg
dg data1dg default default 25000 1436822698.10.node01
dm disk1 sdb auto 65536 402570288 -
dm disk2 sdc auto 65536 268357344 -
dm disk3 sdd auto 65536 134145808 -
dm disk4 sde auto 65536 268357344 -
dm disk5 sdf auto 65536 134145808 -
dm disk6 sdg auto 65536 805206400 -
dm disk7 sdi auto 65536 1073642208 -
v data1volume - ENABLED ACTIVE 2010785792 SELECT - fsgen
pl vol01-01 data1volume ENABLED ACTIVE 2010785792 CONCAT - RW
sd disk1-01 vol01-01 disk1 0 402570288 0 sdb ENA
sd disk2-01 vol01-01 disk2 0 268357344 402570288 sdc ENA
sd disk3-01 vol01-01 disk3 0 134145808 670927632 sdd ENA
sd sde-01 vol01-01 disk4 0 268357344 805073440 sde ENA
sd disk5-01 vol01-01 disk5 0 134145808 1073430784 sdf ENA
sd disk6-01 vol01-01 disk6 0 803209200 1207576592 sdg ENA
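As a cross-check on the units, vxprint lengths are also 512-byte sectors: the volume's 2010785792 sectors x 512 bytes comes to roughly 958.81 GB, matching the vxlist output shown later.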
The disk group has grown; now check how far the volume itself can be extended
[root@node02 ~]# vxassist -g data1dg maxgrow data1volume
Volume data1volume can be extended by 1075638272 to: 3086424064 (1507043Mb)
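As an aside, vxassist growby could extend the raw volume by the same amount, but it would not touch the mounted VxFS filesystem on top; that is why the vxresize wrapper is used below, since it grows the volume and the filesystem in a single operation. For illustration only (do not run it here):
[root@node02 ~]# vxassist -g data1dg growby data1volume 1075638272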
[root@node02 ~]# vxdisk -o alldgs list
DEVICE TYPE DISK GROUP STATUS
sda auto:none - - online invalid
sdb auto:cdsdisk disk1 data1dg online clone_disk
sdc auto:cdsdisk disk2 data1dg online clone_disk
sdd auto:cdsdisk disk3 data1dg online clone_disk
sde auto:cdsdisk disk4 data1dg online clone_disk
sdf auto:cdsdisk disk5 data1dg online clone_disk
sdg auto:cdsdisk disk6 data1dg online clone_disk
sdh auto:none - - online invalid
sdi auto:cdsdisk disk7 data1dg online clone_disk
Lastly, resize the volume. vxresize grows both the volume and the VxFS filesystem on it in one step. (Note: as the hastatus output below shows, node02 is not fully probed and app_SG is still online on node01 at this point; the vxlist output further down still reports the volume at 958.81g, so this attempt does not take effect, and the resize is repeated successfully once the service group has been switched to node02.)
[root@node02 ~]# /etc/vx/bin/vxresize -g data1dg data1volume +1075638272
On the VCS side, check the cluster status (notice the terminal has switched to node01). node02 reports UNKNOWN and several of its resources are not probed
[root@node01 ~]# hastatus -sum
-- SYSTEM STATE
-- System State Frozen
A node01 RUNNING 0
A node02 UNKNOWN 0
-- GROUP STATE
-- Group System Probed AutoDisabled State
B ClusterService node01 Y N ONLINE
B ClusterService node02 Y N OFFLINE
B app_SG node01 Y N ONLINE
B app_SG node02 Y N OFFLINE
-- RESOURCES NOT PROBED
-- Group Type Resource System
E ClusterService NIC csgnic node02
E ClusterService Phantom csg_PHANTOM node02
E app_SG DiskGroup cnf1_DG node02
E app_SG IP app_IP node02
E app_SG Mount data1volume_cd_MNT node02
E app_SG Proxy csgnic_PROXY node02
E app_SG Volume data1volume_VOL node02
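Those unprobed resources on node02 can be probed manually once HAD is reachable there; standard VCS syntax, using one of the resource names above:
[root@node01 ~]# hares -probe data1volume_VOL -sys node02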
List the disks using vxlist
[root@node01 ~]# vxlist
TY DEVICE DISK DISKGROUP SIZE FREE STATUS
disk sda - - - - uninitialized
disk sdb disk1 data1dg 191.96g 0.00 imported
disk sdc disk2 data1dg 127.96g 0.00 imported
disk sdd disk3 data1dg 63.96g 0.00 imported
disk sde disk4 data1dg 127.96g 0.00 imported
disk sdf disk5 data1dg 63.96g 0.00 imported
disk sdg disk6 data1dg 383.95g 975.19m imported
disk sdh - - - - uninitialized
disk sdi disk7 data1dg 511.95g 511.95g imported
TY DISKGROUP IMPORT STATUS SIZE FREE ID
dg data1dg private enabled 1.43t 512.90g 1436822698.10.node01
TY VOLUME DISKGROUP SIZE STATUS LAYOUT LINKAGE
vol data1volume data1dg 958.81g healthy concat -
TY FS FSTYPE SIZE FREE %USED DEVICE_PATH MOUNT_POINT
fs / ext3 181.00g 104.43g 40% /dev/sda2 /
fs boot ext3 484.22m 354.26m 23% /dev/sda1 /boot
fs cd vxfs 958.81g 70.86g 93% /dev/vx/dsk/data1dg/data1volume /data1volume/cd
TY NAME TYPE STATE WWN PNAME
hba c2 SCSI online - c2
hba c3 SCSI online - c3
TY ENCLR_NAME ENCLR_MODEL ARRAY_TYPE STATUS ENCLR_SNO
encl other_disks VMware OTHER_DISKS connected OTHER_DISKS
To list the disks across all disk groups, including deported ones, use the -o alldgs option
[root@node01 ~]# vxdisk -o alldgs list
DEVICE TYPE DISK GROUP STATUS
sda auto:none - - online invalid
sdb auto:cdsdisk disk1 data1dg online clone_disk
sdc auto:cdsdisk disk2 data1dg online clone_disk
sdd auto:cdsdisk disk3 data1dg online clone_disk
sde auto:cdsdisk disk4 data1dg online clone_disk
sdf auto:cdsdisk disk5 data1dg online clone_disk
sdg auto:cdsdisk disk6 data1dg online clone_disk
sdh auto:none - - online invalid
sdi auto:cdsdisk disk7 data1dg online clone_disk
Let's check on VCS again; notice the status of data1volume_VOL
[root@node01 ~]# /opt/VRTSvcs/bin/hastatus
attempting to connect....
attempting to connect....connected
group resource system message
--------------- -------------------- -------------------- --------------------
node01 RUNNING
node02 RUNNING
ClusterService node01 ONLINE
ClusterService node02 OFFLINE
-------------------------------------------------------------------------
app_SG node01 ONLINE
app_SG node02 OFFLINE
csgnic node01 ONLINE
csgnic node02 ONLINE
csg_PHANTOM node01 ONLINE
-------------------------------------------------------------------------
csg_PHANTOM node02 OFFLINE
cnf1_DG node01 ONLINE
cnf1_DG node02 OFFLINE
app_IP node01 ONLINE
app_IP node02 OFFLINE
-------------------------------------------------------------------------
data1volume_cd_MNT node01 ONLINE
data1volume_cd_MNT node02 OFFLINE
csgnic_PROXY node01 ONLINE
csgnic_PROXY node02 ONLINE
data1volume_VOL node01 ONLINE
-------------------------------------------------------------------------
data1volume_VOL node02 OFFLINE
Instead of rebooting the node, let's switch the service groups from node01 to node02
[root@node01 ~]# hagrp -switch app_SG -to node02
[root@node01 ~]# hagrp -switch ClusterService -to node02
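A quick way to watch either switch without the full hastatus output is hagrp -state (standard VCS syntax):
[root@node01 ~]# hagrp -state app_SG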
Look at hastatus to verify
[root@node01 ~]# /opt/VRTSvcs/bin/hastatus -sum
-- SYSTEM STATE
-- System State Frozen
A node01 RUNNING 0
A node02 RUNNING 0
-- GROUP STATE
-- Group System Probed AutoDisabled State
B ClusterService node01 Y N OFFLINE
B ClusterService node02 Y N ONLINE
B app_SG node01 Y N OFFLINE
B app_SG node02 Y N ONLINE
For the full, continuously updating view (hastatus without -sum keeps streaming events, including the group switches below, until interrupted)
[root@node01 ~]# /opt/VRTSvcs/bin/hastatus
attempting to connect....
attempting to connect....connected
group resource system message
--------------- -------------------- -------------------- --------------------
node01 RUNNING
node02 RUNNING
ClusterService node01 OFFLINE
ClusterService node02 ONLINE
-------------------------------------------------------------------------
app_SG node01 OFFLINE
app_SG node02 ONLINE
csgnic node01 ONLINE
csgnic node02 ONLINE
csg_PHANTOM node01 OFFLINE
-------------------------------------------------------------------------
csg_PHANTOM node02 ONLINE
cnf1_DG node01 OFFLINE
cnf1_DG node02 ONLINE
app_IP node01 OFFLINE
app_IP node02 ONLINE
-------------------------------------------------------------------------
data1volume_cd_MNT node01 OFFLINE
data1volume_cd_MNT node02 ONLINE
csgnic_PROXY node01 ONLINE
csgnic_PROXY node02 ONLINE
data1volume_VOL node01 OFFLINE
-------------------------------------------------------------------------
data1volume_VOL node02 ONLINE
ClusterService node01 ONLINE
csg_PHANTOM node01 ONLINE
ClusterService node01 STOPPING ONLINE
csg_PHANTOM node01 WAITING FOR OFFLINE
-------------------------------------------------------------------------
csg_PHANTOM node01 OFFLINE
ClusterService node01 OFFLINE
You can now see that the disk group status is healthy, but more importantly notice that FREE is 512.90g, the space we just added via sdi; the volume itself is still 958.81g, since the earlier resize attempt never completed.
[root@node02 ~]# /opt/VRTS/bin/vxlist vol
TY VOLUME DISKGROUP SIZE STATUS LAYOUT LINKAGE
vol data1volume data1dg 958.81g healthy concat -
[root@node02 ~]# /opt/VRTS/bin/vxlist
TY DEVICE DISK DISKGROUP SIZE FREE STATUS
disk sda - - - - uninitialized
disk sdb disk1 data1dg 191.96g 0.00 imported
disk sdc disk2 data1dg 127.96g 0.00 imported
disk sdd disk3 data1dg 63.96g 0.00 imported
disk sde disk4 data1dg 127.96g 0.00 imported
disk sdf disk5 data1dg 63.96g 0.00 imported
disk sdg disk6 data1dg 383.95g 975.19m imported
disk sdh - - - - uninitialized
disk sdi disk7 data1dg 511.95g 511.95g imported
TY DISKGROUP IMPORT STATUS SIZE FREE ID
dg data1dg private enabled 1.43t 512.90g 1436822698.10.node01
TY VOLUME DISKGROUP SIZE STATUS LAYOUT LINKAGE
vol data1volume data1dg 958.81g healthy concat -
TY FS FSTYPE SIZE FREE %USED DEVICE_PATH MOUNT_POINT
fs / ext3 181.00g 157.37g 9% /dev/sda2 /
fs boot ext3 484.22m 355.66m 23% /dev/sda1 /boot
fs cd vxfs 958.81g 70.86g 93% /dev/vx/dsk/data1dg/data1volume /data1volume/cd
TY NAME TYPE STATE WWN PNAME
hba c2 SCSI online - c2
hba c3 SCSI online - c3
TY ENCLR_NAME ENCLR_MODEL ARRAY_TYPE STATUS ENCLR_SNO
encl other_disks VMware OTHER_DISKS connected OTHER_DISKS
Check the remaining maxsize and the maxgrow available for the volume
[root@node02 ~]# vxassist -g data1dg maxsize
Maximum volume size: 1075638272 (525214Mb)
[root@node02 ~]# vxassist -g data1dg maxgrow data1volume
Volume data1volume can be extended by 1075638272 to: 3086424064 (1507043Mb)
Then resize it by the amount reported (this time in the background with -b)
[root@node02 ~]# /opt/VRTS/bin/vxresize -b -g data1dg data1volume +1075638272
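Because -b runs the resize in the background, vxtask tells you whether it is still working; an empty list means the task has finished:
[root@node02 ~]# vxtask list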
[root@node02 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/sda2 182G 15G 158G 9% /
tmpfs 3.9G 0 3.9G 0% /dev/shm
/dev/sda1 485M 104M 356M 23% /boot
tmpfs 4.0K 0 4.0K 0% /dev/vx
/dev/vx/dsk/data1dg/data1volume
1.5T 884G 552G 62% /data1volume/cd
[root@node02 ~]#
FYI: if you run into error VxVM vxresize ERROR V-5-1-14105, it means a stale lock file was left behind (for example, by an earlier incomplete resize attempt like the one above)
[root@node02 ~]# /opt/VRTS/bin/vxresize -b -g data1dg data1volume +1075638272
VxVM vxresize ERROR V-5-1-14105 The lockfile /etc/vx/locks/data1dg_data1volume exists. This means either previous resize operation may be going on the volume data1volume in the diskgroup data1dg or the operation may have exited incompletely. To continue with resize operation : (1) please make sure that there is no resize operation is in progress on the same volume, (2) remove /etc/vx/locks/data1dg_data1volume and (3) restart the command.
Use vxtask list to confirm that no VxVM task is actually in progress
[root@node02 ~]# vxtask list
TASKID PTID TYPE/STATE PCT PROGRESS
The lock file should be an empty file; verify that, then remove it (or move it somewhere outside the locks directory)
[root@node02 ~]# file /etc/vx/locks/data1dg_data1volume
/etc/vx/locks/data1dg_data1volume: empty
[root@node02 ~]# ls -ltr /etc/vx/locks/data1dg_data1volume
-rw-r--r-- 1 root root 0 Apr 21 17:35 /etc/vx/locks/data1dg_data1volume
[root@node02 ~]# mv /etc/vx/locks/data1dg_data1volume /tmp/data1dg_data1volume
Now retry the resize operation
[root@node02 ~]# /opt/VRTS/bin/vxresize -b -g data1dg data1volume +1075638272
[root@node02 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/sda2 182G 15G 158G 9% /
tmpfs 3.9G 0 3.9G 0% /dev/shm
/dev/sda1 485M 104M 356M 23% /boot
tmpfs 4.0K 0 4.0K 0% /dev/vx
/dev/vx/dsk/data1dg/data1volume
1.5T 884G 552G 62% /data1volume/cd
[root@node02 ~]#
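As a final sanity check, re-running vxlist vol should now report data1volume at roughly 1.5t, in line with the df output above:
[root@node02 ~]# /opt/VRTS/bin/vxlist vol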