1. Hierarchy of Concepts
- Physical Volumes
- Physical Extents
- Volume Groups
- Logical Volumes
- Logical Extents : LVM 용량 관리의 최소 단위 (늘리고 줄일 때의 기준 단위)
- Filesystems
2. LVM 만들기
1) PV 만들기
[root@localhost ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 3G 0 disk
sdb 8:16 0 3G 0 disk
sdc 8:32 0 3G 0 disk
vda 252:0 0 30G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 29G 0 part
├─cl-root 253:0 0 27G 0 lvm /
└─cl-swap 253:1 0 2G 0 lvm [SWAP]
[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/vda2 cl lvm2 a-- 29.00g 4.00m
[root@localhost ~]# pvcreate /dev/sda /dev/sdb
Physical volume "/dev/sda" successfully created.
Physical volume "/dev/sdb" successfully created.
[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda lvm2 --- 3.00g 3.00g
/dev/sdb lvm2 --- 3.00g 3.00g
/dev/vda2 cl lvm2 a-- 29.00g 4.00m
2) pvdisplay
[root@localhost ~]# pvdisplay /dev/sda
"/dev/sda" is a new physical volume of "3.00 GiB"
--- NEW Physical volume ---
PV Name /dev/sda
VG Name
PV Size 3.00 GiB
Allocatable NO
PE Size 0
Total PE 0
Free PE 0
Allocated PE 0
PV UUID RMCjfY-Kclx-8EHg-HUAh-W1hH-m7Td-6qnrLq
3) volume group 만들기
[root@localhost ~]# vgs
VG #PV #LV #SN Attr VSize VFree
cl 1 2 0 wz--n- 29.00g 4.00m
[root@localhost ~]# vgcreate vgtest /dev/sda /dev/sdb
Volume group "vgtest" successfully created
[root@localhost ~]# vgdisplay vgtest
--- Volume group ---
VG Name vgtest
System ID
Format lvm2
Metadata Areas 2
Metadata Sequence No 1
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 0
Open LV 0
Max PV 0
Cur PV 2
Act PV 2
VG Size 5.99 GiB
PE Size 4.00 MiB
Total PE 1534
Alloc PE / Size 0 / 0
Free PE / Size 1534 / 5.99 GiB
VG UUID Y0941g-PZMj-rIKO-GTb9-OHos-gjNd-3A1I5t
[root@localhost ~]# pvdisplay /dev/sda
--- Physical volume ---
PV Name /dev/sda
VG Name vgtest // 이제 pvdisplay 하면 포함된 VG 확인 가능
PV Size 3.00 GiB / not usable 4.00 MiB
Allocatable yes
PE Size 4.00 MiB
Total PE 767
Free PE 767
Allocated PE 0
PV UUID RMCjfY-Kclx-8EHg-HUAh-W1hH-m7Td-6qnrLq
[root@localhost ~]# vgremove vgtest
Volume group "vgtest" successfully removed
[root@localhost ~]# vgs
VG #PV #LV #SN Attr VSize VFree
cl 1 2 0 wz--n- 29.00g 4.00m
[root@localhost ~]# vgcreate -s 16M vgcolor /dev/sda /dev/sdb // extent size 지정해서 vg 생성도 가능
Volume group "vgcolor" successfully created
[root@localhost ~]# vgdisplay vgcolor
--- Volume group ---
VG Name vgcolor
System ID
Format lvm2
Metadata Areas 2
Metadata Sequence No 1
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 0
Open LV 0
Max PV 0
Cur PV 2
Act PV 2
VG Size 5.97 GiB
PE Size 16.00 MiB
Total PE 382
Alloc PE / Size 0 / 0
Free PE / Size 382 / 5.97 GiB
VG UUID v0YDH9-KTWc-HdKN-48oA-41BG-icbh-bSx4kf
[root@localhost ~]# pvdisplay /dev/sda
--- Physical volume ---
PV Name /dev/sda
VG Name vgcolor
PV Size 3.00 GiB / not usable 16.00 MiB
Allocatable yes
PE Size 16.00 MiB
Total PE 191
Free PE 191
Allocated PE 0
PV UUID RMCjfY-Kclx-8EHg-HUAh-W1hH-m7Td-6qnrLq
4) Logical Volume 만들기
[root@localhost ~]# lvcreate -l 10 -n lvred vgcolor
Logical volume "lvred" created.
[root@localhost ~]# lvcreate -L 150M -n lvblue vgcolor
Rounding up size to full physical extent 160.00 MiB // extent 단위가 16MiB이기 때문에 요청한 150M이 extent 배수인 160MiB로 올림된다.
Logical volume "lvblue" created.
// logical volume 디바이스 경로를 지정할 때 사용할 수 있는 두 가지 패턴
/dev/mapper/VGname-LVname
/dev/VGname/LVname
[root@localhost ~]# lvdisplay vgcolor
--- Logical volume ---
LV Path /dev/vgcolor/lvred
LV Name lvred
VG Name vgcolor
LV UUID gQ4IbN-XzFV-98FB-2IKu-KQVB-aTVw-3zrKO0
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2018-07-16 15:56:28 +0900
LV Status available
# open 0
LV Size 160.00 MiB // 16MiB짜리 extent 10개를 만든 것이므로 160MiB
Current LE 10
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:2
--- Logical volume ---
LV Path /dev/vgcolor/lvblue
LV Name lvblue
VG Name vgcolor
LV UUID e8V5vd-62bT-AIy2-9uzQ-yJmN-YeLm-M6OmKj
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2018-07-16 15:59:38 +0900
LV Status available
# open 0
LV Size 160.00 MiB
Current LE 10
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:3
5) 포맷하기
[root@localhost ~]# mkfs.ext4 /dev/vgcolor/lvblue
mke2fs 1.42.9 (28-Dec-2013)
Discarding device blocks: done
Filesystem label=
OS type: Linux
Block size=1024 (log=0)
Fragment size=1024 (log=0)
Stride=0 blocks, Stripe width=0 blocks
40960 inodes, 163840 blocks
8192 blocks (5.00%) reserved for the super user
First data block=1
Maximum filesystem blocks=33816576
20 block groups
8192 blocks per group, 8192 fragments per group
2048 inodes per group
Superblock backups stored on blocks:
8193, 24577, 40961, 57345, 73729
Allocating group tables: done
Writing inode tables: done
Creating journal (4096 blocks): done
Writing superblocks and filesystem accounting information: done
[root@localhost ~]# mkfs.xfs /dev/vgcolor/lvred
meta-data=/dev/vgcolor/lvred isize=512 agcount=4, agsize=10240 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=40960, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=855, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@localhost ~]# blkid
/dev/vda1: UUID="6ad7f1e0-44ee-447e-9722-e025d75aa66b" TYPE="xfs"
/dev/vda2: UUID="S9z7ps-wqio-Aj4S-u2M7-mrqh-Nn0D-YKqJwp" TYPE="LVM2_member"
/dev/mapper/cl-root: UUID="bcce7fe1-2bbe-4d56-a827-938192523236" TYPE="xfs"
/dev/mapper/cl-swap: UUID="879c7cab-d3c7-478f-b9fc-93b5fceb9fe4" TYPE="swap"
/dev/sda: UUID="RMCjfY-Kclx-8EHg-HUAh-W1hH-m7Td-6qnrLq" TYPE="LVM2_member"
/dev/sdb: UUID="ju4zJm-gio4-ln1g-UV9Y-suRg-0vBr-CeE7k0" TYPE="LVM2_member"
/dev/mapper/vgcolor-lvred: UUID="592584bb-522b-45b2-979b-99ca42a5d5d7" TYPE="xfs"
/dev/mapper/vgcolor-lvblue: UUID="1a735a24-2dd8-477a-97ac-9bd8ea43dce1" TYPE="ext4"
6) extend (온라인 중 가능)
[root@localhost ~]# pvcreate /dev/sdc
Physical volume "/dev/sdc" successfully created.
[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda vgcolor lvm2 a-- 2.98g 2.67g
/dev/sdb vgcolor lvm2 a-- 2.98g 2.98g
/dev/sdc lvm2 --- 3.00g 3.00g
/dev/vda2 cl lvm2 a-- 29.00g 4.00m
[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]# vgextend vgcolor /dev/sdc
Volume group "vgcolor" successfully extended
[root@localhost ~]# vgs
VG #PV #LV #SN Attr VSize VFree
cl 1 2 0 wz--n- 29.00g 4.00m
vgcolor 3 2 0 wz--n- 8.95g 8.64g
[root@localhost ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/cl-root 27G 4.4G 23G 17% /
devtmpfs 984M 0 984M 0% /dev
tmpfs 1001M 156K 1001M 1% /dev/shm
tmpfs 1001M 8.9M 992M 1% /run
tmpfs 1001M 0 1001M 0% /sys/fs/cgroup
/dev/vda1 1014M 157M 858M 16% /boot
tmpfs 201M 4.0K 201M 1% /run/user/42
tmpfs 201M 32K 201M 1% /run/user/0
/dev/mapper/vgcolor-lvred 157M 8.3M 149M 6% /red
/dev/mapper/vgcolor-lvblue 151M 1.6M 139M 2% /blue
[root@localhost ~]# lvextend -L 480MB /dev/vgcolor/lvred
Size of logical volume vgcolor/lvred changed from 160.00 MiB (10 extents) to 480.00 MiB (30 extents).
Logical volume vgcolor/lvred successfully resized.
[root@localhost ~]# lvextend -l 30 /dev/vgcolor/lvblue
Size of logical volume vgcolor/lvblue changed from 160.00 MiB (10 extents) to 480.00 MiB (30 extents).
Logical volume vgcolor/lvblue successfully resized.
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root cl -wi-ao---- 26.99g
swap cl -wi-ao---- 2.00g
lvblue vgcolor -wi-ao---- 480.00m
lvred vgcolor -wi-ao---- 480.00m
// lvextend는 lv(블록 디바이스)만 키운 것이고, 그 위에 포맷된 파일시스템 크기 자체는 늘어나지 않는다.
[root@localhost ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/cl-root 27G 4.4G 23G 17% /
devtmpfs 984M 0 984M 0% /dev
tmpfs 1001M 156K 1001M 1% /dev/shm
tmpfs 1001M 8.9M 992M 1% /run
tmpfs 1001M 0 1001M 0% /sys/fs/cgroup
/dev/vda1 1014M 157M 858M 16% /boot
tmpfs 201M 4.0K 201M 1% /run/user/42
tmpfs 201M 32K 201M 1% /run/user/0
/dev/mapper/vgcolor-lvred 157M 8.3M 149M 6% /red
/dev/mapper/vgcolor-lvblue 151M 1.6M 139M 2% /blue
// 파일시스템 크기도 별도로 늘려줘야 함 (ext4는 resize2fs, xfs는 xfs_growfs).
[root@localhost ~]# resize2fs /dev/vgcolor/lvblue
resize2fs 1.42.9 (28-Dec-2013)
Filesystem at /dev/vgcolor/lvblue is mounted on /blue; on-line resizing required
old_desc_blocks = 2, new_desc_blocks = 4
The filesystem on /dev/vgcolor/lvblue is now 491520 blocks long.
[root@localhost ~]# xfs_growfs --help
xfs_growfs: 부적절한 옵션 -- '-'
Usage: xfs_growfs [options] mountpoint
Options:
-d grow data/metadata section
-l grow log section
-r grow realtime section
-n don't change anything, just show geometry
-i convert log from external to internal format
-t alternate location for mount table (/etc/mtab)
-x convert log from internal to external format
-D size grow data/metadata section to size blks
-L size grow/shrink log section to size blks
-R size grow realtime section to size blks
-e size set realtime extent size to size blks
-m imaxpct set inode max percent to imaxpct
-V print version information
[root@localhost ~]# xfs_growfs /dev/vgcolor/lvred
meta-data=/dev/mapper/vgcolor-lvred isize=512 agcount=4, agsize=10240 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0 spinodes=0
data = bsize=4096 blocks=40960, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal bsize=4096 blocks=855, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 40960 to 122880
[root@localhost ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/cl-root 27G 4.4G 23G 17% /
devtmpfs 984M 0 984M 0% /dev
tmpfs 1001M 156K 1001M 1% /dev/shm
tmpfs 1001M 8.9M 992M 1% /run
tmpfs 1001M 0 1001M 0% /sys/fs/cgroup
/dev/vda1 1014M 157M 858M 16% /boot
tmpfs 201M 4.0K 201M 1% /run/user/42
tmpfs 201M 32K 201M 1% /run/user/0
/dev/mapper/vgcolor-lvred 477M 8.5M 469M 2% /red
/dev/mapper/vgcolor-lvblue 461M 2.3M 435M 1% /blue
7) reduce
// XFS 타입은 reduce 불가능
// reduce 하려면 umount 부터 해야 한다.
[root@localhost ~]# umount /blue
[root@localhost ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/cl-root 27G 4.4G 23G 17% /
devtmpfs 984M 0 984M 0% /dev
tmpfs 1001M 156K 1001M 1% /dev/shm
tmpfs 1001M 8.9M 992M 1% /run
tmpfs 1001M 0 1001M 0% /sys/fs/cgroup
/dev/vda1 1014M 157M 858M 16% /boot
tmpfs 201M 4.0K 201M 1% /run/user/42
tmpfs 201M 32K 201M 1% /run/user/0
/dev/mapper/vgcolor-lvred 477M 8.5M 469M 2% /red
[root@localhost ~]#
[root@localhost ~]# resize2fs /dev/vgcolor/lvblue 320M
resize2fs 1.42.9 (28-Dec-2013)
Please run 'e2fsck -f /dev/vgcolor/lvblue' first.
[root@localhost ~]# e2fsck -f /dev/vgcolor/lvblue
e2fsck 1.42.9 (28-Dec-2013)
Pass 1: Checking inodes, blocks, and sizes
Pass 2: Checking directory structure
Pass 3: Checking directory connectivity
Pass 4: Checking reference counts
Pass 5: Checking group summary information
/dev/vgcolor/lvblue: 13/122880 files (0.0% non-contiguous), 21946/491520 blocks
[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]# resize2fs /dev/vgcolor/lvblue 320M
resize2fs 1.42.9 (28-Dec-2013)
Resizing the filesystem on /dev/vgcolor/lvblue to 327680 (1k) blocks.
The filesystem on /dev/vgcolor/lvblue is now 327680 blocks long.
[root@localhost ~]# lvreduce -L 320M /dev/vgcolor/lvblue
WARNING: Reducing active logical volume to 320.00 MiB.
THIS MAY DESTROY YOUR DATA (filesystem etc.)
Do you really want to reduce vgcolor/lvblue? [y/n]: y
Size of logical volume vgcolor/lvblue changed from 480.00 MiB (30 extents) to 320.00 MiB (20 extents).
Logical volume vgcolor/lvblue successfully resized.
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root cl -wi-ao---- 26.99g
swap cl -wi-ao---- 2.00g
lvblue vgcolor -wi-a----- 320.00m
lvred vgcolor -wi-ao---- 480.00m
[root@localhost ~]# mount /dev/vgcolor/lvblue /blue
[root@localhost ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/cl-root 27G 4.4G 23G 17% /
devtmpfs 984M 0 984M 0% /dev
tmpfs 1001M 156K 1001M 1% /dev/shm
tmpfs 1001M 8.9M 992M 1% /run
tmpfs 1001M 0 1001M 0% /sys/fs/cgroup
/dev/vda1 1014M 157M 858M 16% /boot
tmpfs 201M 4.0K 201M 1% /run/user/42
tmpfs 201M 32K 201M 1% /run/user/0
/dev/mapper/vgcolor-lvred 477M 8.5M 469M 2% /red
/dev/mapper/vgcolor-lvblue 306M 2.1M 286M 1% /blue
8) remove
[root@localhost ~]# lvremove /dev/vgcolor/lvred
Do you really want to remove active logical volume vgcolor/lvred? [y/n]: y
Logical volume "lvred" successfully removed
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root cl -wi-ao---- 26.99g
swap cl -wi-ao---- 2.00g
lvblue vgcolor -wi-a----- 320.00m
// vg 안에 lv가 남아 있어도 vgremove로 vg를 지우면 확인 후 lv까지 함께 삭제된다.
[root@localhost ~]# vgremove vgcolor
Do you really want to remove volume group "vgcolor" containing 1 logical volumes? [y/n]: y
Do you really want to remove active logical volume vgcolor/lvblue? [y/n]: y
Logical volume "lvblue" successfully removed
Volume group "vgcolor" successfully removed
[root@localhost ~]# vgs
VG #PV #LV #SN Attr VSize VFree
cl 1 2 0 wz--n- 29.00g 4.00m
[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda lvm2 --- 3.00g 3.00g
/dev/sdb lvm2 --- 3.00g 3.00g
/dev/sdc lvm2 --- 3.00g 3.00g
/dev/vda2 cl lvm2 a-- 29.00g 4.00m
[root@localhost ~]# pvremove /dev/sdc
Labels on physical volume "/dev/sdc" successfully wiped.
[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda lvm2 --- 3.00g 3.00g
/dev/sdb lvm2 --- 3.00g 3.00g
/dev/vda2 cl lvm2 a-- 29.00g 4.00m
9) 태그 설정 및 vg내 특정 pv로만 lv 만들기
[root@localhost ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 3G 0 disk
sdb 8:16 0 3G 0 disk
sdc 8:32 0 3G 0 disk
sdd 8:48 0 10G 0 disk
vda 252:0 0 30G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 29G 0 part
├─cl-root 253:0 0 27G 0 lvm /
└─cl-swap 253:1 0 2G 0 lvm [SWAP]
[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]# pvcreate /dev/sda /dev/sdd
Physical volume "/dev/sda" successfully created.
Physical volume "/dev/sdd" successfully created.
[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]# vgcreate -s 8M vgcloud /dev/sda /dev/sdd
Volume group "vgcloud" successfully created
[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda vgcloud lvm2 a-- 2.99g 2.99g
/dev/sdb lvm2 --- 3.00g 3.00g
/dev/sdd vgcloud lvm2 a-- 9.99g 9.99g
/dev/vda2 cl lvm2 a-- 29.00g 4.00m
[root@localhost ~]# pvs -a
PV VG Fmt Attr PSize PFree
/dev/cl/root --- 0 0
/dev/cl/swap --- 0 0
/dev/sda vgcloud lvm2 a-- 2.99g 2.99g
/dev/sdb lvm2 --- 3.00g 3.00g
/dev/sdc --- 0 0
/dev/sdd vgcloud lvm2 a-- 9.99g 9.99g
/dev/vda1 --- 0 0
/dev/vda2 cl lvm2 a-- 29.00g 4.00m
[root@localhost ~]# pvs -o +tags
PV VG Fmt Attr PSize PFree PV Tags
/dev/sda vgcloud lvm2 a-- 2.99g 2.99g
/dev/sdb lvm2 --- 3.00g 3.00g
/dev/sdd vgcloud lvm2 a-- 9.99g 9.99g
/dev/vda2 cl lvm2 a-- 29.00g 4.00m
[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]# pvchange --addtag ssd /dev/sda
Physical volume "/dev/sda" changed
1 physical volume changed / 0 physical volumes not changed
[root@localhost ~]# pvchange --addtag slowhdd /dev/sdd
Physical volume "/dev/sdd" changed
1 physical volume changed / 0 physical volumes not changed
[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]# lvcreate -l 100%FREE -n lvdata vgcloud @slowhdd
Logical volume "lvdata" created.
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root cl -wi-ao---- 26.99g
swap cl -wi-ao---- 2.00g
lvdata vgcloud -wi-a----- 9.99g
[root@localhost ~]# lvcreate --type cache-pool -l 100%FREE -n cachepool vgcloud @ssd
Using default stripesize 64.00 KiB.
Logical volume "cachepool" created.
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root cl -wi-ao---- 26.99g
swap cl -wi-ao---- 2.00g
cachepool vgcloud Cwi---C--- 2.98g
lvdata vgcloud -wi-a----- 9.99g
[root@localhost ~]#
[root@localhost ~]# vgdisplay -v vgcloud
--- Volume group ---
VG Name vgcloud
System ID
Format lvm2
Metadata Areas 2
Metadata Sequence No 7
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 2
Open LV 0
Max PV 0
Cur PV 2
Act PV 2
VG Size 12.98 GiB
PE Size 8.00 MiB
Total PE 1662
Alloc PE / Size 1662 / 12.98 GiB
Free PE / Size 0 / 0
VG UUID IRvUrM-OyrO-RQ7U-czzP-w60o-rrvH-UKmSwM
--- Logical volume ---
LV Path /dev/vgcloud/lvdata
LV Name lvdata
VG Name vgcloud
LV UUID 3Ju1fd-vSFp-mIcV-Mk95-zwJJ-51ZP-zM5E2l
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2018-07-17 11:09:16 +0900
LV Status available
# open 0
LV Size 9.99 GiB
Current LE 1279
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:2
--- Logical volume ---
LV Path /dev/vgcloud/cachepool
LV Name cachepool
VG Name vgcloud
LV UUID R1R9DZ-3y3d-LDWG-2aN6-gXZ2-fK7W-mL0Yup
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2018-07-17 11:11:04 +0900
LV Status NOT available
LV Size 2.98 GiB
Current LE 381
Segments 1
Allocation inherit
Read ahead sectors auto
--- Physical volumes ---
PV Name /dev/sda
PV UUID 2nNqgS-3qTl-bUMh-1kxe-snmt-GRfP-YLWCXd
PV Status allocatable
Total PE / Free PE 383 / 0
PV Name /dev/sdd
PV UUID oygnI2-up4Z-1XAZ-McSy-bUz6-WM91-2gwH2L
PV Status allocatable
Total PE / Free PE 1279 / 0
[root@localhost ~]#
10) 스토리지 티어링
[root@localhost ~]# lvconvert --type cache --cachepool vgcloud/cachepool vgcloud/lvdata
Do you want wipe existing metadata of cache pool volume vgcloud/cachepool? [y/n]: y
Logical volume vgcloud/lvdata is now cached.
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root cl -wi-ao---- 26.99g
swap cl -wi-ao---- 2.00g
lvdata vgcloud Cwi-a-C--- 9.99g [cachepool] [lvdata_corig] 0.00 4.98 0.00
[root@localhost ~]# mkfs.xfs /dev/vgcloud/lvdata
meta-data=/dev/vgcloud/lvdata isize=512 agcount=16, agsize=163696 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=2619136, imaxpct=25
= sunit=16 swidth=16 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=16 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@localhost ~]# mount /dev/vgcloud/lvdata /mnt
[root@localhost ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/cl-root 27G 4.4G 23G 17% /
devtmpfs 984M 0 984M 0% /dev
tmpfs 1001M 192K 1001M 1% /dev/shm
tmpfs 1001M 8.9M 992M 1% /run
tmpfs 1001M 0 1001M 0% /sys/fs/cgroup
/dev/vda1 1014M 157M 858M 16% /boot
tmpfs 201M 4.0K 201M 1% /run/user/42
tmpfs 201M 36K 201M 1% /run/user/0
/dev/mapper/vgcloud-lvdata 10G 33M 10G 1% /mnt
[root@localhost ~]# lvs -o lv_name,cache_mode,cache_total_blocks,cache_used_blocks
LV CacheMode CacheTotalBlocks CacheUsedBlocks
root
swap
lvdata writethrough 48768 2559
// lvs 옵션
[root@localhost ~]# lvs -o help
Logical Volume Fields
---------------------
lv_all - All fields in this section.
lv_uuid - Unique identifier.
lv_name - Name. LVs created for internal use are enclosed in brackets.
lv_full_name - Full name of LV including its VG, namely VG/LV.
lv_path - Full pathname for LV. Blank for internal LVs.
lv_dm_path - Internal device-mapper pathname for LV (in /dev/mapper directory).
lv_parent - For LVs that are components of another LV, the parent LV.
Logical Volume Device Info and Status Combined Fields
-----------------------------------------------------
lv_all - All fields in this section.
lv_attr - Various attributes - see man page.
Logical Volume Fields
---------------------
lv_all - All fields in this section.
lv_layout - LV layout.
lv_role - LV role.
lv_initial_image_sync - Set if mirror/RAID images underwent initial resynchronization.
lv_image_synced - Set if mirror/RAID image is synchronized.
lv_merging - Set if snapshot LV is being merged to origin.
lv_converting - Set if LV is being converted.
lv_allocation_policy - LV allocation policy.
lv_allocation_locked - Set if LV is locked against allocation changes.
lv_fixed_minor - Set if LV has fixed minor number assigned.
lv_merge_failed - Set if snapshot merge failed.
lv_snapshot_invalid - Set if snapshot LV is invalid.
lv_skip_activation - Set if LV is skipped on activation.
lv_when_full - For thin pools, behavior when full.
lv_active - Active state of the LV.
lv_active_locally - Set if the LV is active locally.
lv_active_remotely - Set if the LV is active remotely.
lv_active_exclusively - Set if the LV is active exclusively.
lv_major - Persistent major number or -1 if not persistent.
lv_minor - Persistent minor number or -1 if not persistent.
lv_read_ahead - Read ahead setting in current units.
lv_size - Size of LV in current units.
lv_metadata_size - For thin and cache pools, the size of the LV that holds the metadata.
seg_count - Number of segments in LV.
origin - For snapshots and thins, the origin device of this LV.
origin_uuid - For snapshots and thins, the UUID of origin device of this LV.
origin_size - For snapshots, the size of the origin device of this LV.
lv_ancestors - LV ancestors ignoring any stored history of the ancestry chain.
lv_full_ancestors - LV ancestors including stored history of the ancestry chain.
lv_descendants - LV descendants ignoring any stored history of the ancestry chain.
lv_full_descendants - LV descendants including stored history of the ancestry chain.
Logical Volume Device Status Fields
-----------------------------------
lv_all - All fields in this section.
data_percent - For snapshot, cache and thin pools and volumes, the percentage full if LV is active.
snap_percent - For snapshots, the percentage full if LV is active.
metadata_percent - For cache and thin pools, the percentage of metadata full if LV is active.
copy_percent - For Cache, RAID, mirrors and pvmove, current percentage in-sync.
sync_percent - For Cache, RAID, mirrors and pvmove, current percentage in-sync.
Logical Volume Fields
---------------------
lv_all - All fields in this section.
raid_mismatch_count - For RAID, number of mismatches found or repaired.
raid_sync_action - For RAID, the current synchronization action being performed.
raid_write_behind - For RAID1, the number of outstanding writes allowed to writemostly devices.
raid_min_recovery_rate - For RAID1, the minimum recovery I/O load in kiB/sec/disk.
raid_max_recovery_rate - For RAID1, the maximum recovery I/O load in kiB/sec/disk.
move_pv - For pvmove, Source PV of temporary LV created by pvmove.
move_pv_uuid - For pvmove, the UUID of Source PV of temporary LV created by pvmove.
convert_lv - For lvconvert, Name of temporary LV created by lvconvert.
convert_lv_uuid - For lvconvert, UUID of temporary LV created by lvconvert.
mirror_log - For mirrors, the LV holding the synchronisation log.
mirror_log_uuid - For mirrors, the UUID of the LV holding the synchronisation log.
data_lv - For thin and cache pools, the LV holding the associated data.
data_lv_uuid - For thin and cache pools, the UUID of the LV holding the associated data.
metadata_lv - For thin and cache pools, the LV holding the associated metadata.
metadata_lv_uuid - For thin and cache pools, the UUID of the LV holding the associated metadata.
pool_lv - For thin volumes, the thin pool LV for this volume.
pool_lv_uuid - For thin volumes, the UUID of the thin pool LV for this volume.
lv_tags - Tags, if any.
lv_profile - Configuration profile attached to this LV.
lv_lockargs - Lock args of the LV used by lvmlockd.
lv_time - Creation time of the LV, if known
lv_time_removed - Removal time of the LV, if known
lv_host - Creation host of the LV, if known.
lv_modules - Kernel device-mapper modules required for this LV.
lv_historical - Set if the LV is historical.
Logical Volume Device Info Fields
---------------------------------
lv_all - All fields in this section.
lv_kernel_major - Currently assigned major number or -1 if LV is not active.
lv_kernel_minor - Currently assigned minor number or -1 if LV is not active.
lv_kernel_read_ahead - Currently-in-use read ahead setting in current units.
lv_permissions - LV permissions.
lv_suspended - Set if LV is suspended.
lv_live_table - Set if LV has live table present.
lv_inactive_table - Set if LV has inactive table present.
lv_device_open - Set if LV device is open.
Logical Volume Device Status Fields
-----------------------------------
lv_all - All fields in this section.
cache_total_blocks - Total cache blocks.
cache_used_blocks - Used cache blocks.
cache_dirty_blocks - Dirty cache blocks.
cache_read_hits - Cache read hits.
cache_read_misses - Cache read misses.
cache_write_hits - Cache write hits.
cache_write_misses - Cache write misses.
kernel_cache_settings - Cache settings/parameters as set in kernel, including default values (cached segments only).
kernel_cache_policy - Cache policy used in kernel.
lv_health_status - LV health status.
kernel_discards - For thin pools, how discards are handled in kernel.
lv_check_needed - For thin pools and cache volumes, whether metadata check is needed.
Physical Volume Label Fields
----------------------------
pv_all - All fields in this section.
pv_fmt - Type of metadata.
pv_uuid - Unique identifier.
dev_size - Size of underlying device in current units.
pv_name - Name.
pv_major - Device major number.
pv_minor - Device minor number.
pv_mda_free - Free metadata area space on this device in current units.
pv_mda_size - Size of smallest metadata area on this device in current units.
pv_ext_vsn - PV header extension version.
Physical Volume Fields
----------------------
pv_all - All fields in this section.
pe_start - Offset to the start of data on the underlying device.
pv_size - Size of PV in current units.
pv_free - Total amount of unallocated space in current units.
pv_used - Total amount of allocated space in current units.
pv_attr - Various attributes - see man page.
pv_allocatable - Set if this device can be used for allocation.
pv_exported - Set if this device is exported.
pv_missing - Set if this device is missing in system.
pv_pe_count - Total number of Physical Extents.
pv_pe_alloc_count - Total number of allocated Physical Extents.
pv_tags - Tags, if any.
pv_mda_count - Number of metadata areas on this device.
pv_mda_used_count - Number of metadata areas in use on this device.
pv_ba_start - Offset to the start of PV Bootloader Area on the underlying device in current units.
pv_ba_size - Size of PV Bootloader Area in current units.
pv_in_use - Set if PV is used.
pv_duplicate - Set if PV is an unchosen duplicate.
Volume Group Fields
-------------------
vg_all - All fields in this section.
vg_fmt - Type of metadata.
vg_uuid - Unique identifier.
vg_name - Name.
vg_attr - Various attributes - see man page.
vg_permissions - VG permissions.
vg_extendable - Set if VG is extendable.
vg_exported - Set if VG is exported.
vg_partial - Set if VG is partial.
vg_allocation_policy - VG allocation policy.
vg_clustered - Set if VG is clustered.
vg_size - Total size of VG in current units.
vg_free - Total amount of free space in current units.
vg_sysid - System ID of the VG indicating which host owns it.
vg_systemid - System ID of the VG indicating which host owns it.
vg_lock_type - Lock type of the VG used by lvmlockd.
vg_lock_args - Lock args of the VG used by lvmlockd.
vg_extent_size - Size of Physical Extents in current units.
vg_extent_count - Total number of Physical Extents.
vg_free_count - Total number of unallocated Physical Extents.
max_lv - Maximum number of LVs allowed in VG or 0 if unlimited.
max_pv - Maximum number of PVs allowed in VG or 0 if unlimited.
pv_count - Number of PVs in VG.
vg_missing_pv_count - Number of PVs in VG which are missing.
lv_count - Number of LVs.
snap_count - Number of snapshots.
vg_seqno - Revision number of internal metadata. Incremented whenever it changes.
vg_tags - Tags, if any.
vg_profile - Configuration profile attached to this VG.
vg_mda_count - Number of metadata areas on this VG.
vg_mda_used_count - Number of metadata areas in use on this VG.
vg_mda_free - Free metadata area space for this VG in current units.
vg_mda_size - Size of smallest metadata area for this VG in current units.
vg_mda_copies - Target number of in use metadata areas in the VG.
Logical Volume Segment Fields
-----------------------------
seg_all - All fields in this section.
segtype - Type of LV segment.
stripes - Number of stripes or mirror legs.
stripe_size - For stripes, amount of data placed on one device before switching to the next.
region_size - For mirrors, the unit of data copied when synchronising devices.
chunk_size - For snapshots, the unit of data used when tracking changes.
thin_count - For thin pools, the number of thin volumes in this pool.
discards - For thin pools, how discards are handled.
cache_mode - For cache pools, how writes are cached.
zero - For thin pools and volumes, if zeroing is enabled.
transaction_id - For thin pools, the transaction id and creation transaction id for thins.
thin_id - For thin volume, the thin device id.
seg_start - Offset within the LV to the start of the segment in current units.
seg_start_pe - Offset within the LV to the start of the segment in physical extents.
seg_size - Size of segment in current units.
seg_size_pe - Size of segment in physical extents.
seg_tags - Tags, if any.
seg_pe_ranges - Ranges of Physical Extents of underlying devices in command line format (deprecated, use seg_le_ranges for common format).
seg_le_ranges - Ranges of Logical Extents of underlying devices in command line format.
seg_metadata_le_ranges - Ranges of Logical Extents of underlying metadata devices in command line format.
devices - Underlying devices used with starting extent numbers.
metadata_devices - Underlying metadata devices used with starting extent numbers.
seg_monitor - Dmeventd monitoring status of the segment.
cache_policy - The cache policy (cached segments only).
cache_settings - Cache settings/parameters (cached segments only).
Physical Volume Segment Fields
------------------------------
pvseg_all - All fields in this section.
pvseg_start - Physical Extent number of start of segment.
pvseg_size - Number of extents in segment.
Special Fields
--------------
selected - Set if item passes selection criteria.
help - Show help.
? - Show help.
[root@localhost ~]#
11) Thin Provisioning
[root@localhost ~]# pvcreate /dev/sdb
Physical volume "/dev/sdb" successfully created.
[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda vgcloud lvm2 a-- 2.99g 0
/dev/sdb lvm2 --- 3.00g 3.00g
/dev/sdd vgcloud lvm2 a-- 9.99g 0
/dev/vda2 cl lvm2 a-- 29.00g 4.00m
[root@localhost ~]# vgcreate vmail /dev/sdb
Volume group "vmail" successfully created
[root@localhost ~]# vgs
VG #PV #LV #SN Attr VSize VFree
cl 1 2 0 wz--n- 29.00g 4.00m
vgcloud 2 1 0 wz--n- 12.98g 0
vmail 1 0 0 wz--n- 3.00g 3.00g
[root@localhost ~]# lvcreate -l 100%FREE -T vmail/usermailpool (-T 볼륨그룹명/thin-pool-name)
Using default stripesize 64.00 KiB.
Logical volume "usermailpool" created.
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root cl -wi-ao---- 26.99g
swap cl -wi-ao---- 2.00g
lvdata vgcloud Cwi-aoC--- 9.99g [cachepool] [lvdata_corig] 5.25 4.98 0.00
usermailpool vmail twi-a-tz-- 2.99g 0.00 1.07
[root@localhost ~]# lvcreate -V 2G -n user1mail -T vmail/usermailpool
Using default stripesize 64.00 KiB.
Logical volume "user1mail" created.
[root@localhost ~]# lvcreate -V 2G -n user2mail -T vmail/usermailpool
Using default stripesize 64.00 KiB.
WARNING: Sum of all thin volume sizes (4.00 GiB) exceeds the size of thin pool vmail/usermailpool and the size of whole volume group (3.00 GiB)!
For thin pool auto extension activation/thin_pool_autoextend_threshold should be below 100.
Logical volume "user2mail" created.
[root@localhost ~]# lvcreate -V 2G -n user3mail -T vmail/usermailpool
Using default stripesize 64.00 KiB.
WARNING: Sum of all thin volume sizes (6.00 GiB) exceeds the size of thin pool vmail/usermailpool and the size of whole volume group (3.00 GiB)!
For thin pool auto extension activation/thin_pool_autoextend_threshold should be below 100.
Logical volume "user3mail" created.
[root@localhost ~]#
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root cl -wi-ao---- 26.99g
swap cl -wi-ao---- 2.00g
lvdata vgcloud Cwi-aoC--- 9.99g [cachepool] [lvdata_corig] 5.25 4.98 0.00
user1mail vmail Vwi-a-tz-- 2.00g usermailpool 0.00 // 버추얼한 로지컬볼륨
user2mail vmail Vwi-a-tz-- 2.00g usermailpool 0.00
user3mail vmail Vwi-a-tz-- 2.00g usermailpool 0.00
usermailpool vmail twi-aotz-- 2.99g 0.00 1.37
'UNIX > Redhat' 카테고리의 다른 글
Linux Admin2 - Chapter13 Control and troubleshoot the RHEL Boot process (0) | 2018.07.20 |
---|---|
Linux Admin2 - Chapter11 Access networked attached storage with NFS (0) | 2018.07.20 |
Linux Admin2 - Chapter7 Manage selinux security (0) | 2018.07.19 |
Linux Admin2 - Chapter6 Control access to files with Access Control Lists (ACL) (0) | 2018.07.19 |
Linux Admin2 - Chapter4 Schedule Future linux tasks (0) | 2018.07.18 |