
Pass the RHCE in One Month [Day 10]


Contents

I. RAID

1. Overview

2. Deploying a RAID Array

3. Damaging and Repairing a RAID Array

4. Hot Spare Disks

5. Deleting a RAID Array

II. LVM

1. Common LVM Commands

2. Deploying a Logical Volume

3. Growing a Logical Volume

4. Shrinking a Logical Volume

5. Snapshot Volumes

6. Deleting a Logical Volume


I. RAID

1. Overview

RAID (Redundant Array of Independent Disks) combines multiple physical disks into one logical device to improve performance, redundancy, or both. This article demonstrates RAID 10 (a stripe of mirrors) and RAID 5 (striping with distributed parity).

2. Deploying a RAID Array

VM setup: add four extra disks to the virtual machine.

(Figure: Chapter 7, Using RAID and LVM Disk Array Technology)

Common mdadm parameters:
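(The parameter table in the original was an image; the list below is reconstructed from the options actually used in this article.)

-C    create a new array
-v    verbose output
-n    number of active member devices
-l    RAID level
-x    number of hot-spare devices
-D    print detailed array status
-a    add a device to the array
-f    mark a member device as faulty
-r    remove a device from the array
-S    stop (deactivate) the array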

[root@linuxprobe ~]# ll /dev/sd*    # list the disk devices in the system
brw-rw----. 1 root disk 8,  0 Nov  1 15:29 /dev/sda
brw-rw----. 1 root disk 8,  1 Nov  1 15:29 /dev/sda1
brw-rw----. 1 root disk 8,  2 Nov  1 15:29 /dev/sda2
brw-rw----. 1 root disk 8, 16 Nov  1 15:29 /dev/sdb
brw-rw----. 1 root disk 8, 32 Nov  1 15:29 /dev/sdc
brw-rw----. 1 root disk 8, 48 Nov  1 15:29 /dev/sdd
brw-rw----. 1 root disk 8, 64 Nov  1 15:29 /dev/sde

[root@linuxprobe ~]# mdadm -Cv /dev/md0 -n 4 -l 10 /dev/sd[b-e]    # -C creates the array, -v shows progress, -n is the number of member disks, -l is the RAID level
mdadm: layout defaults to n2
mdadm: layout defaults to n2
mdadm: chunk size defaults to 512K
mdadm: size set to 20954112K
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.

[root@linuxprobe ~]# mdadm -D /dev/md0    # -D shows detailed status, including build progress
/dev/md0:
           Version : 1.2
     Creation Time : Mon Nov  1 15:44:14 2021
        Raid Level : raid10
        Array Size : 41908224 (39.97 GiB 42.91 GB)
     Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
      Raid Devices : 4
     Total Devices : 4
       Persistence : Superblock is persistent

       Update Time : Mon Nov  1 15:47:42 2021
             State : clean 
    Active Devices : 4
   Working Devices : 4
    Failed Devices : 0
     Spare Devices : 0

            Layout : near=2
        Chunk Size : 512K

Consistency Policy : resync

              Name : linuxprobe.com:0  (local to host linuxprobe.com)
              UUID : 0bfb0568:a9ec7c68:6f5777c3:f6e29607
            Events : 17

    Number   Major   Minor   RaidDevice State
       0       8       16        0      active sync set-A   /dev/sdb
       1       8       32        1      active sync set-B   /dev/sdc
       2       8       48        2      active sync set-A   /dev/sdd
       3       8       64        3      active sync set-B   /dev/sde

[root@linuxprobe ~]# mkdir /RAID    # create the mount point
[root@linuxprobe ~]# mount /dev/md0 /RAID/    # mount the array
[root@linuxprobe ~]# df -h    # verify the mount
Filesystem             Size  Used Avail Use% Mounted on
devtmpfs               969M     0  969M   0% /dev
tmpfs                  984M     0  984M   0% /dev/shm
tmpfs                  984M  9.6M  974M   1% /run
tmpfs                  984M     0  984M   0% /sys/fs/cgroup
/dev/mapper/rhel-root   17G  4.0G   14G  24% /
/dev/sda1             1014M  152M  863M  15% /boot
tmpfs                  197M   16K  197M   1% /run/user/42
tmpfs                  197M  2.3M  195M   2% /run/user/0
/dev/sr0               6.7G  6.7G     0 100% /run/media/root/RHEL-8-0-0-BaseOS-x86_64
/dev/md0                40G  319M   40G   1% /RAID
[root@linuxprobe ~]# vim /etc/fstab    # make the mount permanent
[root@linuxprobe ~]# cat /etc/fstab 

#
# /etc/fstab
# Created by anaconda on Sun Oct 10 14:16:07 2021
#
# Accessible filesystems, by reference, are maintained under '/dev/disk/'.
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info.
#
# After editing this file, run 'systemctl daemon-reload' to update systemd
# units generated from this file.
#
/dev/mapper/rhel-root   /                       xfs     defaults        0 0
UUID=510c4a0a-d775-44ad-bcab-3f4a612274d1 /boot                   xfs     defaults        0 0
/dev/mapper/rhel-swap   swap                    swap    defaults        0 0
/dev/md0    /RAID    xfs    defaults    0    0
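The fstab entry makes the mount persistent across reboots. A common extra step (not shown in the original) is to also record the array definition so it reassembles predictably at boot, although RHEL 8 normally auto-assembles it from the on-disk superblocks:

[root@linuxprobe ~]# mdadm --detail --scan >> /etc/mdadm.conf    # persist the array definition (optional)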


3. Damaging and Repairing a RAID Array


Detach /dev/sdc from the VM to simulate a failed disk; it no longer appears in the device list:
[root@linuxprobe ~]# ll /dev/sd*
brw-rw----. 1 root disk 8,  0 Nov  1 15:52 /dev/sda
brw-rw----. 1 root disk 8,  1 Nov  1 15:52 /dev/sda1
brw-rw----. 1 root disk 8,  2 Nov  1 15:52 /dev/sda2
brw-rw----. 1 root disk 8, 16 Nov  1 15:52 /dev/sdb
brw-rw----. 1 root disk 8, 48 Nov  1 15:52 /dev/sdd
brw-rw----. 1 root disk 8, 64 Nov  1 15:52 /dev/sde

[root@linuxprobe ~]# mdadm -D /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Mon Nov  1 15:44:14 2021
        Raid Level : raid10
        Array Size : 41908224 (39.97 GiB 42.91 GB)
     Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
      Raid Devices : 4
     Total Devices : 3
       Persistence : Superblock is persistent

       Update Time : Mon Nov  1 15:59:20 2021
             State : clean, degraded 
    Active Devices : 3
   Working Devices : 3
    Failed Devices : 0
     Spare Devices : 0

            Layout : near=2
        Chunk Size : 512K

Consistency Policy : resync

              Name : linuxprobe.com:0  (local to host linuxprobe.com)
              UUID : 0bfb0568:a9ec7c68:6f5777c3:f6e29607
            Events : 20

    Number   Major   Minor   RaidDevice State
       0       8       16        0      active sync set-A   /dev/sdb
       -       0        0        1      removed
       2       8       48        2      active sync set-A   /dev/sdd
       3       8       64        3      active sync set-B   /dev/sde
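Here the failure was simulated by detaching the virtual disk. If detaching hardware is not convenient, the same degraded state can be produced in software, using the fail/remove options demonstrated later in this article:

[root@linuxprobe ~]# mdadm /dev/md0 -f /dev/sdc    # mark the member faulty
[root@linuxprobe ~]# mdadm /dev/md0 -r /dev/sdc    # remove it from the array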

Re-attach the disk, add it back into the array, and check the status:
[root@linuxprobe ~]# mdadm /dev/md0 -a /dev/sdc    # add the disk back into the array
mdadm: added /dev/sdc
[root@linuxprobe ~]# mdadm -D /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Mon Nov  1 16:17:18 2021
        Raid Level : raid10
        Array Size : 41908224 (39.97 GiB 42.91 GB)
     Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
      Raid Devices : 4
     Total Devices : 4
       Persistence : Superblock is persistent

       Update Time : Mon Nov  1 16:27:34 2021
             State : clean, degraded, recovering 
    Active Devices : 3
   Working Devices : 4
    Failed Devices : 0
     Spare Devices : 1

            Layout : near=2
        Chunk Size : 512K

Consistency Policy : resync

    Rebuild Status : 3% complete

              Name : linuxprobe.com:0  (local to host linuxprobe.com)
              UUID : 34615b8b:6db6aa49:ede79808:1b7cf763
            Events : 22

    Number   Major   Minor   RaidDevice State
       0       8       16        0      active sync set-A   /dev/sdb
       4       8       32        1      spare rebuilding   /dev/sdc
       2       8       48        2      active sync set-A   /dev/sdd
       3       8       64        3      active sync set-B   /dev/sde
[root@linuxprobe ~]# mdadm -D /dev/md0    # check again: the rebuild has completed
/dev/md0:
           Version : 1.2
     Creation Time : Mon Nov  1 16:17:18 2021
        Raid Level : raid10
        Array Size : 41908224 (39.97 GiB 42.91 GB)
     Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
      Raid Devices : 4
     Total Devices : 4
       Persistence : Superblock is persistent

       Update Time : Mon Nov  1 16:29:19 2021
             State : clean 
    Active Devices : 4
   Working Devices : 4
    Failed Devices : 0
     Spare Devices : 0

            Layout : near=2
        Chunk Size : 512K

Consistency Policy : resync

              Name : linuxprobe.com:0  (local to host linuxprobe.com)
              UUID : 34615b8b:6db6aa49:ede79808:1b7cf763
            Events : 39

    Number   Major   Minor   RaidDevice State
       0       8       16        0      active sync set-A   /dev/sdb
       4       8       32        1      active sync set-B   /dev/sdc
       2       8       48        2      active sync set-A   /dev/sdd
       3       8       64        3      active sync set-B   /dev/sde

 

4. Hot Spare Disks

[root@linuxprobe ~]# mdadm -Cv /dev/md0 -n 3 -l 5 -x 1 /dev/sd[b-e]
mdadm: layout defaults to left-symmetric
mdadm: layout defaults to left-symmetric
mdadm: chunk size defaults to 512K
mdadm: size set to 20954112K
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.

[root@linuxprobe ~]# mdadm -D /dev/md0 
/dev/md0:
           Version : 1.2
     Creation Time : Tue Nov  2 09:54:55 2021
        Raid Level : raid5
        Array Size : 41908224 (39.97 GiB 42.91 GB)
     Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
      Raid Devices : 3
     Total Devices : 4
       Persistence : Superblock is persistent

       Update Time : Tue Nov  2 09:56:41 2021
             State : clean 
    Active Devices : 3
   Working Devices : 4
    Failed Devices : 0
     Spare Devices : 1

            Layout : left-symmetric
        Chunk Size : 512K

Consistency Policy : resync

              Name : linuxprobe.com:0  (local to host linuxprobe.com)
              UUID : eb99aadf:627197e2:14207fa6:a19068ad
            Events : 18

    Number   Major   Minor   RaidDevice State
       0       8       16        0      active sync   /dev/sdb
       1       8       32        1      active sync   /dev/sdc
       4       8       48        2      active sync   /dev/sdd

       3       8       64        -      spare   /dev/sde

[root@linuxprobe ~]# mkfs.ext4 /dev/md0    # create a filesystem on the array
mke2fs 1.44.3 (10-July-2018)
Creating filesystem with 10477056 4k blocks and 2621440 inodes
Filesystem UUID: 5a81f149-973a-45b1-a054-356ca1c28837
Superblock backups stored on blocks: 
    32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208, 
    4096000, 7962624

Allocating group tables: done                            
Writing inode tables: done                            
Creating journal (65536 blocks): done
Writing superblocks and filesystem accounting information: done   

[root@linuxprobe ~]# mkdir /raid
[root@linuxprobe ~]# mount /dev/md0 /raid/
[root@linuxprobe ~]# df -h
Filesystem             Size  Used Avail Use% Mounted on
devtmpfs               969M     0  969M   0% /dev
tmpfs                  984M     0  984M   0% /dev/shm
tmpfs                  984M  9.6M  974M   1% /run
tmpfs                  984M     0  984M   0% /sys/fs/cgroup
/dev/mapper/rhel-root   17G  4.0G   14G  24% /
/dev/sda1             1014M  152M  863M  15% /boot
tmpfs                  197M   16K  197M   1% /run/user/42
tmpfs                  197M  2.3M  195M   2% /run/user/0
/dev/sr0               6.7G  6.7G     0 100% /run/media/root/RHEL-8-0-0-BaseOS-x86_64
/dev/md0                40G   49M   38G   1% /raid
[root@linuxprobe ~]# echo "/dev/md0 /raid ext4 defaults 0 0 " >> /etc/fstab 
[root@linuxprobe ~]# cat /etc/fstab 

#
# /etc/fstab
# Created by anaconda on Sun Oct 10 14:16:07 2021
#
# Accessible filesystems, by reference, are maintained under '/dev/disk/'.
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info.
#
# After editing this file, run 'systemctl daemon-reload' to update systemd
# units generated from this file.
#
/dev/mapper/rhel-root   /                       xfs     defaults        0 0
UUID=510c4a0a-d775-44ad-bcab-3f4a612274d1 /boot                   xfs     defaults        0 0
/dev/mapper/rhel-swap   swap                    swap    defaults        0 0
/dev/md0 /raid ext4 defaults 0 0 


Detach a disk to simulate a failure; the hot spare (/dev/sde) automatically takes its place and begins rebuilding:
[root@linuxprobe ~]# mdadm -D /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Tue Nov  2 09:54:55 2021
        Raid Level : raid5
        Array Size : 41908224 (39.97 GiB 42.91 GB)
     Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
      Raid Devices : 3
     Total Devices : 3
       Persistence : Superblock is persistent

       Update Time : Tue Nov  2 10:11:05 2021
             State : clean, degraded, recovering 
    Active Devices : 2
   Working Devices : 3
    Failed Devices : 0
     Spare Devices : 1

            Layout : left-symmetric
        Chunk Size : 512K

Consistency Policy : resync

    Rebuild Status : 90% complete

              Name : linuxprobe.com:0  (local to host linuxprobe.com)
              UUID : eb99aadf:627197e2:14207fa6:a19068ad
            Events : 36

    Number   Major   Minor   RaidDevice State
       0       8       16        0      active sync   /dev/sdb
       3       8       64        1      spare rebuilding   /dev/sde
       4       8       48        2      active sync   /dev/sdd
[root@linuxprobe ~]# mdadm -D /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Tue Nov  2 09:54:55 2021
        Raid Level : raid5
        Array Size : 41908224 (39.97 GiB 42.91 GB)
     Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
      Raid Devices : 3
     Total Devices : 3
       Persistence : Superblock is persistent

       Update Time : Tue Nov  2 10:11:19 2021
             State : clean 
    Active Devices : 3
   Working Devices : 3
    Failed Devices : 0
     Spare Devices : 0

            Layout : left-symmetric
        Chunk Size : 512K

Consistency Policy : resync

              Name : linuxprobe.com:0  (local to host linuxprobe.com)
              UUID : eb99aadf:627197e2:14207fa6:a19068ad
            Events : 39

    Number   Major   Minor   RaidDevice State
       0       8       16        0      active sync   /dev/sdb
       3       8       64        1      active sync   /dev/sde
       4       8       48        2      active sync   /dev/sdd
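Rebuild progress can also be watched live from the kernel's RAID status file (a convenience not used in the original transcript):

[root@linuxprobe ~]# watch -n 1 cat /proc/mdstat    # refresh the resync/rebuild status every second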

 

5. Deleting a RAID Array

[root@linuxprobe ~]# umount /RAID    # unmount the array first

[root@linuxprobe ~]# mdadm /dev/md0 -f /dev/sd[b-e]    # mark every member disk faulty to simulate total failure
mdadm: set /dev/sdb faulty in /dev/md0
mdadm: set /dev/sdd faulty in /dev/md0
mdadm: set /dev/sde faulty in /dev/md0
[root@linuxprobe ~]# mdadm -D /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Tue Nov  2 09:54:55 2021
        Raid Level : raid5
        Array Size : 41908224 (39.97 GiB 42.91 GB)
     Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
      Raid Devices : 3
     Total Devices : 3
       Persistence : Superblock is persistent

       Update Time : Tue Nov  2 10:29:51 2021
             State : clean, FAILED 
    Active Devices : 0
    Failed Devices : 3
     Spare Devices : 0

            Layout : left-symmetric
        Chunk Size : 512K

Consistency Policy : resync

    Number   Major   Minor   RaidDevice State
       -       0        0        0      removed
       -       0        0        1      removed
       -       0        0        2      removed

       0       8       16        -      faulty   /dev/sdb
       3       8       64        -      faulty   /dev/sde
       4       8       48        -      faulty   /dev/sdd

[root@linuxprobe ~]# mdadm /dev/md0 -r /dev/sd[b-e]    # remove the failed devices
mdadm: hot removed /dev/sdb from /dev/md0
mdadm: hot removed /dev/sdd from /dev/md0
mdadm: hot removed /dev/sde from /dev/md0
[root@linuxprobe ~]# mdadm -S /dev/md0    # stop the array; -S is equivalent to --stop
mdadm: stopped /dev/md0
[root@linuxprobe ~]# mdadm -D /dev/md0
mdadm: cannot open /dev/md0: No such file or directory
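If the disks will be reused afterwards, their leftover RAID metadata should also be wiped (an extra step, not in the original transcript):

[root@linuxprobe ~]# mdadm --zero-superblock /dev/sd[b-e]    # erase the RAID superblocks from the member disks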


II. LVM

LVM (Logical Volume Manager) adds a management layer between physical disks and filesystems so that storage can be resized and snapshotted dynamically.

1. Common LVM Commands
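(The command table in the original was an image; the list below is reconstructed from the commands this article actually uses.)

pvcreate     initialize disks or partitions as physical volumes (PVs)
vgcreate     combine PVs into a volume group (VG)
vgdisplay    show volume group details
lvcreate     carve a logical volume (LV) out of a VG
lvdisplay    show logical volume details
lvextend     grow an LV
lvreduce     shrink an LV
lvconvert    convert an LV, e.g. merge a snapshot back with --merge
lvremove / vgremove / pvremove    delete LVs, VGs, and PVs, in that order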

2. Deploying a Logical Volume

[root@linuxprobe ~]# ll /dev/sd*
brw-rw----. 1 root disk 8,  0 Nov  2 11:29 /dev/sda
brw-rw----. 1 root disk 8,  1 Nov  2 11:29 /dev/sda1
brw-rw----. 1 root disk 8,  2 Nov  2 11:29 /dev/sda2
brw-rw----. 1 root disk 8, 16 Nov  2 11:29 /dev/sdb
brw-rw----. 1 root disk 8, 32 Nov  2 11:29 /dev/sdc
[root@linuxprobe ~]# pvcreate /dev/sd[b-c]
  Physical volume "/dev/sdb" successfully created.
  Physical volume "/dev/sdc" successfully created.
[root@linuxprobe ~]# vgcreate storage /dev/sdb /dev/sdc
  Volume group "storage" successfully created
[root@linuxprobe ~]# vgdisplay 
  --- Volume group ---
  VG Name               storage
  System ID             
  Format                lvm2
  Metadata Areas        2
  Metadata Sequence No  1
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                0
  Open LV               0
  Max PV                0
  Cur PV                2
  Act PV                2
  VG Size               39.99 GiB
  PE Size               4.00 MiB
  Total PE              10238
  Alloc PE / Size       0 / 0   
  Free  PE / Size       10238 / 39.99 GiB
  VG UUID               R3LtPW-F9iZ-P5T1-ZvCh-x5pO-hV9A-euoiNN
   
[root@linuxprobe ~]# lvcreate -n wang -l 100 storage    # -n sets the name; -l allocates extents (4 MiB each), so 100 extents = a 400 MiB logical volume
  Logical volume "wang" created.
[root@linuxprobe ~]# lvdisplay 
  --- Logical volume ---
  LV Path                /dev/storage/wang
  LV Name                wang
  VG Name                storage
  LV UUID                oYxbKi-nrDE-44qW-mDmL-hs5G-639S-0JbF1v
  LV Write Access        read/write
  LV Creation host, time linuxprobe.com, 2021-11-02 13:58:43 +0800
  LV Status              available
  # open                 0
  LV Size                400.00 MiB
  Current LE             100
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     8192
  Block device           253:2
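For reference, the same volume can be created by giving the size directly rather than as an extent count:

[root@linuxprobe ~]# lvcreate -n wang -L 400M storage    # -L takes an explicit size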
   
If you are using LVM, the XFS filesystem is not recommended here, because XFS can already grow itself with the xfs_growfs command. That is not as flexible as LVM, but it is usually good enough. During testing we also found that on some servers XFS and LVM did not cooperate well. Note, too, that XFS can only be grown, never shrunk, which is why ext4 is used for the shrinking examples below.
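For reference, had the volume been formatted as XFS, growing it would look like this sketch (XFS resizes while mounted, and only upward):

[root@linuxprobe ~]# lvextend -L 800M /dev/storage/wang    # grow the LV
[root@linuxprobe ~]# xfs_growfs /wang                      # grow the mounted XFS filesystem to fill it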

[root@linuxprobe ~]# mkfs.ext4 /dev/storage/wang 
mke2fs 1.44.3 (10-July-2018)
Creating filesystem with 409600 1k blocks and 102400 inodes
Filesystem UUID: 8bc5e110-86ce-4312-89d1-5a0237d7ce31
Superblock backups stored on blocks: 
    8193, 24577, 40961, 57345, 73729, 204801, 221185, 401409

Allocating group tables: done                            
Writing inode tables: done                            
Creating journal (8192 blocks): done
Writing superblocks and filesystem accounting information: done 

[root@linuxprobe ~]# mkdir /wang
[root@linuxprobe ~]# mount /dev/storage/wang /wang/
[root@linuxprobe ~]# echo "/dev/storage/wang /wang ext4 defaults 0 0" >> /etc/fstab 
[root@linuxprobe ~]# cat /etc/fstab 

#
# /etc/fstab
# Created by anaconda on Sun Oct 10 14:16:07 2021
#
# Accessible filesystems, by reference, are maintained under '/dev/disk/'.
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info.
#
# After editing this file, run 'systemctl daemon-reload' to update systemd
# units generated from this file.
#
/dev/mapper/rhel-root   /                       xfs     defaults        0 0
UUID=510c4a0a-d775-44ad-bcab-3f4a612274d1 /boot                   xfs     defaults        0 0
/dev/mapper/rhel-swap   swap                    swap    defaults        0 0
/dev/storage/wang /wang ext4 defaults 0 0
[root@linuxprobe ~]# df -h
Filesystem                Size  Used Avail Use% Mounted on
devtmpfs                  969M     0  969M   0% /dev
tmpfs                     984M     0  984M   0% /dev/shm
tmpfs                     984M  9.6M  974M   1% /run
tmpfs                     984M     0  984M   0% /sys/fs/cgroup
/dev/mapper/rhel-root      17G  4.0G   14G  24% /
/dev/sda1                1014M  152M  863M  15% /boot
tmpfs                     197M   16K  197M   1% /run/user/42
tmpfs                     197M  2.3M  195M   2% /run/user/0
/dev/sr0                  6.7G  6.7G     0 100% /run/media/root/RHEL-8-0-0-BaseOS-x86_64
/dev/mapper/storage-wang  380M  2.3M  354M   1% /wang
[root@linuxprobe ~]# reboot 

3. Growing a Logical Volume

[root@linuxprobe ~]# umount /wang    # unmount before resizing
[root@linuxprobe ~]# lvextend -L 800M /dev/storage/wang    # grow the logical volume to 800 MiB
  Size of logical volume storage/wang changed from 400.00 MiB (100 extents) to 800.00 MiB (200 extents).
  Logical volume storage/wang successfully resized.
[root@linuxprobe ~]# e2fsck -f /dev/storage/wang    # check filesystem integrity
e2fsck 1.44.3 (10-July-2018)
Pass 1: Checking inodes, blocks, and sizes
Pass 2: Checking directory structure
Pass 3: Checking directory connectivity
Pass 4: Checking reference counts
Pass 5: Checking group summary information
/dev/storage/wang: 11/102400 files (0.0% non-contiguous), 23456/409600 blocks
[root@linuxprobe ~]# resize2fs /dev/storage/wang    # grow the filesystem to match the new LV size
resize2fs 1.44.3 (10-July-2018)
Resizing the filesystem on /dev/storage/wang to 819200 (1k) blocks.
The filesystem on /dev/storage/wang is now 819200 (1k) blocks long.

[root@linuxprobe ~]# mount -a
[root@linuxprobe ~]# df -h
Filesystem                Size  Used Avail Use% Mounted on
devtmpfs                  969M     0  969M   0% /dev
tmpfs                     984M     0  984M   0% /dev/shm
tmpfs                     984M  9.3M  974M   1% /run
tmpfs                     984M     0  984M   0% /sys/fs/cgroup
/dev/mapper/rhel-root      17G  4.0G   14G  24% /
/dev/sda1                1014M  152M  863M  15% /boot
tmpfs                     197M   16K  197M   1% /run/user/42
tmpfs                     197M  4.0K  197M   1% /run/user/0
/dev/mapper/storage-wang  767M  2.5M  721M   1% /wang
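Strictly speaking, the unmount is only required for shrinking; an ext4 filesystem can be grown while mounted. The -r (--resizefs) option of lvextend also combines the volume and filesystem resize into one step, an alternative to the manual resize2fs above:

[root@linuxprobe ~]# lvextend -r -L 800M /dev/storage/wang    # resize LV and filesystem together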


4. Shrinking a Logical Volume

[root@linuxprobe ~]# umount /wang 
[root@linuxprobe ~]# df -h
Filesystem             Size  Used Avail Use% Mounted on
devtmpfs               969M     0  969M   0% /dev
tmpfs                  984M     0  984M   0% /dev/shm
tmpfs                  984M  9.3M  974M   1% /run
tmpfs                  984M     0  984M   0% /sys/fs/cgroup
/dev/mapper/rhel-root   17G  4.0G   14G  24% /
/dev/sda1             1014M  152M  863M  15% /boot
tmpfs                  197M   16K  197M   1% /run/user/42
tmpfs                  197M  4.0K  197M   1% /run/user/0
[root@linuxprobe ~]# e2fsck -f /dev/storage/wang 
e2fsck 1.44.3 (10-July-2018)
Pass 1: Checking inodes, blocks, and sizes
Pass 2: Checking directory structure
Pass 3: Checking directory connectivity
Pass 4: Checking reference counts
Pass 5: Checking group summary information
/dev/storage/wang: 11/204800 files (0.0% non-contiguous), 36617/819200 blocks

[root@linuxprobe ~]# resize2fs -f /dev/storage/wang 300M    # shrink the filesystem to 300 MiB first, before shrinking the LV
resize2fs 1.44.3 (10-July-2018)
Resizing the filesystem on /dev/storage/wang to 307200 (1k) blocks.
The filesystem on /dev/storage/wang is now 307200 (1k) blocks long.

[root@linuxprobe ~]# lvreduce -L 300M /dev/storage/wang 
  WARNING: Reducing active logical volume to 300.00 MiB.
  THIS MAY DESTROY YOUR DATA (filesystem etc.)
Do you really want to reduce storage/wang? [y/n]: y
  Size of logical volume storage/wang changed from 800.00 MiB (200 extents) to 300.00 MiB (75 extents).
  Logical volume storage/wang successfully resized.
[root@linuxprobe ~]# mount -a
[root@linuxprobe ~]# df -h
Filesystem                Size  Used Avail Use% Mounted on
devtmpfs                  969M     0  969M   0% /dev
tmpfs                     984M     0  984M   0% /dev/shm
tmpfs                     984M  9.3M  974M   1% /run
tmpfs                     984M     0  984M   0% /sys/fs/cgroup
/dev/mapper/rhel-root      17G  4.0G   14G  24% /
/dev/sda1                1014M  152M  863M  15% /boot
tmpfs                     197M   16K  197M   1% /run/user/42
tmpfs                     197M  4.0K  197M   1% /run/user/0
/dev/mapper/storage-wang  283M  2.1M  262M   1% /wang
[root@linuxprobe ~]# 
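Order matters when shrinking: the filesystem is reduced first, then the logical volume; doing it the other way around destroys data. As with growing, lvreduce's -r (--resizefs) option can run the filesystem check and resize automatically, an alternative to the manual steps above:

[root@linuxprobe ~]# lvreduce -r -L 300M /dev/storage/wang    # shrink filesystem and LV together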
 

5. Snapshot Volumes

Create a snapshot of the mounted volume, delete its contents, then merge the snapshot back to restore them:

[root@linuxprobe ~]# cd /wang/
[root@linuxprobe wang]# ls
adjtime                     mcelog
aliases                     microcode_ctl
alsa                        mime.types
alternatives                mke2fs.conf
[root@linuxprobe wang]# cd ~
[root@linuxprobe ~]# lvcreate -L 300M -s -n SNAP /dev/storage/wang    # -s creates a snapshot volume named SNAP; -L sets its size
  Logical volume "SNAP" created.
[root@linuxprobe ~]# rm -rf /wang/*
[root@linuxprobe ~]# ll /wang/
total 0
[root@linuxprobe ~]# umount /wang 
[root@linuxprobe ~]# lvconvert --merge /dev/storage/SNAP    # merge the snapshot back to restore the data
  Merging of volume storage/SNAP started.
  storage/wang: Merged: 100.00%
[root@linuxprobe ~]# mount -a
[root@linuxprobe ~]# ls /wang/
adjtime                     mcelog
aliases                     microcode_ctl
alsa                        mime.types
alternatives                mke2fs.conf
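Snapshots are copy-on-write, so the -L size only needs to cover the amount of data expected to change while the snapshot exists. Merging consumes the snapshot: once lvconvert --merge finishes, the SNAP volume is gone, which can be confirmed with:

[root@linuxprobe ~]# lvs storage    # list the logical volumes remaining in the VG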


6. Deleting a Logical Volume

Removal proceeds in reverse order of creation: first the logical volume, then the volume group, then the physical volumes.


[root@linuxprobe ~]# umount /wang 
[root@linuxprobe ~]# vim /etc/fstab    # delete the mount entry first
[root@linuxprobe ~]# df -h
Filesystem             Size  Used Avail Use% Mounted on
devtmpfs               969M     0  969M   0% /dev
tmpfs                  984M     0  984M   0% /dev/shm
tmpfs                  984M  9.3M  974M   1% /run
tmpfs                  984M     0  984M   0% /sys/fs/cgroup
/dev/mapper/rhel-root   17G  4.0G   14G  24% /
/dev/sda1             1014M  152M  863M  15% /boot
tmpfs                  197M   16K  197M   1% /run/user/42
tmpfs                  197M  4.0K  197M   1% /run/user/0
[root@linuxprobe ~]# lvremove /dev/storage/wang 
Do you really want to remove active logical volume storage/wang? [y/n]: y
  Logical volume "wang" successfully removed
[root@linuxprobe ~]# vgremove storage 
  Volume group "storage" successfully removed
[root@linuxprobe ~]# pvremove /dev/sd[b-c]
  Labels on physical volume "/dev/sdb" successfully wiped.
  Labels on physical volume "/dev/sdc" successfully wiped.
[root@linuxprobe ~]# 

 
