1.raid 0 准备两个磁盘
[root@localhost ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 40G 0 disk
├─sda1 8:1 0 1G 0 part /boot
└─sda2 8:2 0 39G 0 part
├─centos-root 253:0 0 37G 0 lvm /
└─centos-swap 253:1 0 2G 0 lvm [SWAP]
sdb 8:16 0 20G 0 disk
sdc 8:32 0 20G 0 disk
开始raid0
[root@localhost ~]# mdadm -C -v /dev/md0 -l 0 -n 2 /dev/sdb /dev/sdc
mdadm: chunk size defaults to 512K
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
查看阵列信息
[root@localhost ~]# mdadm -Ds
ARRAY /dev/md0 metadata=1.2 name=localhost.localdomain:0 UUID=75952896:8013ba84:0167933a:2c4d70d6
[root@localhost ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 40G 0 disk
├─sda1 8:1 0 1G 0 part /boot
└─sda2 8:2 0 39G 0 part
├─centos-root 253:0 0 37G 0 lvm /
└─centos-swap 253:1 0 2G 0 lvm [SWAP]
sdb 8:16 0 20G 0 disk
└─md0 9:0 0 40G 0 raid0
sdc 8:32 0 20G 0 disk
└─md0 9:0 0 40G 0 raid0
格式化,挂载,创建目录
[root@localhost ~]# mkfs.xfs /dev/md0
meta-data=/dev/md0 isize=512 agcount=16, agsize=654720 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=10475520, imaxpct=25
= sunit=128 swidth=256 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=5120, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@localhost ~]# mkdir /raid0
[root@localhost ~]# mount /dev/md0 /raid0/
[root@localhost ~]# df -h
文件系统 容量 已用 可用 已用% 挂载点
devtmpfs 475M 0 475M 0% /dev
tmpfs 487M 0 487M 0% /dev/shm
tmpfs 487M 7.7M 479M 2% /run
tmpfs 487M 0 487M 0% /sys/fs/cgroup
/dev/mapper/centos-root 37G 16G 22G 43% /
/dev/sda1 1014M 136M 879M 14% /boot
tmpfs 98M 0 98M 0% /run/user/0
/dev/md0 40G 33M 40G 1% /raid0
尝试模拟一块磁盘故障,命令报错(RAID 0 没有冗余,mdadm 不允许将其成员标记为 faulty)。由此可见 RAID 0 的特点:读写性能约为 N*单盘,无冗余;应用场景:不需要备份的 Web 服务等。
[root@localhost ~]# mdadm /dev/md0 --fail /dev/sdc
mdadm: set device faulty failed for /dev/sdc: Device or resource busy
2.raid1
创建 RAID 1:准备 3 个磁盘,其中一块作为热备盘;
模拟磁盘故障时,热备盘会自动顶替故障磁盘,如下:
[root@localhost ~]# mdadm -C -v /dev/md1 -l 1 -n 2 /dev/sdd /dev/sde -x1 /dev/sdf
mdadm: Note: this array has metadata at the start and
may not be suitable as a boot device. If you plan to
store '/boot' on this device please ensure that
your boot-loader understands md/v1.x metadata, or use
--metadata=0.90
mdadm: size set to 20954112K
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md1 started.
[root@localhost ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 40G 0 disk
├─sda1 8:1 0 1G 0 part /boot
└─sda2 8:2 0 39G 0 part
├─centos-root 253:0 0 37G 0 lvm /
└─centos-swap 253:1 0 2G 0 lvm [SWAP]
sdb 8:16 0 20G 0 disk
└─md0 9:0 0 40G 0 raid0 /raid0
sdc 8:32 0 20G 0 disk
└─md0 9:0 0 40G 0 raid0 /raid0
sdd 8:48 0 20G 0 disk
└─md1 9:1 0 20G 0 raid1
sde 8:64 0 20G 0 disk
└─md1 9:1 0 20G 0 raid1
sdf 8:80 0 20G 0 disk
└─md1 9:1 0 20G 0 raid1
sdg 8:96 0 20G 0 disk
sdh 8:112 0 20G 0 disk
sdi 8:128 0 20G 0 disk
sdj 8:144 0 20G 0 disk
sr0 11:0 1 4.4G 0 rom
格式化,挂载,写点东西进去
[root@localhost ~]# mkfs.xfs -f /dev/md1
meta-data=/dev/md1 isize=512 agcount=4, agsize=1309632 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=5238528, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@localhost ~]# mkdir /raid1
[root@localhost ~]# mount /dev/md1 /raid1
[root@localhost ~]# echo "aaa0" >/raid1/aaa
[root@localhost ~]# cat /raid1/aaa
aaa0
[root@localhost ~]# df -h
文件系统 容量 已用 可用 已用% 挂载点
devtmpfs 475M 0 475M 0% /dev
tmpfs 487M 0 487M 0% /dev/shm
tmpfs 487M 7.7M 479M 2% /run
tmpfs 487M 0 487M 0% /sys/fs/cgroup
/dev/mapper/centos-root 37G 16G 22G 43% /
/dev/sda1 1014M 136M 879M 14% /boot
tmpfs 98M 0 98M 0% /run/user/0
/dev/md0 40G 33M 40G 1% /raid0
/dev/md1 20G 33M 20G 1% /raid1
模拟抽掉一块盘,查看数据是否还在,以及热备盘是否自动顶替
[root@localhost ~]# mdadm /dev/md1 --fail /dev/sde
mdadm: set /dev/sde faulty in /dev/md1
[root@localhost ~]# mdadm -D /dev/md1
/dev/md1:
Version : 1.2
Creation Time : Sun Apr 25 20:19:46 2021
Raid Level : raid1
Array Size : 20954112 (19.98 GiB 21.46 GB)
Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
Raid Devices : 2
Total Devices : 3
Persistence : Superblock is persistent
Update Time : Sun Apr 25 20:23:06 2021
State : clean, degraded, recovering
Active Devices : 1
Working Devices : 2
Failed Devices : 1
Spare Devices : 1
Consistency Policy : resync
Rebuild Status : 14% complete
Name : localhost.localdomain:1 (local to host localhost.localdomain)
UUID : 016ade52:fa8c0453:dd815be2:d2daee8a
Events : 27
Number Major Minor RaidDevice State
0 8 48 0 active sync /dev/sdd
2 8 80 1 spare rebuilding /dev/sdf
1 8 64 - faulty /dev/sde
我们看到热备盘正在同步数据。同步完成后的状态如下,并且数据还在,证明 RAID 1 的功能:磁盘利用率只有 50%,读性能约为 N*单盘,写性能约为 1*单盘,有冗余;应用场景:有状态的 DB 等。
[root@localhost ~]# mdadm -D /dev/md1
/dev/md1:
Version : 1.2
Creation Time : Sun Apr 25 20:19:46 2021
Raid Level : raid1
Array Size : 20954112 (19.98 GiB 21.46 GB)
Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
Raid Devices : 2
Total Devices : 3
Persistence : Superblock is persistent
Update Time : Sun Apr 25 20:24:37 2021
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 1
Spare Devices : 0
Consistency Policy : resync
Name : localhost.localdomain:1 (local to host localhost.localdomain)
UUID : 016ade52:fa8c0453:dd815be2:d2daee8a
Events : 44
Number Major Minor RaidDevice State
0 8 48 0 active sync /dev/sdd
2 8 80 1 active sync /dev/sdf
1 8 64 - faulty /dev/sde
[root@localhost ~]# cat /raid1/aaa
aaa0
移除坏的磁盘:
[root@localhost ~]# mdadm /dev/md1 -r /dev/sde
mdadm: hot removed /dev/sde from /dev/md1
[root@localhost ~]# mdadm -D /dev/md1
/dev/md1:
Version : 1.2
Creation Time : Sun Apr 25 20:19:46 2021
Raid Level : raid1
Array Size : 20954112 (19.98 GiB 21.46 GB)
Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Sun Apr 25 20:30:13 2021
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Consistency Policy : resync
Name : localhost.localdomain:1 (local to host localhost.localdomain)
UUID : 016ade52:fa8c0453:dd815be2:d2daee8a
Events : 45
Number Major Minor RaidDevice State
0 8 48 0 active sync /dev/sdd
2 8 80 1 active sync /dev/sdf
3.raid5
3.1准备4个磁盘,包含一个热备的磁盘,再模拟损坏一块磁盘,验证raid5的功能,如下:
[root@localhost ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 40G 0 disk
├─sda1 8:1 0 1G 0 part /boot
└─sda2 8:2 0 39G 0 part
├─centos-root 253:0 0 37G 0 lvm /
└─centos-swap 253:1 0 2G 0 lvm [SWAP]
sdb 8:16 0 20G 0 disk
└─md0 9:0 0 40G 0 raid0 /raid0
sdc 8:32 0 20G 0 disk
└─md0 9:0 0 40G 0 raid0 /raid0
sdd 8:48 0 20G 0 disk
└─md1 9:1 0 20G 0 raid1 /raid1
sde 8:64 0 20G 0 disk
sdf 8:80 0 20G 0 disk
└─md1 9:1 0 20G 0 raid1 /raid1
sdg 8:96 0 20G 0 disk
sdh 8:112 0 20G 0 disk
sdi 8:128 0 20G 0 disk
sdj 8:144 0 20G 0 disk
sr0 11:0 1 4.4G 0 rom
[root@localhost ~]# mdadm -C -v /dev/md5 -l 5 -n 3 /dev/sd{g,h,i} -x1 /dev/sdj
mdadm: layout defaults to left-symmetric
mdadm: layout defaults to left-symmetric
mdadm: chunk size defaults to 512K
mdadm: size set to 20954112K
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md5 started.
[root@localhost ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 40G 0 disk
├─sda1 8:1 0 1G 0 part /boot
└─sda2 8:2 0 39G 0 part
├─centos-root 253:0 0 37G 0 lvm /
└─centos-swap 253:1 0 2G 0 lvm [SWAP]
sdb 8:16 0 20G 0 disk
└─md0 9:0 0 40G 0 raid0 /raid0
sdc 8:32 0 20G 0 disk
└─md0 9:0 0 40G 0 raid0 /raid0
sdd 8:48 0 20G 0 disk
└─md1 9:1 0 20G 0 raid1 /raid1
sde 8:64 0 20G 0 disk
sdf 8:80 0 20G 0 disk
└─md1 9:1 0 20G 0 raid1 /raid1
sdg 8:96 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
sdh 8:112 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
sdi 8:128 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
sdj 8:144 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
sr0 11:0 1 4.4G 0 rom
格式化,挂载,写点东西进去:
[root@localhost ~]# mkfs.xfs -f /dev/md5
meta-data=/dev/md5 isize=512 agcount=16, agsize=654720 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=10475520, imaxpct=25
= sunit=128 swidth=256 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=5120, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@localhost ~]# mkdir /raid5
[root@localhost ~]# mount /dev/md5 /raid5/
[root@localhost ~]# echo raid5 > /raid5/aaaa
[root@localhost ~]# mdadm -D /dev/md5
/dev/md5:
Version : 1.2
Creation Time : Sun Apr 25 20:33:09 2021
Raid Level : raid5
Array Size : 41908224 (39.97 GiB 42.91 GB)
Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
Raid Devices : 3
Total Devices : 4
Persistence : Superblock is persistent
Update Time : Sun Apr 25 20:35:20 2021
State : clean
Active Devices : 3
Working Devices : 4
Failed Devices : 0
Spare Devices : 1
Layout : left-symmetric
Chunk Size : 512K
Consistency Policy : resync
Name : localhost.localdomain:5 (local to host localhost.localdomain)
UUID : 02342380:da55c73a:16786aa6:3e84863c
Events : 20
Number Major Minor RaidDevice State
0 8 96 0 active sync /dev/sdg
1 8 112 1 active sync /dev/sdh
4 8 128 2 active sync /dev/sdi
3 8 144 - spare /dev/sdj
模拟损坏,验证raid5功能,如下;
[root@localhost ~]# mdadm /dev/md5 --fail /dev/sdg
mdadm: set /dev/sdg faulty in /dev/md5
[root@localhost ~]# mdadm -D /dev/md5
/dev/md5:
Version : 1.2
Creation Time : Sun Apr 25 20:33:09 2021
Raid Level : raid5
Array Size : 41908224 (39.97 GiB 42.91 GB)
Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
Raid Devices : 3
Total Devices : 4
Persistence : Superblock is persistent
Update Time : Sun Apr 25 20:37:05 2021
State : clean, degraded, recovering
Active Devices : 2
Working Devices : 3
Failed Devices : 1
Spare Devices : 1
Layout : left-symmetric
Chunk Size : 512K
Consistency Policy : resync
Rebuild Status : 5% complete
Name : localhost.localdomain:5 (local to host localhost.localdomain)
UUID : 02342380:da55c73a:16786aa6:3e84863c
Events : 22
Number Major Minor RaidDevice State
3 8 144 0 spare rebuilding /dev/sdj
1 8 112 1 active sync /dev/sdh
4 8 128 2 active sync /dev/sdi
0 8 96 - faulty /dev/sdg
看到 /dev/sdj 顶上去了。等同步完成后查看数据,并移除坏的磁盘:
[root@localhost ~]# cat /raid5/aaaa
raid5
[root@localhost ~]# mdadm /dev/md5 -r /dev/sdg
mdadm: hot removed /dev/sdg from /dev/md5
[root@localhost ~]# mdadm -D /dev/md5
/dev/md5:
Version : 1.2
Creation Time : Sun Apr 25 20:33:09 2021
Raid Level : raid5
Array Size : 41908224 (39.97 GiB 42.91 GB)
Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
Raid Devices : 3
Total Devices : 3
Persistence : Superblock is persistent
Update Time : Sun Apr 25 20:39:09 2021
State : clean
Active Devices : 3
Working Devices : 3
Failed Devices : 0
Spare Devices : 0
Layout : left-symmetric
Chunk Size : 512K
Consistency Policy : resync
Name : localhost.localdomain:5 (local to host localhost.localdomain)
UUID : 02342380:da55c73a:16786aa6:3e84863c
Events : 44
Number Major Minor RaidDevice State
3 8 144 0 active sync /dev/sdj
1 8 112 1 active sync /dev/sdh
4 8 128 2 active sync /dev/sdi
验证 RAID 5 功能实现。
磁盘利用率:(N-1)/N,相当于用一块磁盘的容量存放奇偶校验;
读性能:约为 (N-1)*单盘;
写性能:因需计算并写入奇偶校验而有所下降,但仍可接近 (N-1)*单盘;
整体读写性能接近 RAID 0;
有冗余,允许一块磁盘损坏;应用场景:需要冗余保护的 Web、DB 等。
4.raid10
先做raid1,再做raid0
磁盘利用率:50%
冗余:每一对镜像中只要有一块磁盘完好即可。
应用场景:需要备份的DB
5.RAID 创建后添加磁盘、停止 RAID(-a 将磁盘加入阵列作为热备,-S 停止并释放阵列):
mdadm /dev/md5 -a /dev/sdb
mdadm -S /dev/md5
手机扫一扫
移动阅读更方便
你可能感兴趣的文章