A failed disk on a Hadoop HDFS DataNode filled up the server's root partition
Author: 伍增田 (Tommy WU)
Problem description:
A disk in a production server failed. It was mounted at /srv/hadoop/data10 and was not replaced in time. As it happened, the server lost power and rebooted that night.
The next morning an alert showed that the root partition was almost full.
Cause analysis:
After the disk failed, the corresponding directory was not removed from dfs.datanode.data.dir. When the server rebooted from the power loss, the failed disk was no longer mounted, so /srv/hadoop/data10 became an ordinary directory on the root partition and the DataNode wrote its block data straight into the root filesystem.
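For reference, the DataNode data directories are listed in hdfs-site.xml under dfs.datanode.data.dir. A minimal sketch of what the entry might look like on this cluster (the exact directory list is an assumption based on the mount table shown later), with the failed /srv/hadoop/data10 already removed as the workaround below suggests:

<!-- hdfs-site.xml (illustrative sketch) -->
<property>
  <name>dfs.datanode.data.dir</name>
  <!-- /srv/hadoop/data10 removed after the disk failure; remaining dirs omitted for brevity -->
  <value>/srv/hadoop/data1,/srv/hadoop/data2,/srv/hadoop/data3,/srv/hadoop/data11</value>
</property>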
Solution:
Workaround: after a disk fails, promptly remove the corresponding directory from dfs.datanode.data.dir (see the configuration sketch above).
Thorough fix:
Check that every data directory is a mount point rather than a plain directory on the root partition, and raise an error if it is not. The check belongs in
org.apache.hadoop.util.DiskChecker.java
public static void checkDir(File dir) throws DiskErrorException
To decide whether a path such as /srv/hadoop/data10 is a mount point, compare the stat information of the path with that of its parent directory: if the path is a mount point, their device numbers are different.
[root@controller ~]# stat /srv
File: ‘/srv’
Size: 6 Blocks: 0 IO Block: 4096 directory
Device: fd00h/64768d Inode: 268629443 Links: 2
Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root)
Access: 2024-05-22 04:45:10.030737123 -0500
Modify: 2018-04-10 23:59:55.000000000 -0500
Change: 2024-03-27 13:05:58.401989843 -0500
Birth: -
Plain Java (java.io) has no API that directly returns the device number from a file's stat information, so one way to implement this is a JNI method that calls the libc stat() function. The check itself is easy to express in Python:
import os

def is_mount_point(path):
    # A mount point sits on a different device than its parent directory,
    # so compare the st_dev fields of path and path/.. (this is essentially
    # what os.path.ismount does).
    s1 = os.lstat(path)
    s2 = os.lstat(os.path.join(path, '..'))
    return s1.st_dev != s2.st_dev
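As an alternative to writing JNI, on a Unix-like JVM the NIO "unix" attribute view exposes the device number, which would allow the same comparison directly from Java code such as DiskChecker.checkDir. The sketch below is only an illustration, not Hadoop's actual implementation; the class and method names are made up for the example:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class MountPointCheck {
    // A path is treated as a mount point when its device number differs
    // from that of its parent directory. Requires a platform where the
    // "unix" file attribute view is available (Linux, etc.).
    public static boolean isMountPoint(Path dir) throws IOException {
        long devSelf = (Long) Files.getAttribute(dir, "unix:dev");
        long devParent = (Long) Files.getAttribute(dir.toAbsolutePath().getParent(), "unix:dev");
        return devSelf != devParent;
    }

    public static void main(String[] args) throws IOException {
        Path dataDir = Paths.get("/srv/hadoop/data10");
        if (!isMountPoint(dataDir)) {
            // In DiskChecker.checkDir this condition would raise a DiskErrorException.
            System.err.println(dataDir + " is not a mount point; refusing to use it as a data dir");
        }
    }
}

For reference, the mount table of a healthy DataNode (datanode5) shows each data directory mounted on its own device: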
[root@datanode5 ~]# cat /proc/self/mounts
rootfs / rootfs rw 0 0
sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
devtmpfs /dev devtmpfs rw,nosuid,size=32838580k,nr_inodes=8209645,mode=755 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,nosuid,nodev,mode=755 0 0
tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/net_cls cgroup rw,nosuid,nodev,noexec,relatime,net_cls 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpuacct,cpu 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
configfs /sys/kernel/config configfs rw,relatime 0 0
/dev/md125 / xfs rw,relatime,attr2,inode64,noquota 0 0
systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=31,pgrp=1,timeout=0,minproto=5,maxproto=5,direct 0 0
mqueue /dev/mqueue mqueue rw,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,relatime 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,relatime 0 0
/dev/md127 /boot xfs rw,relatime,attr2,inode64,noquota 0 0
/dev/sda1 /srv/hadoop/data1 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdb1 /srv/hadoop/data2 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdc1 /srv/hadoop/data3 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdd1 /srv/hadoop/data4 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdg1 /srv/hadoop/data5 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdh1 /srv/hadoop/data6 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdi1 /srv/hadoop/data7 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdj1 /srv/hadoop/data8 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdk1 /srv/hadoop/data9 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdl1 /srv/hadoop/data10 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdm1 /srv/hadoop/data11 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdn1 /srv/hadoop/data12 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdo1 /srv/hadoop/data13 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdq1 /srv/hadoop/data14 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdr1 /srv/hadoop/data15 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sds1 /srv/hadoop/data16 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdt1 /srv/hadoop/data17 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdu1 /srv/hadoop/data18 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdv1 /srv/hadoop/data19 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdw1 /srv/hadoop/data20 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
/dev/sdx1 /srv/hadoop/data21 xfs rw,noatime,nodiratime,attr2,nobarrier,inode64,noquota 0 0
tmpfs /run/user/0 tmpfs rw,nosuid,nodev,relatime,size=6569644k,mode=700 0 0
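A complementary check is to confirm that each configured data directory actually appears as a mount point in the mount table above. A minimal Java sketch of that idea (parsing kept simple; real code would want more robust handling):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class MountTable {
    // Returns the set of mount points: the second whitespace-separated
    // field of every line in /proc/self/mounts.
    public static Set<String> mountPoints() throws IOException {
        Set<String> points = new HashSet<>();
        List<String> lines = Files.readAllLines(Paths.get("/proc/self/mounts"));
        for (String line : lines) {
            String[] fields = line.split("\\s+");
            if (fields.length >= 2) {
                points.add(fields[1]);
            }
        }
        return points;
    }

    public static void main(String[] args) throws IOException {
        // Verify that the data directory from this incident is really a mount point.
        String dataDir = "/srv/hadoop/data10";
        if (!mountPoints().contains(dataDir)) {
            System.err.println(dataDir + " is not mounted; data written here would land on the root partition");
        }
    }
}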