Hadoop 3.1.1 HDFS Cluster Deployment

Contents

Dependencies
System Optimization
Check ZooKeeper Cluster Status
Create Directories
Configure /etc/profile
Configure $HADOOP_HOME/etc/hadoop/hadoop-env.sh
Configure $HADOOP_HOME/etc/hadoop/core-site.xml
Create the DataNode Exclude File
Configure $HADOOP_HOME/etc/hadoop/hdfs-site.xml
Configure $HADOOP_HOME/etc/hadoop/workers
Distribute the Configuration
Initialize HDFS
Start HDFS
Verify HDFS Status
Common Operations

Dependencies

JDK 1.8.0_333
ZooKeeper 3.5.5: https://blog.csdn.net/weixin_42598916/article/details/135726572?spm=1001.2014.3001.5502

System Optimization

Apply the following optimizations on every node.

# Change the hostname as needed
hostname hadoop1

# Disable SELinux
# Set the SELINUX value to disabled
vi /etc/selinux/config
SELINUX=disabled

# A reboot is required for the change to take full effect

# Check SELinux status
getenforce
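SELinux can also be switched off for the current session without a reboot; the config change above still controls the state after the next boot:

# Switch to permissive mode immediately (lasts until reboot)
setenforce 0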

# Disable the firewall
systemctl stop firewalld && systemctl disable firewalld && systemctl status firewalld

# Install the Chrony service
yum install chrony -y

# Configure the Chrony service
# Comment out the default NTP server addresses
# Add the desired NTP server address
vi /etc/chrony.conf
server hadoop1 iburst

# Enable the Chrony service and start it now
systemctl enable chronyd --now

# Check the Chrony service status

chronyc sources -v

210 Number of sources = 1

  .-- Source mode  '^' = server, '=' = peer, '#' = local clock.
 / .- Source state '*' = current synced, '+' = combined , '-' = not combined,
| /   '?' = unreachable, 'x' = time may be in error, '~' = time too variable.
||                                                 .- xxxx [ yyyy ] +/- zzzz
||      Reachability register (octal) -.           |  xxxx = adjusted offset,
||      Log2(Polling interval) --.      |          |  yyyy = measured offset,
||                                \     |          |  zzzz = estimated error.
||                                 |    |           \
MS Name/IP address         Stratum Poll Reach LastRx Last sample
===============================================================================
^* hadoop1                      4   6   377    12    -28us[  -45us] +/-   75ms
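For a more detailed view of the current offset and sync source, chronyc tracking can be used as well:

# Show detailed synchronization status
chronyc tracking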

# Configure passwordless SSH
# Generate a key pair on every node
ssh-keygen -t rsa

# Append each node's id_rsa.pub to the authorized_keys file on every node
ssh-copy-id root@hadoop1
ssh-copy-id root@hadoop2
ssh-copy-id root@hadoop3
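A quick sanity check that passwordless login actually works from the current node (a minimal sketch; extend the host list if the redis nodes also need key-based access):

# Each hostname should print without a password prompt
for h in hadoop1 hadoop2 hadoop3; do ssh root@$h hostname; done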

# Final result

cat /root/.ssh/authorized_keys

# redis-nodes

ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDwuKw9LdfDO3Ln+ViNtQEqZtH/RvoFymKkexBXRUK/2XcczKHPv967KHH71L/5vPOQPUXZLZg3TPERlRTIW9MvCh0LmceGAiQHrxczx56RnYh8nESknd2jbHBToGwqgoB8xsB2IQuhze0CqvRs7A0nrbyBvnUpg/DvePTOSSgii4z9kishBCbrCPamQm20drXVDK3gQ9Q+/YJLKa3+mxzI67xfk/jby0A0DD9XKL7fflRgMK0GXEtYsJ04tKc5Bo+w6Zc8gHyryFrKD4wpeoPakqmrdzaTVYI1x5WvrAPrQplxAP8iNfBqRJSHvlDBXVeXgSxz2I4HBshsStkKp root@redis1

ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDkspWeTwWoWyr6biMnefOYT4kh+7gPAboHAWe7p67IR9pfu+Rkk/vxLFDbi7X6Td9AhIXEZH6fY5BhihBzhRO/VtjE24QqnXdOLDHV1i0rSEYh6GOAbnVl/93lKidQF/2wvnQET31m1iwls3ul6aWw8/pOcxWy6kB+6MRiOExhu+0erE3jBFLcl+e0IJLKp/nLjCof/qWh3hLGVyhgMn/WmGhf7OyUbedXFqAwwS83/M60jSL1nB1lnIOoHrNSdnrN/GJVXmmwJjJAG4g4hbAg2zNind2rz6p4mq5k7iBbDUFghFwKKYsGeV0Onm7SKErFlHCJNFSOgfVNpaUYJ root@redis2

ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+DGKAYw3tbdmv2GDsz3HEOdoKk8JVCEvDFczap2g3DoaqwEBkRag2l9IQ3RZL/WtpKe0f2vZzcm5t3d7e6YhyfEXRn1fjOmynTcykB13xAVrlRfJ6Sayur0OiPzWBktpNj8qaTKjwH+lyHGBwa5duqKiVEglEH2mX5grcOa/mH2Mo+IWsCYeCldKjfdBy2drlAim1fYvJwvtg0uDe8sfDUdDonG4phNOVaWB2u79SxKlGnGewGNuOrifIzkbc0mH9kNgrlw/xdSIqaFA738Yn/4n/kSe3BgceJ0wBowLzorgW2ogyGOdQp6MzBRlg/hxn4EDLJisrC9mSCMOOl root@redis3

# hadoop-nodes

ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCvWawSJqu4/Adnu6TjvV8rVDAqTU2CGNaSBOTDjcytHHOaY8UiwUMKvXUJugBmRkyhtWhQPHrVSmOH6+qMnHk5XQcWBmce8qCQqDoz49WwyZH95ciY/ynKR9dzAJwXN5fvJEoKxBhSJLk27SDsgRUX05IAjTN5Wx05GCNC36CRGHr6bwsC5iK+nv1ZllkRPyqoICJcvVVoJFDe+svNwLJS8bEpTUS/3C6w1RdfEgGVK0/NLnmANz6VIu5LAZqOpwFcB8Zed3wgnoHUfDCSXLEUQbcgRxDvba7lcvOqbiNh4Tr6WctSHw0UD9PSK6AXdS0jAAyjZ1J5kbWaI+vmZ root@hadoop1

ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCwCqgQWDgw7sSqNer1oONzsFhoCWBmLqdWOQCcC7RYhD6kiVzdAEP7qZwWKRwoe/E++xP0+slgxsIsXGVoObGrlT3n+g/2xsgTCaBT/6sGV7k28UOozh76GlyfJjzavbwWE9Q2yR2mkb3/ILGE6CUNCkqqLuYEDTG4DxNupGhsGSYChAcjclzYFrMxDARiOJ8cahDjVlmGzFWxNhzJ36pFC1Rdyeu4CrtZ8tkuqQagGZqB63bVmvTiOM2fY8Wp8TNv0Zz2XmFmv7IUhpDXlPZdFCviwLYLLoJ9LTG32rO/jY0U78LFdDpsYdebthztNakKMZEhCqVIR+k1VMPtp root@hadoop2

ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHmj5qT64jSc3LCR2EBKB+12C1XxhFlc44X8zdf3mL8gbepG+ndMgBV4eombLg7QjZshCsjhk9d8esofAlrPk5tX/nWWHg3p5jYTh5/6V+iU7VDpWmMVN/87dsjBbmM9P6jTNiwqk4rdSXDKXkmrVygGHnEj95eP35Nq1JKg+GS7RjWWB0+loGQ4eYKO1nj2nYNOlNBi28CKh1uMWf42bDtcfKP3Z4gEOtPBD5rVPiU2Tq6jgtAs/VvaYGv5FHO4MB0lBE1ik8zp/4trfGU5hie/1PzCRAIvsqPEBSzeUs9nhHODj6vZYwgQupK9Qv5jEbQgh6pCGEfFZlfsC03 root@hadoop3

# Configure Oracle JDK
# Download the Oracle JDK and unpack it into the target path

# Append the following to /etc/profile (quote EOF so variables are expanded at login, not at write time)
cat >> /etc/profile << 'EOF'
# Oracle JDK 1.8.0_333
export JAVA_HOME=/data/service/jdk/jdk1.8.0_333
export CLASSPATH=$CLASSPATH:$JAVA_HOME/lib
export PATH=$PATH:$JAVA_HOME/bin
EOF

# Reload the configuration

source /etc/profile

# Check the JDK version

java -version

java version "1.8.0_333"

Java(TM) SE Runtime Environment (build 1.8.0_333-b02)

Java HotSpot(TM) 64-Bit Server VM (build 25.333-b02, mixed mode)

# Configure the HOSTS file
cat >> /etc/hosts << EOF
# redis-nodes
10.10.10.21 redis1
10.10.10.22 redis2
10.10.10.23 redis3
# hadoop-nodes
10.10.10.131 hadoop1
10.10.10.132 hadoop2
10.10.10.133 hadoop3
EOF
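A quick check that every alias resolves and is reachable (illustrative; adjust the host list to your topology):

# A failed ping points to a typo in /etc/hosts or a network problem
for h in redis1 redis2 redis3 hadoop1 hadoop2 hadoop3; do ping -c 1 $h > /dev/null && echo "$h ok"; done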

# Disable swap
swapoff -a

# Comment out the swap entry in /etc/fstab
vi /etc/fstab

# Configure vm.swappiness
echo "vm.swappiness = 0" >> /etc/sysctl.conf

# Reload the configuration

sysctl -p

# Configure transparent_hugepage
# Takes effect immediately
echo never > /sys/kernel/mm/transparent_hugepage/enabled && echo never > /sys/kernel/mm/transparent_hugepage/defrag

# Persist across reboots
echo "echo never > /sys/kernel/mm/transparent_hugepage/enabled" >> /etc/rc.local && echo "echo never > /sys/kernel/mm/transparent_hugepage/defrag" >> /etc/rc.local

# Configure the maximum number of open files and processes
# On CentOS 6 the file is 90-nproc.conf
# On CentOS 7 the file is 20-nproc.conf
vi /etc/security/limits.d/20-nproc.conf
* - nofile 655350
* - nproc 655350
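The new limits apply only to fresh sessions; log out, log back in, and verify:

# Both should print 655350 in a new session
ulimit -n
ulimit -u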

Check ZooKeeper Cluster Status

$ZK_HOME/bin/zkCli.sh -server hadoop1:2181,hadoop2:2181,hadoop3:2181

[zk: hadoop1:2181,hadoop2:2181,hadoop3:2181(CONNECTED) 0] ls /

[admin, brokers, cluster, config, consumers, controller, controller_epoch, hadoop-ha, hbase, isr_change_notification, latest_producer_id_block, log_dir_event_notification, rmstore, spark, yarn-leader-election, zookeeper]

Create Directories

Create the following directories on every node (hdfs_jn is the JournalNode data directory referenced in hdfs-site.xml below):

mkdir -p /data/service/hadoop/{hadoop_logs,hadoop_pid,hadoop_tmp,hdfs_nn1,hdfs_nn2,hdfs_dn1,hdfs_dn2,hdfs_dn3,hdfs_jn}

Configure /etc/profile

Configure the following environment variables on every node so that the HDFS scripts and commands are available for later start/stop and daily use:

# Hadoop 3.1.1

export HADOOP_HOME=/data/service/hadoop/hadoop-3.1.1

export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HADOOP_HOME/lib
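After editing, reload the profile and confirm the Hadoop binaries are on the PATH (assuming the Hadoop 3.1.1 tarball is already unpacked at $HADOOP_HOME):

# Reload and verify
source /etc/profile
hadoop version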

Configure $HADOOP_HOME/etc/hadoop/hadoop-env.sh

export JAVA_HOME="/data/service/jdk/jdk1.8.0_333"

export HADOOP_HOME="/data/service/hadoop/hadoop-3.1.1"

export HADOOP_CONF_DIR="/data/service/hadoop/hadoop-3.1.1/etc/hadoop"

export HADOOP_LOG_DIR="/data/service/hadoop/hadoop_logs"

export HADOOP_PID_DIR="/data/service/hadoop/hadoop_pid"

export HDFS_NAMENODE_OPTS="-Xms1024m -Xmx1024m -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=80 -XX:+CMSParallelRemarkEnabled"

export HDFS_DATANODE_OPTS="-Xms512m -Xmx512m -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=80 -XX:+CMSParallelRemarkEnabled"

export HDFS_ZKFC_OPTS="-Xms512m -Xmx512m -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=80 -XX:+CMSParallelRemarkEnabled"

export HDFS_JOURNALNODE_OPTS="-Xms512m -Xmx512m -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=80 -XX:+CMSParallelRemarkEnabled"

export HDFS_NAMENODE_USER=root

export HDFS_DATANODE_USER=root

export HDFS_ZKFC_USER=root

export HDFS_JOURNALNODE_USER=root

Configure $HADOOP_HOME/etc/hadoop/core-site.xml

<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://hdfscluster</value>
    <description>Default HDFS nameservice</description>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/data/service/hadoop/hadoop_tmp</value>
    <description>Directory for HDFS temporary files</description>
  </property>
  <property>
    <name>hadoop.http.staticuser.user</name>
    <value>root</value>
    <description>User that the HDFS Web UI uses to operate on HDFS</description>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>hadoop1:2181,hadoop2:2181,hadoop3:2181</value>
    <description>ZooKeeper quorum addresses</description>
  </property>
  <property>
    <name>fs.trash.interval</name>
    <value>1440</value>
    <description>How long deleted data is kept in the trash, in minutes. The default is 0, i.e. disabled</description>
  </property>
</configuration>

Create the DataNode Exclude File

touch $HADOOP_HOME/etc/hadoop/hdfs-exclude.txt
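The file stays empty for now. As a sketch of how it is used later: to decommission a DataNode, add its hostname to this file and tell the NameNode to re-read it (hadoop3 here is only an example):

# Decommission hadoop3
echo "hadoop3" >> $HADOOP_HOME/etc/hadoop/hdfs-exclude.txt
hdfs dfsadmin -refreshNodes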

Configure $HADOOP_HOME/etc/hadoop/hdfs-site.xml

<configuration>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/data/service/hadoop/hdfs_nn1,/data/service/hadoop/hdfs_nn2</value>
    <description>Directories for NameNode metadata</description>
  </property>
  <property>
    <name>dfs.hosts.exclude</name>
    <value>/data/service/hadoop/hadoop-3.1.1/etc/hadoop/hdfs-exclude.txt</value>
    <description>Path to the DataNode exclude (blacklist) file</description>
  </property>
  <property>
    <name>dfs.datanode.du.reserved</name>
    <value>1024</value>
    <description>Reserved space per disk volume, in bytes. The default is 0</description>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/data/service/hadoop/hdfs_dn1,/data/service/hadoop/hdfs_dn2,/data/service/hadoop/hdfs_dn3</value>
    <description>Directories for DataNode block data</description>
  </property>
  <property>
    <name>dfs.datanode.failed.volumes.tolerated</name>
    <value>1</value>
    <description>Number of failed disks a DataNode tolerates before shutting down</description>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>2</value>
    <description>Number of data replicas. The default is 3</description>
  </property>
  <property>
    <name>dfs.nameservices</name>
    <value>hdfscluster</value>
    <description>Nameservice ID; must match core-site.xml and is referenced throughout the settings below</description>
  </property>
  <property>
    <name>dfs.namenode.acls.enabled</name>
    <value>true</value>
    <description>Enable ACL permission management</description>
  </property>
  <property>
    <name>dfs.disk.balancer.enabled</name>
    <value>true</value>
    <description>Enable the disk balancer</description>
  </property>
  <property>
    <name>dfs.ha.namenodes.hdfscluster</name>
    <value>nn1,nn2,nn3</value>
    <description>NameNode IDs</description>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.hdfscluster.nn1</name>
    <value>hadoop1:8020</value>
    <description>RPC address of nn1</description>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.hdfscluster.nn2</name>
    <value>hadoop2:8020</value>
    <description>RPC address of nn2</description>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.hdfscluster.nn3</name>
    <value>hadoop3:8020</value>
    <description>RPC address of nn3</description>
  </property>
  <property>
    <name>dfs.namenode.http-address.hdfscluster.nn1</name>
    <value>hadoop1:9870</value>
    <description>Web UI address of nn1</description>
  </property>
  <property>
    <name>dfs.namenode.http-address.hdfscluster.nn2</name>
    <value>hadoop2:9870</value>
    <description>Web UI address of nn2</description>
  </property>
  <property>
    <name>dfs.namenode.http-address.hdfscluster.nn3</name>
    <value>hadoop3:9870</value>
    <description>Web UI address of nn3</description>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://redis1:8485;redis2:8485;redis3:8485/hdfscluster</value>
    <description>JournalNode quorum where NameNode edits are shared</description>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.hdfscluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    <description>Proxy provider clients use to find the active NameNode after a failover</description>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>shell(/bin/true)</value>
    <description>Fencing method run during failover; shell(/bin/true) always succeeds</description>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
    <description>SSH private key used for fencing</description>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/data/service/hadoop/hdfs_jn</value>
    <description>Directory for JournalNode data</description>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
    <description>Enable automatic failover. Once enabled, manual failover is no longer available</description>
  </property>
  <property>
    <name>dfs.namenode.handler.count</name>
    <value>21</value>
    <description>Number of NameNode server threads. Rule of thumb: 20 * ln(cluster size), i.e. python -c 'import math; print(int(math.log(N) * 20))'</description>
  </property>
  <property>
    <name>dfs.datanode.handler.count</name>
    <value>21</value>
    <description>Number of DataNode server threads, sized with the same formula</description>
  </property>
  <property>
    <name>dfs.datanode.max.transfer.threads</name>
    <value>8192</value>
    <description>Maximum number of threads for transferring data in and out of the DataNode. The default is 4096</description>
  </property>
</configuration>

Configure $HADOOP_HOME/etc/hadoop/workers

hadoop1

hadoop2

hadoop3

Distribute the Configuration

Distribute /data/service/hadoop to all nodes, for example as sketched below.
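With the passwordless SSH configured earlier, a simple scp loop is enough (a minimal sketch; rsync works equally well):

# Copy the Hadoop directory to the remaining nodes
for h in hadoop2 hadoop3; do scp -r /data/service/hadoop root@$h:/data/service/; done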

Initialize HDFS

1. Start all JournalNodes before formatting

hdfs --daemon start journalnode
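Per the qjournal URI in hdfs-site.xml, the JournalNodes run on redis1, redis2 and redis3, so the daemon must be started on those hosts. A sketch, using the full binary path since a non-interactive SSH shell does not source /etc/profile:

for h in redis1 redis2 redis3; do ssh root@$h "/data/service/hadoop/hadoop-3.1.1/bin/hdfs --daemon start journalnode"; done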

2. Format the HDFS namespace; run this only on the NN-Master node (hadoop1)

hdfs namenode -format

3. Create and initialize the HA znode in ZooKeeper

hdfs zkfc -formatZK
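Afterwards the HA znode should exist in ZooKeeper; a quick check (expected output shown as a sketch):

$ZK_HOME/bin/zkCli.sh -server hadoop1:2181 ls /hadoop-ha
[hdfscluster]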

Start HDFS

1. On NameNode-Master, start the NameNode

hdfs --daemon start namenode

2. On each NameNode-Backup, sync the metadata from the master

hdfs namenode -bootstrapStandby

3. Back on NameNode-Master, start the whole cluster

$HADOOP_HOME/sbin/start-dfs.sh
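Each node should now be running its expected daemons; jps gives a quick overview:

# On hadoop1-hadoop3, expect NameNode, DataNode and DFSZKFailoverController
jps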

Verify HDFS Status

HDFS Web UI: 10.10.10.131:9870
HDFS Web UI: 10.10.10.132:9870
HDFS Web UI: 10.10.10.133:9870

Check from the command line:

hdfs haadmin -getServiceState nn1

active

hdfs haadmin -getServiceState nn2

standby

hdfs haadmin -getServiceState nn3

standby

hdfs dfsadmin -report

Configured Capacity: 454060661760 (422.88 GB)

Present Capacity: 422851436544 (393.81 GB)

DFS Remaining: 422851325952 (393.81 GB)

DFS Used: 110592 (108 KB)

DFS Used%: 0.00%

Replicated Blocks:

Under replicated blocks: 0

Blocks with corrupt replicas: 0

Missing blocks: 0

Missing blocks (with replication factor 1): 0

Pending deletion blocks: 0

Erasure Coded Block Groups:

Low redundancy block groups: 0

Block groups with corrupt internal blocks: 0

Missing block groups: 0

Pending deletion blocks: 0

-------------------------------------------------

Live datanodes (3):

Name: 10.10.10.131:9866 (hadoop1)

Hostname: hadoop1

Decommission Status : Normal

Configured Capacity: 151353553920 (140.96 GB)

DFS Used: 36864 (36 KB)

Non DFS Used: 10601001984 (9.87 GB)

DFS Remaining: 140752515072 (131.09 GB)

DFS Used%: 0.00%

DFS Remaining%: 93.00%

Configured Cache Capacity: 0 (0 B)

Cache Used: 0 (0 B)

Cache Remaining: 0 (0 B)

Cache Used%: 100.00%

Cache Remaining%: 0.00%

Xceivers: 1

Last contact: Wed Nov 16 14:05:29 CST 2022

Last Block Report: Wed Nov 16 10:26:13 CST 2022

Num of Blocks: 0

Name: 10.10.10.132:9866 (hadoop2)

Hostname: hadoop2

Decommission Status : Normal

Configured Capacity: 151353553920 (140.96 GB)

DFS Used: 36864 (36 KB)

Non DFS Used: 10296947712 (9.59 GB)

DFS Remaining: 141056569344 (131.37 GB)

DFS Used%: 0.00%

DFS Remaining%: 93.20%

Configured Cache Capacity: 0 (0 B)

Cache Used: 0 (0 B)

Cache Remaining: 0 (0 B)

Cache Used%: 100.00%

Cache Remaining%: 0.00%

Xceivers: 1

Last contact: Wed Nov 16 14:05:29 CST 2022

Last Block Report: Wed Nov 16 13:02:34 CST 2022

Num of Blocks: 0

Name: 10.10.10.133:9866 (hadoop3)

Hostname: hadoop3

Decommission Status : Normal

Configured Capacity: 151353553920 (140.96 GB)

DFS Used: 36864 (36 KB)

Non DFS Used: 10311275520 (9.60 GB)

DFS Remaining: 141042241536 (131.36 GB)

DFS Used%: 0.00%

DFS Remaining%: 93.19%

Configured Cache Capacity: 0 (0 B)

Cache Used: 0 (0 B)

Cache Remaining: 0 (0 B)

Cache Used%: 100.00%

Cache Remaining%: 0.00%

Xceivers: 1

Last contact: Wed Nov 16 14:05:29 CST 2022

Last Block Report: Wed Nov 16 10:53:26 CST 2022

Num of Blocks: 0

Common Operations

# Get the value of a configuration key
hdfs getconf -confKey <key>

# Create a test file
echo 1 > test.log

# Create a test directory
hdfs dfs -mkdir hdfs://hdfscluster/test/

# Upload a file
hdfs dfs -put ./test.log hdfs://hdfscluster/test/

# Download a file
hdfs dfs -get hdfs://hdfscluster/test/test.log ./

# Delete a file
hdfs dfs -rm hdfs://hdfscluster/test/test.log

# Delete a file, bypassing the trash
hdfs dfs -rm -skipTrash hdfs://hdfscluster/test/test.log
