# Set a unique hostname on EACH node — run each pair on its own machine,
# not all three on one host (later commands would overwrite earlier ones).
# `bash` starts a fresh shell so the prompt picks up the new hostname.
hostnamectl set-hostname master
bash
# On the first worker node:
hostnamectl set-hostname slave1
bash
# On the second worker node:
hostnamectl set-hostname slave2
bash
# Create the dedicated hadoop service user and set its password.
useradd hadoop
passwd hadoop
# The two '123' lines below are typed at the passwd prompts
# (NOTE: '123' is a weak, demo-only password — change for real clusters).
123
123
# Grant the hadoop user sudo rights. Use visudo instead of plain vi:
# visudo syntax-checks /etc/sudoers on save, so a typo cannot lock you
# out of sudo on every node.
visudo
# Line to add inside the editor:
hadoop ALL=(ALL) ALL
# Add IP-to-hostname mappings for master/slave1/slave2 inside the editor
# (do this on every node so they can resolve each other by name).
vi /etc/hosts
# Stop the firewall now and keep it disabled across reboots so Hadoop's
# many ports (9000, 9870, 8088, ...) are reachable between nodes.
# NOTE(review): acceptable for a lab cluster only — do not do this in production.
systemctl stop firewalld
systemctl disable firewalld
# Generate an RSA key pair (press Enter at each ssh-keygen prompt to
# accept the default path and an empty passphrase — presumably what was
# done here, since no responses are recorded).
ssh-keygen -t rsa
# Copy the public key to every node (including this one) so the hadoop
# user can SSH without a password. For each host:
#   'yes' accepts the host-key fingerprint prompt,
#   '123' is the account password typed at the prompt.
ssh-copy-id master
yes
123
ssh-copy-id slave1
yes
123
ssh-copy-id slave2
yes
123
# Directory that will hold the unpacked JDK and Hadoop distributions.
mkdir /opt/module
# Environment variables — presumably appended to /etc/profile (or
# ~/.bashrc) and then sourced; the target file is not shown here — TODO confirm.
export JAVA_HOME=/opt/module/jdk
export PATH=$PATH:$JAVA_HOME/bin
export HADOOP_HOME=/opt/module/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
# Hadoop 3.x requires these *_USER variables when the start/stop scripts
# are run so daemons launch as the 'hadoop' user instead of root.
export HDFS_NAMENODE_USER=hadoop
export HDFS_DATANODE_USER=hadoop
export HDFS_SECONDARYNAMENODE_USER=hadoop
export YARN_RESOURCEMANAGER_USER=hadoop
export YARN_NODEMANAGER_USER=hadoop
# Edit Hadoop's workers file (presumably $HADOOP_HOME/etc/hadoop/workers
# — the cwd is not shown; confirm). One DataNode/NodeManager host per line.
vi workers
# File contents (note: listing 'master' here makes it a worker too):
master
slave1
slave2
# JAVA_HOME must also be set explicitly inside Hadoop's env scripts,
# since the daemons do not inherit the login shell's environment.
# The two identical lines presumably go into hadoop-env.sh and
# yarn-env.sh respectively — TODO confirm which files were edited.
export JAVA_HOME=/opt/module/jdk
export JAVA_HOME=/opt/module/jdk
<property>
  <name></name>
  <value></value>
</property>
core-site.xml
fs.defaultFS
hdfs://master:9000
hadoop.tmp.dir
/opt/module/hadoop/data
hadoop.http.staticuser.user
hadoop
hadoop.proxyuser.hadoop.hosts
*
hadoop.proxyuser.hadoop.groups
*
hadoop.proxyuser.hadoop.users
*
hdfs-site.xml
dfs.replication
3
dfs.namenode.http-address
master:9870
dfs.namenode.secondary.http-address
master:50090
dfs.permissions.enabled
false
yarn-site.xml
yarn.nodemanager.aux-services
mapreduce_shuffle
yarn.resourcemanager.hostname
master
yarn.nodemanager.env-whitelist
JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME
yarn.scheduler.minimum-allocation-mb
2048
yarn.scheduler.maximum-allocation-mb
4096
yarn.nodemanager.resource.memory-mb
8192
yarn.nodemanager.pmem-check-enabled
false
yarn.nodemanager.vmem-check-enabled
false
yarn.log-aggregation-enable
true
yarn.log.server.url
http://master:19888/jobhistory/logs
yarn.log-aggregation.retain-seconds
604800
mapred-site.xml
mapreduce.framework.name
yarn
mapreduce.jobhistory.address
master:10020
mapreduce.jobhistory.webapp.address
master:19888