1: Hadoop
vi /etc/hosts (do this on all three VMs)
Press i to start editing
:wq --> save and quit
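A minimal sketch of the entries to add, with example addresses (substitute your VMs' real IPs):
192.168.128.10 master
192.168.128.11 slave1
192.168.128.12 slave2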
SSH keys (for passwordless login)
ssh-keygen -t rsa (press Enter 4 times)
ssh-copy-id master
ssh-copy-id slave1
ssh-copy-id slave2
ssh master (to verify passwordless login)
exit to return
Create the software directory
mkdir -p /root/software
Open PowerShell on the Windows host and upload the following files to /root/software/ (see the sketch after the list):
jdk-8u401-linux-x64.tar.gz
hadoop-3.3.6.tar.gz
mysql-5.7.25-1.el7.x86_64.rpm-bundle.tar
plus any file containing data (alldata.csv, used later in the HDFS section)
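A sketch of the upload from PowerShell, assuming Windows' built-in OpenSSH client and that the files sit in C:\downloads (both are assumptions; use the master VM's IP if the hostname does not resolve from Windows):
scp C:\downloads\jdk-8u401-linux-x64.tar.gz root@master:/root/software/
scp C:\downloads\hadoop-3.3.6.tar.gz root@master:/root/software/
scp C:\downloads\mysql-5.7.25-1.el7.x86_64.rpm-bundle.tar root@master:/root/software/
scp C:\downloads\alldata.csv root@master:/root/software/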
Extract the JDK
tar -zxvf /root/software/jdk-8u401-linux-x64.tar.gz -C /root/software
Configure the environment variables
vi /etc/profile
export JAVA_HOME=/root/software/jdk1.8.0_401
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
Reload the variables
source /etc/profile
Verify Java
java -version
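If the variables loaded correctly, the output should look something like this (build details may differ):
java version "1.8.0_401"
Java(TM) SE Runtime Environment (build 1.8.0_401-b10)
Java HotSpot(TM) 64-Bit Server VM (build 25.401-b10, mixed mode)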
Copy to the other nodes
scp -r /root/software/jdk1.8.0_401/ root@slave1:/root/software/
scp -r /root/software/jdk1.8.0_401/ root@slave2:/root/software/
scp /etc/profile root@slave1:/etc/ (then re-run source /etc/profile on slave1 and slave2)
scp /etc/profile root@slave2:/etc/
Extract the Hadoop package
tar -zxvf /root/software/hadoop-3.3.6.tar.gz -C /root/software
Create the data directories
mkdir -p /root/software/hadoop-3.3.6/hadoopDatas/tempDatas
mkdir -p /root/software/hadoop-3.3.6/hadoopDatas/namenodeDatas
mkdir -p /root/software/hadoop-3.3.6/hadoopDatas/datanodeDatas
mkdir -p /root/software/hadoop-3.3.6/hadoopDatas/dfs/nn/edits
mkdir -p /root/software/hadoop-3.3.6/hadoopDatas/dfs/snn/name
mkdir -p /root/software/hadoop-3.3.6/hadoopDatas/dfs/nn/snn/edits
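Note: only namenodeDatas and datanodeDatas are referenced by the hdfs-site.xml below; the dfs/* directories follow a common tutorial layout and are otherwise unused.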
Configure hadoop-env.sh
vi /root/software/hadoop-3.3.6/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/root/software/jdk1.8.0_401
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
core-site.xml
vi /root/software/hadoop-3.3.6/etc/hadoop/core-site.xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://master:9000</value>
  </property>
</configuration>
hdfs-site.xml
vi /root/software/hadoop-3.3.6/etc/hadoop/hdfs-site.xml
<configuration>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/root/software/hadoop-3.3.6/hadoopDatas/namenodeDatas</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/root/software/hadoop-3.3.6/hadoopDatas/datanodeDatas</value>
  </property>
</configuration>
mapred-site.xml
vi /root/software/hadoop-3.3.6/etc/hadoop/mapred-site.xml
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.env</name>
    <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
  </property>
  <property>
    <name>mapreduce.map.env</name>
    <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
  </property>
  <property>
    <name>mapreduce.reduce.env</name>
    <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
  </property>
</configuration>
yarn-site.xml
vi /root/software/hadoop-3.3.6/etc/hadoop/yarn-site.xml
<configuration>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>master</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
</configuration>
workers
vi /root/software/hadoop-3.3.6/etc/hadoop/workers
master
slave1
slave2
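Because master is listed in workers as well, it will run a DataNode and a NodeManager alongside the master daemons.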
Copy to the other nodes
scp -r /root/software/hadoop-3.3.6/ slave1:/root/software/
scp -r /root/software/hadoop-3.3.6/ slave2:/root/software/
Configure the Hadoop environment variables
vi /etc/profile.d/hadoop.sh
export HADOOP_HOME=/root/software/hadoop-3.3.6
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
Reload the variables
source /etc/profile
Copy the environment file to the other nodes (the Hadoop variables live in /etc/profile.d/hadoop.sh, so copy that file, not /etc/profile)
scp /etc/profile.d/hadoop.sh root@slave1:/etc/profile.d/ (then re-run source /etc/profile on slave1 and slave2)
scp /etc/profile.d/hadoop.sh root@slave2:/etc/profile.d/
Format HDFS (on master only; formatting wipes the NameNode metadata, so run it once)
hdfs namenode -format
Start the Hadoop cluster (run on master only; the scripts start the worker daemons over SSH)
start-dfs.sh
start-yarn.sh
mapred --daemon start historyserver
Verify
jps
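On master you should see roughly the following process names (PIDs will differ); slave1 and slave2 show only DataNode and NodeManager:
NameNode
DataNode
SecondaryNameNode
ResourceManager
NodeManager
JobHistoryServer
Jps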
2: MySQL
CentOS 7 ships with MariaDB, which conflicts with MySQL and must be removed first
Check
rpm -qa | grep mariadb
Remove it (the version suffix may differ; use whatever the previous command printed)
rpm -e --nodeps mariadb-libs-5.5.68-1.el7.x86_64
Upload the MySQL bundle (done earlier via PowerShell)
Extract it (a plain tar archive, so no -z flag)
tar -xvf /root/software/mysql-5.7.25-1.el7.x86_64.rpm-bundle.tar -C /root/software
Install in this order
rpm -ivh /root/software/mysql-community-common-5.7.25-1.el7.x86_64.rpm
rpm -ivh /root/software/mysql-community-libs-5.7.25-1.el7.x86_64.rpm
rpm -ivh /root/software/mysql-community-devel-5.7.25-1.el7.x86_64.rpm
rpm -ivh /root/software/mysql-community-libs-compat-5.7.25-1.el7.x86_64.rpm
rpm -ivh /root/software/mysql-community-client-5.7.25-1.el7.x86_64.rpm
rpm -ivh /root/software/mysql-community-server-5.7.25-1.el7.x86_64.rpm
Initialize
mysqld --initialize
Find the generated temporary root password in the log
grep password /var/log/mysqld.log
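The relevant line looks roughly like this (the password itself will differ):
[Note] A temporary password is generated for root@localhost: Xxxxxxxx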
Fix ownership (the data directory must belong to the mysql user)
chown -R mysql:mysql /var/lib/mysql/
Start MySQL
systemctl start mysqld
Log in to MySQL (use the temporary password from the log)
mysql -uroot -p
Change the password
alter user 'root'@'localhost' identified by '123456';
Allow root to connect from any host
use mysql;
update user set user.Host='%' where user.User='root';
Reload the privilege tables
flush privileges;
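To confirm the change took effect (still inside the mysql database):
select host, user from user where user='root';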
Quit
exit or quit
3: HDFS
Create a directory
hdfs dfs -mkdir /qw
List the root directory
hdfs dfs -ls /
Upload alldata.csv to /root/software/ on the local filesystem first, then put it into /qw on HDFS
hdfs dfs -put /root/software/alldata.csv /qw/
Verify
hdfs dfs -ls /qw
View the first ten lines of alldata.csv
hdfs dfs -cat /qw/alldata.csv | head -10
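If you only need a quick peek, Hadoop 3 also provides hdfs dfs -head /qw/alldata.csv, which prints just the first kilobyte of the file.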