环境搭建
一、修改主机名
永久修改（hostnamectl set-hostname 设置的主机名重启后依然生效）
hostnamectl set-hostname master
刷新
bash
二、关闭防火墙
查看防火墙状态
systemctl status firewalld
关闭防火墙
systemctl stop firewalld
三、主机映射
vi /etc/hosts
三台都需要添加
四、修改时区
tzselect
五、确认ntp服务是否安装
安装命令: yum install -y ntp
屏蔽默认server 设置master为本地时钟源 服务器层级(stratum)设为10
vi /etc/ntp.conf
server 127.127.1.0
fudge 127.127.1.0 stratum 10
重新启动 systemctl restart ntpd.service
启动 systemctl start ntpd.service
查看状态 systemctl status ntpd.service
ntp只需要在主节点上
从节点同步一下 ntpdate master
六、定时任务
crontab -e
*/30 10-17 * * * /usr/sbin/ntpdate master
七、免密登录
ssh-keygen
ssh-copy-id master
在主节点对从节点进行免密登录
八、jdk安装
配置环境变量
export JAVA_HOME=/usr/java/jdk1.8.0_171
export CLASSPATH=$JAVA_HOME/lib/
export PATH=$PATH:$JAVA_HOME/bin
export PATH JAVA_HOME CLASSPATH
将安装好的java目录拷贝到从节点
scp -r /usr/java slave1:/usr/
九、Zookeeper搭建
配置环境变量
export ZOOKEEPER_HOME=/usr/zookeeper/zookeeper-3.4.10
export PATH=$PATH:$ZOOKEEPER_HOME/bin
修改zoo.cfg文件
#The number of milliseconds of each tick
tickTime=2000
#The number of ticks that the initial
#synchronization phase can take
initLimit=10
#The number of ticks that can pass between
#sending a request and getting an acknowledgement
syncLimit=5
#the directory where the snapshot is stored.
#do not use /tmp for storage, /tmp here is just
#example sakes.
dataDir=/usr/zookeeper/zookeeper-3.4.10/zkdata
dataLogDir=/usr/zookeeper/zookeeper-3.4.10/zkdatalog
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
创建存储目录
mkdir zkdata zkdatalog
进入zkdata中创建myid内容为1
将zookeeper同步到slave1 slave2
scp -r /usr/zookeeper/ slave1:/usr/
启动
三个同时启动
zkServer.sh start
查看状态
zkServer.sh status
十、 hadoop搭建
环境变量
export HADOOP_HOME=/usr/hadoop/hadoop-2.7.3
export CLASSPATH=$CLASSPATH:$HADOOP_HOME/lib
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin
配置hadoop运行环境JAVA_HOME vi hadoop-env.sh
export JAVA_HOME=/usr/java/jdk1.8.0_171
配置yarn运行环境JAVA_HOME vi yarn-env.sh
export JAVA_HOME=/usr/java/jdk1.8.0_171
配置core-site.xml
<property>
<name>fs.defaultFS</name>
<value>hdfs://master:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/usr/hadoop/hadoop-2.7.3/hdfs/tmp</value>
</property>
配置hdfs-site.xml
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/usr/hadoop/hadoop-2.7.3/hdfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/usr/hadoop/hadoop-2.7.3/hdfs/data</value>
</property>
配置yarn-site.xml
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>master:18141</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
配置mapred-site.xml
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
配置 slaves（每个主机名单独一行）
slave1
slave2
配置 master
master
分发到slave1 slave2
scp -r /usr/hadoop/ slave2:/usr/
格式化
hadoop namenode -format
mysql服务
systemctl start mysqld.service
查看密码
grep 'temporary password' /var/log/mysqld.log
设置密码强度
set global validate_password_policy=0;
设置密码长度
set global validate_password_length=6;
修改本地密码
alter user 'root'@'localhost' identified by 'root';
任意节点可以访问
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'root' WITH GRANT OPTION;
5.7
/etc/my.cnf
bind-address = 0.0.0.0
刷新权限
flush privileges;
hive安装
分发slave1 slave2
scp -r /usr/hive slave1:/usr/
环境变量
export HIVE_HOME=/usr/hive/apache-hive-2.1.1-bin
export PATH=$PATH:$HIVE_HOME/bin
设置运行环境
vi hive-env.sh
export HADOOP_HOME=/usr/hadoop/hadoop-2.7.3
export HIVE_CONF_DIR=/usr/hive/apache-hive-2.1.1-bin/conf
export HIVE_AUX_JARS_PATH=/usr/hive/apache-hive-2.1.1-bin/lib
解决jline版本冲突问题
cp /usr/hive/apache-hive-2.1.1-bin/lib/jline-2.12.jar /usr/hadoop/hadoop-2.7.3/share/hadoop/yarn/lib/
重新分发
hive服务端slave1
配置vi hive-site.xml
<configuration>
<!-- hive 产生的元数据存放的位置 -->
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/user/hive_remote/warehouse</value>
</property>
<!-- 数据库连接driver mysql驱动 -->
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<!-- 数据库连接jdbc的url地址（XML中 & 必须写成 &amp;amp; 否则解析失败） -->
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://slave2:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
</property>
<!-- mysql数据库用户名 -->
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<!-- mysql数据库密码 -->
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>root</value>
</property>
</configuration>
cp mysql-connector-java-5.1.49.jar /usr/hive/apache-hive-2.1.1-bin/lib/
hive客户端master
配置vi hive-site.xml
<configuration>
<!-- 存放hive产生的元数据位置 -->
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/user/hive_remote/warehouse</value>
</property>
<!-- 是否使用本地metastore服务 默认为true 远程模式需设为false -->
<property>
<name>hive.metastore.local</name>
<value>false</value>
</property>
<!-- 连接metastore服务器地址 -->
<property>
<name>hive.metastore.uris</name>
<value>thrift://slave1:9083</value>
</property>
</configuration>
启动服务slave1
切换到hive主目录
初始化bin/schematool -dbType mysql -initSchema
bin/hive --service metastore
启动 master
切换到hive主目录
bin/hive
mysql 修改安全策略
SET GLOBAL validate_password_policy=LOW;
SET GLOBAL validate_password_length=4;
SET GLOBAL validate_password_number_count=1;
SET GLOBAL validate_password_special_char_count=1;
SHOW VARIABLES LIKE 'validate_password%';
修改密码
set password = password("root");
mysql> FLUSH PRIVILEGES; #刷新