

Quickly Deploying Hadoop, Hive and Spark with Docker


I. Configuration file (YAML)

1. docker-compose.yml

version: '3.4'

services:
  namenode:
    image: test/hadoop-namenode:1.1.0-hadoop2.8-java8
    container_name: namenode
    volumes:
      - ./data/namenode:/hadoop/dfs/name
    environment:
      - CLUSTER_NAME=test
    env_file:
      - ./hadoop-hive.env
    ports:
      - 50070:50070
      - 8020:8020
      - 5005:5005

  resourcemanager:
    image: test/hadoop-resourcemanager:1.1.0-hadoop2.8-java8
    container_name: resourcemanager
    environment:
      - CLUSTER_NAME=test
    env_file:
      - ./hadoop-hive.env
    ports:
      - 8088:8088
    depends_on:
      - datanode

  historyserver:
    image: test/hadoop-historyserver:1.1.0-hadoop2.8-java8
    container_name: historyserver
    environment:
      - CLUSTER_NAME=test
    env_file:
      - ./hadoop-hive.env
    ports:
      - 8188:8188

  datanode:
    image: test/hadoop-datanode:1.1.0-hadoop2.8-java8
    container_name: datanode
    depends_on:
      - namenode
    volumes:
      - ./data/datanode:/hadoop/dfs/data
    env_file:
      - ./hadoop-hive.env
    ports:
      - 50075:50075
      - 50010:50010

  nodemanager:
    image: test/hadoop-nodemanager:1.1.0-hadoop2.8-java8
    container_name: nodemanager
    hostname: nodemanager
    environment:
      - CLUSTER_NAME=test
    env_file:
      - ./hadoop-hive.env
    ports:
      - 8042:8042

  hive-server:
    image: test/hive:2.1.0-postgresql-metastore
    container_name: hive-server
    env_file:
      - ./hadoop-hive.env
    environment:
      - "HIVE_CORE_CONF_javax_jdo_option_ConnectionURL=jdbc:postgresql://hive-metastore/metastore"
    ports:
      - "10000:10000"

  hive-metastore:
    image: test/hive:2.1.0-postgresql-metastore
    container_name: hive-metastore
    env_file:
      - ./hadoop-hive.env
    command: /opt/hive/bin/hive --service metastore
    ports:
      - 9083:9083

  # Hive metadata is stored in PostgreSQL
  hive-metastore-postgresql:
    image: test/hive-metastore-postgresql:2.1.0
    container_name: hive-metastore-postgresql
    ports:
      - 5432:5432
    # Bind mounts hit permission problems on Windows 10, so a named volume is used instead
    volumes:
      - hive-metastore-postgresql:/var/lib/postgresql/data

  spark-master:
    image: test/spark-master:2.1.0-hadoop2.8-hive-java8
    container_name: spark-master
    hostname: spark-master
    ports:
      - 8888:8080
      - 7077:7077
    env_file:
      - ./hadoop-hive.env
    volumes:
      - ./copy-jar.sh:/copy-jar.sh
      - ./hive-site.xml:/spark/conf/hive-site.xml   # config file that lets Spark connect to Hive

  spark-worker:
    image: test/spark-worker:2.1.0-hadoop2.8-hive-java8
    container_name: spark-worker
    depends_on:
      - spark-master
    environment:
      - SPARK_MASTER=spark://spark-master:7077
    ports:
      - "8181:8081"
    env_file:
      - ./hadoop-hive.env

  mysql-server:
    image: mysql:5.7
    container_name: mysql-server
    ports:
      - "3606:3306"
    environment:
      - MYSQL_ROOT_PASSWORD=123456
    volumes:
      - ./data/mysql:/var/lib/mysql   # data is persisted to ./data/mysql on the host

  elasticsearch:
    image: elasticsearch:7.5.2
    container_name: elasticsearch
    environment:
      # - discovery.type=single-node
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ports:
      - "9200:9200"
      - "9300:9300"
    volumes:
      - es1:/usr/share/elasticsearch/data
      - ./conf/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml

  kibana:
    image: kibana:7.5.2
    container_name: kibana
    ports:
      - "5601:5601"

  redis:
    image: redis
    container_name: redis
    ports:
      - "6379:6379"

  server:
    image: openjdk:8-jre-slim
    command: java -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005 -jar /app.jar
    ports:
      - "8000:8080"
      - "5555:5005"
    volumes:
      - ./application.yml:/application.yml:ro
      - ./spark-itags-1.0-SNAPSHOT.jar:/app.jar:ro
    environment:
      - TZ=Asia/Shanghai     # container time zone (default is UTC)
      - spring.profiles.active=development
    restart: "no"
    depends_on:
      - redis

volumes:
  hive-metastore-postgresql:
    driver: local
  es1:
    driver: local
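
Several of the services above read ./hadoop-hive.env, but the post does not include that file. As a minimal sketch, and assuming the images follow the common convention of rewriting CONF-prefixed environment variables into the matching core-site.xml / hdfs-site.xml / hive-site.xml entries (the HIVE_CORE_CONF_javax_jdo_option_ConnectionURL variable above uses this same scheme), it could look roughly like this; every key and credential below is an assumption, not part of the original post:

# hadoop-hive.env (hypothetical sketch, adjust to your images)
HIVE_SITE_CONF_javax_jdo_option_ConnectionURL=jdbc:postgresql://hive-metastore-postgresql/metastore
HIVE_SITE_CONF_javax_jdo_option_ConnectionDriverName=org.postgresql.Driver
HIVE_SITE_CONF_javax_jdo_option_ConnectionUserName=hive
HIVE_SITE_CONF_javax_jdo_option_ConnectionPassword=hive
HIVE_SITE_CONF_hive_metastore_uris=thrift://hive-metastore:9083
CORE_CONF_fs_defaultFS=hdfs://namenode:8020
HDFS_CONF_dfs_replication=1

Before starting anything, running docker-compose config prints the fully resolved configuration and is a quick way to confirm that the env_file and bind-mount paths resolve correctly.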

II. Scripts

1. Startup script: run.sh

#!/bin/bash
docker-compose -f docker-compose.yml up -d namenode hive-metastore-postgresql
docker-compose -f docker-compose.yml up -d datanode hive-metastore
docker-compose -f docker-compose.yml up -d resourcemanager
docker-compose -f docker-compose.yml up -d nodemanager
docker-compose -f docker-compose.yml up -d historyserver
sleep 5
docker-compose -f docker-compose.yml up -d hive-server
docker-compose -f docker-compose.yml up -d spark-master spark-worker
docker-compose -f docker-compose.yml up -d mysql-server
docker-compose -f docker-compose.yml up -d elasticsearch
docker-compose -f docker-compose.yml up -d kibana

# Pick out and print the host IP
my_ip=`ip route get 1 | awk '{print $NF;exit}'`   # $NF is the last field (column), i.e. print the last word of the line
echo "Namenode: http://${my_ip}:50070"
echo "Datanode: http://${my_ip}:50075"
echo "Spark-master: http://${my_ip}:8888"
docker-compose exec spark-master bash -c "./copy-jar.sh && exit"
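
Once the containers are up, a quick smoke test against HiveServer2 and the Spark master can look like the sketch below; the binary locations (/opt/hive/bin/beeline, /spark/bin/spark-shell) are assumptions based on the paths already used in the compose file and may differ in your images:

# list the running services and their port mappings
docker-compose ps
# open a Beeline session against HiveServer2 (port 10000 inside hive-server)
docker-compose exec hive-server /opt/hive/bin/beeline -u jdbc:hive2://localhost:10000 -e "show databases;"
# attach an interactive Spark shell to the standalone master
docker-compose exec spark-master /spark/bin/spark-shell --master spark://spark-master:7077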

2. Shutdown script: stop.sh

#!/bin/bash
docker-compose stop
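
docker-compose stop only halts the containers and leaves them (and their data) in place, so the next run of run.sh resumes from the same state. If you also want the containers and the compose network removed, docker-compose down does that as well; the named volumes (hive-metastore-postgresql, es1) are kept unless -v is added. A variant of stop.sh using it might look like:

#!/bin/bash
# remove containers and the default network; named volumes are preserved
docker-compose -f docker-compose.yml down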
Tags: hadoop hive docker

Reposted from: https://blog.csdn.net/m0_43405302/article/details/123166945
Copyright belongs to the original author, 绝域时空. Please contact us for removal in case of infringement.
