services:
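  # One network, two stacks: the big-data-europe Hadoop images (HDFS
  # NameNode/DataNode, YARN ResourceManager/NodeManager, history server)
  # plus the Confluent Platform 7.5.0 services (ZooKeeper, Kafka broker,
  # Schema Registry, Kafka Connect, Control Center, ksqlDB, REST Proxy).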
  namenode:
    image: bde2020/hadoop-namenode:2.0.0-hadoop3.2.1-java8
    container_name: namenode
    restart: always
    ports:
      - 9870:9870
      - 9000:9000
    volumes:
      - hadoop_namenode:/hadoop/dfs/name
    environment:
      - CLUSTER_NAME=test
    env_file:
      - ./hadoop.env
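  # The NameNode publishes 9870 (HDFS web UI) and 9000 (fs.defaultFS RPC).
  # The SERVICE_PRECONDITION values below are read by the bde2020 image
  # entrypoints, which wait until each listed host:port responds before
  # starting, so the DataNode and YARN daemons come up only after HDFS.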
  datanode:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
    container_name: datanode
    restart: always
    volumes:
      - hadoop_datanode:/hadoop/dfs/data
    environment:
      SERVICE_PRECONDITION: "namenode:9870"
    env_file:
      - ./hadoop.env
  resourcemanager:
    image: bde2020/hadoop-resourcemanager:2.0.0-hadoop3.2.1-java8
    container_name: resourcemanager
    restart: always
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode:9864"
    env_file:
      - ./hadoop.env
  nodemanager1:
    image: bde2020/hadoop-nodemanager:2.0.0-hadoop3.2.1-java8
    container_name: nodemanager
    restart: always
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode:9864 resourcemanager:8088"
    env_file:
      - ./hadoop.env
  historyserver:
    image: bde2020/hadoop-historyserver:2.0.0-hadoop3.2.1-java8
    container_name: historyserver
    restart: always
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode:9864 resourcemanager:8088"
    volumes:
      - hadoop_historyserver:/hadoop/yarn/timeline
    env_file:
      - ./hadoop.env
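  # Assumption: ./hadoop.env (not shown here) carries the shared Hadoop
  # settings in the bde2020 KEY_PREFIX_name=value convention, e.g.
  # CORE_CONF_fs_defaultFS=hdfs://namenode:9000 plus the YARN/MapReduce
  # configuration consumed by all five services above.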
  zookeeper:
    image: confluentinc/cp-zookeeper:7.5.0
    hostname: zookeeper
    container_name: zookeeper
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
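  # The broker below advertises two listeners: broker:29092 for containers
  # on the compose network and localhost:9092 for clients on the host, so
  # every in-network service bootstraps against broker:29092.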
  broker:
    image: confluentinc/cp-server:7.5.0
    hostname: broker
    container_name: broker
    depends_on:
      - zookeeper
    ports:
      - "9092:9092"
      - "9101:9101"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_JMX_PORT: 9101
      KAFKA_JMX_HOSTNAME: localhost
      KAFKA_CONFLUENT_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:29092
      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
      CONFLUENT_METRICS_ENABLE: 'true'
      CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
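  # Schema Registry stores schemas in Kafka and serves them over HTTP on
  # 8081; Connect and ksqlDB below point at http://schema-registry:8081.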
  schema-registry:
    image: confluentinc/cp-schema-registry:7.5.0
    hostname: schema-registry
    container_name: schema-registry
    depends_on:
      - broker
    ports:
      - "8081:8081"
    environment:
      SCHEMA_REGISTRY_HOST_NAME: schema-registry
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'broker:29092'
      SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081
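  # Kafka Connect runs with Avro key/value converters, so connector records
  # are serialized as Avro and their schemas registered automatically.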
  connect:
    image: confluentinc/cp-kafka-connect:latest
    hostname: connect
    container_name: connect
    depends_on:
      - zookeeper
      - broker
      - schema-registry
    ports:
      - 8083:8083
    environment:
      CONNECT_BOOTSTRAP_SERVERS: 'broker:29092'
      CONNECT_REST_ADVERTISED_HOST_NAME: connect
      CONNECT_REST_PORT: 8083
      CONNECT_GROUP_ID: compose-connect-group
      CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000
      CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-7.5.0.jar
      CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
      CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
      CONNECT_LOG4J_ROOT_LOGLEVEL: "INFO"
      CONNECT_LOG4J_LOGGERS: "org.apache.kafka.connect.runtime.rest=WARN,org.reflections=ERROR"
      CONNECT_PLUGIN_PATH: '/usr/share/java,/usr/share/confluent-hub-components/,/connectors/'
    command:
      - bash
      - -exc
      - |
        confluent-hub install --no-prompt --component-dir /usr/share/confluent-hub-components/ confluentinc/kafka-connect-hdfs:latest
        confluent-hub install --no-prompt --component-dir /usr/share/confluent-hub-components/ confluentinc/kafka-connect-datagen:latest
        exec /etc/confluent/docker/run
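  # The command above installs the HDFS sink and datagen connectors from
  # Confluent Hub into the plugin path, then execs the stock
  # /etc/confluent/docker/run entrypoint so the worker starts normally.
  # Connectors are then managed through the worker's REST API; a sketch
  # (connector name, topic and flush.size below are hypothetical):
  #   curl -X POST -H "Content-Type: application/json" \
  #     --data '{"name": "hdfs-sink", "config": {
  #       "connector.class": "io.confluent.connect.hdfs.HdfsSinkConnector",
  #       "topics": "test_hdfs", "hdfs.url": "hdfs://namenode:9000",
  #       "flush.size": "3"}}' \
  #     http://localhost:8083/connectors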
  control-center:
    image: confluentinc/cp-enterprise-control-center:7.5.0
    hostname: control-center
    container_name: control-center
    depends_on:
      - broker
      - schema-registry
      - connect
      - ksqldb-server
    ports:
      - "9021:9021"
    environment:
      CONTROL_CENTER_BOOTSTRAP_SERVERS: 'broker:29092'
      CONTROL_CENTER_CONNECT_CONNECT-DEFAULT_CLUSTER: 'connect:8083'
      CONTROL_CENTER_KSQL_KSQLDB1_URL: "http://ksqldb-server:8088"
      CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL: "http://localhost:8088"
      CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
      CONTROL_CENTER_REPLICATION_FACTOR: 1
      CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1
      CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1
      CONFLUENT_METRICS_TOPIC_REPLICATION: 1
      PORT: 9021
      CONTROL_CENTER_CONNECT_HEALTHCHECK_ENDPOINT: '/connectors'
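  # ksqlDB server exposes its HTTP API on 8088 and is wired to the same
  # broker, Schema Registry and Connect worker; Control Center above finds
  # it through CONTROL_CENTER_KSQL_KSQLDB1_URL.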
  ksqldb-server:
    image: confluentinc/cp-ksqldb-server:7.5.0
    hostname: ksqldb-server
    container_name: ksqldb-server
    depends_on:
      - broker
      - connect
    ports:
      - "8088:8088"
    environment:
      KSQL_CONFIG_DIR: "/etc/ksql"
      KSQL_BOOTSTRAP_SERVERS: "broker:29092"
      KSQL_HOST_NAME: ksqldb-server
      KSQL_LISTENERS: "http://0.0.0.0:8088"
      KSQL_CACHE_MAX_BYTES_BUFFERING: 0
      KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
      KSQL_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
      KSQL_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
      KSQL_KSQL_CONNECT_URL: "http://connect:8083"
      KSQL_KSQL_LOGGING_PROCESSING_TOPIC_REPLICATION_FACTOR: 1
      KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: 'true'
      KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: 'true'
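  # ksqldb-cli below is an interactive client container; one way to attach
  # once the stack is up (names as defined in this file):
  #   docker compose exec ksqldb-cli ksql http://ksqldb-server:8088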
  ksqldb-cli:
    image: confluentinc/cp-ksqldb-cli:7.5.0
    container_name: ksqldb-cli
    depends_on:
      - broker
      - connect
      - ksqldb-server
    entrypoint: /bin/sh
    tty: true
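  # ksql-datagen only waits for its dependencies: cub kafka-ready and
  # cub sr-ready (Confluent utility commands shipped in the image) block
  # until the broker and Schema Registry respond, then the container stays
  # alive with tail -f.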
  ksql-datagen:
    image: confluentinc/ksqldb-examples:7.5.0
    hostname: ksql-datagen
    container_name: ksql-datagen
    depends_on:
      - ksqldb-server
      - broker
      - schema-registry
      - connect
    command: "bash -c 'echo Waiting for Kafka to be ready... && \
                       cub kafka-ready -b broker:29092 1 40 && \
                       echo Waiting for Confluent Schema Registry to be ready... && \
                       cub sr-ready schema-registry 8081 40 && \
                       echo Waiting a few seconds for topic creation to complete... && \
                       sleep 11 && \
                       tail -f /dev/null'"
    environment:
      KSQL_CONFIG_DIR: "/etc/ksql"
      STREAMS_BOOTSTRAP_SERVERS: broker:29092
      STREAMS_SCHEMA_REGISTRY_HOST: schema-registry
      STREAMS_SCHEMA_REGISTRY_PORT: 8081
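  # REST Proxy gives plain-HTTP access to the cluster on 8082, e.g. listing
  # topics from the host:
  #   curl http://localhost:8082/topics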
  rest-proxy:
    image: confluentinc/cp-kafka-rest:7.5.0
    depends_on:
      - broker
      - schema-registry
    ports:
      - 8082:8082
    hostname: rest-proxy
    container_name: rest-proxy
    environment:
      KAFKA_REST_HOST_NAME: rest-proxy
      KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092'
      KAFKA_REST_LISTENERS: "http://0.0.0.0:8082"
      KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
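# The named volumes below persist HDFS metadata, HDFS block data and the
# YARN timeline store across container restarts.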
volumes:
  hadoop_namenode:
  hadoop_datanode:
  hadoop_historyserver:
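# Bring everything up with `docker compose up -d`; with the port mappings
# above, the NameNode UI is at http://localhost:9870 and Confluent Control
# Center at http://localhost:9021.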