### 创建各种文件目录

```bash
mkdir -p /tmp/kafka/broker{1..3}/{data,logs}
mkdir -p /tmp/zookeeper/zookeeper/{data,datalog,logs,conf}
```

### zookeeper配置文件

- vi /tmp/zookeeper/zookeeper/conf/zoo.cfg

```properties
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/data
dataLogDir=/datalog
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
autopurge.purgeInterval=1
```

- vi /tmp/zookeeper/zookeeper/conf/log4j.properties

```properties
# Define some default values that can be overridden by system properties
zookeeper.root.logger=INFO, CONSOLE
zookeeper.console.threshold=INFO
zookeeper.log.dir=/logs
zookeeper.log.file=zookeeper.log
zookeeper.log.threshold=DEBUG
zookeeper.tracelog.dir=.
zookeeper.tracelog.file=zookeeper_trace.log

#
# ZooKeeper Logging Configuration
#

# Format is "<default threshold> (, <appender>)+

# DEFAULT: console appender only
log4j.rootLogger=${zookeeper.root.logger}

# Example with rolling log file
#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE

# Example with rolling log file and tracing
#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE

#
# Log INFO level and above messages to the console
#
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold}
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n

#
# Add ROLLINGFILE to rootLogger to get log file output
# Log DEBUG level and above messages to a log file
log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold}
log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file}

# Max log file size of 10MB
log4j.appender.ROLLINGFILE.MaxFileSize=10MB
# uncomment the next line to limit number of backup files
log4j.appender.ROLLINGFILE.MaxBackupIndex=10
log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n

#
# Add TRACEFILE to rootLogger to get log file output
# Log DEBUG level and above messages to a log file
log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
log4j.appender.TRACEFILE.Threshold=TRACE
log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file}
log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
### Notice we are including log4j's NDC here (%x)
log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n
```

### docker-compose 配置文件

- vi docker-compose.yaml

```yaml
version: '2'
services:
  zookeeper:
    container_name: zookeeper
    image: wurstmeister/zookeeper:v1
    pull_policy: never
    restart: unless-stopped
    hostname: zoo1
    volumes:
      - "/tmp/zookeeper/zookeeper/data:/data"
      - "/tmp/zookeeper/zookeeper/datalog:/datalog"
      - "/tmp/zookeeper/zookeeper/logs:/logs"
      - "/tmp/zookeeper/zookeeper/conf:/opt/zookeeper-3.4.13/conf"
    ports:
      - "2181:2181"
    networks:
      - kafka
  kafka1:
    container_name: kafka1
    image: wurstmeister/kafka:v1
    pull_policy: never
    ports:
      - "8002:9092"
    environment:
      KAFKA_ADVERTISED_HOST_NAME: 10.25.76.114 ## 修改:宿主机IP
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.25.76.114:8002 ## 修改:宿主机IP
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181"
      KAFKA_ADVERTISED_PORT: 8002
      KAFKA_BROKER_ID: 1
      KAFKA_LOG_DIRS: /kafka/data
    volumes:
      - /tmp/kafka/broker1/logs:/opt/kafka/logs
      - /tmp/kafka/broker1/data:/kafka/data
    depends_on:
      - zookeeper
    networks:
      - kafka
  kafka2:
    container_name: kafka2
    image: wurstmeister/kafka:v1
    pull_policy: never
    ports:
      - "8003:9092"
    environment:
      KAFKA_ADVERTISED_HOST_NAME: 10.25.76.114 ## 修改:宿主机IP
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.25.76.114:8003 ## 修改:宿主机IP
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181"
      KAFKA_ADVERTISED_PORT: 8003
      KAFKA_BROKER_ID: 2
      KAFKA_LOG_DIRS: /kafka/data
    volumes:
      - /tmp/kafka/broker2/logs:/opt/kafka/logs
      - /tmp/kafka/broker2/data:/kafka/data
    depends_on:
      - zookeeper
    networks:
      - kafka
  kafka3:
    container_name: kafka3
    image: wurstmeister/kafka:v1
    pull_policy: never
    ports:
      - "8004:9092"
    environment:
      KAFKA_ADVERTISED_HOST_NAME: 10.25.76.114 ## 修改:宿主机IP
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.25.76.114:8004 ## 修改:宿主机IP
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181"
      KAFKA_ADVERTISED_PORT: 8004
      ## NOTE(review): the two settings below are cluster-level broker configs but are
      ## only set on kafka3 — consider setting them on all three brokers for consistency.
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
      KAFKA_MIN_INSYNC_REPLICAS: 2
      KAFKA_BROKER_ID: 3
      KAFKA_LOG_DIRS: /kafka/data
    volumes:
      - /tmp/kafka/broker3/logs:/opt/kafka/logs
      - /tmp/kafka/broker3/data:/kafka/data
    depends_on:
      - zookeeper
    networks:
      - kafka
  kafka-ui:
    image: provectuslabs/kafka-ui:v1
    pull_policy: never
    environment:
      DYNAMIC_CONFIG_ENABLED: 'true'
    ports:
      - "8001:8080" ## 暴露端口
    networks:
      - kafka
    depends_on:
      - zookeeper
networks:
  kafka:
    driver: bridge
```