FROM ubuntu:18.04 AS builder
# Use the Aliyun mirrors to speed up apt inside mainland China
RUN sed -i 's#archive.ubuntu.com#mirrors.aliyun.com#g' /etc/apt/sources.list \
 && sed -i 's#security.ubuntu.com#mirrors.aliyun.com#g' /etc/apt/sources.list
# Note: the zh_CN.UTF-8 locale is not generated in the stock ubuntu:18.04 image;
# install the locales package and run locale-gen if you need it to actually take effect.
ENV LANG=zh_CN.UTF-8 LANGUAGE=zh_CN:zh LC_ALL=zh_CN.UTF-8 DEBIAN_FRONTEND=noninteractive
RUN rm -rf /etc/apt/sources.list.d/ && apt-get update
# ARG USERNAME=sxkj
# ARG USER_UID=1000
# ARG USER_GID=$USER_UID
# # Create the user
# RUN groupadd --gid $USER_GID $USERNAME \
#  && useradd --uid $USER_UID --gid $USER_GID -m $USERNAME \
#  #
#  # [Optional] Add sudo support. Omit if you don't need to install software after connecting.
#  && apt-get update \
#  && apt-get install -y sudo \
#  && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \
#  && chmod 0440 /etc/sudoers.d/$USERNAME
RUN apt-get update && apt-get install -y --no-install-recommends \
    supervisor \
    iputils-ping \
    wget \
    zsh \
    build-essential \
    cmake \
    git \
    curl \
    vim \
    ca-certificates \
    zip \
    unzip \
    openjdk-8-jdk \
 && rm -rf /var/lib/apt/lists/*
# Run supervisord in the foreground; the later stages built FROM this one inherit this CMD.
CMD ["supervisord", "-n"]
FROM builder AS builder1
# ********************************************************
# * Anything else you want to do like clean up goes here *
# ********************************************************
# [Optional] Set the default user. Omit if you want to keep the default as root.
# USER $USERNAME
# ENV HOME /home/sxkj
# ENV WORKSPACE ${HOME}/bigdata
# WORKDIR ${WORKSPACE}
# RUN sudo chown -R sxkj:sxkj ${WORKSPACE}
# Prepare directories
ENV WORKSPACE /workspace
WORKDIR ${WORKSPACE}
RUN mkdir -p hadoop livy spark
# hadoop
COPY hadoop-2.7.5.tar.gz hadoop
RUN cd hadoop && tar xzvf hadoop-2.7.5.tar.gz && mv hadoop-2.7.5*/* . && rm -rf hadoop-2.7.5*
# spark
COPY spark-2.4.7-bin-hadoop2.7.tgz spark
RUN cd spark && tar xzvf spark-2.4.7-bin-hadoop2.7.tgz && mv spark-2.4.7*/* . && rm -rf spark-2.4.7*
# livy
# COPY apache-livy-0.7.1-incubating-bin.zip livy
# COPY apache-livy-0.8.0-spark-2.4.7-hadoop-2.7-bin.zip livy
COPY apache-livy-0.8.1-incubating-SNAPSHOT-bin.zip livy
RUN cd livy && unzip apache-livy-*.zip && mv apache*/* . && rm -rf apache-livy-*
# ENVs
# java
ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
ENV PATH=$JAVA_HOME/bin:$PATH
# hadoop (HADOOP_HOME must be declared before JAVA_LIBRARY_PATH references it,
# since Dockerfile ENV substitution only sees variables set on earlier lines)
ENV HADOOP_HOME=${WORKSPACE}/hadoop
ENV JAVA_LIBRARY_PATH=$HADOOP_HOME/lib/native
ENV HADOOP_CONF_DIR=${WORKSPACE}/conf
ENV PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
# spark
ENV SPARK_HOME=${WORKSPACE}/spark
ENV SPARK_CONF_DIR=${WORKSPACE}/conf
ENV PATH=$SPARK_HOME/bin:$PATH
# livy
ENV LIVY_HOME=${WORKSPACE}/livy
ENV LIVY_CONF_DIR=${WORKSPACE}/conf
ENV PATH=$LIVY_HOME/bin:$PATH
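# HADOOP_CONF_DIR, SPARK_CONF_DIR and LIVY_CONF_DIR all point at ${WORKSPACE}/conf,
# which this Dockerfile never creates itself; presumably the final COPY below supplies it.
# A minimal sketch of what conf/livy.conf could contain (the keys are standard Livy
# settings; the values are assumptions for a YARN setup):
#
#   livy.server.port = 8998
#   livy.spark.master = yarn
#   livy.spark.deploy-mode = cluster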
# Startup command: register Livy as a supervisord program
RUN echo "\
[program:livy]\n\
directory=/workspace\n\
command=livy/bin/livy-server\n\
autorestart=true\n\
startretries=0\n\
redirect_stderr=true\n\
stdout_logfile=/var/log/livy.log\n\
stdout_logfile_maxbytes=50MB\n\
" > /etc/supervisor/conf.d/livy.conf
RUN mkdir -p ${WORKSPACE}/livy/logs
FROM builder1 AS image-sxkj
# Copy site-specific files into ${WORKSPACE}; presumably this includes the conf/
# directory that the *_CONF_DIR variables above point at.
COPY docker/sxkj .
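# A possible build-and-smoke-test sequence (the image tag and port mapping are
# assumptions; 8998 is Livy's default REST port):
#
#   docker build --target image-sxkj -t sxkj/livy:latest .
#   docker run -d --name livy -p 8998:8998 sxkj/livy:latest
#   curl http://localhost:8998/sessions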