
add prod confs

Zhang Li, 2 years ago
parent commit cf6f285cfa

+ 1 - 0
.dockerignore

@@ -45,3 +45,4 @@ tests/**/coverage
 tests/**/.cache-loader
 
 **/node_modules
+Makefile

+ 20 - 12
Dockerfile.dev

@@ -1,10 +1,8 @@
 FROM node:16.15.1-bullseye-slim AS builder-fe
 COPY docker_build/requirements.txt .
-RUN echo "deb http://mirror.nju.edu.cn/debian/ bullseye main contrib non-free" > /etc/apt/sources.list && \
-    echo "deb http://mirror.nju.edu.cn/debian/ bullseye-updates main contrib non-free" >> /etc/apt/sources.list && \
-    echo "deb http://mirror.nju.edu.cn/debian/ bullseye-backports main contrib non-free" >> /etc/apt/sources.list && \
-    echo "deb http://mirror.nju.edu.cn/debian-security bullseye-security main contrib non-free" >> /etc/apt/sources.list && \
-    apt update && apt install -y python3 python3-pip python-is-python3 make \
+RUN sed -i "s@http://\(deb\|security\).debian.org@http://mirrors.aliyun.com@g" /etc/apt/sources.list
+RUN apt update && apt install -y libpython3-dev python3 python3-pip python-is-python3 make \
     && pip config set global.index-url https://mirror.baidu.com/pypi/simple \
     && pip install -U pip setuptools && pip install -r requirements.txt -i https://mirror.baidu.com/pypi/simple \
     && yarn config set registry https://registry.npm.taobao.org
@@ -95,16 +93,12 @@ RUN cd /opt/conda/lib/python3.9/site-packages/ \
     && jupyter-kernelspec install sparkmagic/kernels/pysparkkernel --user\
     && /opt/conda/bin/jupyter serverextension enable --py sparkmagic
 RUN /opt/conda/bin/pip install dist/*.whl -i https://mirror.baidu.com/pypi/simple
-ADD confs/krb5.conf /etc/
-ADD confs/config.json .
-RUN mkdir -p $HOME/.sparkmagic && cp config.json $HOME/.sparkmagic
-ADD emr-5xjsy31f_ailab.keytab /
-# RUN kinit -k -t emr-5xjsy31f_ailab.keytab ailab
+
 
 RUN echo "\
 [program:jupyter]\n\
 directory=/workspace\n\
-command=/bin/bash -c 'kinit -k -t /emr-5xjsy31f_ailab.keytab ailab && /opt/conda/bin/jupyter lab --ip 0.0.0.0 --port 8888 --allow-root --no-browser' \n\
+command=/bin/bash -c 'kinit -k -t /user.keytab ailab && /opt/conda/bin/jupyter lab --ip 0.0.0.0 --port 8888 --allow-root --no-browser' \n\
 autorestart=true\n\
 startretries=0\n\
 redirect_stderr=true\n\
@@ -112,4 +106,18 @@ stdout_logfile=/var/log/jupyter.log\n\
 stdout_logfile_maxbytes=50MB\n\
 " > /etc/supervisor/conf.d/jupyter.conf
 
-EXPOSE 8888
+EXPOSE 8888
+
+
+FROM builder3 AS image-test
+ADD confs/dev/krb5.conf /etc/
+ADD confs/dev/config.json .
+RUN mkdir -p $HOME/.sparkmagic && cp config.json $HOME/.sparkmagic
+ADD confs/dev/user.keytab /
+
+
+FROM builder3 AS image-prod
+ADD confs/prod/krb5.conf /etc/
+ADD confs/prod/config.json .
+RUN mkdir -p $HOME/.sparkmagic && cp config.json $HOME/.sparkmagic
+ADD confs/prod/user.keytab /

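For reference, the escaped echo that writes the supervisor unit above should expand to the following /etc/supervisor/conf.d/jupyter.conf inside the image (assuming /bin/sh's echo interprets the \n escapes, as Debian's dash does):

    [program:jupyter]
    directory=/workspace
    command=/bin/bash -c 'kinit -k -t /user.keytab ailab && /opt/conda/bin/jupyter lab --ip 0.0.0.0 --port 8888 --allow-root --no-browser'
    autorestart=true
    startretries=0
    redirect_stderr=true
    stdout_logfile=/var/log/jupyter.log
    stdout_logfile_maxbytes=50MB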
+ 12 - 0
Makefile

@@ -0,0 +1,12 @@
+
+.PHONY: all prod test
+
+all: prod test
+
+prod:
+	@DOCKER_BUILDKIT=1 docker build -f Dockerfile.dev --build-arg BUILDKIT_INLINE_CACHE=1 --target image-prod -t jupyterlab:prod .
+
+test:
+	@DOCKER_BUILDKIT=1 docker build -f Dockerfile.dev --build-arg BUILDKIT_INLINE_CACHE=1 --target image-test -t jupyterlab:test .
+

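Usage is the standard make flow; since all: prod test is the first target, a bare make builds both images, and BuildKit only builds the stages each target depends on:

    make prod   # builds the image-prod stage, tagged jupyterlab:prod
    make test   # builds the image-test stage, tagged jupyterlab:test
    make        # same as 'make all': builds both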
+ 0 - 0
confs/config.json → confs/dev/config.json


+ 0 - 0
confs/core-site.xml → confs/dev/core-site.xml


+ 0 - 0
confs/emr-5xjsy31f_ailab.keytab → confs/dev/emr-5xjsy31f_ailab.keytab


+ 0 - 0
confs/hdfs-site.xml → confs/dev/hdfs-site.xml


+ 0 - 0
confs/hive-site.xml → confs/dev/hive-site.xml


+ 0 - 0
confs/hosts → confs/dev/hosts


+ 0 - 0
confs/krb5.conf → confs/dev/krb5.conf


+ 0 - 0
emr-5xjsy31f_ailab.keytab → confs/dev/user.keytab


+ 0 - 0
confs/yarn-site.xml → confs/dev/yarn-site.xml


+ 0 - 0
confs/信息.txt → confs/dev/信息.txt


+ 77 - 0
confs/prod/config.json

@@ -0,0 +1,77 @@
+{
+  "kernel_python_credentials" : {
+    "username": "",
+    "password": "",
+    "url": "http://10.254.7.8:30998",
+    "auth": "None"
+  },
+
+  "kernel_scala_credentials" : {
+    "username": "",
+    "password": "",
+    "url": "http://10.254.7.8:30998",
+    "auth": "None"
+  },
+  "kernel_r_credentials": {
+    "username": "",
+    "password": "",
+    "url": "http://10.254.7.8:30998"
+  },
+
+  "logging_config": {
+    "version": 1,
+    "formatters": {
+      "magicsFormatter": {
+        "format": "%(asctime)s\t%(levelname)s\t%(message)s",
+        "datefmt": ""
+      }
+    },
+    "handlers": {
+      "magicsHandler": {
+        "class": "hdijupyterutils.filehandler.MagicsFileHandler",
+        "formatter": "magicsFormatter",
+        "home_path": "~/.sparkmagic"
+      }
+    },
+    "loggers": {
+      "magicsLogger": {
+        "handlers": ["magicsHandler"],
+        "level": "DEBUG",
+        "propagate": 0
+      }
+    }
+  },
+  "authenticators": {
+    "Kerberos": "sparkmagic.auth.kerberos.Kerberos",
+    "None": "sparkmagic.auth.customauth.Authenticator",
+    "Basic_Access": "sparkmagic.auth.basic.Basic"
+  },
+
+  "wait_for_idle_timeout_seconds": 15,
+  "livy_session_startup_timeout_seconds": 60,
+
+  "fatal_error_suggestion": "The code failed because of a fatal error:\n\t{}.\n\nSome things to try:\na) Make sure Spark has enough available resources for Jupyter to create a Spark context.\nb) Contact your Jupyter administrator to make sure the Spark magics library is configured correctly.\nc) Restart the kernel.",
+
+  "ignore_ssl_errors": false,
+
+  "session_configs": {
+    "driverMemory": "1000M",
+    "executorCores": 2
+  },
+
+  "use_auto_viz": true,
+  "coerce_dataframe": true,
+  "max_results_sql": 2500,
+  "pyspark_dataframe_encoding": "utf-8",
+
+  "heartbeat_refresh_seconds": 30,
+  "livy_server_heartbeat_timeout_seconds": 0,
+  "heartbeat_retry_seconds": 10,
+
+  "server_extension_default_kernel_name": "pysparkkernel",
+  "custom_headers": {},
+
+  "retry_policy": "configurable",
+  "retry_seconds_to_sleep_list": [0.2, 0.5, 1, 3, 5],
+  "configurable_retry_policy_max_retries": 8
+}

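A quick smoke test for this config, assuming the Livy endpoint above (http://10.254.7.8:30998) is reachable from the container; the empty-state response shown is illustrative:

    curl -s http://10.254.7.8:30998/sessions
    # e.g. {"from":0,"total":0,"sessions":[]}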
+ 196 - 0
confs/prod/core-site.xml

@@ -0,0 +1,196 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+
+    <property>
+        <name>emr.cfs.group.id.map</name>
+        <value>root:0;hadoop:500</value>
+    </property>
+
+    <property>
+        <name>emr.cfs.io.blocksize</name>
+        <value>1048576</value>
+    </property>
+
+    <property>
+        <name>emr.cfs.user.id.map</name>
+        <value>root:0;hadoop:500</value>
+    </property>
+
+    <property>
+        <name>emr.cfs.write.level</name>
+        <value>2</value>
+    </property>
+
+    <property>
+        <name>fs.AbstractFileSystem.ofs.impl</name>
+        <value>com.qcloud.chdfs.fs.CHDFSDelegateFSAdapter</value>
+    </property>
+
+    <property>
+        <name>fs.cfs.impl</name>
+        <value>com.tencent.cloud.emr.CFSFileSystem</value>
+    </property>
+
+    <property>
+        <name>fs.cos.buffer.dir</name>
+        <value>/data/emr/hdfs/tmp</value>
+    </property>
+
+    <property>
+        <name>fs.cos.local_block_size</name>
+        <value>2097152</value>
+    </property>
+
+    <property>
+        <name>fs.cos.userinfo.appid</name>
+        <value>1302259445</value>
+    </property>
+
+    <property>
+        <name>fs.cos.userinfo.region</name>
+        <value>bj</value>
+    </property>
+
+    <property>
+        <name>fs.cos.userinfo.useCDN</name>
+        <value>false</value>
+    </property>
+
+    <property>
+        <name>fs.cosn.block.size</name>
+        <value>67108864</value>
+    </property>
+
+    <property>
+        <name>fs.cosn.credentials.provider</name>
+        <value>org.apache.hadoop.fs.auth.EMRInstanceCredentialsProvider</value>
+    </property>
+
+    <property>
+        <name>fs.cosn.impl</name>
+        <value>org.apache.hadoop.fs.cosnative.NativeCosFileSystem</value>
+    </property>
+
+    <property>
+        <name>fs.cosn.local_block_size</name>
+        <value>2097152</value>
+    </property>
+
+    <property>
+        <name>fs.cosn.tmp.dir</name>
+        <value>/data/emr/hdfs/tmp/hadoop_cos</value>
+    </property>
+
+    <property>
+        <name>fs.cosn.upload.buffer</name>
+        <value>mapped_disk</value>
+    </property>
+
+    <property>
+        <name>fs.cosn.upload.buffer.size</name>
+        <value>-1</value>
+    </property>
+
+    <property>
+        <name>fs.cosn.userinfo.region</name>
+        <value>ap-beijing</value>
+    </property>
+
+    <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://HDFS84854</value>
+    </property>
+
+    <property>
+        <name>fs.emr.version</name>
+        <value>9c06b7b</value>
+    </property>
+
+    <property>
+        <name>fs.ofs.impl</name>
+        <value>com.qcloud.chdfs.fs.CHDFSHadoopFileSystemAdapter</value>
+    </property>
+
+    <property>
+        <name>fs.ofs.tmp.cache.dir</name>
+        <value>/data/emr/hdfs/tmp/chdfs</value>
+    </property>
+
+    <property>
+        <name>fs.ofs.user.appid</name>
+        <value>1302259445</value>
+    </property>
+
+    <property>
+        <name>fs.trash.interval</name>
+        <value>1440</value>
+    </property>
+
+    <property>
+        <name>ha.health-monitor.rpc-timeout.ms</name>
+        <value>180000</value>
+    </property>
+
+    <property>
+        <name>ha.zookeeper.session-timeout.ms</name>
+        <value>180000</value>
+    </property>
+
+    <property>
+        <name>hadoop.http.staticuser.user</name>
+        <value>hadoop</value>
+    </property>
+
+    <property>
+        <name>hadoop.logfile.count</name>
+        <value>20</value>
+    </property>
+
+    <property>
+        <name>hadoop.logfile.size</name>
+        <value>1000000000</value>
+    </property>
+
+    <property>
+        <name>hadoop.proxyuser.hadoop.groups</name>
+        <value>*</value>
+    </property>
+
+    <property>
+        <name>hadoop.proxyuser.hadoop.hosts</name>
+        <value>*</value>
+    </property>
+
+    <property>
+        <name>hadoop.security.authentication</name>
+        <value>kerberos</value>
+    </property>
+
+    <property>
+        <name>hadoop.security.authorization</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>hadoop.tmp.dir</name>
+        <value>/data/emr/hdfs/tmp</value>
+    </property>
+
+    <property>
+        <name>hadoop.zk.timeout-ms</name>
+        <value>60000</value>
+    </property>
+
+    <property>
+        <name>io.compression.codec.lzo.class</name>
+        <value>com.hadoop.compression.lzo.LzoCodec</value>
+    </property>
+
+    <property>
+        <name>io.compression.codecs</name>
+        <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
+    </property>
+
+</configuration>

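Because hadoop.security.authentication is kerberos and fs.defaultFS is hdfs://HDFS84854, any client shipping this file needs a ticket before its first filesystem call; a sketch, reusing the keytab and principal from the Dockerfile above:

    kinit -k -t /user.keytab ailab
    hdfs dfs -ls hdfs://HDFS84854/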
+ 310 - 0
confs/prod/hdfs-site.xml

@@ -0,0 +1,310 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+	
+	<property>
+		<name>dfs.balance.bandwidthPerSec</name>
+		<value>10485760</value>
+	</property>
+	
+	<property>
+		<name>dfs.block.access.token.enable</name>
+		<value>true</value>
+	</property>
+	
+	<property>
+		<name>dfs.blocksize</name>
+		<value>134217728</value>
+	</property>
+	
+	<property>
+		<name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
+		<value>true</value>
+	</property>
+	
+	<property>
+		<name>dfs.client.failover.proxy.provider.HDFS84854</name>
+		<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
+	</property>
+	
+	<property>
+		<name>dfs.client.file-block-storage-locations.timeout.millis</name>
+		<value>10000</value>
+	</property>
+	
+	<property>
+		<name>dfs.client.read.shortcircuit</name>
+		<value>true</value>
+	</property>
+	
+	<property>
+		<name>dfs.client.slow.io.warning.threshold.ms</name>
+		<value>900000</value>
+	</property>
+	
+	<property>
+		<name>dfs.client.socket-timeout</name>
+		<value>60000</value>
+	</property>
+	
+	<property>
+		<name>dfs.client.use.datanode.hostname</name>
+		<value>true</value>
+	</property>
+	
+	<property>
+		<name>dfs.datanode.handler.count</name>
+		<value>128</value>
+	</property>
+	
+	<property>
+		<name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
+		<value>true</value>
+	</property>
+	
+	<property>
+		<name>dfs.datanode.kerberos.principal</name>
+		<value>hadoop/_HOST@EMR-56L6ZNTS</value>
+	</property>
+	
+	<property>
+		<name>dfs.datanode.keytab.file</name>
+		<value>/var/krb5kdc/emr.keytab</value>
+	</property>
+	
+	<property>
+		<name>dfs.datanode.max.transfer.threads</name>
+		<value>16480</value>
+	</property>
+	
+	<property>
+		<name>dfs.datanode.max.xcievers</name>
+		<value>8000</value>
+	</property>
+	
+	<property>
+		<name>dfs.datanode.slow.io.warning.threshold.ms</name>
+		<value>90000</value>
+	</property>
+	
+	<property>
+		<name>dfs.datanode.socket.write.timeout</name>
+		<value>480000</value>
+	</property>
+	
+	<property>
+		<name>dfs.datanode.use.datanode.hostname</name>
+		<value>false</value>
+	</property>
+	
+	<property>
+		<name>dfs.domain.socket.path</name>
+		<value>/var/lib/hadoop/dn</value>
+	</property>
+	
+	<property>
+		<name>dfs.encryption.key.provider.uri</name>
+		<value></value>
+	</property>
+	
+	<property>
+		<name>dfs.ha.automatic-failover.enabled</name>
+		<value>true</value>
+	</property>
+	
+	<property>
+		<name>dfs.ha.fencing.methods</name>
+		<value>shell(/bin/true)</value>
+	</property>
+	
+	<property>
+		<name>dfs.ha.namenodes.HDFS84854</name>
+		<value>nn1,nn2</value>
+	</property>
+	
+	<property>
+		<name>dfs.heartbeat.interval</name>
+		<value>2</value>
+	</property>
+	
+	<property>
+		<name>dfs.hosts</name>
+		<value>/usr/local/service/hadoop/etc/hadoop/hdfshosts</value>
+	</property>
+	
+	<property>
+		<name>dfs.hosts.exclude</name>
+		<value>/usr/local/service/hadoop/etc/hadoop/hdfsexcludedhosts</value>
+	</property>
+	
+	<property>
+		<name>dfs.journalnode.edits.dir</name>
+		<value>/data/emr/hdfs/journalnode</value>
+	</property>
+	
+	<property>
+		<name>dfs.journalnode.kerberos.internal.spnego.principal</name>
+		<value>HTTP/_HOST@EMR-56L6ZNTS</value>
+	</property>
+	
+	<property>
+		<name>dfs.journalnode.kerberos.principal</name>
+		<value>hadoop/_HOST@EMR-56L6ZNTS</value>
+	</property>
+	
+	<property>
+		<name>dfs.journalnode.keytab.file</name>
+		<value>/var/krb5kdc/emr.keytab</value>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.datanode.registration.ip-hostname-check</name>
+		<value>false</value>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.handler.count</name>
+		<value>64</value>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.heartbeat.recheck-interval</name>
+		<value>90000</value>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.http-address.HDFS84854.nn1</name>
+		<value>172.23.21.7:4008</value>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.http-address.HDFS84854.nn2</name>
+		<value>172.23.21.8:4008</value>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.https-address.HDFS84854.nn1</name>
+		<value>172.23.21.7:4009</value>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.https-address.HDFS84854.nn2</name>
+		<value>172.23.21.8:4009</value>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.inode.attributes.provider.class</name>
+		<value>org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer</value>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.kerberos.internal.spnego.principal</name>
+		<value>HTTP/_HOST@EMR-56L6ZNTS</value>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.kerberos.principal</name>
+		<value>hadoop/_HOST@EMR-56L6ZNTS</value>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.keytab.file</name>
+		<value>/var/krb5kdc/emr.keytab</value>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.name.dir</name>
+		<value>/data/emr/hdfs/namenode</value>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.rpc-address.HDFS84854.nn1</name>
+		<value>172.23.21.7:4007</value>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.rpc-address.HDFS84854.nn2</name>
+		<value>172.23.21.8:4007</value>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.shared.edits.dir</name>
+		<value>qjournal://172.23.21.17:4005;172.23.21.15:4005;172.23.21.10:4005/hadoop</value>
+	</property>
+	
+	<property>
+		<name>dfs.nameservices</name>
+		<value>HDFS84854</value>
+	</property>
+	
+	<property>
+		<name>dfs.permissions</name>
+		<value>true</value>
+	</property>
+	
+	<property>
+		<name>dfs.permissions.umask-mode</name>
+		<value>077</value>
+	</property>
+	
+	<property>
+		<name>dfs.replication</name>
+		<value>3</value>
+	</property>
+	
+	<property>
+		<name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
+		<value>HTTP/_HOST@EMR-56L6ZNTS</value>
+	</property>
+	
+	<property>
+		<name>dfs.secondary.namenode.kerberos.principal</name>
+		<value>hadoop/_HOST@EMR-56L6ZNTS</value>
+	</property>
+	
+	<property>
+		<name>dfs.secondary.namenode.keytab.file</name>
+		<value>/var/krb5kdc/emr.keytab</value>
+	</property>
+	
+	<property>
+		<name>dfs.socket.timeout</name>
+		<value>60000</value>
+	</property>
+	
+	<property>
+		<name>dfs.web.authentication.kerberos.keytab</name>
+		<value>/var/krb5kdc/emr.keytab</value>
+	</property>
+	
+	<property>
+		<name>dfs.web.authentication.kerberos.principal</name>
+		<value>HTTP/_HOST@EMR-56L6ZNTS</value>
+	</property>
+	
+	<property>
+		<name>dfs.webhdfs.enabled</name>
+		<value>true</value>
+	</property>
+	
+	<property>
+		<name>ha.zookeeper.quorum</name>
+		<value>172.23.21.17:2181,172.23.21.15:2181,172.23.21.10:2181</value>
+	</property>
+	
+	<property>
+		<name>ignore.secure.ports.for.testing</name>
+		<value>true</value>
+	</property>
+	
+	<property>
+		<name>output.replace-datanode-on-failure</name>
+		<value>false</value>
+	</property>
+	
+</configuration>

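With automatic failover enabled for nameservice HDFS84854 (namenodes nn1 and nn2), the active side can be checked from any host carrying this file on its config path; the output values shown are illustrative:

    hdfs haadmin -getServiceState nn1   # active
    hdfs haadmin -getServiceState nn2   # standby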
+ 365 - 0
confs/prod/hive-site.xml

@@ -0,0 +1,365 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+
+    <property>
+        <name>datanucleus.schema.autoCreateTables</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>hbase.zookeeper.quorum</name>
+        <value>172.23.21.17:2181,172.23.21.15:2181,172.23.21.10:2181</value>
+    </property>
+
+    <property>
+        <name>hive.cluster.delegation.token.store.class</name>
+        <value>org.apache.hadoop.hive.thrift.ZooKeeperTokenStore</value>
+    </property>
+
+    <property>
+        <name>hive.cluster.delegation.token.store.zookeeper.connectString</name>
+        <value>172.23.21.17:2181,172.23.21.10:2181,172.23.21.15:2181</value>
+    </property>
+
+    <property>
+        <name>hive.downloaded.resources.dir</name>
+        <value>/data/emr/hive/tmp/${hive.session.id}_resources</value>
+    </property>
+
+    <property>
+        <name>hive.exec.dynamic.partition.mode</name>
+        <value>nonstrict</value>
+    </property>
+
+    <property>
+        <name>hive.exec.local.scratchdir</name>
+        <value>/data/emr/hive/tmp</value>
+    </property>
+
+    <property>
+        <name>hive.exec.parallel</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>hive.exec.parallel.thread.number</name>
+        <value>16</value>
+    </property>
+
+    <property>
+        <name>hive.execution.engine</name>
+        <value>tez</value>
+    </property>
+
+    <property>
+        <name>hive.fetch.task.aggr</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>hive.hwi.listen.host</name>
+        <value>0.0.0.0</value>
+    </property>
+
+    <property>
+        <name>hive.hwi.listen.port</name>
+        <value>7002</value>
+    </property>
+
+    <property>
+        <name>hive.llap.daemon.output.service.port</name>
+        <value>7009</value>
+    </property>
+
+    <property>
+        <name>hive.llap.daemon.rpc.port</name>
+        <value>7007</value>
+    </property>
+
+    <property>
+        <name>hive.llap.daemon.web.port</name>
+        <value>7008</value>
+    </property>
+
+    <property>
+        <name>hive.llap.daemon.yarn.shuffle.port</name>
+        <value>7006</value>
+    </property>
+
+    <property>
+        <name>hive.llap.management.rpc.port</name>
+        <value>7005</value>
+    </property>
+
+    <property>
+        <name>hive.merge.mapfiles</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>hive.merge.mapredfiles</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>hive.merge.size.per.task</name>
+        <value>134217728</value>
+    </property>
+
+    <property>
+        <name>hive.merge.smallfiles.avgsize</name>
+        <value>134217728</value>
+    </property>
+
+    <property>
+        <name>hive.merge.sparkfiles</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>hive.merge.tezfiles</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.alter.notifications.basic</name>
+        <value>false</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.db.encoding</name>
+        <value>UTF-8</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.dml.events</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.event.db.listener.timetolive</name>
+        <value>172800s</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.event.db.notification.api.auth</name>
+        <value>false</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.kerberos.keytab.file</name>
+        <value>/var/krb5kdc/emr.keytab</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.kerberos.principal</name>
+        <value>hadoop/_HOST@EMR-56L6ZNTS</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.metrics.enabled</name>
+        <value>false</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.notifications.add.thrift.objects</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.port</name>
+        <value>7004</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.sasl.enabled</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.schema.verification</name>
+        <value>false</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.schema.verification.record.version</name>
+        <value>false</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.server.max.message.size</name>
+        <value>858993459</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.transactional.event.listeners</name>
+        <value>org.apache.hive.hcatalog.listener.DbNotificationListener</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.uris</name>
+        <value>thrift://172.23.21.7:7004,thrift://172.23.21.8:7004</value>
+    </property>
+
+    <property>
+        <name>hive.metastore.warehouse.dir</name>
+        <value>/user/hive/warehouse</value>
+    </property>
+
+    <property>
+        <name>hive.querylog.location</name>
+        <value>/data/emr/hive/tmp</value>
+    </property>
+
+    <property>
+        <name>hive.server2.authentication</name>
+        <value>kerberos</value>
+    </property>
+
+    <property>
+        <name>hive.server2.authentication.kerberos.keytab</name>
+        <value>/var/krb5kdc/emr.keytab</value>
+    </property>
+
+    <property>
+        <name>hive.server2.authentication.kerberos.principal</name>
+        <value>hadoop/_HOST@EMR-56L6ZNTS</value>
+    </property>
+
+    <property>
+        <name>hive.server2.logging.operation.log.location</name>
+        <value>/data/emr/hive/tmp/operation_logs</value>
+    </property>
+
+    <property>
+        <name>hive.server2.metrics.enabled</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>hive.server2.support.dynamic.service.discovery</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>hive.server2.thrift.bind.host</name>
+        <value>172.23.21.11</value>
+    </property>
+
+    <property>
+        <name>hive.server2.thrift.http.port</name>
+        <value>7000</value>
+    </property>
+
+    <property>
+        <name>hive.server2.thrift.max.worker.threads</name>
+        <value>1000</value>
+    </property>
+
+    <property>
+        <name>hive.server2.thrift.port</name>
+        <value>7001</value>
+    </property>
+
+    <property>
+        <name>hive.server2.webui.host</name>
+        <value>0.0.0.0</value>
+    </property>
+
+    <property>
+        <name>hive.server2.webui.port</name>
+        <value>7003</value>
+    </property>
+
+    <property>
+        <name>hive.server2.zookeeper.namespace</name>
+        <value>hiveserver2</value>
+    </property>
+
+    <property>
+        <name>hive.stats.autogather</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>hive.tez.auto.reducer.parallelism</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>hive.tez.container.size</name>
+        <value>1024</value>
+    </property>
+
+    <property>
+        <name>hive.zookeeper.client.port</name>
+        <value>2181</value>
+    </property>
+
+    <property>
+        <name>hive.zookeeper.quorum</name>
+        <value>172.23.21.17:2181,172.23.21.15:2181,172.23.21.10:2181</value>
+    </property>
+
+    <property>
+        <name>io.compression.codec.lzo.class</name>
+        <value>com.hadoop.compression.lzo.LzoCodec</value>
+    </property>
+
+    <property>
+        <name>io.compression.codecs</name>
+        <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
+    </property>
+
+    <property>
+        <name>javax.jdo.option.ConnectionDriverName</name>
+        <value>com.mysql.jdbc.Driver</value>
+    </property>
+
+    <property>
+        <name>javax.jdo.option.ConnectionPassword</name>
+        <value>enochLk9j7Hv35</value>
+    </property>
+
+    <property>
+        <name>javax.jdo.option.ConnectionURL</name>
+        <value>jdbc:mysql://172.23.21.6:3306/hivemetastore?useSSL=false&amp;createDatabaseIfNotExist=true&amp;characterEncoding=UTF-8</value>
+    </property>
+
+    <property>
+        <name>javax.jdo.option.ConnectionUserName</name>
+        <value>root</value>
+    </property>
+
+    <property>
+        <name>mapreduce.input.fileinputformat.list-status.num-threads</name>
+        <value>5</value>
+    </property>
+
+    <property>
+        <name>spark.yarn.jars</name>
+        <value>hdfs:///spark/jars/*</value>
+    </property>
+
+</configuration>

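HiveServer2 here is kerberized and listens on thrift port 7001 on the bind host above; a connection sketch (after obtaining a ticket as in the kinit example earlier), using the service principal from this file with JDBC's _HOST substitution:

    beeline -u "jdbc:hive2://172.23.21.11:7001/default;principal=hadoop/_HOST@EMR-56L6ZNTS"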
+ 25 - 0
confs/prod/krb5.conf

@@ -0,0 +1,25 @@
+[libdefaults]
+    dns_lookup_realm = false
+    dns_lookup_kdc = false
+    ticket_lifetime = 24h
+    renew_lifetime = 7d
+    forwardable = true
+    rdns = false
+    default_realm = EMR-56L6ZNTS
+    default_tgs_enctypes = des3-cbc-sha1
+    default_tkt_enctypes = des3-cbc-sha1
+    permitted_enctypes = des3-cbc-sha1
+    kdc_timeout = 3000
+    max_retries = 3
+[realms]
+    EMR-56L6ZNTS = {
+
+        kdc = 172.23.21.7:88
+        admin_server = 172.23.21.7
+        kdc = 172.23.21.8:88
+        admin_server = 172.23.21.8
+
+    }
+
+[domain_realm]
+# .example.com = EXAMPLE.COM

BIN
confs/prod/user.keytab


+ 8 - 0
confs/prod/信息.txt

@@ -0,0 +1,8 @@
+ip+port:172.23.21.7:7001
+username:ylaiuser
+password:EMR-56L6ZNTS
+schema:default
+hdfs_path:hdfs://HDFS84854

+ 10 - 0
confs/prod/平台登录地址.txt

@@ -0,0 +1,10 @@
+1. AI platform
+http://ailab.digitalyili.com/
+tenant/tenant_pwd (tenant administrator)
+pmguser/pmguser_pwd (project-group administrator)
+pmuser/pmuser_pwd (project lead)
+biaozhu/biaozhu_pwd (annotator)
+
+2. foot
+http://aicenter.digitalyili.com/
+admin/Foot@2022

+ 42 - 0
confs/prod/第三方服务.txt

@@ -0,0 +1,42 @@
+1. rabbitmq
+172.23.22.106:5671
+root/1qaz@WSX
+
+2. gitlab
+aigit.digitalyili.com
+root/Admin@123
+
+3. artifactory
+http://aicenter.digitalyili.com/ui
+admin/Admin@123
+
+4. kafka
+172.23.22.106:9092
+
+5. redis (prod):
+AI platform:
+IP: 172.23.12.226:6379
+credentials: RttN4RbjQBf
+
+6. mysql (prod):
+ip: 172.23.12.194:3306
+credentials: aihub_prod : RttN4RbjQBfv
+databases:
+aihub_base_prod
+aihub_data_prod
+aihub_foot_prod
+aihub_base_test_prod
+aihub_data_test_prod
+
+7. pgsql
+ip: 172.23.12.207:5432
+credentials: aihub_data : t@evPYeB5W5c$K0p
+database: prod_aihub_data
+
+8. COS bucket (prod):
+bucket name: aihub-prod-1302259445
+access URL: https://aihub-prod-1302259445.cos.ap-beijing.myqcloud.com
+CDN access: https://aihubcos.digitalyili.com (not yet configured)
+API keys:
+SecretId AKIDb2D7mBJAk47tOdaGfogHacOt74lBJCqD
+SecretKey 8yXpqZ1D8m9drpY0B5i4B2btxlJacOt7