# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Define some default values that can be overridden by system properties
hadoop.root.logger=INFO,console
hadoop.log.dir=.
hadoop.log.file=hadoop.log
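#
# These defaults are normally overridden at daemon startup with JVM system
# properties, e.g. via HADOOP_OPTS in hadoop-env.sh. A minimal sketch (the
# paths and file names below are illustrative, not shipped defaults):
#   export HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=/var/log/hadoop -Dhadoop.log.file=namenode.log"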
# Define the root logger to the system property "hadoop.root.logger".
log4j.rootLogger=${hadoop.root.logger}, EventCounter

# Logging Threshold
log4j.threshold=ALL

# Null Appender
log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender

#
# Rolling File Appender - cap space usage at 5gb.
#
hadoop.log.maxfilesize=256MB
hadoop.log.maxbackupindex=20
log4j.appender.RFA=org.apache.log4j.RollingFileAppender
log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
log4j.appender.RFA.layout=org.apache.log4j.PatternLayout

# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
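#
# For illustration only, the default pattern above renders an event roughly
# like this (the message itself is hypothetical):
#   2024-01-15 10:23:45,678 INFO org.apache.hadoop.ipc.Server: Starting Socket Reader #1 for port 8020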
#
# Daily Rolling File Appender
#
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}

# Rollover at midnight
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd

log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout

# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
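#
# The DatePattern controls rollover frequency; log4j 1.x also accepts finer
# or coarser granularities, for example:
#   log4j.appender.DRFA.DatePattern=.yyyy-MM-dd-HH   (roll every hour)
#   log4j.appender.DRFA.DatePattern=.yyyy-MM         (roll monthly)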
#
# console
# Add "console" to the root logger above if you want to use this
#
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
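#
# For example, to log to stderr instead of a file, start the daemon with:
#   -Dhadoop.root.logger=INFO,console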
#
# TaskLog Appender
#
log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

#
# HDFS block state change log from block manager
#
# Uncomment the following to log normal block state change
# messages from BlockManager in NameNode.
#log4j.logger.BlockStateChange=DEBUG

#
# Security appender
#
hadoop.security.logger=INFO,NullAppender
hadoop.security.log.maxfilesize=256MB
hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}
hadoop.security.log.file=SecurityAuth-${user.name}.audit
log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}

#
# Daily Rolling Security appender
#
log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
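#
# To send the security audit to its own file, point hadoop.security.logger at
# one of the appenders above, typically in the daemon startup options (e.g.
# HADOOP_NAMENODE_OPTS in hadoop-env.sh):
#   -Dhadoop.security.logger=INFO,RFAS    (size-based rolling)
#   -Dhadoop.security.logger=INFO,DRFAS   (daily rolling)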
#
# hadoop configuration logging
#
# Uncomment the following line to turn off configuration deprecation warnings.
# log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

#
# hdfs audit logging
#
hdfs.audit.logger=INFO,NullAppender
hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
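#
# To enable the HDFS audit log, route the logger to the appender above,
# e.g. via HADOOP_NAMENODE_OPTS in hadoop-env.sh:
#   -Dhdfs.audit.logger=INFO,RFAAUDIT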
#
# NameNode metrics logging.
# The default is to retain two namenode-metrics.log files up to 64MB each.
#
namenode.metrics.logger=INFO,NullAppender
log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
log4j.additivity.NameNodeMetricsLog=false
log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
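#
# Metrics logging is disabled by default (NullAppender); following the
# pattern used by the other loggers in this file, one way to enable it is:
#   -Dnamenode.metrics.logger=INFO,NNMETRICSRFA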
#
# DataNode metrics logging.
# The default is to retain two datanode-metrics.log files up to 64MB each.
#
datanode.metrics.logger=INFO,NullAppender
log4j.logger.DataNodeMetricsLog=${datanode.metrics.logger}
log4j.additivity.DataNodeMetricsLog=false
log4j.appender.DNMETRICSRFA=org.apache.log4j.RollingFileAppender
log4j.appender.DNMETRICSRFA.File=${hadoop.log.dir}/datanode-metrics.log
log4j.appender.DNMETRICSRFA.layout=org.apache.log4j.PatternLayout
log4j.appender.DNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
log4j.appender.DNMETRICSRFA.MaxBackupIndex=1
log4j.appender.DNMETRICSRFA.MaxFileSize=64MB
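#
# Likewise for the DataNode:
#   -Ddatanode.metrics.logger=INFO,DNMETRICSRFA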
# Custom Logging levels
#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG

# AWS SDK & S3A FileSystem
#log4j.logger.com.amazonaws=ERROR
log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
#log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
#
# Event Counter Appender
# Sends counts of logging messages at different severity levels to Hadoop Metrics.
#
log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter

#
# Job Summary Appender
#
# Use the following logger to send the summary to a separate file defined by
# hadoop.mapreduce.jobsummary.log.file:
# hadoop.mapreduce.jobsummary.logger=INFO,JSA
#
hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
hadoop.mapreduce.jobsummary.log.maxbackupindex=20
log4j.appender.JSA=org.apache.log4j.RollingFileAppender
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
log4j.appender.JSA.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
#
# Shuffle connection log from ShuffleHandler
# Uncomment the following line to enable logging of shuffle connections
# log4j.logger.org.apache.hadoop.mapred.ShuffleHandler.audit=DEBUG
#
# Yarn ResourceManager Application Summary Log
#
# Set the ResourceManager summary log filename
yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
# Set the ResourceManager summary log level and appender
yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY

# To enable AppSummaryLogging for the RM,
# set yarn.server.resourcemanager.appsummary.logger to
# <LEVEL>,RMSUMMARY in hadoop-env.sh

# Appender for ResourceManager Application Summary Log
# Requires the following properties to be set
#    - hadoop.log.dir (Hadoop Log directory)
#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
log4j.appender.RMSUMMARY.MaxFileSize=256MB
log4j.appender.RMSUMMARY.MaxBackupIndex=20
log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
#
# YARN ResourceManager audit logging
#
rm.audit.logger=INFO,NullAppender
rm.audit.log.maxfilesize=256MB
rm.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
log4j.appender.RMAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.RMAUDIT.File=${hadoop.log.dir}/rm-audit.log
log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.RMAUDIT.MaxFileSize=${rm.audit.log.maxfilesize}
log4j.appender.RMAUDIT.MaxBackupIndex=${rm.audit.log.maxbackupindex}
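#
# As with the HDFS audit log, enable by overriding the logger at RM startup:
#   -Drm.audit.logger=INFO,RMAUDIT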
#
# YARN NodeManager audit logging
#
nm.audit.logger=INFO,NullAppender
nm.audit.log.maxfilesize=256MB
nm.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
log4j.appender.NMAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.NMAUDIT.File=${hadoop.log.dir}/nm-audit.log
log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.NMAUDIT.MaxFileSize=${nm.audit.log.maxfilesize}
log4j.appender.NMAUDIT.MaxBackupIndex=${nm.audit.log.maxbackupindex}
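#
# And correspondingly for the NodeManager:
#   -Dnm.audit.logger=INFO,NMAUDIT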
# HS audit log configs
#mapreduce.hs.audit.logger=INFO,HSAUDIT
#log4j.logger.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=${mapreduce.hs.audit.logger}
#log4j.additivity.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=false
#log4j.appender.HSAUDIT=org.apache.log4j.DailyRollingFileAppender
#log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log
#log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout
#log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
#log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd

# Http Server Request Logs
#log4j.logger.http.requests.namenode=INFO,namenoderequestlog
#log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
#log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log
#log4j.appender.namenoderequestlog.RetainDays=3
#log4j.logger.http.requests.datanode=INFO,datanoderequestlog
#log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
#log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log
#log4j.appender.datanoderequestlog.RetainDays=3
#log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog
#log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
#log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log
#log4j.appender.resourcemanagerrequestlog.RetainDays=3
#log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog
#log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
#log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log
#log4j.appender.jobhistoryrequestlog.RetainDays=3
#log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog
#log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
#log4j.appender.nodemanagerrequestlog.RetainDays=3
# WebHdfs request log on datanodes
# Specify -Ddatanode.webhdfs.logger=INFO,HTTPDRFA on datanode startup to
# direct the log to a separate file.
#datanode.webhdfs.logger=INFO,console
#log4j.logger.datanode.webhdfs=${datanode.webhdfs.logger}
#log4j.appender.HTTPDRFA=org.apache.log4j.DailyRollingFileAppender
#log4j.appender.HTTPDRFA.File=${hadoop.log.dir}/hadoop-datanode-webhdfs.log
#log4j.appender.HTTPDRFA.layout=org.apache.log4j.PatternLayout
#log4j.appender.HTTPDRFA.layout.ConversionPattern=%d{ISO8601} %m%n
#log4j.appender.HTTPDRFA.DatePattern=.yyyy-MM-dd
# Appender that collects summary counts of recent warning and error messages
yarn.ewma.cleanupInterval=300
yarn.ewma.messageAgeLimitSeconds=86400
yarn.ewma.maxUniqueMessages=250
log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
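#
# EWMA only sees events from loggers it is attached to. One way to attach it
# (an illustrative assumption, not a shipped default) is to include it in the
# root logger on the YARN daemons:
#   -Dhadoop.root.logger=INFO,RFA,EWMA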
#
# Fair scheduler state dump
#
# Use the following logger to dump the state to a separate file
#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=DEBUG,FSSTATEDUMP
#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=false
#log4j.appender.FSSTATEDUMP=org.apache.log4j.RollingFileAppender
#log4j.appender.FSSTATEDUMP.File=${hadoop.log.dir}/fairscheduler-statedump.log
#log4j.appender.FSSTATEDUMP.layout=org.apache.log4j.PatternLayout
#log4j.appender.FSSTATEDUMP.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#log4j.appender.FSSTATEDUMP.MaxFileSize=${hadoop.log.maxfilesize}
#log4j.appender.FSSTATEDUMP.MaxBackupIndex=${hadoop.log.maxbackupindex}

# Log levels of third-party libraries
log4j.logger.org.apache.commons.beanutils=WARN