
System logging with logback

The logging setup uses logback together with the logstash encoder. The following jars are required on the classpath:
[quote]
logback-classic-1.0.3.jar
[/quote]

[quote]
logback-core-1.0.3.jar
[/quote]

[quote]
logstash-logback-encoder-1.2.jar
[/quote]
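If these jars are managed with Maven rather than dropped onto the classpath by hand, the coordinates below are a sketch of the equivalent dependencies (the groupId/artifactId values are the usual published ones and are assumed here, not taken from the project's actual pom):

<!-- Sketch: Maven coordinates for the jars listed above (assumed, not from the project's pom) -->
<dependency>
  <groupId>ch.qos.logback</groupId>
  <artifactId>logback-classic</artifactId>
  <version>1.0.3</version>
</dependency>
<dependency>
  <groupId>ch.qos.logback</groupId>
  <artifactId>logback-core</artifactId>
  <version>1.0.3</version>
</dependency>
<dependency>
  <groupId>net.logstash.logback</groupId>
  <artifactId>logstash-logback-encoder</artifactId>
  <version>1.2</version>
</dependency>

Note that logback-classic already pulls in logback-core transitively, so the explicit logback-core entry is usually redundant.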

The complete logback.xml:

<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true">

  <!-- To pull these values from Maven pom.xml properties instead of hard-coding them,
       build resource filtering must be declared in pom.xml -->
  <property scope="system" name="APP_NAME" value="lifeix-payment" />
  <property scope="system" name="APP_VERSION" value="1.0.0-Beta1" />
  <property scope="system" name="APP_ENV" value="development" />
  <property scope="system" name="LOG_DIR" value="/usr/local/tomcat/logs" />
  <property scope="system" name="LOG_ORDER_DIR" value="/usr/local/tomcat/logs/task" />
  <property scope="system" name="LOG_ORDER_NAME" value="lifeix-payment-order" />

  <!-- Order task log -->
  <appender name="ordertask" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
      <level>debug</level>
    </filter>
    <file>${LOG_ORDER_DIR}/${LOG_ORDER_NAME}.log</file>
    <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
      <!-- roll over daily -->
      <fileNamePattern>${LOG_ORDER_DIR}/${LOG_ORDER_NAME}-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
      <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
        <!-- or whenever a log file reaches 2 GB -->
        <maxFileSize>2000MB</maxFileSize>
      </timeBasedFileNamingAndTriggeringPolicy>
    </rollingPolicy>
    <!-- Log output pattern -->
    <encoder>
      <pattern>%-20(%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread]) %-5level %logger{80} - %msg%n</pattern>
    </encoder>
  </appender>

  <appender name="ROLLING" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
      <level>info</level>
    </filter>
    <file>${LOG_DIR}/${APP_NAME}.log</file>
    <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
      <!-- rollover daily -->
      <fileNamePattern>${LOG_DIR}/${APP_NAME}-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
      <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
        <!-- or whenever the file size reaches 2 GB -->
        <maxFileSize>2000MB</maxFileSize>
      </timeBasedFileNamingAndTriggeringPolicy>
    </rollingPolicy>
    <encoder>
      <pattern>%-20(%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread]) %-5level %logger{80} - %msg%n</pattern>
    </encoder>
  </appender>

  <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
      <level>info</level>
    </filter>
    <encoder>
      <pattern>%-20(%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread]) %-5level %logger{80} - %msg%n</pattern>
    </encoder>
  </appender>

  <!-- logstash appender settings -->
  <appender name="logstash" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <file>${LOG_DIR}/logstash_${APP_NAME}_logback.json</file>
    <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
      <!-- rollover daily -->
      <fileNamePattern>${LOG_DIR}/logstash_${APP_NAME}_logback%d{yyyy-MM-dd}.%i.json</fileNamePattern>
      <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
        <!-- or whenever the file size reaches 2 GB -->
        <maxFileSize>2000MB</maxFileSize>
      </timeBasedFileNamingAndTriggeringPolicy>
    </rollingPolicy>
    <encoding>UTF-8</encoding>
    <encoder class="net.logstash.logback.encoder.LogstashEncoder" />
  </appender>

  <root level="INFO">
    <appender-ref ref="ROLLING" />
    <appender-ref ref="CONSOLE" />
    <appender-ref ref="logstash" />
  </root>

  <!-- Default log levels for specific packages -->
  <logger name="org.kaleidofoundry.core" level="WARN" />
  <logger name="net.rubyeye.xmemcached" level="INFO" />
  <logger name="com.google.code.yanf4j.core" level="ERROR" />
  <logger name="org.apache.mina.filter.logging" level="WARN" />

  <logger name="com.lifeix.pay.api.util.PaymentOrderLog" level="DEBUG" additivity="false">
    <appender-ref ref="ordertask" />
  </logger>

</configuration>


For reference, the relevant implementations from logback 1.0.3 follow, starting with ch.qos.logback.core.rolling.RollingFileAppender:

/*
 * Logback: the reliable, generic, fast and flexible logging framework.
 */
package ch.qos.logback.core.rolling;

import java.io.File;
import java.io.IOException;

import static ch.qos.logback.core.CoreConstants.CODES_URL;

import ch.qos.logback.core.FileAppender;
import ch.qos.logback.core.rolling.helper.CompressionMode;

/**
 * <code>RollingFileAppender</code> extends {@link FileAppender} to backup the
 * log files depending on {@link RollingPolicy} and {@link TriggeringPolicy}.
 * <p>
 * For more information about this appender, please refer to the online manual
 * at http://logback.qos.ch/manual/appenders.html#RollingFileAppender
 *
 * @author Heinz Richter
 * @author Ceki Gülcü
 */
public class RollingFileAppender<E> extends FileAppender<E> {
  File currentlyActiveFile;
  TriggeringPolicy<E> triggeringPolicy;
  RollingPolicy rollingPolicy;

  public void start() {
    if (triggeringPolicy == null) {
      addWarn("No TriggeringPolicy was set for the RollingFileAppender named "
          + getName());
      addWarn("For more information, please visit " + CODES_URL + "#rfa_no_tp");
      return;
    }

    // we don't want to void existing log files
    if (!append) {
      addWarn("Append mode is mandatory for RollingFileAppender");
      append = true;
    }

    if (rollingPolicy == null) {
      addError("No RollingPolicy was set for the RollingFileAppender named "
          + getName());
      addError("For more information, please visit " + CODES_URL + "rfa_no_rp");
      return;
    }

    if (isPrudent()) {
      if (rawFileProperty() != null) {
        addWarn("Setting \"File\" property to null on account of prudent mode");
        setFile(null);
      }
      if (rollingPolicy.getCompressionMode() != CompressionMode.NONE) {
        addError("Compression is not supported in prudent mode. Aborting");
        return;
      }
    }

    currentlyActiveFile = new File(getFile());
    addInfo("Active log file name: " + getFile());
    super.start();
  }

  @Override
  public void stop() {
    if (rollingPolicy != null) rollingPolicy.stop();
    if (triggeringPolicy != null) triggeringPolicy.stop();
    super.stop();
  }

  @Override
  public void setFile(String file) {
    // http://jira.qos.ch/browse/LBCORE-94
    // allow setting the file name to null if mandated by prudent mode
    if (file != null && ((triggeringPolicy != null) || (rollingPolicy != null))) {
      addError("File property must be set before any triggeringPolicy or rollingPolicy properties");
      addError("Visit " + CODES_URL + "#rfa_file_after for more information");
    }
    super.setFile(file);
  }

  @Override
  public String getFile() {
    return rollingPolicy.getActiveFileName();
  }

  /**
   * Implemented by delegating most of the rollover work to a rolling policy.
   */
  public void rollover() {
    synchronized (lock) {
      // Note: This method needs to be synchronized because it needs exclusive
      // access while it closes and then re-opens the target file.
      //
      // make sure to close the hereto active log file! Renaming under windows
      // does not work for open files.
      this.closeOutputStream();
      try {
        rollingPolicy.rollover();
      } catch (RolloverFailure rf) {
        addWarn("RolloverFailure occurred. Deferring roll-over.");
        // we failed to roll-over, let us not truncate and risk data loss
        this.append = true;
      }
      try {
        // update the currentlyActiveFile
        // http://jira.qos.ch/browse/LBCORE-90
        currentlyActiveFile = new File(rollingPolicy.getActiveFileName());

        // This will also close the file. This is OK since multiple
        // close operations are safe.
        this.openFile(rollingPolicy.getActiveFileName());
      } catch (IOException e) {
        addError("setFile(" + fileName + ", false) call failed.", e);
      }
    }
  }

  /**
   * This method differentiates RollingFileAppender from its super class.
   */
  @Override
  protected void subAppend(E event) {
    // The roll-over check must precede actual writing. This is the
    // only correct behavior for time driven triggers.
    // We need to synchronize on triggeringPolicy so that only one rollover
    // occurs at a time
    synchronized (triggeringPolicy) {
      if (triggeringPolicy.isTriggeringEvent(currentlyActiveFile, event)) {
        rollover();
      }
    }
    super.subAppend(event);
  }

  public RollingPolicy getRollingPolicy() {
    return rollingPolicy;
  }

  public TriggeringPolicy<E> getTriggeringPolicy() {
    return triggeringPolicy;
  }

  /**
   * Sets the rolling policy. In case the 'policy' argument also implements
   * {@link TriggeringPolicy}, then the triggering policy for this appender is
   * automatically set to be the policy argument.
   *
   * @param policy
   */
  @SuppressWarnings("unchecked")
  public void setRollingPolicy(RollingPolicy policy) {
    rollingPolicy = policy;
    if (rollingPolicy instanceof TriggeringPolicy) {
      triggeringPolicy = (TriggeringPolicy<E>) policy;
    }
  }

  public void setTriggeringPolicy(TriggeringPolicy<E> policy) {
    triggeringPolicy = policy;
    if (policy instanceof RollingPolicy) {
      rollingPolicy = (RollingPolicy) policy;
    }
  }
}



/**
 * Logback: the reliable, generic, fast and flexible logging framework.
 * Copyright (C) 1999-2011, QOS.ch. All rights reserved.
 *
 * This program and the accompanying materials are dual-licensed under
 * either the terms of the Eclipse Public License v1.0 as published by
 * the Eclipse Foundation
 *
 *   or (per the licensee's choosing)
 *
 * under the terms of the GNU Lesser General Public License version 2.1
 * as published by the Free Software Foundation.
 */
package ch.qos.logback.core.rolling;

import java.io.File;
import java.util.Date;
import java.util.concurrent.Future;

import ch.qos.logback.core.CoreConstants;
import ch.qos.logback.core.rolling.helper.*;

/**
 * <code>TimeBasedRollingPolicy</code> is both easy to configure and quite
 * powerful. It allows the roll over to be made based on time. It is possible to
 * specify that the roll over occur once per day, per week or per month.
 *
 * <p>For more information, please refer to the online manual at
 * http://logback.qos.ch/manual/appenders.html#TimeBasedRollingPolicy
 *
 * @author Ceki Gülcü
 */
public class TimeBasedRollingPolicy<E> extends RollingPolicyBase implements
    TriggeringPolicy<E> {
  static final String FNP_NOT_SET = "The FileNamePattern option must be set before using TimeBasedRollingPolicy. ";
  static final int INFINITE_HISTORY = 0;

  // WCS: without compression suffix
  FileNamePattern fileNamePatternWCS;

  private Compressor compressor;
  private RenameUtil renameUtil = new RenameUtil();
  Future<?> future;

  private int maxHistory = INFINITE_HISTORY;
  private ArchiveRemover archiveRemover;

  TimeBasedFileNamingAndTriggeringPolicy<E> timeBasedFileNamingAndTriggeringPolicy;

  boolean cleanHistoryOnStart = false;

  public void start() {
    // set the LR for our utility object
    renameUtil.setContext(this.context);

    // find out period from the filename pattern
    if (fileNamePatternStr != null) {
      fileNamePattern = new FileNamePattern(fileNamePatternStr, this.context);
      determineCompressionMode();
    } else {
      addWarn(FNP_NOT_SET);
      addWarn(CoreConstants.SEE_FNP_NOT_SET);
      throw new IllegalStateException(FNP_NOT_SET
          + CoreConstants.SEE_FNP_NOT_SET);
    }

    compressor = new Compressor(compressionMode);
    compressor.setContext(context);

    // wcs : without compression suffix
    fileNamePatternWCS = new FileNamePattern(Compressor.computeFileNameStr_WCS(
        fileNamePatternStr, compressionMode), this.context);

    addInfo("Will use the pattern " + fileNamePatternWCS
        + " for the active file");

    if (compressionMode == CompressionMode.ZIP) {
      String zipEntryFileNamePatternStr = transformFileNamePattern2ZipEntry(fileNamePatternStr);
      zipEntryFileNamePattern = new FileNamePattern(zipEntryFileNamePatternStr, context);
    }

    if (timeBasedFileNamingAndTriggeringPolicy == null) {
      timeBasedFileNamingAndTriggeringPolicy = new DefaultTimeBasedFileNamingAndTriggeringPolicy<E>();
    }
    timeBasedFileNamingAndTriggeringPolicy.setContext(context);
    timeBasedFileNamingAndTriggeringPolicy.setTimeBasedRollingPolicy(this);
    timeBasedFileNamingAndTriggeringPolicy.start();

    // the maxHistory property is given to TimeBasedRollingPolicy instead of to
    // the TimeBasedFileNamingAndTriggeringPolicy. This makes it more convenient
    // for the user at the cost of inconsistency here.
    if (maxHistory != INFINITE_HISTORY) {
      archiveRemover = timeBasedFileNamingAndTriggeringPolicy.getArchiveRemover();
      archiveRemover.setMaxHistory(maxHistory);
      if (cleanHistoryOnStart) {
        addInfo("Cleaning on start up");
        archiveRemover.clean(new Date(timeBasedFileNamingAndTriggeringPolicy.getCurrentTime()));
      }
    }

    super.start();
  }

  private String transformFileNamePattern2ZipEntry(String fileNamePatternStr) {
    String slashified = FileFilterUtil.slashify(fileNamePatternStr);
    return FileFilterUtil.afterLastSlash(slashified);
  }

  public void setTimeBasedFileNamingAndTriggeringPolicy(
      TimeBasedFileNamingAndTriggeringPolicy<E> timeBasedTriggering) {
    this.timeBasedFileNamingAndTriggeringPolicy = timeBasedTriggering;
  }

  public TimeBasedFileNamingAndTriggeringPolicy<E> getTimeBasedFileNamingAndTriggeringPolicy() {
    return timeBasedFileNamingAndTriggeringPolicy;
  }

  public void rollover() throws RolloverFailure {
    // when rollover is called the elapsed period's file has
    // been already closed. This is a working assumption of this method.

    String elapsedPeriodsFileName = timeBasedFileNamingAndTriggeringPolicy
        .getElapsedPeriodsFileName();

    String elpasedPeriodStem = FileFilterUtil.afterLastSlash(elapsedPeriodsFileName);

    if (compressionMode == CompressionMode.NONE) {
      if (getParentsRawFileProperty() != null) {
        renameUtil.rename(getParentsRawFileProperty(), elapsedPeriodsFileName);
      } // else { nothing to do if CompressionMode == NONE and parentsRawFileProperty == null }
    } else {
      if (getParentsRawFileProperty() == null) {
        future = asyncCompress(elapsedPeriodsFileName, elapsedPeriodsFileName, elpasedPeriodStem);
      } else {
        future = renamedRawAndAsyncCompress(elapsedPeriodsFileName, elpasedPeriodStem);
      }
    }

    if (archiveRemover != null) {
      archiveRemover.clean(new Date(timeBasedFileNamingAndTriggeringPolicy.getCurrentTime()));
    }
  }

  Future asyncCompress(String nameOfFile2Compress, String nameOfCompressedFile, String innerEntryName)
      throws RolloverFailure {
    AsynchronousCompressor ac = new AsynchronousCompressor(compressor);
    return ac.compressAsynchronously(nameOfFile2Compress, nameOfCompressedFile, innerEntryName);
  }

  Future renamedRawAndAsyncCompress(String nameOfCompressedFile, String innerEntryName)
      throws RolloverFailure {
    String parentsRawFile = getParentsRawFileProperty();
    String tmpTarget = parentsRawFile + System.nanoTime() + ".tmp";
    renameUtil.rename(parentsRawFile, tmpTarget);
    return asyncCompress(tmpTarget, nameOfCompressedFile, innerEntryName);
  }

  /**
   * The active log file is determined by the value of the parent's filename
   * option. However, in case the file name is left blank, then, the active log
   * file equals the file name for the current period as computed by the
   * <b>FileNamePattern</b> option.
   *
   * <p>The RollingPolicy must know whether it is responsible for changing the
   * name of the active file or not. If the active file name is set by the user
   * via the configuration file, then the RollingPolicy must let it like it is.
   * If the user does not specify an active file name, then the RollingPolicy
   * generates one.
   *
   * <p>To be sure that the file name used by the parent class has been
   * generated by the RollingPolicy and not specified by the user, we keep track
   * of the last generated name object and compare its reference to the parent
   * file name. If they match, then the RollingPolicy knows it's responsible for
   * the change of the file name.
   */
  public String getActiveFileName() {
    String parentsRawFileProperty = getParentsRawFileProperty();
    if (parentsRawFileProperty != null) {
      return parentsRawFileProperty;
    } else {
      return timeBasedFileNamingAndTriggeringPolicy
          .getCurrentPeriodsFileNameWithoutCompressionSuffix();
    }
  }

  public boolean isTriggeringEvent(File activeFile, final E event) {
    return timeBasedFileNamingAndTriggeringPolicy.isTriggeringEvent(activeFile, event);
  }

  /**
   * Get the number of archive files to keep.
   *
   * @return number of archive files to keep
   */
  public int getMaxHistory() {
    return maxHistory;
  }

  /**
   * Set the maximum number of archive files to keep.
   *
   * @param maxHistory
   *                number of archive files to keep
   */
  public void setMaxHistory(int maxHistory) {
    this.maxHistory = maxHistory;
  }

  public boolean isCleanHistoryOnStart() {
    return cleanHistoryOnStart;
  }

  /**
   * Should archive removal be attempted on application start up? Default is false.
   * @since 1.0.1
   * @param cleanHistoryOnStart
   */
  public void setCleanHistoryOnStart(boolean cleanHistoryOnStart) {
    this.cleanHistoryOnStart = cleanHistoryOnStart;
  }

  @Override
  public String toString() {
    return "c.q.l.core.rolling.TimeBasedRollingPolicy";
  }
}
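One property of TimeBasedRollingPolicy worth noting is maxHistory, which the configuration above does not set, so INFINITE_HISTORY applies and no archived files are ever deleted. A hedged example of capping retention at 30 elapsed periods (30 days, with the daily pattern used here) for the main application log:

<!-- Sketch: same rolling policy as above, but keeping only 30 days of archives -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
  <fileNamePattern>${LOG_DIR}/${APP_NAME}-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
  <maxHistory>30</maxHistory>
  <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
    <maxFileSize>2000MB</maxFileSize>
  </timeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>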


/**
 * Logback: the reliable, generic, fast and flexible logging framework.
 * Copyright (C) 1999-2011, QOS.ch. All rights reserved.
 *
 * This program and the accompanying materials are dual-licensed under
 * either the terms of the Eclipse Public License v1.0 as published by
 * the Eclipse Foundation
 *
 *   or (per the licensee's choosing)
 *
 * under the terms of the GNU Lesser General Public License version 2.1
 * as published by the Free Software Foundation.
 */
package ch.qos.logback.core.rolling;

import java.io.File;
import java.util.Date;

import ch.qos.logback.core.joran.spi.NoAutoStart;
import ch.qos.logback.core.rolling.helper.CompressionMode;
import ch.qos.logback.core.rolling.helper.FileFilterUtil;
import ch.qos.logback.core.rolling.helper.SizeAndTimeBasedArchiveRemover;
import ch.qos.logback.core.util.FileSize;

@NoAutoStart
public class SizeAndTimeBasedFNATP<E> extends
    TimeBasedFileNamingAndTriggeringPolicyBase<E> {

  int currentPeriodsCounter = 0;
  FileSize maxFileSize;
  String maxFileSizeAsString;

  @Override
  public void start() {
    // we depend on certain fields having been initialized
    // in super.start()
    super.start();

    archiveRemover = new SizeAndTimeBasedArchiveRemover(tbrp.fileNamePattern, rc);
    archiveRemover.setContext(context);

    // we need to get the correct value of currentPeriodsCounter.
    // usually the value is 0, unless the appender or the application
    // is stopped and restarted within the same period
    String regex = tbrp.fileNamePattern.toRegex(dateInCurrentPeriod);
    String stemRegex = FileFilterUtil.afterLastSlash(regex);
    computeCurrentPeriodsHighestCounterValue(stemRegex);

    started = true;
  }

  void computeCurrentPeriodsHighestCounterValue(final String stemRegex) {
    File file = new File(getCurrentPeriodsFileNameWithoutCompressionSuffix());
    File parentDir = file.getParentFile();

    File[] matchingFileArray = FileFilterUtil
        .filesInFolderMatchingStemRegex(parentDir, stemRegex);

    if (matchingFileArray == null || matchingFileArray.length == 0) {
      currentPeriodsCounter = 0;
      return;
    }
    currentPeriodsCounter = FileFilterUtil.findHighestCounter(matchingFileArray, stemRegex);

    // if parent raw file property is not null, then the next
    // counter is max found counter+1
    if (tbrp.getParentsRawFileProperty() != null || (tbrp.compressionMode != CompressionMode.NONE)) {
      // TODO test me
      currentPeriodsCounter++;
    }
  }

  // IMPORTANT: This field can be updated by multiple threads. It follows that
  // its values may *not* be incremented sequentially. However, we don't care
  // about the actual value of the field except that from time to time the
  // expression (invocationCounter++ & invocationMask) == invocationMask) should be true.
  private int invocationCounter;
  private int invocationMask = 0x1;

  public boolean isTriggeringEvent(File activeFile, final E event) {
    long time = getCurrentTime();
    if (time >= nextCheck) {
      Date dateInElapsedPeriod = dateInCurrentPeriod;
      elapsedPeriodsFileName = tbrp.fileNamePatternWCS
          .convertMultipleArguments(dateInElapsedPeriod, currentPeriodsCounter);
      currentPeriodsCounter = 0;
      setDateInCurrentPeriod(time);
      computeNextCheck();
      return true;
    }

    // for performance reasons, check for changes every 16,invocationMask invocations
    if (((++invocationCounter) & invocationMask) != invocationMask) {
      return false;
    }
    if (invocationMask < 0x0F) {
      invocationMask = (invocationMask << 1) + 1;
    }

    if (activeFile.length() >= maxFileSize.getSize()) {
      elapsedPeriodsFileName = tbrp.fileNamePatternWCS
          .convertMultipleArguments(dateInCurrentPeriod, currentPeriodsCounter);
      currentPeriodsCounter++;
      return true;
    }

    return false;
  }

  private String getFileNameIncludingCompressionSuffix(Date date, int counter) {
    return tbrp.fileNamePattern.convertMultipleArguments(
        dateInCurrentPeriod, counter);
  }

  @Override
  public String getCurrentPeriodsFileNameWithoutCompressionSuffix() {
    return tbrp.fileNamePatternWCS.convertMultipleArguments(
        dateInCurrentPeriod, currentPeriodsCounter);
  }

  public String getMaxFileSize() {
    return maxFileSizeAsString;
  }

  public void setMaxFileSize(String maxFileSize) {
    this.maxFileSizeAsString = maxFileSize;
    this.maxFileSize = FileSize.valueOf(maxFileSize);
  }
}


/*
 * Logback: the reliable, generic, fast and flexible logging framework.
 */
package ch.qos.logback.classic.filter;

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.filter.Filter;
import ch.qos.logback.core.spi.FilterReply;

/**
 * Filters events below the threshold level.
 *
 * Events with a level below the specified level will be denied, while events
 * with a level equal or above the specified level will trigger a
 * FilterReply.NEUTRAL result, to allow the rest of the filter chain process
 * the event.
 *
 * For more information about filters, please refer to the online manual at
 * http://logback.qos.ch/manual/filters.html#thresholdFilter
 *
 * @author Sébastien Pennec
 */
public class ThresholdFilter extends Filter<ILoggingEvent> {

  Level level;

  @Override
  public FilterReply decide(ILoggingEvent event) {
    if (!isStarted()) {
      return FilterReply.NEUTRAL;
    }

    if (event.getLevel().isGreaterOrEqual(level)) {
      return FilterReply.NEUTRAL;
    } else {
      return FilterReply.DENY;
    }
  }

  public void setLevel(String level) {
    this.level = Level.toLevel(level);
  }

  public void start() {
    if (this.level != null) {
      super.start();
    }
  }
}


/**
 * Logback: the reliable, generic, fast and flexible logging framework.
 * Copyright (C) 1999-2011, QOS.ch. All rights reserved.
 *
 * This program and the accompanying materials are dual-licensed under
 * either the terms of the Eclipse Public License v1.0 as published by
 * the Eclipse Foundation
 *
 *   or (per the licensee's choosing)
 *
 * under the terms of the GNU Lesser General Public License version 2.1
 * as published by the Free Software Foundation.
 */
package ch.qos.logback.core;

import java.util.Arrays;

import ch.qos.logback.core.joran.spi.ConsoleTarget;
import ch.qos.logback.core.status.Status;
import ch.qos.logback.core.status.WarnStatus;

/**
 * ConsoleAppender appends log events to <code>System.out</code> or
 * <code>System.err</code> using a layout specified by the user. The default
 * target is <code>System.out</code>.
 *
 * For more information about this appender, please refer to the online manual
 * at http://logback.qos.ch/manual/appenders.html#ConsoleAppender
 *
 * @author Ceki Gülcü
 * @author Tom SH Liu
 * @author Ruediger Dohna
 */
public class ConsoleAppender<E> extends OutputStreamAppender<E> {

  protected ConsoleTarget target = ConsoleTarget.SystemOut;

  /**
   * Sets the value of the <b>Target</b> option. Recognized values are
   * "System.out" and "System.err". Any other value will be ignored.
   */
  public void setTarget(String value) {
    ConsoleTarget t = ConsoleTarget.findByName(value.trim());
    if (t == null) {
      targetWarn(value);
    } else {
      target = t;
    }
  }

  /**
   * Returns the current value of the <b>target</b> property. The default value
   * of the option is "System.out".
   *
   * See also {@link #setTarget}.
   */
  public String getTarget() {
    return target.getName();
  }

  private void targetWarn(String val) {
    Status status = new WarnStatus("[" + val + "] should be one of "
        + Arrays.toString(ConsoleTarget.values()), this);
    status.add(new WarnStatus(
        "Using previously set target, System.out by default.", this));
    addStatus(status);
  }

  @Override
  public void start() {
    setOutputStream(target.getStream());
    super.start();
  }
}
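The CONSOLE appender in the configuration relies on the default target, System.out. If console output should go to standard error instead, the appender's target property can be set in the XML; a minimal sketch:

<!-- Sketch: same console appender, writing to System.err instead of the default System.out -->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
  <target>System.err</target>
  <encoder>
    <pattern>%-20(%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread]) %-5level %logger{80} - %msg%n</pattern>
  </encoder>
</appender>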


The JSON encoder from logstash-logback-encoder 1.2, referenced by the logstash appender; it writes one JSON document per logging event, followed by a line separator:

/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 */
package net.logstash.logback.encoder;

import static org.apache.commons.io.IOUtils.*;

import java.io.IOException;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.commons.lang.time.FastDateFormat;

import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.classic.spi.IThrowableProxy;
import ch.qos.logback.classic.spi.ThrowableProxyUtil;
import ch.qos.logback.core.Context;
import ch.qos.logback.core.CoreConstants;
import ch.qos.logback.core.encoder.EncoderBase;

import com.fasterxml.jackson.core.JsonGenerator.Feature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class LogstashEncoder extends EncoderBase<ILoggingEvent> {

    private static final ObjectMapper MAPPER = new ObjectMapper().configure(Feature.ESCAPE_NON_ASCII, true);

    private static final FastDateFormat ISO_DATETIME_TIME_ZONE_FORMAT_WITH_MILLIS = FastDateFormat.getInstance("yyyy-MM-dd'T'HH:mm:ss.SSSZZ");

    private static final StackTraceElement DEFAULT_CALLER_DATA = new StackTraceElement("", "", "", 0);

    private boolean immediateFlush = true;

    @Override
    public void doEncode(ILoggingEvent event) throws IOException {
        ObjectNode eventNode = MAPPER.createObjectNode();
        eventNode.put("@timestamp", ISO_DATETIME_TIME_ZONE_FORMAT_WITH_MILLIS.format(event.getTimeStamp()));
        eventNode.put("@message", event.getFormattedMessage());
        eventNode.put("@fields", createFields(event));

        write(MAPPER.writeValueAsBytes(eventNode), outputStream);
        write(CoreConstants.LINE_SEPARATOR, outputStream);

        if (immediateFlush) {
            outputStream.flush();
        }
    }

    private ObjectNode createFields(ILoggingEvent event) {
        ObjectNode fieldsNode = MAPPER.createObjectNode();

        fieldsNode.put("logger_name", event.getLoggerName());
        fieldsNode.put("thread_name", event.getThreadName());
        fieldsNode.put("level", event.getLevel().toString());
        fieldsNode.put("level_value", event.getLevel().toInt());

        StackTraceElement callerData = extractCallerData(event);
        fieldsNode.put("caller_class_name", callerData.getClassName());
        fieldsNode.put("caller_method_name", callerData.getMethodName());
        fieldsNode.put("caller_file_name", callerData.getFileName());
        fieldsNode.put("caller_line_number", callerData.getLineNumber());

        IThrowableProxy throwableProxy = event.getThrowableProxy();
        if (throwableProxy != null) {
            fieldsNode.put("stack_trace", ThrowableProxyUtil.asString(throwableProxy));
        }

        Context context = getContext();
        if (context != null) {
            addPropertiesAsFields(fieldsNode, context.getCopyOfPropertyMap());
        }
        addPropertiesAsFields(fieldsNode, event.getMDCPropertyMap());

        return fieldsNode;
    }

    private void addPropertiesAsFields(final ObjectNode fieldsNode, final Map<String, String> properties) {
        if (properties != null) {
            for (Entry<String, String> entry : properties.entrySet()) {
                String key = entry.getKey();
                String value = entry.getValue();
                fieldsNode.put(key, value);
            }
        }
    }

    private StackTraceElement extractCallerData(final ILoggingEvent event) {
        final StackTraceElement[] ste = event.getCallerData();
        if (ste == null || ste.length == 0) {
            return DEFAULT_CALLER_DATA;
        }
        return ste[0];
    }

    @Override
    public void close() throws IOException {
        write(LINE_SEPARATOR, outputStream);
    }

    public boolean isImmediateFlush() {
        return immediateFlush;
    }

    public void setImmediateFlush(boolean immediateFlush) {
        this.immediateFlush = immediateFlush;
    }
}


Finally, the project's own wrapper for order logging; its fully qualified class name matches the dedicated <logger> entry in the configuration above:

package com.lifeix.pay.api.util;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Log used to collect order information.
 *
 * @author peter
 */
public class PaymentOrderLog {

    protected final static Logger LOG = LoggerFactory.getLogger(PaymentOrderLog.class);

    public static void debug(String message) {
        LOG.debug(message);
    }

    public static void info(String message) {
        LOG.info(message);
    }

    public static void warn(String message) {
        LOG.warn(message);
    }

    public static void error(String message) {
        LOG.error(message);
    }

    public static void error(String message, Throwable e) {
        LOG.error(message, e);
    }
}
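Because the logger named com.lifeix.pay.api.util.PaymentOrderLog is bound to the ordertask appender with additivity="false", everything written through this wrapper lands only in ${LOG_ORDER_DIR}/${LOG_ORDER_NAME}.log and is not duplicated into the main application log. A short usage sketch (the calling class and the messages are illustrative, not part of the project):

package com.lifeix.pay.api.service; // hypothetical caller, for illustration only

import com.lifeix.pay.api.util.PaymentOrderLog;

public class OrderLogDemo {
    public static void main(String[] args) {
        // Routed to /usr/local/tomcat/logs/task/lifeix-payment-order.log via the "ordertask" appender
        PaymentOrderLog.info("order created");        // INFO passes the appender's debug ThresholdFilter
        PaymentOrderLog.debug("order state changed"); // DEBUG also passes, since the logger level is DEBUG
        try {
            throw new IllegalStateException("payment gateway timeout"); // illustrative failure
        } catch (IllegalStateException e) {
            PaymentOrderLog.error("order processing failed", e);
        }
    }
}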