pom.xml

<dependency>
    <groupId>com.github.danielwegener</groupId>
    <artifactId>logback-kafka-appender</artifactId>
    <version>0.2.0-RC2</version>
</dependency>
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>6.4</version>
</dependency>
<dependency>
    <groupId>ch.qos.logback</groupId>
    <artifactId>logback-classic</artifactId>
    <version>1.2.3</version>
</dependency>

logback-spring.xml

<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="10 seconds" debug="false">
    <!-- Context name -->
    <contextName>logback</contextName>
    <!-- Root directory for log files -->
    <property name="log.path" value="C:/logs" />
    <!-- These springProperty entries must match the corresponding keys in application.yml -->
    <springProperty scope="context" name="servicename" source="spring.application.name" defaultValue="UnknownService"/>
    <springProperty scope="context" name="env" source="spring.profiles.active" defaultValue="dev"/>
    <springProperty scope="context" name="bootstrapServers" source="spring.kafka.bootstrap-servers" defaultValue="localhost:9092"/>
    <springProperty scope="context" name="serviceport" source="server.port" defaultValue="80"/>
    <!-- Resolve the server IP and host name via a custom conversion word -->
    <conversionRule conversionWord="serviceip" converterClass="com.icar.web.makedata.utils.LogIpConfigUtil" />

    <!-- Console appender -->
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>INFO</level>
        </filter>
        <encoder>
            <pattern>%yellow(%date{yyyy-MM-dd HH:mm:ss}) |%highlight(%-5level) |%blue(%thread) |%green(%file:%line) |%magenta(%logger) |%cyan(%msg%n)</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!-- ① Log file for level=INFO -->
    <appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/info/infolevel_makedata.%d{yyyy-MM-dd}.%i.txt</fileNamePattern>
            <maxFileSize>100MB</maxFileSize>
            <maxHistory>15</maxHistory>
            <totalSizeCap>2GB</totalSizeCap>
        </rollingPolicy>
        <!-- Accept only INFO events -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <!-- File output format -->
        <encoder>
            <pattern>[%d{yyyy-MM-dd HH:mm:ss.SSS}] %thread %-5level %logger{50} --- %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!-- ② Log file for level=WARN -->
    <appender name="WARN_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/warn/warnlevel_makedata.%d{yyyy-MM-dd}.%i.txt</fileNamePattern>
            <maxFileSize>100MB</maxFileSize>
            <maxHistory>15</maxHistory>
            <totalSizeCap>2GB</totalSizeCap>
        </rollingPolicy>
        <!-- Accept only WARN events -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>WARN</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <!-- File output format -->
        <encoder>
            <pattern>[%d{yyyy-MM-dd HH:mm:ss.SSS}] %thread %-5level %logger{50} - %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!-- ③ Log file for level=ERROR -->
    <appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/error/errorlevel_makedata.%d{yyyy-MM-dd}.%i.txt</fileNamePattern>
            <maxFileSize>100MB</maxFileSize>
            <maxHistory>15</maxHistory>
            <totalSizeCap>2GB</totalSizeCap>
        </rollingPolicy>
        <!-- Accept only ERROR events -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <!-- File output format -->
        <encoder>
            <pattern>[%d{yyyy-MM-dd HH:mm:ss.SSS}] %thread %-5level %logger{50} - %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!-- Alternative Kafka appender using a Logstash layout, kept for reference:
    <appender name="KafkaAppender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
        <encoder class="com.github.danielwegener.logback.kafka.encoding.LayoutKafkaMessageEncoder">
            <layout class="net.logstash.logback.layout.LogstashLayout">
                Whether to include the logging context:
                <includeContext>true</includeContext>
                Whether to include caller data (where the log call originated):
                <includeCallerData>true</includeCallerData>
                Custom additional fields:
                <customFields>{"system":"test"}</customFields>
                Shortened names for the standard fields:
                <fieldNames class="net.logstash.logback.fieldnames.ShortenedFieldNames"/>
            </layout>
            <charset>UTF-8</charset>
        </encoder>
        The Kafka topic must match the topic in the consumer configuration, otherwise Kafka will silently ignore you:
        <topic>applog_dev</topic>
        <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.HostNameKeyingStrategy" />
        <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />
        <producerConfig>bootstrap.servers=124.71.59.186:9092,139.159.249.142:9092,124.71.85.73:9092</producerConfig>
        Do not wait for a broker to ack the reception of a batch:
        <producerConfig>acks=0</producerConfig>
        Wait up to 1000 ms and collect log messages before sending them as a batch:
        <producerConfig>linger.ms=1000</producerConfig>
        Even if the producer buffer runs full, do not block the application but start to drop messages:
        <producerConfig>max.block.ms=0</producerConfig>
        Define a client id to identify this producer to the Kafka broker:
        <producerConfig>client.id=${HOSTNAME}-${CONTEXT_NAME}-logback-relaxed</producerConfig>
        Fallback appender if Kafka is not available:
        <appender-ref ref="CONSOLE" />
    </appender>
    -->

    <!-- Kafka appender: ships each log event to Kafka as a JSON message -->
    <appender name="KafkaAppender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
        <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers class="net.logstash.logback.composite.loggingevent.LoggingEventJsonProviders">
                <pattern>
                    <!-- Variant that also includes the logger name:
                    <pattern>
                        {
                        "env": "${env}",
                        "servicename": "${servicename}",
                        "type": "${servicename}",
                        "serviceinfo": "%serviceip:${serviceport}",
                        "date": "%d{yyyy-MM-dd HH:mm:ss.SSS}",
                        "level": "%level",
                        "thread": "%thread",
                        "logger": "%logger{36}",
                        "msg": "%msg",
                        "exception": "%exception"
                        }
                    </pattern>
                    -->
                    <pattern>
                        {
                        "env": "${env}",
                        "servicename": "${servicename}",
                        "type": "${servicename}",
                        "serviceinfo": "%serviceip:${serviceport}",
                        "date": "%d{yyyy-MM-dd HH:mm:ss.SSS}",
                        "level": "%level",
                        "thread": "%thread",
                        "msg": "%msg",
                        "exception": "%exception"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
        <topic>appdev</topic>
        <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy"/>
        <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>
        <producerConfig>acks=0</producerConfig>
        <producerConfig>linger.ms=1000</producerConfig>
        <producerConfig>max.block.ms=0</producerConfig>
        <producerConfig>bootstrap.servers=${bootstrapServers}</producerConfig>
    </appender>

    <!-- Wrap the Kafka appender so that logging never blocks application threads -->
    <appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender">
        <appender-ref ref="KafkaAppender"/>
    </appender>

    <root level="INFO">
        <appender-ref ref="ASYNC"/>
    </root>

    <!-- Alternative root that writes to the console and the three level-split files:
    <root>
        <appender-ref ref="CONSOLE"/>
        <appender-ref ref="INFO_FILE"/>
        <appender-ref ref="WARN_FILE"/>
        <appender-ref ref="ERROR_FILE"/>
    </root>
    -->

    <!-- springProfile: active only under the dev profile
    <springProfile name="dev">
        <logger name="com.nick" level="INFO" additivity="true">
            <appender-ref ref="KafkaAppender" />
        </logger>
    </springProfile>
    -->
</configuration>
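
The springProperty entries at the top of the configuration read their values from the Spring environment. A minimal application.yml sketch that supplies them might look like this (the service name, port, and broker address are illustrative placeholders, not values from the original project):

spring:
  application:
    name: makedata                      # feeds ${servicename} (and the "type" JSON field)
  profiles:
    active: dev                         # feeds ${env}
  kafka:
    bootstrap-servers: localhost:9092   # feeds ${bootstrapServers}
server:
  port: 8080                            # feeds ${serviceport}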
LogIpConfigUtil.java

import ch.qos.logback.classic.pattern.ClassicConverter;
import ch.qos.logback.classic.spi.ILoggingEvent;

import java.net.InetAddress;

/**
 * Supplies the server host name and IP address for the %serviceip conversion word.
 */
public class LogIpConfigUtil extends ClassicConverter {

    public static String serviceIp;

    static {
        try {
            /* Alternative: enumerate the network interfaces and take a
               non-loopback IPv4 address instead of relying on getLocalHost():

            Enumeration<NetworkInterface> allNetInterfaces = NetworkInterface.getNetworkInterfaces();
            while (allNetInterfaces.hasMoreElements()) {
                NetworkInterface netInterface = allNetInterfaces.nextElement();
                if (netInterface.isLoopback() || netInterface.isVirtual() || !netInterface.isUp()) {
                    continue;
                }
                Enumeration<InetAddress> addresses = netInterface.getInetAddresses();
                while (addresses.hasMoreElements()) {
                    InetAddress ip = addresses.nextElement();
                    if (ip instanceof Inet4Address) {
                        serviceIp = ip.getHostAddress();
                    }
                }
            }
            */
            InetAddress addr = InetAddress.getLocalHost();
            serviceIp = addr.getHostName() + "/" + addr.getHostAddress();
        } catch (Exception e) {
            // Fall back to a placeholder instead of failing logging initialization.
            serviceIp = "unknown";
        }
    }

    @Override
    public String convert(ILoggingEvent iLoggingEvent) {
        return serviceIp;
    }
}
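
With the converter registered and the root logger pointing at the ASYNC appender, every SLF4J call is encoded to JSON and shipped to the appdev topic. A minimal smoke test, assuming the configuration above is on the classpath (the class name and messages are illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class KafkaLoggingSmokeTest {

    private static final Logger log = LoggerFactory.getLogger(KafkaLoggingSmokeTest.class);

    public static void main(String[] args) throws InterruptedException {
        // Each call becomes one JSON message on the "appdev" topic, carrying the
        // env/servicename/serviceinfo fields defined in the encoder pattern.
        log.info("kafka logging smoke test");
        log.error("kafka logging error sample", new IllegalStateException("boom"));

        // linger.ms=1000 batches messages for up to a second and delivery is
        // asynchronous, so give the producer time to flush before the JVM exits.
        Thread.sleep(2000);
    }
}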

logstash-es.conf

input {
  kafka {
    group_id => "test-consumer-group"
    topics => ["appdev"]
    bootstrap_servers => "localhost:9092"
    codec => "json"
  }
}
filter {
}
output {
  stdout { codec => rubydebug }
  if [type] == "xxxx" {
    elasticsearch {
      hosts => [ "localhost:9200" ]
      index => "xxx"
    }
  }
}
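
The [type] conditional keys off the "type" field that the logback encoder pattern sets to the Spring application name, so each service can be routed to its own index. With the illustrative application name from the application.yml sketch above, the output block might read (the service name and index are hypothetical placeholders):

output {
  stdout { codec => rubydebug }
  # "type" equals spring.application.name, as emitted by the encoder pattern
  if [type] == "makedata" {
    elasticsearch {
      hosts => [ "localhost:9200" ]
      index => "makedata-log"
    }
  }
}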
