Configuring logback-spring integration with ELK and Kafka

pom.xml

    <dependency>
        <groupId>com.github.danielwegener</groupId>
        <artifactId>logback-kafka-appender</artifactId>
        <version>0.2.0-RC2</version>
    </dependency>

    <dependency>  
        <groupId>net.logstash.logback</groupId>  
        <artifactId>logstash-logback-encoder</artifactId>  
        <version>6.4</version>  
    </dependency>

    <dependency>  
        <groupId>ch.qos.logback</groupId>  
        <artifactId>logback-classic</artifactId>  
        <version>1.2.3</version>  
    </dependency>
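
The appender publishes events through the standard Kafka producer API, so org.apache.kafka:kafka-clients must be on the classpath. In a Spring Boot service that already uses spring-kafka (as the spring.kafka.bootstrap-servers property referenced below suggests) it is usually present transitively; otherwise it can be declared explicitly, for example (the version is illustrative and should match your Kafka environment):

    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>2.3.1</version>
    </dependency>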

logback-spring.xml


<configuration>

<!-- Context name -->
<contextName>logback</contextName>

<!-- Log root directory -->
<property name="log.path" value="C:/logs" />
<springProperty scope="context" name="servicename" source="spring.application.name" defaultValue="UnknownService"/>
<springProperty scope="context" name="env" source="spring.profiles.active" defaultValue="dev"/>
<springProperty scope="context" name="bootstrapServers" source="spring.kafka.bootstrap-servers" defaultValue="localhost:9092"/>
<springProperty scope="context" name="serviceport" source="server.port" defaultValue="80"/>
<!-- Custom conversion word that resolves the server IP and hostname (implemented by LogIpConfigUtil below) -->
<conversionRule conversionWord="serviceip" converterClass="com.icar.web.makedata.utils.LogIpConfigUtil" />

<!-- The springProperty entries above must match the corresponding keys in application.yml (a sample yml is shown after this file) -->

<!-- Log to the console -->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">  
    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">  
        <level>INFO</level>  
    </filter>  
    <encoder>  
        <pattern>%yellow(%date{yyyy-MM-dd HH:mm:ss}) |%highlight(%-5level) |%blue(%thread) |%green(%file:%line) |%magenta(%logger) |%cyan(%msg%n)</pattern>  
        <charset>UTF-8</charset>  
    </encoder>  
</appender>

<!-- ①. Log file for level=INFO -->
<appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <!-- Roll daily or at 100MB; keep 15 days of history, 2GB total -->
    <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
        <fileNamePattern>${log.path}/info/infolevel_makedata.%d{yyyy-MM-dd}.%i.txt</fileNamePattern>
        <maxFileSize>100MB</maxFileSize>
        <maxHistory>15</maxHistory>
        <totalSizeCap>2GB</totalSizeCap>
    </rollingPolicy>
    <!-- Accept only INFO events -->
    <filter class="ch.qos.logback.classic.filter.LevelFilter">
        <level>INFO</level>
        <onMatch>ACCEPT</onMatch>
        <onMismatch>DENY</onMismatch>
    </filter>
    <!-- Log file output format -->
    <encoder>
        <pattern>[%d{yyyy-MM-dd HH:mm:ss.SSS}] %thread %-5level %logger{50} --- %msg%n</pattern>
        <charset>UTF-8</charset>
    </encoder>
</appender>

<!-- ②. Log file for level=WARN -->
<appender name="WARN_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <!-- Roll daily or at 100MB; keep 15 days of history, 2GB total -->
    <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
        <fileNamePattern>${log.path}/warn/warnlevel_makedata.%d{yyyy-MM-dd}.%i.txt</fileNamePattern>
        <maxFileSize>100MB</maxFileSize>
        <maxHistory>15</maxHistory>
        <totalSizeCap>2GB</totalSizeCap>
    </rollingPolicy>
    <!-- Accept only WARN events -->
    <filter class="ch.qos.logback.classic.filter.LevelFilter">
        <level>WARN</level>
        <onMatch>ACCEPT</onMatch>
        <onMismatch>DENY</onMismatch>
    </filter>
    <!-- Log file output format -->
    <encoder>
        <pattern>[%d{yyyy-MM-dd HH:mm:ss.SSS}] %thread %-5level %logger{50} - %msg%n</pattern>
        <charset>UTF-8</charset>
    </encoder>
</appender>

<!-- ③. Log file for level=ERROR -->
<appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <!-- Roll daily or at 100MB; keep 15 days of history, 2GB total -->
    <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
        <fileNamePattern>${log.path}/error/errorlevel_makedata.%d{yyyy-MM-dd}.%i.txt</fileNamePattern>
        <maxFileSize>100MB</maxFileSize>
        <maxHistory>15</maxHistory>
        <totalSizeCap>2GB</totalSizeCap>
    </rollingPolicy>
    <!-- Accept only ERROR events -->
    <filter class="ch.qos.logback.classic.filter.LevelFilter">
        <level>ERROR</level>
        <onMatch>ACCEPT</onMatch>
        <onMismatch>DENY</onMismatch>
    </filter>
    <!-- Log file output format -->
    <encoder>
        <pattern>[%d{yyyy-MM-dd HH:mm:ss.SSS}] %thread %-5level %logger{50} - %msg%n</pattern>
        <charset>UTF-8</charset>
    </encoder>
</appender>
<appender name="KafkaAppender" class="com.github.danielwegener.logback.kafka.KafkaAppender">  
    <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">  
        <providers class="net.logstash.logback.composite.loggingevent.LoggingEventJsonProviders">  
            <pattern>  
                <pattern>  
                    {  
                    "env": "${env}",  
                    "servicename":"${servicename}",  
                    "type":"${servicename}",  
                    "serviceinfo":"%serviceip:${serviceport}",  
                    "date":"%d{yyyy-MM-dd HH:mm:ss.SSS}",  
                    "level":"%level",  
                    "thread": "%thread",  
                    "msg":"%msg",  
                    "exception":"%exception"  
                    }  
                </pattern>  
            </pattern>  
        </providers>  
    </encoder>  
    <topic>appdev</topic>  
    <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy"/>  
    <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>  
    <!-- acks=0 and max.block.ms=0 favor application availability over delivery guarantees:
         the producer never waits for broker acknowledgement and never blocks the logging thread,
         so log events may be dropped if Kafka is unreachable -->
    <producerConfig>acks=0</producerConfig>
    <producerConfig>linger.ms=1000</producerConfig>
    <producerConfig>max.block.ms=0</producerConfig>
    <producerConfig>bootstrap.servers=${bootstrapServers}</producerConfig>  
</appender>

<appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender">  
    <appender-ref ref="KafkaAppender"/>  
</appender>

<root level="INFO">  
    <appender-ref ref="ASYNC"/>  
</root>  
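
The springProperty entries read their values from the Spring environment. A minimal application.yml providing the corresponding keys might look like this (service name, port, and broker address are placeholder values):

spring:
  application:
    name: makedata-service            # read by ${servicename}
  profiles:
    active: dev                       # read by ${env}
  kafka:
    bootstrap-servers: localhost:9092 # read by ${bootstrapServers}
server:
  port: 8080                          # read by ${serviceport}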


LogIpConfigUtil.java (the converter class registered by the conversionRule above)

package com.icar.web.makedata.utils;

import ch.qos.logback.classic.pattern.ClassicConverter;
import ch.qos.logback.classic.spi.ILoggingEvent;

import java.net.InetAddress;

/**
 * Resolves the server hostname and IP for the %serviceip conversion word.
 */
public class LogIpConfigUtil extends ClassicConverter {

    public static String serviceIp;

    static {
        try {
            // Simple approach: use the local host entry. Alternatively, iterate
            // NetworkInterface.getNetworkInterfaces() and pick the first non-loopback
            // IPv4 address if the host name resolves to an unwanted interface.
            InetAddress addr = InetAddress.getLocalHost();
            serviceIp = addr.getHostName() + "/" + addr.getHostAddress();
        } catch (Exception e) {
            // Intentionally swallowed: failing to resolve the IP must never break logging.
        }
    }

    @Override
    public String convert(ILoggingEvent iLoggingEvent) {
        return serviceIp;
    }
}
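
To verify the pipeline end to end, a hypothetical Spring Boot entry point (class name and messages below are placeholders, not part of the original project) can simply emit a few log events; they should appear on the console, in the rolling files, and on the appdev topic:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
public class MakedataApplication {

    private static final Logger log = LoggerFactory.getLogger(MakedataApplication.class);

    public static void main(String[] args) {
        SpringApplication.run(MakedataApplication.class, args);
        // These events go to the console, the rolling files and, via the ASYNC/Kafka appenders, to Kafka.
        log.info("service started, testing the logback -> kafka -> logstash -> elasticsearch pipeline");
        log.warn("sample WARN event");
        log.error("sample ERROR event", new IllegalStateException("sample exception"));
    }
}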

logstash-es.conf

input {
    kafka {
        group_id => "test-consumer-group"
        topics => ["appdev"]
        bootstrap_servers => "localhost:9092"
        codec => "json"
    }
}
filter {
}
output {
    stdout { codec => rubydebug }
    # "xxxx" is a placeholder: it should match the "type" field emitted by the logback JSON pattern
    # (which is set to ${servicename}); "xxx" is the name of the target Elasticsearch index.
    if [type] == "xxxx" {
        elasticsearch {
            hosts => [ "localhost:9200" ]
            index => "xxx"
        }
    }
}
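
Assuming a local Logstash installation, the pipeline can be started from the Logstash home directory with (paths are environment-specific):

bin/logstash -f logstash-es.conf

Incoming events are printed by the stdout/rubydebug output and, when the type condition matches, indexed into Elasticsearch, where Kibana can be pointed at the configured index.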