Centos8 部署 ElasticSearch 集群并搭建 ELK,基于Logstash同步MySQL数据到ElasticSearch
阅读原文时间:2021年11月19日阅读:1

Centos8安装Docker

1.更新一下yum
[root@VM-24-9-centos ~]# yum -y update
2.安装containerd.io
# centos8默认使用podman代替docker,所以需要containerd.io
[root@VM-24-9-centos ~]# yum install https://download.docker.com/linux/fedora/30/x86_64/stable/Packages/containerd.io-1.2.6-3.3.fc30.x86_64.rpm -y

# 安装一些其他依赖
[root@VM-24-9-centos ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@VM-24-9-centos ~]# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
3.安装Docker
[root@VM-24-9-centos ~]# yum install -y docker-ce
4.启动Docker
# 启动docker
[root@VM-24-9-centos ~]# systemctl start docker
# 设置开机自启
[root@VM-24-9-centos ~]# systemctl enable docker
5.设置容器开机自启及其他命令
[root@VM-16-7-centos ~]# docker update --restart=always 容器名
--restart具体参数值详细信息:
    no:容器退出时,不重启容器
  on-failure:只有在非0状态退出时才重新启动容器
  always:无论退出状态是如何,都重启容器

# 根据容器名模糊批量停止/删除容器
# *号表示模糊查询
[root@VM-24-9-centos ~]# docker stop $(docker ps -q -f name="容器名*")
# 批量删除
[root@VM-24-9-centos ~]# docker rm $(docker ps -qa -f name="ES*")

ELK部署

部署ElasticSearch集群

1.拉取镜像及批量生成配置文件
# 拉取镜像
[root@VM-24-9-centos ~]# docker pull elasticsearch:7.14.2

# 修改虚拟内存限制,以及开启端口转发
[root@VM-24-9-centos etc]# vim /etc/sysctl.conf
vm.max_map_count=262144
net.ipv4.ip_forward = 1
[root@VM-24-9-centos etc]# /sbin/sysctl -p

# 生成配置文件及目录
# Generate config files and data directories for a 3-node ES cluster.
for port in $(seq 1 3); do
  # Per-node config and data directories.
  mkdir -p "/data/elk/es/node-${port}/conf"
  mkdir -p "/data/elk/es/node-${port}/data"
  # FIX: the containers mount /data/elk/es/plugins (see the docker run step),
  # and the IK analyzer is unpacked there too — the original created
  # /data/elk/plugins, which is never mounted.
  mkdir -p /data/elk/es/plugins
  # ES inside the container runs as uid 1000; the data dir must be writable.
  chmod 777 "/data/elk/es/node-${port}/data"
  # '>' (truncate) instead of '>>' so re-running the loop does not append
  # duplicate settings; 'touch' was redundant and is dropped.
  cat << EOF > "/data/elk/es/node-${port}/conf/es.yml"
cluster.name: jinx
node.name: node${port}
node.master: true
node.data: true
bootstrap.memory_lock: false
network.host: 0.0.0.0
http.port: 920${port}
transport.tcp.port: 930${port}
discovery.seed_hosts: ["x.x.x.x:9301","x.x.x.x:9302","x.x.x.x:9303"]
cluster.initial_master_nodes: ["node1","node2","node3"]
cluster.routing.allocation.cluster_concurrent_rebalance: 32
cluster.routing.allocation.node_concurrent_recoveries: 32
cluster.routing.allocation.node_initial_primaries_recoveries: 32
http.cors.enabled: true
http.cors.allow-origin: "*"
discovery.zen.minimum_master_nodes: 2
EOF
done

# 目录结构如下
[root@VM-24-9-centos data]# tree
.
└── elk
    ├── es
    │   ├── node-1
    │   │   ├── conf
    │   │   │   └── es.yml
    │   │   └── data
    │   ├── node-2
    │   │   ├── conf
    │   │   │   └── es.yml
    │   │   └── data
    │   └── node-3
    │       ├── conf
    │       │   └── es.yml
    │       └── data
    └── plugins

12 directories, 3 files
2.批量创建容器及查看集群信息
# 批量创建容器
# Start the three ES containers; each mounts its own config/data plus the
# shared plugins directory created in the previous step.
for port in $(seq 1 3); do
  # Heap size is governed solely by ES_JAVA_OPTS on the 7.x images; the legacy
  # ES_MIN_MEM/ES_MAX_MEM variables were removed because they are ignored when
  # ES_JAVA_OPTS is set (and their values contradicted it: 128m/2048m vs 512m).
  docker run -d \
    -e ES_JAVA_OPTS="-Xms512m -Xmx512m" \
    -p 920${port}:920${port} -p 930${port}:930${port} \
    -v /data/elk/es/node-${port}/conf/es.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
    -v /data/elk/es/node-${port}/data/:/usr/share/elasticsearch/data/ \
    -v /data/elk/es/plugins/:/usr/share/elasticsearch/plugins \
    --name es-${port} \
    elasticsearch:7.14.2
done

# 查看单个节点信息
[root@VM-24-9-centos ~]# curl http://x.x.x.x:9201/
{
  "name" : "node1",
  "cluster_name" : "jinx",
  "cluster_uuid" : "Vjb7cu6fQ6y2-ZWk0YGIiQ",
  "version" : {
    "number" : "7.2.0",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "508c38a",
    "build_date" : "2019-06-20T15:54:18.811730Z",
    "build_snapshot" : false,
    "lucene_version" : "8.0.0",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}

# 查看集群信息
[root@VM-24-9-centos ~]# curl http://x.x.x.x:9201/_cat/nodes?pretty
172.17.0.2 37 97 0 0.00 0.00 0.08 mdi * node1
172.17.0.4 35 97 0 0.00 0.00 0.08 mdi - node3
172.17.0.3 39 97 1 0.00 0.00 0.08 mdi - node2
172.17.0.6 34 97 1 0.00 0.00 0.08 mdi - node4
3.安装IK分词器

​ 先下载离线包:https://github.com/medcl/elasticsearch-analysis-ik/releases 然后把离线包拷贝到 /data/elk/es/plugins/analysis-ik 目录解压

# 安装 unzip 解压工具
[root@VM-24-9-centos plugins]# yum install unzip

# 创建 analysis-ik 目录,并把压缩包复制到此目录,并解压,然后重启容器即可,因为容器挂载了 plugins 目录,重启后便会生效
[root@VM-24-9-centos elk]# mkdir /data/elk/es/plugins/analysis-ik
[root@VM-24-9-centos analysis-ik]# unzip elasticsearch-analysis-ik-7.14.2.zip
[root@VM-24-9-centos analysis-ik]# docker restart $(docker ps -aq -f name="es*")

使用Nginx做集群负载均衡

1.获取镜像
# 拉取镜像 此处我们拉取的是官方最新镜像,其它版本可以去DockerHub查询
[root@VM-24-9-centos ~]# docker pull nginx
2.创建容器
# 创建容器 第一个nginx是容器名,第二个nginx是镜像名
[root@VM-24-9-centos ~]# docker run -d -p 9200:9200 --name nginx nginx
3.把容器内的配置文件等复制到容器外用于挂载
# nginx的配置文件日志文件及默认的页面分别放于容器内的 /etc/nginx /usr/share/nginx/html /var/log/nginx 中,我们需要将其挂载到容器外部

# 创建三个文件夹 conf html logs
[root@VM-24-9-centos data]# mkdir -p /data/nginx/{conf.d,html,logs}

# 将容器内的 nginx.conf配置文件和default.conf配置文件复制出来
[root@VM-24-9-centos data]# docker cp nginx:/usr/share/nginx/html /data/nginx
[root@VM-24-9-centos data]# docker cp nginx:/etc/nginx/nginx.conf /data/nginx
[root@VM-24-9-centos data]# docker cp nginx:/etc/nginx/conf.d/default.conf /data/nginx/conf.d/default.conf

# 查看目录结构
[root@VM-24-9-centos nginx]# cd /data/nginx
[root@VM-24-9-centos nginx]# ll
total 16
drwxr-xr-x 2 root root 4096 Nov 16 10:48 conf.d
drwxr-xr-x 2 root root 4096 Nov 16 10:48 html
drwxr-xr-x 2 root root 4096 Nov 16 10:48 logs
-rw-r--r-- 1 root root  648 Nov  2 23:01 nginx.conf

# 在 conf.d 目录下再建一个 es.conf 配置文件用于做负载均衡
[root@VM-24-9-centos conf]# vim /data/nginx/conf.d/es.conf
upstream es{
    server x.x.x.x:9201 weight=1;
    server x.x.x.x:9202 weight=1;
    server x.x.x.x:9203 weight=1;
}
server {
    listen       9200;
    server_name  localhost;
    #charset koi8-r;
    #access_log  logs/host.access.log  main;
    location / {
        proxy_pass http://es;
    #   root   html;
    #  index  index.html index.htm;
    }
    error_page   500 502 503 504  /50x.html;
    location = /50x.html {
        root   html;
    }
}
4.删除之前的容器,然后创建新的容器把目录挂载上去
# 删除容器
[root@VM-24-9-centos nginx]# docker rm -f  nginx
# 创建新的容器 --privileged=true 容器内部对挂载的目录拥有读写等特权
# Recreate nginx with the host directories mounted in; --privileged=true gives
# the container full read/write access to the mounted paths.
docker run -d \
  --name nginx_9200 \
  --privileged=true \
  -p 9200:9200 \
  -v /data/nginx/nginx.conf:/etc/nginx/nginx.conf \
  -v /data/nginx/conf.d:/etc/nginx/conf.d \
  -v /data/nginx/html:/usr/share/nginx/html \
  -v /data/nginx/logs:/var/log/nginx \
  nginx
5.访问负载均衡配置的地址查看是否成功
[root@VM-24-9-centos conf]# curl http://x.x.x.x:9200/
{
  "name" : "node3",
  "cluster_name" : "jinx",
  "cluster_uuid" : "5aRGIwI0T-qHks6vXzRNQQ",
  "version" : {
    "number" : "7.14.2",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "6bc13727ce758c0e943c3c21653b3da82f627f75",
    "build_date" : "2021-09-15T10:18:09.722761972Z",
    "build_snapshot" : false,
    "lucene_version" : "8.9.0",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}

部署ElasticSearch-Head

​ ElasticSearch-Head是一个管理界面,可以查看ElasticSearch相关信息

1.拉取ElasticSearch-Head镜像
[root@VM-24-9-centos ~]#  docker pull mobz/elasticsearch-head:5
2.运行ElasticSearch-Head容器
# 创建容器
[root@VM-24-9-centos ~]# docker run -d --name es_admin -p 9100:9100 mobz/elasticsearch-head:5

# pc端访问 IP:9100 即可用管理工具查看集群信息了

部署Kibana

1.拉取镜像
# 拉取镜像
[root@VM-24-9-centos conf.d]# docker pull kibana:7.14.2
2.创建挂载目录
# 创建挂载目录
[root@VM-24-9-centos conf]# mkdir -p /data/elk/kibana/

# 创建配置文件
[root@VM-24-9-centos kibana]# mkdir /data/elk/kibana/conf
[root@VM-24-9-centos kibana]# vim /data/elk/kibana/conf/kibana.yml
server.name: kibana
# kibana的主机地址 0.0.0.0可表示监听所有IP
server.host: "0.0.0.0"
# kibana访问es的URL
elasticsearch.hosts: [ "http://x.x.x.x:9200" ]
elasticsearch.username: 'kibana'
elasticsearch.password: '123456'
# 显示登陆页面
xpack.monitoring.ui.container.elasticsearch.enabled: true
# 语言
i18n.locale: "zh-CN"
server.publicBaseUrl: "http://x.x.x.x:9200"
3.运行容器
[root@VM-24-9-centos conf]# docker run -d -p 5601:5601 --privileged=true --name=kibana -v/data/elk/kibana/conf/kibana.yml:/usr/share/kibana/config/kibana.yml kibana:7.14.2

部署Logstash

1.拉取镜像
[root@VM-24-9-centos ~]# docker pull logstash:7.14.2
2.创建容器和挂载目录并复制配置文件
# 创建容器
[root@VM-24-9-centos ~]# docker run -d  -p 5044:5044 --name logstash logstash:7.14.2

# 创建挂载目录
# 给data目录赋权限,不然启动新容器挂载目录的时候会报权限错误 ArgumentError: Path "/usr/share/logstash/data" must be a writable directory. It is not writable.
[root@VM-24-9-centos ~]# mkdir -p /data/elk/logstash/data && chmod 777 /data/elk/logstash/data

# 拷贝容器内目录
[root@VM-24-9-centos ~]# docker cp logstash:/usr/share/logstash/config  /data/elk/logstash/
[root@VM-24-9-centos ~]# docker cp logstash:/usr/share/logstash/data /data/elk/logstash/
[root@VM-24-9-centos ~]# docker cp logstash:/usr/share/logstash/pipeline /data/elk/logstash/

# 删除容器(只是为了拿到原始配置)
[root@VM-24-9-centos ~]# docker rm -f logstash

# 此时目录如下
[root@VM-16-7-centos elk]# tree
.
└── logstash
    ├── config
    │   ├── jvm.options
    │   ├── log4j2.properties
    │   ├── logstash-sample.conf
    │   ├── logstash.yml
    │   ├── pipelines.yml
    │   └── startup.options
    ├── data
    │   ├── dead_letter_queue
    │   ├── queue
    │   └── uuid
    └── pipeline
        └── logstash.conf

6 directories, 8 files

Mysql数据同步需要 mysql 的 jdbc connector,可去官网(https://dev.mysql.com/downloads/connector/j/)下载 jar 包,下载的具体版本参照自己服务器中 mysql 的版本,下载之后放于 config 文件中

3.修改复制出来的配置文件

3.1 配置用于mysql和es的数据管道
# 在 pipeline 目录下新建一个 logstash_test.conf 文件 作为mysql和es的数据管道
[root@VM-24-9-centos pipeline]# vim logstash_test.conf
input {
    jdbc {
        # 设置 MySql/MariaDB 数据库url以及数据库名称
        #jdbc_connection_string => "jdbc:mysql://x.x.x.x:3306/450w?useSSL=false"
        jdbc_connection_string => "jdbc:mysql://x.x.x.x:3306/450w?useSSL=false"
        # 用户名和密码
        jdbc_user => "jing"
        jdbc_password => "123456"
        # 数据库驱动所在位置,可以是绝对路径或者相对路径,MySQLConnector8.0 之后不用这个参数了,只需指定 jdbc_driver_class,并且值为 com.mysql.cj.jdbc.Driver
        jdbc_driver_library => "/usr/share/logstash/config/mysql-connector-java-8.0.26.jar"
        # 驱动类名 MySQLConnector8.0 版本之后值为 com.mysql.cj.jdbc.Driver
        jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
        # 开启分页
        jdbc_paging_enabled => "true"
        # 分页每页数量,可以自定义
        jdbc_page_size => "100000"
        # 执行的sql文件路径
        # statement_filepath => "/usr/share/logstash/pipeline/450w-sync.sql"
        statement => "SELECT * FROM test"
        # 设置定时任务间隔  含义:分、时、天、月、年,全部为*默认含义为每分钟跑一次任务
        schedule => "* * * * *"
        # 索引类型
        type => "_doc"
        # 是否开启记录上次追踪的结果,也就是上次更新的时间,这个会记录到 last_run_metadata_path 的文件
        use_column_value => true
        # 记录上一次追踪的结果值
        last_run_metadata_path => "/usr/share/logstash/pipeline/track_time"
        # 如果 use_column_value 为true, 配置本参数,追踪的 column 名,可以是自增id或者时间
        tracking_column => "Id"
        # tracking_column 对应字段的类型
        tracking_column_type => "numeric"
        # 是否清除 last_run_metadata_path 的记录,true则每次都从头开始查询所有的数据库记录
        clean_run => false
        # 数据库字段名称大写转小写
        lowercase_column_names => false
    }
}
filter {
    json {
        source => "message"
        remove_field => ["message"]
    }
}
output {
   elasticsearch {
       # es地址
       hosts => ["x.x.x.x:9201","x.x.x.x:9202","x.x.x.x:9203"]
       # 同步的索引名
       index => "450w"
       # 设置_docID和数据相同
       document_id => "%{Id}"
   }
   # 日志输出
   stdout {
       codec => json_lines
   }
}
3.2修改 jvm.options 配置文件

如果服务器内存不大,可以修改jvm内存分配,修改 /data/elk/logstash/config/jvm.options 配置文件 把 -Xms1g -Xmx1g 改为 -Xms512m -Xmx512m

[root@VM-0-17-centos config]# vim jvm.options
## JVM configuration

# Xms represents the initial size of total heap space
# Xmx represents the maximum size of total heap space

-Xms512m
-Xmx512m

################################################################
## Expert settings
################################################################
##
## All settings below this section are considered
## expert settings. Don't tamper with them unless
## you understand what you are doing
##
################################################################

## GC configuration
8-13:-XX:+UseConcMarkSweepGC
8-13:-XX:CMSInitiatingOccupancyFraction=75
8-13:-XX:+UseCMSInitiatingOccupancyOnly

## Locale
# Set the locale language
#-Duser.language=en

# Set the locale country
#-Duser.country=US

# Set the locale variant, if any
#-Duser.variant=

## basic

# set the I/O temp directory
#-Djava.io.tmpdir=$HOME

# set to headless, just in case
-Djava.awt.headless=true

# ensure UTF-8 encoding by default (e.g. filenames)
-Dfile.encoding=UTF-8

# use our provided JNA always versus the system one
#-Djna.nosys=true

# Turn on JRuby invokedynamic
-Djruby.compile.invokedynamic=true
# Force Compilation
-Djruby.jit.threshold=0
# Make sure joni regexp interruptability is enabled
-Djruby.regexp.interruptible=true
"jvm.options" 81L, 2038C
3.3修改 pipelines.yml 配置文件

pipelines.yml 文件,是用来配置数据渠道的

# 先查看默认配置
[root@VM-0-17-centos config]# vim pipelines.yml
# This file is where you define your pipelines. You can define multiple.
# For more information on multiple pipelines, see the documentation:
#   https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html

- pipeline.id: main
  path.config: "/usr/share/logstash/pipeline"

#默认配置只配置了一个管道,并指向 pipeline 目录

多管道配置

​ 如果你想一个管道Id指向多个配置文件可以使用*通配符,配置文件默认指向 pipeline 目录应该是一个道理

- pipeline.id: main
  path.config: "/usr/share/logstash/pipeline/*.conf"

​ 如果你想各自指向各自的配置文件可以如下配置

- pipeline.id: table1
  path.config: "/usr/share/logstash/pipeline/table1.conf"
- pipeline.id: table2
  path.config: "/usr/share/logstash/pipeline/table2.conf"
- pipeline.id: table3
  path.config: "/usr/share/logstash/pipeline/table3.conf"

这里的每个单独的conf文件都是在 pipeline 目录下写好的文件,比如刚才自己创建的 logstash_test.conf 配置文件

可参考:https://www.jianshu.com/p/9da006b4bec4

3.4 logstash.yml 配置
[root@VM-16-7-centos config]# vim logstash.yml
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://x.x.x.x:9201","http://x.x.x.x:9202","http://x.x.x.x:9203" ]
# 如果是单机就配置一个地址,如果是集群就配置集群地址

最后我们配置好之后的目录应该是这样的

[root@VM-0-17-centos logstash]# tree
.
|-- config
|   |-- jvm.options
|   |-- log4j2.properties
|   |-- logstash-sample.conf
|   |-- logstash.yml
|   |-- mysql-connector-java-8.0.26.jar
|   |-- pipelines.yml
|   `-- startup.options
|-- data
`-- pipeline
    |-- logstash.conf
    `-- logstash_test.conf

创建 渠道文件 statement_filepath 配置的sql文件

[root@VM-16-7-centos pipeline]# vim 450w-sync.sql
SELECT `Id`, `Name`, `Birthday`, `Address`, `Date` FROM `table`;

​ mysql同步数据到es是需要 logstash-input-jdbc 和 logstash-output-jdbc 这两个插件的,logstash-input-jdbc容器中已经内置,我们只需要安装 logstash-output-jdbc插件

# 新建一个容器
[root@VM-24-9-centos ~]# docker run -d  -p 5044:5044 --name logstash logstash:7.14.2

# 进入容器安装插件,插件在github的,安装很慢
[root@VM-16-7-centos pipeline]#  docker exec -it logstash bash
[root@c8a33d8198cf logstash]# bin/logstash-plugin install logstash-output-jdbc

# cd到bin目录
[root@c8a33d8198cf bin]# logstash-plugin list

​ 如果不想每次删除容器后都需要重新安装插件,可以基于安装好插件的容器构建一个新的镜像

# 基于安装好插件的容器构建新镜像
[root@VM-16-7-centos pipeline]# docker commit logstash logstash_ouptut:7.14.2

​ 然后使用新构建的镜像启动新容器

# 先删除基础容器再创建新容器
[root@VM-16-7-centos pipeline]# docker rm -f logstash

# 创建新容器
# Run the image that has logstash-output-jdbc baked in. --user root is needed
# because the default logstash user lacks write permission on the mounted
# directories; TZ keeps log timestamps in local time.
docker run -d \
  --name logstash \
  --user root \
  -p 5044:5044 -p 9600:9600 \
  -e TZ=Asia/Shanghai \
  -v /data/elk/logstash/config:/usr/share/logstash/config \
  -v /data/elk/logstash/pipeline:/usr/share/logstash/pipeline \
  -v /data/elk/logstash/data:/usr/share/logstash/data \
  logstash_ouptut:7.14.2

# --user root 以root权限运行容器,logstash默认是以logstash用户组和用户启动的,但是默认用户目录权限不足,会报错

Logstash Output 的话,需要安装 logstash-output-jdbc插件 https://github.com/theangryangel/logstash-output-jdbc

关于 unable to load /data/elk/logstash/pipeline/mysql-connector-java-8.0.11.jar from :jdbc_driver_library报错

安装 logstash-output-jdbc插件

# 新建一个容器
[root@VM-24-9-centos ~]# docker run -d  -p 5044:5044 --name logstash logstash:7.14.2
# 进入容器安装插件,插件是外网的,安装很慢
[root@VM-16-7-centos pipeline]#  docker exec -it logstash bash
[root@c8a33d8198cf logstash]# bin/logstash-plugin install logstash-output-jdbc

关于 unable to load /data/elk/logstash/pipeline/mysql-connector-java-8.0.11.jar from :jdbc_driver_library报错

MySQL Connector/J 8.0 之后,java.sql.Driver 在 MySQL Connector/J 中实现的类的名称已从 com.mysql.jdbc.Driver 更改为 com.mysql.cj.jdbc.Driver。旧的类名已被弃用。

官方说明:https://dev.mysql.com/doc/connector-j/8.0/en/connector-j-api-changes.html

所以,直接把 MySQL Connector 的jar包复制进容器的 logstash-core/lib/jars 目录

[root@VM-16-7-centos pipeline]# docker cp /data/elk/logstash/pipeline/mysql-connector-java-8.0.26.jar logstash:/usr/share/logstash/logstash-core/lib/jars

然后把当前容器重新构建成一个新的镜像

[root@VM-16-7-centos pipeline]# docker commit logstash logstash-mysql-connector:8.0.26

然后使用新构建的镜像启动新容器

# 先删除基础容器再创建新容器
[root@VM-16-7-centos pipeline]# docker rm -f logstash

# 创建新容器
# Run the image that has the MySQL Connector/J jar baked into
# logstash-core/lib/jars. --user root avoids permission errors on the mounted
# directories; TZ keeps log timestamps in local time.
docker run -d \
  --name logstash \
  --user root \
  -p 5044:5044 -p 9600:9600 \
  -e TZ=Asia/Shanghai \
  -v /data/elk/logstash/config:/usr/share/logstash/config \
  -v /data/elk/logstash/pipeline:/usr/share/logstash/pipeline \
  -v /data/elk/logstash/data:/usr/share/logstash/data \
  logstash-mysql-connector:8.0.26

# --user root 以root权限运行容器,logstash默认是以logstash用户组和用户启动的,但是默认用户目录权限不足,会报错

拓展

渠道配置文件检测
进入容器
bin/logstash -f /usr/local/logstash/config/logstash-test.conf -t

记录坑:

Thu Dec 20 12:50:09 CST 2018 WARN: Establishing SSL connection without server's identity verification is not recommended. According to MySQL 5.5.45+, 5.6.26+ and 5.7.6+ requirements SSL connection must be established by default if explicit option isn't set. For compliance with existing applications not using SSL the verifyServerCertificate property is set to 'false'. You need either to explicitly disable SSL by setting useSSL=false, or set useSSL=true and provide truststore for server certificate verification.

在配置文件的连接串后面加上useSSL=false
jdbc:mysql://localhost:3306/testdb?useSSL=false


Error: com.mysql.cj.jdbc.Driver not loaded. :jdbc_driver_library is not set, are you sure you included the proper driver client libraries in your classpath?

可以尝试将驱动即mysql-connector的jar包拷贝到容器的 logstash-core/lib/jars 目录下


Could not execute action: PipelineAction::Create<main>

当您在类加载器路径之外结合较新的 jdk 版本使用最新的 jdbc 驱动程序时,可能会发生类加载器问题。在 github 上有几个问题。把驱动放在logstash文件夹下<logstash-install-dir>/vendor/jar/jdbc/(需要先创建这个文件夹)。如果这不起作用,请将驱动程序移到下方,<logstash-install-dir>/logstash-core\lib\jars并且不要在配置文件中提供任何驱动程序路径:jdbc_driver_library => ""

参考链接:https://stackoverflow.com/questions/59698179/how-to-send-data-from-http-input-to-elasticsearch-using-logstash-ans-jdbc-stream/59712945#59712945


Failed to execute action {:action=>LogStash::PipelineAction::Create/pipeline

多数原因是logstash.conf文件(渠道文件)配置出错,检查一下


管道配置文件里面的 jdbc_user 建议不要使用 root ,会报 Access denied for user 'root'@'x.x.x.x' 错误
重新配置了root权限还是报错,暂时没找到原因,所以换了个用户就行了
https://www.cnblogs.com/kangfei/p/14802521.html