Centos8 部署 ElasticSearch 集群並搭建 ELK,基於Logstash同步MySQL數據到ElasticSearch
Centos8安裝Docker
1.更新一下yum
[root@VM-24-9-centos ~]# yum -y update
2.安裝containerd.io
# centos8默認使用podman代替docker,所以需要containerd.io
[root@VM-24-9-centos ~]# yum install https://download.docker.com/linux/fedora/30/x86_64/stable/Packages/containerd.io-1.2.6-3.3.fc30.x86_64.rpm -y
# 安裝一些其他依賴
[root@VM-24-9-centos ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@VM-24-9-centos ~]# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
3.安裝Docker
[root@VM-24-9-centos ~]# yum install -y docker-ce
4.啟動Docker
# 啟動docker
[root@VM-24-9-centos ~]# systemctl start docker
# 設置開機自啟
[root@VM-24-9-centos ~]# systemctl enable docker
5.設置容器開機自啟及其他命令
[root@VM-16-7-centos ~]# docker update --restart=always 容器名
--restart具體參數值詳細資訊:
no:容器退出時,不重啟容器
on-failure:只有在非0狀態退出時才重新啟動容器
always:無論退出狀態是如何,都重啟容器
# 根據容器名模糊批量停止/刪除容器
# *號表示模糊查詢
[root@VM-24-9-centos ~]# docker stop $(docker ps -q -f name="容器名*")
# 批量刪除
[root@VM-24-9-centos ~]# docker rm $(docker ps -qa -f name="es*")
ELK部署
部署ElasticSearch集群
1.拉取鏡像及批量生成配置文件
# 拉取鏡像
[root@VM-24-9-centos ~]# docker pull elasticsearch:7.14.2
# 修改虛擬記憶體限制,以及開啟埠轉發
[root@VM-24-9-centos etc]# vim /etc/sysctl.conf
vm.max_map_count=262144
net.ipv4.ip_forward = 1
[root@VM-24-9-centos etc]# /sbin/sysctl -p
# 生成配置文件及目錄
# Generate the config file and data directory for each of the 3 ES nodes.
# The plugins directory is shared by all nodes, so create it once, outside the loop.
# NOTE: path fixed to /data/elk/es/plugins to match the container mount used later
# (-v /data/elk/es/plugins/:/usr/share/elasticsearch/plugins) and the IK install dir.
mkdir -p /data/elk/es/plugins
for port in 1 2 3; do
  mkdir -p "/data/elk/es/node-${port}/conf" "/data/elk/es/node-${port}/data"
  # Elasticsearch runs as a non-root user inside the container; open up the data dir.
  chmod 777 "/data/elk/es/node-${port}/data"
  # '>' truncates, so re-running this loop does not append duplicate config
  # (the original used touch + '>>', which duplicated settings on re-run).
  cat << EOF > "/data/elk/es/node-${port}/conf/es.yml"
cluster.name: jinx
node.name: node${port}
# node.master / node.data are deprecated in 7.x in favour of node.roles,
# but are still accepted by 7.14.
node.master: true
node.data: true
bootstrap.memory_lock: false
network.host: 0.0.0.0
http.port: 920${port}
transport.tcp.port: 930${port}
discovery.seed_hosts: ["x.x.x.x:9301","x.x.x.x:9302","x.x.x.x:9303"]
cluster.initial_master_nodes: ["node1","node2","node3"]
cluster.routing.allocation.cluster_concurrent_rebalance: 32
cluster.routing.allocation.node_concurrent_recoveries: 32
cluster.routing.allocation.node_initial_primaries_recoveries: 32
http.cors.enabled: true
http.cors.allow-origin: "*"
# Ignored by 7.x (zen discovery was removed); kept for reference only.
discovery.zen.minimum_master_nodes: 2
EOF
done
# 目錄結構如下
[root@VM-24-9-centos data]# tree
.
└── elk
├── es
│ ├── node-1
│ │ ├── conf
│ │ │ └── es.yml
│ │ └── data
│ ├── node-2
│ │ ├── conf
│ │ │ └── es.yml
│ │ └── data
│ └── node-3
│ ├── conf
│ │ └── es.yml
│ └── data
└── plugins
12 directories, 3 files
2.批量創建容器及查看集群資訊
# 批量創建容器
# Start one Elasticsearch container per node config, publishing both the
# HTTP (920x) and transport (930x) ports and mounting config, data and plugins.
for port in 1 2 3; do
  docker run -d \
    --name "es-${port}" \
    -p "920${port}:920${port}" \
    -p "930${port}:930${port}" \
    -e ES_JAVA_OPTS="-Xms512m -Xmx512m" \
    -e ES_MIN_MEM=128m \
    -e ES_MAX_MEM=2048m \
    -v "/data/elk/es/node-${port}/conf/es.yml:/usr/share/elasticsearch/config/elasticsearch.yml" \
    -v "/data/elk/es/node-${port}/data/:/usr/share/elasticsearch/data/" \
    -v /data/elk/es/plugins/:/usr/share/elasticsearch/plugins \
    elasticsearch:7.14.2
done
# 查看單個節點資訊
[root@VM-24-9-centos ~]# curl http://x.x.x.x:9201/
{
"name" : "node1",
"cluster_name" : "jinx",
"cluster_uuid" : "Vjb7cu6fQ6y2-ZWk0YGIiQ",
"version" : {
"number" : "7.14.2",
"build_flavor" : "default",
"build_type" : "docker",
"build_hash" : "6bc13727ce758c0e943c3c21653b3da82f627f75",
"build_date" : "2021-09-15T10:18:09.722761972Z",
"build_snapshot" : false,
"lucene_version" : "8.9.0",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}
# 查看集群資訊
[root@VM-24-9-centos ~]# curl http://x.x.x.x:9201/_cat/nodes?pretty
172.17.0.2 37 97 0 0.00 0.00 0.08 mdi * node1
172.17.0.4 35 97 0 0.00 0.00 0.08 mdi - node3
172.17.0.3 39 97 1 0.00 0.00 0.08 mdi - node2
3.安裝IK分詞器
先下載離線包:https://github.com/medcl/elasticsearch-analysis-ik/releases 然後把離線包拷貝到 /data/elk/es/plugins/analysis-ik 目錄解壓
# 安裝 unzip 解壓工具
[root@VM-24-9-centos plugins]# yum install unzip
# 創建 analysis-ik 目錄,並把壓縮包複製到此目錄,並解壓,然後重啟容器即可,因為容器掛載了 plugins 目錄,重啟後便會生效
[root@VM-24-9-centos elk]# mkdir /data/elk/es/plugins/analysis-ik
[root@VM-24-9-centos analysis-ik]# unzip elasticsearch-analysis-ik-7.14.2.zip
[root@VM-24-9-centos analysis-ik]# docker restart $(docker ps -aq -f name="es*")
使用Nginx做集群負載均衡
1.獲取鏡像
# 拉取鏡像 此處我們拉取的是官方最新鏡像,其它版本可以去DockerHub查詢
[root@VM-24-9-centos ~]# docker pull nginx
2.創建容器
# 創建容器 第一個nginx是容器名,第二個nginx是鏡像名
[root@VM-24-9-centos ~]# docker run -d -p 9200:9200 --name nginx nginx
3.把容器內的配置文件等複製到容器外用於掛載
# nginx的配置文件日誌文件及默認的頁面分別放於容器內的 /etc/nginx /usr/share/nginx/html /var/log/nginx 中,我們需要將其掛載到容器外部
# 創建三個文件夾 conf html logs
[root@VM-24-9-centos data]# mkdir -p /data/nginx/{conf.d,html,logs}
# 將容器內的 nginx.conf配置文件和default.conf配置文件複製出來
[root@VM-24-9-centos data]# docker cp nginx:/usr/share/nginx/html /data/nginx
[root@VM-24-9-centos data]# docker cp nginx:/etc/nginx/nginx.conf /data/nginx
[root@VM-24-9-centos data]# docker cp nginx:/etc/nginx/conf.d/default.conf /data/nginx/conf.d/default.conf
# 查看目錄結構
[root@VM-24-9-centos nginx]# cd /data/nginx
[root@VM-24-9-centos nginx]# ll
total 16
drwxr-xr-x 2 root root 4096 Nov 16 10:48 conf.d
drwxr-xr-x 2 root root 4096 Nov 16 10:48 html
drwxr-xr-x 2 root root 4096 Nov 16 10:48 logs
-rw-r--r-- 1 root root 648 Nov 2 23:01 nginx.conf
# 在 conf.d 目錄下再建一個 es.conf 配置文件用於做負載均衡
[root@VM-24-9-centos conf]# vim /data/nginx/conf.d/es.conf
# Round-robin load balancer across the three Elasticsearch nodes.
upstream es {
    server x.x.x.x:9201 weight=1;
    server x.x.x.x:9202 weight=1;
    server x.x.x.x:9203 weight=1;
}
server {
    listen 9200;
    server_name localhost;

    #charset koi8-r;
    #access_log logs/host.access.log main;

    location / {
        # The scheme is required: '//es' is not a valid proxy_pass target,
        # it must be 'http://es' (referring to the upstream block above).
        proxy_pass http://es;
        # root html;
        # index index.html index.htm;
    }

    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root html;
    }
}
4.刪除之前的容器,然後創建新的容器把目錄掛載上去
# 刪除容器
[root@VM-24-9-centos nginx]# docker rm -f nginx
# 創建新的容器 --privileged=true 容器內部對掛載的目錄擁有讀寫等特權
docker run -d -p 9200:9200 --name nginx_9200 \
-v /data/nginx/html:/usr/share/nginx/html \
-v /data/nginx/logs:/var/log/nginx \
-v /data/nginx/conf.d:/etc/nginx/conf.d \
-v /data/nginx/nginx.conf:/etc/nginx/nginx.conf \
--privileged=true \
nginx
5.訪問負載均衡配置的地址查看是否成功
[root@VM-24-9-centos conf]# curl http://x.x.x.x:9200/
{
"name" : "node3",
"cluster_name" : "jinx",
"cluster_uuid" : "5aRGIwI0T-qHks6vXzRNQQ",
"version" : {
"number" : "7.14.2",
"build_flavor" : "default",
"build_type" : "docker",
"build_hash" : "6bc13727ce758c0e943c3c21653b3da82f627f75",
"build_date" : "2021-09-15T10:18:09.722761972Z",
"build_snapshot" : false,
"lucene_version" : "8.9.0",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}
部署ElasticSearch-Head
ElasticSearch-Head是一個管理介面,可以查看ElasticSearch相關資訊
1.拉取ElasticSearch-Head鏡像
[root@VM-24-9-centos ~]# docker pull mobz/elasticsearch-head:5
2.運行ElasticSearch-Head容器
# 創建容器
[root@VM-24-9-centos ~]# docker run -d --name es_admin -p 9100:9100 mobz/elasticsearch-head:5
# pc端訪問 IP:9100 即可用管理工具查看集群資訊了
部署Kibana
1.拉取鏡像
# 拉取鏡像
[root@VM-24-9-centos conf.d]# docker pull kibana:7.14.2
2.創建掛載目錄
# 創建掛載目錄
[root@VM-24-9-centos conf]# mkdir -p /data/elk/kibana/
# 創建配置文件
[root@VM-24-9-centos kibana]# mkdir /data/elk/kibana/conf
[root@VM-24-9-centos kibana]# vim /data/elk/kibana/conf/kibana.yml
server.name: kibana
# Kibana listen address; 0.0.0.0 listens on all interfaces
server.host: "0.0.0.0"
# URL Kibana uses to reach Elasticsearch (the http:// scheme is required)
elasticsearch.hosts: [ "http://x.x.x.x:9200" ]
elasticsearch.username: 'kibana'
elasticsearch.password: '123456'
# Show the login page
xpack.monitoring.ui.container.elasticsearch.enabled: true
# UI language
i18n.locale: "zh-CN"
# Publicly reachable URL of Kibana itself — this must be Kibana's own
# address (port 5601), not the Elasticsearch/nginx port 9200.
server.publicBaseUrl: "http://x.x.x.x:5601"
3.運行容器
[root@VM-24-9-centos conf]# docker run -d -p 5601:5601 --privileged=true --name=kibana -v /data/elk/kibana/conf/kibana.yml:/usr/share/kibana/config/kibana.yml kibana:7.14.2
部署Logstash
1.拉取鏡像
[root@VM-24-9-centos ~]# docker pull logstash:7.14.2
2.創建容器和掛載目錄並複製配置文件
# 創建容器
[root@VM-24-9-centos ~]# docker run -d -p 5044:5044 --name logstash logstash:7.14.2
# 創建掛載目錄
# 給data目錄賦許可權,不然啟動新容器掛載目錄的時候會報許可權錯誤 ArgumentError: Path "/usr/share/logstash/data" must be a writable directory. It is not writable.
[root@VM-24-9-centos ~]# mkdir -p /data/elk/logstash/data && chmod 777 /data/elk/logstash/data
# 拷貝容器內目錄
[root@VM-24-9-centos ~]# docker cp logstash:/usr/share/logstash/config /data/elk/logstash/
[root@VM-24-9-centos ~]# docker cp logstash:/usr/share/logstash/data /data/elk/logstash/
[root@VM-24-9-centos ~]# docker cp logstash:/usr/share/logstash/pipeline /data/elk/logstash/
# 刪除容器(只是為了拿到原始配置)
[root@VM-24-9-centos ~]# docker rm -f logstash
# 此時目錄如下
[root@VM-16-7-centos elk]# tree
.
└── logstash
├── config
│ ├── jvm.options
│ ├── log4j2.properties
│ ├── logstash-sample.conf
│ ├── logstash.yml
│ ├── pipelines.yml
│ └── startup.options
├── data
│ ├── dead_letter_queue
│ ├── queue
│ └── uuid
└── pipeline
└── logstash.conf
6 directories, 8 files
Mysql數據同步需要 mysql 的 jdbc connector,可去官網(https://dev.mysql.com/downloads/connector/j/)下載 jar 包,下載的具體版本參照自己伺服器中 mysql 的版本,下載之後放於 config 文件中
3.修改複製出來的配置文件
3.1 配置用於mysql和es的數據管道
# 在 pipeline 目錄下新建一個 logstash_test.conf 文件 作為mysql和es的數據管道
[root@VM-24-9-centos pipeline]# vim logstash_test.conf
input {
jdbc {
# MySQL/MariaDB JDBC URL, including the target database name (450w)
#jdbc_connection_string => "jdbc:mysql://x.x.x.x:3306/450w?useSSL=false"
jdbc_connection_string => "jdbc:mysql://x.x.x.x:3306/450w?useSSL=false"
# Database credentials
jdbc_user => "jing"
jdbc_password => "123456"
# Path to the JDBC driver jar (absolute or relative). With MySQL Connector 8.0+
# this parameter is no longer needed as long as jdbc_driver_class is set to
# com.mysql.cj.jdbc.Driver
jdbc_driver_library => "/usr/share/logstash/config/mysql-connector-java-8.0.26.jar"
# Driver class name; for MySQL Connector 8.0+ use com.mysql.cj.jdbc.Driver
jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
# Enable paging of the query results
jdbc_paging_enabled => "true"
# Rows per page, adjustable
jdbc_page_size => "100000"
# Path to a SQL file to execute (alternative to the inline statement below)
# statement_filepath => "/usr/share/logstash/pipeline/450w-sync.sql"
statement => "SELECT * FROM test"
# Cron-style schedule: minute hour day-of-month month day-of-week;
# all '*' means the query runs once per minute
schedule => "* * * * *"
# Event type field (mapping types are removed in ES 7 — presumably legacy; verify)
type => "_doc"
# Track the last-seen column value between runs; the value is persisted
# in the file given by last_run_metadata_path
use_column_value => true
# File where the last tracked value is stored
last_run_metadata_path => "/usr/share/logstash/pipeline/track_time"
# Column to track when use_column_value is true (auto-increment id or timestamp)
tracking_column => "Id"
# Type of the tracking column
tracking_column_type => "numeric"
# If true, ignore last_run_metadata_path and re-query all rows from scratch
clean_run => false
# Whether to lowercase column names in the resulting events
lowercase_column_names => false
}
}
filter {
json {
source => "message"
remove_field => ["message"]
}
}
output {
elasticsearch {
# Elasticsearch node addresses
hosts => ["x.x.x.x:9201","x.x.x.x:9202","x.x.x.x:9203"]
# Target index name
index => "450w"
# Use the row's Id column as the document _id so re-syncs upsert instead of duplicating
document_id => "%{Id}"
}
# Also print each event to stdout as JSON lines (for debugging)
stdout {
codec => json_lines
}
}
3.2修改 jvm.options 配置文件
如果伺服器記憶體不大,可以修改jvm記憶體分配,修改 /data/elk/logstash/config/jvm.options 配置文件 把 -Xms1g -Xmx1g 改為 -Xms512m -Xmx512m
[root@VM-0-17-centos config]# vim jvm.options
## JVM configuration
# Xms represents the initial size of total heap space
# Xmx represents the maximum size of total heap space
-Xms512m
-Xmx512m
################################################################
## Expert settings
################################################################
##
## All settings below this section are considered
## expert settings. Don't tamper with them unless
## you understand what you are doing
##
################################################################
## GC configuration
8-13:-XX:+UseConcMarkSweepGC
8-13:-XX:CMSInitiatingOccupancyFraction=75
8-13:-XX:+UseCMSInitiatingOccupancyOnly
## Locale
# Set the locale language
#-Duser.language=en
# Set the locale country
#-Duser.country=US
# Set the locale variant, if any
#-Duser.variant=
## basic
# set the I/O temp directory
#-Djava.io.tmpdir=$HOME
# set to headless, just in case
-Djava.awt.headless=true
# ensure UTF-8 encoding by default (e.g. filenames)
-Dfile.encoding=UTF-8
# use our provided JNA always versus the system one
#-Djna.nosys=true
# Turn on JRuby invokedynamic
-Djruby.compile.invokedynamic=true
# Force Compilation
-Djruby.jit.threshold=0
# Make sure joni regexp interruptability is enabled
-Djruby.regexp.interruptible=true
"jvm.options" 81L, 2038C
3.3修改 pipelines.yml 配置文件
pipelines.yml 文件,是用來配置數據渠道的
# 先查看默認配置
[root@VM-0-17-centos config]# vim pipelines.yml
# This file is where you define your pipelines. You can define multiple.
# For more information on multiple pipelines, see the documentation:
# https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html
- pipeline.id: main
path.config: "/usr/share/logstash/pipeline"
#默認配置只配置了一個管道,並指向 pipeline 目錄
多管道配置
如果你想一個管道Id指向多個配置文件可以使用*通配符,配置文件默認指向 pipeline 目錄應該是一個道理
- pipeline.id: main
path.config: "/usr/share/logstash/pipeline/*.conf"
如果你想各自指向各自的配置文件可以如下配置
- pipeline.id: table1
path.config: "/usr/share/logstash/pipeline/table1.conf"
- pipeline.id: table2
path.config: "/usr/share/logstash/pipeline/table2.conf"
- pipeline.id: table3
path.config: "/usr/share/logstash/pipeline/table3.conf"
這裡的每個單獨的conf文件都是在 pipeline 目錄下寫好的文件,比如剛才自己創建的 logstash_test.conf 配置文件
可參考:https://www.jianshu.com/p/9da006b4bec4
3.4 logstash.yml 配置
[root@VM-16-7-centos config]# vim logstash.yml
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://x.x.x.x:9201","http://x.x.x.x:9202","http://x.x.x.x:9203" ]
# 如果是單機就配置一個地址,如果是集群就配置集群地址
最後我們配置好之後的目錄應該是這樣的
[root@VM-0-17-centos logstash]# tree
.
|-- config
| |-- jvm.options
| |-- log4j2.properties
| |-- logstash-sample.conf
| |-- logstash.yml
| |-- mysql-connector-java-8.0.26.jar
| |-- pipelines.yml
| `-- startup.options
|-- data
`-- pipeline
|-- logstash.conf
`-- logstash_test.conf
創建 渠道文件 statement_filepath 配置的sql文件
[root@VM-16-7-centos pipeline]# vim 450w-sync.sql
SELECT `Id`, `Name`, `Birthday`, `Address`, `Date` FROM `table`;
mysql同步數據到es是需要 logstash-input-jdbc
和 logstash-output-jdbc
這兩個插件的,logstash-input-jdbc
容器中已經內置,我們只需要安裝 logstash-output-jdbc
插件
# 新建一個容器
[root@VM-24-9-centos ~]# docker run -d -p 5044:5044 --name logstash logstash:7.14.2
# 進入容器安裝插件,插件在github的,安裝很慢
[root@VM-16-7-centos pipeline]# docker exec -it logstash bash
[root@c8a33d8198cf logstash]# bin/logstash-plugin install logstash-output-jdbc
# cd到bin目錄
[root@c8a33d8198cf bin]# logstash-plugin list
如果不想每次刪除容器後都需要重新安裝插件,可以基於安裝好插件的容器構建一個新的鏡像
# 基於安裝好插件的容器構建新鏡像
[root@VM-16-7-centos pipeline]# docker commit logstash logstash_ouptut:7.14.2
然後使用新構建的鏡像啟動新容器
# 先刪除基礎容器再創建新容器
[root@VM-16-7-centos pipeline]# docker rm -f logstash
# 創建新容器
docker run -d -p 5044:5044 -p 9600:9600 --name logstash --user root \
-v /data/elk/logstash/config:/usr/share/logstash/config \
-v /data/elk/logstash/pipeline:/usr/share/logstash/pipeline \
-v /data/elk/logstash/data:/usr/share/logstash/data \
-e TZ=Asia/Shanghai \
logstash_ouptut:7.14.2
# --user root 以root許可權運行容器,logstash默認是以logstash用戶組和用戶啟動的,但是默認用戶目錄許可權不足,會報錯
Logstash Output 的話,需要安裝 logstash-output-jdbc
插件 https://github.com/theangryangel/logstash-output-jdbc
關於 unable to load /data/elk/logstash/pipeline/mysql-connector-java-8.0.11.jar from :jdbc_driver_library
報錯
安裝 logstash-output-jdbc
插件
# 新建一個容器
[root@VM-24-9-centos ~]# docker run -d -p 5044:5044 --name logstash logstash:7.14.2
# 進入容器安裝插件,插件是外網的,安裝很慢
[root@VM-16-7-centos pipeline]# docker exec -it logstash bash
[root@c8a33d8198cf logstash]# bin/logstash-plugin install logstash-output-jdbc
關於 unable to load /data/elk/logstash/pipeline/mysql-connector-java-8.0.11.jar from :jdbc_driver_library
報錯
MySQL Connector/J 8.0 之後java.sql.Driver
在 MySQL Connector/J 中實現的類的名稱 已從 更改 com.mysql.jdbc.Driver
為 com.mysql.cj.jdbc.Driver
。舊的類名已被棄用。
官方說明:https://dev.mysql.com/doc/connector-j/8.0/en/connector-j-api-changes.html
所以,直接把 MySQL Connector 的jar包複製進容器 bin/logstash-core\lib\jars 目錄
[root@VM-16-7-centos pipeline]# docker cp /data/elk/logstash/pipeline/mysql-connector-java-8.0.26.jar logstash:/usr/share/logstash/logstash-core/lib/jars
然後把當前容器重新構建成一個新的鏡像
[root@VM-16-7-centos pipeline]# docker commit logstash logstash-mysql-connector:8.0.26
然後使用新構建的鏡像啟動新容器
# 先刪除基礎容器再創建新容器
[root@VM-16-7-centos pipeline]# docker rm -f logstash
# 創建新容器
docker run -d -p 5044:5044 -p 9600:9600 --name logstash --user root \
-v /data/elk/logstash/config:/usr/share/logstash/config \
-v /data/elk/logstash/pipeline:/usr/share/logstash/pipeline \
-v /data/elk/logstash/data:/usr/share/logstash/data \
-e TZ=Asia/Shanghai \
logstash-mysql-connector:8.0.26
# --user root 以root許可權運行容器,logstash默認是以logstash用戶組和用戶啟動的,但是默認用戶目錄許可權不足,會報錯
拓展:
渠道配置文件檢測
進入容器
bin/logstash -f /usr/local/logstash/config/logstash-test.conf -t
記錄坑:
Thu Dec 20 12:50:09 CST 2018 WARN: Establishing SSL connection without server's identity verification is not recommended. According to MySQL 5.5.45+, 5.6.26+ and 5.7.6+ requirements SSL connection must be established by default if explicit option isn't set. For compliance with existing applications not using SSL the verifyServerCertificate property is set to 'false'. You need either to explicitly disable SSL by setting useSSL=false, or set useSSL=true and provide truststore for server certificate verification.
在配置文件的連接串後面加上useSSL=false
jdbc:mysql://localhost:3306/testdb?useSSL=false
Error: com.mysql.cj.jdbc.Driver not loaded. :jdbc_driver_library is not set, are you sure you included the proper driver client libraries in your classpath?
可以嘗試將驅動器即mysql-connector的jar包拷貝到 容器的 bin\logstash-core\lib\jars 下
Could not execute action: PipelineAction::Create<main>
當您在類載入器路徑之外結合較新的 jdk 版本使用最新的 jdbc 驅動程式時,可能會發生類載入器問題。在 github 上有幾個問題。把驅動放在logstash文件夾下<logstash-install-dir>/vendor/jar/jdbc/(需要先創建這個文件夾)。如果這不起作用,請將驅動程式移到下方,<logstash-install-dir>/logstash-core\lib\jars並且不要在配置文件中提供任何驅動程式路徑:jdbc_driver_library => ""
參考鏈接:https://stackoverflow.com/questions/59698179/how-to-send-data-from-http-input-to-elasticsearch-using-logstash-ans-jdbc-stream/59712945#59712945
Failed to execute action {:action=>LogStash::PipelineAction::Create/pipeline
多數原因是logstash.conf文件(渠道文件)配置出錯,檢查一下
管道配置文件裡面的 jdbc_user 建議不要使用 root ,會報 Access denied for user 'root'@'x.x.x.x' 錯誤
重新配置了root許可權還是報錯,暫時沒找到原因,所以換了個用戶就行了
https://www.cnblogs.com/kangfei/p/14802521.html