Preface

As the title says, this is mainly a record of a log collection setup I built and deployed a while ago. Everything runs with docker-compose; as always, the point is the overall approach rather than the exact commands.

Approach: on a single server, run three ES nodes plus Kibana with docker-compose and enable SSL, then use Filebeat to ship the application logs on the servers into ES, and finally use Kibana for display.

Configuration

Create the directories

mkdir -p /data/es/{node-1/{data,certs,log,config},plugins}
mkdir -p /data/es/{node-2/{data,certs,log,config},plugins}
mkdir -p /data/es/{node-3/{data,certs,log,config},plugins}
mkdir -p /data/kibana/{cert,config}
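
Before bringing the containers up, the host usually needs two more things, otherwise Elasticsearch either refuses to start (vm.max_map_count too low) or cannot write to the bind-mounted directories (the official image runs as UID 1000). A minimal sketch for the directories created above:

# Raise the mmap limit Elasticsearch requires
sysctl -w vm.max_map_count=262144
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
# Let the elasticsearch user inside the containers (UID 1000) write to the bind mounts
chown -R 1000:1000 /data/es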

Deployment file

version: "3"
services:
  node-1:
    image: registry.cn-hangzhou.aliyuncs.com/bigdata_cloudnative/elasticsearch:7.17.5
    networks:
      bitdata:
        ipv4_address: 172.20.0.3
    container_name: node-1
    hostname: node-1
    environment:
      - "ES_JAVA_OPTS=-Xms1024m -Xmx1024m"
      - "TZ=Asia/Shanghai"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    ports:
      - "9200:9200"
    logging:
      driver: "json-file"
      options:
        max-size: "50m"
    volumes:
      - ./es/node-1/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - ./es/plugins:/usr/share/elasticsearch/plugins
      - ./es/node-1/data:/usr/share/elasticsearch/data
      - ./es/node-1/certs:/usr/share/elasticsearch/config/certs
      - ./es/node-1/log:/usr/share/elasticsearch/log
    healthcheck:
      test: ["CMD-SHELL", "curl -I http://localhost:9200 || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 5
  node-2:
    image: registry.cn-hangzhou.aliyuncs.com/bigdata_cloudnative/elasticsearch:7.17.5
    networks:
      bitdata:
        ipv4_address: 172.20.0.4
    container_name: node-2
    hostname: node-2
    environment:
      - "ES_JAVA_OPTS=-Xms1024m -Xmx1024m"
      - "TZ=Asia/Shanghai"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    ports:
      - "9201:9200"
    logging:
      driver: "json-file"
      options:
        max-size: "50m"
    volumes:
      - ./es/node-2/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - ./es/plugins:/usr/share/elasticsearch/plugins
      - ./es/node-2/data:/usr/share/elasticsearch/data
      - ./es/node-2/certs:/usr/share/elasticsearch/config/certs
      - ./es/node-2/log:/usr/share/elasticsearch/log
    healthcheck:
      test: ["CMD-SHELL", "curl -I http://localhost:9200 || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 5
  node-3:
    image: registry.cn-hangzhou.aliyuncs.com/bigdata_cloudnative/elasticsearch:7.17.5
    networks:
      bitdata:
        ipv4_address: 172.20.0.5
    container_name: node-3
    hostname: node-3
    environment:
      - "ES_JAVA_OPTS=-Xms1024m -Xmx1024m"
      - "TZ=Asia/Shanghai"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    ports:
      - "9202:9200"
    logging:
      driver: "json-file"
      options:
        max-size: "50m"
    volumes:
      - ./es/node-3/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - ./es/plugins:/usr/share/elasticsearch/plugins
      - ./es/node-3/data:/usr/share/elasticsearch/data
      - ./es/node-3/certs:/usr/share/elasticsearch/config/certs
      - ./es/node-3/log:/usr/share/elasticsearch/log
    healthcheck:
      test: ["CMD-SHELL", "curl -I http://localhost:9200 || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 5
  kibana:
    networks:
      bitdata:
        ipv4_address: 172.20.0.6
    container_name: kibana
    hostname: kibana
    image: registry.cn-hangzhou.aliyuncs.com/bigdata_cloudnative/kibana:7.17.5
    environment:
      TZ: 'Asia/Shanghai'
    volumes:
      - ./kibana/cert:/etc/kibana/cert
      - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
    ports:
      - 5601:5601
    healthcheck:
      test: ["CMD-SHELL", "curl -I http://localhost:5601 || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 5

# Custom bridge network shared by all the containers
networks:
  bitdata:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.0.0/24
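
Before starting anything, it can be worth letting Compose parse the file first; this is just a sanity check, assuming the file is saved as /data/docker-compose.yaml:

cd /data
# Prints the fully resolved configuration and fails on YAML or schema errors
docker-compose config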

Without SSL

elasticsearch.yml configuration file

Taking node-1 as the example; the only thing that differs on the other nodes is node.name.

vim /data/es/node-1/config/elasticsearch.yml
# Cluster name
cluster.name: elastic
# Name of this node
node.name: node-1
# Whether this node may be elected master
node.master: true
# Whether this node stores data
node.data: true
# Maximum number of nodes allowed to share this data path
node.max_local_storage_nodes: 3
# Custom node attribute (optional)
#node.attr.rack: r1
# Data directory
path.data: /usr/share/elasticsearch/data
# Log directory
path.logs: /usr/share/elasticsearch/log
# Lock memory at startup
#bootstrap.memory_lock: true
# Bind address. This one bit me: I originally put the host's physical IP here and startup
# kept failing with an invalid IP address, unable to bind port 9300. Just use 0.0.0.0.
network.host: 0.0.0.0
# HTTP port
http.port: 9200
# Transport port for node-to-node communication
transport.tcp.port: 9300
# Discovery defaults to 127.0.0.1:9300. To form a cluster with nodes on other hosts,
# list the master-eligible nodes here (new in ES 7.x); in practice, simply list every node.
discovery.seed_hosts: ["172.20.0.3","172.20.0.4","172.20.0.5"]
# Master-eligible nodes used for the very first election when the cluster bootstraps.
# If you leave it unset, Elasticsearch elects on its own; here we list all three nodes.
cluster.initial_master_nodes: ["node-1","node-2","node-3"]
# After a full cluster restart, block initial recovery until N nodes have started,
# i.e. the cluster only becomes usable once at least this many nodes are back.
gateway.recover_after_nodes: 2
# Require explicit index names when deleting indices (default is true)
#action.destructive_requires_name: true
# Disable the security features
xpack.security.enabled: false
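
Since the three node configs differ only in node.name, the files for node-2 and node-3 can simply be derived from node-1's; a small sketch using the paths created earlier:

# Copy node-1's config and rewrite node.name for the other two nodes
for n in 2 3; do
  sed "s/^node.name: node-1/node.name: node-$n/" /data/es/node-1/config/elasticsearch.yml > /data/es/node-$n/config/elasticsearch.yml
done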

kibana.yml

vim /data/kibana/config/kibana.yml
server.host: 0.0.0.0
# Listening port
server.port: 5601
server.name: "kibana"

# Elasticsearch URLs Kibana connects to; multiple entries are comma-separated
elasticsearch.hosts: ["http://172.20.0.3:9200","http://172.20.0.4:9200","http://172.20.0.5:9200"]
monitoring.ui.container.elasticsearch.enabled: true
# Username and password Kibana uses for Elasticsearch (if Elasticsearch has them set)
elasticsearch.username: ""
elasticsearch.password: ""
# Kibana UI language
i18n.locale: "zh-CN"

Startup

root@ip-10-10-10-29:/data# docker-compose up -d
[+] Running 5/5
⠿ Network data_bitdata Created 0.0s
⠿ Container node-2 Started 0.6s
⠿ Container node-3 Started 0.8s
⠿ Container node-1 Started 0.8s
⠿ Container kibana Started 0.5s

CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
4d5dae9021be registry.cn-hangzhou.aliyuncs.com/bigdata_cloudnative/elasticsearch:7.17.5 "/bin/tini -- /usr/l…" About a minute ago Up About a minute (healthy) 0.0.0.0:9200->9200/tcp, :::9200->9200/tcp, 9300/tcp node-1
84e191d6f875 registry.cn-hangzhou.aliyuncs.com/bigdata_cloudnative/elasticsearch:7.17.5 "/bin/tini -- /usr/l…" About a minute ago Up About a minute (healthy) 9300/tcp, 0.0.0.0:9202->9200/tcp, :::9202->9200/tcp node-3
758a4bff2a73 registry.cn-hangzhou.aliyuncs.com/bigdata_cloudnative/elasticsearch:7.17.5 "/bin/tini -- /usr/l…" About a minute ago Up About a minute (healthy) 9300/tcp, 0.0.0.0:9201->9200/tcp, :::9201->9200/tcp node-2
fe55f0446902 registry.cn-hangzhou.aliyuncs.com/bigdata_cloudnative/kibana:7.17.5 "/bin/tini -- /usr/l…" About a minute ago Up About a minute (healthy) 0.0.0.0:5601->5601/tcp, :::5601->5601/tcp kibana

Everything is up. If anything errors out, just check the container logs; when I deployed this, the error messages were quite clear.

Check from inside the container:

root@node-1:/usr/share/elasticsearch# curl localhost:9200
{
  "name" : "node-1",
  "cluster_name" : "elastic",
  "cluster_uuid" : "tcKUxyWVQb-7LV4zmomsgg",
  "version" : {
    "number" : "7.17.5",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "8d61b4f7ddf931f219e3745f295ed2bbc50c8e84",
    "build_date" : "2022-06-23T21:57:28.736740635Z",
    "build_snapshot" : false,
    "lucene_version" : "8.11.1",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}
root@node-1:/usr/share/elasticsearch# curl 172.20.3:9200
{
  "name" : "node-1",
  "cluster_name" : "elastic",
  "cluster_uuid" : "tcKUxyWVQb-7LV4zmomsgg",
  "version" : {
    "number" : "7.17.5",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "8d61b4f7ddf931f219e3745f295ed2bbc50c8e84",
    "build_date" : "2022-06-23T21:57:28.736740635Z",
    "build_snapshot" : false,
    "lucene_version" : "8.11.1",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}
root@node-1:/usr/share/elasticsearch# curl 172.20.4:9200
{
  "name" : "node-2",
  "cluster_name" : "elastic",
  "cluster_uuid" : "tcKUxyWVQb-7LV4zmomsgg",
  "version" : {
    "number" : "7.17.5",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "8d61b4f7ddf931f219e3745f295ed2bbc50c8e84",
    "build_date" : "2022-06-23T21:57:28.736740635Z",
    "build_snapshot" : false,
    "lucene_version" : "8.11.1",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}
root@node-1:/usr/share/elasticsearch# curl 172.20.5:9200
{
  "name" : "node-3",
  "cluster_name" : "elastic",
  "cluster_uuid" : "tcKUxyWVQb-7LV4zmomsgg",
  "version" : {
    "number" : "7.17.5",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "8d61b4f7ddf931f219e3745f295ed2bbc50c8e84",
    "build_date" : "2022-06-23T21:57:28.736740635Z",
    "build_snapshot" : false,
    "lucene_version" : "8.11.1",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}
root@ip-10-10-10-29:/data# curl -s 172.20.0.3:9200/_cluster/health | python3 -m json.tool
{
    "cluster_name": "elastic",
    "status": "green",
    "timed_out": false,
    "number_of_nodes": 3,
    "number_of_data_nodes": 3,
    "active_primary_shards": 3,
    "active_shards": 6,
    "relocating_shards": 0,
    "initializing_shards": 0,
    "unassigned_shards": 0,
    "delayed_unassigned_shards": 0,
    "number_of_pending_tasks": 0,
    "number_of_in_flight_fetch": 0,
    "task_max_waiting_in_queue_millis": 0,
    "active_shards_percent_as_number": 100.0
}
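
Kibana can be probed from the host in a similar way; a quick check against its status endpoint, assuming the 5601 port mapping above:

# Returns Kibana's overall status as JSON once it has finished starting up
curl -s http://localhost:5601/api/status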

The checks above show the ES cluster is working properly. Here is what Kibana looks like when reached through the nginx reverse proxy.

Kibana works as well, but at this point you land straight in Kibana with no authentication at all. Without some form of login this is effectively running naked; even on an internal network that is far from safe, so authentication has to be added.

With SSL

Use the bundled tool to generate the certificates. You can also generate your own, but make sure they are restricted to the right domains and IPs, otherwise HTTPS verification will fail.

Enter the node-1 container and run:

root@ip-10-10-10-29:~# docker exec -it node-1 bash
root@node-1:/usr/share/elasticsearch#

# Write the instances file used for certificate generation, adjusted to your environment
cat <<EOF > /usr/share/elasticsearch/node.yml
instances:
  - name: "node"
    ip:
      - "172.20.0.3"
      - "172.20.0.4"
      - "172.20.0.5"
      - "127.0.0.1"
      - "172.20.0.6"
      - "10.10.10.29" # the server's own IP; since the ES cluster runs in Docker containers, this must be included
    dns:
      - "node-1"
      - "node-2"
      - "node-3"
      - "localhost"
      - "kibana"
      - "others"
EOF

# Generate the CA certificate; the default file name is elastic-stack-ca.p12, a password can be set
bin/elasticsearch-certutil ca # just press Enter at every prompt
# Generate the node certificate; the default file name is certificate-bundle.zip
bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12 --silent --in node.yml # just press Enter at every prompt
# Copy the certificates into node-1's config directory

root@node-1:/usr/share/elasticsearch# cp certificate-bundle.zip elastic-stack-ca.p12 config/certs/
root@node-1:/usr/share/elasticsearch# cd config/certs/
root@node-1:/usr/share/elasticsearch/config/certs# ls
certificate-bundle.zip elastic-stack-ca.p12

# Unzip the bundle; the layout is <instance name>/<instance name>.p12, here node/node.p12
root@node-1:/usr/share/elasticsearch/config/certs# unzip certificate-bundle.zip
Archive: certificate-bundle.zip
creating: node/
inflating: node/node.p12
root@node-1:/usr/share/elasticsearch/config/certs# ls
certificate-bundle.zip elastic-stack-ca.p12 node
root@node-1:/usr/share/elasticsearch/config/certs# ls node/
node.p12
root@node-1:/usr/share/elasticsearch/config/certs# mv node/node.p12 .
root@node-1:/usr/share/elasticsearch/config/certs# rm -rf node certificate-bundle.zip
root@node-1:/usr/share/elasticsearch/config/certs# ls
elastic-stack-ca.p12 node.p12

# Fix ownership and permissions on the certificates
root@node-1:/usr/share/elasticsearch/config# chown -R root:elasticsearch certs/
root@node-1:/usr/share/elasticsearch/config# chmod -R a+rx certs/

# The certificates we need now exist; copy them to node-2 and node-3 the same way and adjust ownership/permissions there as well
# Exit the container and go to node-1's mounted directory on the host:
root@ip-10-10-10-29:/data/es/node-1/certs# pwd
/data/es/node-1/certs

root@ip-10-10-10-29:/data/es/node-1/certs# cd ..
root@ip-10-10-10-29:/data/es/node-1# ls
certs config data log
root@ip-10-10-10-29:/data/es/node-1# cp -rf certs ../node-2/
root@ip-10-10-10-29:/data/es/node-1# cp -rf certs ../node-3/
root@ip-10-10-10-29:/data/es/node-1#

# The certificates are now in place
es/
├── node-1
│   ├── certs
│   │   ├── elastic-stack-ca.p12
│   │   └── node.p12
│   ├── config
│   │   └── elasticsearch.yml
│   ├── data
│   │   └── nodes
│   └── log
├── node-2
│   ├── certs
│   │   ├── elastic-stack-ca.p12
│   │   └── node.p12
│   ├── config
│   │   └── elasticsearch.yml
│   ├── data
│   │   └── nodes
│   └── log
├── node-3
│   ├── certs
│   │   ├── elastic-stack-ca.p12
│   │   └── node.p12
│   ├── config
│   │   └── elasticsearch.yml
│   ├── data
│   │   └── nodes
│   └── log
└── plugins
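
If you want to confirm what actually ended up in the keystore, in particular the IPs and DNS names from node.yml, openssl can dump the certificate on the host; a quick check, assuming the empty keystore password used above:

# Show the subject alternative names baked into node.p12
openssl pkcs12 -in /data/es/node-1/certs/node.p12 -nokeys -passin pass: | openssl x509 -noout -text | grep -A1 "Subject Alternative Name"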

Edit the configuration file /data/es/node-1/config/elasticsearch.yml (again using node-1 as the example):

# The settings above this point are the same as in the non-SSL deployment
.......
.......
# Enable the xpack security features
xpack.security.enabled: true
xpack.security.authc.api_key.enabled: true
# Enable HTTPS and configure the certificate
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.keystore.path: certs/node.p12
xpack.security.http.ssl.truststore.path: certs/node.p12
# Enable TLS for node-to-node transport and configure the certificate
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.client_authentication: required
xpack.security.transport.ssl.keystore.path: certs/node.p12
xpack.security.transport.ssl.truststore.path: certs/node.p12
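
The snippet above assumes the .p12 files were generated without a password (just pressing Enter at the prompts). If you did set one, each node also needs that password in its Elasticsearch keystore; the extra step in that case would be:

# Run inside each ES container; each command prompts for the PKCS#12 password
bin/elasticsearch-keystore add xpack.security.http.ssl.keystore.secure_password
bin/elasticsearch-keystore add xpack.security.http.ssl.truststore.secure_password
bin/elasticsearch-keystore add xpack.security.transport.ssl.keystore.secure_password
bin/elasticsearch-keystore add xpack.security.transport.ssl.truststore.secure_password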

After every node has been updated, restart the ES cluster.

Once it is back up, check the status and the logs for anything abnormal.

If everything looks fine, you can set the passwords. Enter the node-1 container:

root@ip-10-10-10-29:/data# docker exec -it node-1 bash
root@node-1:/usr/share/elasticsearch# ./bin/elasticsearch-setup-passwords auto
Initiating the setup of passwords for reserved users elastic,apm_system,kibana,kibana_system,logstash_system,beats_system,remote_monitoring_user.
The passwords will be randomly generated and printed to the console.
Please confirm that you would like to continue [y/N]y

# If the cluster is healthy, passwords for the built-in users are generated and printed here:
.......
.......
.......
.......

# Alternatively, set the passwords manually
./bin/elasticsearch-setup-passwords interactive

Use the ES API to check that each node responds correctly:

curl -k --user elastic:<password> -X GET "https://172.20.0.3:9200" --cert-type P12 --cert /usr/share/elasticsearch/config/certs/node.p12
curl -k --user elastic:<password> -X GET "https://172.20.0.4:9200" --cert-type P12 --cert /usr/share/elasticsearch/config/certs/node.p12
curl -k --user elastic:<password> -X GET "https://172.20.0.5:9200" --cert-type P12 --cert /usr/share/elasticsearch/config/certs/node.p12
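
The generated passwords can also be rotated later through the security API; a sketch using the elastic superuser against node-1, with the new value being just a placeholder:

curl -k --user elastic:<password> -X POST "https://172.20.0.3:9200/_security/user/kibana_system/_password" \
  -H 'Content-Type: application/json' -d '{"password":"NEW_PASSWORD"}'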

SSL is now fully configured; next, configure Kibana.

# Copy the certificates generated under node-1 into kibana/cert, then:
# Extract the certificates (public part)
openssl pkcs12 -in elastic-stack-ca.p12 -out elastic-stack-ca.pem -nokeys -clcerts
openssl pkcs12 -in node.p12 -out node.pem -nokeys -clcerts
# Extract the private key
openssl pkcs12 -in node.p12 -out node.key -nocerts -nodes
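
Before pointing Kibana at the extracted files it may be worth confirming they actually belong together; a quick check:

# The two fingerprints should match if node.key is really the key for node.pem
openssl x509 -in node.pem -noout -pubkey | md5sum
openssl pkey -in node.key -pubout | md5sum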

Edit the configuration file kibana/config/kibana.yml:

server.host: 0.0.0.0
# Listening port
server.port: 5601
server.name: "kibana"

# Elasticsearch URLs Kibana connects to; multiple entries are comma-separated.
# Since Elasticsearch now has SSL enabled, these URLs must use https.
elasticsearch.hosts: ["https://172.20.0.3:9200","https://172.20.0.4:9200","https://172.20.0.5:9200"]
monitoring.ui.container.elasticsearch.enabled: true
# Username and password Kibana uses for Elasticsearch (if Elasticsearch has them set)
elasticsearch.username: "kibana_system"
elasticsearch.password: "xxxxxxxxxxxx"
# Kibana UI language
i18n.locale: "zh-CN"
# Client certificate for Elasticsearch; already mounted into the container by docker-compose
elasticsearch.ssl.certificate: /etc/kibana/cert/node.pem
elasticsearch.ssl.key: /etc/kibana/cert/node.key
# CA certificate for Elasticsearch
elasticsearch.ssl.certificateAuthorities: [ "/etc/kibana/cert/elastic-stack-ca.pem" ]

Once the changes are in place, redeploy the Kibana service.
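
Only the kibana container needs to be recreated; the ES nodes can stay up. A minimal sketch, run from the compose directory /data:

cd /data
# Recreate only the kibana container so it picks up the new kibana.yml and the certificates
docker-compose up -d --force-recreate kibana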

After that, you simply log in and start using it.

Filebeat log collection

Here Filebeat itself runs as a Docker container, with the gateway server's Nginx logs mounted into it.

vim /opt/filebeat/docker-compose.yaml
version: "3"
services:
  filebeat:
    container_name: filebeat_test
    image: elastic/filebeat:7.9.0
    user: root
    volumes:
      - /data/wwwlogs/:/var/log/nginx:ro
      - ./filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
      - ./ca.crt:/usr/share/filebeat/certs/ca.crt
      # This certificate was already extracted when wiring up Kibana, i.e. the file elastic-stack-ca.pem;
      # the content is identical, only the file name and extension differ.
    command: filebeat -e
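
The ca.crt mounted above is simply the CA extracted earlier for Kibana under a different name; a sketch of putting it in place, assuming the PEM was left in /data/kibana/cert as in the previous section:

mkdir -p /opt/filebeat
# Same content as elastic-stack-ca.pem, only the file name differs
cp /data/kibana/cert/elastic-stack-ca.pem /opt/filebeat/ca.crt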

The Filebeat configuration file lives at /opt/filebeat/filebeat.yml:

vim /opt/filebeat/filebeat.yml
filebeat.inputs:
##################################################################################
- type: log
  paths:
    - /var/log/nginx/testnet-xxxxxxxxxxxx.com.log
  tags: ["proxy-testnet-xxxxxxxxxxxx.com.log"]
  fields:
    index: "proxy-testnet-xxxxxxxxxxxx.com.log"
  json.keys_under_root: true
  json.overwrite_keys: true
  processors:
    - add_kubernetes_metadata:
        matchers:
          - logs_path:
              logs_path: "/var/log/nginx/"
    - decode_json_fields:
        when:
          regexp:
            message: "{*}"
        fields: ["message"]
        overwrite_keys: true
##################################################################################
output.elasticsearch:
  hosts: ["https://10.10.10.29:9200","https://10.10.10.29:9201","https://10.10.10.29:9202"]
  username: elastic
  password: vDNnbd8FXqR2i13NYGfj
  ssl.certificate_authorities:
    - /usr/share/filebeat/certs/ca.crt
  indices:
    - index: "proxy-testnet-xxxxxxxxxxxx.com.log-%{+yyyy.MM.dd}"
      when.contains:
        fields:
          index: "proxy-testnet-xxxxxxxxxxxx.com.log"
setup.kibana:
  hosts: "http://10.10.10.29:5601"
  username: "elastic"
  password: "xxxxxxxxxxxx"
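
Before relying on it, the configuration and the connection to Elasticsearch can be verified from inside the container; a quick sanity check, assuming the container name used above:

docker exec -it filebeat_test filebeat test config
docker exec -it filebeat_test filebeat test output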

After configuring it, check the Filebeat logs for errors. If there are none, go to node-1 and query the ES API to confirm the index has been created:

root@node-1:/usr/share/elasticsearch# curl -k --user elastic:xxxxxxxxxxxx -X GET "https://172.20.0.3:9200/_cat/indices" --cert-type P12 --cert /usr/share/elasticsearch/config/certs/node.p12
......
......
......
green open proxy-testnet-xxxxxxxxxxxx.com.log-2024.07.23 Su-T62zGTheqbDsDZLzH_A 1 1 2911 0 1.7mb 914.2kb
......
......
......

Kibana configuration

As shown above, Filebeat is now writing data into ES. Next, configure it in Kibana:

Stack Management - Index Patterns - Create index pattern

After creating the pattern, open Discover in the menu and select the index pattern you just created.

You can now see the collected logs. Kibana has already split out the fields, so you only need to pick the fields you care about, or filter on them.

For example, to see requests with a status code of 500 or above: status >= 500. Filters like this can be written directly in the search bar.

You can of course also build visualizations from these logs; when you select one metric the other panels follow along, for example:

Finally, combined with the earlier post on using ElastAlert2 to create alerts on logs in ES, it becomes easy to monitor and alert on these logs.

Final notes

The walkthrough above is only a record; what matters is the approach.

k8s version

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: ops
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    #====================== input =================
    filebeat.inputs:
    # nginx-ingress
    - type: container
      paths:
        - /var/log/containers/*goerli-testnet-collector*.log
      tags: ["goerli-testnet-collector"]
      fields:
        index: "goerli-testnet-collector"
      processors:
        - add_kubernetes_metadata:
            host: ${NODE_NAME}
            matchers:
              - logs_path:
                  logs_path: "/var/log/containers/"
        - decode_json_fields:
            when:
              regexp:
                message: "{*}"
            fields: ["message"]
            overwrite_keys: true
            target: ""
    # testnet-bsc-collector
    - type: container
      paths:
        - /var/log/containers/*testnet-bsc-collector*.log
      tags: ["testnet-bsc-collector"]
      fields:
        index: "testnet-bsc-collector"
      processors:
        - add_kubernetes_metadata:
            host: ${NODE_NAME}
            matchers:
              - logs_path:
                  logs_path: "/var/log/containers/"
        - decode_json_fields:
            when:
              regexp:
                message: "{*}"
            fields: ["message"]
            #overwrite_keys: true
            target: ""
    #================ output =====================
    output.elasticsearch:
      hosts: ["https://10.10.20.23:9200", "https://10.10.20.120:9200", "https://10.10.20.160:9200"]
      username: elastic
      password: r40SgkMhmjwK8rIpClFM
      ssl.certificate_authorities:
        - /usr/share/filebeat/ca.crt
      indices:
        - index: "INDEX-NAME-%{+yyyy.MM.dd}"
          when.contains:
            fields:
              index: "INDEX-NAME"
        - index: "testnet-bsc-collector-%{+yyyy.MM.dd}"
          when.contains:
            fields:
              index: "testnet-bsc-collector"

    #============== Elasticsearch template setting ==========
    setup.ilm.enabled: false
    setup.template.name: 'k8s-logs'
    setup.template.pattern: 'k8s-logs-*'
    processors:
      - drop_fields:
          fields: ["agent","kubernetes.labels","input.type","log","ecs.version","host.name","kubernetes.replicaset.name","kubernetes.pod.uid","tags","stream","kubernetes.container.name"]
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: ops
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      tolerations:
        - effect: NoSchedule
          operator: Exists
      containers:
        - name: filebeat
          image: elastic/filebeat:7.9.0
          args: [
            "-c", "/etc/filebeat.yml",
            "-e",
          ]
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          securityContext:
            runAsUser: 0
            # If using Red Hat OpenShift uncomment this:
            #privileged: true
          resources:
            limits:
              memory: 200Mi
            requests:
              cpu: 100m
              memory: 100Mi
          volumeMounts:
            - name: config
              mountPath: /etc/filebeat.yml
              readOnly: true
              subPath: filebeat.yml
            - name: data
              mountPath: /usr/share/filebeat/data
            - name: varlibdockercontainers
              mountPath: /var/lib/containerd/
              readOnly: true
            - name: varlog
              mountPath: /var/log
              readOnly: true
      volumes:
        - name: config
          configMap:
            defaultMode: 0600
            name: filebeat-config
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/containerd/
        - name: varlog
          hostPath:
            path: /var/log
        # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
        - name: data
          hostPath:
            path: /var/lib/filebeat-data
            type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
  - kind: ServiceAccount
    name: filebeat
    namespace: ops
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
  - apiGroups: [""] # "" indicates the core API group
    resources:
      - namespaces
      - pods
    verbs:
      - get
      - watch
      - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: ops
  labels:
    k8s-app: filebeat
---
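
One thing to watch: the ConfigMap points Filebeat at /usr/share/filebeat/ca.crt, but the DaemonSet above does not mount that file by itself, so the CA has to be provided separately, for example as a Secret plus an extra volume/volumeMount, or baked into a custom image. A rough sketch of creating such a Secret and rolling the manifests out, assuming they are saved as filebeat-k8s.yaml and the same ca.crt from the docker-compose setup is at hand:

# CA that Filebeat should trust when talking to Elasticsearch over https
kubectl -n ops create secret generic filebeat-ca --from-file=ca.crt=/opt/filebeat/ca.crt
# Apply the manifests and check that the DaemonSet pods come up
kubectl apply -f filebeat-k8s.yaml
kubectl -n ops get pods -l k8s-app=filebeat
kubectl -n ops logs -l k8s-app=filebeat --tail=20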