ELK7.3实战安装配置文档
整体架构
一:环境准备
192.168.43.16 jdk,elasticsearch-master ,logstash,kibana
192.168.43.17 jdk,elasticsearch-node1
192.168.43.18 jdk,elasticsearch-node2
192.168.43.19 linux,filebeat
#解压
tar -zxvf jdk-12.0.2_linux-x64_bin.tar.gz -C /usr/
#设置环境变量
vim /etc/profile
export JAVA_HOME=/usr/jdk-12.0.2/
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib:$CLASSPATH
export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH
#使环境变量生效
source /etc/profile
# 修改系统文件
vim /etc/security/limits.conf #增加的内容
* soft nofile 65536
* hard nofile 65536
* soft nproc 2048
* hard nproc 4096
#修改系统文件
vim /etc/security/limits.d/20-nproc.conf #调整成以下配置
* soft nproc 4096
root soft nproc unlimited
vim /etc/sysctl.conf
#在最后追加
vm.max_map_count=262144
fs.file-max=655360
#使用 sysctl -p 查看修改结果
sysctl -p
vim /etc/hosts
192.168.43.16 elk-master-node
192.168.43.17 elk-data-node1
192.168.43.18 elk-data-node2
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
setenforce 0
systemctl stop firewalld
systemctl disable firewalld
groupadd elk
useradd -g elk elk
mkdir -p /home/app/elk
chown -R elk:elk /home/app/elk
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.3.2-linux-x86_64.tar.gz
wget https://artifacts.elastic.co/downloads/logstash/logstash-7.3.2.tar.gz
wget https://artifacts.elastic.co/downloads/kibana/kibana-7.3.2-linux-x86_64.tar.gz
tar -zxvf elasticsearch-7.3.2-linux-x86_64.tar.gz -C /home/app/elk && \
tar -zxvf logstash-7.3.2.tar.gz -C /home/app/elk && \
tar -zxvf kibana-7.3.2-linux-x86_64.tar.gz -C /home/app/elk
二、安装elasticsearch
1、配置elasticsearch(切换至elk用户)
创建Elasticsearch数据目录:mkdir -p /home/app/elk/elasticsearch-7.3.2/data
创建Elasticsearch日志目录:mkdir -p /home/app/elk/elasticsearch-7.3.2/logs
主节点配置:vim /home/app/elk/elasticsearch-7.3.2/config/elasticsearch.yml
# 集群名称
cluster.name: es
# 节点名称
node.name: es-master
# 存放数据目录,先创建该目录
path.data: /home/app/elk/elasticsearch-7.3.2/data
# 存放日志目录,先创建该目录
path.logs: /home/app/elk/elasticsearch-7.3.2/logs
# 节点IP
network.host: 192.168.43.16
# tcp端口
transport.tcp.port: 9300
# http端口
http.port: 9200
# 种子节点列表,主节点的IP地址必须在seed_hosts中
discovery.seed_hosts: ["192.168.43.16:9300","192.168.43.17:9300","192.168.43.18:9300"]
# 主合格节点列表,若有多个主节点,则主节点进行对应的配置
cluster.initial_master_nodes: ["es-master"]
# 主节点相关配置
# 是否允许作为主节点
node.master: true
# 是否保存数据
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
# 跨域
http.cors.enabled: true
http.cors.allow-origin: "*"
192.168.43.17数据节点从配置:vim /home/app/elk/elasticsearch-7.3.2/config/elasticsearch.yml
# 集群名称
cluster.name: es
# 节点名称
node.name: es-data1
# 存放数据目录,先创建该目录
path.data: /home/app/elk/elasticsearch-7.3.2/data
# 存放日志目录,先创建该目录
path.logs: /home/app/elk/elasticsearch-7.3.2/logs
# 节点IP
network.host: 192.168.43.17
# tcp端口
transport.tcp.port: 9300
# http端口
http.port: 9200
# 种子节点列表,主节点的IP地址必须在seed_hosts中
discovery.seed_hosts: ["192.168.43.16:9300","192.168.43.17:9300","192.168.43.18:9300"]
# 主合格节点列表,若有多个主节点,则主节点进行对应的配置
cluster.initial_master_nodes: ["es-master"]
# 主节点相关配置
# 是否允许作为主节点
node.master: false
# 是否保存数据
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
# 跨域
http.cors.enabled: true
http.cors.allow-origin: "*"
192.168.43.18数据节点从配置:vim /home/app/elk/elasticsearch-7.3.2/config/elasticsearch.yml
# 集群名称
cluster.name: es
# 节点名称
node.name: es-data2
# 存放数据目录,先创建该目录
path.data: /home/app/elk/elasticsearch-7.3.2/data
# 存放日志目录,先创建该目录
path.logs: /home/app/elk/elasticsearch-7.3.2/logs
# 节点IP
network.host: 192.168.43.18
# tcp端口
transport.tcp.port: 9300
# http端口
http.port: 9200
# 种子节点列表,主节点的IP地址必须在seed_hosts中
discovery.seed_hosts: ["192.168.43.16:9300","192.168.43.17:9300","192.168.43.18:9300"]
# 主合格节点列表,若有多个主节点,则主节点进行对应的配置
cluster.initial_master_nodes: ["es-master"]
# 主节点相关配置
# 是否允许作为主节点
node.master: false
# 是否保存数据
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
# 跨域
http.cors.enabled: true
http.cors.allow-origin: "*"
2、启动elasticserach
/home/app/elk/elasticsearch-7.3.2/bin/elasticsearch -d
3、健康检查
curl -X GET 'http://192.168.43.16:9200/_cluster/health?pretty'
[root@localhost elk]# curl -X GET 'http://192.168.43.16:9200/_cluster/health?pretty'
{
"cluster_name" : "es",
"status" : "green",
"timed_out" : false,
"number_of_nodes" : 3,
"number_of_data_nodes" : 3,
"active_primary_shards" : 5,
"active_shards" : 10,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 100.0
}
#status=green表示服务正常
三、安装kibana
1、修改配置文件
cd /home/app/elk/kibana-7.3.2-linux-x86_64/config
vim kibana.yml
# 配置kibana的端口
server.port: 5601
# 配置监听ip
server.host: "192.168.43.16"
# 配置es服务器的ip,如果是集群则配置该集群中主节点的ip
elasticsearch.hosts: ["http://192.168.43.16:9200"]
# 配置kibana的日志文件路径,不然默认是messages里记录日志
logging.dest: /home/app/elk/kibana-7.3.2-linux-x86_64/logs/kibana.log
2、启动kibana
nohup /home/app/elk/kibana-7.3.2-linux-x86_64/bin/kibana &
四、安装filebeat(192.168.43.19上事先跑了jumpserver服务)
本次实验我们在192.168.43.19上安装filebeat单独对nginx的访问日志和错误日志进行采集,网上有关于发送json格式的配置,在此为了练习grok,直接发送原格式进行配置
1、下载filebeat
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.3.2-linux-x86_64.tar.gz
mkdir -p /opt/software
tar -zxvf filebeat-7.3.2-linux-x86_64.tar.gz -C /opt/software
2、配置filebeat.yml
vim /opt/software/filebeat-7.3.2/filebeat.yml
#=========================== Filebeat inputs =============================
filebeat.inputs:
- type: log
paths:
- /var/log/nginx/access.log
fields:
log_source: nginx-access
- type: log
paths:
- /var/log/nginx/error.log
fields:
log_source: nginx-error
#============================== Dashboards =====================================
setup.dashboards.enabled: false
#============================== Kibana =====================================
#添加kibana仪表盘
setup.kibana:
host: "192.168.43.16:5601"
#----------------------------- Logstash output --------------------------------
output.logstash:
# The Logstash hosts
hosts: ["192.168.43.16:5044"]
3、启动filebeat
cd /opt/software/filebeat-7.3.2
nohup ./filebeat -c filebeat.yml &
五、安装logstash
1、创建logstash.conf文件
vim /home/app/elk/logstash-7.3.2/config/logstash.conf
input {
beats {
port => 5044
}
}
filter {
if [fields][log_source]=="nginx-access"{
grok {
match => {
"message" => '%{IP:clientip}\s*%{DATA}\s*%{DATA}\s*\[%{HTTPDATE:requesttime}\]\s*"%{WORD:requesttype}.*?"\s*%{NUMBER:status:int}\s*%{NUMBER:bytes_read:int}\s*"%{DATA:requesturl}"\s*%{QS:ua}'
}
overwrite => ["message"]
}
}
if [fields][log_source]=="nginx-error"{
grok {
match => {
"message" => '(?<time>.*?)\s*\[%{LOGLEVEL:loglevel}\]\s*%{DATA}:\s*%{DATA:errorinfo},\s*%{WORD}:\s*%{IP:clientip},\s*%{WORD}:%{DATA:server},\s*%{WORD}:\s*%{QS:request},\s*%{WORD}:\s*%{QS:upstream},\s*%{WORD}:\s*"%{IP:hostip}",\s*%{WORD}:\s*%{QS:referrer}'
}
overwrite => ["message"]
}
}
}
output {
if [fields][log_source]=="nginx-access"{
elasticsearch {
hosts => ["http://192.168.43.16:9200"]
action => "index"
index => "nginx-access-%{+YYYY.MM.dd}"
}
}
if [fields][log_source]=="nginx-error"{
elasticsearch {
hosts => ["http://192.168.43.16:9200"]
action => "index"
index => "nginx-error-%{+YYYY.MM.dd}"
}
}
stdout { codec => rubydebug }
}
2、启动logstash
/home/app/elk/logstash-7.3.2/bin/logstash -f /home/app/elk/logstash-7.3.2/config/logstash.conf
六、登录kibana平台
分别点击管理--》索引管理,这时候就能看到nginx的访问日志和错误日志的数据了
接下来创建索引,分别对访问日志和错误日志建立索引,建立完之后点击discover,就能看到日志数据了
nginx-access
nginx-error
参考文档:
https://elkguide.elasticsearch.cn/logstash/plugins/filter/mutate.html
最新文章
- highcharts去掉版权|去掉水印链接(右下角)_
- java中map插入相同的key
- Codeforces Educational Codeforces Round 5 E. Sum of Remainders 数学
- android104 帧动画,补间动画,属性动画
- 静态编译Qt5.4.1和Qt WebKit
- SQL Server 创建作业系列问题
- 项目与软件推荐之编辑器-QOwnNotes(刺激自己)
- vim全局替换命令
- iOS-联系人应用(一)
- JVM进程占用CPU过高问题排查
- 05 JS基础DOM
- Unity3D RTS游戏中帧同步实现
- django模板导入外部js和css等文件
- OrCAD Capture CIS 16.6 在原理图页面内放置图片
- 06-jQuery的文档操作
- java android中日期时间 问题总结
- 安卓开发——ListView控件(初始化ListView、列表刷新、长按添加menu)
- zlib打印bit length overflow
- js 替换中间四位手机号为 *
- kmplayer音轨切换(换配音)
热门文章
- springboot报 org.thymeleaf.exceptions.TemplateInputException: Error resolving template ";succeed";;
- Flink的Job启动JobManager端(源码分析)
- 【原创】Linux cpu hotplug
- ValueError: Cannot create group in read only mode.
- 牛客练习赛37C 	筱玛的迷阵探险 双向搜索+字典树
- URAL-1627-Join 生成树计数
- 【牛客多校】Han Xin and His Troops
- 51nod 1257 背包问题 V3(这不是背包问题是二分)
- Dokit支持iOS本地crash查看功能
- 【Offer】[50-2] 【字符流中第一个只出现一次的字符】