maven引用
<dependency>
<groupId>org.elasticsearch.client</groupId>
<artifactId>transport</artifactId>
<version>6.6.0</version>
</dependency>
复制代码
maven引用
<dependency>
<groupId>io.searchbox</groupId>
<artifactId>jest</artifactId>
<version>5.3.3</version>
</dependency>
复制代码
composer引用
{
"require": {
"elasticsearch/elasticsearch": "~6.0"
}
}
复制代码
curl -XPUT 'localhost:9200/_index'
复制代码
curl -XPOST 'localhost:9200/_index/_type?pretty'
{
"_type": {
"properties": {
"field1": {
"type": "text"
},
"field2": {
"type": "text"
},
"field3": {
"type": "text"
},
"field4": {
"type": "long"
}
}
}
}
复制代码
curl -XDELETE 'localhost:9200/_index'
复制代码
curl -XDELETE 'localhost:9200/_index1,_index2' 或 curl -XDELETE 'localhost:9200/_index*'
复制代码
curl -XPOST 'localhost:9200/_index/_type{/_id}'
{
"field1": "XXXXXXXX",
"field2": "XXXXXXXX",
"field3": "XXXXXXXX",
"field4": "1529396883"
}
复制代码
curl -XPUT 'localhost:9200/_index/_type/_id'
{
"field1": "XXXXXXXX",
"field2": "XXXXXXXX",
"field3": "XXXXXXXX",
"field4": "1529396883"
}
复制代码
curl -XDELETE 'localhost:9200/_index/_type/_id'
复制代码
curl -XGET 'localhost:9200/_index/_type/_id'
复制代码
{
"_index" : "_index",
"_type" : "_type",
"_id" : "_id",
"_version" : 1,
"found" : true,
"_source" : {
"field1": "XXXXXXXX",
"field2": "XXXXXXXX",
"field3": "XXXXXXXX",
"field4": "1529396883"
}
}
复制代码
curl -XGET 'localhost:9200/_index/_type/_id?_source=field1,field2'
复制代码
{
"_index" : "website",
"_type" : "blog",
"_id" : "123",
"_version" : 1,
"found" : true,
"_source" : {
"field1": "My first blog entry" ,
"field2": "Just trying this out..."
}
}
复制代码
curl -XGET 'localhost:9200/_index/_type/_id/_source'
复制代码
{
"field1": "XXXXXXXX",
"field2": "XXXXXXXX",
"field3": "XXXXXXXX",
"field4": "1529396883"
}
复制代码
curl -XPOST 'localhost:9200/_mget'
{
"docs": [
{
"_index": "_index",
"_type": "_type",
"_id": "_id"
},
{
"_index": "_index",
"_type": "_type",
"_id": "_id"
}
]
}
复制代码
curl -XPOST 'localhost:9200/_bulk?pretty' -H 'Content-Type: application/json' -d' { "delete": { "_index": "website", "_type": "blog", "_id": "123" }} { "create": { "_index": "website", "_type": "blog", "_id": "123" }} { "title": "My first blog post" } { "index": { "_index": "website", "_type": "blog" }} { "title": "My second blog post" } { "update": { "_index": "website", "_type": "blog", "_id": "123", "_retry_on_conflict" : 3} } { "doc" : {"title" : "My updated blog post"} } '
复制代码
{
"took": 4,
"errors": false,
"items": [
{ "create": {
"_index": "website",
"_type": "blog",
"_id": "123",
"_version": 3,
"status": 201
}}
]
}
复制代码
{ action: { metadata }}\n
{ request body }\n
复制代码
某个子请求的失败不会对其余子请求的成功与否形成影响。 若是其中任何子请求失败,最顶层的 error 标志被设置为 true ,而且在相应的请求报告出错误明细
{
"took": 3,
"errors": true,
"items": [
{ "create": {
"_index": "website",
"_type": "blog",
"_id": "123",
"status": 409,
"error": "DocumentAlreadyExistsException [[website][4] [blog][123]: document already exists]"
}}
]
}
复制代码
bulk会把将要处理的数据载入内存中,因此数据量是有限制的,最佳的数据量不是一个肯定的数值,它取决于硬件、文档大小和复杂性、索引和搜索的负载; 通常建议是1000-5000个文档,大小建议是5-15MB,默认不能超过100M,能够在es的配置文件(即$ES_HOME下的config下的elasticsearch.yml)中修改。
curl -XGET 'localhost:9200/_index/_type/_search' -H 'Content-Type: application/json' -d' { "query":{ "bool": { "must": { "match": { "field": "value" }}, "must_not": { "match": { "field": "value" }}, "should": { "match": { "field": "value" }}, "filter": { "range": { "field" : { "gt" : num }} } } }, "from":0, "size":10, "sort":{"field":{"order":"desc"}} } ' 复制代码
结果查看 hits->total 的值
{
"took": 10,// 请求毫秒数
"timed_out": false,// 是否超时
"_shards": {// 分片信息
"total": 5,
"successful": 5,
"skipped": 0,
"failed": 0
},
"hits": {
"total": 200,// 匹配到的文档总数
"max_score": 14.509778,
"hits": [// 查询结果,默认10条
······
]
}
}
复制代码
curl -XGET 'localhost:9200/_index/_type/_search' -H 'Content-Type: application/json' -d' { "query": { "bool": { "must": [ { "match": { "posiName": "questionGender" } }, { "match": { "pageName": "questionDetail" } }, { "match": { "modleName": "questionAnswer" } } ] } }, "aggs": { "distinct": { "cardinality": { "field": "modleId" } } } } ' 复制代码
结果查看 aggregations->distinct->value 的值
{
"took": 3,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"skipped": 0,
"failed": 0
},
"hits": {
"total": 200,
"max_score": 14.509778,
"hits": [
······
]
},
"aggregations": {
"distinct": {
"value": 3// 去重结果
}
}
}
复制代码
curl -XGET 'localhost:9200/_index/_type/_search' -H 'Content-Type: application/json' -d' { "query": { "bool": { "must": [ { "match": { "posiName": "questionGender" } }, { "match": { "pageName": "questionDetail" } }, { "match": { "modleName": "questionAnswer" } } ] } }, "collapse":{ "field":"modleId" } } ' 复制代码
结果查看 hits->hits 的值
{
"took": 12,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"skipped": 0,
"failed": 0
},
"hits": {
"total": 200,
"max_score": 14.509778,
"hits": [
{
"_index": "mxsp_events",
"_type": "events",
"_id": "aPVv6mQBkQR_Xrrgricj",
"_score": 14.509778,
"_source": {
"modleId": 2,
"posiName": "questionGender",
"pageName": "questionDetail",
"modleName": "questionAnswer",
"userId": 1540563,
"createdAt": 1532941929
},
"fields": {
"modleId": [
2
]
}
},
{
"_index": "mxsp_events",
"_type": "events",
"_id": "dgIP9GQBkQR_XrrgQF6S",
"_score": 14.509778,
"_source": {
"modleId": 1,
"posiName": "questionGender",
"pageName": "questionDetail",
"modleName": "questionAnswer",
"userId": 3,
"createdAt": 1533103385
},
"fields": {
"modleId": [
1
]
}
},
{
"_index": "mxsp_events",
"_type": "events",
"_id": "nMyw2WQBkQR_XrrgsDQ6",
"_score": 14.312874,
"_source": {
"modleId": "0",
"posiName": "questionGender",
"pageName": "questionDetail",
"modleName": "questionAnswer",
"userId": "19",
"createdAt": "1529396883"
},
"fields": {
"modleId": [
0
]
}
}
]
}
}
复制代码
curl -XGET 'localhost:9200/_index/_type/_search' -H 'Content-Type: application/json' -d' { "query": { "bool": { "must": [ { "match": { "posiName": "questionGender" } }, { "match": { "pageName": "questionDetail" } }, { "match": { "modleName": "questionAnswer" } } ] } }, "aggs": { "group_by": { "terms": { "field": "modleId" } } } } ' 复制代码
结果查看 aggregations->group_by->buckets 的值
{
"took": 7,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"skipped": 0,
"failed": 0
},
"hits": {
"total": 200,
"max_score": 14.509778,
"hits": [
······
]
},
"aggregations": {
"group_by": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [// 分组结果
{
"key": 2,
"doc_count": 116
},
{
"key": 1,
"doc_count": 83
},
{
"key": 0,
"doc_count": 1
}
]
}
}
}
复制代码
bin/logstash -f logstash.conf
-f:经过这个命令能够指定Logstash的配置文件,根据配置文件配置logstash
bin/logstash -e 'input { stdin { } } output { stdout {} }' 或 bin/logstash -e ""
-e:后面跟着字符串,该字符串能够被当作logstash的配置(若是是""则默认使用stdin做为输入,stdout做为输出)
bin/logstash -f logstash.conf -t
-t:检查配置文件是否正确
# 日志导入
input {
}
# 日志筛选匹配处理
filter {
}
# 日志匹配输出
output {
}
复制代码
https://www.elastic.co/guide/en/logstash/current/input-plugins.html
input{
file{
# 要导入的文件的位置,可使用*,例如/var/log/nginx/*.log
path=>"/var/lib/mysql/slow.log"
# 要排除的文件(配合path => "/var/log/*"使用)
exclude=>"*.gz"
# 从文件开始的位置开始读,end表示从结尾开始读
start_position=>"beginning"
}
}
复制代码
input{
redis{
# redis地址
host=>"127.0.0.1"
# redis端口号
port=>6379
# 使用redis的数据库,默认为0号
db=>0
# redis的密码,默认不使用
password=>"XXX"
# 链接超时的时间
timeout=>5
# 操做类型,必填项(list、channel和pattern_channel三种;list是BLPOP,channel是SUBSCRIBE,pattern_channel是PSUBSCRIBE)
data_type=>"list"
# 监听的键值,必填项
key=>"logstash-test-list"
# EVAL命令返回的事件数目,表示一次请求返回N条日志信息
batch_count=>1
# 启用线程数量
threads=>1
}
}
复制代码
https://www.elastic.co/guide/en/logstash/current/filter-plugins.html
正则匹配内容
# SYNTAX表明匹配值的类型,如NUMBER、WORD;SEMANTIC表示存储该值的一个变量名称
基础语法:%{SYNTAX:SEMANTIC}
# field_name表示存储该值的一个变量名称;后面跟上正则表达式;如:(?<queue_id>[0-9A-F]{10,11})
自定义语法:(?<field_name>the pattern here)
# 示例
filter {
grok {
match => {
"message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}"
}
}
}
# 输入
55.3.244.1 GET /index.html 15824 0.043
# 匹配结果
{
"@version" => "1",
"method" => "GET",
"message" => "58.23.56.101 GET /index.html 15824 0.043",
"duration" => "0.043",
"request" => "/index.html",
"client" => "58.23.56.101",
"bytes" => "15824",
"host" => "linchendeMac-mini.local",
"@timestamp" => 2019-03-06T06:24:21.333Z
}
复制代码
基于分隔符原理解析数据,相比于 grok 速度更快、消耗更小的CPU资源;dissect插件有必定局限性:主要适用于每行格式类似且分隔符明确简单的场景;dissect语法比较简单,有一系列字段(field)和分隔符(delimiter)组成
基础语法:%{}字段名称;%{}之间是分隔符
# 示例
input{
stdin{}
}
filter{
dissect {
mapping => { "message" => "%{ip} [%{time} %{+time}] %{method} %{request} %{bytes} %{duration}" }
}
}
output{
stdout{}
}
# 输入
55.3.244.1 [07/Sep/2017:17:24:53 +0800] GET /index.html 15824 0.043
# 匹配结果
{
"bytes" => "15824",
"time" => "07/Sep/2017:17:24:53 +0800",
"duration" => "0.043",
"@timestamp" => 2019-03-06T09:15:28.822Z,
"ip" => "55.3.244.1",
"message" => "55.3.244.1 [07/Sep/2017:17:24:53 +0800] GET /index.html 15824 0.043",
"@version" => "1",
"host" => "linchendeMac-mini.local",
"method" => "GET",
"request" => "/index.html"
}
复制代码
date插件会将 @timestamp 字段的值保存为指定字段对应的时间值,不使用则为当前时间
# 示例
filter {
grok {
match => {
"message" => "%{IP:client} \[%{HTTPDATE:time}\] %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}"
}
}
date{
match=>["time","dd/MMM/yyyy:HH:mm:ss Z"]
}
}
# 输入
55.3.244.1 [07/Sep/2017:17:24:53 +0800] GET /index.html 15824 0.043
# 匹配结果
{
"bytes" => "15824",
"time" => "07/Sep/2017:17:24:53 +0800",
"client" => "55.3.244.1",
"request" => "/index.html",
"@version" => "1",
"duration" => "0.043",
"method" => "GET",
"host" => "linchendeMac-mini.local",
"message" => "55.3.244.1 [07/Sep/2017:17:24:53 +0800] GET /index.html 15824 0.043",
"@timestamp" => 2017-09-07T09:24:53.000Z
}
复制代码
根据ip地址提供对应的地域信息,好比经纬度、城市名等,方便进行地理数据分析
# 示例
filter {
grok {
match => {
"message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}"
}
}
geoip {
# IP地址数据库文件的路径
database => "/usr/local/Cellar/logstash-6.6.0/config/GeoLite2-City.mmdb"
# 含有ip地址的字段名称
source => "client"
# 指定须要的字段
# fields => ["country_name", "region_name", "city_name"]
}
}
# 输入
55.3.244.1 GET /index.html 15824 0.043
# 匹配结果
{
"method" => "GET",
"bytes" => "15824",
"request" => "/index.html",
"duration" => "0.043",
"geoip" => {
"continent_code" => "AS",
"location" => {
"lat" => 24.4798,
"lon" => 118.0819
},
"region_name" => "Fujian",
"ip" => "58.23.56.101",
"city_name" => "Xiamen",
"latitude" => 24.4798,
"country_code3" => "CN",
"longitude" => 118.0819,
"region_code" => "FJ",
"timezone" => "Asia/Shanghai",
"country_name" => "China",
"country_code2" => "CN"
},
"host" => "linchendeMac-mini.local",
"@timestamp" => 2019-03-06T06:13:00.118Z,
"message" => "58.23.56.101 GET /index.html 15824 0.043",
"@version" => "1",
"client" => "58.23.56.101"
}
复制代码
filter {
grok {
match => { "message" => ["(?<RemoteIP>(\d*.\d*.\d*.\d*)) - %{DATA:[nginx][access][user_name]} \[%{HTTPDATE:[nginx][access][time]}\] \"%{WORD:[nginx][access][method]} %{DATA:[nginx][access][url]} HTTP/%{NUMBER:[nginx][access][http_version]}\" %{NUMBER:[nginx][access][response_code]} %{NUMBER:[nginx][access][body_sent][bytes]} \"%{DATA:[nginx][access][referrer]}\" \"%{DATA:[nginx][access][agent]}\""] }
}
}
复制代码
https://www.elastic.co/guide/en/logstash/current/output-plugins.html
output{
elasticsearch{
# elasticsearch地址:端口
hosts=>["127.0.0.1:9200"]
# 导出到index的名称,可使用时间变量
index=>"logstash-slow-%{+YYYY.MM.dd}"
# 导出到type的名称,默认为doc
document_type=>"log"
# elasticsearch帐号密码,无安全认证不须要这两个参数
user=>"admin"
password=>"xxxxxx"
# 模板文件路径
template=>"/opt/logstash-conf/es-template.json"
# 模板命名
template_name=>"logstash"
# 自动管理模板功能(true:默认模板;false:自定义模板)
template_overwrite=>false
}
}
复制代码
output{
redis{
# redis的地址和端口,会覆盖全局端口
host=>["127.0.0.1:6379"]
# 全局端口,默认6379,若是host已指定,本条失效
port=>6379
# 使用redis的数据库,默认为0号
db=>0
# redis的密码,默认不使用
password=>"xxx"
# 操做类型(list和channel两种;list是RPUSH,channel是PUBLISH)
data_type=>"list"
# key的名称
key=>"xxx"
# 失败重连的间隔,默认为1s
reconnect_interval=>1
# 链接超时的时间
timeout=>5
# 批量处理(仅用于data_type=list的模式)
# 是否批量处理(默认false:1条rpush命令只存储1条数据;true:批量处理,1条rpush会发送batch_events条数据或发送batch_timeout秒(取决于哪个先到达))
batch=>true
# 批量处理时一次rpush的最大数据量
batch_events=>50
# 批量处理时一次rpush最多消耗多少时间
batch_timeout=>5
# 拥塞保护(仅用于data_type=list的模式,redis防止内存溢出)
# 每多长时间(单位为秒,0为每次都检查)进行一次拥塞检查
congestion_interval=>1
# list中最多能够存在多少个item数据(默认为0,表示禁用拥塞检测;达到congestion_threshold的数量会阻塞直到有其余消费者消费list中的数据)
congestion_threshold=>0
}
}
复制代码