POST _analyze
{
"tokenizer": "standard",
"text":"I'm in the mood for drinking semi-dry red wine!"
}
|
{
"tokens" : [
{
"token" : "I'm",
"start_offset" : 0,
"end_offset" : 3,
"type" : "<ALPHANUM>",
"position" : 0
},
{
"token" : "in",
"start_offset" : 4,
"end_offset" : 6,
"type" : "<ALPHANUM>",
"position" : 1
},
{
"token" : "the",
"start_offset" : 7,
"end_offset" : 10,
"type" : "<ALPHANUM>",
"position" : 2
},
{
"token" : "mood",
"start_offset" : 11,
"end_offset" : 15,
"type" : "<ALPHANUM>",
"position" : 3
},
{
"token" : "for",
"start_offset" : 16,
"end_offset" : 19,
"type" : "<ALPHANUM>",
"position" : 4
},
{
"token" : "drinking",
"start_offset" : 20,
"end_offset" : 28,
"type" : "<ALPHANUM>",
"position" : 5
},
{
"token" : "semi",
"start_offset" : 29,
"end_offset" : 33,
"type" : "<ALPHANUM>",
"position" : 6
},
{
"token" : "dry",
"start_offset" : 34,
"end_offset" : 37,
"type" : "<ALPHANUM>",
"position" : 7
},
{
"token" : "red",
"start_offset" : 38,
"end_offset" : 41,
"type" : "<ALPHANUM>",
"position" : 8
},
{
"token" : "wine",
"start_offset" : 42,
"end_offset" : 46,
"type" : "<ALPHANUM>",
"position" : 9
}
]
}
|
POST _analyze
{
"analyzer":"standard",
"text":"I'm in the mood for drinking semi-dry red wine!"
}
|
{
"tokens" : [
{
"token" : "i'm",
"start_offset" : 0,
"end_offset" : 3,
"type" : "<ALPHANUM>",
"position" : 0
},
{
"token" : "in",
"start_offset" : 4,
"end_offset" : 6,
"type" : "<ALPHANUM>",
"position" : 1
},
{
"token" : "the",
"start_offset" : 7,
"end_offset" : 10,
"type" : "<ALPHANUM>",
"position" : 2
},
{
"token" : "mood",
"start_offset" : 11,
"end_offset" : 15,
"type" : "<ALPHANUM>",
"position" : 3
},
{
"token" : "for",
"start_offset" : 16,
"end_offset" : 19,
"type" : "<ALPHANUM>",
"position" : 4
},
{
"token" : "drinking",
"start_offset" : 20,
"end_offset" : 28,
"type" : "<ALPHANUM>",
"position" : 5
},
{
"token" : "semi",
"start_offset" : 29,
"end_offset" : 33,
"type" : "<ALPHANUM>",
"position" : 6
},
{
"token" : "dry",
"start_offset" : 34,
"end_offset" : 37,
"type" : "<ALPHANUM>",
"position" : 7
},
{
"token" : "red",
"start_offset" : 38,
"end_offset" : 41,
"type" : "<ALPHANUM>",
"position" : 8
},
{
"token" : "wine",
"start_offset" : 42,
"end_offset" : 46,
"type" : "<ALPHANUM>",
"position" : 9
}
]
}
|
Im so _sad_
Im so :(
|
Pattern: ([a-zA-Z0-9]+)(-?)
Replacement: $1
aaa-bbb-ccc
aaabbbccc
|
Mapping Char Filter | Elasticsearch Reference [7.5] | Elastic
The mapping character filter accepts a map of keys and values. Whenever it encounters a string of characters that is the same as a key, it replaces them with the value associated with that key. Matching is greedy; the longest pattern matching at a given point wins.
www.elastic.co
Mapping Char Filter | Elasticsearch Reference [7.5] | Elastic
The mapping character filter accepts a map of keys and values. Whenever it encounters a string of characters that is the same as a key, it replaces them with the value associated with that key. Matching is greedy; the longest pattern matching at a given point wins.
www.elastic.co
Mapping Char Filter | Elasticsearch Reference [7.5] | Elastic
The mapping character filter accepts a map of keys and values. Whenever it encounters a string of characters that is the same as a key, it replaces them with the value associated with that key. Matching is greedy; the longest pattern matching at a given point wins.
www.elastic.co
Mapping Char Filter | Elasticsearch Reference [7.5] | Elastic
The mapping character filter accepts a map of keys and values. Whenever it encounters a string of characters that is the same as a key, it replaces them with the value associated with that key. Matching is greedy; the longest pattern matching at a given point wins.
www.elastic.co
" I'm in the mood for drinking semi-dry red wine! "
=> [I'm,in,the,mood,for,drinking,semi,dry,red,wine]
|
" I'm in the mood for drinking semi-dry red wine! "
=> [i,m,in,the,mood,for,drinking,semi,dry,red,wine]
|
" I'm in the mood for drinking semi-dry red wine! "
=> [I'm,in,the,mood,for,drinking,semi,dry,red,wine!]
|
"Red wine"
=>[Re,Red,ed,wi,win,wine,in,ine,ne]
|
"Red wine"
=>[Re,Red,wi,win,wine]
|
" I'm in the mood for drinking semi-dry red wine! "
=> [I'm in the mood for drinking semi-dry red wine!]
|
"I, like, red, wine!"
=>[I,like,red,wine!]
|
[I'm,in,the,mood,for,drinking,semi,dry,red,wine]
=> [I'm,in,the,mood,for,drinking,semi,dry,red,wine]
|
[I'm,in,the,mood,for,drinking,semi,dry,red,wine]
=> [i'm,in,the,mood,for,drinking,semi,dry,red,wine]
|
[I'm,in,the,mood,for,drinking,semi,dry,red,wine]
=> [I'M,IN,THE,MOOD,FOR,DRINKING,SEMI,DRY,RED,WINE]
|
[Red,wine]
=>[Re,Red,ed,wi,win,wine,in,ine,ne]
|
[Red,wine]
=>[Re,Red,wi,win,wine]
|
[I'm,in,the,mood,for,drinking,semi,dry,red,wine]
=> [I'm,mood,drinking,semi,dry,red,wine]
|
[Wi-Fi,PowerShell,CE1000,Andy's]
=> [Wi,Fi,Power,Shell,CE,1000,Andy]
|
[I'm,in,the,mood,for,drinking,semi,dry,red,wine]
=> [I'm,mood,drink,semi,dry,red,wine]
|
Protected terms:[drinking]
[I'm,in,the,mood,for,drinking,semi,dry,red,wine]
=> [I'm,mood,drinking,semi,dry,red,wine]
|
[I'm,in,the,mood,for,drinking,semi,dry,red,wine]
=> [I'm,mood,drink,semi,dry,red,wine]
|
[I,am,very,happy]
=> [I,am,very,happy/delighted]
|
" I'm in the mood for drinking semi-dry red wine! "
=> [i,m,in,the,mood,for,drinking,semi,dry,red,wine]
|
" I'm in the mood for drinking semi-dry red wine! "
=> [i,m,in,the,mood,for,drinking,semi,dry,red,wine]
|
" I'm in the mood for drinking semi-dry red wine! "
=> [i,m,mood,drinking,semi,dry,red,wine]
|
" I'm in the mood for drinking semi-dry red wine! "
=> [i'm,mood,drink,semi,dry,red,wine]
|
" I'm in the mood for drinking semi-dry red wine! "
=> [i'm in the mood for drinking semi-dry red wine!]
|
"I, like, red, wine! "
=> [I,like,red,wine!]
|
PUT my_index
{
"settings": {
"analysis": {
"analyzer": {
"my_custom_analyzer": {
"type": "custom",
"char_filter": [
"emoticons"
],
"tokenizer": "punctuation",
"filter": [
"lowercase",
"english_stop"
]
}
},
"tokenizer": {
"punctuation": {
"type": "pattern",
"pattern": "[ .,!?]"
}
},
"char_filter": {
"emoticons": {
"type": "mapping",
"mappings": [
":) => _happy_",
":( => _sad_"
]
}
},
"filter": {
"english_stop": {
"type": "stop",
"stopwords": "_english_"
}
}
}
}
}
|
POST my_index/_analyze
{
"text": "I'm a :) person, and you?"
}
|
{
"tokens" : [
{
"token" : "i'm",
"start_offset" : 0,
"end_offset" : 3,
"type" : "word",
"position" : 0
},
{
"token" : "_happy_",
"start_offset" : 6,
"end_offset" : 8,
"type" : "word",
"position" : 2
},
{
"token" : "person",
"start_offset" : 9,
"end_offset" : 15,
"type" : "word",
"position" : 3
},
{
"token" : "you",
"start_offset" : 21,
"end_offset" : 24,
"type" : "word",
"position" : 5
}
]
}
|
PUT /my_index/default/_mapping
{
"properties": {
"description": {
"type":"text",
"analyzer":"my_custom_analyzer"
},
"teaser":{
"type":"text",
"analyzer": "standard"
}
}
}
POST /my_index/default/1
{
"description":":)",
"teaser":":)"
}
GET /my_index/default/_search
{
"query": {
"match": {
"description":"_happy_"
}
}
}
|
{
"took" : 17,
"timed_out" : false,
"_shards" : {
"total" : 5,
"successful" : 5,
"skipped" : 0,
"failed" : 0
},
"hits" : {
"total" : 1,
"max_score" : 0.2876821,
"hits" : [
{
"_index" : "my_index",
"_type" : "default",
"_id" : "1",
"_score" : 0.2876821,
"_source" : {
"description" : ":) drinking :(",
"teaser" : ":) drinking :("
}
}
]
}
}
|
POST /my_index/_close
PUT /my_index/_settings
{
"analysis": {
"analyzer": {
"french_stop":{
"type":"standard",
"stopwords":"_french_"
}
}
}
}
POST /my_index/_open
|
'Monitoring > Elasticsearch' 카테고리의 다른 글
09. Compound Query (0) | 2020.01.17 |
---|---|
08. Query (0) | 2020.01.17 |
06. Mapping (0) | 2020.01.17 |
05. Document (0) | 2020.01.17 |
04. Cluster (0) | 2020.01.17 |