Merge commit '3a8370e1f4' into kb_migration

This commit is contained in:
KMY 2023-08-15 17:58:03 +09:00
commit 61e4be24e1
44 changed files with 503 additions and 278 deletions

View file

@@ -1,7 +1,7 @@
# frozen_string_literal: true
class AccountsIndex < Chewy::Index
settings index: { refresh_interval: '30s' }, analysis: {
settings index: index_preset(refresh_interval: '30s'), analysis: {
filter: {
english_stop: {
type: 'stop',

View file

@@ -1,7 +1,7 @@
# frozen_string_literal: true
class InstancesIndex < Chewy::Index
settings index: { refresh_interval: '30s' }
settings index: index_preset(refresh_interval: '30s')
index_scope ::Instance.searchable

View file

@@ -3,7 +3,7 @@
class StatusesIndex < Chewy::Index
include FormattingHelper
DEVELOPMENT_SETTINGS = {
settings index: index_preset(refresh_interval: '30s', number_of_shards: 5), analysis: {
filter: {
english_stop: {
type: 'stop',
@@ -31,52 +31,7 @@ class StatusesIndex < Chewy::Index
),
},
},
}.freeze
PRODUCTION_SETTINGS = {
filter: {
english_stop: {
type: 'stop',
stopwords: '_english_',
},
english_stemmer: {
type: 'stemmer',
language: 'english',
},
english_possessive_stemmer: {
type: 'stemmer',
language: 'possessive_english',
},
},
analyzer: {
content: {
tokenizer: 'uax_url_email',
filter: %w(
english_possessive_stemmer
lowercase
asciifolding
cjk_width
english_stop
english_stemmer
),
},
sudachi_analyzer: {
filter: [],
type: 'custom',
tokenizer: 'sudachi_tokenizer',
},
},
tokenizer: {
sudachi_tokenizer: {
resources_path: '/etc/elasticsearch/sudachi',
split_mode: 'C',
type: 'sudachi_tokenizer',
discard_punctuation: 'true',
},
},
}.freeze
settings index: { refresh_interval: '30s' }, analysis: Rails.env.development? ? DEVELOPMENT_SETTINGS : PRODUCTION_SETTINGS
}
# We do not use delete_if option here because it would call a method that we
# expect to be called with crutches without crutches, causing n+1 queries

View file

@@ -1,7 +1,7 @@
# frozen_string_literal: true
class TagsIndex < Chewy::Index
settings index: { refresh_interval: '30s' }, analysis: {
settings index: index_preset(refresh_interval: '30s'), analysis: {
analyzer: {
content: {
tokenizer: 'keyword',