# Logstash Configuration for LogServer
# Handles Beats input (API key authentication is still a TODO; see the filter section)
input {
  # Beats input for Filebeat clients
  beats {
    port => 5044
    ssl => false  # Set to true for production with proper certificates
    # API key authentication handled via filter below
  }
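  # Minimal TLS sketch for the beats input above (commented out). The
  # certificate and key paths are placeholders and assume certs are mounted
  # into the Logstash container:
  #   ssl => true
  #   ssl_certificate => "/usr/share/logstash/certs/logstash.crt"
  #   ssl_key => "/usr/share/logstash/certs/logstash.key"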
  # Optional: Syslog input for direct syslog shipping
  tcp {
    port => 514
    type => "syslog"
  }
  udp {
    port => 514
    type => "syslog"
  }
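  # Note: binding to port 514 needs root or CAP_NET_BIND_SERVICE. If Logstash
  # runs unprivileged, a common workaround is to listen on a high port
  # (e.g. 5514) and map 514 to it at the Docker/host level.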
}
filter {
  # Note: API key validation would go here in production
  # For now, accepting all connections for simplicity
  # TODO: Implement proper API key validation
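  # One possible approach (commented out): assume each Filebeat client ships a
  # custom field fields.api_key and drop events whose key does not match the
  # placeholder value below.
  # if [fields][api_key] != "REPLACE_WITH_EXPECTED_KEY" {
  #   drop { }
  # }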
  # Parse Docker logs
  if [docker] {
    # Docker metadata is already parsed by Filebeat
    mutate {
      add_field => {
        "container_name" => "%{[docker][container][name]}"
        "container_id" => "%{[docker][container][id]}"
        "container_image" => "%{[docker][container][image]}"
      }
    }
  }
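  # (The [docker][container][*] fields above are typically populated by
  # Filebeat's add_docker_metadata processor; the exact field layout can vary
  # by Filebeat version.)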
  # Parse syslog
  if [type] == "syslog" {
    grok {
      match => {
        "message" => "%{SYSLOGLINE}"
      }
    }
    date {
      # Double-space form matches syslog's space-padded single-digit days
      match => [ "timestamp", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
    }
  }
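  # (Lines that do not match %{SYSLOGLINE} are tagged _grokparsefailure and
  # pass through unparsed.)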
  # Parse JSON logs if they exist
  if [message] =~ /^\{.*\}$/ {
    json {
      source => "message"
      target => "json_message"
    }
  }
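  # (Messages that only look like JSON are tagged _jsonparsefailure by the
  # json filter; the original message field is left intact.)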
  # Add timestamp if not present
  if ![timestamp] {
    mutate {
      add_field => { "timestamp" => "%{@timestamp}" }
    }
  }
  # Clean up metadata
  mutate {
    remove_field => [ "@version", "beat", "offset", "prospector" ]
  }
}
output {
  # Send to Elasticsearch with authentication
  elasticsearch {
    hosts => ["elasticsearch:9200"]
    user => "elastic"
    password => "${ELASTIC_PASSWORD:changeme}"
    # Index per Beat and version (see note below for non-Beats events)
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
    # Manage index templates
    manage_template => true
    template_overwrite => true
  }
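  # Note: events from the syslog inputs carry no [@metadata][beat], so the
  # index pattern above would be written with the literal "%{[@metadata][beat]}"
  # text. A commented-out sketch of routing them separately (the index name is
  # an assumption; the Beats output above would also need a matching guard):
  # if [type] == "syslog" {
  #   elasticsearch {
  #     hosts => ["elasticsearch:9200"]
  #     user => "elastic"
  #     password => "${ELASTIC_PASSWORD:changeme}"
  #     index => "syslog-%{+YYYY.MM.dd}"
  #   }
  # }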
  # Optional: Debug output (comment out in production)
  # stdout {
  #   codec => rubydebug
  # }
}