docs: Add 6 and update 11 files

Your Name
2025-09-20 10:04:42 +12:00
parent 9045ee5def
commit 70585358b8
17 changed files with 1147 additions and 659 deletions


@@ -3,3 +3,4 @@
# Generated by generate-api-key.sh
api_keys:
video: a7798c63c2ac439b5ba20f3bf8bf27b5361231cdcbdc4fc9d7af715308fdf707


@@ -0,0 +1,94 @@
# Logstash Configuration for LogServer
# Handles Beats input with API key authentication

input {
  # Beats input for Filebeat clients
  beats {
    port => 5044
    ssl => false # Set to true for production with proper certificates
    # API key authentication handled via filter below
  }

  # Optional: Syslog input for direct syslog shipping
  tcp {
    port => 514
    type => "syslog"
  }
  udp {
    port => 514
    type => "syslog"
  }
}

filter {
  # Note: API key validation would go here in production
  # For now, accepting all connections for simplicity
  # TODO: Implement proper API key validation

  # Parse Docker logs
  if [docker] {
    # Docker metadata is already parsed by Filebeat
    mutate {
      add_field => {
        "container_name" => "%{[docker][container][name]}"
        "container_id" => "%{[docker][container][id]}"
        "container_image" => "%{[docker][container][image]}"
      }
    }
  }

  # Parse syslog
  if [type] == "syslog" {
    grok {
      match => {
        "message" => "%{SYSLOGLINE}"
      }
    }
    date {
      match => [ "timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
    }
  }

  # Parse JSON logs if they exist
  if [message] =~ /^\{.*\}$/ {
    json {
      source => "message"
      target => "json_message"
    }
  }

  # Add timestamp if not present
  if ![timestamp] {
    mutate {
      add_field => { "timestamp" => "%{@timestamp}" }
    }
  }

  # Clean up metadata
  mutate {
    remove_field => [ "@version", "beat", "offset", "prospector" ]
  }
}

output {
  # Send to Elasticsearch with authentication
  elasticsearch {
    hosts => ["elasticsearch:9200"]
    user => "elastic"
    password => "${ELASTIC_PASSWORD:changeme}"

    # Use different indices based on input type
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"

    # Manage index templates
    manage_template => true
    template_overwrite => true
  }

  # Optional: Debug output (comment out in production)
  # stdout {
  #   codec => rubydebug
  # }
}
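
Note: the beats input above ships with ssl => false, and its own comment points at enabling TLS for production. The 7.x beats input plugin accepts ssl, ssl_certificate and ssl_key options; the sketch below is illustrative only and is not part of this commit, and the certificate paths are placeholders, not files this repository provides.

  input {
    beats {
      port => 5044
      ssl => true
      # Placeholder paths - point these at a real certificate and key
      ssl_certificate => "/etc/logstash/certs/logstash.crt"
      ssl_key => "/etc/logstash/certs/logstash.key"
    }
  }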


@@ -16,14 +16,18 @@ LS_PIPELINE_WORKERS=2
# Kibana settings
KIBANA_VERSION=7.17.23
KIBANA_PASSWORD=changeme
KIBANA_BASE_PATH=/

# Authentication (IMPORTANT: Change this!)
ELASTIC_PASSWORD=changeme # Password for 'elastic' user in Kibana/Elasticsearch

# Ports
KIBANA_PORT=5601
LOGSTASH_BEATS_PORT=5044
LOGSTASH_SYSLOG_PORT=514

# Server configuration
SERVER_PUBLICBASEURL=http://localhost:5601 # Change to your server's actual URL

# Log retention
LOG_RETENTION_DAYS=30
LOG_MAX_SIZE_GB=50
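
Note: LOGSTASH_BEATS_PORT and LOGSTASH_SYSLOG_PORT are also hard-coded as literals in the pipeline config above. Logstash supports ${VAR:default} substitution inside pipeline files, so a variant like the following could keep the two files in sync; this is a sketch only (not part of this commit) and assumes the variables are exported into the Logstash container's environment.

  input {
    beats {
      # Falls back to 5044 if the variable is not set
      port => "${LOGSTASH_BEATS_PORT:5044}"
      ssl => false
    }
  }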


@@ -0,0 +1,43 @@
# Ruby script for Logstash to validate API keys
# This is a simplified validation - in production, use proper authentication

require 'yaml'

def register(params)
  @api_keys_file = params["api_keys_file"]
end

def filter(event)
  # Get the API key from the event
  api_key = event.get("[api_key]") || event.get("[@metadata][api_key]")

  # If no API key, pass through (for backwards compatibility)
  # In production, you should reject events without valid keys
  if api_key.nil? || api_key.empty?
    # For now, allow events without API keys
    # event.cancel # Uncomment to require API keys
    return [event]
  end

  # Load API keys from file
  begin
    if File.exist?(@api_keys_file)
      config = YAML.load_file(@api_keys_file)
      valid_keys = config['api_keys'].values if config && config['api_keys']

      # Check if the provided key is valid
      if valid_keys && valid_keys.include?(api_key)
        # Valid key - let the event through
        event.set("[@metadata][authenticated]", true)
      else
        # Invalid key - drop the event
        event.cancel
      end
    end
  rescue => e
    # Log error but don't crash
    event.set("[@metadata][auth_error]", e.message)
  end

  return [event]
end
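
Note: for this script to run at all, it has to be referenced from the pipeline's filter block (the "TODO: Implement proper API key validation" spot) via Logstash's ruby filter plugin, which loads a script file and passes it parameters via script_params. A minimal sketch, assuming hypothetical mount paths for the script and the generated api_keys.yml; neither path nor this filter block appears in the commit itself.

  filter {
    ruby {
      # Placeholder path to this script inside the Logstash container
      path => "/usr/share/logstash/scripts/validate_api_key.rb"
      # Delivered to register(params) as params["api_keys_file"] (placeholder path)
      script_params => { "api_keys_file" => "/usr/share/logstash/config/api_keys.yml" }
    }
  }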