Architecture session: queue topology, log schema, REC templates, config refinements

- Rename env.toml to env_{dev,qa,prod}.toml for self-documentation; enforce via gitignore
- Config loader selects env file via BEDS_ENV environment variable, defaults to dev
- Set wbid to "ms" in beds.toml
- Define queue topology: rel/rec .read .write .obj, log, adm, mig
- Define log event schema: compound event_id (node.env.guid), parent_id, depth,
  level/level_val, resource, service, env, node, file, method, line, trace, message, created
- Add example_rec.toml — canonical self-documenting REC template for future developers
- Add mst_logger_rec.toml — logger collection template derived from log event schema

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-04-02 17:07:18 -07:00
parent 850993edb1
commit 8a89fe3403
5 changed files with 711 additions and 3 deletions

View File

@@ -0,0 +1,254 @@
# =============================================================================
# BEDS REC Template — Logger Collection
# =============================================================================
#
# This template defines the schema for the BEDS logging collection.
# All BEDS nodes publish log events to this collection via the LOG exchange.
# Routing key format: log.{env}.{level}
#
# DESIGN NOTES:
# -------------
# - Append-only: updates are disabled. Log records are immutable.
# - No caching: recursion risk and log data is never stale-read.
# - No auditing: logging the logger creates infinite recursion.
# - No journaling: same reason.
# - No record locking: append-only makes locking unnecessary.
# - Hard deletes enabled: log pruning/warehousing removes records permanently.
# - is_internal = true: excluded from the public REST API catalog.
# - TTL index on created: logs expire automatically per retention policy.
#
# AUTHOR: ms
# VERSION: 1.0
# =============================================================================
# =============================================================================
# IDENTITY
# =============================================================================
# Schema version of this template file (integer, not the collection's data version).
version = 1
# Owning BEDS service role for this collection.
service = "admin"
# Template type discriminator — "rec" = record/collection template.
schema = "rec"
template_class = "Logger"
# MongoDB collection name. NOTE(review): camelCase ("msLogs") while field
# keys below are snake_case — confirm it matches the naming convention of
# the other collection templates.
collection = "msLogs"
# Suffix carried by most field names in [fields] below (e.g. level_log,
# env_log) — presumably a namespacing convention enforced by the loader;
# TODO confirm.
extension = "_log"
# Warehouse template reference — empty here; presumably means "use default
# or none". Verify against the warehouse loader.
wh_template = ""
# =============================================================================
# BEHAVIOURAL FLAGS
# =============================================================================
closed_class = true     # internal framework collection — no external access
hard_deletes = true     # log pruning permanently removes records
updates_enabled = false # log records are immutable after insert
# NOTE(review): string-valued while every sibling flag is a boolean —
# presumably the loader expects an enum ("disabled"/"enabled"/…) here;
# confirm before normalizing to a boolean.
auditing = "disabled"   # never audit the logger — infinite recursion
journaling = false      # never journal the logger — same reason
record_history = false  # no history tracking on log records
default_status = "active"
search_status = "active"
record_locking = false  # append-only — locking unnecessary
query_timers = false    # disabled — timer events would log, causing recursion
primary_key = "token"
tokens = true
cache_ttl = 0           # no caching — log data is never stale-read
is_internal = true      # excluded from public REST API catalog
# =============================================================================
# FIELDS
# Values are type-name strings consumed by the schema loader
# ("string" | "integer" | "array" | "object").
# Most keys carry the "_log" suffix declared by this template's
# extension key — presumably to namespace them; confirm in the loader.
# =============================================================================
[fields]
# System fields
_id = "object" # MongoDB native document ID — never returned to clients
db_token = "string" # BEDS GUID token — externally-exposed primary key
status = "string" # record status
# Event lineage
event_id = "string" # compound event ID: node.env.guid — broker event identifier
parent_id = "string" # parent broker event compound ID — empty string if root event
depth = "integer" # levels from root event (0 = root)
# Log level
level_log = "string" # log level label: debug|data|info|error|warning|fatal|timer|event
level_val = "integer" # log level integer: -1 through 7 (enables range queries)
# Origin
# NOTE(review): "resource" is the only origin field without the _log suffix —
# confirm this is intentional and not a missed rename.
resource = "string" # 4-char component identifier e.g. LOGR — matches console output tag
service_log = "string" # node role that issued the event e.g. app_server|admin|logger
env_log = "string" # environment: dev|qa|prod
node_log = "string" # node name from config e.g. registered_users
# Source location
file_log = "string" # source file where the log call originated
method_log = "string" # calling method name
line_log = "integer" # line number of the log call
# Payload
message_log = "string" # the log message text
trace_log = "array" # stack trace — empty array unless trace=true passed to logger
# Timestamp
created = "integer" # epoch timestamp — record creation time
# =============================================================================
# PROTECTED FIELDS
# =============================================================================
# NOTE(review): the [fields] table opened above is still in scope, so
# protected_fields, index_fields, and index_name_list all parse as keys of
# the "fields" table rather than root-level keys (in TOML, keys belong to
# the most recently opened table). Confirm the config loader reads them
# from fields.*; if it expects them at the document root they will be
# silently missing.
protected_fields = [
"_id",
"db_token",
"event_id",
"parent_id",
"depth",
"created",
]
# =============================================================================
# INDEX FIELDS
# Fields that participate in any index (single-field, compound, or unique).
# =============================================================================
index_fields = [
"_id",
"db_token",
"event_id",
"parent_id",
"depth",
"level_val",
"level_log",
"node_log",
"env_log",
"service_log",
"created",
"status",
]
# =============================================================================
# INDEX NAME REGISTRY
# Names listed here correspond to the [compound_indexes] entries below.
# =============================================================================
index_name_list = [
"cIdx1Log", # compound: event_id + depth — full lineage traversal
"cIdx2Log", # compound: env_log + level_val — range queries by env and severity
]
# =============================================================================
# SINGLE-FIELD INDEXES
# Values are index sort directions (1 = ascending, -1 = descending).
# =============================================================================
[single_field_indexes]
db_token = 1
parent_id = 1 # traverse up the event tree
level_log = 1 # filter by level label
node_log = 1 # filter by originating node
created = -1 # most recent first
status = -1
# =============================================================================
# COMPOUND INDEXES
# Each value is an ordered list of [field, direction] pairs; key order
# within each pair list defines the index key order.
# =============================================================================
[compound_indexes]
cIdx1Log = [["event_id", 1], ["depth", 1]] # reconstruct full event tree
cIdx2Log = [["env_log", 1], ["level_val", 1]] # prod fatals, dev debug, etc.
# =============================================================================
# MULTIKEY INDEXES
# =============================================================================
# trace_log is an array but we do not index it — trace data is fetched by
# event_id, not searched. No multikey indexes required for this collection.
# =============================================================================
# UNIQUE INDEXES
# =============================================================================
[unique_indexes]
db_token = 1
# =============================================================================
# PARTIAL INDEXES
# =============================================================================
# No partial indexes for this collection.
# =============================================================================
# TTL INDEXES
# Log retention policy — records expire automatically.
# Adjust the value to match your retention requirement.
# 2592000 = 30 days
# =============================================================================
[ttl_indexes]
# Value is the expiry window in seconds measured from the indexed timestamp.
# 2592000 = 30 days. Adjust to match your log retention policy.
created = 2592000
# =============================================================================
# CACHE MAP
# Log records are internal only — cache map controls what the admin UI sees.
# Schema field names are never exposed externally.
# Maps each internal snake_case field name to the camelCase name used in
# admin-facing output.
# =============================================================================
[cache_map]
db_token = "id"
event_id = "eventId"
parent_id = "parentId"
depth = "depth"
level_log = "level"
level_val = "levelValue"
resource = "resource"
service_log = "service"
env_log = "env"
node_log = "node"
file_log = "file"
method_log = "method"
line_log = "line"
message_log = "message"
trace_log = "trace"
created = "createdDate"
# =============================================================================
# REGEX FIELDS
# =============================================================================
# NOTE(review): because the [cache_map] header above is still open, this
# array parses as cache_map.regex_fields, not a root-level key — and it is
# therefore scoped differently from protected_fields/index_fields, which
# land under the "fields" table. Confirm the loader expects it here; if the
# scoping is unintentional, move these arrays above the first table header.
regex_fields = ["message_log", "file_log"]
# =============================================================================
# SUB-COLLECTIONS
# =============================================================================
# No sub-collections for this collection.
# =============================================================================
# WAREHOUSING
# Log records are eligible for warehousing once they exceed the retention
# threshold. Warehouse to COOL storage to maintain schema for audit purposes.
# =============================================================================
[warehouse]
supported = true
remote_support = false
automated = true
dynamic = false
interval = "M" # warehouse monthly
override = false
delete = "H" # hard delete source records after warehousing
# Qualifier clauses select which records are eligible for warehousing.
# NOTE(review): operand = "null" is a literal string sentinel (TOML has no
# null type) — presumably meaning "compare against a caller-supplied value";
# confirm against the warehouse query builder.
[warehouse.qualifier]
created = { operand = "null", operator = "lt", value = "" } # caller supplies cutoff date
status = { operand = "null", operator = "eq", value = "active" }
# Combines the two clauses above: created < cutoff AND status == "active".
logical_op = "and"