milestone: enforce required services and add mongo logger PoC path

commit 836a968806 (parent dd04fb5168)
Date: 2026-04-06 18:16:48 -07:00
17 changed files with 1081 additions and 112 deletions

Cargo.lock (generated)

@@ -2254,6 +2254,7 @@ dependencies = [
"serde_json", "serde_json",
"thiserror 1.0.69", "thiserror 1.0.69",
"tokio", "tokio",
"toml",
"tracing", "tracing",
"tracing-journald", "tracing-journald",
"tracing-subscriber", "tracing-subscriber",


@@ -6,6 +6,7 @@ edition = "2024"
 [dependencies]
 serde = { version = "1", features = ["derive"]}
 serde_json = "1"
+toml = "0.8"
 config = "0.14"
 thiserror = "1"
 tracing = "0.1"

src/brokers/logger_store.rs (new file, 182 lines)

@@ -0,0 +1,182 @@
use std::collections::HashMap;
use std::sync::OnceLock;
use futures_lite::StreamExt;
use mongodb::Client;
use mongodb::bson::{self, Document, doc};
use serde_json::Value;
use crate::config::RecNodeConfig;
const LOGGER_COLLECTION_NAME: &str = "msLogs";
struct LoggerMongoStore {
client: Client,
db_name: String,
}
static LOGGER_STORE: OnceLock<LoggerMongoStore> = OnceLock::new();
pub fn is_logger_template(template: &str) -> bool {
matches!(
template.to_ascii_lowercase().as_str(),
"logger" | "log" | "mst_logger" | "mst_logger_rec"
)
}
pub async fn init_from_rec_services(
rec_services: &HashMap<String, RecNodeConfig>,
env_name: &str,
) -> Result<(), String> {
if LOGGER_STORE.get().is_some() {
return Ok(());
}
let rec_node = rec_services
.get("admin")
.or_else(|| rec_services.get("app_server"))
.or_else(|| rec_services.values().next())
.ok_or_else(|| "rec_services is empty; cannot initialize logger store".to_string())?;
let is_dev = matches!(env_name, "development" | "dev");
let auth_uri = format!(
"mongodb://{}:{}@{}:{}/?authSource={}",
rec_node.user,
rec_node.pass,
rec_node.host,
rec_node.port,
rec_node.database,
);
let unauth_uri = format!("mongodb://{}:{}/", rec_node.host, rec_node.port);
let client = if rec_node.user.trim().is_empty() {
connect_and_ping(&unauth_uri, &rec_node.database).await?
} else {
match connect_and_ping(&auth_uri, &rec_node.database).await {
Ok(client) => client,
Err(auth_err) if is_dev => {
tracing::warn!(
"Mongo authenticated init failed in dev, retrying unauthenticated: {}",
auth_err
);
connect_and_ping(&unauth_uri, &rec_node.database).await?
}
Err(auth_err) => return Err(auth_err),
}
};
// In development, bootstrap the target DB if it does not exist yet.
if matches!(env_name, "development" | "dev") {
let db_names = client
.list_database_names()
.await
.map_err(|e| format!("Mongo list databases failed: {}", e))?;
if !db_names.iter().any(|name| name == &rec_node.database) {
let bootstrap_doc = doc! {
"_beds_bootstrap": true,
"created": epoch_secs(),
};
client
.database(&rec_node.database)
.collection::<Document>(LOGGER_COLLECTION_NAME)
.insert_one(bootstrap_doc)
.await
.map_err(|e| format!("Mongo dev DB bootstrap insert failed: {}", e))?;
client
.database(&rec_node.database)
.collection::<Document>(LOGGER_COLLECTION_NAME)
.delete_one(doc! { "_beds_bootstrap": true })
.await
.map_err(|e| format!("Mongo dev DB bootstrap cleanup failed: {}", e))?;
}
}
let _ = LOGGER_STORE.set(LoggerMongoStore {
client,
db_name: rec_node.database.clone(),
});
Ok(())
}
pub async fn append_log(mut data: HashMap<String, Value>) -> Result<String, String> {
let store = LOGGER_STORE
.get()
.ok_or_else(|| "logger store is not initialized".to_string())?;
let token = data
.get("db_token")
.and_then(|v| v.as_str())
.map(ToString::to_string)
.unwrap_or_else(|| format!("log-{}", epoch_secs()));
data.entry("db_token".to_string())
.or_insert_with(|| Value::String(token.clone()));
data.entry("created".to_string())
.or_insert_with(|| Value::from(epoch_secs() as i64));
let doc = bson::to_document(&data)
.map_err(|e| format!("logger write encode failed: {}", e))?;
store
.client
.database(&store.db_name)
.collection::<Document>(LOGGER_COLLECTION_NAME)
.insert_one(doc)
.await
.map_err(|e| format!("logger write failed: {}", e))?;
Ok(token)
}
pub async fn fetch_recent(limit: usize) -> Result<Vec<Value>, String> {
let store = LOGGER_STORE
.get()
.ok_or_else(|| "logger store is not initialized".to_string())?;
let mut cursor = store
.client
.database(&store.db_name)
.collection::<Document>(LOGGER_COLLECTION_NAME)
.find(doc! {})
.await
.map_err(|e| format!("logger fetch failed: {}", e))?;
let mut out = Vec::new();
while let Some(next) = cursor.next().await {
let doc = next.map_err(|e| format!("logger fetch cursor error: {}", e))?;
let json = serde_json::to_value(&doc)
.map_err(|e| format!("logger fetch decode failed: {}", e))?;
out.push(json);
}
out.reverse();
out.truncate(limit);
Ok(out)
}
fn epoch_secs() -> i64 {
std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_secs() as i64
}
async fn connect_and_ping(uri: &str, database: &str) -> Result<Client, String> {
let client = Client::with_uri_str(uri)
.await
.map_err(|e| format!("Mongo logger client init failed: {}", e))?;
client
.database(database)
.run_command(doc! { "ping": 1 })
.await
.map_err(|e| format!("Mongo logger ping failed: {}", e))?;
Ok(client)
}
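Taken together, the module's public surface is `is_logger_template`, `init_from_rec_services`, `append_log`, and `fetch_recent`. A minimal caller sketch — not part of this commit — assuming `init_from_rec_services` already ran during IPL, and borrowing the `message_log`/`level_log` field names from the integration test later in this commit:

```rust
use std::collections::HashMap;
use serde_json::Value;
use crate::brokers::logger_store;

// Sketch: append one log document, then confirm it comes back from fetch_recent.
async fn logger_store_demo() -> Result<(), String> {
    let mut entry: HashMap<String, Value> = HashMap::new();
    entry.insert("message_log".into(), Value::String("hello".into()));
    entry.insert("level_log".into(), Value::String("info".into()));

    // append_log backfills db_token/created when absent and returns the token.
    let token = logger_store::append_log(entry).await?;

    // fetch_recent reverses the cursor output and truncates to `limit`.
    let recent = logger_store::fetch_recent(10).await?;
    assert!(recent.iter().any(|doc| doc["db_token"] == token.as_str()));
    Ok(())
}
```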


@@ -21,6 +21,7 @@
 //! * `2026-04-05` - mks - original coding
 pub mod error;
+pub mod logger_store;
 pub mod payload;
 pub mod r_broker;
 pub mod w_broker;


@@ -44,6 +44,8 @@ use lapin::{
 };
 use crate::services::amqp::EXCHANGE_NAME;
+use crate::brokers::logger_store;
+use crate::brokers::payload::BrokerPayload;
 use super::error::BrokerError;
 /// Routing key this broker binds to.
@@ -167,7 +169,7 @@ async fn run(
                 tracing::info!("rBroker[{}] shutdown event received — exiting", instance_id);
                 break;
             }
-            "fetch" => handle_fetch(&delivery.data, instance_id),
+            "fetch" => handle_fetch(&delivery.data, instance_id).await,
             unknown => {
                 tracing::warn!("rBroker[{}] unknown event type '{}'", instance_id, unknown);
                 None
@@ -229,12 +231,56 @@ fn handle_ping(instance_id: u32) -> Option<Vec<u8>> {
 /// # History
 ///
 /// * `2026-04-05` - mks - stub
-fn handle_fetch(data: &[u8], instance_id: u32) -> Option<Vec<u8>> {
-    tracing::warn!(
-        "rBroker[{}] fetch event received ({} bytes) — factory dispatch not yet implemented",
-        instance_id,
-        data.len()
-    );
-    let response = r#"{"status":"error","code":"NOT_IMPLEMENTED","message":"factory dispatch not yet implemented"}"#;
-    Some(response.as_bytes().to_vec())
+async fn handle_fetch(data: &[u8], instance_id: u32) -> Option<Vec<u8>> {
+    let payload: BrokerPayload = match serde_json::from_slice(data) {
+        Ok(p) => p,
+        Err(e) => {
+            let response = serde_json::json!({
+                "status": "error",
+                "code": "INVALID_PAYLOAD",
+                "message": format!("invalid JSON payload: {}", e),
+            });
+            return Some(response.to_string().into_bytes());
+        }
+    };
+    if !logger_store::is_logger_template(&payload.template) {
+        tracing::warn!(
+            "rBroker[{}] fetch template '{}' not implemented yet",
+            instance_id,
+            payload.template
+        );
+        let response = serde_json::json!({
+            "status": "error",
+            "code": "NOT_IMPLEMENTED",
+            "message": "only logger fetch is implemented in PoC step 1",
+        });
+        return Some(response.to_string().into_bytes());
+    }
+    let limit = payload
+        .data
+        .get("limit")
+        .and_then(|v| v.as_u64())
+        .map(|n| n as usize)
+        .unwrap_or(50);
+    let logs = match logger_store::fetch_recent(limit).await {
+        Ok(items) => items,
+        Err(e) => {
+            let response = serde_json::json!({
+                "status": "error",
+                "code": "LOGGER_STORE_UNAVAILABLE",
+                "message": e,
+            });
+            return Some(response.to_string().into_bytes());
+        }
+    };
+    let response = serde_json::json!({
+        "status": "ok",
+        "code": "LOGGER_FETCH",
+        "count": logs.len(),
+        "logs": logs,
+    });
+    Some(response.to_string().into_bytes())
 }
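In wire terms, the handler above accepts a JSON body whose `template` must pass `is_logger_template` and whose `data.limit` is optional. A request/reply sketch, with shapes read off the code and names taken from the tests below (`BrokerPayload` is assumed to expose `template` plus a JSON-object `data` field):

```rust
// Sketch: body published to rec.read with AMQP message type "fetch".
let request = serde_json::json!({
    "template": "Logger",
    "data": { "limit": 10 }   // optional — handle_fetch defaults to 50
});

// Success reply emitted by handle_fetch:
// {"status":"ok","code":"LOGGER_FETCH","count":<n>,"logs":[ ... ]}
// Failure replies use codes INVALID_PAYLOAD, NOT_IMPLEMENTED, or LOGGER_STORE_UNAVAILABLE.
```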


@@ -44,6 +44,8 @@ use lapin::{
 };
 use crate::services::amqp::EXCHANGE_NAME;
+use crate::brokers::logger_store;
+use crate::brokers::payload::BrokerPayload;
 use super::error::BrokerError;
 /// Routing key this broker binds to.
@@ -156,7 +158,7 @@ async fn run(
                 tracing::info!("wBroker[{}] shutdown event received — exiting", instance_id);
                 break;
             }
-            "write" => handle_write(&delivery.data, instance_id),
+            "write" => handle_write(&delivery.data, instance_id).await,
             "update" => handle_update(&delivery.data, instance_id),
             "delete" => handle_delete(&delivery.data, instance_id),
             unknown => {
@@ -218,12 +220,46 @@ fn handle_ping(instance_id: u32) -> Option<Vec<u8>> {
 /// # History
 ///
 /// * `2026-04-05` - mks - stub
-fn handle_write(data: &[u8], instance_id: u32) -> Option<Vec<u8>> {
-    tracing::warn!(
-        "wBroker[{}] write event ({} bytes) — factory dispatch not yet implemented",
-        instance_id, data.len()
-    );
-    not_implemented_response()
+async fn handle_write(data: &[u8], instance_id: u32) -> Option<Vec<u8>> {
+    let payload: BrokerPayload = match serde_json::from_slice(data) {
+        Ok(p) => p,
+        Err(e) => {
+            let response = serde_json::json!({
+                "status": "error",
+                "code": "INVALID_PAYLOAD",
+                "message": format!("invalid JSON payload: {}", e),
+            });
+            return Some(response.to_string().into_bytes());
+        }
+    };
+    if !logger_store::is_logger_template(&payload.template) {
+        tracing::warn!(
+            "wBroker[{}] write template '{}' not implemented yet",
+            instance_id,
+            payload.template
+        );
+        return not_implemented_response();
+    }
+    let token = match logger_store::append_log(payload.data).await {
+        Ok(token) => token,
+        Err(e) => {
+            let response = serde_json::json!({
+                "status": "error",
+                "code": "LOGGER_STORE_UNAVAILABLE",
+                "message": e,
+            });
+            return Some(response.to_string().into_bytes());
+        }
+    };
+    let response = serde_json::json!({
+        "status": "ok",
+        "code": "LOGGER_WRITE",
+        "token": token,
+    });
+    Some(response.to_string().into_bytes())
 }
 /// Stub handler for `update` events.
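The write side mirrors the fetch path: `payload.data` is handed to `logger_store::append_log` as-is, so every key under `data` becomes a field of the stored document. A matching sketch (field names from the integration test later in this commit):

```rust
// Sketch: body published to rec.write with AMQP message type "write".
let request = serde_json::json!({
    "template": "Logger",
    "data": {
        "message_log": "poc-log-message",
        "level_log": "info",
        "service_log": "app_server"
    }
});

// Success reply: {"status":"ok","code":"LOGGER_WRITE","token":"<db_token>"}
```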


@@ -20,3 +20,4 @@ pub mod config;
 pub mod core;
 pub mod logging;
 pub mod services;
+pub mod template_registry;


@@ -29,6 +29,7 @@ mod config;
 mod core;
 mod logging;
 mod services;
+mod template_registry;
 /// Executes the BEDS Initial Program Load (IPL) sequence.
 ///
@@ -40,12 +41,13 @@ mod services;
 /// ## IPL Sequence
 /// 1. Load configuration (beds.toml + env override)
 /// 2. Initialize logging
-/// 3. Validate RabbitMQ reachability (TCP)
-/// 3b. Authenticate to RabbitMQ + declare beds.events exchange
-/// 4. Validate MongoDB reachability (TCP)
-/// 5. Validate MariaDB reachability (TCP)
-/// 6. Spawn broker pools (rBroker)
-/// 7. Node green
+/// 2b. Load and validate runtime REC templates
+/// 4. Validate RabbitMQ reachability (TCP)
+/// 4b. Authenticate to RabbitMQ + declare beds.events exchange
+/// 5. Validate MongoDB reachability (TCP)
+/// 6. Validate MariaDB reachability (TCP)
+/// 7. Spawn broker pools (rBroker)
+/// 8. Node green
 ///
 /// # Returns
 ///
@@ -68,56 +70,48 @@ async fn ipl() -> Result<(), String> {
tracing::info!("Configuration loaded"); tracing::info!("Configuration loaded");
tracing::info!("Logging initialized"); tracing::info!("Logging initialized");
// step 3: validate broker reachability (TCP) — fast pre-flight before auth // step 2b: load and validate REC templates used by runtime dispatch
match services::amqp::validate(&cfg.broker_services) { match template_registry::load_runtime_rec_templates("templates") {
Ok(()) => tracing::info!("RabbitMQ reachable"), Ok(templates) => tracing::info!("REC templates validated: {}", templates.len()),
Err(e) => { Err(e) => {
if cfg.id.env_name == "production" { if cfg.id.env_name == "production" {
return Err(e); return Err(format!("Template validation failed: {}", e));
} }
tracing::warn!("RabbitMQ unreachable (non-fatal in {}): {}", cfg.id.env_name, e); tracing::warn!(
"Template validation failed (non-fatal in {}): {}",
cfg.id.env_name,
e
);
} }
} }
// step 3b: authenticate to RabbitMQ and declare the beds.events exchange // step 4: validate broker reachability (TCP) — required in all environments
let amqp_conn = match services::amqp::AmqpConnection::connect(&cfg.broker_services).await { services::amqp::validate(&cfg.broker_services)?;
Ok(conn) => { tracing::info!("RabbitMQ reachable");
// step 4b: authenticate to RabbitMQ and declare the beds.events exchange
let amqp_conn = services::amqp::AmqpConnection::connect(&cfg.broker_services)
.await
.map_err(|e| format!("RabbitMQ authentication failed: {}", e))?;
tracing::info!("RabbitMQ authenticated"); tracing::info!("RabbitMQ authenticated");
Some(conn)
}
Err(e) => {
if cfg.id.env_name == "production" {
return Err(format!("RabbitMQ authentication failed: {}", e));
}
tracing::warn!("RabbitMQ authentication failed (non-fatal in {}): {}", cfg.id.env_name, e);
None
}
};
if let Some(ref conn) = amqp_conn { amqp_conn
match conn.declare_exchange().await { .declare_exchange()
Ok(()) => tracing::info!("AMQP exchange '{}' declared", services::amqp::EXCHANGE_NAME), .await
Err(e) => { .map_err(|e| format!("Exchange declaration failed: {}", e))?;
if cfg.id.env_name == "production" { tracing::info!("AMQP exchange '{}' declared", services::amqp::EXCHANGE_NAME);
return Err(format!("Exchange declaration failed: {}", e));
}
tracing::warn!("Exchange declaration failed (non-fatal in {}): {}", cfg.id.env_name, e);
}
}
}
// step 4: validate MongoDB reachability — fatal in production, non-fatal in all other envs // step 5: validate MongoDB reachability — required in all environments
match services::mongo::validate_all(&cfg.rec_services) { services::mongo::validate_all(&cfg.rec_services)?;
Ok(()) => tracing::info!("MongoDB reachable"), tracing::info!("MongoDB reachable");
Err(e) => {
if cfg.id.env_name == "production" {
return Err(e);
}
tracing::warn!("MongoDB unreachable (non-fatal in {}): {}", cfg.id.env_name, e);
}
}
// step 5: validate MariaDB reachability — fatal in production, non-fatal in all other envs // step 5b: initialize Mongo-backed logger store for rec.write/rec.read logger sequence
brokers::logger_store::init_from_rec_services(&cfg.rec_services, &cfg.id.env_name)
.await
.map_err(|e| format!("Mongo logger store initialization failed: {}", e))?;
tracing::info!("Mongo logger store initialized");
// step 6: validate MariaDB reachability — fatal in production, non-fatal in all other envs
// secondary instance failures are always non-fatal (handled inside validate_all) // secondary instance failures are always non-fatal (handled inside validate_all)
match services::mariadb::validate_all(&cfg.rel_services) { match services::mariadb::validate_all(&cfg.rel_services) {
Ok(()) => tracing::info!("MariaDB reachable"), Ok(()) => tracing::info!("MariaDB reachable"),
@@ -129,9 +123,9 @@ async fn ipl() -> Result<(), String> {
} }
} }
// step 6: spawn broker pools — queues are declared here, not at exchange declare time // step 7: spawn broker pools — queues are declared here, not at exchange declare time
// rBroker pool requires an authenticated AMQP connection; skip in non-prod if unavailable // rBroker pool requires an authenticated AMQP connection; skip in non-prod if unavailable
let _broker_handles = if let Some(ref conn) = amqp_conn { let _broker_handles: Vec<tokio::task::JoinHandle<()>> = {
use std::sync::Arc; use std::sync::Arc;
let shared_conn = Arc::new( let shared_conn = Arc::new(
lapin::Connection::connect( lapin::Connection::connect(
@@ -148,34 +142,18 @@ async fn ipl() -> Result<(), String> {
.await .await
.map_err(|e| format!("Broker pool connection failed: {}", e))?, .map_err(|e| format!("Broker pool connection failed: {}", e))?,
); );
let _ = conn; // IPL connection stays alive but broker pool owns its own connection
let r_handles = match brokers::spawn_r_broker_pool(Arc::clone(&shared_conn), &cfg.broker_services).await { let _ = &amqp_conn; // keep IPL AMQP connection alive
Ok(handles) => handles,
Err(e) => {
if cfg.id.env_name == "production" {
return Err(format!("rBroker pool failed to start: {}", e));
}
tracing::warn!("rBroker pool failed (non-fatal in {}): {}", cfg.id.env_name, e);
vec![]
}
};
let w_handles = match brokers::spawn_w_broker_pool(Arc::clone(&shared_conn), &cfg.broker_services).await { let r_handles = brokers::spawn_r_broker_pool(Arc::clone(&shared_conn), &cfg.broker_services)
Ok(handles) => handles, .await
Err(e) => { .map_err(|e| format!("rBroker pool failed to start: {}", e))?;
if cfg.id.env_name == "production" {
return Err(format!("wBroker pool failed to start: {}", e)); let w_handles = brokers::spawn_w_broker_pool(Arc::clone(&shared_conn), &cfg.broker_services)
} .await
tracing::warn!("wBroker pool failed (non-fatal in {}): {}", cfg.id.env_name, e); .map_err(|e| format!("wBroker pool failed to start: {}", e))?;
vec![]
}
};
r_handles.into_iter().chain(w_handles).collect() r_handles.into_iter().chain(w_handles).collect()
} else {
tracing::warn!("rBroker pool skipped — no AMQP connection");
vec![]
}; };
tracing::info!("BEDS IPL complete — node green"); tracing::info!("BEDS IPL complete — node green");


@@ -21,7 +21,7 @@ pub mod error;
 pub use connection::{AmqpConnection, EXCHANGE_NAME};
 pub use error::AmqpError; // will be used by broker pool error handling
-use std::net::TcpStream;
+use std::net::{TcpStream, ToSocketAddrs};
 use std::time::Duration;
 use crate::config::BrokerServicesConfig;
@@ -50,12 +50,27 @@ use crate::config::BrokerServicesConfig;
 pub fn validate(cfg: &BrokerServicesConfig) -> Result<(), String> {
     let addr_str = format!("{}:{}", cfg.app_server.host, cfg.app_server.port);
-    let addr: std::net::SocketAddr = addr_str
-        .parse()
-        .map_err(|e| format!("Invalid broker address {}: {}", addr_str, e))?;
-    TcpStream::connect_timeout(&addr, Duration::from_secs(5))
-        .map_err(|e| format!("RabbitMQ unreachable at {}: {}", addr_str, e))?;
+    let resolved = addr_str
+        .to_socket_addrs()
+        .map_err(|e| format!("Invalid broker address {}: {}", addr_str, e))?;
+    let mut last_err = None;
+    let mut tried = 0;
+    for addr in resolved {
+        tried += 1;
+        match TcpStream::connect_timeout(&addr, Duration::from_secs(5)) {
+            Ok(_) => return Ok(()),
+            Err(e) => last_err = Some(e),
+        }
+    }
+    if tried == 0 {
+        return Err(format!("Invalid broker address {}: no resolved addresses", addr_str));
+    }
+    if let Some(e) = last_err {
+        return Err(format!("RabbitMQ unreachable at {}: {}", addr_str, e));
+    }
     Ok(())
 }
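The `parse()`-to-`to_socket_addrs()` change is what lets the configured `host` be a DNS name instead of a bare IP, and a name can resolve to several candidates (for example IPv4 and IPv6), hence the try-each loop. A standalone illustration of the resolution step (host and port are placeholders); the MariaDB and MongoDB validators below repeat the same loop:

```rust
use std::net::ToSocketAddrs;

fn main() -> std::io::Result<()> {
    // "localhost:5672" commonly resolves to both 127.0.0.1:5672 and [::1]:5672,
    // so a single parse::<SocketAddr>() would have rejected the hostname outright.
    for addr in "localhost:5672".to_socket_addrs()? {
        println!("candidate: {}", addr);
    }
    Ok(())
}
```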


@@ -23,7 +23,7 @@
 //! * `2026-04-04` - mks - promoted to services/mariadb/
 use std::collections::HashMap;
-use std::net::TcpStream;
+use std::net::{TcpStream, ToSocketAddrs};
 use std::time::Duration;
 use crate::config::{RelInstanceConfig, RelNodeConfig};
@@ -78,12 +78,33 @@ pub fn validate_all(nodes: &HashMap<String, RelNodeConfig>) -> Result<(), String
 pub fn validate(label: &str, instance: &RelInstanceConfig) -> Result<(), String> {
     let addr_str = format!("{}:{}", instance.host, instance.port);
-    let addr: std::net::SocketAddr = addr_str
-        .parse()
-        .map_err(|e| format!("Invalid MariaDB address for rel_services.{} ({}): {}", label, addr_str, e))?;
-    TcpStream::connect_timeout(&addr, Duration::from_secs(5))
-        .map_err(|e| format!("MariaDB unreachable at rel_services.{} ({}): {}", label, addr_str, e))?;
+    let resolved = addr_str
+        .to_socket_addrs()
+        .map_err(|e| format!("Invalid MariaDB address for rel_services.{} ({}): {}", label, addr_str, e))?;
+    let mut last_err = None;
+    let mut tried = 0;
+    for addr in resolved {
+        tried += 1;
+        match TcpStream::connect_timeout(&addr, Duration::from_secs(5)) {
+            Ok(_) => return Ok(()),
+            Err(e) => last_err = Some(e),
+        }
+    }
+    if tried == 0 {
+        return Err(format!(
+            "Invalid MariaDB address for rel_services.{} ({}): no resolved addresses",
+            label, addr_str
+        ));
+    }
+    if let Some(e) = last_err {
+        return Err(format!(
+            "MariaDB unreachable at rel_services.{} ({}): {}",
+            label, addr_str, e
+        ));
+    }
     Ok(())
 }


@@ -22,7 +22,7 @@
 //! * `2026-04-04` - mks - promoted to services/mongo/
 use std::collections::HashMap;
-use std::net::TcpStream;
+use std::net::{TcpStream, ToSocketAddrs};
 use std::time::Duration;
 use crate::config::RecNodeConfig;
@@ -71,12 +71,33 @@ pub fn validate_all(nodes: &HashMap<String, RecNodeConfig>) -> Result<(), String
 pub fn validate(name: &str, node: &RecNodeConfig) -> Result<(), String> {
     let addr_str = format!("{}:{}", node.host, node.port);
-    let addr: std::net::SocketAddr = addr_str
-        .parse()
-        .map_err(|e| format!("Invalid MongoDB address for rec_services.{} ({}): {}", name, addr_str, e))?;
-    TcpStream::connect_timeout(&addr, Duration::from_secs(5))
-        .map_err(|e| format!("MongoDB unreachable at rec_services.{} ({}): {}", name, addr_str, e))?;
+    let resolved = addr_str
+        .to_socket_addrs()
+        .map_err(|e| format!("Invalid MongoDB address for rec_services.{} ({}): {}", name, addr_str, e))?;
+    let mut last_err = None;
+    let mut tried = 0;
+    for addr in resolved {
+        tried += 1;
+        match TcpStream::connect_timeout(&addr, Duration::from_secs(5)) {
+            Ok(_) => return Ok(()),
+            Err(e) => last_err = Some(e),
+        }
+    }
+    if tried == 0 {
+        return Err(format!(
+            "Invalid MongoDB address for rec_services.{} ({}): no resolved addresses",
+            name, addr_str
+        ));
+    }
+    if let Some(e) = last_err {
+        return Err(format!(
+            "MongoDB unreachable at rec_services.{} ({}): {}",
+            name, addr_str, e
+        ));
+    }
     Ok(())
 }


@@ -0,0 +1,471 @@
//! # template_registry — Runtime Template Loader/Validator
//!
//! Phase-1 template modernization for BEDS. This module loads REC TOML
//! templates from disk, validates basic structural rules, and returns a
//! runtime registry payload used by IPL and future factory dispatch.
use std::collections::{HashMap, HashSet};
use std::fs;
use std::path::{Path, PathBuf};
#[derive(Debug, thiserror::Error)]
pub enum TemplateRegistryError {
#[error("Template directory read failed ({dir}): {source}")]
ReadDir {
dir: String,
source: std::io::Error,
},
#[error("Template file read failed ({file}): {source}")]
ReadFile {
file: String,
source: std::io::Error,
},
#[error("Template parse failed ({file}): {message}")]
Parse { file: String, message: String },
#[error("Template validation failed ({file}): {message}")]
Validation { file: String, message: String },
}
#[derive(Debug, Clone)]
pub struct RuntimeRecTemplate {
pub template_class: String,
pub service: String,
pub collection: String,
pub extension: String,
pub fields: HashMap<String, String>,
pub cache_map: HashMap<String, String>,
}
struct RecTemplateManifest {
service: String,
schema: String,
template_class: String,
collection: String,
extension: String,
fields: HashMap<String, String>,
protected_fields: Vec<String>,
index_fields: Vec<String>,
index_name_list: Vec<String>,
single_field_indexes: HashMap<String, i32>,
compound_indexes: HashMap<String, Vec<(String, i32)>>,
multikey_indexes: HashMap<String, Vec<(String, i32)>>,
unique_indexes: HashMap<String, i32>,
ttl_indexes: HashMap<String, i64>,
partial_indexes: Vec<PartialIndex>,
cache_map: HashMap<String, String>,
regex_fields: Vec<String>,
}
struct PartialIndex {
field: String,
direction: i32,
filter: toml::Value,
}
fn parse_rec_manifest(parsed: &toml::Value, path: &Path) -> Result<RecTemplateManifest, TemplateRegistryError> {
let file = path.display().to_string();
let service = required_string(parsed, "service", &file)?;
let schema = required_string(parsed, "schema", &file)?;
let template_class = required_string(parsed, "template_class", &file)?;
let collection = required_string(parsed, "collection", &file)?;
let extension = required_string(parsed, "extension", &file)?;
let fields_table = parsed
.get("fields")
.and_then(|v| v.as_table())
.ok_or_else(|| TemplateRegistryError::Parse {
file: file.clone(),
message: "missing or invalid [fields] table".to_string(),
})?;
let mut fields = HashMap::new();
for (key, value) in fields_table {
if let Some(ty) = value.as_str() {
fields.insert(key.clone(), ty.to_string());
}
}
let protected_fields = read_string_array_with_fields_fallback(parsed, fields_table, "protected_fields");
let index_fields = read_string_array_with_fields_fallback(parsed, fields_table, "index_fields");
let index_name_list = read_string_array_with_fields_fallback(parsed, fields_table, "index_name_list");
let regex_fields = read_string_array_with_fields_fallback(parsed, fields_table, "regex_fields");
let single_field_indexes = read_i32_map(parsed, "single_field_indexes");
let compound_indexes = read_index_vector_map(parsed, "compound_indexes");
let multikey_indexes = read_index_vector_map(parsed, "multikey_indexes");
let unique_indexes = read_i32_map(parsed, "unique_indexes");
let ttl_indexes = read_i64_map(parsed, "ttl_indexes");
let cache_map = read_string_map(parsed, "cache_map");
let partial_indexes = read_partial_indexes(parsed);
Ok(RecTemplateManifest {
service,
schema,
template_class,
collection,
extension,
fields,
protected_fields,
index_fields,
index_name_list,
single_field_indexes,
compound_indexes,
multikey_indexes,
unique_indexes,
ttl_indexes,
partial_indexes,
cache_map,
regex_fields,
})
}
fn required_string(parsed: &toml::Value, key: &str, file: &str) -> Result<String, TemplateRegistryError> {
parsed
.get(key)
.and_then(|v| v.as_str())
.map(ToString::to_string)
.ok_or_else(|| TemplateRegistryError::Parse {
file: file.to_string(),
message: format!("missing or invalid '{}'", key),
})
}
fn read_string_array_with_fields_fallback(
parsed: &toml::Value,
fields_table: &toml::value::Table,
key: &str,
) -> Vec<String> {
let from_root = parsed.get(key).and_then(|v| v.as_array());
let from_fields = fields_table.get(key).and_then(|v| v.as_array());
from_root
.or(from_fields)
.map(|items| {
items
.iter()
.filter_map(|item| item.as_str().map(ToString::to_string))
.collect()
})
.unwrap_or_default()
}
fn read_i32_map(parsed: &toml::Value, key: &str) -> HashMap<String, i32> {
parsed
.get(key)
.and_then(|v| v.as_table())
.map(|table| {
table
.iter()
.filter_map(|(k, v)| v.as_integer().map(|n| (k.clone(), n as i32)))
.collect()
})
.unwrap_or_default()
}
fn read_i64_map(parsed: &toml::Value, key: &str) -> HashMap<String, i64> {
parsed
.get(key)
.and_then(|v| v.as_table())
.map(|table| {
table
.iter()
.filter_map(|(k, v)| v.as_integer().map(|n| (k.clone(), n)))
.collect()
})
.unwrap_or_default()
}
fn read_string_map(parsed: &toml::Value, key: &str) -> HashMap<String, String> {
parsed
.get(key)
.and_then(|v| v.as_table())
.map(|table| {
table
.iter()
.filter_map(|(k, v)| v.as_str().map(|s| (k.clone(), s.to_string())))
.collect()
})
.unwrap_or_default()
}
fn read_index_vector_map(parsed: &toml::Value, key: &str) -> HashMap<String, Vec<(String, i32)>> {
let mut out = HashMap::new();
let Some(table) = parsed.get(key).and_then(|v| v.as_table()) else {
return out;
};
for (index_name, entries) in table {
let Some(entry_list) = entries.as_array() else {
continue;
};
let mut parsed_entries = Vec::new();
for entry in entry_list {
let Some(tuple) = entry.as_array() else {
continue;
};
if tuple.len() != 2 {
continue;
}
let Some(field) = tuple[0].as_str() else {
continue;
};
let Some(direction) = tuple[1].as_integer() else {
continue;
};
parsed_entries.push((field.to_string(), direction as i32));
}
out.insert(index_name.clone(), parsed_entries);
}
out
}
fn read_partial_indexes(parsed: &toml::Value) -> Vec<PartialIndex> {
let Some(items) = parsed.get("partial_indexes").and_then(|v| v.as_array()) else {
return Vec::new();
};
let mut out = Vec::new();
for item in items {
let Some(tbl) = item.as_table() else {
continue;
};
let Some(field) = tbl.get("field").and_then(|v| v.as_str()) else {
continue;
};
let Some(direction) = tbl.get("direction").and_then(|v| v.as_integer()) else {
continue;
};
let Some(filter) = tbl.get("filter") else {
continue;
};
out.push(PartialIndex {
field: field.to_string(),
direction: direction as i32,
filter: filter.clone(),
});
}
out
}
pub fn load_runtime_rec_templates(template_dir: &str) -> Result<Vec<RuntimeRecTemplate>, TemplateRegistryError> {
let dir = Path::new(template_dir);
let entries = fs::read_dir(dir).map_err(|source| TemplateRegistryError::ReadDir {
dir: template_dir.to_string(),
source,
})?;
let mut templates = Vec::new();
for entry in entries {
let entry = entry.map_err(|source| TemplateRegistryError::ReadDir {
dir: template_dir.to_string(),
source,
})?;
let path = entry.path();
if !is_toml_file(&path) {
continue;
}
let raw = fs::read_to_string(&path).map_err(|source| TemplateRegistryError::ReadFile {
file: path.display().to_string(),
source,
})?;
let parsed: toml::Value = toml::from_str(&raw).map_err(|e| TemplateRegistryError::Parse {
file: path.display().to_string(),
message: e.to_string(),
})?;
// Phase-1 scope only validates/loads REC templates.
let schema = parsed.get("schema").and_then(|v| v.as_str()).unwrap_or_default();
if schema != "rec" {
continue;
}
let manifest = parse_rec_manifest(&parsed, &path)?;
validate_rec_manifest(&manifest, &path)?;
templates.push(RuntimeRecTemplate {
template_class: manifest.template_class,
service: manifest.service,
collection: manifest.collection,
extension: manifest.extension,
fields: manifest.fields,
cache_map: manifest.cache_map,
});
}
Ok(templates)
}
fn validate_rec_manifest(
manifest: &RecTemplateManifest,
path: &PathBuf,
) -> Result<(), TemplateRegistryError> {
let file = path.display().to_string();
if manifest.schema != "rec" {
return Err(TemplateRegistryError::Validation {
file,
message: format!("expected schema='rec', got '{}'", manifest.schema),
});
}
if manifest.template_class.trim().is_empty()
|| manifest.service.trim().is_empty()
|| manifest.collection.trim().is_empty()
|| manifest.extension.trim().is_empty()
{
return Err(TemplateRegistryError::Validation {
file,
message: "required identity fields cannot be blank".to_string(),
});
}
if manifest.fields.is_empty() {
return Err(TemplateRegistryError::Validation {
file,
message: "fields section cannot be empty".to_string(),
});
}
let declared_fields: HashSet<&str> = manifest.fields.keys().map(String::as_str).collect();
ensure_keys_in_fields(&file, "protected_fields", &manifest.protected_fields, &declared_fields)?;
ensure_keys_in_fields(&file, "index_fields", &manifest.index_fields, &declared_fields)?;
ensure_map_keys_in_fields(
&file,
"single_field_indexes",
&manifest.single_field_indexes,
&declared_fields,
)?;
ensure_map_keys_in_fields(
&file,
"unique_indexes",
&manifest.unique_indexes,
&declared_fields,
)?;
ensure_map_keys_in_fields(
&file,
"ttl_indexes",
&manifest.ttl_indexes,
&declared_fields,
)?;
ensure_map_keys_in_fields(&file, "cache_map", &manifest.cache_map, &declared_fields)?;
ensure_keys_in_fields(&file, "regex_fields", &manifest.regex_fields, &declared_fields)?;
for p in &manifest.partial_indexes {
if !declared_fields.contains(p.field.as_str()) {
return Err(TemplateRegistryError::Validation {
file: file.clone(),
message: format!("partial_indexes references undeclared field '{}'", p.field),
});
}
if p.direction != 1 && p.direction != -1 {
return Err(TemplateRegistryError::Validation {
file: file.clone(),
message: format!("partial_indexes field '{}' has invalid direction {}", p.field, p.direction),
});
}
if !p.filter.is_table() {
return Err(TemplateRegistryError::Validation {
file: file.clone(),
message: format!("partial_indexes field '{}' must define filter as a table", p.field),
});
}
}
let named_indexes: HashSet<&str> = manifest.index_name_list.iter().map(String::as_str).collect();
for key in manifest.compound_indexes.keys() {
if !named_indexes.contains(key.as_str()) {
return Err(TemplateRegistryError::Validation {
file: file.clone(),
message: format!("compound index '{}' missing from index_name_list", key),
});
}
}
for key in manifest.multikey_indexes.keys() {
if !named_indexes.contains(key.as_str()) {
return Err(TemplateRegistryError::Validation {
file: file.clone(),
message: format!("multikey index '{}' missing from index_name_list", key),
});
}
}
Ok(())
}
fn ensure_keys_in_fields(
file: &str,
section: &str,
keys: &[String],
declared_fields: &HashSet<&str>,
) -> Result<(), TemplateRegistryError> {
for key in keys {
if !declared_fields.contains(key.as_str()) {
return Err(TemplateRegistryError::Validation {
file: file.to_string(),
message: format!("{} references undeclared field '{}'", section, key),
});
}
}
Ok(())
}
fn ensure_map_keys_in_fields<T>(
file: &str,
section: &str,
map: &HashMap<String, T>,
declared_fields: &HashSet<&str>,
) -> Result<(), TemplateRegistryError> {
for key in map.keys() {
if !declared_fields.contains(key.as_str()) {
return Err(TemplateRegistryError::Validation {
file: file.to_string(),
message: format!("{} references undeclared field '{}'", section, key),
});
}
}
Ok(())
}
fn is_toml_file(path: &Path) -> bool {
path.extension()
.and_then(|ext| ext.to_str())
.map(|ext| ext.eq_ignore_ascii_case("toml"))
.unwrap_or(false)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn loads_and_validates_rec_templates_from_repo() {
let templates = load_runtime_rec_templates("templates").expect("template load failed");
assert!(!templates.is_empty());
assert!(templates.iter().any(|t| t.template_class == "ExampleCollection"));
assert!(templates.iter().any(|t| t.template_class == "Logger"));
}
}
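For a concrete picture of what this loader accepts, here is a hypothetical minimal manifest — every name under `[fields]` and the index key are invented for illustration; only the five identity keys and a non-empty `[fields]` table are mandated by `parse_rec_manifest`:

```rust
#[cfg(test)]
mod manifest_shape_example {
    // Sketch only — demonstrates the TOML shape, not a template shipped in this commit.
    const MINIMAL_REC_TOML: &str = r#"
        service = "app_server"
        schema = "rec"
        template_class = "ExampleCollection"
        collection = "example"
        extension = "rec"

        [fields]
        title = "string"
        created = "i64"

        # Index keys must name fields declared above, or validation fails.
        [single_field_indexes]
        created = -1
    "#;

    #[test]
    fn minimal_manifest_is_rec_schema() {
        let parsed: toml::Value = toml::from_str(MINIMAL_REC_TOML).expect("valid TOML");
        assert_eq!(parsed.get("schema").and_then(|v| v.as_str()), Some("rec"));
    }
}
```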


@@ -112,6 +112,81 @@ async fn ping_round_trip(channel: &lapin::Channel, routing_key: &str, expected_b
.expect("failed to ack reply message"); .expect("failed to ack reply message");
} }
async fn request_reply_json(
channel: &lapin::Channel,
routing_key: &str,
event_type: &str,
body: serde_json::Value,
) -> serde_json::Value {
let reply_queue = channel
.queue_declare(
"",
QueueDeclareOptions {
exclusive: true,
auto_delete: true,
..Default::default()
},
FieldTable::default(),
)
.await
.expect("failed to declare reply queue")
.name()
.as_str()
.to_string();
let mut consumer = channel
.basic_consume(
&reply_queue,
"message-flow-test-rpc",
BasicConsumeOptions::default(),
FieldTable::default(),
)
.await
.expect("failed to start reply consumer");
let correlation_id = format!("{}-{}", event_type, routing_key);
let props = BasicProperties::default()
.with_type(event_type.into())
.with_reply_to(reply_queue.clone().into())
.with_correlation_id(correlation_id.clone().into());
let body_bytes = serde_json::to_vec(&body).expect("failed to serialize request body");
channel
.basic_publish(
EXCHANGE_NAME,
routing_key,
BasicPublishOptions::default(),
&body_bytes,
props,
)
.await
.expect("publish failed")
.await
.expect("publish confirm failed");
let delivery = tokio::time::timeout(Duration::from_secs(5), consumer.next())
.await
.expect("timed out waiting for broker reply")
.expect("reply consumer ended unexpectedly")
.expect("reply delivery error");
let payload: serde_json::Value =
serde_json::from_slice(&delivery.data).expect("reply payload is not valid JSON");
if let Some(cid) = delivery.properties.correlation_id().as_ref() {
assert_eq!(cid.as_str(), correlation_id);
}
delivery
.ack(BasicAckOptions::default())
.await
.expect("failed to ack reply message");
payload
}
#[tokio::test]
async fn r_and_w_brokers_process_ping_events() {
    let cfg = common::load_test_config();
@@ -154,3 +229,77 @@ async fn r_and_w_brokers_process_ping_events() {
        h.abort();
    }
}
#[tokio::test]
async fn logger_write_then_fetch_round_trip() {
let cfg = common::load_test_config();
if let Err(e) = rustybeds::brokers::logger_store::init_from_rec_services(&cfg.rec_services, &cfg.id.env_name).await {
eprintln!("SKIP: Mongo logger store unavailable: {}", e);
return;
}
let conn = match try_connect(&cfg.broker_services).await {
Some(c) => c,
None => {
eprintln!("SKIP: RabbitMQ not available at test address");
return;
}
};
let amqp = AmqpConnection::connect(&cfg.broker_services)
.await
.expect("exchange declaration connection failed");
amqp.declare_exchange()
.await
.expect("exchange declaration failed");
let mut handles = brokers::spawn_r_broker_pool(Arc::clone(&conn), &cfg.broker_services)
.await
.expect("rBroker pool failed to start");
handles.extend(
brokers::spawn_w_broker_pool(Arc::clone(&conn), &cfg.broker_services)
.await
.expect("wBroker pool failed to start"),
);
let test_channel = conn
.create_channel()
.await
.expect("failed to create test channel");
let write_request = serde_json::json!({
"template": "Logger",
"data": {
"message_log": "poc-log-message",
"level_log": "info",
"service_log": "app_server"
}
});
let write_reply = request_reply_json(&test_channel, "rec.write", "write", write_request).await;
assert_eq!(write_reply["status"], "ok");
assert_eq!(write_reply["code"], "LOGGER_WRITE");
let fetch_request = serde_json::json!({
"template": "Logger",
"data": {
"limit": 10
}
});
let fetch_reply = request_reply_json(&test_channel, "rec.read", "fetch", fetch_request).await;
assert_eq!(fetch_reply["status"], "ok");
assert_eq!(fetch_reply["code"], "LOGGER_FETCH");
let logs = fetch_reply["logs"].as_array().expect("logs must be an array");
assert!(
logs.iter().any(|v| v["message_log"] == "poc-log-message"),
"fetched logs should include the message just written"
);
for h in handles {
h.abort();
}
}


@@ -12,9 +12,10 @@ The IPL sequence is not arbitrary. Each step depends on the previous one:
 1. **Configuration must load first** — every subsequent step reads from it
 2. **Logging must initialize second** — every subsequent step may emit log events
-3. **RabbitMQ must be reachable third** — it is the transport for everything, including log event routing to admin
-4. **MongoDB must be reachable fourth** — it is the log persistence store on the admin node, and the primary document store on appServer
-5. **MariaDB must be reachable fifth** — it is the relational store; non-critical in dev but required in production
+3. **Runtime templates must validate third** — startup must reject invalid template manifests before service checks
+4. **RabbitMQ must be reachable fourth** — it is the transport for everything, including log event routing to admin
+5. **MongoDB must be reachable fifth** — it is the log persistence store on the admin node, and the primary document store on appServer
+6. **MariaDB must be reachable sixth** — it is the relational store; non-critical in dev but required in production
 You cannot initialize logging before loading config because the log destination (syslog vs console, mirror settings) is in the config. You cannot validate RabbitMQ before initializing logging because you need logging to report the result. The order is a dependency chain, not a preference.
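Item 6's "non-critical in dev but required in production" pattern recurs across IPL steps; as a sketch, the policy reduces to a helper like this (hypothetical — the commit expresses it inline with `match`/`if` instead):

```rust
// Sketch of the env-aware gate described above (hypothetical helper, not in the commit).
fn gate(env_name: &str, what: &str, check: Result<(), String>) -> Result<(), String> {
    match check {
        Ok(()) => Ok(()),
        Err(e) if env_name == "production" => Err(e), // fatal: abort IPL
        Err(e) => {
            tracing::warn!("{} failed (non-fatal in {}): {}", what, env_name, e);
            Ok(()) // warn and continue so dev workflows keep working
        }
    }
}
```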
@@ -40,9 +41,28 @@ Initializes the `tracing` subscriber with journald and/or console output based o
 **Why second:** Config is loaded. Logging destination is known. Every step from here on can emit structured log output.
-**Note on log routing:** At this point, log output goes to the local console and/or journald. Log events are not yet routed to the admin node's MongoDB `msLogs` collection — that requires RabbitMQ to be up (Step 3). Local logging is the fallback that covers the gap between process start and AMQP connectivity.
+**Note on log routing:** At this point, log output goes to the local console and/or journald. Log events are not yet routed to the admin node's MongoDB `msLogs` collection — that requires RabbitMQ to be up (Step 4). Local logging is the fallback that covers the gap between process start and AMQP connectivity.
-### Step 3: Validate RabbitMQ Reachability (TCP)
+### Step 2b: Load and Validate REC Templates
+```rust
+let runtime_templates = template_registry::load_runtime_rec_templates("templates")
+```
+Loads every TOML template under `templates/`, selects `schema = "rec"`, and validates internal consistency before network service validation begins.
+Current validation scope includes:
+- Every protected field must exist in `fields`.
+- Every declared index/cache/regex/partial index field must exist in `fields`.
+- Every `compound_indexes` name must appear in `index_name_list`.
+**Why here:** this catches template drift and invalid declarations early, before brokers or adapters process traffic.
+**Environment-aware failure handling:**
+- `production`: invalid template registry is fatal
+- all other environments: invalid registry is warning-only so local POC workflows can continue
+### Step 4: Validate RabbitMQ Reachability (TCP)
 ```rust
 match services::amqp::validate(&cfg.broker_services) { ... }
@@ -56,7 +76,7 @@ Opens a TCP connection to the configured RabbitMQ broker host and port. Does not
 - `production`: unreachable broker is fatal — the node cannot function
 - all other environments: unreachable broker is a warning — IPL continues so developers can work on other components without a running broker
-### Step 3b: Authenticate to RabbitMQ + Declare Exchange
+### Step 4b: Authenticate to RabbitMQ + Declare Exchange
 ```rust
 let amqp_conn = match services::amqp::AmqpConnection::connect(&cfg.broker_services).await { ... }
@@ -74,7 +94,7 @@ Opens a full AMQP session — credentials, vhost, and channel. Then asserts the
 - `production`: authentication failure is fatal
 - all other environments: failure is a warning — `amqp_conn` is `None`, IPL continues
-### Step 4: Validate MongoDB
+### Step 5: Validate MongoDB
 ```rust
 match mongo::validate_all(&cfg.rec_services) { ... }
@@ -86,7 +106,7 @@ Opens a TCP connection to each configured MongoDB node. One entry per BEDS servi
 **Environment-aware failure handling:** Same pattern as RabbitMQ — fatal in production, warning in development.
-### Step 5: Validate MariaDB
+### Step 6: Validate MariaDB
 ```rust
 match mariadb::validate_all(&cfg.rel_services) { ... }

View File

@@ -159,6 +159,13 @@ Current proof-of-concept verification for the two active appServer brokers is co
- `tests/broker_pool_test.rs` validates that configured rBroker/wBroker pool instances spawn.
- `tests/broker_message_flow_test.rs` validates end-to-end message flow by publishing `ping` events to
  `rec.read` and `rec.write` and asserting broker replies.
- `tests/broker_message_flow_test.rs` also validates logger sequence round-trip by publishing a `write`
event to `rec.write` with `template="Logger"`, then fetching via `fetch` on `rec.read` and asserting
that the newly written log message is returned.
The POC logger sequence now writes and reads through MongoDB (`msLogs`) via the
`Logger` template path (`rec.write` for write, `rec.read` for fetch), using credentials from
the active environment config.
These tests provide a lightweight deployment confidence check while the framework is still in the
"POC before guardrails" phase.


@@ -12,6 +12,21 @@ templates/
└── mst_logger_rec.toml ← logger collection template
```
## Runtime Template Registry (Implemented)
At IPL, BEDS now loads REC templates from `templates/` into a typed runtime registry and validates them before service connectivity checks.
Current runtime validation enforces:
- Any field listed in `protected_fields` must exist in `[fields]`.
- Any field listed in `index_fields` must exist in `[fields]`.
- Any key in `[single_field_indexes]`, `[unique_indexes]`, and `[ttl_indexes]` must exist in `[fields]`.
- Every field used in `[compound_indexes]` and `partial_indexes` qualifiers must exist in `[fields]`.
- Every field used in `regex_pattern_indexing` and `[cache_map]` must exist in `[fields]`.
- Every `compound_indexes` index name must be present in `index_name_list`.
In `production`, template registry validation failures are fatal during IPL. In non-production environments, failures are warning-only to preserve local POC workflows.
## Two Template Types
| Type | Schema | Database | Use Case |


@@ -48,6 +48,10 @@ Immediate objective:
Guardrails are intentionally deferred until POC behavior is stable.
Implementation status update:
- Phase A transport stability evidence exists: live RabbitMQ round-trip tests for `rec.read` and `rec.write` ping paths.
- Phase B has started: REC template registry loading and startup validation are now implemented in IPL.
## Must-Keep Invariants
1. AMQP-first data path for application operations.