Add rBroker + wBroker pool, BrokerPayload, NamasteCore trait stub
- src/brokers/: pool manager, r_broker (rec.read), w_broker (rec.write), BrokerPayload struct, BrokerError type - src/core/: NamasteCore trait — fetch/write/update/delete interface, stubs - IPL step 6: spawns rBroker + wBroker pools after exchange declaration - tests/broker_pool_test.rs: integration tests for pool spawn (skip if broker down) - BrokerPayload unit tests + doctest in payload.rs - Added futures-lite, serde_json to Cargo.toml - README.md, CLAUDE.md, wiki updated to reflect new structure and status Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
33
src/brokers/error.rs
Normal file
33
src/brokers/error.rs
Normal file
@@ -0,0 +1,33 @@
|
||||
//! # brokers/error.rs — Broker Error Types
|
||||
//!
|
||||
//! Defines the error type for all broker task operations in BEDS.
|
||||
//!
|
||||
//! ## Calling Agents
|
||||
//! - `brokers::r_broker` — returned from spawn and consume operations
|
||||
//! - `brokers::mod` — surfaced from pool management
|
||||
//!
|
||||
//! **Author:** mks
|
||||
//! **Version:** 1.0
|
||||
//!
|
||||
//! ## History
|
||||
//! * `2026-04-05` - mks - original coding
|
||||
|
||||
/// Errors that can occur in any BEDS broker task.
|
||||
///
|
||||
/// AMQP protocol errors are wrapped transparently via the `From` impl.
|
||||
/// Additional variants cover broker-specific failure modes.
|
||||
///
|
||||
/// # History
|
||||
///
|
||||
/// * `2026-04-05` - mks - original coding
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum BrokerError {
|
||||
#[error("AMQP protocol error: {0}")]
|
||||
Protocol(#[from] lapin::Error),
|
||||
|
||||
#[error("Broker task '{0}' failed to start: {1}")]
|
||||
StartupFailed(String, String),
|
||||
|
||||
#[error("Message decode error in broker '{0}': {1}")]
|
||||
DecodeFailed(String, String),
|
||||
}
|
||||
106
src/brokers/mod.rs
Normal file
106
src/brokers/mod.rs
Normal file
@@ -0,0 +1,106 @@
|
||||
//! # brokers/mod.rs — Broker Pool Manager
|
||||
//!
|
||||
//! Manages the lifecycle of all broker task pools. At IPL, `spawn_r_broker_pool()`
|
||||
//! reads the instance count from config, spawns N rBroker Tokio tasks, and
|
||||
//! returns their JoinHandles to the caller.
|
||||
//!
|
||||
//! Each broker type gets its own pool function following the same pattern.
|
||||
//! The pool manager holds handles but does not supervise — task exit is logged
|
||||
//! by the task itself. Supervision (respawn on crash) is a future addition.
|
||||
//!
|
||||
//! ## Calling Agents
|
||||
//! - `ipl()` in main.rs — calls pool spawn functions after exchange declaration
|
||||
//!
|
||||
//! ## Outputs
|
||||
//! - `Vec<JoinHandle<()>>` per broker type — held for clean shutdown
|
||||
//!
|
||||
//! **Author:** mks
|
||||
//! **Version:** 1.0
|
||||
//!
|
||||
//! ## History
|
||||
//! * `2026-04-05` - mks - original coding
|
||||
|
||||
pub mod error;
|
||||
pub mod payload;
|
||||
pub mod r_broker;
|
||||
pub mod w_broker;
|
||||
|
||||
use std::sync::Arc;
|
||||
use lapin::Connection;
|
||||
|
||||
use crate::config::BrokerServicesConfig;
|
||||
use error::BrokerError;
|
||||
|
||||
/// Spawns the rBroker pool — N tasks as configured in `instances.r_broker`.
|
||||
///
|
||||
/// Each task gets the shared AMQP connection, the queue tag, and its zero-based
|
||||
/// instance index. The connection is wrapped in `Arc` so each task can open
|
||||
/// its own channel without cloning the connection.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `conn` — the authenticated AMQP connection from IPL step 3b
|
||||
/// * `cfg` — broker services config block (queue_tag + instance counts)
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// `Ok(Vec<JoinHandle<()>>)` — one handle per spawned task.
|
||||
/// `Err(BrokerError)` if any task fails to declare its queue before starting.
|
||||
///
|
||||
/// # History
|
||||
///
|
||||
/// * `2026-04-05` - mks - original coding
|
||||
pub async fn spawn_r_broker_pool(
|
||||
conn: Arc<Connection>,
|
||||
cfg: &BrokerServicesConfig,
|
||||
) -> Result<Vec<tokio::task::JoinHandle<()>>, BrokerError> {
|
||||
let count = cfg.app_server.instances.r_broker;
|
||||
let mut handles = Vec::with_capacity(count as usize);
|
||||
|
||||
for i in 0..count {
|
||||
let handle = r_broker::spawn(
|
||||
Arc::clone(&conn),
|
||||
cfg.queue_tag.clone(),
|
||||
i,
|
||||
).await?;
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
tracing::info!("rBroker pool started: {} instance(s)", count);
|
||||
Ok(handles)
|
||||
}
|
||||
|
||||
/// Spawns the wBroker pool — N tasks as configured in `instances.w_broker`.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `conn` — the authenticated AMQP connection from IPL step 3b
|
||||
/// * `cfg` — broker services config block (queue_tag + instance counts)
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// `Ok(Vec<JoinHandle<()>>)` — one handle per spawned task.
|
||||
/// `Err(BrokerError)` if any task fails to declare its queue before starting.
|
||||
///
|
||||
/// # History
|
||||
///
|
||||
/// * `2026-04-05` - mks - original coding
|
||||
pub async fn spawn_w_broker_pool(
|
||||
conn: Arc<Connection>,
|
||||
cfg: &BrokerServicesConfig,
|
||||
) -> Result<Vec<tokio::task::JoinHandle<()>>, BrokerError> {
|
||||
let count = cfg.app_server.instances.w_broker;
|
||||
let mut handles = Vec::with_capacity(count as usize);
|
||||
|
||||
for i in 0..count {
|
||||
let handle = w_broker::spawn(
|
||||
Arc::clone(&conn),
|
||||
cfg.queue_tag.clone(),
|
||||
i,
|
||||
).await?;
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
tracing::info!("wBroker pool started: {} instance(s)", count);
|
||||
Ok(handles)
|
||||
}
|
||||
108
src/brokers/payload.rs
Normal file
108
src/brokers/payload.rs
Normal file
@@ -0,0 +1,108 @@
|
||||
//! # brokers/payload.rs — AMQP Message Payload
|
||||
//!
|
||||
//! Defines the JSON body structure carried in all BEDS broker messages.
|
||||
//! The AMQP envelope handles routing (type header, reply_to, correlation_id);
|
||||
//! this struct is what lives in the message body.
|
||||
//!
|
||||
//! ## Wire Format
|
||||
//!
|
||||
//! ```json
|
||||
//! {
|
||||
//! "template": "usr",
|
||||
//! "data": { "first_name": "joe", "status": "active" }
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! `template` names the data object (maps to a NamasteCore implementor).
|
||||
//! `data` carries key/value pairs in user-facing field names — the template
|
||||
//! maps these to actual schema names. Callers never specify primary keys on
|
||||
//! writes; the template generates a GUID and returns it in the reply.
|
||||
//!
|
||||
//! ## Calling Agents
|
||||
//! - `brokers::r_broker` — parsed from message body on fetch events
|
||||
//! - `brokers::w_broker` — parsed from message body on write/update/delete events
|
||||
//!
|
||||
//! **Author:** mks
|
||||
//! **Version:** 1.0
|
||||
//!
|
||||
//! ## History
|
||||
//! * `2026-04-05` - mks - original coding
|
||||
|
||||
use std::collections::HashMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
|
||||
/// The JSON body of every BEDS broker message.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use rustybeds::brokers::payload::BrokerPayload;
|
||||
///
|
||||
/// let json = r#"{"template":"usr","data":{"first_name":"joe"}}"#;
|
||||
/// let payload: BrokerPayload = serde_json::from_str(json).unwrap();
|
||||
/// assert_eq!(payload.template, "usr");
|
||||
/// assert!(payload.data.contains_key("first_name"));
|
||||
/// ```
|
||||
///
|
||||
/// Both read and write brokers parse this struct from the raw AMQP delivery
|
||||
/// bytes. The operation type is carried in the AMQP `type` message property —
|
||||
/// this struct carries the object identity and data payload.
|
||||
///
|
||||
/// # History
|
||||
///
|
||||
/// * `2026-04-05` - mks - original coding
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
pub struct BrokerPayload {
|
||||
/// Template identifier — names the NamasteCore implementor to dispatch to.
|
||||
/// Matches the TLA convention from the template file (e.g. `"usr"`, `"pst"`).
|
||||
pub template: String,
|
||||
|
||||
/// Key/value data pairs in user-facing field names.
|
||||
/// For writes: the record to store (pkey excluded — generated by template).
|
||||
/// For reads: query discriminants (field → value to match).
|
||||
/// For deletes: discriminants identifying the record(s) to remove.
|
||||
#[serde(default)]
|
||||
pub data: HashMap<String, Value>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn deserializes_full_payload() {
        let raw = r#"{"template":"usr","data":{"first_name":"joe","status":"active"}}"#;
        let parsed: BrokerPayload = serde_json::from_str(raw).unwrap();

        assert_eq!(parsed.template, "usr");
        assert_eq!(parsed.data.len(), 2);
        assert_eq!(parsed.data["first_name"], "joe");
        assert_eq!(parsed.data["status"], "active");
    }

    #[test]
    fn deserializes_without_data_field() {
        // `data` is optional — fetch by template name alone is valid
        let parsed: BrokerPayload = serde_json::from_str(r#"{"template":"usr"}"#).unwrap();

        assert_eq!(parsed.template, "usr");
        assert!(parsed.data.is_empty());
    }

    #[test]
    fn serializes_round_trip() {
        let raw = r#"{"template":"pst","data":{"title":"hello"}}"#;
        let original: BrokerPayload = serde_json::from_str(raw).unwrap();

        let encoded = serde_json::to_string(&original).unwrap();
        let decoded: BrokerPayload = serde_json::from_str(&encoded).unwrap();

        assert_eq!(decoded.template, original.template);
        assert_eq!(decoded.data["title"], original.data["title"]);
    }

    #[test]
    fn rejects_missing_template() {
        // `template` has no default — its absence must be a hard parse error
        let outcome: Result<BrokerPayload, _> =
            serde_json::from_str(r#"{"data":{"first_name":"joe"}}"#);
        assert!(outcome.is_err());
    }
}
|
||||
240
src/brokers/r_broker.rs
Normal file
240
src/brokers/r_broker.rs
Normal file
@@ -0,0 +1,240 @@
|
||||
//! # brokers/r_broker.rs — Read Broker Task
|
||||
//!
|
||||
//! The rBroker is a Tokio task that handles all non-destructive read events
|
||||
//! from the AMQP exchange. Each instance declares its queue, binds to the
|
||||
//! `rec.read` routing key, enters a consume loop, and dispatches incoming
|
||||
//! events to the appropriate handler.
|
||||
//!
|
||||
//! ## Calling Agents
|
||||
//! - `brokers::mod` — spawns N instances at IPL via `spawn_pool()`
|
||||
//!
|
||||
//! ## Inputs
|
||||
//! - `Arc<lapin::Connection>` — shared AMQP connection from the broker pool
|
||||
//! - `queue_tag: String` — queue name prefix from config (e.g. "dev_", "prod_")
|
||||
//! - `instance_id: u32` — numeric ID for log correlation (0-based)
|
||||
//!
|
||||
//! ## Outputs
|
||||
//! - Publishes reply payloads to the `reply_to` queue specified in each message header
|
||||
//! - Log events to tracing (journald / console per config)
|
||||
//!
|
||||
//! ## Event Types (routing key: rec.read)
|
||||
//!
|
||||
//! | Event | Description | Status |
|
||||
//! |------------|--------------------------------------|-------------|
|
||||
//! | `ping` | Health check — reply ACK + timestamp | Implemented |
|
||||
//! | `shutdown` | Ordered shutdown — cancel consumer | Implemented |
|
||||
//! | `fetch` | REC store read operation | Stub |
|
||||
//!
|
||||
//! **Author:** mks
|
||||
//! **Version:** 1.0
|
||||
//!
|
||||
//! ## History
|
||||
//! * `2026-04-05` - mks - original coding
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures_lite::StreamExt;
|
||||
use lapin::{
|
||||
BasicProperties, Channel, Connection,
|
||||
options::{
|
||||
BasicAckOptions, BasicConsumeOptions, BasicPublishOptions,
|
||||
QueueBindOptions, QueueDeclareOptions,
|
||||
},
|
||||
types::FieldTable,
|
||||
};
|
||||
|
||||
use crate::services::amqp::EXCHANGE_NAME;
|
||||
use super::error::BrokerError;
|
||||
|
||||
/// Routing key this broker binds to — read-side (non-destructive) REC events.
const ROUTING_KEY: &str = "rec.read";
|
||||
|
||||
/// Spawns a single rBroker task and returns immediately.
|
||||
///
|
||||
/// The task runs until it receives a `shutdown` event or the AMQP connection
|
||||
/// is lost. All log output is tagged with the instance ID for correlation.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `conn` — shared AMQP connection; each task opens its own channel
|
||||
/// * `queue_tag` — queue name prefix from config (e.g. `"dev_"`)
|
||||
/// * `instance_id` — zero-based index for log correlation
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// `Ok(tokio::task::JoinHandle)` — the task handle; held by the pool manager.
|
||||
/// `Err(BrokerError)` if the channel or queue declaration fails before the task starts.
|
||||
///
|
||||
/// # History
|
||||
///
|
||||
/// * `2026-04-05` - mks - original coding
|
||||
pub async fn spawn(
|
||||
conn: Arc<Connection>,
|
||||
queue_tag: String,
|
||||
instance_id: u32,
|
||||
) -> Result<tokio::task::JoinHandle<()>, BrokerError> {
|
||||
// each broker task owns its own channel — channels are cheap, connections are not
|
||||
let channel = conn.create_channel().await?;
|
||||
|
||||
let queue_name = format!("{}rec.read", queue_tag);
|
||||
|
||||
// declare the queue — idempotent; safe to call on restart
|
||||
channel
|
||||
.queue_declare(
|
||||
&queue_name,
|
||||
QueueDeclareOptions {
|
||||
durable: true,
|
||||
..Default::default()
|
||||
},
|
||||
FieldTable::default(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// bind the queue to the exchange on the rec.read routing key
|
||||
channel
|
||||
.queue_bind(
|
||||
&queue_name,
|
||||
EXCHANGE_NAME,
|
||||
ROUTING_KEY,
|
||||
QueueBindOptions::default(),
|
||||
FieldTable::default(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
tracing::info!("rBroker[{}] queue '{}' declared and bound", instance_id, queue_name);
|
||||
|
||||
let handle = tokio::spawn(async move {
|
||||
if let Err(e) = run(channel, queue_name, instance_id).await {
|
||||
tracing::error!("rBroker[{}] exited with error: {}", instance_id, e);
|
||||
}
|
||||
});
|
||||
|
||||
Ok(handle)
|
||||
}
|
||||
|
||||
/// The rBroker consume loop.
///
/// Enters `basic_consume` on the declared queue and processes messages until
/// a `shutdown` event is received or the channel closes. Each message is
/// acked after processing regardless of outcome — a failed dispatch is
/// logged, not requeued.
///
/// # History
///
/// * `2026-04-05` - mks - original coding
async fn run(
    channel: Channel,
    queue_name: String,
    instance_id: u32,
) -> Result<(), BrokerError> {
    let consumer_tag = format!("rbroker-{}", instance_id);

    let mut consumer = channel
        .basic_consume(
            &queue_name,
            &consumer_tag,
            BasicConsumeOptions::default(),
            FieldTable::default(),
        )
        .await?;

    tracing::info!("rBroker[{}] consuming on '{}'", instance_id, queue_name);

    while let Some(delivery) = consumer.next().await {
        let delivery = match delivery {
            Ok(d) => d,
            Err(e) => {
                // stream-level failure (e.g. channel/connection dropped) — exit the loop
                tracing::error!("rBroker[{}] delivery error: {}", instance_id, e);
                break;
            }
        };

        // extract the event type from the message type header
        // (AMQP `type` property; empty string when the publisher set none,
        // which falls through to the unknown-event arm below)
        let event_type = delivery
            .properties
            .kind()
            .as_ref()
            .map(|s| s.as_str().to_string())
            .unwrap_or_default();

        tracing::debug!("rBroker[{}] received event='{}'", instance_id, event_type);

        // dispatch — handlers return Some(bytes) when a reply should be sent
        let reply_payload: Option<Vec<u8>> = match event_type.as_str() {
            "ping" => handle_ping(instance_id),
            "shutdown" => {
                // ack before exiting so the message is not redelivered
                let _ = delivery.ack(BasicAckOptions::default()).await;
                tracing::info!("rBroker[{}] shutdown event received — exiting", instance_id);
                break;
            }
            "fetch" => handle_fetch(&delivery.data, instance_id),
            unknown => {
                tracing::warn!("rBroker[{}] unknown event type '{}'", instance_id, unknown);
                None
            }
        };

        // publish reply if the event specified a reply_to queue;
        // fire-and-forget callers simply omit reply_to
        if let Some(payload) = reply_payload {
            if let Some(reply_to) = delivery.properties.reply_to().as_ref() {
                let reply_queue = reply_to.as_str().to_string();
                let correlation_id = delivery.properties.correlation_id().clone();

                // echo the caller's correlation_id (empty if none was set)
                let props = BasicProperties::default()
                    .with_correlation_id(correlation_id.unwrap_or_default());

                if let Err(e) = channel
                    .basic_publish(
                        "", // default exchange — direct to queue by name
                        &reply_queue,
                        BasicPublishOptions::default(),
                        &payload,
                        props,
                    )
                    .await
                {
                    // reply failure is logged but does not stop the consume loop
                    tracing::error!("rBroker[{}] reply publish failed: {}", instance_id, e);
                }
            }
        }

        // ack unconditionally — failed dispatches are logged, never requeued
        let _ = delivery.ack(BasicAckOptions::default()).await;
    }

    tracing::info!("rBroker[{}] consume loop exited", instance_id);
    Ok(())
}
|
||||
|
||||
/// Handles a `ping` event — returns a simple ACK payload with a timestamp.
|
||||
///
|
||||
/// # History
|
||||
///
|
||||
/// * `2026-04-05` - mks - original coding
|
||||
fn handle_ping(instance_id: u32) -> Option<Vec<u8>> {
|
||||
let ts = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs();
|
||||
|
||||
let response = format!(r#"{{"status":"ok","broker":"rBroker","instance":{},"ts":{}}}"#, instance_id, ts);
|
||||
tracing::debug!("rBroker[{}] ping response: {}", instance_id, response);
|
||||
Some(response.into_bytes())
|
||||
}
|
||||
|
||||
/// Stub handler for `fetch` events.
|
||||
///
|
||||
/// Factory/adapter dispatch is not yet implemented. Returns a not-implemented
|
||||
/// error payload so callers receive a defined response rather than silence.
|
||||
///
|
||||
/// # History
|
||||
///
|
||||
/// * `2026-04-05` - mks - stub
|
||||
fn handle_fetch(data: &[u8], instance_id: u32) -> Option<Vec<u8>> {
|
||||
tracing::warn!(
|
||||
"rBroker[{}] fetch event received ({} bytes) — factory dispatch not yet implemented",
|
||||
instance_id,
|
||||
data.len()
|
||||
);
|
||||
let response = r#"{"status":"error","code":"NOT_IMPLEMENTED","message":"factory dispatch not yet implemented"}"#;
|
||||
Some(response.as_bytes().to_vec())
|
||||
}
|
||||
262
src/brokers/w_broker.rs
Normal file
262
src/brokers/w_broker.rs
Normal file
@@ -0,0 +1,262 @@
|
||||
//! # brokers/w_broker.rs — Write Broker Task
|
||||
//!
|
||||
//! The wBroker handles all mutating events on the REC store — writes, updates,
|
||||
//! and deletes. Each instance declares its queue, binds to the `rec.write`
|
||||
//! routing key, and dispatches incoming events to the factory layer.
|
||||
//!
|
||||
//! ## Calling Agents
|
||||
//! - `brokers::mod` — spawns N instances at IPL via `spawn_w_broker_pool()`
|
||||
//!
|
||||
//! ## Inputs
|
||||
//! - `Arc<lapin::Connection>` — shared AMQP connection from the broker pool
|
||||
//! - `queue_tag: String` — queue name prefix from config
|
||||
//! - `instance_id: u32` — numeric ID for log correlation (0-based)
|
||||
//!
|
||||
//! ## Outputs
|
||||
//! - Publishes reply payloads to the `reply_to` queue in each message header
|
||||
//!
|
||||
//! ## Event Types (routing key: rec.write)
|
||||
//!
|
||||
//! | Event | Description | Status |
|
||||
//! |------------|------------------------------------------|-------------|
|
||||
//! | `ping` | Health check — reply ACK + timestamp | Implemented |
|
||||
//! | `shutdown` | Ordered shutdown — cancel consumer | Implemented |
|
||||
//! | `write` | Insert a new REC record (GUID pkey) | Stub |
|
||||
//! | `update` | Update fields on an existing REC record | Stub |
|
||||
//! | `delete` | Remove REC record(s) by discriminants | Stub |
|
||||
//!
|
||||
//! **Author:** mks
|
||||
//! **Version:** 1.0
|
||||
//!
|
||||
//! ## History
|
||||
//! * `2026-04-05` - mks - original coding
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures_lite::StreamExt;
|
||||
use lapin::{
|
||||
BasicProperties, Channel, Connection,
|
||||
options::{
|
||||
BasicAckOptions, BasicConsumeOptions, BasicPublishOptions,
|
||||
QueueBindOptions, QueueDeclareOptions,
|
||||
},
|
||||
types::FieldTable,
|
||||
};
|
||||
|
||||
use crate::services::amqp::EXCHANGE_NAME;
|
||||
use super::error::BrokerError;
|
||||
|
||||
/// Routing key this broker binds to — write-side (mutating) REC events.
const ROUTING_KEY: &str = "rec.write";
|
||||
|
||||
/// Spawns a single wBroker task and returns immediately.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `conn` — shared AMQP connection; each task opens its own channel
|
||||
/// * `queue_tag` — queue name prefix from config (e.g. `"dev_"`)
|
||||
/// * `instance_id` — zero-based index for log correlation
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// `Ok(tokio::task::JoinHandle)` — the task handle held by the pool manager.
|
||||
/// `Err(BrokerError)` if channel or queue declaration fails before task start.
|
||||
///
|
||||
/// # History
|
||||
///
|
||||
/// * `2026-04-05` - mks - original coding
|
||||
pub async fn spawn(
|
||||
conn: Arc<Connection>,
|
||||
queue_tag: String,
|
||||
instance_id: u32,
|
||||
) -> Result<tokio::task::JoinHandle<()>, BrokerError> {
|
||||
let channel = conn.create_channel().await?;
|
||||
let queue_name = format!("{}rec.write", queue_tag);
|
||||
|
||||
channel
|
||||
.queue_declare(
|
||||
&queue_name,
|
||||
QueueDeclareOptions {
|
||||
durable: true,
|
||||
..Default::default()
|
||||
},
|
||||
FieldTable::default(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
channel
|
||||
.queue_bind(
|
||||
&queue_name,
|
||||
EXCHANGE_NAME,
|
||||
ROUTING_KEY,
|
||||
QueueBindOptions::default(),
|
||||
FieldTable::default(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
tracing::info!("wBroker[{}] queue '{}' declared and bound", instance_id, queue_name);
|
||||
|
||||
let handle = tokio::spawn(async move {
|
||||
if let Err(e) = run(channel, queue_name, instance_id).await {
|
||||
tracing::error!("wBroker[{}] exited with error: {}", instance_id, e);
|
||||
}
|
||||
});
|
||||
|
||||
Ok(handle)
|
||||
}
|
||||
|
||||
/// The wBroker consume loop.
///
/// Mirrors the rBroker pattern. Processes messages until a `shutdown` event
/// is received or the channel closes. Each message is acked after processing
/// regardless of outcome — a failed dispatch is logged, not requeued.
///
/// # History
///
/// * `2026-04-05` - mks - original coding
async fn run(
    channel: Channel,
    queue_name: String,
    instance_id: u32,
) -> Result<(), BrokerError> {
    let consumer_tag = format!("wbroker-{}", instance_id);

    let mut consumer = channel
        .basic_consume(
            &queue_name,
            &consumer_tag,
            BasicConsumeOptions::default(),
            FieldTable::default(),
        )
        .await?;

    tracing::info!("wBroker[{}] consuming on '{}'", instance_id, queue_name);

    while let Some(delivery) = consumer.next().await {
        let delivery = match delivery {
            Ok(d) => d,
            Err(e) => {
                // stream-level failure (e.g. channel/connection dropped) — exit the loop
                tracing::error!("wBroker[{}] delivery error: {}", instance_id, e);
                break;
            }
        };

        // extract the event type from the AMQP `type` message property;
        // empty string when unset, which falls through to the unknown arm
        let event_type = delivery
            .properties
            .kind()
            .as_ref()
            .map(|s| s.as_str().to_string())
            .unwrap_or_default();

        tracing::debug!("wBroker[{}] received event='{}'", instance_id, event_type);

        // dispatch — handlers return Some(bytes) when a reply should be sent
        let reply_payload: Option<Vec<u8>> = match event_type.as_str() {
            "ping" => handle_ping(instance_id),
            "shutdown" => {
                // ack before exiting so the message is not redelivered
                let _ = delivery.ack(BasicAckOptions::default()).await;
                tracing::info!("wBroker[{}] shutdown event received — exiting", instance_id);
                break;
            }
            "write" => handle_write(&delivery.data, instance_id),
            "update" => handle_update(&delivery.data, instance_id),
            "delete" => handle_delete(&delivery.data, instance_id),
            unknown => {
                tracing::warn!("wBroker[{}] unknown event type '{}'", instance_id, unknown);
                None
            }
        };

        // publish reply if the event specified a reply_to queue;
        // fire-and-forget callers simply omit reply_to
        if let Some(payload) = reply_payload {
            if let Some(reply_to) = delivery.properties.reply_to().as_ref() {
                let reply_queue = reply_to.as_str().to_string();
                let correlation_id = delivery.properties.correlation_id().clone();

                // echo the caller's correlation_id (empty if none was set)
                let props = BasicProperties::default()
                    .with_correlation_id(correlation_id.unwrap_or_default());

                if let Err(e) = channel
                    .basic_publish(
                        "", // default exchange — direct to queue by name
                        &reply_queue,
                        BasicPublishOptions::default(),
                        &payload,
                        props,
                    )
                    .await
                {
                    // reply failure is logged but does not stop the consume loop
                    tracing::error!("wBroker[{}] reply publish failed: {}", instance_id, e);
                }
            }
        }

        // ack unconditionally — failed dispatches are logged, never requeued
        let _ = delivery.ack(BasicAckOptions::default()).await;
    }

    tracing::info!("wBroker[{}] consume loop exited", instance_id);
    Ok(())
}
|
||||
|
||||
/// Handles a `ping` health check event — returns an ACK payload with a timestamp.
///
/// # History
///
/// * `2026-04-05` - mks - original coding
fn handle_ping(instance_id: u32) -> Option<Vec<u8>> {
    // seconds since the Unix epoch; 0 if the system clock predates the epoch
    let unix_secs = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map(|d| d.as_secs())
        .unwrap_or(0);

    let body = format!(
        r#"{{"status":"ok","broker":"wBroker","instance":{},"ts":{}}}"#,
        instance_id, unix_secs
    );
    Some(body.into_bytes())
}
|
||||
|
||||
/// Stub handler for `write` events.
|
||||
///
|
||||
/// # History
|
||||
///
|
||||
/// * `2026-04-05` - mks - stub
|
||||
fn handle_write(data: &[u8], instance_id: u32) -> Option<Vec<u8>> {
|
||||
tracing::warn!(
|
||||
"wBroker[{}] write event ({} bytes) — factory dispatch not yet implemented",
|
||||
instance_id, data.len()
|
||||
);
|
||||
not_implemented_response()
|
||||
}
|
||||
|
||||
/// Stub handler for `update` events.
|
||||
///
|
||||
/// # History
|
||||
///
|
||||
/// * `2026-04-05` - mks - stub
|
||||
fn handle_update(data: &[u8], instance_id: u32) -> Option<Vec<u8>> {
|
||||
tracing::warn!(
|
||||
"wBroker[{}] update event ({} bytes) — factory dispatch not yet implemented",
|
||||
instance_id, data.len()
|
||||
);
|
||||
not_implemented_response()
|
||||
}
|
||||
|
||||
/// Stub handler for `delete` events.
|
||||
///
|
||||
/// # History
|
||||
///
|
||||
/// * `2026-04-05` - mks - stub
|
||||
fn handle_delete(data: &[u8], instance_id: u32) -> Option<Vec<u8>> {
|
||||
tracing::warn!(
|
||||
"wBroker[{}] delete event ({} bytes) — factory dispatch not yet implemented",
|
||||
instance_id, data.len()
|
||||
);
|
||||
not_implemented_response()
|
||||
}
|
||||
|
||||
/// Standard NOT_IMPLEMENTED reply payload — shared by all stub handlers.
fn not_implemented_response() -> Option<Vec<u8>> {
    let body =
        r#"{"status":"error","code":"NOT_IMPLEMENTED","message":"factory dispatch not yet implemented"}"#;
    Some(body.as_bytes().to_vec())
}
|
||||
Reference in New Issue
Block a user