Archive: Namaste PHP AMQP framework v1.0 (2017-2020)

952 days continuous production uptime, 40k+ tp/s single node.
Original corpo Bitbucket history not included — clean archive commit.
This commit is contained in:
2026-04-05 09:49:30 -07:00
commit 373ebc8c93
1284 changed files with 409372 additions and 0 deletions

View File

@@ -0,0 +1,217 @@
<?php
/**
* Class xxxLogs
*
* This is the logging class definition that records framework-generated event messages.
*
* Design Notes:
* -------------
* Because this is a log, whose events are processed by a fire-and-forget (FnF) queue, we're not going to cache or use auditing.
* History is limited to the created event and deletes are HARD.
* Only one status is supported: ACTIVE, and no updates are allowed, making record-locking unnecessary.
* To reduce overhead, we're not enabling cache timers, because of recursion.
* The collection does not need GUID tokens, but we are storing the passed Broker Event ID.
*
* @author mike@givingassistant.org
* @version 2.1.3
*
* HISTORY:
* ========
* 06-07-17 mks original coding
*
*
*/
class xxxLogs
{
    public $service = CONFIG_DATABASE_DDB_APPSERVER; // defines the nosql server service configuration
    public $schema = TEMPLATE_DB_DDB; // defines the storage schema for the class
    public $collection = COLLECTION_MONGO_LOGS; // sets the collection (table) name
    public $seqKey = COLLECTION_NOSQL_LOGS_SQK; // sets the sequence key identifier
    public $extension = COLLECTION_MONGO_LOGS_EXT; // sets the extension for the collection
    public $setCache = false; // set to true to cache class data
    public $setDeletes = true; // set to true to allow HARD deletes (otherwise: SOFT)
    public $setAuditing = AUDIT_NOT_ENABLED; // set to an AUDIT_* constant
    public $setJournaling = false; // set to true to allow journaling
    public $setUpdates = false; // set to true to allow record updates
    public $setHistory = false; // set to true to enable detailed record history tracking
    public $setDefaultStatus = STATUS_ACTIVE; // set the default status for each record
    public $setSearchStatus = STATUS_ACTIVE; // set the default search status
    public $setLocking = false; // set to true to enable record locking for collection
    public $setTimers = false; // set to true to enable collection query timers
    public $setPKeyType = DB_TOKEN; // sets the primary key type: either ID or TOKEN
    /*
     * tokens are guids -- if you're using a guid as the pkey for the class, then this value should be false.
     * if you're using an integer pkey, and you want a token, you have to explicitly declare
     * the token fields in $fields and set this value to true.
     * if you're using an integer pkey and you don't want a token, set this value to false.
     */
    public $setTokens = false; // set to true: adds the idToken field functionality
    public $selfDestruct = true; // set to false if the class contains methods
    public $cacheTimer = 0; // number of seconds a tuple will remain in-cache
    public $setEnv = ENV_ALL; // defines the env where this class can be accessed
    public $setMeta = false; // defines if we'll use the meta package for history
    // DDB attribute types, keyed by field name. The key set here must mirror $fieldTypes below.
    public $fields = [
        DB_PKEY => DDB_TYPE_STRING, // GUID because setPKeyType == DB_TOKEN
        LOG_FILE => DDB_TYPE_STRING,
        LOG_METHOD => DDB_TYPE_STRING,
        LOG_LINE => DDB_TYPE_NUMBER,
        LOG_CLASS => DDB_TYPE_STRING,
        LOG_LEVEL => DDB_TYPE_STRING,
        LOG_MESSAGE => DDB_TYPE_STRING,
        LOG_STACK_TRACE => DDB_TYPE_LIST,
        DB_STATUS => DDB_TYPE_STRING,
        DB_HISTORY => DDB_TYPE_LIST,
        LOG_IS_EVENT => DDB_TYPE_BOOLEAN,
        LOG_EVENT_GUID => DDB_TYPE_STRING,
        LOG_CREATED => DDB_TYPE_NUMBER
    ];
    // PHP-side data types for each field, keyed by field name.
    // FIX: removed the DB_TIMER entry that appeared here with no matching key in
    // $fields above -- logs do not record timers (setTimers is false); the entry
    // was a copy/paste leftover from the metrics template, which declares
    // DB_TIMER in both maps.
    public $fieldTypes = [
        DB_PKEY => DATA_TYPE_STRING, // guid
        LOG_FILE => DATA_TYPE_STRING,
        LOG_METHOD => DATA_TYPE_STRING,
        LOG_LINE => DATA_TYPE_INTEGER,
        LOG_CLASS => DATA_TYPE_STRING,
        LOG_LEVEL => DATA_TYPE_STRING,
        LOG_MESSAGE => DATA_TYPE_STRING,
        LOG_STACK_TRACE => DATA_TYPE_ARRAY,
        DB_STATUS => DATA_TYPE_STRING,
        DB_HISTORY => DATA_TYPE_ARRAY,
        LOG_IS_EVENT => DATA_TYPE_BOOL,
        LOG_EVENT_GUID => DATA_TYPE_STRING,
        LOG_CREATED => DATA_TYPE_INTEGER
    ];
    // in the ddb world, this is the primary composite key for this table
    public $indexes = [ DB_PKEY => DDB_INDEX_HASH, LOG_CREATED => DDB_INDEX_RANGE ];
    /*
     * declaring global and local secondary indexes:
     *
     * Limit: 5 of each
     *
     * General Format:
     * ---------------
     * Each tuple, up to the limit, is a record that contains the following array structure:
     *
     * [[
     *      'name'           => INDEX_NAME,                                 // REQUIRED
     *      'indexes'        => [ KEY_NAME => HASH {, KEY_NAME => RANGE } ],// REQUIRED
     *      'projectionType' => { KEYS_ONLY | INCLUDE | ALL },              // REQUIRED
     *      'nka'            => { [ list of one or more non-key attributes !>20 ] },  // REQUIRED if projection = INCLUDE
     *      'throughput'     => [ 'rcu' => <integer>, 'wcu' => <integer> ] // REQUIRED for GLOBAL only
     * ],....];
     *
     * secondary index keys must use the key literals as shown above. ('name', 'indexes', 'projectionType', etc.)
     *
     */
    public $globalIndexes = [
        [
            // this creates a partition key based on the log level (fatal, warn, debug, etc.) with a sort key
            // based on the method (the class method that created the log event).
            // query example: give me all fatal errors
            //                give me all warnings generated by the method: _fetchData()
            // Since the base keys (id, date) are projected onto this index, I am (awaiting testing) assuming
            // that you could also range your query based on the creation date.
            STRING_NAME => 'index_log_level',
            STRING_INDEXES => [ LOG_LEVEL => DDB_INDEX_HASH, LOG_METHOD => DDB_INDEX_RANGE ],
            DDB_STRING_PT => DDB_PT_ALL,
            STRING_THROUGHPUT => [ CONFIG_DATABASE_READ_CAPACITY_UNITS => 100, CONFIG_DATABASE_WRITE_CAPACITY_UNITS => 100 ]
        ],
        [
            // second global index: key is the created date, and the sort is the error level.
            // intended to answer queries like:
            //      give me all errors in the last hour
            //      give me all fatal errors for January
            // NOTE(review): a DDB HASH key only supports equality lookups; ranging over
            // LOG_CREATED ("last hour") requires it to be a RANGE key -- confirm this
            // index answers the queries described above.
            STRING_NAME => 'index_log_created',
            STRING_INDEXES => [ LOG_CREATED => DDB_INDEX_HASH, LOG_LEVEL => DDB_INDEX_RANGE ],
            DDB_STRING_PT => DDB_PT_INCLUDE,
            DDB_STRING_NON_KEY_ATTRIBUTE => [ LOG_FILE, LOG_CLASS, LOG_METHOD, LOG_LINE ],
            STRING_THROUGHPUT => [ CONFIG_DATABASE_READ_CAPACITY_UNITS => 100, CONFIG_DATABASE_WRITE_CAPACITY_UNITS => 100 ]
        ]
    ];
    public $localIndexes = [
        [
            // secondary index using the log-level as the range value, making the assumption that the
            // base index hash will be used as the local secondary hash
            STRING_NAME => 'index_sec_level',
            STRING_INDEXES => [ DB_PKEY => DDB_INDEX_HASH, LOG_LEVEL => DDB_INDEX_RANGE ],
            DDB_STRING_PT => DDB_PT_ALL
        ]
    ];
    public $exposedFields = null; // list of fields exposed to clients
    public $cacheMap = null; // k->v paired array mapping fields -> cachedField Names
    public $binFields = null; // binary fields that have to be encoded
    // these fields aren't used in DDB, but are used in mongo, so are here only for code-compatibility
    public $uniqueIndexes = null;
    public $sparseIndexes = null;
    public $subCollections = null;
    /**
     * __construct() -- public method
     *
     * Registers the destructor as a shutdown function so cleanup runs even on
     * fatal-error shutdowns, where PHP does not invoke destructors.
     *
     * @author mike@givingassistant.org
     * @version 1.0
     *
     * HISTORY:
     * ========
     * 06-07-17 mks original coding
     *
     */
    public function __construct()
    {
        register_shutdown_function([$this, STRING_DESTRUCTOR]);
    }
    /**
     * __clone() -- private function
     *
     * Silently disallows cloning of the object.
     *
     * @author mike@givingassistant.org
     * @version 1.0
     *
     * @return null
     *
     * HISTORY:
     * ========
     * 06-07-17 mks original coding
     *
     */
    private function __clone()
    {
        return(null);
    }
    /**
     * __destruct() -- public function
     *
     * As of PHP 5.3.10 destructors are not run on shutdown caused by fatal errors.
     *
     * The destructor is registered as a shut-down function in the constructor -- so any recovery
     * efforts should go in this method. Currently a deliberate no-op.
     *
     * @author mike@givingassistant.org
     * @version 1.0
     *
     * HISTORY:
     * ========
     * 06-07-17 mks original coding
     *
     */
    public function __destruct()
    {
        ;
    }
}

View File

@@ -0,0 +1,185 @@
<?php
/**
* Class xxxMetrics
*
* This is the metrics class definition that records timer events, usually database queries.
*
* Design Notes:
* -------------
* Metrics is identical to Logs, whose events are processed by a FnF queue; we're not going to cache or use auditing.
* History is limited to the created event and deletes are HARD.
* Only one status is supported: ACTIVE, and no updates are allowed, making record-locking unnecessary.
* To reduce overhead, we're not enabling cache timers, because of recursion.
* The collection does not need GUID tokens, but we are storing the passed session ID in the meta payload for the
* create event - which is the only history event required or logged.
*
* @author mike@givingassistant.org
* @version 1.0
*
* HISTORY:
* ========
* 06-07-17 mks code complete
*
*/
class xxxMetrics
{
public $service = CONFIG_DATABASE_DDB_APPSERVER; // defines the nosql server service configuration
public $schema = TEMPLATE_DB_DDB; // defines the storage schema for the class
public $collection = COLLECTION_MONGO_METRICS; // sets the collection (table) name
public $seqKey = COLLECTION_NOSQL_METRICS_SQK; // sets the sequence key identifier
public $extension = COLLECTION_MONGO_METRICS_EXT; // sets the extension for the collection
public $setCache = false; // set to true to cache class data
public $setDeletes = true; // set to true to allow HARD deletes (otherwise: SOFT)
public $setAuditing = AUDIT_NOT_ENABLED; // set to an AUDIT_* constant
public $setJournaling = false; // set to true to enable journaling
public $setUpdates = false; // set to true to allow record updates
public $setHistory = false; // set to true to enable detailed record history tracking
public $setDefaultStatus = STATUS_ACTIVE; // set the default status for each record
public $setSearchStatus = STATUS_ACTIVE; // set the default search status
public $setLocking = false; // set to true to enable record locking for collection
public $setTimers = false; // set to true to enable collection query timers
public $setPKeyType = DB_TOKEN; // sets the primary key type: either ID or TOKEN
/*
* tokens are guids -- if you're using a guid as the pkey for the class, then this value should be false.
* if you're using an integer pkey, and you want a token, you have to explicitly declare
* the token fields in $fields and set this value to true.
* if you're using an integer pkey and you don't want a token, set this value to false.
*/
public $setTokens = false; // set to true: adds the idToken field functionality
public $selfDestruct = true; // set to false if the class contains methods
public $cacheTimer = 0; // number of seconds a tuple will remain in-cache
public $setEnv = ENV_ALL; // defines the env where this class can be accessed
// NOTE(review): the class docblock says the session ID is stored in the meta payload
// for the create event, but setMeta is false here -- confirm which is intended.
public $setMeta = false; // defines if we'll use the meta package for history
// DDB attribute types, keyed by field name. The key set must mirror $fieldTypes below.
public $fields = [
DB_PKEY => DDB_TYPE_STRING, // GUID because setPKeyType == DB_TOKEN
LOG_FILE => DDB_TYPE_STRING,
LOG_METHOD => DDB_TYPE_STRING,
LOG_LINE => DDB_TYPE_NUMBER,
LOG_CLASS => DDB_TYPE_STRING,
LOG_LEVEL => DDB_TYPE_STRING,
LOG_MESSAGE => DDB_TYPE_STRING,
LOG_STACK_TRACE => DDB_TYPE_LIST,
DB_STATUS => DDB_TYPE_STRING,
DB_TIMER => DDB_TYPE_NUMBER, // the recorded timer value -- the field that distinguishes metrics from logs
DB_HISTORY => DDB_TYPE_LIST,
LOG_IS_EVENT => DDB_TYPE_BOOLEAN,
LOG_EVENT_GUID => DDB_TYPE_STRING,
LOG_CREATED => DDB_TYPE_NUMBER
];
// PHP-side data types for each field; keys mirror $fields above.
public $fieldTypes = [
DB_PKEY => DATA_TYPE_STRING, // guid
LOG_FILE => DATA_TYPE_STRING,
LOG_METHOD => DATA_TYPE_STRING,
LOG_LINE => DATA_TYPE_INTEGER,
LOG_CLASS => DATA_TYPE_STRING,
LOG_LEVEL => DATA_TYPE_STRING,
LOG_MESSAGE => DATA_TYPE_STRING,
LOG_STACK_TRACE => DATA_TYPE_ARRAY,
DB_STATUS => DATA_TYPE_STRING,
DB_TIMER => DATA_TYPE_DOUBLE,
DB_HISTORY => DATA_TYPE_ARRAY,
LOG_IS_EVENT => DATA_TYPE_BOOL,
LOG_EVENT_GUID => DATA_TYPE_STRING,
LOG_CREATED => DATA_TYPE_INTEGER
];
// in the ddb world, this is the primary composite key for this table
public $indexes = [ DB_PKEY => DDB_INDEX_HASH, LOG_CREATED => DDB_INDEX_RANGE ];
/*
* declaring global and local secondary indexes:
*
* Limit: 5 of each
*
* General Format:
* ---------------
* Each tuple, up to the limit, is a record that contains the following array structure:
*
* [[
* 'name' => INDEX_NAME, // REQUIRED
* 'indexes' => [ KEY_NAME => HASH {, KEY_NAME => RANGE } ], // REQUIRED
* 'projectionType' => { KEYS_ONLY | INCLUDE | ALL }, // REQUIRED
* 'nka' => { [ list of one or more non-key attributes !>20 ] }, // REQUIRED if projection = INCLUDE
* 'throughput' => [ 'rcu' => <integer>, 'wcu' => <integer> ] // REQUIRED for GLOBAL only
* ],....];
*
* secondary index keys must use the key literals as shown above. ('name', 'indexes', 'projectionType', etc.)
*
*/
// metrics declares no secondary indexes; queries go through the primary composite key only
public $globalIndexes = null;
public $localIndexes = null;
public $exposedFields = null; // list of fields exposed to clients
public $cacheMap = null; // k->v paired array mapping fields -> cachedField Names
public $binFields = null; // binary fields that have to be encoded
// these fields aren't used in DDB, but are used in mongo, so are here only for code-compatibility
public $uniqueIndexes = null;
public $sparseIndexes = null;
public $subCollections = null;
/**
* __construct() -- public method
*
* Registers the destructor as a shutdown function so cleanup runs even on
* fatal-error shutdowns, where PHP does not invoke destructors.
*
* @author mike@givingassistant.org
* @version 1.0
*
* HISTORY:
* ========
* 06-07-17 mks original coding
*
*/
public function __construct()
{
register_shutdown_function([$this, STRING_DESTRUCTOR]);
}
/**
* __clone() -- private function
*
* Silently disallows cloning of the object.
*
* @author mike@givingassistant.org
* @version 1.0
*
* @return null
*
* HISTORY:
* ========
* 06-07-17 mks original coding
*
*/
private function __clone()
{
return(null);
}
/**
* __destruct() -- public function
*
* As of PHP 5.3.10 destructors are not run on shutdown caused by fatal errors.
*
* The destructor is registered as a shut-down function in the constructor -- so any recovery
* efforts should go in this method. Currently a deliberate no-op.
*
* @author mike@givingassistant.org
* @version 1.0
*
* HISTORY:
* ========
* 06-07-17 mks original coding
*
*/
public function __destruct()
{
;
}
}

View File

@@ -0,0 +1,87 @@
<?php
/**
* Class: gatTestMySQL
*
* This is the definition for a mysql/mariadb-based test class. Intended usage is for unit-testing for basic CRUD
* operations.
*
* This template should also serve as a guide, or documentation, for creating mysql/mariadb template classes.
*
*
* @author mike@givingassistant.org
* @version 1.0
*
* HISTORY:
* ========
* 06-30-17 mks original coding
*
*/
class gatTestMySQL
{
public $version = 1; // template schema version
// NOTE(review): unlike the DDB templates, no $service property is declared here --
// presumably the PDO schema resolves the server from $schema; confirm.
public $schema = TEMPLATE_DB_PDO; // defines the storage schema for the class
public $collection = COLLECTION_MYSQL_TEST; // sets the collection (table) name
public $seqKey = COLLECTION_MYSQL_TEST_SQK; // sets the sequence key identifier
public $extension = COLLECTION_MYSQL_TEST_EXT; // sets the extension for the collection
public $setCache = true; // set to true to cache class data
public $setDeletes = true; // set to true to allow HARD deletes (otherwise: SOFT)
public $setAuditing = AUDIT_NOT_ENABLED; // set to an AUDIT_* constant
public $setJournaling = false; // set to true to allow journaling
public $setUpdates = true; // set to true to allow record updates
public $setHistory = false; // set to true to enable detailed record history tracking
public $setDefaultStatus = STATUS_ACTIVE; // set the default status for each record
public $setSearchStatus = STATUS_ACTIVE; // set the default search status
public $setLocking = false; // set to true to enable record locking for collection
public $setTimers = true; // set to true to enable collection query timers
public $setPKey = DB_PKEY; // sets the primary key for the collection
public $selfDestruct = true; // set to false if this class contains methods
/*
* tokens are guids -- if you're using a guid as the pkey for the class, then this value should be false.
* if you're using an integer pkey, and you want a token, you have to explicitly declare
* the token fields in $fields and set this value to true.
* if you're using an integer pkey and you don't want a token, set this value to false.
*/
public $setTokens = false; // set to true: adds the idToken field functionality
public $cacheTimer = 0; // number of seconds a tuple will remain in-cache
public $setEnv = ENV_ALL; // defines the env where this class can be accessed
public $setMeta = false; // defines if we'll use the meta package for history
// column data types, keyed by field name; one entry per column of the test table
public $fields = [
DB_PKEY => DATA_TYPE_INTEGER, // pkey (integer) used internally and is REQUIRED
TEST_FIELD_TEST_STRING => DATA_TYPE_STRING,
TEST_FIELD_TEST_DOUBLE => DATA_TYPE_DOUBLE,
TEST_FIELD_TEST_INT => DATA_TYPE_INTEGER,
TEST_FIELD_TEST_BOOL => DATA_TYPE_BOOL,
TEST_FIELD_TEST_OBJECT => DATA_TYPE_OBJECT,
DB_TOKEN => DATA_TYPE_STRING // unique key (string) exposed externally and is REQUIRED
];
// cache-map constants are in ./common/cacheMaps.php
// maps each field to its cached-field name; DB_PKEY is deliberately not cached
public $cacheMap = [
DB_TOKEN => CM_TST_TOKEN,
TEST_FIELD_TEST_STRING => CM_TST_FIELD_TEST_STRING,
TEST_FIELD_TEST_DOUBLE => CM_TST_FIELD_TEST_DOUBLE,
TEST_FIELD_TEST_INT => CM_TST_FIELD_TEST_INT,
TEST_FIELD_TEST_BOOL => CM_TST_FIELD_TEST_BOOL,
TEST_FIELD_TEST_OBJECT => CM_TST_FIELD_TEST_OBJ
];
// for mysql, all indexed fields are listed in this container regardless of index type. If a field appears but
// is not a unique or compound index, then it is just a regular index.
public $indexes = [ DB_PKEY, TEST_FIELD_TEST_INT, DB_TOKEN ];
// unique indexes listed as an indexed array
public $uniqueIndexes = [ DB_TOKEN ];
// compound indexes are listed as sub-arrays:
// [ [ col-1, ..., col-n ], ..., [] ]
public $compoundIndexes = null;
// exposed fields are mutually exclusive with cacheMaps; one or the other but not both
public $exposedFields = null;
// binary fields require special handling (encoding) and have to be listed here
public $binaryFields = null;
}