Archive: Namaste PHP AMQP framework v1.0 (2017-2020)
952 days continuous production uptime, 40k+ tp/s single node. Original corpo Bitbucket history not included — clean archive commit.
This commit is contained in:
217
classes/templates/deprecated/xxxLogs.class.inc
Normal file
217
classes/templates/deprecated/xxxLogs.class.inc
Normal file
@@ -0,0 +1,217 @@
|
||||
<?php

/**
 * Class xxxLogs
 *
 * Logging collection template: records framework-generated event messages.
 *
 * Design Notes:
 * -------------
 * Because this is a log, whose events are processed by a fire-and-forget (FnF)
 * queue, we do not cache and do not use auditing.
 * History is limited to the created event, and deletes are HARD.
 * Only one status is supported (ACTIVE) and no updates are allowed, which makes
 * record-locking unnecessary.
 * To reduce overhead, cache timers are not enabled (would recurse: the cache
 * layer itself logs through this class).
 * The collection does not need GUID tokens, but we store the Broker Event ID
 * passed in by the caller (LOG_EVENT_GUID).
 *
 * @author mike@givingassistant.org
 * @version 2.1.3
 *
 * HISTORY:
 * ========
 * 06-07-17 mks original coding
 *
 */
class xxxLogs
{
    // --- storage configuration -------------------------------------------------
    public $service    = CONFIG_DATABASE_DDB_APPSERVER; // defines the nosql server service configuration
    public $schema     = TEMPLATE_DB_DDB;               // defines the storage schema for the class
    public $collection = COLLECTION_MONGO_LOGS;         // sets the collection (table) name
    public $seqKey     = COLLECTION_NOSQL_LOGS_SQK;     // sets the sequence key identifier
    public $extension  = COLLECTION_MONGO_LOGS_EXT;     // sets the extension for the collection

    // --- behavior flags (see Design Notes above for the rationale) -------------
    public $setCache         = false;             // set to true to cache class data
    public $setDeletes       = true;              // set to true to allow HARD deletes (otherwise: SOFT)
    public $setAuditing      = AUDIT_NOT_ENABLED; // set to an AUDIT_* constant
    public $setJournaling    = false;             // set to true to allow journaling
    public $setUpdates       = false;             // set to true to allow record updates
    public $setHistory       = false;             // set to true to enable detailed record history tracking
    public $setDefaultStatus = STATUS_ACTIVE;     // the default status for each record
    public $setSearchStatus  = STATUS_ACTIVE;     // the default search status
    public $setLocking       = false;             // set to true to enable record locking for collection
    public $setTimers        = false;             // set to true to enable collection query timers
    public $setPKeyType      = DB_TOKEN;          // sets the primary key type: either ID or TOKEN

    /*
     * Tokens are GUIDs. If you're using a GUID as the pkey for the class, this
     * value should be false. If you're using an integer pkey and you want a
     * token, you must explicitly declare the token fields in $fields and set
     * this value to true. If you're using an integer pkey and you don't want a
     * token, set this value to false.
     */
    public $setTokens    = false;   // set to true: adds the idToken field functionality
    public $selfDestruct = true;    // set to false if the class contains methods
    public $cacheTimer   = 0;       // number of seconds a tuple will remain in-cache
    public $setEnv       = ENV_ALL; // defines the env where this class can be accessed
    public $setMeta      = false;   // defines if we'll use the meta package for history

    // --- field map: field name => DDB storage type -----------------------------
    public $fields = [
        DB_PKEY         => DDB_TYPE_STRING, // GUID because setPKeyType == DB_TOKEN
        LOG_FILE        => DDB_TYPE_STRING,
        LOG_METHOD      => DDB_TYPE_STRING,
        LOG_LINE        => DDB_TYPE_NUMBER,
        LOG_CLASS       => DDB_TYPE_STRING,
        LOG_LEVEL       => DDB_TYPE_STRING,
        LOG_MESSAGE     => DDB_TYPE_STRING,
        LOG_STACK_TRACE => DDB_TYPE_LIST,
        DB_STATUS       => DDB_TYPE_STRING,
        DB_HISTORY      => DDB_TYPE_LIST,
        LOG_IS_EVENT    => DDB_TYPE_BOOLEAN,
        LOG_EVENT_GUID  => DDB_TYPE_STRING,
        LOG_CREATED     => DDB_TYPE_NUMBER
    ];

    // --- field map: field name => PHP-side data type ---------------------------
    // NOTE(review): DB_TIMER is typed here but has no entry in $fields above —
    // confirm whether the framework injects it, or whether one of the two maps
    // is out of date.
    public $fieldTypes = [
        DB_PKEY         => DATA_TYPE_STRING, // guid
        LOG_FILE        => DATA_TYPE_STRING,
        LOG_METHOD      => DATA_TYPE_STRING,
        LOG_LINE        => DATA_TYPE_INTEGER,
        LOG_CLASS       => DATA_TYPE_STRING,
        LOG_LEVEL       => DATA_TYPE_STRING,
        LOG_MESSAGE     => DATA_TYPE_STRING,
        LOG_STACK_TRACE => DATA_TYPE_ARRAY,
        DB_STATUS       => DATA_TYPE_STRING,
        DB_TIMER        => DATA_TYPE_DOUBLE,
        DB_HISTORY      => DATA_TYPE_ARRAY,
        LOG_IS_EVENT    => DATA_TYPE_BOOL,
        LOG_EVENT_GUID  => DATA_TYPE_STRING,
        LOG_CREATED     => DATA_TYPE_INTEGER
    ];

    // In the DDB world, this is the primary composite key for this table.
    public $indexes = [ DB_PKEY => DDB_INDEX_HASH, LOG_CREATED => DDB_INDEX_RANGE ];

    /*
     * Declaring global and local secondary indexes:
     *
     * Limit: 5 of each
     *
     * General Format:
     * ---------------
     * Each tuple, up to the limit, is a record that contains the following array structure:
     *
     * [[
     *    'name'           => INDEX_NAME,                                  // REQUIRED
     *    'indexes'        => [ KEY_NAME => HASH {, KEY_NAME => RANGE } ], // REQUIRED
     *    'projectionType' => { KEYS_ONLY | INCLUDE | ALL },               // REQUIRED
     *    'nka'            => { [ list of one or more non-key attributes !>20 ] }, // REQUIRED if projection = INCLUDE
     *    'throughput'     => [ 'rcu' => <integer>, 'wcu' => <integer> ]   // REQUIRED for GLOBAL only
     * ],....];
     *
     * Secondary index keys must use the key literals as shown above
     * ('name', 'indexes', 'projectionType', etc.)
     */
    public $globalIndexes = [
        [
            // This creates a partition key based on the log level (fatal, warn,
            // debug, etc.) with a sort key based on the method (the class method
            // that created the log event).
            // Query examples: give me all fatal errors;
            //                 give me all warnings generated by the method _fetchData().
            // Since the base keys (id, date) are projected onto this index, I am
            // (awaiting testing) assuming you could also range your query on the
            // creation date.
            STRING_NAME       => 'index_log_level',
            STRING_INDEXES    => [ LOG_LEVEL => DDB_INDEX_HASH, LOG_METHOD => DDB_INDEX_RANGE ],
            DDB_STRING_PT     => DDB_PT_ALL,
            STRING_THROUGHPUT => [ CONFIG_DATABASE_READ_CAPACITY_UNITS => 100, CONFIG_DATABASE_WRITE_CAPACITY_UNITS => 100 ]
        ],
        [
            // Second global index: key is the created date, sort is the error
            // level. This answers queries like:
            //   give me all errors in the last hour;
            //   give me all fatal errors for January.
            STRING_NAME                  => 'index_log_created',
            STRING_INDEXES               => [ LOG_CREATED => DDB_INDEX_HASH, LOG_LEVEL => DDB_INDEX_RANGE ],
            DDB_STRING_PT                => DDB_PT_INCLUDE,
            DDB_STRING_NON_KEY_ATTRIBUTE => [ LOG_FILE, LOG_CLASS, LOG_METHOD, LOG_LINE ],
            STRING_THROUGHPUT            => [ CONFIG_DATABASE_READ_CAPACITY_UNITS => 100, CONFIG_DATABASE_WRITE_CAPACITY_UNITS => 100 ]
        ]
    ];

    public $localIndexes = [
        [
            // Secondary index using the log level as the range value, on the
            // assumption that the base index hash is used as the local
            // secondary hash.
            STRING_NAME    => 'index_sec_level',
            STRING_INDEXES => [ DB_PKEY => DDB_INDEX_HASH, LOG_LEVEL => DDB_INDEX_RANGE ],
            DDB_STRING_PT  => DDB_PT_ALL
        ]
    ];

    public $exposedFields = null; // list of fields exposed to clients
    public $cacheMap      = null; // k->v paired array mapping fields -> cachedField names
    public $binFields     = null; // binary fields that have to be encoded

    // These fields aren't used in DDB, but are used in Mongo, so they exist
    // here only for code-compatibility.
    public $uniqueIndexes  = null;
    public $sparseIndexes  = null;
    public $subCollections = null;

    /**
     * __construct() -- public method
     *
     * The only job of the constructor is to register the destructor as a
     * shutdown function (see __destruct() for why).
     *
     * @author mike@givingassistant.org
     * @version 1.0
     *
     * HISTORY:
     * ========
     * 06-07-17 mks original coding
     *
     */
    public function __construct()
    {
        register_shutdown_function([$this, STRING_DESTRUCTOR]);
    }


    /**
     * __clone() -- private method
     *
     * Silently disallows cloning of the object: declared private so any
     * external clone attempt is a fatal error.
     *
     * @author mike@givingassistant.org
     * @version 1.0
     *
     * @return null
     *
     * HISTORY:
     * ========
     * 06-07-17 mks original coding
     *
     */
    private function __clone()
    {
        // intentionally empty — a magic-clone return value is never observable
    }


    /**
     * __destruct() -- public method
     *
     * As of PHP 5.3.10 destructors are not run on shutdown caused by fatal
     * errors. The destructor is therefore registered as a shutdown function in
     * the constructor — so any recovery efforts should go in this method.
     *
     * @author mike@givingassistant.org
     * @version 1.0
     *
     * HISTORY:
     * ========
     * 06-07-17 mks original coding
     *
     */
    public function __destruct()
    {
        ;
    }
}
Reference in New Issue
Block a user