// ////////////////////////////////////////////////////////////////////////////////////////
//
// Private members
//
// ////////////////////////////////////////////////////////////////////////////////////////

/**
 * Add subscribers, i.e. blocks that are listening to messages from this one.
 * @param blockName the name of the block being subscribed to
 * @param subscriberName the subscribing block name
 * @param filterOptions custom filter options
 */
function _addSubscriber(blockName, subscriberName, filterOptions){

    if (!(blockName in blocks)) {
        Logger.error("Block '"+subscriberName+"' tried to subscribe to block '"+blockName+"', but no such block exists?");
        return;
    }

    if (settings.debug) Logger.info("Block '"+subscriberName+"' is subscribing to block '"+blockName+"'");

    blocks[blockName].subscribers.push({name: subscriberName, filter: filterOptions});
}
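// A minimal usage sketch (the block names and filter shape here are hypothetical,
// not from the source): a 'parser' block subscribing to messages emitted by a
// 'fetcher' block, with a custom filter passed through as-is.
//
//     _addSubscriber('fetcher', 'parser', {coin: 'BTC'});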
exec(command, function(error, stdout, stderr){

    if (error){
        console.log("error: ", error);
        return;
    }

    if (stderr){
        console.log("stderr: ", stderr);
    }

    Logger.info("Rabbitmq queue information sent");
});
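// For context, a plausible setup for the callback above; the actual `command`
// is not shown in the source, so `rabbitmqctl list_queues` is an assumption
// based on the "Rabbitmq queue information sent" log message.
var exec = require('child_process').exec;
var command = 'rabbitmqctl list_queues';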
this.process = function(message, context) {

    Logger.info("Tails ", message.coin);

    // Acknowledge the message
    context.ack(message);

    // Pass the data along to subscribers
    context.emit(message);
}
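// A minimal sketch of the block definition this process() handler belongs to,
// inferred from the fields the dispatcher reads elsewhere in this file (limit,
// start, process, and the block itself being passed as the context). The shape
// is an assumption, not taken verbatim from the source.
var tailsBlock = {
    limit: -1,  // max concurrent messages; -1 means unlimited
    start: function(context){
        // optional one-time setup, invoked by _startBlock()
    },
    process: function(message, context){
        context.ack(message);   // release the thread slot
        context.emit(message);  // forward to subscribing blocks
    }
};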
mongoose.connection.on('open', function () {

    // Run a repair via the raw Mongo command interface
    //Settings.db.repairDatabase();
    //mongoose.db.getCollection("$cmd").findOne({repairDatabase:1});

    Logger.info("Connection open, running repairDatabase...");

    Settings.db.command({repairDatabase:1}, function(err, result) {
        Logger.debug(err);
        process.exit(0);
    });

});
// ////////////////////////////////////////////////////////////////////////////

/**
 * Acknowledges a message that this block received from another block.
 * Note: this is called on the block's output collector that received the message,
 * NOT the block that sent the message.
 */
function _ack(blockName, msg){

    var block = blocks[blockName];

    _clearThread(block, msg.id);

    var processTime = process.hrtime(msg.sentTime);
    var nanos = processTime[0] * 1000000000 + processTime[1];
    var ms = nanos / 1000000;

    if (settings.debug) Logger.info("["+blockName+"] Message ack, took " + ms + " ms " + msg.id, block.threads);

    // Remove this message from my list of messages as it's been processed
    client.lrem(getMsgQueueHashId(blockName), -1, msg.id);
    client.del(getMsgHashId(msg.id));

    // If meta data logging is turned on, update the stats on this block
    if (settings.metaLogging) {
        _updateBlockMeta(msg.source, 'ack', {processTime: ms});
    }
};
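// Worked example of the timing math above: if process.hrtime(msg.sentTime)
// returns [1, 500000] (1 s plus 500,000 ns), then
//   nanos = 1 * 1000000000 + 500000 = 1000500000
//   ms    = 1000500000 / 1000000   = 1000.5 ms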
function _startBlock(blockName){

    var block = blocks[blockName];

    Logger.info("Starting child process for " + blockName);

    block.threads = [];

    if (typeof block.start !== 'undefined'){
        block.start(block);
    }

    if (typeof block.process !== 'undefined'){

        var workerPubSub = redisEmitter(settings.redis);

        // Listen for messages sent to the block
        workerPubSub.on(getPubSubHashId(blockName), function(channel, msg) {
            if (settings.debug) Logger.debug("["+blockName+"] Got message " + msg.id);
            _doProcess(blockName, msg);
        });

        setInterval(function(){_checkThreads(blockName)}, 200);

        if (settings.replay){
            setInterval(function(){_processLostMessages(blockName)}, settings.replayTime);
        }
    }

    function _doProcess(blockName, msg, isReprocessed){

        var block = blocks[blockName];

        if (!block){
            Logger.error("block not defined!?");
            return;
        }

        if (settings.debug) Logger.debug("[" + blockName + "] threads = " + block.threads.length + " limit = " + block.limit);

        // Only process if there is a free thread slot (a limit of -1 means unlimited)
        if (block.limit == -1 || block.threads.length < block.limit){

            block.threads.push(msg.id);

            try {
                if (settings.debug) {
                    if (isReprocessed){
                        Logger.warn("[" + blockName + "] Re-processing message " + msg.id + ". Replay count = " + msg.replays, block.threads);
                    }
                    else {
                        Logger.debug("[" + blockName + "] Processing message " + msg.id + ". Replay count = " + msg.replays, block.threads);
                    }
                }
                block.process(msg, block);
            }
            catch(err){
                Logger.error(err.message, err.stack);
                _clearThread(block, msg.id);
            }
        }
        else {
            if (settings.debug) Logger.warn("[" + blockName + "] Overloaded! Can't process this message so rejecting! " + msg.id);
        }
    }

    /**
     * Periodically check if the threads are still being used; we do this by checking
     * to see if the message for that thread is still on the stack.
     */
    function _checkThreads(blockName){

        if (!blockName){
            Logger.error("block not defined!?");
            return;
        }

        var block = blocks[blockName];

        if (!('threads' in block)) block.threads = [];
        if (block.threads.length == 0) return;

        async.map(
            block.threads,
            function(msgId, callback){
                // If the message key has expired or been deleted, release its thread
                client.exists(getMsgHashId(msgId), function(err, val){
                    if (!val) {
                        _clearThread(block, msgId);
                    }
                    callback();
                });
            },
            function(err, results){
            }
        );
    }

    /**
     * Periodically check for any messages that haven't been processed yet, which includes
     * any messages passed into the process method that were not acknowledged!
     */
    function _processLostMessages(blockName){

        if (!blockName){
            Logger.error("block not defined!?");
            return;
        }

        client.llen(getMsgQueueHashId(blockName), function(err, noMessages){

            var block = blocks[blockName];

            //if (noMessages && noMessages > 0){
            //    Logger.warn("[" + blockName + "] " + noMessages + " unprocessed messages in the queue");
            //}

            if (noMessages > 0 && (block.limit == -1 || block.threads.length <= block.limit)){

                // First get all the items in the message list, and starting from the end of
                // the list look for any messages that are not in the thread list (i.e. aren't
                // currently being processed)
                client.lrange(getMsgQueueHashId(blockName), 0, -1, function(err, messageList){

                    if (messageList){

                        // Find the lost messages, which will be the msgId's in the message
                        // list that are not in the threads list
                        var lost = _.difference(messageList, block.threads);

                        if (settings.debug) {
                            Logger.debug("Lost messages = ", lost);
                            Logger.debug("messageList = ", messageList);
                            Logger.debug("block.threads = ", block.threads);
                        }

                        if (lost && lost.length > 0){

                            var oldestMsgId = lost[lost.length-1];

                            // Remove this message from the message list and reprocess
                            client.lrem(getMsgQueueHashId(blockName), -1, oldestMsgId, function(err){

                                if (err) {
                                    Logger.error(err);
                                }
                                else {
                                    client.get(getMsgHashId(oldestMsgId), function(err, msgString){

                                        if (err) Logger.error(err);

                                        if (msgString){

                                            var msg = JSON.parse(msgString);
                                            msg.replays++;

                                            if (msg.replays < settings.replayLimit){

                                                // Add back into message queue and attempt to process
                                                client.lpush(getMsgQueueHashId(msg.target), msg.id);

                                                // Update (msg.replays changed)
                                                client.set(getMsgHashId(msg.id), JSON.stringify(msg));

                                                // Now try to re-process
                                                _doProcess(blockName, msg, true);
                                            }
                                            else {
                                                // Delete this message
                                                client.del(getMsgHashId(msg.id));
                                                if (settings.debug) Logger.error("Message " + msg.id + " has reached replay limit, discarding");
                                            }
                                        }
                                    });
                                }
                            });
                        }
                    }
                });
            }
        });

        //client.keys("msg-*");
    }
}
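// For reference, a sketch of the settings object the machinery above reads.
// All of the keys appear in this file; the example values are assumptions.
var settings = {
    debug: true,          // verbose per-message logging
    metaLogging: false,   // per-block stats via _updateBlockMeta
    replay: true,         // enable the _processLostMessages sweep
    replayTime: 5000,     // ms between lost-message sweeps
    replayLimit: 3,       // discard a message after this many replays
    redis: {host: '127.0.0.1', port: 6379}
};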
function forkWorker(blockName){

    // Spin off a worker process for the block....
    Logger.info("Forking worker for " + blockName);

    var worker = cluster.fork({name: blockName});
    workerNames[worker.id] = blockName;
}
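// A minimal sketch of how forkWorker() might be driven from the master process.
// The isMaster branch and the restart-on-exit handler are assumptions, not in
// the source; forkWorker and workerNames come from the code above.
if (cluster.isMaster){

    Object.keys(blocks).forEach(forkWorker);

    // If a worker dies, look up which block it was running and restart it
    cluster.on('exit', function(worker, code, signal){
        var blockName = workerNames[worker.id];
        Logger.warn("Worker for '" + blockName + "' died, restarting");
        delete workerNames[worker.id];
        forkWorker(blockName);
    });
}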
http.createServer(app).listen(app.get('port'), function(){
    Logger.info('Express server listening on port ' + app.get('port'));
});
var path = require('path');
var Logger = require('arsenic-logger');
var Settings = require('../Settings.js');
var config = require('../../logi-config.js');
var fs = require('fs');
var LogiApp = require('../models/LogiApp.js');
var LogiWidget = require('../models/LogiWidget.js');

var appId = '520d1959e042d60aa6000003';
var templateFilename = path.join(__dirname, '../html/view-template.html');
var outputFilename = path.join(__dirname, '../../public/cache/view-'+appId+'.html');

Logger.info("Template file = " + templateFilename);

LogiApp.load(appId, function(err, appObj){

    Logger.debug(appObj);

    fs.readFile(templateFilename, 'utf8', function (err, data) {

        if (err) {
            return Logger.error(err);
        }

        LogiWidget.getAllForLayer(appObj.id, 'visual', function(err, widgets){

            var txt = "";

            for (var i=0; i<widgets.length; i++){
Logger.debug("Connected to MongoDB!"); }); switch(process.env.NODE_ENV){ case 'production': case 'development': case 'local': //noinspection JSValidateTypes Settings.db = mongoose.connect('mongodb://localhost:27017/mongox', function(err) { if (err) { Logger.fatal("Error connecting to Mongo! Is Mongo running?"); throw err; } }); Logger.setLevel('debug'); break; } } } Logger.info("Setting up connection to database, using " + process.env.NODE_ENV + " environment"); Settings.init(); module.exports = Settings;