// Keep the pool at full strength: log the cause of death, then fork a replacement.
cluster.on('exit', (worker, code, signal) => {
  console.log('Worker ' + worker.process.pid + ' died with code: ' + code + ', and signal: ' + signal);
  console.log('Starting a new worker');
  cluster.fork();
});
// FIX: the 'death' event was renamed 'exit' in Node.js 0.8, so on any modern
// Node this handler registered under 'death' would never fire and dead
// workers were never restarted.
cluster.on('exit', function (worker) {
  logger.warn('Worker ' + worker.id + ' died, restarting...');
  cluster.fork();
});
// On worker exit: report pid and exit code, then spawn a replacement.
cluster.on('exit', (worker) => {
  const exitCode = worker.process.exitCode;
  console.log('worker ' + worker.process.pid + ' died (' + exitCode + '). restarting...');
  cluster.fork();
});
// FIX: 'death' was renamed 'exit' and worker.pid moved to worker.process.pid
// in Node.js 0.8 — as written, the handler never fired on modern Node, and
// the registry would have been keyed by `undefined`.
cluster.on('exit', function (worker) {
  // When a worker ends, drop it from the registry and start a replacement.
  delete workers[worker.process.pid];
  var replacement = cluster.fork();
  workers[replacement.process.pid] = replacement;
});
// Master forks a fixed pool of workers; each worker loads the app entry point.
var cluster = require('cluster');
var numCPUs = require('os').cpus().length;

if (cluster.isMaster) {
  console.log("numCPUs " + numCPUs);
  // OK, lets ignore the actual CPUS, so we can see the concurrency
  numCPUs = 4;
  for (var i = 0; i < numCPUs; i += 1) {
    cluster.fork();
  }
  cluster.on('exit', function (worker, code, signal) {
    console.log('worker ' + worker.process.pid + ' died');
  });
} else {
  console.log("Worker " + process.pid);
  // change this line to Your Node.js app entry point.
  require("./bin/www");
}
// Immediately replace any worker that exits.
cluster.on('exit', () => cluster.fork());
// FIX: 'death' → 'exit' and worker.pid → worker.process.pid (both changed in
// Node.js 0.8); the original handler never fired on modern Node, and would
// have logged "worker undefined died" if it had.
cluster.on('exit', function (worker) {
  logger.log('warn', 'worker ' + worker.process.pid + ' died');
  cluster.fork();
});
// Report the death through util.onError, then spin up a replacement worker.
cluster.on("exit", function (worker, code, signal) {
  var pid = worker.process.pid;
  util.onError("Worker '" + pid + "' died, spinning up another!");
  cluster.fork();
});
// Serve everything else
app.use(express.static(__dirname + '/../dist'));

// Start server
var host = config.appConfig.host;
var port = config.appConfig.port;

if (cluster.isMaster) {
  // Replace any worker whose IPC channel drops; log every fork as it happens.
  cluster.on('disconnect', function (worker) {
    console.log('PID #' + worker.process.pid + ' died. spawning a new process...');
    cluster.fork();
  });
  cluster.on('fork', function (worker) {
    console.log('PID #' + worker.process.pid + ' started!');
  });

  console.log('Maps API 2.0 server will run in ' + cpuCount + ' threads. Spawning the new processes...');
  for (var i = 0; i < cpuCount; i += 1) {
    // Each worker receives its index through the environment.
    cluster.fork({number: i});
  }
} else {
  app.listen(port, host, function () {
    // Only the first worker announces the listen address.
    if (process.env.number == 0) {
      console.log(clc.green('Maps API 2.0 server listening on ' + (host ? host + ':' : '') + port));
    }
  });
}
/**
 * Fork a pm2-managed application as a cluster worker.
 *
 * @param {Object} env_copy - per-process environment/config; read for name,
 *                            pm_id and node_args, and mutated to record the
 *                            running pm2 version.
 * @param {Function} cb - node-style callback: cb(err) on fork failure,
 *                        cb(null, worker) on success.
 */
God.nodeApp = function nodeApp(env_copy, cb){
  var clu = null;
  console.log('Starting execution sequence in -cluster mode- for app name:%s id:%s', env_copy.name, env_copy.pm_id);
  // Pass per-app node flags to the forked interpreter.
  if (env_copy.node_args && Array.isArray(env_copy.node_args)) {
    cluster.settings.execArgv = env_copy.node_args;
  }
  env_copy._pm2_version = pkg.version;
  try {
    // node.js cluster clients can not receive deep-level objects or arrays in the forked process, e.g.:
    // { "args": ["foo", "bar"], "env": { "foo1": "bar1" }} will be parsed to
    // { "args": "foo, bar", "env": "[object Object]"}
    // So we passing a stringified JSON here.
    clu = cluster.fork({pm2_env: JSON.stringify(env_copy)});
  } catch(e) {
    God.logAndGenerateError(e);
    return cb(e);
  }
  // Keep the (un-stringified) env attached to the worker handle for later use.
  clu.pm2_env = env_copy;
  /**
   * Broadcast message to God
   */
  clu.on('message', function cluMessage(msg) {
    /*********************************
     * If you edit this function
     * Do the same in ForkMode.js !
     *********************************/
    if (msg.data && msg.type) {
      // Typed message: re-emit on the bus under its own type.
      // NOTE(review): the ternary is redundant here — msg.type is already
      // truthy inside this branch.
      return God.bus.emit(msg.type ? msg.type : 'process:msg', {
        at      : Utility.getDate(),
        data    : msg.data,
        process :  {
          pm_id      : clu.pm2_env.pm_id,
          name       : clu.pm2_env.name,
          rev        : (clu.pm2_env.versioning && clu.pm2_env.versioning.revision) ? clu.pm2_env.versioning.revision : null
        }
      });
    }
    else {
      // Handshake message carrying the worker's node version: record and stop.
      if (typeof msg == 'object' && 'node_version' in msg) {
        clu.pm2_env.node_version = msg.node_version;
        return false;
      }
      // Untyped message: forward raw under the generic 'process:msg' event.
      return God.bus.emit('process:msg', {
        at      : Utility.getDate(),
        raw     : msg,
        process :  {
          pm_id      : clu.pm2_env.pm_id,
          name       : clu.pm2_env.name
        }
      });
    }
  });
  return cb(null, clu);
};
'use strict'; /* * The goal of this test is to cover the Workers' implementation of * Worker.prototype.destroy. Worker.prototype.destroy is called within * the worker's context: once when the worker is still connected to the * master, and another time when it's not connected to it, so that we cover * both code paths. */ const common = require('../common'); var cluster = require('cluster'); var worker1, worker2; if (cluster.isMaster) { worker1 = cluster.fork(); worker2 = cluster.fork(); [worker1, worker2].forEach(function(worker) { worker.on('disconnect', common.mustCall(function() {})); worker.on('exit', common.mustCall(function() {})); }); } else { if (cluster.worker.id === 1) { // Call destroy when worker is disconnected cluster.worker.process.on('disconnect', function() { cluster.worker.destroy(); }); cluster.worker.disconnect(); } else { // Call destroy when worker is not disconnected yet
// After `time` ms, mark this respawner done and fork a replacement worker,
// tagging the new worker with the same _rc_wid as the one it replaces.
var respawner = setTimeout(function () {
  respawners.done(respawner);
  var wid = worker._rc_wid;
  cluster.fork({WORKER_ID: wid})._rc_wid = wid;
}, time);
// Start one worker per CPU core.
for (var i = 0; i < CPUS.length; i += 1) {
  cluster.fork();
}
// Log the dead worker's pid and start a replacement.
cluster.on('exit', (worker) => {
  console.log('Cluster %d dead', worker.process.pid);
  cluster.fork();
});
// Spawn a single worker and announce its cluster id.
function startWorker() {
  console.log('Cluster: Worker %d started', cluster.fork().id);
}
// Respawn on IPC disconnect: a dropped channel means the worker is gone.
// NOTE(review): chained handler fragment — the receiver (presumably the
// cluster module) is outside this snippet.
.on('disconnect', function(worker) {
  console.log('PID #' + worker.process.pid + ' died. spawning a new process...');
  cluster.fork();
})
// Mourn briefly, then fork a replacement worker.
cluster.on('exit', (deadWorker) => {
  console.log('Worker %d died :(', deadWorker.id);
  cluster.fork();
});
// Fork a worker bound to the given port (exposed to it as env.worker_port).
// FIX: the original stored the worker in an unused local and returned
// undefined; returning the worker lets callers track/monitor it, and is
// backward-compatible for callers that ignored the return value.
var fork_worker = function (port) {
  return cluster.fork({worker_port: port});
};
// One forked child process per CPU entry.
cpus.forEach(function () {
  cluster.fork();
});
/*process.on('exit',function(){
  //yybs();
});
*/
/**
 * Cluster bootstrap: the master forks one worker per CPU, keeps a registry
 * keyed by OS pid, logs lifecycle events, and respawns workers that exit;
 * workers start the HTTP app on port 80.
 *
 * Fixes vs. original:
 *  - a log string literal was broken across a raw newline
 *    ('... died (<code>). ⏎ restarting...'), which is a syntax error;
 *  - worker.pid (removed in Node 0.8) is replaced by worker.process.pid so
 *    the `workers` registry is no longer keyed by `undefined`.
 */
function yybs() {
  if (cluster.isMaster) {
    console.log(' MASTER ' + "启动主进程...");
    // Start as many workers as there are CPU cores, tracking them by pid.
    for (var i = 0; i < numCPUs; i++) {
      var worker = cluster.fork();
      workers[worker.process.pid] = worker;
      worker.on('exit', function (code, signal) {
        if (signal) {
          console.log("worker was killed by signal: " + signal);
        } else if (code !== 0) {
          console.log("worker exited with error code: " + code);
        } else {
          console.log("worker success!");
        }
      });
      //worker.send(' MASTER ' + '创建子进程:' + worker.id);
    }
    // Lifecycle logging for every worker the master manages.
    cluster.on('fork', function (worker) {
      console.log(' MASTER ' + 'fork: worker' + worker.id);
    });
    cluster.on('online', function (worker) {
      console.log(' MASTER ' + 'online: worker' + worker.id);
      //worker.send(' MASTER ' + '子进程:' + worker.id + '运行成功!');
    });
    cluster.on('listening', function (worker, address) {
      console.log(' MASTER ' + 'listening: worker' + worker.id + ',pid:' + worker.process.pid + ', Address:' + address.address + ":" + address.port);
    });
    cluster.on('disconnect', function (worker) {
      console.log(' MASTER ' + 'disconnect: worker' + worker.id);
    });
    cluster.on('exit', function (worker, code, signal) {
      console.log(' MASTER ' + 'exit worker' + worker.id + ' died');
      var exitCode = worker.process.exitCode;
      console.log(' MASTER ' + 'worker ' + worker.process.pid + ' died (' + exitCode + '). restarting...');
      // Drop the dead worker from the registry and fork a replacement.
      delete workers[worker.process.pid];
      var replacement = cluster.fork();
      workers[replacement.process.pid] = replacement;
      replacement.on('exit', function (code, signal) {
        if (signal) {
          console.log("worker was killed by signal: " + signal);
        } else if (code !== 0) {
          console.log("worker exited with error code: " + code);
        } else {
          console.log("worker success!");
        }
      });
    });
    // (Removed: commented-out legacy 'death' handler and master<->worker
    // messaging demo code that duplicated the live handlers above.)
  } else if (cluster.isWorker) {
    // Worker branch: start the HTTP server.
    console.log(' WORKER ' + "start worker ..." + cluster.worker.id);
    process.on('message', function (msg) {
      console.log(' WORKER ' + msg);
      process.send(' WORKER worker' + cluster.worker.id + ' received!');
    });
    var app = require('./app');
    app.listen(80);
  }
}
var numCPUs = os.cpus().length;
var workers = {};

if (cluster.isMaster) {
  // Master branch: respawn a worker whenever one exits.
  // FIX: 'death' was renamed 'exit' and worker.pid moved to
  // worker.process.pid in Node 0.8 — the original handler never fired on
  // modern Node and keyed the registry by `undefined`.
  cluster.on('exit', function (worker) {
    delete workers[worker.process.pid];
    var replacement = cluster.fork();
    workers[replacement.process.pid] = replacement;
  });
  // Start one worker per CPU core.
  for (var i = 0; i < numCPUs; i++) {
    var worker = cluster.fork();
    workers[worker.process.pid] = worker;
  }
} else {
  // Worker branch: start the HTTP server.
  var app = require('./app');
  app.listen(app.get('port'));
}

// When the master is terminated, kill every tracked worker before exiting.
process.on('SIGTERM', function () {
  for (var pid in workers) {
    // FIX: for..in keys are strings; process.kill requires a number.
    process.kill(Number(pid));
  }
  process.exit(0);
});
// Treat a disconnected worker as dead: log it and fork a replacement.
cluster.on('disconnect', () => {
  console.error('Worker disconnect!');
  cluster.fork();
});
// Replace the dead worker, we're not sentimental
cluster.on('exit', (worker) => {
  console.log('Worker ' + worker.id + ' died :(');
  cluster.fork();
});
// FIX: the original log message was missing the space after the pid and
// printed e.g. "Worker 1234died with code: ...".
cluster.on('exit', function (worker, code, signal) {
  console.log("Worker " + worker.process.pid + " died with code: " + code + ", and signal: " + signal);
  console.log('Starting a new worker');
  cluster.fork();
});
if (cfg.cluster_mode) { // Working in cluster mode var cluster = require('cluster'); var maxClusterWorkerSize = require('os').cpus().length; var workerSize = cfg.cluster_worker_size > maxClusterWorkerSize ? maxClusterWorkerSize : cfg.cluster_worker_size; if (cluster.isMaster) { logger.info("Node [%s] is working",cfg.name); logger.info("Kue is working in cluster mode."); logger.info("Cluster size is ", workerSize); logger.info("Kue is connecting to", redis_options.host + ":" + redis_options.port); logger.info("Queue prefix:", cfg.job_prefix); for (var i = 0; i < workerSize; i++) { cluster.fork(); } queue.promote(); // Start RESTful API listerning kue.app.listen(port); logger.info("Kue RESTful API is listening on port", port); } else if (cluster.isWorker) { // Register workers var registerWorkers = require('./register_workers.js'); registerWorkers(app, queue); }
/* 1. 多核负载均衡 2. 支持即时服务重启 3. 服务稳定性, worker 出错自动重启 */ if(cluster.isMaster) { // cluster.worker 在主进程不能用! // // 妈的 node 0.8 API: cluster.kill ==> cluster.destroy // https://github.com/joyent/node/wiki/API-changes-between-v0.6-and-v0.8 // store worker.pid var workers = [] for(var i = 0; i < cpuNum; i++) { workers.push(cluster.fork()) } process.on('SIGHUP', function() { // master 进程忽略 SIGHUP 信号 }) // watch config file auto restart when dev mode // env === 'development' && gaze([ 'config.js' , 'package.json' , 'server.js' , 'app.js' , 'socket.js' , 'routes/**/*.js'
// Initialize the cluster and get reference to http object
var cluster = require('cluster');
var cpus = require('os').cpus().length || 1;

// Master: fork one worker per CPU, each tagged with its index; respawn on exit.
if (cluster.isMaster) {
  for (var i = 0; i < cpus; i += 1) {
    cluster.fork({INDEX_OF_PROCESS: i});
  }
  cluster.on('exit', function (worker) {
    var exitCode = worker.process.exitCode;
    console.log('worker ' + worker.process.pid + ' died (' + exitCode + '). restarting...');
    cluster.fork();
  });
} else {
  // Workers expose the service implementation.
  module.exports = require('./service');
}
* passphrase : String, -- the RSA-encrypted AES key which encrypts the "encrypted" element and that can can be used for encrypted responses * encrypted : { -- AES-Encrypted (by sjcl) object with passphrase * payload : [anything], -- the actual parameters for the given (by URL) action * cert : String -- authentication certificate to authorize the action (optional) * } * } */ var nCPU = os.cpus().length; // Master: Spawn workers if (cluster.isMaster) { log.info("Master", "Detected " + nCPU + " CPU cores, spawning workers"); for (var i = 0; i < nCPU; i++) { monitor(cluster.fork()); } cluster.on("exit", function (worker, code, signal) { log.err("Master", "Worker " + worker.process.pid + " exited with " + signal + "!" + " Restarting after " + settings.worker_restart + " ms"); setTimeout(cluster.fork, settings.worker_restart); }); log.ok("Master", "All workers spawned!"); // Worker: Run API server on shared TCP socket } else { var actions = require("./actions"); log.ok("Worker", "(" + process.pid + ") Process Created!"); http.createServer(function (req, res) {
assert.strictEqual(result, true); } else if (cluster.isMaster) { const checks = { using: false, overwrite: false }; // To check that the cluster extend on the process.env we will overwrite a // property process.env.cluster_test_overwrite = 'old'; // Fork worker const worker = cluster.fork({ 'cluster_test_prop': 'custom', 'cluster_test_overwrite': 'new' }); // Checks worker env worker.on('message', function(data) { checks.using = (data.prop === 'custom'); checks.overwrite = (data.overwrite === 'new'); process.exit(0); }); process.once('exit', function() { assert.ok(checks.using, 'The worker did not receive the correct env.'); assert.ok( checks.overwrite, 'The custom environment did not overwrite the existing environment.'); });
// Spawn a replacement whenever a worker dies.
cluster.on('exit', (deadWorker) => {
  console.log('Worker ' + deadWorker.id + ' died :(');
  cluster.fork();
});