:beers: module-as-a-process cluster management :beer:
nodejs module-as-a-process cluster management
Keeps a set of dynamically executed modules up & running across any number of servers, VMs, and containers.
Requires Node.js >= 7.0
Requires Redis >= 2.8.18
npm i eisenhertz
const {
Eisenhertz,
defaultConfig,
defaultLogger
} = require("eisenhertz");
/**
 * Supplies the list of job names the cluster should keep running.
 * @param {Function} callback - node-style callback (error, jobNames)
 */
const fetchJobNames = (callback) => {
    const jobNames = ["one", "two"];
    callback(null, jobNames);
};
/**
 * Resolves the configuration for a given job name.
 * Known ids map to a port and a greeting message; unknown ids are
 * reported through the callback as an Error instead of silently
 * producing an empty config (the original switch had no default case,
 * so an unknown id would later make the fork listen on `undefined`).
 * @param {string} id - job name as returned by fetchJobNames
 * @param {Function} callback - node-style callback (error, { config })
 */
const fetchJobDetails = (id, callback) => {
    const knownConfigs = {
        one: { port: 1337, hi: "hi from one" },
        two: { port: 1338, hi: "hi from two" }
    };

    const config = knownConfigs[id];

    if (!config) {
        // previously fell through the switch and returned {} as config
        callback(new Error(`unknown job id: ${id}`));
        return;
    }

    callback(null, {
        config
    });
};
// BUG FIX: the original passed an undefined `config` variable here;
// `defaultConfig` (imported above) is what was intended.
const eisenhertz = new Eisenhertz(defaultConfig, defaultLogger());

// Don't leave the startup promise floating with an empty .then() —
// surface rejections so failed startups are visible.
eisenhertz
    .start(fetchJobNames, fetchJobDetails)
    .catch((error) => {
        console.error("eisenhertz failed to start:", error);
    });
// ForkProcess is the worker-side API a spawned job process uses to
// talk back to the master (logging, metrics).
const { ForkProcess } = require("eisenhertz");
const express = require("express");
const fork = new ForkProcess();
// request counter reported back to the master via metricsCallback below
let incomingRequests = 0;
/**
 * Entry point of the forked job process: boots a small express server
 * on the port handed down in the job's config and counts requests.
 * @param {{ config: { port: number, hi: string } }} data - job details
 */
const processCallback = (data) => {
    const app = express();

    app.get("/hi", (request, response) => {
        incomingRequests += 1;
        response.status(200).json({
            message: data.config.hi
        });
    });

    app.listen(data.config.port, () => fork.log("ready"));
};
/**
 * Reports this process's metrics back to the master.
 * @param {Function} cb - node-style callback (error, metrics)
 */
const metricsCallback = (cb) => {
    const metrics = { incomingRequests };
    cb(null, metrics);
};
// register the job entry point and the metrics reporter with the master
fork.connect(processCallback, metricsCallback);
// Example configuration object (the shape of defaultConfig).
{
// prefix for all redis keys created by eisenhertz
prefix: "eh",
// redis connection settings
redis: {
host: "localhost",
port: 6379,
db: 7
},
// options for the redlock distributed lock used for master election;
// see the redlock library docs for their exact meaning
redlock: {
driftFactor: 0.01,
retryCount: 2,
retryDelay: 200,
retryJitter: 200
},
// queue timing settings — presumably milliseconds, passed to the
// underlying job queue; confirm against the eisenhertz docs
settings: {
lockDuration: 4500,
stalledInterval: 4500,
maxStalledCount: 1,
guardInterval: 2500,
retryProcessDelay: 2500
},
properties: {
// instance/queue name
name: "eh:empty",
// cap on concurrent jobs per worker (explained further below in this file)
maxJobsPerWorker: 2,
// redis key used for the master lock
masterLock: "eh:master:lock",
// master lock TTL (presumably ms — confirm)
masterLockTtl: 2000,
// delay before re-attempting to acquire the master lock (presumably ms)
masterLockReAttempt: 4000,
// cap on instances of one job per node (explained further below in this file)
maxInstancesOfJobPerNode: 1
},
// per-job queue options; the values marked "dont touch" are required as-is
jobOptions: {
priority: 1,
delay: 1000,
attempts: 1, //dont touch
repeat: undefined, //dont touch
backoff: undefined, //dont touch
lifo: undefined, //dont touch
timeout: undefined, //dont touch
jobId: undefined, // will be set by TaskHandler
removeOnComplete: true, //dont touch
removeOnFail: true //dont touch
},
fork: {
// path to the module that is spawned as the job process
module: "./fork/ForkProcess.js"
}
}
config.properties.maxInstancesOfJobPerNode
/*
limits the number of instances of a given job that may run
on a single node. A job instance is identified by using ":"
as a delimiter, e.g. jobOne:1, jobOne:2 and jobOne:3.
If the limit is reached, the node returns the job to the
queue with an error after a short timeout.
*/
config.properties.maxJobsPerWorker
/*
limits the number of jobs per worker.
It is usually a good idea to cap this at the number of CPU
cores (x2 on hyper-threaded Intel systems) of the node's host.
*/