/**
 * Combines the integer and fractional fields of a parsed amount (`res[1]`
 * and `res[2]`) into one bignum in the smallest sub-unit, scaling the
 * integer part by 1e8 (presumably satoshi-style units -- TODO confirm)
 * and adding the fraction padded by `padFrac` (defined elsewhere).
 */
function parseFullValue(res) {
    var scaledWhole = bignum(res[1]).mul('100000000');
    var paddedFraction = padFrac(res[2]);
    return scaledWhole.add(paddedFraction);
}
/**
 * Creates an instance of ConsistentHash.
 *
 * @constructor
 * @this {ConsistentHash}
 *
 * @param {Object} options The options object.
 * @param {Object} [options.log] The optional Bunyan log object.
 * @param {Object} options.algorithm The hash algorithm object.
 * @param {String} options.algorithm.algorithm The hash algorithm.
 * @param {String} options.algorithm.MAX The max output size of the
 * algorithm, as a hex string.
 * @param {Number} options.vnodes The number of virtual nodes in the ring.
 * This can't be changed once set.
 * @param {String[]} [options.pnodes] The optional array of physical nodes in
 * the ring, or the ring topology array.
 * @param {Object} [options.topology] The topology of a previous hash ring.
 * Used to restore an old hash ring.
 * @param {Object} options.topology.pnodeToVnodeMap The mapping of pnode to
 * vnodes of the serialized topology.
 * @param {Number} options.topology.vnodes The number of vnodes in the
 * serialized topology.
 */
function ConsistentHash(options) {
    assert.object(options, 'options');

    this.log = options.log;
    if (!this.log) {
        this.log = bunyan.createLogger({
            name: 'fash',
            level: (process.env.LOG_LEVEL || 'warn'),
            stream: process.stderr
        });
    }
    var self = this;
    var log = self.log;

    log.trace('new ConsistentHash with options', options);

    /**
     * The hash algorithm used to determine the position of a key.
     */
    self.algorithm_ = options.algorithm;

    /**
     * The maximum output size of the hash algorithm. Used to determine the
     * hash interval between each vnode.
     */
    self.algorithmMax_ = bignum(options.algorithm.MAX, 16);

    /*
     * The number of virtual nodes to provision in the ring. Once set, this
     * can't be changed.
     *
     * Bug fix: the topology dereference is now guarded so a missing
     * options.topology no longer throws a TypeError when options.vnodes is
     * absent -- we fall through to the default of 100000 instead.
     */
    self.vnodeCount_ = options.vnodes ||
        (options.topology && options.topology.vnodes) || 100000;
    self.vnodesBignum_ = bignum(self.vnodeCount_, 10);
    self.vnodeHashInterval_ = self.algorithmMax_.div(self.vnodesBignum_);

    /**
     * The String array of physical nodes in the ring.
     */
    self.pnodes_ = options.pnodes ? options.pnodes.slice() : [];
    self.pnodes_.sort();

    /**
     * Map of {pnode -> {vnode1 -> {}, vnode2, ...}}. Keeps track of the
     * physical node to virtual node mapping. Also keeps track of an optional
     * data object.
     */
    self.pnodeToVnodeMap_ = {};

    /**
     * Map of {vnode -> {pnode, data}}. Contains the pnode and an optional
     * data object. If you want the actual pnode, you'll need to dereference
     * as self.vnodeToPnodeMap_[vnode].pnode -- otherwise you'll get the
     * wrapper object.
     */
    self.vnodeToPnodeMap_ = {};

    // Used only on the new-ring path to detect duplicate pnode names.
    var pnodeMap = {};

    if (options.topology) {
        // Restore a previously serialized ring rather than building anew.
        var topology = options.topology;
        log.info('ConsistentHash.new: deserializing an already existing ' +
            'ring.');
        log.debug({
            topology: topology
        }, 'ConsistentHash.new: previous topology');

        self.pnodeToVnodeMap_ = topology.pnodeToVnodeMap;
        var pnodeKeys = Object.keys(self.pnodeToVnodeMap_);

        // Rebuild the pnode list and the inverse vnode->pnode map from the
        // serialized pnode->vnode map.
        pnodeKeys.forEach(function (pnode) {
            self.pnodes_.push(pnode);
            var vnodes = self.pnodeToVnodeMap_[pnode];
            Object.keys(vnodes).forEach(function (vnode) {
                self.vnodeToPnodeMap_[vnode] = {
                    pnode: pnode,
                    data: vnodes[vnode]
                };
            });
        });
        self.pnodes_.sort();

        log.info('ConsistentHash.new: finished deserializing');
        log.debug({
            pnodeToVnodeMap: self.pnodeToVnodeMap_,
            vnodeToPnodeMap: self.vnodeToPnodeMap_
        }, 'ConsistentHash.new: topology');
    } else {
        // (typo fixed: was 'instanting')
        log.info('instantiating new ring from scratch.');

        // Instantiate pnodeToVnodeMap_, making sure there are no duplicate
        // keys in self.pnodes_.
        self.pnodes_.forEach(function (pnode, index) {
            if (pnodeMap[pnode]) {
                throw new verror.VError('Unable to instantiate ring, ' +
                    'duplicate pnodes in input');
            }
            pnodeMap[pnode] = true;
            self.pnodeToVnodeMap_[self.pnodes_[index]] = {};
        });

        allocateVnodes();
    }

    log.info('instantiated ring');
    log.debug({
        pnodeToVnodeMap: self.pnodeToVnodeMap_,
        vnodeToPnodeMap: self.vnodeToPnodeMap_
    }, 'ConsistentHash.new: ring state');

    /**
     * Allocates the vnodes to the pnodes round-robin:
     * vnode % total_pnode = assigned pnode.
     * Declared at function scope (hoisted) rather than inside the else
     * block, per idiom; only invoked on the new-ring path above.
     */
    function allocateVnodes() {
        for (var vnode = 0; vnode < self.vnodeCount_; vnode++) {
            var pnode = self.pnodes_[vnode % self.pnodes_.length];
            var hashspace = self.findHashspace(vnode);
            log.debug({
                hashspace: hashspace,
                vnode: vnode,
                pnode: pnode
            }, 'ConsistentHash.new: assigning hashspace to vnode to pnode');
            if (!self.pnodeToVnodeMap_[pnode]) {
                self.pnodeToVnodeMap_[pnode] = {};
            }
            // assign the pnode->vnode and vnode->pnode maps
            // set the data here to null since this is a new ring
            self.pnodeToVnodeMap_[pnode][vnode] = null;
            self.vnodeToPnodeMap_[vnode] = {
                pnode: pnode
                // don't set data here -- since this is a new ring
            };
            log.debug({
                vnode: vnode,
                pnode: pnode
            }, 'ConsistentHash.new: added vnode to pnode');
        }
    }
}
/**
 * Maps a hex-string hash to its vnode index by integer-dividing the hash
 * value by the per-vnode hash interval computed at construction time.
 */
ConsistentHash.prototype.findVnode = function findVnode(hash) {
    var hashValue = bignum(hash, 16);
    var vnodeIndex = hashValue.div(this.vnodeHashInterval_);
    // parseInt coerces the bignum quotient via its decimal string form.
    return parseInt(vnodeIndex, 10);
};
* * Note that P_stretched is the HKDF-stretched key, computed elsewhere. */ const I = new Buffer('andré@example.org', 'utf8'), P = new Buffer('pässwörd', 'utf8'), P_stretch = new Buffer('5b597db713ef1c0567f8d053e9dde294f917a0a838ddb661a98a67a188bdf491', 'hex'), N = params['2048'].N, g = params['2048'].g, s = new Buffer('00f100000000000000000000000000000000000000000000000000000000009b', 'hex'), b = bignum(('1198277 66042000 95785634 94115500 91527197 08125226 96054476' +'89936430 95227800 29105361 55503035 27745056 25606029 71778328' +'00325345 97331398 44872578 33596964 14341721 63891575 82554932' +'02841499 37367218 83155342 80693274 23189198 73686357 51420460' +'53414928 39548731 87904313 57183095 39706489 29157321 42348352' +'72947679 88835942 53343430 84231300 63326063 44714480 99439808' +'86106931 64826214 24231409 08830704 76916770 00983929 68117727' +'43420990 99723875 98328292 19109897 32876428 83198548 78234173' +'12772399 92628295 46938957 84583632 37146486 38545526 79918828' +'02106605 08721582 00403102 62483181 55961400 94933216 29832845' +'62611677 70805044 44704039 04739431 33561758 53336713 78812960').split(/\s/).join(''), 10).toBuffer(), a = bignum(('1193346 47663227 29136311 34057412 43413916 43482736 31461660' +'12200067 03889414 28162541 13710841 71663800 88052095 43910927' +'47649109 98165425 61560345 50331133 01525500 56221240 12256352' +'06121987 03057065 66763757 03406470 63422988 04247319 00591569' +'75005813 46381864 66966435 73820202 00036915 26156674 01021816' +'29849129 76536206 14440782 97876439 31378219 56464627 16314542' +'15793734 39868081 67341567 89864323 26806001 40897576 06109012' +'50649711 19889621 34960686 05039486 22864591 67629830 47459546' +'90086093 75374681 08474188 47198514 54277570 80362211 87408873' +'99628800 12800917 05751238 00497654 06348391 06888223 63866455'
_.each(array, function(el, index) { result = result.add(bignum(parseInt('0x' + el).toString()).shiftLeft((array.length - (index + 1)) * sectionBits)); });
/**
 * Validates a submitted mining share and, if it meets the block target,
 * submits the reconstructed block to the daemon.
 *
 * @param {Object} miner the connected miner (login, ip, trust state).
 * @param {Object} job the job the share was mined against.
 * @param {Object} blockTemplate the template the job was built from.
 * @param {Number|String} nonce the miner-supplied nonce (integer or hex
 *        string).
 * @param {String} resultHash the hex hash the miner claims for the share.
 * @returns {Boolean} true if the share was accepted, false if rejected.
 */
function processShare(miner, job, blockTemplate, nonce, resultHash){
    // Rebuild the full block blob: copy the template, then splice in the
    // per-connection extraNonce and the miner's nonce.
    var shareBuffer = new Buffer(blockTemplate.buffer.length);
    blockTemplate.buffer.copy(shareBuffer);
    shareBuffer.writeUInt32BE(job.extraNonce, blockTemplate.reserveOffset);
    if (typeof(nonce) === 'number' && nonce % 1 === 0) {
        // Integer nonce: serialize via bignum and byte-reverse before
        // copying in at offset 1 (presumably little-endian on the wire --
        // TODO confirm against the blob format).
        var nonceBuf = bignum(nonce, 10).toBuffer();
        var bufReversed = new Buffer(nonceBuf.toJSON().reverse());
        bufReversed.copy(shareBuffer, 1);
    } else {
        // Hex-string nonce: decode and copy in as-is.
        new Buffer(nonce, 'hex').copy(shareBuffer, 1);
    }
    //XMR FIXME:
    //new Buffer(nonce, 'hex').copy(shareBuffer, 39);
    var convertedBlob;
    var hash;
    var shareType;
    // Trusted-share fast path: skip the expensive PoW hash and take the
    // miner's claimed hash at face value, with probability controlled by
    // the miner's accumulated trust.
    if (shareTrustEnabled && miner.trust.threshold <= 0 &&
            miner.trust.penalty <= 0 &&
            Math.random() > miner.trust.probability){
        hash = new Buffer(resultHash, 'hex');
        shareType = 'trusted';
        //Fixme do i need this?
        log('debug', logSystem, 'Share Validator',
            'Trusted share from miner ' + miner.login + '@' + miner.ip);
    } else {
        // Full validation: convert the blob and recompute the PoW hash.
        convertedBlob = convertBlockBlob(shareBuffer);
        //XMR FIXME:
        //hash = cryptoNight(convertedBlob);
        hash = multiHashing.boolberry(convertedBlob, scratchpad, job.height);
        shareType = 'valid';
    }
    //BBR FIXME:
    // Reject if the recomputed hash does not match what the miner reported.
    if (hash.toString('hex') !== resultHash) {
        //log('warn', logSystem, 'Bad hash from miner %s@%s', [miner.login, miner.ip]);
        log('warn', logSystem, 'Bad hash from miner ' + miner.login + '@' +
            miner.ip + '\n scratchpadHeight.height=' +
            scratchpadHeight.height + ', job.height=' + job.height +
            '\n calculated hash: ' + hash.toString('hex') +
            ', transfered hash: ' + resultHash);
        return false;
    }
    // Difficulty achieved = diff1 / hash interpreted as a big-endian number
    // (the hash bytes are reversed before conversion).
    var hashArray = hash.toJSON();
    hashArray.reverse();
    var hashNum = bignum.fromBuffer(new Buffer(hashArray));
    var hashDiff = diff1.div(hashNum);
    if (hashDiff.ge(blockTemplate.difficulty)){
        // Share meets the network target: submit the block. NOTE(review):
        // the function returns true below before this async callback runs;
        // submission failures are only recorded, not reported to the miner.
        apiInterfaces.rpcDaemon('submitblock',
            [shareBuffer.toString('hex')],
            function(error, result){
                if (error){
                    log('error', logSystem, 'Error submitting block at ' +
                        'height %d from %s@%s, share type: "%s" - %j',
                        [job.height, miner.login, miner.ip, shareType,
                        error]);
                    recordShareData(miner, job, hashDiff.toString(), false,
                        null, shareType);
                }
                else{
                    var blockFastHash = cryptoNightFast(convertedBlob ||
                        convertBlockBlob(shareBuffer)).toString('hex');
                    log('info', logSystem,
                        'Block %s found at height %d by miner %s@%s - ' +
                        'submit result: %j',
                        [blockFastHash.substr(0, 6), job.height,
                        miner.login, miner.ip, result]
                    );
                    recordShareData(miner, job, hashDiff.toString(), true,
                        blockFastHash, shareType, blockTemplate);
                    //XMR FIXME:
                    //jobRefresh();
                    // Pop the next queued alias, if any, after a found block.
                    if(aliases_config && aliases_config.aliases_que &&
                            aliases_config.aliases_que.length > 0) {
                        aliases_config.aliases_que.shift();
                        storeAliasesQue();
                        log('debug', logSystem, 'Alias que updated.');
                    }
                }
            });
    }
    else if (hashDiff.lt(job.difficulty)){
        // Below even the job's share difficulty: reject.
        log('warn', logSystem,
            'Rejected low difficulty share of %s from %s@%s',
            [hashDiff.toString(), miner.login, miner.ip]);
        return false;
    }
    else{
        // Valid share, but not a block: just record it.
        recordShareData(miner, job, hashDiff.toString(), false, null,
            shareType);
    }
    return true;
}
// NOTE(review): the statements down to the first closing brace are the tail
// of a login-validation function whose beginning lies outside this chunk.
            callback(myerror, login);
        });
    } else {
        // No async check needed: validate the address format directly
        // against the pool's first configured address.
        if (!utils.isValidAddress(login, config.poolServer.poolAddress[0])){
            sendReply('invalid address used for login');
            error = {message: 'invalid address used for login'};
            callback(error);
            return;
        }
        callback(myerror, login);
    }
}

// Difficulty-1 target: a 256-bit all-ones value; per-share difficulty is
// computed as diff1 / hashValue in processShare.
var diff1 = bignum('FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF', 16);
// Random 4-byte identifier for this pool-server process.
var instanceId = crypto.randomBytes(4);
// Recently seen block templates (current one kept separately below).
var validBlockTemplates = [];
var currentBlockTemplate;
// Live miner connections and banned client IPs.
var connectedMiners = {};
var bannedIPs = {};
// Share-trust settings from config, with percentages converted to fractions;
// all zeroed out when the feature is disabled.
var shareTrustEnabled = config.poolServer.shareTrust &&
    config.poolServer.shareTrust.enabled;
var shareTrustStepFloat = shareTrustEnabled ?
    config.poolServer.shareTrust.stepDown / 100 : 0;
var shareTrustMinFloat = shareTrustEnabled ?
    config.poolServer.shareTrust.min / 100 : 0;
/**
 * Persists a brand-new hash ring into leveldb:
 * 1) create 'VNODE_COUNT' key which keeps track of the # of vnodes.
 * 2) create /VNODE/V keys which map vnodes to pnodes. The value is the
 * corresponding pnode.
 * 3) create /PNODE/<PNODE>/<VNODE> keys which map pnode to vnodes. The
 * value is the data of the vnode, defaults to 1 since leveldb requires a
 * value.
 * 4) create /PNODE/<PNODE> keys for all pnodes. The value is the set of
 * all vnodes that belong to this pnode. create /PNODE key which is an
 * array of all the pnodes.
 * 5) create algorithm key which contains the algorithm.
 * 6) create version key which contains the version.
 * 7) create complete key.
 *
 * @param {Function} callback invoked as callback(err, self) once the
 * pipeline finishes.
 */
function createNewRing(callback) {
    log.info('instantiating new ring from scratch.');
    /*
     * The hash algorithm used determine the position of a key.
     */
    self.algorithm_ = options.algorithm;
    /*
     * The maximum output size of the hash algorithm. Used to determine the
     * hash interval between each vnode.
     */
    self.algorithmMax_ = bignum(options.algorithm.MAX, 16);
    /*
     * The number of virtual nodes to provision in the ring. Once set, this
     * can't be changed.
     * NOTE(review): this dereferences options.topology unconditionally when
     * options.vnodes is falsy -- it would throw if neither is supplied;
     * confirm callers always pass one of the two.
     */
    self.vnodeCount_ = options.vnodes || options.topology.vnodes || 100000;
    self.vnodesBignum_ = bignum(self.vnodeCount_, 10);
    self.algorithm_.VNODE_HASH_INTERVAL =
        self.algorithmMax_.div(self.vnodesBignum_);
    /*
     * The String array of physical nodes in the ring.
     */
    self.pnodes_ = options.pnodes ? options.pnodes.slice() : [];
    self.pnodes_.sort();

    // Pipeline stages; `_` is the shared per-pipeline context object.
    var tasks = [
        // Open (or create) the leveldb database and stash the handle.
        function openDb(_, _cb) {
            _cb = once(_cb);
            levelup(options.location, self.leveldbCfg_, function(err, db) {
                if (err) {
                    return _cb(new verror.VError(err));
                }
                if (!db) {
                    return _cb(
                        new verror.VError('unable to instantiate db!'));
                }
                self.db_ = db;
                _.db = db;
                return _cb();
            });
        },
        // step 1
        function putVnodeCount(_, _cb) {
            _cb = once(_cb);
            _.db.put(LKEY_VNODE_COUNT, self.vnodeCount_, function(err) {
                if (err) {
                    err = new verror.VError(err);
                }
                return _cb(err);
            });
        },
        /**
         * steps 2, 3
         * Allocate the vnodes to the pnodes by
         * vnode % total_pnode = assigned pnode.
         */
        function allocateVnodes(_, _cb) {
            log.trace('allocateVnodes');
            _cb = once(_cb);
            _.pnodeToVnodeMap = {};
            /*
             * #21 Batch up the vnode puts here. Becauase we are running in
             * a tight for loop, _every_ put get enqueued onto the node
             * work queue before they actually get procecessed. This means
             * that we are allocating a huge amount of memory and not
             * deallocating it until every node has been enqueued. On
             * sufficiently large counts of vnodes, say 10 000 000, we
             * effectively fragment the heap such that either malloc(1),
             * mmap(1) or v8 will fail to grow the heap because of
             * fragmentation and cause the process to fail with OOME. Hence
             * we want to batch up the puts in 1000 vnode increments and
             * let them finish before enqueueing more puts.
             */
            var batch = _.db.batch();
            // use this emitter to control the serial puts of vnodes.
            var emitter = new EventEmitter();
            emitter.on('enqueue', function (vnode) {
                if (vnode >= self.vnodeCount_) {
                    //done -- then batch up any remaining operations.
                    batch.write(function (err) {
                        if (err) {
                            return _cb(new verror.VError('unable to ' +
                                'allocate some vnodes'));
                        }
                        return _cb();
                    });
                } else if (vnode % 1000 === 1) {
                    // Flush the current batch every 1000 vnodes before
                    // enqueueing more puts (see comment above).
                    batch.write(function (err) {
                        if (err) {
                            return _cb(new verror.VError('unable to ' +
                                'allocate some vnodes'));
                        }
                        batch = _.db.batch();
                        // only invoke putVnode when the batch has finished
                        allocateVnodeImpl(vnode, function () {
                            emitter.emit('enqueue', ++vnode);
                        });
                    });
                } else {
                    allocateVnodeImpl(vnode, function () {
                        emitter.emit('enqueue', ++vnode);
                    });
                }
            });
            // Stages a single vnode's keys into the current batch and
            // records the pnode->vnode assignment for step 4.
            var allocateVnodeImpl = function (vnode, _cb1) {
                var pnode = self.pnodes_[vnode % self.pnodes_.length];
                var hashspace = common.findHashspace({
                    vnode: vnode,
                    vnodeHashInterval: self.algorithm_.VNODE_HASH_INTERVAL,
                    log: log
                });
                log.debug({
                    hashspace: hashspace,
                    vnode: vnode,
                    pnode: pnode
                }, 'ConsistentHash.new: assigning hashspace to vnode to ' +
                    'pnode');
                /*
                 * assign the pnode->vnode and vnode->pnode maps
                 * set the data here to null since this is a new ring
                 */
                batch.put(sprintf(LKEY_VNODE_V, vnode), pnode);
                /*
                 * we put the vnode in the path, to avoid having to put all
                 * vnodes under 1 key
                 */
                var pnodePath = sprintf(LKEY_PNODE_P_V, pnode, vnode);
                batch.put(pnodePath, LVAL_NULL);
                // cache the pnopdeToVnode mapping for step 4
                if (!_.pnodeToVnodeMap[pnode]) {
                    _.pnodeToVnodeMap[pnode] = [];
                }
                _.pnodeToVnodeMap[pnode].push(vnode);
                log.debug({
                    vnode: vnode,
                    pnode: pnode
                }, 'ConsistentHash.new: added vnode to pnode');
                return _cb1();
            };
            // Kick off the serial allocation at vnode 0.
            emitter.emit('enqueue', 0);
        },
        // step 4
        function writePnodeKeys(_, _cb) {
            log.trace('writePnodeKeys');
            _cb = once(_cb);
            var pnodeMap = {};
            // NOTE(review): _cb is once()'d but is invoked from every
            // per-pnode put callback below as well as the final /PNODE put;
            // whichever put completes first ends this stage, so later put
            // errors are not observed -- confirm this is intended.
            for (var i = 0; i < self.pnodes_.length; i++) {
                var pnode = self.pnodes_[i];
                if (pnodeMap[pnode]) {
                    return _cb(new verror.VError('Unable to instantiate ' +
                        'duplicate pnodes'));
                }
                pnodeMap[pnode] = true;
                log.debug({
                    pnode: '/PNODE/' + pnode,
                    vnodes: _.pnodeToVnodeMap[pnode]
                }, 'writing vnode list for pnode');
                _.db.put(sprintf(LKEY_PNODE_P, pnode),
                    _.pnodeToVnodeMap[pnode],
                    function(err) {
                        if (err) {
                            err = new verror.VError(err);
                        }
                        return _cb(err);
                    });
            }
            _.db.put(LKEY_PNODE, Object.keys(pnodeMap), function(err) {
                if (err) {
                    err = new verror.VError(err);
                }
                return _cb(err);
            });
        },
        // Initialize the vnode data array to empty.
        function writeVnodeDataArray(_, _cb) {
            _cb = once(_cb);
            _.db.put(LKEY_VNODE_DATA, [], function(err) {
                if (err) {
                    err = new verror.VError(err);
                }
                return _cb(err);
            });
        },
        function writeMetadata(_, _cb) {
            // step 5
            // hacky clone the algorithm object.
            var algorithm = JSON.parse(JSON.stringify(self.algorithm_));
            algorithm.VNODE_HASH_INTERVAL =
                self.algorithm_.VNODE_HASH_INTERVAL.toString(16);
            _.batch = _.db.batch().put(LKEY_ALGORITHM, algorithm);
            // step 6
            _.batch = _.batch.put(LKEY_VERSION, fash.VERSION);
            // step 7
            _.batch = _.batch.put(LKEY_COMPLETE, 1);
            return _cb();
        },
        // Flush the metadata batch staged by writeMetadata.
        function commit(_, _cb) {
            _.batch.write(function(err) {
                if (err) {
                    err = new verror.VError(err);
                }
                return _cb(err);
            });
        }
    ];

    vasync.pipeline({funcs: tasks, arg: {}}, function(err) {
        if (err) {
            err = new verror.VError(err, 'unable to create ring');
        }
        log.info({
            err: err
        }, 'finished instantiated new ring');
        dtrace._fash_probes['new-done'].fire(function() {
            return ([err ? err.message : null, 'createNewRing']);
        });
        return callback(err, self);
    });
}
// Authentication handshake: read the device's auth challenge, derive a
// shared secret, and write back an encrypted response.
// NOTE(review): `sec`, `exp`, `mod` and `this.addressData` belong to the
// enclosing method, which is outside this chunk.
this.readAuthService1(function(error, authService1Value) {
    if (error) {
        return callback(error);
    }
    // Modular exponentiation of the challenge: value^exp mod `mod`.
    sec = bignum(authService1Value).powm(exp, mod);

    var authService2Data = new Buffer(16);

    // fill in authService2Data with address
    // (device address bytes laid out in a fixed scramble pattern)
    authService2Data[0] = this.addressData[5];
    authService2Data[1] = this.addressData[4];
    authService2Data[2] = this.addressData[3];
    authService2Data[3] = this.addressData[2];
    authService2Data[4] = this.addressData[1];
    authService2Data[5] = this.addressData[0];
    authService2Data[6] = this.addressData[3];
    authService2Data[7] = this.addressData[4];
    authService2Data[8] = this.addressData[5];
    authService2Data[9] = this.addressData[0];
    authService2Data[10] = this.addressData[1];
    authService2Data[11] = this.addressData[2];
    authService2Data[12] = this.addressData[1];
    authService2Data[13] = this.addressData[3];
    authService2Data[14] = this.addressData[2];
    authService2Data[15] = this.addressData[4];

    // encrypt with the firmware's fixed key (selected by advertised name)
    // and an all-zero IV, padding disabled (exactly one AES block).
    var fixedKeyHexString = (this._peripheral.advertisement.localName === 'EST') ?
        'c54fc29163e4457b8a9ac9868e1b3a9a' : // "new" fixed key (v3)
        'ff8af207013625c2d810097f20d3050f'; // original fixed key
    var key = new Buffer(fixedKeyHexString, 'hex');
    var iv = new Buffer('00000000000000000000000000000000', 'hex');

    var cipher = crypto.createCipheriv('aes128', key, iv);
    cipher.setAutoPadding(false);
    authService2Data = cipher.update(authService2Data);

    // fill in key with sec
    var secData = new Buffer(4);
    // NOTE(review): writeUInt32BE expects a plain Number; `sec` is a bignum
    // here -- confirm it is coerced/converted as intended.
    secData.writeUInt32BE(sec, 0);

    // Spread the 4 secret bytes across the 16-byte key in a fixed pattern.
    key[0] = secData[3];
    key[1] = secData[2];
    key[2] = secData[1];
    key[3] = secData[0];
    key[4] = secData[0];
    key[5] = secData[1];
    key[6] = secData[2];
    key[7] = secData[3];
    key[8] = secData[3];
    key[9] = secData[0];
    key[10] = secData[2];
    key[11] = secData[1];
    key[12] = secData[0];
    key[13] = secData[3];
    key[14] = secData[1];
    key[15] = secData[2];

    // decrypt the encrypted block under the secret-derived key, then send
    // the result to the device's second auth characteristic.
    var decipher = crypto.createDecipheriv('aes128', key, iv);
    decipher.setAutoPadding(false);
    authService2Data = decipher.update(authService2Data);

    this.writeAuthService2(authService2Data, function(error) {
        callback(error);
    }.bind(this));
}.bind(this));
// Registers the internal '$$setInt' method: coerces the incoming value to a
// bignum and stores it on the receiver under the closed-over `name` key.
STable.addInternalMethod('$$setInt', function (value) {
    var converted = bignum(value);
    this[name] = converted;
});