exports.delWine = function (req, res) {
  console.log('delWine', req.params.id);
  levelup('/tmp/dprk.db', { valueEncoding: 'json' }, function (err, db) {
    if (err) return res.send('E'); // opening the store failed; db would be undefined below
    db.del(req.params.id, function (err) {
      if (err) {
        res.send('E');
      } else {
        res.send(req.params.id);
      }
      db.close();
    });
  });
};
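The handler above opens and closes the store on every request, which is slow and can fail under concurrency, since LevelDB allows only one open handle per location. A minimal sketch of the usual alternative, assuming an Express-style `app` (the route path is hypothetical; the store path comes from the snippet): open one levelup handle at startup and reuse it.

// A sketch, not the module's actual code: one shared handle, opened once.
var levelup = require('levelup');
var db = levelup('/tmp/dprk.db', { valueEncoding: 'json' });

// Hypothetical route reusing the shared handle.
app.delete('/wines/:id', function (req, res) {
  db.del(req.params.id, function (err) {
    if (err) return res.send('E');
    res.send(req.params.id);
  });
});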
tape('read stream (gt)', function (t) {
  var db = levelup('no-location', { db: memdown })
  var stream = multileveldown.server(db)
  var client = multileveldown.client()

  stream.pipe(client.createRpcStream()).pipe(stream)

  client.batch([
    { type: 'put', key: 'hello', value: 'world' },
    { type: 'put', key: 'hej', value: 'verden' }
  ], function (err) {
    t.error(err, 'no err')
    var rs = client.createReadStream({ gt: 'hej' })
    rs.pipe(concat(function (datas) {
      t.same(datas.length, 1)
      t.same(datas[0], { key: 'hello', value: 'world' })
      t.end()
    }))
  })
})
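The multileveldown tests above short-circuit transport by piping the server and client streams straight into each other. The same RPC stream is normally carried over a socket; a minimal sketch under that assumption (the port and variable names are illustrative):

var net = require('net')
var levelup = require('levelup')
var memdown = require('memdown')
var multileveldown = require('multileveldown')

var db = levelup('no-location', { db: memdown })

// Server side: one multileveldown stream per connection.
net.createServer(function (sock) {
  sock.pipe(multileveldown.server(db)).pipe(sock)
}).listen(9000)

// Client side: carry the RPC stream over TCP instead of a direct pipe.
var client = multileveldown.client()
var sock = net.connect(9000)
sock.pipe(client.createRpcStream()).pipe(sock)

client.put('hello', 'world', function (err) {
  if (err) throw err
  client.get('hello', console.log)
})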
module.exports = exports = function (opts) {
  opts = opts || {};
  properties.sendTimeout = opts.sendTimeout || properties.sendTimeout;
  properties.maxTimeout = opts.maxTimeout || properties.maxTimeout;
  properties.deadQueue = opts.deadQueue || properties.deadQueue;
  properties.maxQueueConcurrentSend = opts.maxQueueConcurrentSend || properties.maxQueueConcurrentSend;

  dbpath = opts.db || program.fsdb;
  dbpath = dbpath ? path.resolve(dbpath) : path.resolve(process.cwd(), './failsafedb');
  db = levelup(dbpath, leveldbOpts);
  logger.debug('Using db at path ' + dbpath);

  setTimeout(processFailures, properties.sendTimeout);
  h.addActor(mid, monitoring);

  return exports;
};
test('add while reading', function (t) {
  var numEntries = 15
  // t.plan(15)
  var numQueued = 0
  var batchSize = 3
  var numProcessed = 0

  var log = new Log(nextName(), { db: leveldown, valueEncoding: 'json' })
  log.setMaxListeners(0)
  var ldb = levelup(nextName(), { db: leveldown, valueEncoding: 'json' })

  var base = SimpleBase({
    timeout: false,
    db: ldb,
    log: log,
    process: function (entry, cb) {
      setTimeout(function () {
        numProcessed++
        t.pass()
        cb()
      }, 200)
    }
  })

  base.on('live', function () {
    t.equal(numProcessed, numQueued)
    if (numQueued < numEntries) {
      numQueued += batchSize
      addEntries(log, batchSize)
    } else {
      t.end()
    }
  })
  base.on('error', t.error)

  numQueued += batchSize
  addEntries(log, batchSize)
})
rimraf(__dirname + '/db', function (err) {
  if (err) throw err

  var db = levelup(__dirname + '/db')
  var opts
  if (setup) opts = setup(db)
  var m = manifest(db)

  var server = net.createServer(function (con) {
    con.on('error', function () { /* noop */ })
    // renamed from `server` to avoid shadowing the outer net server
    var serverStream = multilevel.server(db, opts)
    con.on('data', function (data) {
      DEBUG && console.log('S <- ' + data.toString())
    })
    serverStream.on('data', function (data) {
      DEBUG && console.log('S -> ' + data.toString())
    })
    con.pipe(serverStream).pipe(con)
  })

  freeport(function (err, port) {
    if (err) throw err
    server.listen(port, function () {
      var _db = multilevel.client(m)
      var con = net.connect(port)
      con.on('error', function () { /* noop */ })
      _db.pipe(con).pipe(_db)
      _db.on('data', function (data) {
        DEBUG && console.log('C -> ' + data)
      })
      cb(_db, dispose)

      function dispose () {
        server.close()
        db.close()
        con.destroy()
      }
    })
  })
})
var openDb = function (callback) {
  if (!self.db || self.db.isClosed()) {
    levelup(self.dbPath, {
      createIfMissing: true,
      keyEncoding: 'binary',
      valueEncoding: 'json'
    }, function (err, lp) {
      if (lp) {
        self.db = lp;
        callback();
      } else {
        // keep retrying until the store opens (e.g. while another handle
        // is still closing); note this loops forever if it never succeeds
        openDb(callback);
      }
    });
  } else {
    callback();
  }
};
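openDb above retries in a tight loop for as long as the open keeps failing. A sketch of the same idea with a capped, delayed retry; the attempt limit and delay are invented numbers, not from the original:

// Hypothetical variant of openDb: give up after a few attempts instead of spinning.
var openDbBounded = function (callback, attempts) {
  attempts = attempts || 0;
  if (self.db && !self.db.isClosed()) return callback();
  levelup(self.dbPath, {
    createIfMissing: true,
    keyEncoding: 'binary',
    valueEncoding: 'json'
  }, function (err, lp) {
    if (lp) {
      self.db = lp;
      return callback();
    }
    if (attempts >= 5) return callback(err || new Error('could not open db'));
    setTimeout(function () {
      openDbBounded(callback, attempts + 1);
    }, 100);
  });
};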
function openDb(_, _cb) {
    log.info('ConsistentHash.new.deserialize: opening db');
    // TODO: check to make sure there's nothing already at this location
    levelup(options.location, self.leveldbCfg_, function (err, db) {
        if (err) {
            return _cb(new verror.VError(err));
        }
        if (!db) {
            return _cb(new verror.VError('unable to instantiate db!'));
        }
        self.db_ = db;
        _.db = db;
        return _cb();
    });
},
test('actual query', function (t) {
  t.plan(3);
  var db = tree(sublevel(level('./test_db', { valueEncoding: 'json', db: require('memdown') })));
  db.batch(schools.features.map(function (item) {
    return { key: item.id, value: item };
  }), function (err) {
    t.error(err);
    db.once('uptodate', function () {
      db.treeQuery([
        -73.050957972443328, 41.813217236760636,
        -71.941805307670336, 42.95940187161159
      ], function (err, resp) {
        t.error(err);
        t.equals(resp.length, 82, 'got right number');
      });
    });
  });
});
this.levelredis = function () {
  // see this being injected by DI in the future
  var redisURL = url.parse(process.env.REDISTOGO_URL);
  var redisclient = redis.createClient(redisURL.port, redisURL.hostname, { no_ready_check: true });
  redisclient.auth(redisURL.auth.split(":")[1]);

  var levelupdb = levelup('TeamZoneDB', {
    valueEncoding: 'json',
    // the 'db' option replaces LevelDOWN
    db: levelstore,
    redis: redisclient
  });
  var leveldb = sublevel(levelupdb);

  return {
    leveldb: leveldb,
    redis: redisclient
  };
}
test('big query', function (t) {
  t.plan(3);
  var db = tree(sublevel(level('./test_db', { valueEncoding: 'json', db: require('memdown') })));
  db.batch(schools.features.map(function (item) {
    return { key: item.id, value: item };
  }), function (err) {
    t.error(err);
    db.once('uptodate', function () {
      db.treeQuery([-100, 0, 0, 80], function (err, resp) {
        t.error(err);
        t.equals(resp.length, 330, 'got them all');
      });
    });
  });
});
DB.prototype.start = function (callback) {
  var self = this;

  if (!fs.existsSync(this.dataPath)) {
    mkdirp.sync(this.dataPath);
  }

  this.genesis = Block.fromBuffer(this.node.services.bitcoind.genesisBuffer);
  this.store = levelup(this.dataPath, {
    db: this.levelupStore,
    maxOpenFiles: this.maxOpenFiles
  });

  this.node.services.bitcoind.on('tx', this.transactionHandler.bind(this));

  this.once('ready', function () {
    log.info('Bitcoin Database Ready');

    // Notify that there is a new tip
    self.node.services.bitcoind.on('tip', function (height) {
      if (!self.node.stopping) {
        self.sync();
      }
    });
  });

  async.series([
    function (next) {
      self._checkVersion(next);
    },
    function (next) {
      self._setVersion(next);
    }
  ], function (err) {
    if (err) {
      return callback(err);
    }
    self.loadTip(function (err) {
      if (err) {
        return callback(err);
      }
      self.sync();
      self.emit('ready');
      setImmediate(callback);
    });
  });
};
AllDataStorage.prototype.put = function put (key, value, options, callback) {
  var self = this;

  // handle optionality of options
  if (typeof options === "function") {
    callback = options;
    options = {};
  }

  // check that we are writing within the allowable interval range
  if (key < self.previousInterval.start) {
    return callback(new Error("key " + key + " too old, last acceptable: " + self.previousMarker));
  } else if (key > self.nextInterval.end) {
    return callback(new Error("key " + key + " too far into the future, " +
      "furthest acceptable: " + self.nextInterval.end));
  }

  var chosenInterval;

  // if we are writing into a future interval, assume we are close to interval
  // switch and create new interval if necessary and put data in it
  if (key > self.nextInterval.start) {
    if (!self.nextInterval.interval) {
      self.nextInterval.interval = levelup(path.join(self.location, self.nextInterval.start), {
        cacheSize: self.cacheSize,
        compression: self.compression,
        createIfMissing: true,
        errorIfExists: false,
        keyEncoding: self.keyEncoding,
        valueEncoding: self.valueEncoding
      });
    }
    chosenInterval = self.nextInterval;
  } else if (key > self.currentInterval.start) {
    chosenInterval = self.currentInterval;
  } else if (key > self.previousInterval.start) {
    chosenInterval = self.previousInterval;
  }

  chosenInterval.interval.put(key, value, options, callback);
};
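A caller's-eye sketch of the put above; `storage` and the key are assumed for illustration, while the interval layout (previous, current, and next intervals, each backed by its own levelup store) comes from the snippet:

// Hypothetical caller: keys outside the accepted three-interval window
// come back as errors rather than being written.
storage.put(Date.now().toString(), { event: 'click' }, function (err) {
  if (err) {
    // key fell before previousInterval.start or after nextInterval.end
    return console.error('rejected:', err.message);
  }
  // otherwise it was routed to whichever interval covers the key
  console.log('stored');
});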
tape('del', function (t) {
  var db = levelup('no-location', { db: memdown })
  var stream = multileveldown.server(db)
  var client = multileveldown.client()

  stream.pipe(client.createRpcStream()).pipe(stream)

  client.put('hello', 'world', function (err) {
    t.error(err, 'no err')
    client.del('hello', function (err) {
      t.error(err, 'no err')
      client.get('hello', function (err) {
        t.ok(err, 'had error')
        t.ok(err.notFound, 'not found err')
        t.end()
      })
    })
  })
})
tape('readonly', function (t) {
  var db = levelup('no-location', { db: memdown })
  db.put('hello', 'verden')

  var stream = multileveldown.server(db, { readonly: true })
  var client = multileveldown.client()

  stream.pipe(client.createRpcStream()).pipe(stream)

  client.put('hello', 'world', function (err) {
    t.ok(err, 'put failed')
    client.get('hello', function (err, value) {
      t.error(err, 'no err')
      t.same(value, 'verden', 'old value')
      t.end()
    })
  })
})
rimraf(path, function () {
  levelup(path, { createIfMissing: true }, function (err, db) {
    map(db)
    reduce(db)

    db.map.add(function test (key, value, emit) {
      console.log('MAP', '' + key, '' + value)
      var n = Number('' + value)
      emit(['numbers', 'square'], Math.pow(n, 2))
      emit(['numbers', 'sqrt'], Math.sqrt(n))
    })

    db.reduce.add({
      name: 'test',
      depth: 1,
      reduce: function (acc, value, key) {
        console.log(acc, value, key)
        return Number(acc) + Number(value)
      },
      initial: 0
    })

    db.put('a', 1)
    db.put('b', 2)
    db.put('c', 3)

    db.once('queue:drain', function () {
      db.put('c', '6')
      db.del('a')
    })

    db.on('reduce:test', function (key, col) {
      console.log('REDUCE', key, col)
    })

    // WARNING. if you use depth: > 0
    db.reduce.view('test', [true])
      .on('data', function (data) {
        console.log('DATA', data)
      })
  })
})
cursor.createReadStream({ start: start, end: end, reverse: reverse, limit: 1 })
  .on('data', function (data) {
    read = true
    if ((interest.exclude == null) || (!interest.exclude.matches(new ndn.Name.Component(data.key)))) {
      console.log('Suffix is not excluded', data.key)
      if (data.key == '%00') {
        console.log('got to data')
        if ((interest.minSuffixComponents == null) || (suffixIndex >= interest.minSuffixComponents)) {
          console.log('more than minimum suffix components')
          // renamed the inner callback argument from `data` to `wireData`
          // to avoid shadowing the stream entry above
          level(data.value, { db: down }).get('0', function (err, wireData) {
            if (interest.publisherPublicKeyDigest != undefined) {
              var d = new ndn.Data()
              d.decode(wireData)
              if (ndn.DataUtils.arraysEqual(d.signedInfo.publisher.publisherPublicKeyDigest,
                  interest.publisherPublicKeyDigest.publisherPublicKeyDigest)) {
                transport.send(wireData)
              } else {
                crawl(q, contentKey)
              }
            } else {
              transport.send(wireData)
            }
          })
        } else {
          console.log('not enough suffix')
          crawl(q, contentKey)
        }
      } else {
        console.log('keep crawling')
        if ((interest.maxSuffixComponents == null) || (suffixIndex < interest.maxSuffixComponents)) {
          suffixIndex++
          crawl(data.value)
        } else {
          console.log('reached max suffix')
          crawl(q, data.key)
        }
      }
    } else {
      console.log('name component is excluded in interest,')
      crawl(q, data.key)
    }
  })
  .on('end', function () {
    // 'end' handler body truncated in the source
  })
test('should be able to stream a list of parents', function (t) {
  var db = childish(levelup('/db' + dbi++, { db: memdown }));
  var key = 'p1/c1';
  db.put(key, function (err) {
    t.error(err);
    db.put('p1/c2', function (err) {
      t.error(err);
      db.put('p2/c1', function (err) {
        t.error(err);
        var nodes = [];
        db.parents('c1')
          .on('data', nodes.push.bind(nodes))
          .on('end', function () {
            t.deepEqual(nodes, ['p1', 'p2']);
            t.end();
          });
      });
    });
  });
});
tape('batch', function (t) {
  var db = levelup('no-location', { db: memdown })
  var stream = multileveldown.server(db)
  var client = multileveldown.client()

  stream.pipe(client.createRpcStream()).pipe(stream)

  client.batch([
    { type: 'put', key: 'hello', value: 'world' },
    { type: 'put', key: 'hej', value: 'verden' }
  ], function (err) {
    t.error(err, 'no err')
    client.get('hello', function (err, value) {
      t.error(err, 'no err')
      t.same(value, 'world')
      client.get('hej', function (err, value) {
        t.error(err, 'no err')
        t.same(value, 'verden')
        t.end()
      })
    })
  })
})
process.once('uncaughtException', function (e) {
  console.log(e)
  console.warn("Error trying to connect")
  console.log("Trying to connect to binary instance")

  db = levelup('./logdb')
  var server = net.createServer(function (con) {
    con.pipe(multilevel.server(db)).pipe(con)
  }).listen(port, function () {
    console.log('%s listening at %s', "LevelDB", port)
  })

  db.on('error', function (e) {
    console.warn("There was an error connecting to binary instance")
    console.warn("Shutting process down")
    process.exit(0)
  })

  callback(db)
})
test('read stream returns live data', function (t) {
  t.plan(1)

  var log = new Log(nextName(), { db: leveldown, valueEncoding: 'json' })
  log.setMaxListeners(0)
  var ldb = levelup(nextName(), { db: leveldown, valueEncoding: 'json' })

  var putOp = { key: 'count', value: 0, type: 'put' }
  var dbEntries = [putOp]
  var numEntries = 5

  var base = SimpleBase({
    db: ldb,
    log: log,
    process: function (entry, cb) {
      if (putOp.value + 1 > numEntries) {
        return cb()
      } else {
        putOp.value++
        addEntries(log, 1) // add more
        cb(dbEntries.slice())
      }
    }
  })

  collect(base.createReadStream(), function (err, entries) {
    if (err) throw err
    delete putOp.type
    t.deepEqual(entries, dbEntries)
  })

  addEntries(log, 1)
})
test("escaping '?...' values", function(t){ var db = level(memdown); Transactor(db, {}, function(err, transactor){ if(err) return t.end(err); λ.series([ λ.curry(transactor.transact, [["0", "_db/attribute", "name"], ["0", "_db/type" , "String"]]), λ.curry(transactor.transact, [["1", "name", "?notavar"], ["2", "name", "notavar"], ["3", "name", "\\?notavar"], ["4", "name", "\\\\"], ["5", "name", "?_"]]) ], function(err){ if(err) return t.end(err); var fb = transactor.connection.snap(); λ.concurrent({ should_be_a_var: λ.curry(q, fb, [["?id", "name", "?notavar"]]), bind_it: λ.curry(q, fb, [["?id", "name", "?name"]], [{"?name": "?notavar"}]), escape_it: λ.curry(q, fb, [["?id", "name", "\\?notavar"]]), bind_it2: λ.curry(q, fb, [["?id", "name", "?name"]], [{"?name": "\\?notavar"}]), not_actually_escaped: λ.curry(q, fb, [["?id", "name", "\\\\?notavar"]]), double_slash: λ.curry(q, fb, [["?id", "name", "\\\\\\"]]), double_slash_bind: λ.curry(q, fb, [["?id", "name", "?name"]], [{"?name": "\\\\"}]), not_a_throw_away: λ.curry(q, fb, [["?id", "name", "\\?_"]]), not_a_throw_away2: λ.curry(q, fb, [["?id", "name", "?name"]], [{"?name": "?_"}]), }, function(err, r){ t.deepEqual(_.sortBy(r.should_be_a_var, "?id"), [{"?id": "1", "?notavar": "?notavar"}, {"?id": "2", "?notavar": "notavar"}, {"?id": "3", "?notavar": "\\?notavar"}, {"?id": "4", "?notavar": "\\\\"}, {"?id": "5", "?notavar": "?_"}]); t.deepEqual(r.bind_it, [{"?id": "1", "?name": "?notavar"}]); t.deepEqual(r.escape_it, [{"?id": "1"}]); t.deepEqual(r.bind_it2, [{"?id": "3", "?name": "\\?notavar"}]); t.deepEqual(r.not_actually_escaped, [{"?id": "3"}]); t.deepEqual(r.double_slash, [{"?id": "4"}]); t.deepEqual(r.double_slash_bind, [{"?id": "4", "?name": "\\\\"}]); t.deepEqual(r.not_a_throw_away, [{"?id": "5"}]); t.deepEqual(r.not_a_throw_away2, [{"?id": "5", "?name": "?_"}]); t.end(err); }); }); }); });
emailPlugin.init = function (config) {
  logger.info('Using emailstore plugin');

  var path = globalConfig.leveldb + '/emailstore' +
    (globalConfig.name ? ('-' + globalConfig.name) : '');
  emailPlugin.db = config.db || globalConfig.db || levelup(path);

  emailPlugin.email = config.emailTransport || nodemailer.createTransport(config.email);

  emailPlugin.textTemplate = config.textTemplate || 'copay.plain';
  emailPlugin.htmlTemplate = config.htmlTemplate || 'copay.html';

  emailPlugin.crypto = config.crypto || crypto;

  emailPlugin.confirmUrl = (
    process.env.INSIGHT_EMAIL_CONFIRM_HOST ||
    config.confirmUrl ||
    'https://insight.bitpay.com'
  ) + globalConfig.apiPrefix + '/email/validate';

  emailPlugin.redirectUrl = (
    config.redirectUrl || 'https://copay.io/in/app?confirmed=true'
  );
};
this.Open = function (db, leveldbArg) {
  if (db === null) {
    var levelupdb = levelup(connection, {
      valueEncoding: 'json',
      // the 'db' option replaces LevelDOWN
      db: function (connection) {
        return new levelAzureDown(connection);
      }
    });
    levelDb = sublevel(levelupdb);
    teamsDb = levelDb.sublevel('teams');
  } else {
    // supporting the passing in of mocked database
    teamsDb = db;
    if (leveldbArg !== undefined) levelDb = leveldbArg;
  }
}
test(name, function (t) {
  var location = '__ttl-' + Math.random()
  var db

  t._end = t.end
  t.end = function () {
    db.close(function (err) {
      t.notOk(err, 'no error on close()')
      rimraf(location, t._end.bind(t))
    })
  }

  levelup(location, function (err, _db) {
    t.notOk(err, 'no error on open()')
    var createReadStream = _db.createReadStream.bind(_db)
    db = ttl(_db, { checkFrequency: 50 })
    fn(db, t, createReadStream)
  })
})
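The harness above wraps each fresh levelup instance with level-ttl (sweeping every 50 ms) before handing it to the test body. A minimal sketch of what such a wrapped db supports, using level-ttl's documented per-put `ttl` option; the key, value, and timings here are illustrative:

db.put('session', 'abc123', { ttl: 100 }, function (err) {
  if (err) throw err
  setTimeout(function () {
    db.get('session', function (err) {
      // the entry should have expired and been swept by now
      console.log('expired:', !!(err && err.notFound))
    })
  }, 300)
})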
before(function (done) {
  var db = this.db = level(path, {
    keyEncoding: 'utf8',
    valueEncoding: 'utf8'
  })

  var batch = []
  for (var key in input) {
    batch.push({ type: 'put', key: key, value: input[key] })
  }

  var self = this
  db.batch(batch, ok(function () {
    self.server = net.createServer(function (con) {
      con.pipe(readable.server(db)).pipe(con)
    })
    self.server.listen(port, done)
  }))
})
tape('bypasses getItem for keys-only db streams', function (t) {
  var origGetItem = Storage.prototype.getItem;
  Storage.prototype.getItem = function () {
    throw new Error('shouldn\'t get called for keys-only db streams');
  };

  var db = levelup('ooga', { db: leveldown });
  var batch = [
    { key: 'a', value: '1', type: 'put' },
    { key: 'b', value: '2', type: 'put' },
    { key: 'c', value: '3', type: 'put' }
  ];
  db.batch(batch, function () {
    db.createKeyStream({ start: 'c' })
      .on('data', function (key) {
        t.equals(key, 'c');
      })
      .on('end', function () {
        // unhack getItem
        Storage.prototype.getItem = origGetItem;
        t.end();
        console.timeEnd('TESTS');
      });
  });
});
function testConnections (numberOfClients, callback) {
  /* istanbul ignore next */
  if (storageBackEnd.clearGlobalStore) {
    storageBackEnd.clearGlobalStore()
  }

  // Use an in-memory storage back-end.
  var path = LEVELDOWN_PATH + '-' + uuid()
  var level = levelup(path, { db: storageBackEnd })
  var dataLog = SimpleLog(level)

  // Use an in-memory blob store.
  var blobs = require('abstract-blob-store')()

  // Pipe log messages to nowhere.
  var serverLog = pino({}, devnull())

  var emitter = new (require('events').EventEmitter)()
  var handler = require('./')(serverLog, dataLog, blobs, emitter, sha256)

  var server = net.createServer()
    .on('connection', handler)
    .once('close', function () {
      level.close(function () {
        rimraf.sync(path)
      })
    })
    .listen(0, function () {
      var serverPort = this.address().port
      var clients = []
      for (var n = 0; n < numberOfClients; n++) {
        var client = net.connect(serverPort)
        var clientJSON = duplexJSON(client)
        clientJSON.socket = client
        clients.push(clientJSON)
      }
      if (numberOfClients === 1) {
        callback(clients[0], server, serverPort)
      } else {
        callback(clients, server, serverPort)
      }
    })
}
tape('bypasses getItem for keys-only db streams', function (t) {
  var origGetItem = LocalStorage.prototype.getItem;
  LocalStorage.prototype.getItem = function () {
    throw new Error('shouldn\'t get called for keys-only db streams');
  };

  var db = levelup('ooga', { db: leveldown });
  var batch = [
    { key: 'a', value: '1', type: 'put' },
    { key: 'b', value: '2', type: 'put' },
    { key: 'c', value: '3', type: 'put' }
  ];
  db.batch(batch, function () {
    db.createKeyStream({ start: 'c' })
      .on('data', function (key) {
        t.equals(key, 'c');
        db.close(function (err) {
          t.notOk(err, 'no error');
          // unhack getItem
          LocalStorage.prototype.getItem = origGetItem;
          t.end();
        });
      });
  });
});
function testReport () {
  var db = sublevel(levelup(path.join(__dirname, 'db'), { valueEncoding: 'json' }))
  var sub = db.sublevel('nodejs/live.nodejs.org', { valueEncoding: 'json' })
  sub = main.getDatabase(sub)

  db.get('meta', function (e, info) {
    var u = 'https://api.github.com/repos/nodejs/live.nodejs.org/collaborators'
    ghrequest(u, function (e, results) {
      if (e) return cb(e)
      var collabs = results.map(function (r) { return r.login })
      sub.byTime.createReadStream()
        .on('data', function (o) {
          var obj = o.value
          report(obj, collabs, info.starttime, info.endtime)
        })
        .on('end', function () {
          printall()
        })
    })
  })
}
function createBlockchain (cb) {
  createIfNotExists(dbDir);

  var blockDB = levelup(dbDir + this.id);
  this.blockchain = new Blockchain(blockDB, false);

  var block = new Block({
    header: {
      coinbase: util.toBuffer(this.coinbase),
      gasLimit: util.toBuffer(this.gasLimit),
      number: 0,
      difficulty: util.toBuffer(this.difficulty),
      timestamp: new Buffer(util.nowHex(), 'hex')
    },
    transactions: [],
    uncleHeaders: []
  });

  this.blockchain.putBlock(block, cb);

  function createIfNotExists (dir) {
    if (!fs.existsSync(dir)) {
      fs.mkdirSync(dir);
    }
  }
}