Example #1
        }, (cb) => {
          const file = new UnixFS('file')
          const parentNode = new mDAG.DAGNode()
          links.forEach((l) => {
            file.addBlockSize(l.leafSize)
            const link = new mDAG.DAGLink(l.Name, l.Size, l.Hash)
            parentNode.addRawLink(link)
          })

          parentNode.data = file.marshal()
          dagService.add(parentNode, (err) => {
            if (err) {
              return log.err(err)
            }

            const pathSplit = path.split('/')
            const fileName = pathSplit[pathSplit.length - 1]

            callback(null, {
              Hash: parentNode.multihash(),
              Size: parentNode.size(),
              Name: fileName
            })
            cb()
          })
        }))
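
All of these examples assume a dagService with an add(node, callback) method in scope. For experimenting with the snippets in isolation, a minimal in-memory stand-in is enough (hypothetical; the real service persists blocks to an IPFS repo):

  // Hypothetical in-memory stand-in for the dagService used in these examples.
  // It only mimics the add(node, callback) signature the snippets rely on.
  const store = new Map()

  const dagService = {
    add (node, callback) {
      // key nodes by their multihash, roughly like a real block store would
      store.set(node.multihash().toString('hex'), node)
      setImmediate(callback, null)
    }
  }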
Example #2
      .pipe(through2((chunk, enc, cb) => {
        // 1. create the unixfs merkledag node
        // 2. add its hash and size to the leaves array

        // TODO - Support really large files
        // a) check if we have already reached the max number of chunks; if so:
        // a.1) create a parent node for all of the current leaves
        // a.2) clear the leaves array and add just the parent node

        const l = new UnixFS('file', chunk)
        const n = new merkleDAG.DAGNode(l.marshal())

        dagService.add(n, (err) => {
          if (err) {
            this.push({error: `Failed to store chunk of: ${fl.path}`})
            return cb(err)
          }

          leaves.push({
            Hash: n.multihash(),
            Size: n.size(),
            leafSize: l.fileSize(),
            Name: ''
          })

          cb()
        })
      }, (cb) => {
Example #3
      function traverse (tree, path, done) {
        const keys = Object.keys(tree)
        let tmpTree = tree
        keys.forEach((key) => {
          if (typeof tmpTree[key] === 'object' &&
              !Buffer.isBuffer(tmpTree[key])) {
            tmpTree[key] = traverse.call(this, tmpTree[key], path ? path + '/' + key : key, done)
          }
        })

        // at this stage, all keys are multihashes
        // create a dir node
        // add all the multihashes as links
        // return this new node multihash

        const d = new UnixFS('directory')
        const n = new merkleDAG.DAGNode()

        keys.forEach((key) => {
          const b58mh = bs58.encode(tmpTree[key])
          const l = new merkleDAG.DAGLink(
              key, mhIndex[b58mh].size, tmpTree[key])
          n.addRawLink(l)
        })

        n.data = d.marshal()

        pendingWrites++
        dagService.add(n, (err) => {
          pendingWrites--
          if (err) {
            this.push({error: 'failed to store dirNode'})
          } else if (path) {
            const el = {
              path: path,
              multihash: n.multihash(),
              size: n.size()
            }
            this.push(el)
          }

          if (pendingWrites <= 0) {
            done()
          }
        })

        if (!path) {
          return
        }

        mhIndex[bs58.encode(n.multihash())] = { size: n.size() }
        return n.multihash()
      }
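
For context, traverse expects a nested tree whose leaf values are the multihash Buffers of files already written through dagService; a hypothetical invocation (fileTree, the multihash variables, and the stream binding are all assumptions) looks like:

  // Hypothetical input: nested path segments whose leaf values are the
  // multihash Buffers of file nodes already stored via dagService.
  const fileTree = {
    'hello.txt': helloMultihash,       // Buffer returned by the file importer
    docs: {
      'readme.md': readmeMultihash     // Buffer
    }
  }

  // traverse pushes one {path, multihash, size} element per directory onto
  // the bound stream, so it is invoked with the stream as `this`. Non-root
  // calls return the new node's multihash synchronously so the parent can
  // link to it; the root call (falsy path) only fires `done`.
  traverse.call(stream, fileTree, null, () => {
    console.log('all directory nodes written')
  })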
Example #4
      }, (cb) => {
        if (leaves.length === 1) {
          // 1. add to the files array {path: <>, hash: <>}
          // 2. emit the path + hash

          const el = {
            path: fl.path,
            multihash: leaves[0].Hash,
            size: leaves[0].Size,
            dataSize: leaves[0].leafSize
          }

          files.push(el)
          this.push(el)
          return done(cb)
        }
        // 1. create a parent node and add all the leaves
        // 2. add to the files array {path: <>, hash: <>}
        // 3. emit the path + hash of the parent node

        const f = new UnixFS('file')
        const n = new merkleDAG.DAGNode()

        leaves.forEach((leaf) => {
          f.addBlockSize(leaf.leafSize)
          const l = new merkleDAG.DAGLink(leaf.Name, leaf.Size, leaf.Hash)
          n.addRawLink(l)
        })

        n.data = f.marshal()
        dagService.add(n, (err) => {
          if (err) {
            // this.emit('error', `Failed to store: ${fl.path}`)
            this.push({ error: `Failed to store: ${fl.path}` })
            return cb()
          }

          const el = {
            path: fl.path,
            multihash: n.multihash(),
            size: n.size()
            // dataSize: f.fileSize()
          }

          files.push(el)
          // this.emit('file', el)
          this.push(el)
          return done(cb)
        })
      }))
Example #5
        .pipe(through2((chunk, enc, cb) => {
          // TODO: check if this is right (I believe it should be type 'raw')
          // https://github.com/ipfs/go-ipfs/issues/2331
          const raw = new UnixFS('file', chunk)

          const node = new mDAG.DAGNode(raw.marshal())

          dagService.add(node, function (err) {
            if (err) {
              return log.err(err)
            }
            links.push({
              Hash: node.multihash(),
              Size: node.size(),
              leafSize: raw.fileSize(),
              Name: ''
            })

            cb()
          })
        }, (cb) => {
Example #6
  function fileImporter (path, callback) {
    const stats = fs.statSync(path)
    if (stats.size > CHUNK_SIZE) {
      const links = [] // { Hash: , Size: , Name: }

      fs.createReadStream(path)
        .pipe(new FixedSizeChunker(CHUNK_SIZE))
        .pipe(through2((chunk, enc, cb) => {
          // TODO: check if this is right (I believe it should be type 'raw')
          // https://github.com/ipfs/go-ipfs/issues/2331
          const raw = new UnixFS('file', chunk)

          const node = new mDAG.DAGNode(raw.marshal())

          dagService.add(node, function (err) {
            if (err) {
              return log.err(err)
            }
            links.push({
              Hash: node.multihash(),
              Size: node.size(),
              leafSize: raw.fileSize(),
              Name: ''
            })

            cb()
          })
        }, (cb) => {
          const file = new UnixFS('file')
          const parentNode = new mDAG.DAGNode()
          links.forEach((l) => {
            file.addBlockSize(l.leafSize)
            const link = new mDAG.DAGLink(l.Name, l.Size, l.Hash)
            parentNode.addRawLink(link)
          })

          parentNode.data = file.marshal()
          dagService.add(parentNode, (err) => {
            if (err) {
              return log.err(err)
            }

            const pathSplit = path.split('/')
            const fileName = pathSplit[pathSplit.length - 1]

            callback(null, {
              Hash: parentNode.multihash(),
              Size: parentNode.size(),
              Name: fileName
            })
            cb()
          })
        }))
    } else {
      // create just one file node with the data directly
      const fileUnixFS = new UnixFS('file', fs.readFileSync(path))
      const fileNode = new mDAG.DAGNode(fileUnixFS.marshal())

      dagService.add(fileNode, (err) => {
        if (err) {
          return log.err(err)
        }

        const split = path.split('/')
        const fileName = split[split.length - 1]

        callback(null, {
          Hash: fileNode.multihash(),
          Size: fileNode.size(),
          Name: fileName
        })
      })
    }
  }
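
With dagService and CHUNK_SIZE in scope, importing a single file is one call (a sketch; the path is hypothetical):

  // Sketch: import one file and print the resulting root node's details.
  fileImporter('/tmp/example.txt', (err, file) => {
    if (err) {
      return console.error(err)
    }
    // Hash is the multihash of the root DAG node representing the file
    console.log(file.Name, file.Size, file.Hash)
  })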
Example #7
  function dirImporter (path, callback) {
    const files = fs.readdirSync(path)
    const dirUnixFS = new UnixFS('directory')
    const dirNode = new mDAG.DAGNode()

    if (files.length === 0) {
      dirNode.data = dirUnixFS.marshal()
      dagService.add(dirNode, (err) => {
        if (err) {
          return callback(err)
        }

        const split = path.split('/')
        const dirName = split[split.length - 1]

        callback(null, {
          Hash: dirNode.multihash(),
          Size: dirNode.size(),
          Name: dirName
        })
      })
      return
    }

    async.map(
      files,
      (file, cb) => {
        const filePath = path + '/' + file
        const stats = fs.statSync(filePath)
        if (stats.isFile()) {
          return fileImporter(filePath, cb)
        } else if (stats.isDirectory()) {
          return dirImporter(filePath, cb)
        } else {
          return cb(new Error('Found a weird file: ' + filePath))
        }
      },
      (err, results) => {
        if (err) {
          return callback(err)
        }
        results.forEach((result) => {
          dirNode.addRawLink(new mDAG.DAGLink(result.Name, result.Size, result.Hash))
        })

        dirNode.data = dirUnixFS.marshal()

        dagService.add(dirNode, (err) => {
          if (err) {
            return callback(err)
          }

          const split = path.split('/')
          const dirName = split[split.length - 1]

          callback(null, {
            Hash: dirNode.multihash(),
            Size: dirNode.size(),
            Name: dirName
          })
        })
      })
  }
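
dirImporter composes with fileImporter through mutual recursion, so importing a whole tree is the same shape of call (sketch, hypothetical path):

  // Sketch: import a directory tree; nested files and directories are handled
  // by the mutual recursion between dirImporter and fileImporter, and each
  // entry becomes a DAGLink named after its file or directory.
  dirImporter('/tmp/project', (err, dir) => {
    if (err) {
      return console.error(err)
    }
    console.log(dir.Name, dir.Hash) // root directory node
  })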
Example #8
  this._write = (fl, enc, next) => {
    this.read()
    counter++
    if (!fl.content) {
      // 1. create the empty dir dag node
      // 2. write it to the dag store
      // 3. add to the files array {path: <>, hash: <>}
      // 4. emit the path + hash
      const d = new UnixFS('directory')
      const n = new merkleDAG.DAGNode()
      n.data = d.marshal()
      dagService.add(n, (err) => {
        if (err) {
          this.emit('error', `Failed to store: ${fl.path}`)
          return
        }
        const el = {
          path: fl.path,
          multihash: n.multihash(),
          size: n.size(),
          dataSize: d.fileSize()
        }
        files.push(el)
        this.push(el)
        counter--
        next()
      })
      return
    }

    // Convert a buffer to a readable stream
    if (Buffer.isBuffer(fl.content)) {
      const r = streamifier.createReadStream(fl.content)
      fl.content = r
    }

    // Bail if 'content' is not readable
    if (!isStream.isReadable(fl.content)) {
      this.emit('error', new Error('"content" is neither a Buffer nor a Readable stream'))
      return
    }

    const leaves = []
    fl.content
      .pipe(fsc(CHUNK_SIZE))
      .pipe(through2((chunk, enc, cb) => {
        // 1. create the unixfs merkledag node
        // 2. add its hash and size to the leaves array

        // TODO - Support really large files
        // a) check if we have already reached the max number of chunks; if so:
        // a.1) create a parent node for all of the current leaves
        // a.2) clear the leaves array and add just the parent node
        // (a sketch of this approach follows the example)

        const l = new UnixFS('file', chunk)
        const n = new merkleDAG.DAGNode(l.marshal())

        dagService.add(n, (err) => {
          if (err) {
            this.push({error: `Failed to store chunk of: ${fl.path}`})
            return cb(err)
          }

          leaves.push({
            Hash: n.multihash(),
            Size: n.size(),
            leafSize: l.fileSize(),
            Name: ''
          })

          cb()
        })
      }, (cb) => {
        if (leaves.length === 1) {
          // 1. add to the files array {path: <>, hash: <>}
          // 2. emit the path + hash

          const el = {
            path: fl.path,
            multihash: leaves[0].Hash,
            size: leaves[0].Size,
            dataSize: leaves[0].leafSize
          }

          files.push(el)
          this.push(el)
          return done(cb)
        }
        // 1. create a parent node and add all the leaves
        // 2. add to the files array {path: <>, hash: <>}
        // 3. emit the path + hash of the parent node

        const f = new UnixFS('file')
        const n = new merkleDAG.DAGNode()

        leaves.forEach((leaf) => {
          f.addBlockSize(leaf.leafSize)
          const l = new merkleDAG.DAGLink(leaf.Name, leaf.Size, leaf.Hash)
          n.addRawLink(l)
        })

        n.data = f.marshal()
        dagService.add(n, (err) => {
          if (err) {
            // this.emit('error', `Failed to store: ${fl.path}`)
            this.push({ error: `Failed to store: ${fl.path}` })
            return cb()
          }

          const el = {
            path: fl.path,
            multihash: n.multihash(),
            size: n.size()
            // dataSize: f.fileSize()
          }

          files.push(el)
          // this.emit('file', el)
          this.push(el)
          return done(cb)
        })
      }))

    function done (cb) {
      counter--
      next()
      cb()
    }
  }
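
The TODO above about really large files could be approached roughly as follows, sketched under assumed names (MAX_LEAVES and the callback shape are not part of the real module): once the leaves array reaches a cap, fold it into an intermediate parent node and continue chunking with only that parent in the array, which keeps the final root's link count bounded.

  // Sketch only: collapse the current leaves into one intermediate parent
  // once MAX_LEAVES is reached. The value 174 mirrors go-ipfs's default
  // links-per-block and is used here purely for illustration.
  const MAX_LEAVES = 174

  function maybeCollapse (leaves, callback) {
    if (leaves.length < MAX_LEAVES) {
      return callback(null, leaves)
    }

    const f = new UnixFS('file')
    const n = new merkleDAG.DAGNode()

    leaves.forEach((leaf) => {
      f.addBlockSize(leaf.leafSize)
      n.addRawLink(new merkleDAG.DAGLink(leaf.Name, leaf.Size, leaf.Hash))
    })

    n.data = f.marshal()
    dagService.add(n, (err) => {
      if (err) {
        return callback(err)
      }
      // the parent replaces all of its leaves in the working array
      callback(null, [{
        Hash: n.multihash(),
        Size: n.size(),
        leafSize: f.fileSize(),
        Name: ''
      }])
    })
  }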