Example #1
0
File: s3.js Project: bermi/grunt-s3
  exports.sync = function (src, dest, opts) {
    var dfd = new _.Deferred();
    var options = _.clone(opts);

    // Pick out the configuration options we need for the client.
    options = _.pick(options, [ 'region', 'endpoint', 'port', 'key', 'secret', 'access', 'bucket', 'verify', 'debug' ]);

    updateAmazonConfig(options.key, options.secret);

    var client = new aws.S3(options);

    if (options.debug) {
      return dfd.resolve(util.format(MSG_SKIP_DEBUG, client.bucket, src)).promise();
    }

    // Check for the file on s3
    if( !options.verify ) {
      client.headObject({ Bucket: options.bucket, Key: dest }, function (err, res) {
        var upload;

        // If the file was not found, then we should be able to continue with a normal upload procedure
        if (err && err.statusCode === 404) {
          upload = exports.upload( src, dest, opts);
          // pass through the dfd state
          upload.then( dfd.resolve, dfd.reject );
        } else if (!res || err ) {
          dfd.reject(makeError(MSG_ERR_DOWNLOAD, src, err || res.statusCode));
        } else {
          // the file exists so do nothing with that
          dfd.resolve(util.format(MSG_SKIP_SUCCESS, src));
        }
      });
    } else {
      // verify was truthy, so we need to make sure that this file is actually the file it thinks it is
      client.getObject({ Bucket: options.bucket, Key: dest }, function(err, res) {
        var upload;

        // If the file was not found, then we should be able to continue with a normal upload procedure
        if (err && err.statusCode === 404) {
          upload = exports.upload( src, dest, opts);
          // pass through the dfd state
          upload.then( dfd.resolve, dfd.reject );
        } 
        else if (!res || err) {
          dfd.reject(makeError(MSG_ERR_DOWNLOAD, src, err || res.statusCode));
        } 
        else {
          // the file exists so let's check to make sure it's the right file, if not, we'll update it
          // Read the local file so we can get its md5 hash.
          fs.readFile(src, function (err, data) {
            var remoteHash, localHash;

            if (err) {
              dfd.reject(makeError(MSG_ERR_UPLOAD, src, err));
            }
            else {
              // The etag head in the response from s3 has double quotes around
              // it. Strip them out.
              remoteHash = res.ETag.replace(/"/g, '');

              // Get an md5 of the local file so we can verify the upload.
              localHash = crypto.createHash('md5').update(data).digest('hex');

              if (remoteHash === localHash) {
                // the file exists and is the same so do nothing with that
                dfd.resolve(util.format(MSG_SKIP_MATCHES, src));
              }
              else {
                fs.stat( src, function(err, stats) {
                  var remoteWhen, localWhen, upload;

                  if (err) {
                    dfd.reject(makeError(MSG_ERR_UPLOAD, src, err));
                  } 
                  else {
                    // which one is newer? if local is newer, we should upload it
                    remoteWhen = new Date(res.LastModified || "0"); // earliest date possible if no header is returned
                    localWhen = new Date(stats.mtime || "1"); // make second earliest date possible if mtime isn't set

                    if( localWhen > remoteWhen ) {
                      // default is that local is newer, only upload when it is
                      upload = exports.upload( src, dest, opts);
                      // pass through the dfd state
                      upload.then( dfd.resolve, dfd.reject );
                    } else {
                      dfd.resolve(util.format(MSG_SKIP_OLDER, src));
                    }
                  }
                });
              }
            }
          });
        }
      });
    }

    return dfd.promise();
  };
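A hypothetical caller of the sync helper above (not part of the grunt-s3 excerpt) would consume the returned promise with the same then(success, failure) pattern the helper itself uses for uploads:

  // Hypothetical usage sketch; the option names mirror those picked out above.
  exports.sync('dist/index.html', 'index.html', { key: '...', secret: '...', bucket: 'my-bucket' })
    .then(
      function (msg) { console.log(msg); },   // skip/upload status message
      function (err) { console.error(err); }
    );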
Example #2
0
exports.handler = function(event, context) {

	console.log(JSON.stringify(event));

	var s3 = new AWS.S3();
	var cfn = new AWS.CloudFormation();
	var properties = {};
	async.series([
		function(callback) {
			var params = {
				Bucket: event.bucket,
				Key: event.jarKey
			};
			console.log("fetching jar file. . . ");
			console.log(JSON.stringify(params));
			s3.getObject(params, function(err, data) {
				if (err) {
					console.log(err);
					context.fail(err);
				} else {
					console.log(data);
					var jarFile = "/tmp/" + path.basename(event.jarKey);
					console.log("JAR FILE: " + jarFile);
					properties["jarFile"] = jarFile;
					fs.writeFileSync(jarFile, data.Body);
				}
				callback(null, null);
			});
		},
		function(callback) {
			console.log("describing stack. . . ");
			cfn.describeStacks({StackName: event.stackName}, function(err, data) {
				if ( err ) {
					console.log(err);
					context.fail(err);
				} else {
					console.log(data);
					data.Stacks[0].Outputs.forEach(function(record) {
						properties[record.OutputKey] = record.OutputValue;
					});
				}
				callback(null, null);
			});
		},
		function(callback) {
			var params = {
				Bucket: event.bucket,
				Key: event.swaggerKey
			};
			console.log("fetching swagger file. . . ");
			console.log(JSON.stringify(params));
			s3.getObject(params, function(err, data) {
				if (err) {
					console.log(err);
					context.fail(err);
				} else {
					console.log(data);
					var swaggerFile = "/tmp/" + path.basename(event.swaggerKey);
					var json = data.Body.toString("utf-8");
					properties["swaggerFile"] = swaggerFile;
					json = json
						.replace(/MyRestaurantLambdaFunctionArn/g, properties.RestaurantLambdaArn)
						.replace(/MyRestaurantSearchLambdaFunctionArn/g, properties.RestaurantSearchLambdaArn)
						.replace(/MyDriverLambdaFunctionArn/g, properties.DriverLambdaArn)
						.replace(/MyDriverSearchLambdaFunctionArn/g, properties.DriverSearchLambdaArn)
						.replace(/MyUserProfileLambdaFunctionArn/g, properties.UserProfileLambdaArn)
						.replace(/MyPredictionLambdaFunctionArn/g, properties.PredictionLambdaArn)
						.replace(/MyLambdaExecutionRoleArn/g, properties.ApplicationExecutionRoleArn)
						.replace(/region/g, event.region);
					console.log("SWAGGER JSON: " + json);
					fs.writeFileSync(swaggerFile, json);
				}
				callback(null, null);
			});
		},
		function(callback) {
			var child = spawn("java", ["-jar", properties["jarFile"], "--region", event.region, "--create", properties["swaggerFile"]]);
			child.stdout.on('data', function(data) {
				console.log('stdout: ' + data);
			});

			child.stderr.on('data', function(data) {
				console.log('stderr: ' + data);
			});

			child.on('error', function(code) {
				// Error NO ENTry = Can not find the file.
				console.log('error: child process exited with code ' + code);
				callback(null, null);
			});

			child.on('close', function(code) {
				console.log('close: child process exited with code ' + code);
				callback(null, null);
			});
		},
		function(callback) {
			context.done();
			callback(null, null);
		}
	]);
}
Example #3
0
 after(function() {
   AWS.S3.restore();
   AWS.SNS.restore();
 });
Example #4
0
//AWS upload
function uploadPhoto (user, photo){
  var s3 = new AWS.S3({params: {Bucket: 'chatherophotos', Key: user.id}});
  s3.upload({Body: photo}, function(err) {
    if (err) {
      return console.error('Error uploading photo', err);
    }
    console.log('Successfully uploaded photo');
  });
}
Example #5
0
function s3GetObject (path, callback) {
	var params = s3SplitPath(path);
	s3.getObject(params, function (err, data) {
		callback(data);
	});
}
Example #6
0
var getObject = module.exports.getObject = function(s3Uri, callback) {
  var params = getS3Params(s3Uri);

  return callback(s3.getObject(params));
};
Example #7
0
    .get(function(req, res) {
        var s3 = new AWS.S3();
        var params = { Bucket: config.AWS.Bucket, Key: req.params.id };

        s3.getObject(params).createReadStream().pipe(res);
    })
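A defensive variant of the route above (a sketch, not from the original project, assuming an Express-style res with a status() method): the stream returned by createReadStream() emits an 'error' event when the key is missing or access is denied, so handling it keeps the request from crashing the process.

    .get(function(req, res) {
        var s3 = new AWS.S3();
        var params = { Bucket: config.AWS.Bucket, Key: req.params.id };

        s3.getObject(params).createReadStream()
            .on('error', function(err) {
                // e.g. NoSuchKey or AccessDenied from S3
                res.status(404).end();
            })
            .pipe(res);
    })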
Example #8
0
File: corsHeaders.js Project: scality/S3
 beforeEach(done => s3.putBucketCors(corsParams, done));
Example #9
0
File: corsHeaders.js Project: scality/S3
 next => s3.createBucket({
     Bucket: bucket,
     ACL: 'public-read',
 }, next),
Example #10
0
require('dotenv').config();
var AWS = require('aws-sdk');
var jsonData = require('./jsonData.js');
var fs = require('fs');
var json2csv = require('json2csv');

try {
  var csv = json2csv({ data: jsonData.data });
} catch (err) {
  console.error(err);
}

var s3 = new AWS.S3({accessKeyId: process.env.ACCESS_KEY_ID, secretAccessKey: process.env.SECRET_ACCESS_KEY});
var params = {Bucket: process.env.BUCKET_NAME, Key: 'test-csv-2', Body: csv};
var getParams = {Bucket: process.env.BUCKET_NAME, Key: 'test-csv-2'};

s3.getObject(getParams, function(err, data) {
  if (err) {
    return console.error(err);
  }
  var wstream = fs.createWriteStream('./test-csv-2.csv');
  wstream.write(data.Body);
});

// s3.upload(params, {}, function(err, data) {
  // console.log(err, data);
// });
Example #11
0
    uploadFile: function (req) {
        var s3 = new AWS.S3(); // Based on Glacier's example: http://docs.aws.amazon.com/AWSJavaScriptSDK/guide/examples.html#Amazon_Glacier__Multi-part_Upload

        // File
        console.dir(req.files.file);
        var fileName = req.files.file.name;
        var filePath = req.files.file.path;
        fileKey = fileName;
        var buffer = fs.readFileSync(filePath);
        // S3 Upload options
        bucket = 'cornell-dochub';

        // Upload
        startTime = new Date();
        var partNum = 0;
        var partSize = 1024 * 1024 * 5; // Minimum 5MB per chunk (except the last part) http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
        numPartsLeft = Math.ceil(buffer.length / partSize);
        var maxUploadTries = 3;
        var fileType = req.files.file.type;
        var multiPartParams = {
            Bucket: bucket,
            Key: fileKey,
            ContentType: fileType
        };
		
		
        var isAcceptedFileType = false;
        var typesLength = acceptedFileTypes.length;
        for (var i = 0; i < typesLength; i++) {
            console.dir(acceptedFileTypes[i]);
            if (fileType == acceptedFileTypes[i].mimeType) {
                isAcceptedFileType = true;
            }
        }
        if (!isAcceptedFileType) { return; }
		
        console.log(multiPartParams.ContentType);


        // Multipart
        console.log("Creating multipart upload for:", fileKey);
        s3.createMultipartUpload(multiPartParams, function (mpErr, multipart) {
            if (mpErr) {
                console.log('Error!', mpErr);
                return;
            }
            console.log("Got upload ID", multipart.UploadId);

            // Grab each partSize chunk and upload it as a part
            for (var rangeStart = 0; rangeStart < buffer.length; rangeStart += partSize) {
                partNum++;
                var end = Math.min(rangeStart + partSize, buffer.length),
                    partParams = {
                        Body: buffer.slice(rangeStart, end),
                        Bucket: bucket,
                        Key: fileKey,
                        PartNumber: String(partNum),
                        UploadId: multipart.UploadId
                    };

                // Send a single part
                console.log('Uploading part: #', partParams.PartNumber, ', Range start:', rangeStart);
                module.exports.uploadPart(s3, multipart, partParams);
            }
        });
    },
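The uploadPart helper this excerpt calls via module.exports.uploadPart is not shown. A minimal sketch of what such a helper might look like, assuming the fileKey, bucket, numPartsLeft and startTime variables that uploadFile assigns above and a hypothetical multipartMap accumulator; it retries a failed part a few times and calls completeMultipartUpload once every part has reported its ETag.

// Hypothetical sketch, not the project's actual helper.
var multipartMap = { Parts: [] }; // assumed accumulator for completed part ETags

module.exports.uploadPart = function (s3, multipart, partParams, tryNum) {
    var maxUploadTries = 3; // mirrors the value used in uploadFile above
    tryNum = tryNum || 1;

    s3.uploadPart(partParams, function (multiErr, mData) {
        if (multiErr) {
            // Retry the part a few times before giving up.
            if (tryNum < maxUploadTries) {
                return module.exports.uploadPart(s3, multipart, partParams, tryNum + 1);
            }
            console.log('Upload part error:', partParams.PartNumber, multiErr);
            return;
        }

        // Record the ETag so the upload can be completed once all parts are in.
        multipartMap.Parts[partParams.PartNumber - 1] = {
            ETag: mData.ETag,
            PartNumber: Number(partParams.PartNumber)
        };

        if (--numPartsLeft > 0) { return; } // more parts still uploading

        s3.completeMultipartUpload({
            Bucket: bucket,
            Key: fileKey,
            MultipartUpload: multipartMap,
            UploadId: multipart.UploadId
        }, function (err, data) {
            if (err) { return console.log('Error completing multipart upload:', err); }
            console.log('Completed upload in', (new Date() - startTime) / 1000, 'seconds');
            console.log('Final upload location:', data.Location);
        });
    });
};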
Example #12
0
var AWS = require('aws-sdk');
AWS.config.region = 'ap-northeast-2';
var s3 = new AWS.S3();
var file = require('fs').createWriteStream('logo.png');
var params = {Bucket:'codingeverybody2', Key:'logo.png'};
s3.getObject(params).createReadStream().pipe(file);
Example #13
0
File: s3.js Project: vepasto/grunt-aws
  grunt.registerMultiTask("s3", DESC, function() {

    //normalize files array (force expand)
    var files = [];
    this.files.forEach(function(file) {
      var cwd = file.cwd || '';
      files = files.concat(file.src.map(function(src) {
        var s = path.join(cwd, src),
            d = (cwd||file.src.length>1) ? ((file.dest||'')+src) : file.dest || src;
        return {src: s, dest: d};
      }));
    });

    //skip directories since there are only files on s3
    files = files.filter(function(file) {
      return !grunt.file.isDir(file.src);
    });

    if(!files.length)
      return grunt.log.ok("No files matched");

    //mark as async
    var done = this.async();
    //get options
    var opts = this.options(DEFAULTS);

    //checks
    if(!opts.bucket)
      grunt.fail.warn("No 'bucket' has been specified");

    //custom mime types
    if(typeof opts.mime === 'object')
      mime.define(opts.mime);
    if(typeof opts.mimeDefault === 'string')
      mime.default_type = opts.mimeDefault;

    //whitelist allowed keys
    AWS.config.update(_.pick(opts,
      'accessKeyId',
      'secretAccessKey',
      'region',
      'sslEnabled',
      'maxRetries',
      'httpOptions'
    ), true);

    //s3 client
    var S3 = new AWS.S3({signatureVersion: opts.signatureVersion});

    //dry run prefix
    var DRYRUN = opts.dryRun ? "[DRYRUN] " : "";

    //retrieve cache for this bucket
    var cache = CacheMgr.get(opts.bucket);

    if(!cache.options)
      cache.options = {};
    if(!cache.prefixes)
      cache.prefixes = {};
    if(!cache.files)
      cache.files = {};

    //base object (lacks Body and Key)
    var baseObject = {
      ACL: opts.access,
      Bucket: opts.bucket
    };

    //set gzip encoding
    if(opts.gzip)
      baseObject.ContentEncoding = 'gzip';

    //use allowed headers
    if(typeof opts.headers === 'object')
      _.extend(baseObject, _.pick(
        opts.headers,
        //http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#putObject-property
        'ContentLength',
        'ContentType',
        'ContentDisposition',
        'ContentEncoding',
        'CacheControl',
        'Expires',
        'GrantFullControl',
        'GrantRead',
        'GrantReadACP',
        'GrantWriteACP',
        'ServerSideEncryption',
        'StorageClass',
        'WebsiteRedirectLocation'
      ));

    //convert numbers and dates
    if(typeof baseObject.CacheControl === 'number')
      baseObject.CacheControl = "max-age="+baseObject.CacheControl+", public";
    else if (typeof baseObject.CacheControl === 'object') {
      var val = baseObject.CacheControl,
          maxage = val.MaxAge || null,
          swr = val.StaleWhileRevalidate || null;
      if (!maxage) {
        grunt.fail.warn("max_age is required for Cache-Control header");
      }
      if (swr) {
        baseObject.CacheControl = "max-age="+maxage+", stale-while-revalidate="+swr+", public";
      } else {
        baseObject.CacheControl = "max-age="+maxage+", public";
      }
    }

    if(baseObject.Expires instanceof Date)
      baseObject.Expires = baseObject.Expires.toUTCString();

    //use meta data headers
    if(typeof opts.meta === 'object')
      baseObject.Metadata = opts.meta;

    //calculate options hash
    var optionsHash = hash(JSON.stringify(baseObject), 'sha256');
    var currOptionsHash = cache.options[this.target];

    //maintain stats
    var stats = { puts: 0, dels: 0, refreshed: false, newOptions: optionsHash !== currOptionsHash };

    if(stats.newOptions)
      cache.options[this.target] = optionsHash;

    var subtasks = [];

    //create the bucket if it does not exist
    if(opts.createBucket)
      subtasks.push(createBucket);

    //enable webhosting
    if(opts.enableWeb)
      subtasks.push(enableWebHosting);

    if(!opts.cache)
      subtasks.push(getFileList);

    subtasks.push(copyAllFiles);

    //start!
    async.series(subtasks, taskComplete);

    //------------------------------------------------

    function createBucket(callback) {
      //check the bucket doesn't exist first
      S3.listBuckets(function(err, data){
        if(err) {
          err.message = 'createBucket:S3.listBuckets: ' + err.message;
          return callback(err);
        }
        var existingBucket = _.detect(data.Buckets, function(bucket){
          return opts.bucket === bucket.Name;
        });
        if(existingBucket){
          grunt.log.writeln('Existing bucket found.');
          callback();
        }else{
          grunt.log.writeln('Creating bucket ' + opts.bucket + '...');
          //create the bucket using the bucket, access and region options
          if (opts.dryRun) return callback();
          S3.createBucket({
            Bucket: opts.bucket,
            ACL: opts.access,
            CreateBucketConfiguration: { LocationConstraint: opts.region }
          }, function(err, data){
            if(err) {
              err.message = 'createBucket:S3.listBuckets:S3.createBucket: ' + err.message;
              return callback(err);
            }
            grunt.log.writeln('New bucket\'s location is: ' + data.Location);
            // Disable caching if bucket is newly created
            opts.cache = false;
            callback();
          });
        }
      });
    }

    function enableWebHosting(callback) {
      var defaultWebOptions = {
        "grunt-overwrite": false,
        IndexDocument: { Suffix : 'index.html' }
      };
      var webOptions = _.isObject(opts.enableWeb) ? opts.enableWeb : defaultWebOptions;

      S3.getBucketWebsite({ Bucket:opts.bucket }, function(err){
        if ((err && err.name === 'NoSuchWebsiteConfiguration') || webOptions["grunt-overwrite"]){
          delete webOptions["grunt-overwrite"];
          //opts.enableWeb can be the params for WebsiteRedirectLocation.
          //Otherwise, just set the index.html as default suffix
          grunt.log.writeln('Enabling website configuration on ' + opts.bucket + '...');
          if (opts.dryRun) return callback();
          S3.putBucketWebsite({
            Bucket: opts.bucket,
            WebsiteConfiguration: webOptions
          }, callback);
        } else {
          if(err){
            err.message = 'enableWebHosting:S3.getBucketWebsite: ' + err.message;
          }
          return callback(err);
        }
      });
    }

    function getFileList(callback) {
      //calculate prefix
      var prefix = null, pindex = Infinity;
      files.forEach(function(file) {
        if(prefix === null) {
          prefix = file.dest;
          return;
        }
        var i = 0;
        while(i < prefix.length &&
              i < file.dest.length &&
              file.dest.charAt(i) === prefix.charAt(i)) i++;
        pindex = Math.min(i, pindex);
      });
      prefix = prefix.substr(0, pindex);

      //get prefix's earliest refresh time
      var refreshedAt = 0;
      for(var p in cache.prefixes)
        if(prefix.indexOf(p) === 0)
          refreshedAt = Math.max(refreshedAt, cache.prefixes[p]);

      //already have list
      if(cache.files &&
         refreshedAt &&
         opts.cacheTTL &&
         opts.cacheTTL > (Date.now() - refreshedAt)) {
        grunt.verbose.writeln("Using cached object list prefixed with '" + prefix + "'");
        return callback();
      }

      //fetch all objects, beginning with key ''
      fetchObjects('');

      function fetchObjects(marker) {
        var msg = "Retrieving list of existing objects";
        msg += prefix ? " prefixed with '" + prefix + "'" : "";
        msg += marker ? (" after '" + marker + "'") : "";
        msg += "...";
        grunt.log.writeln(msg);

        S3.listObjects({
          Bucket: opts.bucket,
          Marker: marker,
          Prefix: prefix
        }, function(err, objs) {
          if(err) {
            err.message = 'getFileList:fetchObjects:S3.listObjects: ' + err.message;
            return callback(err);
          }

          //store results
          objs.Contents.forEach(function(obj) {
            cache.files[obj.Key] = JSON.parse(obj.ETag);
          });
          cache.prefixes[prefix] = Date.now();
          stats.refreshed = true;

          if(objs.IsTruncated)
            fetchObjects(objs.Contents.pop().Key);
          else
            callback();
        });
      }
    }

    function copyAllFiles(callback) {
      //asynchronously loop through all files
      async.eachLimit(files, opts.concurrent, getFile, callback);
    }

    function getFile(file, callback) {
      //extract src and dest
      var src = file.src,
          contents = fs.readFileSync(src),
          dest = file.dest;

      if(opts.gzip) {
        zlib.gzip(contents, function(err, compressed) {
          copyFile(src, compressed, dest, callback);
        });
      } else {
        copyFile(contents, contents, dest, callback);
      }
    }

    function copyFile(src, contents, dest, callback) {

      //skip existing files
      var etag = cache.files[dest];
      if(opts.cache &&
         !stats.newOptions &&
         etag && etag === hash(contents, 'md5')) {
        grunt.log.ok(DRYRUN + "No change '" + dest + "'");
        callback();
        return;
      }

      if(!opts.overwrite && etag) {
        grunt.log.ok(DRYRUN + "File already exists '" + dest + "'");
        callback();
        return;
      }

      //fake successful upload
      if(opts.dryRun)
        return putComplete();

      //extend the base object
      var object = Object.create(baseObject);
      object.Key = dest;
      object.Body = contents;
      if(!object.ContentType)
        object.ContentType = mime.lookup(dest);

      // Set a default charset
      if (opts.charset) object.ContentType += '; charset=' + opts.charset;

      //upload!
      S3.putObject(object, putComplete);

      function putComplete(err, results) {
        if(err) {
          return callback("Put '" + dest + "' failed...\n" + err + "\n ");
        }
        grunt.log.ok(DRYRUN + "Put '" + dest + "'");
        if(!opts.dryRun)
          stats.puts++;
        if(results)
          cache.files[dest] = JSON.parse(results.ETag);
        callback();
      }

    }

    function taskComplete(err) {
      if(err) {
        grunt.fail.warn(err);
        return done(false);
      }

      //all done
      grunt.log.ok("Put " + stats.puts + " files");
      if(opts.cache && (stats.puts || stats.dels || stats.refreshed || stats.newOptions))
        CacheMgr.put(cache);
      done(err);
    }
  });
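Based on the option names this task reads (bucket, access, region, gzip, cache, createBucket, concurrent, headers, ...), a Gruntfile driving it might look roughly like the sketch below; the defaults and the grunt-aws plugin name are assumptions, not taken from the excerpt.

// Hypothetical Gruntfile sketch for the task above, inferred from the options it reads.
module.exports = function (grunt) {
  grunt.initConfig({
    s3: {
      options: {
        accessKeyId: process.env.AWS_ACCESS_KEY_ID,
        secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
        region: 'us-east-1',
        bucket: 'my-site-bucket',      // the task warns if this is missing
        access: 'public-read',         // becomes the ACL on each putObject
        gzip: true,                    // sets ContentEncoding and gzips bodies
        createBucket: true,            // create the bucket when it does not exist
        cache: true,                   // skip files whose cached ETag still matches
        concurrent: 10,                // parallel uploads via async.eachLimit
        headers: { CacheControl: 600 } // number is converted to "max-age=600, public"
      },
      build: {
        cwd: 'build/',
        src: '**'
      }
    }
  });

  grunt.loadNpmTasks('grunt-aws'); // assumed plugin name for this project
  grunt.registerTask('deploy', ['s3']);
};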
Example #14
0
File: s3.js Project: bccloud/incloud
var AWS = require('aws-sdk');
AWS.config={
    accessKeyId: "F-A-GL21Y4UIQLUYGEFY",
    secretAccessKey: "***********==",
	httpOptions:{proxy:'http://cs1-bj.incloud.org.cn:80'}
}
var s3 = new AWS.S3();
s3.listBuckets(function(err, data) {
  if (err) {
    return console.log(err);
  }
  for (var index in data.Buckets) {
    var bucket = data.Buckets[index];
    console.log("Bucket: ", bucket.Name, ' : ', bucket.CreationDate);
  }
});
Example #15
0
exports.handler = function(event, context) {
    var bucket = event.Records[0].s3.bucket.name;
    var key = event.Records[0].s3.object.key;
    console.log('Getting content from S3...');
    s3.getObject({Bucket: bucket, Key: key}, function(err, data) {
        if (err) {
            console.log("Error getting object " + key + " from bucket " + bucket +
                ". Make sure they exist and your bucket is in the same region as this function.");
            context.fail("Error getting file: " + err);
        } else {
            //var jsonData =  JSON.parse(data.Body.toString());
            var textData = data.Body.toString('utf8');
            var parsedBody = JSON.parse(textData);
            var sjsu = parsedBody.sjsu;
            var sjpd = parsedBody.sjpd;

            // First hash tag
            //console.log(sjsu.length);
            for (var i = 0; i < sjsu.length; i++) {
                var obj1 = sjsu[i];
                console.log(obj1.time);
            }
            var array1 = [];
            for (i = 0; i < sjsu.length; i++) {
                obj1 = sjsu[i];
                if (array1.indexOf(obj1.location) < 0 && obj1.location !== "") {
                    array1.push(obj1.location);
                }
            }
            //console.log(array1.length);
            if (array1.length !== 0) {
                console.log(array1);
            }
            var reachCount1 = 0;
            for (i = 0; i < sjsu.length; i++) {
                obj1 = sjsu[i];
                reachCount1 += obj1.followerCount;
            }
            console.log(reachCount1);

            // Second hash tag
            //console.log(sjpd.length);
            for (i = 0; i < sjpd.length; i++) {
                var obj2 = sjpd[i];
                console.log(obj2.time);
            }
            var array2 = [];
            for (i = 0; i < sjpd.length; i++) {
                obj2 = sjpd[i];
                if (array2.indexOf(obj2.location) < 0 && obj2.location !== "") {
                    array2.push(obj2.location);
                }
            }
            //console.log(array2.length);
            if (array2.length !== 0) {
                console.log(array2);
            }
            var reachCount2 = 0;
            for (i = 0; i < sjpd.length; i++) {
                obj2 = sjpd[i];
                reachCount2 += obj2.followerCount;
            }
            console.log(reachCount2);
            context.succeed();
        }
    });

};
Example #16
0
File: corsHeaders.js Project: scality/S3
 next => s3.putBucketCors(corsParams, next),
Example #17
0
exports.storeScript = function (aUser, aMeta, aBuf, aCallback, aUpdate) {
  var s3 = new AWS.S3();
  var scriptName = null;
  var installName = aUser.name + '/';
  var isLibrary = typeof aMeta === 'string';
  var libraries = [];
  var requires = null;
  var match = null;
  var collaborators = null;
  var rLibrary = new RegExp(
    '^(?:(?:(?:https?:)?\/\/' +
      (isPro ? 'openuserjs\.org' : 'localhost:8080') +
        ')?\/(?:libs\/src|src\/libs)\/)?(.*?)([^\/]*\.js)$', '');

  if (!aMeta) { return aCallback(null); }

  if (!isLibrary) {
    scriptName = cleanFilename(aMeta.name, '');

    // Can't install a script without a @name (maybe replace with random value)
    if (!scriptName) { return aCallback(null); }

    if (!isLibrary && aMeta.oujs && aMeta.oujs.author
        && aMeta.oujs.author != aUser.name && aMeta.oujs.collaborator) {
      collaborators = aMeta.oujs.collaborator;
      if ((typeof collaborators === 'string'
          && collaborators === aUser.name)
          || (collaborators instanceof Array
          && collaborators.indexOf(aUser.name) > -1)) {
        installName = aMeta.oujs.author + '/';
      } else {
        collaborators = null;
      }
    }

    installName += scriptName + '.user.js';

    if (aMeta.require) {
      if (typeof aMeta.require === 'string') {
        requires = [aMeta.require];
      } else {
        requires = aMeta.require;
      }

      requires.forEach(function (aRequire) {
        match = rLibrary.exec(aRequire);
        if (match) {
          if (!match[1]) {
            match[1] = aUser.name + '/';
          }

          if (!/\.user\.js$/.test(match[2])) {
            libraries.push(match[1] + match[2]);
          }
        }
      });
    }
  } else {
    scriptName = cleanFilename(aMeta.replace(/^\s+|\s+$/g, ''), '');
    if (!scriptName) { return aCallback(null); }

    installName += scriptName + '.js';
  }

  // Prevent a removed script from being reuploaded
  findDeadorAlive(Script, { installName: caseInsensitive(installName) }, true,
    function (aAlive, aScript, aRemoved) {
      if (aRemoved || (!aScript && (aUpdate || collaborators))) {
        return aCallback(null);
      } else if (!aScript) {
        // New script
        aScript = new Script({
          name: isLibrary ? aMeta : aMeta.name,
          author: aUser.name,
          installs: 0,
          rating: 0,
          about: '',
          updated: new Date(),
          votes: 0,
          flags: 0,
          installName: installName,
          fork: null,
          meta: isLibrary ? { name: aMeta } : aMeta,
          isLib: isLibrary,
          uses: isLibrary ? null : libraries,
          _authorId: aUser._id
        });
      } else {
        // Script already exists.
        if (!aScript.isLib) {
          if (collaborators && (aScript.meta.oujs && aScript.meta.oujs.author != aMeta.oujs.author
              || (aScript.meta.oujs && JSON.stringify(aScript.meta.oujs.collaborator) !=
             JSON.stringify(aMeta.oujs.collaborator)))) {
            return aCallback(null);
          }
          aScript.meta = aMeta;
          aScript.uses = libraries;
        }
        aScript.updated = new Date();
        aScript.installsSinceUpdate = 0;
      }

      aScript.save(function (aErr, aScript) {
        s3.putObject({ Bucket: bucketName, Key: installName, Body: aBuf },
          function (aErr, aData) {
            // Don't save a script if storing failed
            if (aErr) {
              console.error(aUser.name, '-', installName);
              console.error(JSON.stringify(aErr));
              console.error(JSON.stringify(aScript.toObject()));
              return aCallback(null);
            }

            if (aUser.role === userRoles.length - 1) {
              var userDoc = aUser;
              if (!userDoc.save) {
                // We're probably using req.session.user which may have gotten serialized.
                userDoc = new User(userDoc);
              }
              --userDoc.role;
              userDoc.save(function (aErr, aUser) { aCallback(aScript); });
            } else {
              aCallback(aScript);
            }
          });
      });
    });
};
Example #18
0
File: corsHeaders.js Project: scality/S3
 next => s3.putBucketWebsite({ Bucket: bucket,
     WebsiteConfiguration: webConfig }, next),
Example #19
0
 .then(function() {
   var photoKey = photo.photoKeyFromSize(size);
   return s3.getObject({'Bucket': 'bjm-phototools', 'Key': photoKey}).createReadStream();
 });
Example #20
0
File: corsHeaders.js Project: scality/S3
 next => s3.putObject({
     Bucket: bucket,
     Key: 'index.html',
     ACL: 'public-read',
 }, next),
Example #21
0
 promise = new Promise((resolve, reject) => {
   this._s3Client.createBucket(() => {
     this._hasBucket = true;
     resolve();
   });
 }); 
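As written, the promise above always resolves and never uses reject, even when createBucket fails. A variant that surfaces the error might look like this sketch:

 promise = new Promise((resolve, reject) => {
   this._s3Client.createBucket((err) => {
     if (err) {
       return reject(err); // e.g. access denied or an invalid bucket name
     }
     this._hasBucket = true;
     resolve();
   });
 });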
Example #22
0
File: corsHeaders.js Project: scality/S3
 afterEach(done => {
     s3.deleteBucketCors({ Bucket: bucket }, done);
 });
Example #23
0
const client = function (mozaik) {

    mozaik.loadApiConfig(config);

    AWS.config.region = config.get('aws.region');

    const ec2            = new AWS.EC2();
    const cloudFormation = new AWS.CloudFormation();
    const cloudWatch     = new AWS.CloudWatch();
    const s3             = new AWS.S3();

    return {
        stacks() {
            const def = Promise.defer();

            cloudFormation.describeStacks({}, function (err, data) {
                if (err) {
                    def.reject(err);
                } else {
                    def.resolve(data.Stacks);
                }
            });

            return def.promise;
        },

        buckets() {
            const def     = Promise.defer();
            const buckets = [];

            s3.listBuckets({}, function (err, data) {
                if (err) {
                    def.reject(err);
                } else {
                    const bucketPromise = [];
                    const resolvers = [];

                    data.Buckets.forEach(function () {
                        var p =  new Promise( function(resolve, reject){
                            resolvers.push(resolve);
                        });
                        p.then(function(bucket) {
                          buckets.push(bucket);
                        });
                        bucketPromise.push(p);
                    });

                    Promise.all(bucketPromise).then(function() {
                        def.resolve(buckets);
                    });

                    data.Buckets.forEach(function (bucketData, idx) {
                      var bucket = {
                        name:         bucketData.Name,
                        creationDate: bucketData.CreationDate,
                        size: -1
                      }

                      var endTime = new Date()
                      var startTime = new Date()
                      // TODO: (07/04/16 benni) Use moment.js
                      startTime.setDate(startTime.getDate() - 1)
                      var params = {
                          StartTime: startTime.toISOString(),
                          EndTime: endTime.toISOString(),
                          Period: 60*60*24,
                          MetricName: 'BucketSizeBytes',
                          Namespace: 'AWS/S3',
                          Statistics: [ 'Average' ],
                          Dimensions: [
                            { Name: 'BucketName', Value: bucket.name },
                            { Name: 'StorageType', Value: 'StandardStorage' },
                          ],
                          Unit: 'Bytes'
                      };

                      cloudWatch.getMetricStatistics(params, function(err, data) {
                        if (!err && data && data.Datapoints && data.Datapoints.length > 0) {
                          bucket.size = data.Datapoints[0].Average
                        }
                        resolvers[idx](bucket)
                      });

                    });
                }
            });
            return def.promise;
        },

        instances() {
            const def             = Promise.defer();
            const amis            = [];
            const vpcsInstanceIds = {};
            const reservations       = [];
            const instances       = [];

            ec2.describeInstances({}, function (err, data) {
                if (err) {
                    def.reject(err);
                } else {
                    const reservationPromise = [];
                    const reservationResolvers = {};
                    const instancePromise = [];
                    const instanceResolvers = {};

                    data.Reservations.forEach(function (reservation, idx) {
                      var p = new Promise( function(resolve, reject){
                        reservationResolvers[idx] = resolve;
                      });
                      reservationPromise.push(p);
                    });

                    Promise.all(reservationPromise).then(function() {
                      def.resolve(instances);
                    });

                    data.Reservations.forEach(function (reservation, idx) {

                        reservation.Instances.forEach(function (instance, iidx) {
                          var p = new Promise( function(resolve, reject){
                            instanceResolvers[`${idx}-${iidx}`] = resolve;
                          });
                          p.then(function(instance) {
                            instances.push(instance);
                          });
                          instancePromise.push(p);
                        });

                        Promise.all(instancePromise).then(function(values) {
                          reservationResolvers[idx](values);
                        });

                        reservation.Instances.forEach(function (instanceData, iidx) {
                            var instance = {
                                id:               instanceData.InstanceId,
                                state:            instanceData.State.Name,
                                type:             instanceData.InstanceType,
                                privateIpAddress: instanceData.PrivateIpAddress,
                                publicIpAddress:  instanceData.PublicIpAddress || null,
                                vpc:              instanceData.VpcId,
                                loadBalancers:    [],
                                securityGroups:   [],
                                load:             -1
                            };

                            instanceData.SecurityGroups.forEach(function (sg) {
                                instance.securityGroups.push(sg.GroupId);
                            });

                            instance.tags = {};
                            instanceData.Tags.forEach(function (tag) {
                                instance.tags[tag.Key.toLowerCase()] = tag.Value;
                            });

                            instance.name = instance.tags.name || null;

                            amis.push(instanceData.ImageId);

                            if (typeof instance.vpc != 'undefined') {
                                if (!vpcsInstanceIds[instance.vpc]) {
                                    vpcsInstanceIds[instance.vpc] = [];
                                }
                                vpcsInstanceIds[instance.vpc].push(instance.id);
                            }

                            var endTime = new Date()
                            var startTime = new Date()
                            // TODO: (07/04/16 benni) Use moment.js
                            startTime.setMinutes(startTime.getMinutes() - 10)
                            var params = {
                                StartTime: startTime.toISOString(),
                                EndTime: endTime.toISOString(),
                                Period: 60*10,
                                MetricName: 'CPUUtilization',
                                Namespace: 'AWS/EC2',
                                Statistics: [ 'Average' ],
                                Dimensions: [
                                  {
                                    Name: 'InstanceId',
                                    Value: instance.id
                                  }
                                ],
                                Unit: 'Percent'
                            };

                            cloudWatch.getMetricStatistics(params, function (err, data) {
                                if (!err && data && data.Datapoints && data.Datapoints.length > 0) {
                                  instance.load = data.Datapoints[0].Average
                                }
                                instanceResolvers[`${idx}-${iidx}`](instance)
                            });
                        });
                    });
                }
            });

            return def.promise;
        }
    };
};
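The buckets() method above collects resolver functions in an array and resolves them later from the CloudWatch callbacks. The same flow can be expressed by mapping each bucket straight to a promise; a sketch, assuming the same s3 and cloudWatch clients as the module above:

// Sketch of an alternative buckets() flow using Promise.all over mapped promises.
function listBucketsWithSizes(s3, cloudWatch) {
    return new Promise((resolve, reject) => {
        s3.listBuckets({}, (err, data) => {
            if (err) { return reject(err); }

            Promise.all(data.Buckets.map(bucketData => new Promise(done => {
                const bucket = {
                    name:         bucketData.Name,
                    creationDate: bucketData.CreationDate,
                    size:         -1
                };

                const endTime = new Date();
                const startTime = new Date();
                startTime.setDate(startTime.getDate() - 1);

                cloudWatch.getMetricStatistics({
                    StartTime: startTime.toISOString(),
                    EndTime: endTime.toISOString(),
                    Period: 60 * 60 * 24,
                    MetricName: 'BucketSizeBytes',
                    Namespace: 'AWS/S3',
                    Statistics: ['Average'],
                    Dimensions: [
                        { Name: 'BucketName', Value: bucket.name },
                        { Name: 'StorageType', Value: 'StandardStorage' }
                    ],
                    Unit: 'Bytes'
                }, (statErr, stats) => {
                    // Leave size at -1 when no datapoint is available.
                    if (!statErr && stats && stats.Datapoints && stats.Datapoints.length > 0) {
                        bucket.size = stats.Datapoints[0].Average;
                    }
                    done(bucket);
                });
            }))).then(resolve);
        });
    });
}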
Example #24
0
File: corsHeaders.js Project: scality/S3
 beforeEach(done => {
     s3.putBucketCors(corsParams, done);
 });
Example #25
0
var crypto = require('crypto');
var sprom = require('sprom');
var async = require('async');
var fs = require('fs');
var AWS = require('aws-sdk');

var FILENAME = 'pageranks-with-descriptions';

var sha1 = function(input){
    var str = crypto.createHash('sha1').update(input).digest('hex');
    while (str.charAt(0) === '0') {
      str = str.substring(1);
    }
    return str;
};

var s3 = new AWS.S3();
var pageRankParams = {Bucket: 'cis555-bucket', Key: 'pagerank-out/part-r-00000'};

fs.truncateSync(FILENAME, 0);

var idx = 0;
var errors = 0;
var delimiter = '\u2603\u2603\u2603';
var worker = function (line, callback) {
    idx++;
    if (idx % 100 === 0) {
      console.log('processed ' + idx + ' docs; ' + errors + ' errors so far')
    }
    var urlPageRank = line.split('\t');
    var filename = sha1(urlPageRank[0]);
    var htmlParams = {Bucket: 'cis555-bucket', Key: 'crawl-real/' + filename};
Example #26
0
            imageSchema.virtual('image_s3').get(function(){
                var path = require('path');
                var mkdirp = require('mkdirp');

                var AWS = require('aws-sdk');
                AWS.config.update(app.njax.config.aws);
                var s3 = new AWS.S3();
                var _this = this;

                return {
                    url:'http://s3.amazonaws.com/' + app.njax.config.aws.bucket_name  +  '/' + this.image,
                    getFile:function(local_file_path, callback){
                        if(!callback && _.isFunction(local_file_path)){
                            callback = local_file_path;
                            local_file_path = _this.image;
                        }
                        /*if(!local_file_path || (!app.njax.isTmpdir(local_file_path)){
                            local_file_path = app.njax.tmpdir(local_file_path);
                        }*/
                        var dir_name = path.dirname(local_file_path);
                        if(!fs.existsSync(dir_name)){
                            mkdirp.sync(dir_name);
                        }
                        if(app.njax.config.local_file_cache){
                            var cache_path = app.njax.cachedir(_this.image);
                            var content = null;
                            if(fs.existsSync(cache_path)){
                                content = fs.readFileSync(
                                    cache_path
                                );
                            }


                            fs.writeFileSync(
                                local_file_path,
                                content
                            );
                            return callback(null,content, local_file_path);
                        }
                        
                        async.series([
                            function(cb){
                                mkdirp(path.dirname(local_file_path), function (err) {
                                    if(err) return callback(err);
                                    return cb();
                                });
                            },
                            function(cb){
                                var stream = require('fs').createWriteStream(local_file_path);
                                var params = {
                                    Bucket: app.njax.config.aws.bucket_name,
                                    Key:_this.image
                                }
                                var body = '';
                                s3.getObject(params).
                                    on('error', function(err, response) {
                                        if(err) return callback(err, response);
                                    }).
                                    on('httpData',function (chunk) {
                                        stream.write(chunk);
                                        body += chunk;
                                    }).
                                    on('httpDone',function () {
                                        stream.end(null, null, function(){
                                            callback(null, body, local_file_path);
                                        });

                                    }).
                                    send();
                            }
                        ]);
                    },
                    setFile:function(file_path, callback){
                        var content = fs.readFileSync(file_path);
                        async.series([
                            function(cb){
                                var params = {
                                    Bucket: app.njax.config.aws.bucket_name,
                                    Key: file_path,
                                    Body: content,
                                    ACL: 'public-read',
                                    ContentLength: content.length
                                };
                                s3.putObject(params, function (err, aws_ref) {
                                    if (err) {
                                        return callback(err);
                                    }
                                    _this.image = file_path;
                                    return cb(null);
                                });
                            },
                            function(cb){
                                _this.save(function(err){
                                    if(err) return callback(err);
                                    return cb();
                                });
                            },
                            function(cb){
                                return callback();
                            }
                        ]);
                    }
                }
            });
Example #27
0
const props = properties.load('s3watcher');

const s3IngestBucket = props['s3.ingest.bucket'];

const config = {
    region:      props['aws.region'],
    baseUrl:     props['loader.uri'],
    apiKey:      props['auth.key.s3watcher'],
    failBucket:  props['s3.fail.bucket'],
    s3UrlExpiry: 60,
    stage:       'DEV'
};
const configJson = JSON.stringify(config, null, 2);

const s3 = new AWS.S3({});
console.log('Writing to s3://' +s3IngestBucket+ '/config.json');

s3.putObject({
    Bucket: s3IngestBucket,
    Key: 'config.json',
    Body: configJson
}, function(err, data) {
    if (err) {
        console.error("Failed to upload: ", err);
        process.exit(1);
    }

    console.log('Done');
});
Example #28
0
 after(() => {
     awsSdk.S3.restore();
     http.request.restore();
     path.join.restore();
 });
Example #29
0
      fse.copySync(testServiceDir, tmpDir, { clobber: true, preserveTimestamps: true });
    }

    replaceTextInFile('serverless.yml', templateName, serviceName);

    process.env.TOPIC_1 = `${serviceName}-1`;
    process.env.TOPIC_2 = `${serviceName}-2`;
    process.env.BUCKET_1 = `${serviceName}-1`;
    process.env.BUCKET_2 = `${serviceName}-2`;

    // return the name of the CloudFormation stack
    return `${serviceName}-dev`;
  },

  createAndRemoveInBucket(bucketName) {
    const S3 = new AWS.S3({ region: 'us-east-1' });
    BbPromise.promisifyAll(S3, { suffix: 'Promised' });

    const params = {
      Bucket: bucketName,
      Key: 'object',
      Body: 'hello world',
    };

    return S3.putObjectPromised(params)
      .then(() => {
        delete params.Body;
        return S3.deleteObjectPromised(params);
      });
  },
Example #30
0
File: s3.js Project: bermi/grunt-s3
  exports.pull = exports.download = function (src, dest, opts) {
    var dfd = new _.Deferred();
    var options = _.clone(opts);

    // Pick out the configuration options we need for the client.
    options = _.pick(options, [ 'region', 'endpoint', 'port', 'key', 'secret', 'access', 'bucket', 'debug' ]);

    updateAmazonConfig(options.key, options.secret);

    var client = new aws.S3(options);

    if (options.debug) {
      return dfd.resolve(util.format(MSG_DOWNLOAD_DEBUG, client.bucket, src, path.relative(process.cwd(), dest))).promise();
    }

    // Create a local stream we can write the downloaded file to.
    var file = fs.createWriteStream(dest);

    // get the file from s3
    client.getObject({ Bucket: options.bucket, Key: src }, function(err, res) {
      // If there was a download error or no RequestId, assume something went wrong.
      if (err || !res.RequestId) {
        return dfd.reject(makeError(MSG_ERR_DOWNLOAD, src, err || res.statusCode));
      }

      // The etag head in the response from s3 has double quotes around it.
      // Strip them out.
      var remoteHash = res.ETag.replace(/"/g, '');

      // Get an md5 of the local file so we can verify the download.
      var localHash = crypto.createHash('md5').update(res.Body).digest('hex');

      if (remoteHash !== localHash) {
        return dfd.reject(makeError(MSG_ERR_CHECKSUM, 'Download', localHash, remoteHash, src));
      }

      if(res.ContentEncoding == 'gzip') {
        zlib.gunzip(res.Body, function(err, data){
          if(err) {
            dfd.reject(makeError(MSG_ERR_DOWNLOAD, src, err));
          }
          else {
            fs.writeFile(dest, data, function(err){
              if (err) {
                dfd.reject(err);
              }
              else {
                var msg = util.format(MSG_DOWNLOAD_SUCCESS, src, localHash);
                dfd.resolve(msg);
              }
            });
          }
        });
      }
      else {
        fs.writeFile(dest, res.Body, function(err){
          if (err) {
            dfd.reject(err);
          }
          else {
            var msg = util.format(MSG_DOWNLOAD_SUCCESS, src, localHash);
            dfd.resolve(msg);
          }
        });
      }

    });

    return dfd.promise();
  };