forked from SaltwaterC/aws2js
-
Notifications
You must be signed in to change notification settings - Fork 0
S3 Client
SaltwaterC edited this page Jul 30, 2012
·
55 revisions
aws.load('s3', [accessKeyId], [secretAccessKey])
- s3.get(path, [query], resBodyHandler, callback)
- s3.head(path, callback)
- s3.del(path, callback)
- s3.put(path, headers, reqBodyHandler, callback)
- s3.post(path, headers, reqBodyHandler, callback)
- s3.signUrl(protocol, method, path, expires, headers)
The S3 POST method is not fully implemented; its use case can usually be covered by s3.put(). Supporting POST uploads would require implementing multipart/form-data in the API — a lot of overhead for no practical benefit. The existing low-level s3.post() method is used internally by the S3 Multipart API.
- s3.setCredentials(accessKeyId, secretAccessKey)
- s3.setMaxSockets(numberOfSockets)
- s3.getEndPoint()
- s3.setBucket(bucketName)
- s3.setEndPoint(endpointPrefix)
- s3.createBucket(bucketName, cannedAcl, region, callback)
- s3.setBucketAcl(bucketName, cannedAcl, callback)
- s3.putFile(path, file, cannedAcl, headers, callback)
- s3.putStream(path, stream, cannedAcl, headers, callback)
- s3.putBuffer(path, buffer, cannedAcl, headers, callback)
- s3.setObjectAcl(path, cannedAcl, callback)
- s3.setObjectMeta(path, cannedAcl, headers, callback)
- s3.getLifeCycle(callback)
- s3.delLifeCycle(callback)
- s3.putLifeCycleRule(id, prefix, expireInDays, callback)
- s3.delLifeCycleRule(id, callback)
- s3.copyObject(source, destination, acl, [headers], callback)
- s3.moveObject(source, destination, acl, [headers], callback)
- s3.initUpload(path, cannedAcl, headers, callback)
- s3.abortUpload(path, uploadId, callback)
- s3.completeUpload(path, uploadId, uploadParts, callback)
- s3.putFilePart(path, partNumber, uploadId, fileHandler, callback)
- s3.putStreamPart(path, partNumber, uploadId, stream, headers, callback)
- s3.putBufferPart(path, partNumber, uploadId, buffer, callback)
- s3.putFileMultipart(path, file, cannedAcl, headers, partSize, callback)
// load the aws2js library, then instantiate the S3 client with the credentials
var aws = require('aws2js');
var s3 = aws.load('s3', accessKeyId, secretAccessKey);
All the API specific actions can be made by using the low level methods. The S3 helpers are provided for convenience.
// create the "foo" bucket; passing false for both the canned ACL and the
// region selects the defaults: private access in the us-east-1 region
var onFooBucketCreated = function (error, result) {
};
s3.createBucket('foo', false, false, onFooBucketCreated);
// create the "bar" bucket with the public-read canned ACL in the
// us-west-1 (Northern California) region
var onBarBucketCreated = function (error, result) {
};
s3.createBucket('bar', 'public-read', 'us-west-1', onBarBucketCreated);
// make the existing "bar" bucket privately accessible
var onBucketAclSet = function (error, result) {
};
s3.setBucketAcl('bar', 'private', onBucketAclSet);
// upload the local file /path/to/foo.png into the "foo" bucket as foo.png
// with the public-read canned ACL; {} means no extra request headers
s3.setBucket('foo');
var onFileUploaded = function (error, result) {
};
s3.putFile('foo.png', '/path/to/foo.png', 'public-read', {}, onFileUploaded);
// uploads the buffer contents as the S3 object contents.
// Buffer.from() replaces `new Buffer()`, which is deprecated and unsafe
// (see Node.js Buffer documentation).
s3.setBucket('foo');
s3.putBuffer('foo.txt', Buffer.from('bar', 'utf8'), 'public-read', {'content-type': 'text/plain'}, function (error, result) {
});
// uploads the Readable Stream contents as the S3 object contents.
// fs.createReadStream() is the supported factory; invoking fs.ReadStream()
// directly is discouraged by the Node.js API. The example also needs fs in scope.
var fs = require('fs');
s3.setBucket('foo');
var stream = fs.createReadStream('./foo.txt'); // may be any object implementing the Readable Stream interface
// NOTE(review): streamLength must be set to the byte length of the stream's
// contents before this call — presumably S3 needs content-length up front; confirm
s3.putStream('foo.txt', stream, 'public-read', {'content-type': 'text/plain', 'content-length': streamLength}, function (error, result) {
});
// switch the existing bar.jpg object of the "bar" bucket to the private canned ACL
s3.setBucket('bar');
var onObjectAclSet = function (error, result) {
};
s3.setObjectAcl('bar.jpg', 'private', onObjectAclSet);
// changes the object meta data in order to specify the cache control header;
// the canned ACL is passed again since the metadata is rewritten as a whole.
// Fix: the closing `})` was missing its terminating semicolon, unlike every
// other example on this page.
s3.setBucket('foo');
s3.setObjectMeta('foo.png', 'public-read', {'cache-control': 'public'}, function (error, result) {
});