S3Client
Note

In certain cases, using this library's operations can impact performance and skew your test results.

To ensure accurate results, consider performing these operations in the `setup` and `teardown` lifecycle functions. These functions run before and after the test run and do not affect the test results.
`S3Client` interacts with AWS Simple Storage Service (S3). With it, you can perform operations such as listing buckets, listing the objects contained in a bucket, or downloading objects from a bucket. For the complete list of supported operations, see Methods.

Both the dedicated `s3.js` jslib bundle and the all-in-one `aws.js` bundle include the `S3Client`.
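As a quick illustration of the note above, here is a minimal sketch that keeps all S3 traffic inside the `setup` and `teardown` lifecycle functions, so it stays out of the measured test results. The bucket name and object key are placeholders chosen for this example, not values the library provides:

```javascript
import { AWSConfig, S3Client } from 'https://jslib.k6.io/aws/0.13.0/s3.js';

const awsConfig = new AWSConfig({
  region: __ENV.AWS_REGION,
  accessKeyId: __ENV.AWS_ACCESS_KEY_ID,
  secretAccessKey: __ENV.AWS_SECRET_ACCESS_KEY,
});

const s3 = new S3Client(awsConfig);

// Placeholder names used for illustration only.
const bucketName = 'my-test-bucket';
const fixtureKey = 'fixture.json';

export async function setup() {
  // Upload the fixture before the test starts, so the S3 call
  // does not count towards the test's metrics.
  await s3.putObject(bucketName, fixtureKey, JSON.stringify({ ready: true }));
}

export default function () {
  // The actual test logic goes here; it can assume the fixture exists.
}

export async function teardown() {
  // Clean up once the whole test run has finished.
  await s3.deleteObject(bucketName, fixtureKey);
}
```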
Methods
Function | Description |
---|---|
listBuckets() | Lists the buckets the authenticated user has access to |
listObjects(bucketName, [prefix]) | Lists the objects contained in a bucket |
getObject(bucketName, objectKey) | Downloads an object from a bucket |
putObject(bucketName, objectKey, data) | Uploads an object to a bucket |
deleteObject(bucketName, objectKey) | Deletes an object from a bucket |
copyObject(sourceBucket, sourceKey, destinationBucket, destinationKey) | Copies an object from one bucket to another |
createMultipartUpload(bucketName, objectKey) | Creates a multipart upload for the given objectKey in a bucket |
uploadPart(bucketName, objectKey, uploadId, partNumber, data) | Uploads a part in a multipart upload |
completeMultipartUpload(bucketName, objectKey, uploadId, parts) | Completes a previously assembled multipart upload |
abortMultipartUpload(bucketName, objectKey, uploadId) | Aborts a multipart upload |
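Several of these methods can be chained within a single default function. The following sketch uploads, copies, downloads, and then deletes an object; the bucket and key names are placeholders for illustration, so substitute a bucket you control:

```javascript
import { AWSConfig, S3Client } from 'https://jslib.k6.io/aws/0.13.0/s3.js';

const awsConfig = new AWSConfig({
  region: __ENV.AWS_REGION,
  accessKeyId: __ENV.AWS_ACCESS_KEY_ID,
  secretAccessKey: __ENV.AWS_SECRET_ACCESS_KEY,
});

const s3 = new S3Client(awsConfig);

// Placeholder bucket name used for illustration only.
const bucketName = 'my-test-bucket';

export default async function () {
  // Upload a small text object.
  await s3.putObject(bucketName, 'hello.txt', 'Hello from k6!');

  // Copy it to a new key within the same bucket.
  await s3.copyObject(bucketName, 'hello.txt', bucketName, 'hello-copy.txt');

  // Download the copy and inspect its content.
  const obj = await s3.getObject(bucketName, 'hello-copy.txt');
  console.log(obj.data);

  // Clean up both objects.
  await s3.deleteObject(bucketName, 'hello.txt');
  await s3.deleteObject(bucketName, 'hello-copy.txt');
}
```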
Throws

S3 Client methods throw errors on failure.
Error | Condition |
---|---|
InvalidSignatureError | When invalid credentials were provided. |
S3ServiceError | When AWS replied to the requested operation with an error. |
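Because the methods throw, a regular `try`/`catch` block is enough to handle failures explicitly. Here is a minimal sketch, assuming a bucket name that the credentials cannot access:

```javascript
import { AWSConfig, S3Client } from 'https://jslib.k6.io/aws/0.13.0/s3.js';

const awsConfig = new AWSConfig({
  region: __ENV.AWS_REGION,
  accessKeyId: __ENV.AWS_ACCESS_KEY_ID,
  secretAccessKey: __ENV.AWS_SECRET_ACCESS_KEY,
});

const s3 = new S3Client(awsConfig);

export default async function () {
  try {
    // Listing objects in a bucket that does not exist (or that the
    // credentials cannot access) makes the client throw.
    await s3.listObjects('a-bucket-that-does-not-exist');
  } catch (err) {
    // err.name distinguishes, for instance, an InvalidSignatureError
    // from an S3ServiceError.
    console.error(`${err.name}: ${err.message}`);
  }
}
```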
Example
```javascript
import { check } from 'k6';
import exec from 'k6/execution';
import http from 'k6/http';
import {
AWSConfig,
S3Client,
} from 'https://jslib.k6.io/aws/0.13.0/s3.js';
const awsConfig = new AWSConfig({
region: __ENV.AWS_REGION,
accessKeyId: __ENV.AWS_ACCESS_KEY_ID,
secretAccessKey: __ENV.AWS_SECRET_ACCESS_KEY,
});
const s3 = new S3Client(awsConfig);
const testBucketName = 'test-jslib-aws';
const testInputFileKey = 'productIDs.json';
const testOutputFileKey = `results-${Date.now()}.json`;
export async function setup() {
// If our test bucket does not exist, abort the execution.
const buckets = await s3.listBuckets();
if (buckets.filter((b) => b.name === testBucketName).length == 0) {
exec.test.abort();
}
// If our test object does not exist, abort the execution.
const objects = await s3.listObjects(testBucketName);
if (objects.filter((o) => o.key === testInputFileKey).length == 0) {
exec.test.abort();
}
// Download the S3 object containing our test data
const inputObject = await s3.getObject(testBucketName, testInputFileKey);
// Let's return the downloaded S3 object's data from the
// setup function to allow the default function to use it.
return {
productIDs: JSON.parse(inputObject.data),
};
}
export default async function (data) {
// Pick a random product ID from our test data
const randomProductID = data.productIDs[Math.floor(Math.random() * data.productIDs.length)];
// Query our ecommerce website's product page using the ID
const res = await http.asyncRequest('GET', `http://your.website.com/product/${randomProductID}/`);
check(res, { 'is status 200': res.status === 200 });
}
export async function handleSummary(data) {
// Once the load test is over, let's upload the results to our
// S3 bucket. This is executed after teardown.
await s3.putObject(testBucketName, testOutputFileKey, JSON.stringify(data));
}
```
Multipart uploads
```javascript
import crypto from 'k6/crypto';
import exec from 'k6/execution';
import {
AWSConfig,
S3Client,
} from 'https://jslib.k6.io/aws/0.13.0/s3.js';
const awsConfig = new AWSConfig({
region: __ENV.AWS_REGION,
accessKeyId: __ENV.AWS_ACCESS_KEY_ID,
secretAccessKey: __ENV.AWS_SECRET_ACCESS_KEY,
sessionToken: __ENV.AWS_SESSION_TOKEN,
});
const s3 = new S3Client(awsConfig);
const testBucketName = 'test-jslib-aws';
const testFileKey = 'multipart.txt';
export default async function () {
// List the buckets the AWS authentication configuration
// gives us access to.
const buckets = await s3.listBuckets();
// If our test bucket does not exist, abort the execution.
if (buckets.filter((b) => b.name === testBucketName).length == 0) {
exec.test.abort();
}
// Produce random bytes to upload of size ~12MB, that
// we will upload in two 6MB parts. This is done as the
// minimum part size supported by S3 is 5MB.
const bigFile = crypto.randomBytes(12 * 1024 * 1024);
// Initialize a multipart upload
const multipartUpload = await s3.createMultipartUpload(testBucketName, testFileKey);
// Upload the first part
const firstPartData = bigFile.slice(0, 6 * 1024 * 1024);
const firstPart = await s3.uploadPart(
testBucketName,
testFileKey,
multipartUpload.uploadId,
1,
firstPartData
);
// Upload the second part
const secondPartData = bigFile.slice(6 * 1024 * 1024, 12 * 1024 * 1024);
const secondPart = await s3.uploadPart(
testBucketName,
testFileKey,
multipartUpload.uploadId,
2,
secondPartData
);
// Complete the multipart upload
await s3.completeMultipartUpload(testBucketName, testFileKey, multipartUpload.uploadId, [
firstPart,
secondPart,
]);
// Let's redownload the object to verify it's correct, and then delete it
const obj = await s3.getObject(testBucketName, testFileKey);
await s3.deleteObject(testBucketName, testFileKey);
}
```
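When a multipart upload cannot be completed, `abortMultipartUpload` tells S3 to discard the parts that were already uploaded. Here is a minimal sketch of that clean-up path, reusing the same bucket and key names as above and simulating a failure mid-upload:

```javascript
import crypto from 'k6/crypto';
import { AWSConfig, S3Client } from 'https://jslib.k6.io/aws/0.13.0/s3.js';

const awsConfig = new AWSConfig({
  region: __ENV.AWS_REGION,
  accessKeyId: __ENV.AWS_ACCESS_KEY_ID,
  secretAccessKey: __ENV.AWS_SECRET_ACCESS_KEY,
});

const s3 = new S3Client(awsConfig);

const testBucketName = 'test-jslib-aws';
const testFileKey = 'multipart.txt';

export default async function () {
  // Initialize a multipart upload
  const upload = await s3.createMultipartUpload(testBucketName, testFileKey);

  try {
    // Upload a single 6MB part...
    await s3.uploadPart(
      testBucketName,
      testFileKey,
      upload.uploadId,
      1,
      crypto.randomBytes(6 * 1024 * 1024)
    );

    // ...then simulate a failure before the upload can be completed.
    throw new Error('simulated failure');
  } catch (err) {
    // Abort the upload so S3 discards the parts that were
    // already uploaded instead of retaining them.
    await s3.abortMultipartUpload(testBucketName, testFileKey, upload.uploadId);
  }
}
```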