diff --git a/tuts/176-s3-encryption/README.md b/tuts/176-s3-encryption/README.md
new file mode 100644
index 00000000..07b47f31
--- /dev/null
+++ b/tuts/176-s3-encryption/README.md
@@ -0,0 +1,36 @@
+# S3 Encryption
+
+An AWS CLI tutorial that demonstrates S3 operations.
+
+## Running
+
+```bash
+bash s3-encryption.sh
+```
+
+To auto-run with cleanup:
+
+```bash
+echo 'y' | bash s3-encryption.sh
+```
+
+## What it does
+
+1. Creating bucket
+2. Enabling SSE-S3
+3. Checking encryption
+4. Uploading encrypted object
+
+## Resources created
+
+- Bucket
+- Bucket Encryption
+
+The script prompts you to clean up resources when it finishes.
+
+## Cost
+
+Free tier eligible for most operations. Clean up resources after use to avoid charges.
+
+## Related docs
+
+- [AWS CLI s3 reference](https://docs.aws.amazon.com/cli/latest/reference/s3/index.html)
+- [AWS CLI s3api reference](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+
diff --git a/tuts/176-s3-encryption/REVISION-HISTORY.md b/tuts/176-s3-encryption/REVISION-HISTORY.md
new file mode 100644
index 00000000..ea668c9c
--- /dev/null
+++ b/tuts/176-s3-encryption/REVISION-HISTORY.md
@@ -0,0 +1,8 @@
+# Revision History: 176-s3-encryption
+
+## Shell (CLI script)
+
+### 2026-04-14 v1 published
+- Type: functional
+- Initial version
+
diff --git a/tuts/176-s3-encryption/s3-encryption.md b/tuts/176-s3-encryption/s3-encryption.md
new file mode 100644
index 00000000..fd8c2846
--- /dev/null
+++ b/tuts/176-s3-encryption/s3-encryption.md
@@ -0,0 +1,15 @@
+# S3 Encryption
+
+## Prerequisites
+
+1. AWS CLI installed and configured (`aws configure`)
+2. Appropriate IAM permissions for the AWS services used
+
+## Step 1: Creating bucket
+
+The script handles this step automatically. See `s3-encryption.sh` for the exact CLI commands.
+
+## Step 2: Enabling SSE-S3
+
+The script handles this step automatically. See `s3-encryption.sh` for the exact CLI commands.
+
+## Step 3: Checking encryption
+
+The script handles this step automatically. See `s3-encryption.sh` for the exact CLI commands.
+
+## Step 4: Uploading encrypted object
+
+The script handles this step automatically. See `s3-encryption.sh` for the exact CLI commands.
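+
+For reference, a minimal sketch of the calls the script makes for these steps (the bucket name is a placeholder; the script generates a random, account-suffixed name):
+
+```bash
+# Enable SSE-S3 (AES256) as the bucket's default encryption
+aws s3api put-bucket-encryption --bucket amzn-s3-demo-bucket --server-side-encryption-configuration '{"Rules":[{"ApplyServerSideEncryptionByDefault":{"SSEAlgorithm":"AES256"}}]}'
+
+# Confirm the default encryption configuration
+aws s3api get-bucket-encryption --bucket amzn-s3-demo-bucket
+
+# Upload an object and check that it was encrypted at rest
+aws s3 cp ./enc.txt s3://amzn-s3-demo-bucket/test.txt
+aws s3api head-object --bucket amzn-s3-demo-bucket --key test.txt --query ServerSideEncryption
+```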
+
+## Cleanup
+
+The script prompts you to clean up all created resources. If you need to clean up manually, check the script log for the resource names that were created.
+
diff --git a/tuts/176-s3-encryption/s3-encryption.sh b/tuts/176-s3-encryption/s3-encryption.sh
new file mode 100644
index 00000000..66e6e65d
--- /dev/null
+++ b/tuts/176-s3-encryption/s3-encryption.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+WORK_DIR=$(mktemp -d); exec > >(tee -a "$WORK_DIR/s3-encryption.log") 2>&1
+REGION=${AWS_DEFAULT_REGION:-${AWS_REGION:-$(aws configure get region 2>/dev/null)}}; [ -z "$REGION" ] && echo "ERROR: No region" && exit 1; export AWS_DEFAULT_REGION="$REGION"; echo "Region: $REGION"
+echo "Step 1: Creating bucket"; B="enc-tut-$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 8 | head -n 1)-$(aws sts get-caller-identity --query Account --output text)"; aws s3api create-bucket --bucket "$B" > /dev/null; echo "Step 2: Enabling SSE-S3"; aws s3api put-bucket-encryption --bucket "$B" --server-side-encryption-configuration '{"Rules":[{"ApplyServerSideEncryptionByDefault":{"SSEAlgorithm":"AES256"}}]}'; echo "Step 3: Checking encryption"; aws s3api get-bucket-encryption --bucket "$B" --query "ServerSideEncryptionConfiguration.Rules[0].ApplyServerSideEncryptionByDefault" --output table; echo "Step 4: Uploading encrypted object"; echo test > /tmp/enc.txt; aws s3 cp /tmp/enc.txt "s3://$B/test.txt" --quiet; aws s3api head-object --bucket "$B" --key test.txt --query "{Encryption:ServerSideEncryption}" --output table; echo "Do you want to clean up? (y/n): "; read -r C; [[ "$C" =~ ^[Yy]$ ]] && { aws s3 rm "s3://$B" --recursive --quiet; aws s3 rb "s3://$B"; echo Done; }
diff --git a/tuts/180-s3-access-points/README.md b/tuts/180-s3-access-points/README.md
new file mode 100644
index 00000000..83c47834
--- /dev/null
+++ b/tuts/180-s3-access-points/README.md
@@ -0,0 +1,40 @@
+# S3 Access Points
+
+An AWS CLI tutorial that demonstrates S3 operations.
+
+## Running
+
+```bash
+bash s3-access-points.sh
+```
+
+To auto-run with cleanup:
+
+```bash
+echo 'y' | bash s3-access-points.sh
+```
+
+## What it does
+
+1. Creating bucket
+2. Creating access point
+3. Getting access point details
+4. Listing access points
+
+## Resources created
+
+- Access Point
+- Bucket
+
+The script prompts you to clean up resources when it finishes.
+
+## Cost
+
+Free tier eligible for most operations. Clean up resources after use to avoid charges.
+
+## Related docs
+
+- [AWS CLI s3 reference](https://docs.aws.amazon.com/cli/latest/reference/s3/index.html)
+- [AWS CLI s3api reference](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+- [AWS CLI s3control reference](https://docs.aws.amazon.com/cli/latest/reference/s3control/index.html)
+
diff --git a/tuts/180-s3-access-points/REVISION-HISTORY.md b/tuts/180-s3-access-points/REVISION-HISTORY.md
new file mode 100644
index 00000000..caa03c09
--- /dev/null
+++ b/tuts/180-s3-access-points/REVISION-HISTORY.md
@@ -0,0 +1,8 @@
+# Revision History: 180-s3-access-points
+
+## Shell (CLI script)
+
+### 2026-04-14 v1 published
+- Type: functional
+- Initial version
+
diff --git a/tuts/180-s3-access-points/s3-access-points.md b/tuts/180-s3-access-points/s3-access-points.md
new file mode 100644
index 00000000..75078d11
--- /dev/null
+++ b/tuts/180-s3-access-points/s3-access-points.md
@@ -0,0 +1,27 @@
+# S3 Access Points
+
+## Prerequisites
+
+1. AWS CLI installed and configured (`aws configure`)
+2. Appropriate IAM permissions for the AWS services used
+
+## Step 1: Creating bucket
+
+The script handles this step automatically. See `s3-access-points.sh` for the exact CLI commands.
+
+## Step 2: Creating access point
+
+The script handles this step automatically. See `s3-access-points.sh` for the exact CLI commands.
+
+## Step 3: Getting access point details
+
+The script handles this step automatically. See `s3-access-points.sh` for the exact CLI commands.
+
+## Step 4: Listing access points
+
+The script handles this step automatically. See `s3-access-points.sh` for the exact CLI commands.
+
+## Cleanup
+
+The script prompts you to clean up all created resources. If you need to clean up manually, check the script log for the resource names that were created.
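+
+For reference, a minimal sketch of the s3control calls the script makes (the account ID, bucket, and access point names are placeholders):
+
+```bash
+# Create an access point attached to an existing bucket
+aws s3control create-access-point --account-id 111122223333 --name tut-ap-example --bucket amzn-s3-demo-bucket
+
+# Describe it and list all access points on the bucket
+aws s3control get-access-point --account-id 111122223333 --name tut-ap-example
+aws s3control list-access-points --account-id 111122223333 --bucket amzn-s3-demo-bucket
+
+# Delete it during cleanup
+aws s3control delete-access-point --account-id 111122223333 --name tut-ap-example
+```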
+
diff --git a/tuts/180-s3-access-points/s3-access-points.sh b/tuts/180-s3-access-points/s3-access-points.sh
new file mode 100644
index 00000000..19640282
--- /dev/null
+++ b/tuts/180-s3-access-points/s3-access-points.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+WORK_DIR=$(mktemp -d); exec > >(tee -a "$WORK_DIR/ap.log") 2>&1
+REGION=${AWS_DEFAULT_REGION:-${AWS_REGION:-$(aws configure get region 2>/dev/null)}}; [ -z "$REGION" ] && echo "ERROR: No region" && exit 1; export AWS_DEFAULT_REGION="$REGION"; ACCOUNT=$(aws sts get-caller-identity --query Account --output text); echo "Region: $REGION"
+RANDOM_ID=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 8 | head -n 1); BUCKET="ap-tut-${RANDOM_ID}-${ACCOUNT}"; AP_NAME="tut-ap-${RANDOM_ID}"
+handle_error() { echo "ERROR on line $1"; trap - ERR; cleanup; exit 1; }; trap 'handle_error $LINENO' ERR
+cleanup() { echo ""; echo "Cleaning up..."; aws s3control delete-access-point --account-id "$ACCOUNT" --name "$AP_NAME" 2>/dev/null && echo " Deleted access point"; aws s3 rm "s3://$BUCKET" --recursive --quiet 2>/dev/null; aws s3 rb "s3://$BUCKET" 2>/dev/null && echo " Deleted bucket"; rm -rf "$WORK_DIR"; echo "Done."; }
+echo "Step 1: Creating bucket"
+if [ "$REGION" = "us-east-1" ]; then aws s3api create-bucket --bucket "$BUCKET" > /dev/null; else aws s3api create-bucket --bucket "$BUCKET" --create-bucket-configuration LocationConstraint="$REGION" > /dev/null; fi
+echo "Step 2: Creating access point: $AP_NAME"
+aws s3control create-access-point --account-id "$ACCOUNT" --name "$AP_NAME" --bucket "$BUCKET" > /dev/null
+echo " Access point created"
+echo "Step 3: Getting access point details"
+aws s3control get-access-point --account-id "$ACCOUNT" --name "$AP_NAME" --query '{Name:Name,Bucket:Bucket,NetworkOrigin:NetworkOrigin}' --output table
+echo "Step 4: Listing access points"
+aws s3control list-access-points --account-id "$ACCOUNT" --bucket "$BUCKET" --query 'AccessPointList[].{Name:Name,Bucket:Bucket}' --output table
+echo ""; echo "Tutorial complete."
+echo "Do you want to clean up? (y/n): "; read -r CHOICE; [[ "$CHOICE" =~ ^[Yy]$ ]] && cleanup
diff --git a/tuts/186-s3-object-lock/README.md b/tuts/186-s3-object-lock/README.md
new file mode 100644
index 00000000..565f08e8
--- /dev/null
+++ b/tuts/186-s3-object-lock/README.md
@@ -0,0 +1,41 @@
+# S3 Object Lock
+
+An AWS CLI tutorial that demonstrates S3 operations.
+
+## Running
+
+```bash
+bash s3-object-lock.sh
+```
+
+To auto-run with cleanup:
+
+```bash
+echo 'y' | bash s3-object-lock.sh
+```
+
+## What it does
+
+1. Creating bucket with Object Lock
+2. Setting default retention (1 day governance mode)
+3. Getting lock configuration
+4. Uploading a locked object
+5. Verifying lock
+
+## Resources created
+
+- Bucket
+- Bucket Versioning
+- Object Lock Configuration
+
+The script prompts you to clean up resources when it finishes.
+
+## Cost
+
+Free tier eligible for most operations. Clean up resources after use to avoid charges.
+
+## Related docs
+
+- [AWS CLI s3 reference](https://docs.aws.amazon.com/cli/latest/reference/s3/index.html)
+- [AWS CLI s3api reference](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+
diff --git a/tuts/186-s3-object-lock/REVISION-HISTORY.md b/tuts/186-s3-object-lock/REVISION-HISTORY.md
new file mode 100644
index 00000000..753182ed
--- /dev/null
+++ b/tuts/186-s3-object-lock/REVISION-HISTORY.md
@@ -0,0 +1,8 @@
+# Revision History: 186-s3-object-lock
+
+## Shell (CLI script)
+
+### 2026-04-14 v1 published
+- Type: functional
+- Initial version
+
diff --git a/tuts/186-s3-object-lock/s3-object-lock.md b/tuts/186-s3-object-lock/s3-object-lock.md
new file mode 100644
index 00000000..50141a8c
--- /dev/null
+++ b/tuts/186-s3-object-lock/s3-object-lock.md
@@ -0,0 +1,31 @@
+# S3 Object Lock
+
+## Prerequisites
+
+1. AWS CLI installed and configured (`aws configure`)
+2. Appropriate IAM permissions for the AWS services used
+
+## Step 1: Creating bucket with Object Lock
+
+The script handles this step automatically. See `s3-object-lock.sh` for the exact CLI commands.
+
+## Step 2: Setting default retention (1 day governance mode)
+
+The script handles this step automatically. See `s3-object-lock.sh` for the exact CLI commands.
+
+## Step 3: Getting lock configuration
+
+The script handles this step automatically. See `s3-object-lock.sh` for the exact CLI commands.
+
+## Step 4: Uploading a locked object
+
+The script handles this step automatically. See `s3-object-lock.sh` for the exact CLI commands.
+
+## Step 5: Verifying lock
+
+The script handles this step automatically. See `s3-object-lock.sh` for the exact CLI commands.
+
+## Cleanup
+
+The script prompts you to clean up all created resources. If you need to clean up manually, check the script log for the resource names that were created.
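+
+For reference, a minimal sketch of the calls the script makes (the bucket name is a placeholder):
+
+```bash
+# Create a bucket with Object Lock enabled and versioning turned on
+aws s3api create-bucket --bucket amzn-s3-demo-bucket --object-lock-enabled-for-bucket
+aws s3api put-bucket-versioning --bucket amzn-s3-demo-bucket --versioning-configuration Status=Enabled
+
+# Apply a 1-day GOVERNANCE-mode default retention rule
+aws s3api put-object-lock-configuration --bucket amzn-s3-demo-bucket --object-lock-configuration '{"ObjectLockEnabled":"Enabled","Rule":{"DefaultRetention":{"Mode":"GOVERNANCE","Days":1}}}'
+
+# Inspect the retention applied to an uploaded object
+aws s3api head-object --bucket amzn-s3-demo-bucket --key data.txt --query '{Lock:ObjectLockMode,Retain:ObjectLockRetainUntilDate}'
+```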
+
diff --git a/tuts/186-s3-object-lock/s3-object-lock.sh b/tuts/186-s3-object-lock/s3-object-lock.sh
new file mode 100644
index 00000000..d06a66ef
--- /dev/null
+++ b/tuts/186-s3-object-lock/s3-object-lock.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+WORK_DIR=$(mktemp -d); exec > >(tee -a "$WORK_DIR/tut.log") 2>&1
+REGION=${AWS_DEFAULT_REGION:-${AWS_REGION:-$(aws configure get region 2>/dev/null)}}; [ -z "$REGION" ] && echo "ERROR: No region" && exit 1; export AWS_DEFAULT_REGION="$REGION"; echo "Region: $REGION"
+RANDOM_ID=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 8 | head -n 1); ACCOUNT=$(aws sts get-caller-identity --query Account --output text)
+B="lock-tut-${RANDOM_ID}-${ACCOUNT}"
+handle_error() { echo "ERROR on line $1"; trap - ERR; cleanup; exit 1; }; trap 'handle_error $LINENO' ERR
+cleanup() { echo "Cleaning up..."; echo " Object Lock buckets require all versions to expire before deletion."; echo " Manual: aws s3api delete-bucket --bucket $B (after retention expires)"; rm -rf "$WORK_DIR"; echo "Done."; }
+echo "Step 1: Creating bucket with Object Lock"
+aws s3api create-bucket --bucket "$B" --object-lock-enabled-for-bucket > /dev/null
+aws s3api put-bucket-versioning --bucket "$B" --versioning-configuration Status=Enabled
+echo "Step 2: Setting default retention (1 day governance mode)"
+aws s3api put-object-lock-configuration --bucket "$B" --object-lock-configuration '{"ObjectLockEnabled":"Enabled","Rule":{"DefaultRetention":{"Mode":"GOVERNANCE","Days":1}}}'
+echo "Step 3: Getting lock configuration"
+aws s3api get-object-lock-configuration --bucket "$B" --query "ObjectLockConfiguration.Rule.DefaultRetention.{Mode:Mode,Days:Days}" --output table
+echo "Step 4: Uploading a locked object"
+echo "protected data" > "$WORK_DIR/data.txt"
+aws s3 cp "$WORK_DIR/data.txt" "s3://$B/data.txt" --quiet
+echo " Object uploaded with governance-mode retention"
+echo "Step 5: Verifying lock"
+aws s3api head-object --bucket "$B" --key data.txt --query "{Lock:ObjectLockMode,Retain:ObjectLockRetainUntilDate}" --output table
+echo ""; echo "Tutorial complete."
+echo "Note: Object Lock prevents deletion until retention expires."
+echo "Do you want to clean up? (y/n): "; read -r C; [[ "$C" =~ ^[Yy]$ ]] && cleanup
diff --git a/tuts/190-s3-transfer-acceleration/README.md b/tuts/190-s3-transfer-acceleration/README.md
new file mode 100644
index 00000000..4516f4e7
--- /dev/null
+++ b/tuts/190-s3-transfer-acceleration/README.md
@@ -0,0 +1,39 @@
+# S3 Transfer Acceleration
+
+An AWS CLI tutorial that demonstrates S3 operations.
+
+## Running
+
+```bash
+bash s3-transfer-acceleration.sh
+```
+
+To auto-run with cleanup:
+
+```bash
+echo 'y' | bash s3-transfer-acceleration.sh
+```
+
+## What it does
+
+1. Creating bucket
+2. Enabling Transfer Acceleration
+3. Getting acceleration status
+4. Accelerated endpoint
+
+## Resources created
+
+- Bucket
+- Bucket Accelerate Configuration
+
+The script prompts you to clean up resources when it finishes.
+
+## Cost
+
+Free tier eligible for most operations. Clean up resources after use to avoid charges.
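+
+For reference, a minimal sketch of the core calls the script makes (the bucket name is a placeholder):
+
+```bash
+# Turn on Transfer Acceleration and check its status
+aws s3api put-bucket-accelerate-configuration --bucket amzn-s3-demo-bucket --accelerate-configuration Status=Enabled
+aws s3api get-bucket-accelerate-configuration --bucket amzn-s3-demo-bucket
+
+# Accelerated transfers then use the dedicated endpoint:
+#   https://amzn-s3-demo-bucket.s3-accelerate.amazonaws.com
+```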
+
+## Related docs
+
+- [AWS CLI s3 reference](https://docs.aws.amazon.com/cli/latest/reference/s3/index.html)
+- [AWS CLI s3api reference](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+
diff --git a/tuts/190-s3-transfer-acceleration/REVISION-HISTORY.md b/tuts/190-s3-transfer-acceleration/REVISION-HISTORY.md
new file mode 100644
index 00000000..2317cd5d
--- /dev/null
+++ b/tuts/190-s3-transfer-acceleration/REVISION-HISTORY.md
@@ -0,0 +1,8 @@
+# Revision History: 190-s3-transfer-acceleration
+
+## Shell (CLI script)
+
+### 2026-04-14 v1 published
+- Type: functional
+- Initial version
+
diff --git a/tuts/190-s3-transfer-acceleration/s3-transfer-acceleration.md b/tuts/190-s3-transfer-acceleration/s3-transfer-acceleration.md
new file mode 100644
index 00000000..b5184061
--- /dev/null
+++ b/tuts/190-s3-transfer-acceleration/s3-transfer-acceleration.md
@@ -0,0 +1,27 @@
+# S3 Transfer Acceleration
+
+## Prerequisites
+
+1. AWS CLI installed and configured (`aws configure`)
+2. Appropriate IAM permissions for the AWS services used
+
+## Step 1: Creating bucket
+
+The script handles this step automatically. See `s3-transfer-acceleration.sh` for the exact CLI commands.
+
+## Step 2: Enabling Transfer Acceleration
+
+The script handles this step automatically. See `s3-transfer-acceleration.sh` for the exact CLI commands.
+
+## Step 3: Getting acceleration status
+
+The script handles this step automatically. See `s3-transfer-acceleration.sh` for the exact CLI commands.
+
+## Step 4: Accelerated endpoint
+
+The script handles this step automatically. See `s3-transfer-acceleration.sh` for the exact CLI commands.
+
+## Cleanup
+
+The script prompts you to clean up all created resources. If you need to clean up manually, check the script log for the resource names that were created.
+
diff --git a/tuts/190-s3-transfer-acceleration/s3-transfer-acceleration.sh b/tuts/190-s3-transfer-acceleration/s3-transfer-acceleration.sh
new file mode 100644
index 00000000..8409a3e7
--- /dev/null
+++ b/tuts/190-s3-transfer-acceleration/s3-transfer-acceleration.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+WORK_DIR=$(mktemp -d); exec > >(tee -a "$WORK_DIR/tut.log") 2>&1
+REGION=${AWS_DEFAULT_REGION:-${AWS_REGION:-$(aws configure get region 2>/dev/null)}}; [ -z "$REGION" ] && echo "ERROR: No region" && exit 1; export AWS_DEFAULT_REGION="$REGION"; echo "Region: $REGION"
+RANDOM_ID=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 8 | head -n 1); ACCOUNT=$(aws sts get-caller-identity --query Account --output text)
+B="accel-tut-${RANDOM_ID}-${ACCOUNT}"
+cleanup() { echo "Cleaning up..."; aws s3 rm "s3://$B" --recursive --quiet 2>/dev/null; aws s3 rb "s3://$B" 2>/dev/null && echo " Deleted bucket"; rm -rf "$WORK_DIR"; echo "Done."; }
+echo "Step 1: Creating bucket"
+aws s3api create-bucket --bucket "$B" > /dev/null
+echo "Step 2: Enabling Transfer Acceleration"
+aws s3api put-bucket-accelerate-configuration --bucket "$B" --accelerate-configuration Status=Enabled
+echo " Acceleration enabled"
+echo "Step 3: Getting acceleration status"
+aws s3api get-bucket-accelerate-configuration --bucket "$B" --query '{Status:Status}' --output table
+echo "Step 4: Accelerated endpoint"
+echo " https://${B}.s3-accelerate.amazonaws.com"
+echo ""; echo "Tutorial complete."
+echo "Do you want to clean up? (y/n): "; read -r C; [[ "$C" =~ ^[Yy]$ ]] && cleanup
diff --git a/tuts/196-s3-inventory/README.md b/tuts/196-s3-inventory/README.md
new file mode 100644
index 00000000..59e3f3ac
--- /dev/null
+++ b/tuts/196-s3-inventory/README.md
@@ -0,0 +1,38 @@
+# S3 Inventory
+
+An AWS CLI tutorial that demonstrates S3 operations.
+
+## Running
+
+```bash
+bash s3-inventory.sh
+```
+
+To auto-run with cleanup:
+
+```bash
+echo 'y' | bash s3-inventory.sh
+```
+
+## What it does
+
+1. Creating source and destination buckets
+2. Configuring inventory
+3. Getting inventory configuration
+
+## Resources created
+
+- Bucket
+- Bucket Inventory Configuration
+
+The script prompts you to clean up resources when it finishes.
+
+## Cost
+
+Free tier eligible for most operations. Clean up resources after use to avoid charges.
+
+## Related docs
+
+- [AWS CLI s3 reference](https://docs.aws.amazon.com/cli/latest/reference/s3/index.html)
+- [AWS CLI s3api reference](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+
diff --git a/tuts/196-s3-inventory/REVISION-HISTORY.md b/tuts/196-s3-inventory/REVISION-HISTORY.md
new file mode 100644
index 00000000..4490ec63
--- /dev/null
+++ b/tuts/196-s3-inventory/REVISION-HISTORY.md
@@ -0,0 +1,8 @@
+# Revision History: 196-s3-inventory
+
+## Shell (CLI script)
+
+### 2026-04-14 v1 published
+- Type: functional
+- Initial version
+
diff --git a/tuts/196-s3-inventory/s3-inventory.md b/tuts/196-s3-inventory/s3-inventory.md
new file mode 100644
index 00000000..d673f852
--- /dev/null
+++ b/tuts/196-s3-inventory/s3-inventory.md
@@ -0,0 +1,23 @@
+# S3 Inventory
+
+## Prerequisites
+
+1. AWS CLI installed and configured (`aws configure`)
+2. Appropriate IAM permissions for the AWS services used
+
+## Step 1: Creating source and destination buckets
+
+The script handles this step automatically. See `s3-inventory.sh` for the exact CLI commands.
+
+## Step 2: Configuring inventory
+
+The script handles this step automatically. See `s3-inventory.sh` for the exact CLI commands.
+
+## Step 3: Getting inventory configuration
+
+The script handles this step automatically. See `s3-inventory.sh` for the exact CLI commands.
+
+## Cleanup
+
+The script prompts you to clean up all created resources. If you need to clean up manually, check the script log for the resource names that were created.
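+
+For reference, a minimal sketch of the calls the script makes (bucket names and the destination ARN are placeholders):
+
+```bash
+# Define a weekly CSV inventory report delivered to a second bucket
+aws s3api put-bucket-inventory-configuration --bucket amzn-s3-demo-source-bucket --id tutorial-inventory --inventory-configuration '{"Destination":{"S3BucketDestination":{"Bucket":"arn:aws:s3:::amzn-s3-demo-destination-bucket","Format":"CSV"}},"IsEnabled":true,"Id":"tutorial-inventory","IncludedObjectVersions":"Current","Schedule":{"Frequency":"Weekly"},"OptionalFields":["Size","LastModifiedDate","StorageClass"]}'
+
+# Read the configuration back, and remove it during cleanup
+aws s3api get-bucket-inventory-configuration --bucket amzn-s3-demo-source-bucket --id tutorial-inventory
+aws s3api delete-bucket-inventory-configuration --bucket amzn-s3-demo-source-bucket --id tutorial-inventory
+```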
+
diff --git a/tuts/196-s3-inventory/s3-inventory.sh b/tuts/196-s3-inventory/s3-inventory.sh
new file mode 100644
index 00000000..0fdd2cc5
--- /dev/null
+++ b/tuts/196-s3-inventory/s3-inventory.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+WORK_DIR=$(mktemp -d); exec > >(tee -a "$WORK_DIR/tut.log") 2>&1
+REGION=${AWS_DEFAULT_REGION:-${AWS_REGION:-$(aws configure get region 2>/dev/null)}}; [ -z "$REGION" ] && echo "ERROR: No region" && exit 1; export AWS_DEFAULT_REGION="$REGION"; echo "Region: $REGION"
+RANDOM_ID=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 8 | head -n 1); ACCOUNT=$(aws sts get-caller-identity --query Account --output text)
+SRC="inv-src-${RANDOM_ID}-${ACCOUNT}"; DST="inv-dst-${RANDOM_ID}-${ACCOUNT}"
+handle_error() { echo "ERROR on line $1"; trap - ERR; cleanup; exit 1; }; trap 'handle_error $LINENO' ERR
+cleanup() { echo "Cleaning up..."; aws s3api delete-bucket-inventory-configuration --bucket "$SRC" --id tutorial-inventory 2>/dev/null; aws s3 rm "s3://$SRC" --recursive --quiet 2>/dev/null; aws s3 rb "s3://$SRC" 2>/dev/null; aws s3 rm "s3://$DST" --recursive --quiet 2>/dev/null; aws s3 rb "s3://$DST" 2>/dev/null; rm -rf "$WORK_DIR"; echo "Done."; }
+echo "Step 1: Creating source and destination buckets"
+aws s3api create-bucket --bucket "$SRC" > /dev/null; aws s3api create-bucket --bucket "$DST" > /dev/null
+echo "Step 2: Configuring inventory"
+aws s3api put-bucket-inventory-configuration --bucket "$SRC" --id tutorial-inventory --inventory-configuration "{\"Destination\":{\"S3BucketDestination\":{\"Bucket\":\"arn:aws:s3:::$DST\",\"Format\":\"CSV\"}},\"IsEnabled\":true,\"Id\":\"tutorial-inventory\",\"IncludedObjectVersions\":\"Current\",\"Schedule\":{\"Frequency\":\"Weekly\"},\"OptionalFields\":[\"Size\",\"LastModifiedDate\",\"StorageClass\"]}"
+echo " Inventory configured (weekly, CSV)"
+echo "Step 3: Getting inventory configuration"
+aws s3api get-bucket-inventory-configuration --bucket "$SRC" --id tutorial-inventory --query "InventoryConfiguration.{Id:Id,Enabled:IsEnabled,Frequency:Schedule.Frequency}" --output table
+echo " Note: First inventory report generates within 48 hours"
+echo ""; echo "Tutorial complete."
+echo "Do you want to clean up? (y/n): "; read -r C; [[ "$C" =~ ^[Yy]$ ]] && cleanup