agent: |
EBS Management
- 1mgc791zWLyRgPRGzROnzFilter Out and Delete Old AWS EBS Snapshots
1
Filter Out and Delete Old AWS EBS Snapshots
This runbook identifies and removes old Amazon Elastic Block Store (EBS) snapshots. By setting a specific age threshold, it scans through designated AWS regions, pinpoints snapshots that exceed the age limit, and deletes them. This not only ensures efficient resource utilization but also minimizes storage costs, promoting a cleaner and more cost-effective cloud environment.
inputsoutputsdays_old=60 #Hardcoded for one time resultcopied1- 1.1cE2VyVuRge1jnahfJtVuFilter Out Old AWS EBS Snapshots
1.1
Filter Out Old AWS EBS Snapshots
This task identifies old Amazon Elastic Block Store (EBS) snapshots. By setting an age threshold, it scans across specified AWS regions and highlights snapshots that exceed the set duration. This facilitates better management, paving the way for timely deletions and efficient storage utilization.
import boto3
from botocore.exceptions import ClientError
from datetime import datetime, timedelta, timezone

creds = _get_creds(cred_label)['creds']
access_key = creds['username']
secret_key = creds['password']


def find_old_snapshots(ec2_client, days_old, region):
    """
    Find EBS snapshots in a specified AWS region that are older than a given number of days.

    Args:
        ec2_client (boto3.client): Boto3 EC2 client object.
        days_old (int): The age in days to consider an EBS snapshot as old.
        region (str): The AWS region to search for old snapshots.

    Returns:
        list[str]: List of old snapshot IDs. Returns None if there's an error.
    """
    old_snapshots = []  # IDs of snapshots older than the cutoff
    try:
        # Timezone-aware cutoff: boto3 returns 'StartTime' as a UTC-aware
        # datetime, so compare aware-to-aware. The previous approach stripped
        # tzinfo and compared against local naive time, which skews the
        # threshold by the host's UTC offset.
        cutoff_date = datetime.now(timezone.utc) - timedelta(days=days_old)

        # Paginate: a single describe_snapshots call returns at most 1000
        # results, so accounts with many snapshots would silently miss some.
        paginator = ec2_client.get_paginator('describe_snapshots')
        for page in paginator.paginate(OwnerIds=['self']):
            for snapshot in page['Snapshots']:
                if snapshot['StartTime'] < cutoff_date:
                    old_snapshots.append(snapshot['SnapshotId'])
        return old_snapshots
    except ClientError as e:
        print(f"A ClientError occurred in region {region}: {e}")  # AWS API errors
        return None
    except Exception as e:
        print(f"An unknown error occurred in region {region}: {e}")  # anything unexpected
        return None


# 'regions' and 'days_old' are runbook inputs supplied by the platform.
# Collect old snapshot IDs per region, and track regions with none found.
snapshots_by_region = {}
regions_without_snapshots = []

for region in regions:
    ec2_client = boto3.client(
        'ec2',
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        region_name=region,
    )
    old_snapshots = find_old_snapshots(ec2_client, int(days_old), region)
    # Errors (None) and empty results are both treated as "nothing to delete".
    if old_snapshots:
        snapshots_by_region[region] = old_snapshots
    else:
        regions_without_snapshots.append(region)

print("\nSummary of old snapshots by region:")
for region, snapshot_ids in snapshots_by_region.items():
    print(f"{region}: {snapshot_ids}")

if regions_without_snapshots:
    print(f"\nNo old snapshots found in the following regions: {', '.join(regions_without_snapshots)}")

context.skip_sub_tasks = True
1.1.1
This task removes specified Amazon Elastic Block Store (EBS) snapshots. Designed to streamline storage management, it purges the selected snapshots across designated AWS regions, ensuring optimal resource utilization and reducing unnecessary storage costs.
import boto3
from botocore.exceptions import ClientError

creds = _get_creds(cred_label)['creds']
access_key = creds['username']
secret_key = creds['password']


def delete_snapshots(ec2_client, snapshot_ids, region):
    """
    Delete a list of specified EBS snapshots in a given AWS region.

    Args:
        ec2_client (boto3.client): Boto3 EC2 client object.
        snapshot_ids (list[str]): List of EBS snapshot IDs to be deleted.
        region (str): The AWS region where the snapshots are located.

    Returns:
        None: This function does not return any value.
    """
    for snap_id in snapshot_ids:
        try:
            ec2_client.delete_snapshot(SnapshotId=snap_id)
        except ClientError as err:
            # Deletion can fail (e.g. snapshot in use by an AMI); report and continue.
            print(f"Could not delete snapshot {snap_id} in region {region}: {err}")
        else:
            print(f"Deleted snapshot {snap_id} in region {region}")


# 'snapshots_by_region' is produced by the parent filtering task, e.g.:
#   {'us-east-1': ['snap-04cbc2182c8f5e1ed', 'snap-0004bbdd1e7b0d35c'],
#    'us-west-2': []}
for region, old_snapshots in snapshots_by_region.items():
    print(f"Checking region {region}...")
    regional_client = boto3.client(
        'ec2',
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        region_name=region,
    )
    if not old_snapshots:
        print(f"No old snapshots found in {region}.")
        continue
    print(f"Found {len(old_snapshots)} old snapshots in {region}. Deleting them...")
    delete_snapshots(regional_client, old_snapshots, region)
- 2ndq2l3t3WMHwLKhdvdFJModify AWS EBS Volume type
2
This runbook is a cost-effective, performance-optimized operational upgrade. GP3 volumes are less expensive than older types like GP2 while offering the ability to independently scale throughput and IOPS. This provides granular control over performance, reduces management overhead, and can lead to substantial cost savings. The conversion is typically straightforward and backward-compatible, making it an efficient way to modernize your AWS storage infrastructure.
inputsoutputs2- 2.1Sg0kfgJSTZA6IV4RnXurFilter out AWS EBS Volumes that are not GP3 type
2.1
Filter out AWS EBS Volumes that are not GP3 type
This task scans your EBS volumes across different regions to flag those using older volume types like GP2. Knowing which volumes are not yet on GP3 allows you to target them for an upgrade, helping you take advantage of lower costs and improved performance features. This is particularly useful for large organizations looking to streamline their AWS infrastructure and reduce operational costs.
import boto3
from botocore.exceptions import ClientError

creds = _get_creds(cred_label)['creds']
access_key = creds['username']
secret_key = creds['password']


def get_non_gp3_volumes(regions):
    """
    Get the IDs of all EBS volumes that are not of type 'gp3'.

    Args:
        regions (list[str]): AWS regions to check.

    Returns:
        dict[str, list[str]]: Mapping of region to the non-GP3 volume IDs
        found there; regions with no non-GP3 volumes are omitted.
    """
    non_gp3_volumes_by_region = {}
    for region in regions:
        try:
            ec2 = boto3.resource(
                'ec2',
                aws_access_key_id=access_key,
                aws_secret_access_key=secret_key,
                region_name=region,
            )
            # ec2.volumes.all() paginates transparently through every volume.
            non_gp3_volumes = [
                volume.id
                for volume in ec2.volumes.all()
                if volume.volume_type != 'gp3'
            ]
            if non_gp3_volumes:
                non_gp3_volumes_by_region[region] = non_gp3_volumes
                print(f"Found {len(non_gp3_volumes)} non-GP3 volumes in {region}.")
            else:
                print(f"No non-GP3 volumes found in {region}.")
        except ClientError as e:
            print(f"A botocore exception occurred in region {region}: {e.response['Error']['Message']}")
        except Exception as e:
            print(f"An unknown error occurred in region {region}: {e}")
    return non_gp3_volumes_by_region


# BUG FIX: the original passed 'regions_to_check', which was only defined in a
# commented-out line and would raise NameError at runtime. Use the
# runbook-supplied 'regions' input, consistent with the snapshot tasks.
non_gp3_volumes_by_region = get_non_gp3_volumes(regions=regions)

print("Summary of non-GP3 volumes by region:")
for region, volume_ids in non_gp3_volumes_by_region.items():
    print(f"{region}: {volume_ids}")

context.skip_sub_tasks = True
2.1.1
Change AWS EBS Volumes to GP3 type
This task converts older volume types like GP2 to GP3. The transition to GP3 can result in cost savings and potentially improved performance, as GP3 volumes offer better throughput and the same baseline performance at a lower cost per GB. This operation is essential for organizations looking to modernize their storage infrastructure and optimize cloud expenses.
import boto3
from botocore.exceptions import ClientError

creds = _get_creds(cred_label)['creds']
access_key = creds['username']
secret_key = creds['password']


def modify_volumes_to_gp3(non_gp3_volumes_by_region):
    """
    Modify EBS volumes to GP3 type.

    Args:
        non_gp3_volumes_by_region (dict): Mapping of region to the list of
            non-GP3 volume IDs to convert in that region.

    Returns:
        None
    """
    for region, volume_ids in non_gp3_volumes_by_region.items():
        print(f"Checking region {region}...")
        regional_client = boto3.client(
            'ec2',
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
            region_name=region,
        )
        if not volume_ids:
            print(f"No non-GP3 volumes found in region {region}.")
            continue
        print(f"Found {len(volume_ids)} non-GP3 volumes in region {region}. Modifying them to GP3...")
        for volume_id in volume_ids:
            try:
                # 'ebs_volume_type' is a runbook input (expected to be 'gp3').
                regional_client.modify_volume(
                    VolumeId=volume_id,
                    VolumeType=ebs_volume_type,
                )
                print(f"Successfully modified volume {volume_id} to GP3 type in region {region}.")
            except ClientError as e:
                print(f"An error occurred while modifying volume {volume_id} in region {region}: {e.response['Error']['Message']}")


# 'non_gp3_volumes_by_region' comes from the upstream filtering task, e.g.:
#   {'us-east-1': ['vol-0ae92bb63cdc04a67']}
if non_gp3_volumes_by_region:
    modify_volumes_to_gp3(non_gp3_volumes_by_region)
else:
    print("No non-GP3 volumes found.")