diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 2a62c83..b823b10 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -51,6 +51,7 @@ jobs: JWKS_URL: ${{ steps.import-stacks-vars-to-output.outputs.JWKS_URL }} DATA_ACCESS_ROLE_ARN: ${{ steps.import-stacks-vars-to-output.outputs.DATA_ACCESS_ROLE_ARN }} DB_ALLOCATED_STORAGE: ${{ vars.DB_ALLOCATED_STORAGE }} + DB_INSTANCE_TYPE: ${{ vars.DB_INSTANCE_TYPE }} GIT_REPOSITORY: ${{ github.repository}} COMMIT_SHA: ${{ github.sha }} AUTHOR: ${{ github.actor }} diff --git a/cdk/PgBouncer.ts b/cdk/PgBouncer.ts new file mode 100644 index 0000000..afca8a5 --- /dev/null +++ b/cdk/PgBouncer.ts @@ -0,0 +1,227 @@ +import { + aws_ec2 as ec2, + aws_iam as iam, + aws_secretsmanager as secretsmanager, + Stack, +} from "aws-cdk-lib"; +import { Construct } from "constructs"; + +import * as fs from "fs"; +import * as path from "path"; + +// used to populate pgbouncer config: +// see https://www.pgbouncer.org/config.html for details +export interface PgBouncerConfigProps { + poolMode?: "transaction" | "session" | "statement"; + maxClientConn?: number; + defaultPoolSize?: number; + minPoolSize?: number; + reservePoolSize?: number; + reservePoolTimeout?: number; + maxDbConnections?: number; + maxUserConnections?: number; +} + +export interface PgBouncerProps { + /** + * Name for the pgbouncer instance + */ + instanceName: string; + + /** + * VPC to deploy PgBouncer into + */ + vpc: ec2.IVpc; + + /** + * The RDS instance to connect to + */ + database: { + instanceType: ec2.InstanceType; + connections: ec2.Connections; + secret: secretsmanager.ISecret; + }; + + /** + * Whether to deploy in public subnet + * @default false + */ + usePublicSubnet?: boolean; + + /** + * Instance type for PgBouncer + * @default t3.micro + */ + instanceType?: ec2.InstanceType; + + /** + * PgBouncer configuration options + */ + pgBouncerConfig?: PgBouncerConfigProps; +} + +export class PgBouncer extends Construct 
{ + public readonly instance: ec2.Instance; + public readonly endpoint: string; + + // The max_connections parameter in PgBouncer determines the maximum number of + // connections to open on the actual database instance. We want that number to + // be slightly smaller than the actual max_connections value on the RDS instance + // so we perform this calculation. + + // TODO: move this to eoapi-cdk where we already have a complete map of instance + // type and memory + private readonly instanceMemoryMapMb: Record<string, number> = { + "t3.micro": 1024, + "t3.small": 2048, + "t3.medium": 4096, + }; + + private calculateMaxConnections(dbInstanceType: ec2.InstanceType): number { + const memoryMb = this.instanceMemoryMapMb[dbInstanceType.toString()]; + if (!memoryMb) { + throw new Error( + `Unsupported instance type: ${dbInstanceType.toString()}`, + ); + } + + // RDS calculates the available memory as the total instance memory minus some + // constant for OS overhead + const memoryInBytes = (memoryMb - 300) * 1024 ** 2; + + // The default max_connections setting follows this formula: + return Math.min(Math.round(memoryInBytes / 9531392), 5000); + } + + private getDefaultConfig( + dbInstanceType: ec2.InstanceType, + ): Required<PgBouncerConfigProps> { + // calculate approximate max_connections setting for this RDS instance type + const maxConnections = this.calculateMaxConnections(dbInstanceType); + + // maxDbConnections (and maxUserConnections) are the only settings that need + // to be responsive to the database size/max_connections setting + return { + poolMode: "transaction", + maxClientConn: 1000, + defaultPoolSize: 5, + minPoolSize: 0, + reservePoolSize: 5, + reservePoolTimeout: 5, + maxDbConnections: maxConnections - 10, + maxUserConnections: maxConnections - 10, + }; + } + + constructor(scope: Construct, id: string, props: PgBouncerProps) { + super(scope, id); + + // Set defaults for optional props + const defaultInstanceType = ec2.InstanceType.of( + ec2.InstanceClass.T3, + ec2.InstanceSize.MICRO, + 
); + + const instanceType = props.instanceType ?? defaultInstanceType; + const defaultConfig = this.getDefaultConfig(props.database.instanceType); + + // Merge provided config with defaults + const pgBouncerConfig: Required<PgBouncerConfigProps> = { + ...defaultConfig, + ...props.pgBouncerConfig, + }; + + // Create role for PgBouncer instance to enable writing to CloudWatch + const role = new iam.Role(this, "InstanceRole", { + assumedBy: new iam.ServicePrincipal("ec2.amazonaws.com"), + managedPolicies: [ + iam.ManagedPolicy.fromAwsManagedPolicyName( + "AmazonSSMManagedInstanceCore", + ), + iam.ManagedPolicy.fromAwsManagedPolicyName( + "CloudWatchAgentServerPolicy", + ), + ], + }); + + // Add policy to allow reading RDS credentials from Secrets Manager + role.addToPolicy( + new iam.PolicyStatement({ + actions: ["secretsmanager:GetSecretValue"], + resources: [props.database.secret.secretArn], + }), + ); + + // Create PgBouncer instance + this.instance = new ec2.Instance(this, "Instance", { + vpc: props.vpc, + vpcSubnets: { + subnetType: props.usePublicSubnet + ? 
ec2.SubnetType.PUBLIC + : ec2.SubnetType.PRIVATE_WITH_EGRESS, + }, + instanceType, + instanceName: props.instanceName, + machineImage: ec2.MachineImage.fromSsmParameter( + "/aws/service/canonical/ubuntu/server/jammy/stable/current/amd64/hvm/ebs-gp2/ami-id", + { os: ec2.OperatingSystemType.LINUX }, + ), + role, + blockDevices: [ + { + deviceName: "/dev/xvda", + volume: ec2.BlockDeviceVolume.ebs(20, { + volumeType: ec2.EbsDeviceVolumeType.GP3, + encrypted: true, + deleteOnTermination: true, + }), + }, + ], + userData: this.loadUserDataScript(pgBouncerConfig, props.database), + userDataCausesReplacement: true, + }); + + // Allow PgBouncer to connect to RDS + props.database.connections.allowFrom( + this.instance, + ec2.Port.tcp(5432), + "Allow PgBouncer to connect to RDS", + ); + + // Set the endpoint + this.endpoint = this.instance.instancePrivateIp; + } + + private loadUserDataScript( + pgBouncerConfig: Required<PgBouncerConfigProps>, + database: { secret: secretsmanager.ISecret }, + ): ec2.UserData { + const userDataScript = ec2.UserData.forLinux(); + + // Set environment variables with configuration parameters + userDataScript.addCommands( + 'export SECRET_ARN="' + database.secret.secretArn + '"', + 'export REGION="' + Stack.of(this).region + '"', + 'export POOL_MODE="' + pgBouncerConfig.poolMode + '"', + 'export MAX_CLIENT_CONN="' + pgBouncerConfig.maxClientConn + '"', + 'export DEFAULT_POOL_SIZE="' + pgBouncerConfig.defaultPoolSize + '"', + 'export MIN_POOL_SIZE="' + pgBouncerConfig.minPoolSize + '"', + 'export RESERVE_POOL_SIZE="' + pgBouncerConfig.reservePoolSize + '"', + 'export RESERVE_POOL_TIMEOUT="' + + pgBouncerConfig.reservePoolTimeout + + '"', + 'export MAX_DB_CONNECTIONS="' + pgBouncerConfig.maxDbConnections + '"', + 'export MAX_USER_CONNECTIONS="' + + pgBouncerConfig.maxUserConnections + + '"', + ); + + // Load the startup script + const scriptPath = path.join(__dirname, "./scripts/pgbouncer-setup.sh"); + let script = fs.readFileSync(scriptPath, "utf8"); + + 
userDataScript.addCommands(script); + + return userDataScript; + } +} diff --git a/cdk/PgStacInfra.ts b/cdk/PgStacInfra.ts index 0bf0843..4513b5b 100644 --- a/cdk/PgStacInfra.ts +++ b/cdk/PgStacInfra.ts @@ -8,7 +8,7 @@ import { aws_cloudfront as cloudfront, aws_cloudfront_origins as origins, } from "aws-cdk-lib"; -import { Aws, Duration, RemovalPolicy, Stack, StackProps, Tags } from "aws-cdk-lib"; +import { Aws, Duration, RemovalPolicy, Stack, StackProps } from "aws-cdk-lib"; import { Construct } from "constructs"; import { BastionHost, @@ -22,6 +22,7 @@ import { import { DomainName } from "@aws-cdk/aws-apigatewayv2-alpha"; import { readFileSync } from "fs"; import { load } from "js-yaml"; +import { PgBouncer } from "./PgBouncer"; export class PgStacInfra extends Stack { constructor(scope: Construct, id: string, props: Props) { @@ -33,6 +34,7 @@ export class PgStacInfra extends Stack { vpc, stage, version, + dbInstanceType, jwksUrl, dataAccessRoleArn, allocatedStorage, @@ -40,7 +42,7 @@ export class PgStacInfra extends Stack { titilerBucketsPath, } = props; - const maapLoggingBucket = new s3.Bucket(this, 'maapLoggingBucket', { + const maapLoggingBucket = new s3.Bucket(this, "maapLoggingBucket", { accessControl: s3.BucketAccessControl.LOG_DELIVERY_WRITE, removalPolicy: RemovalPolicy.DESTROY, blockPublicAccess: s3.BlockPublicAccess.BLOCK_ALL, @@ -54,6 +56,7 @@ export class PgStacInfra extends Stack { ], }); + // Pgstac Database const { db, pgstacSecret } = new PgStacDatabase(this, "pgstac-db", { vpc, allowMajorVersionUpgrade: true, @@ -66,11 +69,27 @@ export class PgStacInfra extends Stack { : ec2.SubnetType.PRIVATE_ISOLATED, }, allocatedStorage: allocatedStorage, - // set instance type to t3.micro if stage is test, otherwise t3.small - instanceType: - stage === "test" - ? 
ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.MICRO) - : ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.SMALL), + instanceType: dbInstanceType, + }); + + // PgBouncer + const pgBouncer = new PgBouncer(this, "pgbouncer", { + instanceName: `pgbouncer-${stage}`, + vpc: props.vpc, + database: { + instanceType: dbInstanceType, + connections: db.connections, + secret: pgstacSecret, + }, + usePublicSubnet: props.dbSubnetPublic, + pgBouncerConfig: { + poolMode: "transaction", + maxClientConn: 1000, + defaultPoolSize: 20, + minPoolSize: 10, + reservePoolSize: 5, + reservePoolTimeout: 5, + }, }); const apiSubnetSelection: ec2.SubnetSelection = { @@ -79,6 +98,7 @@ export class PgStacInfra extends Stack { : ec2.SubnetType.PRIVATE_WITH_EGRESS, }; + // STAC API const stacApiLambda = new PgStacApiLambda(this, "pgstac-api", { apiEnv: { NAME: `MAAP STAC API (${stage})`, @@ -107,6 +127,7 @@ export class PgStacInfra extends Stack { sourceArn: props.stacApiIntegrationApiArn, }); + // titiler-pgstac const fileContents = readFileSync(titilerBucketsPath, "utf8"); const buckets = load(fileContents) as string[]; @@ -175,6 +196,19 @@ export class PgStacInfra extends Stack { titilerPgstacApi.titilerPgstacLambdaFunction.addToRolePolicy(permission); }); + // Configure titiler-pgstac for pgbouncer + titilerPgstacApi.titilerPgstacLambdaFunction.connections.allowTo( + pgBouncer.instance, + ec2.Port.tcp(5432), + "allow connections from titiler", + ); + + titilerPgstacApi.titilerPgstacLambdaFunction.addEnvironment( + "PGBOUNCER_HOST", + pgBouncer.endpoint, + ); + + // STAC Ingestor new BastionHost(this, "bastion-host", { vpc, db, @@ -219,7 +253,7 @@ export class PgStacInfra extends Stack { }); // STAC Browser Infrastructure - const stacBrowserBucket = new s3.Bucket(this, 'stacBrowserBucket', { + const stacBrowserBucket = new s3.Bucket(this, "stacBrowserBucket", { accessControl: s3.BucketAccessControl.PRIVATE, removalPolicy: RemovalPolicy.DESTROY, blockPublicAccess: 
s3.BlockPublicAccess.BLOCK_ALL, @@ -227,24 +261,29 @@ export class PgStacInfra extends Stack { enforceSSL: true, }); - const stacBrowserOrigin = new cloudfront.Distribution(this, 'stacBrowserDistro', { - defaultBehavior: { origin: new origins.S3Origin(stacBrowserBucket) }, - defaultRootObject: 'index.html', - domainNames: [props.stacBrowserCustomDomainName], - certificate: acm.Certificate.fromCertificateArn( - this, - "stacBrowserCustomDomainNameCertificate", - props.stacBrowserCertificateArn, - ), - enableLogging: true, - logBucket: maapLoggingBucket, - logFilePrefix: 'stac-browser', - }); - + const stacBrowserOrigin = new cloudfront.Distribution( + this, + "stacBrowserDistro", + { + defaultBehavior: { origin: new origins.S3Origin(stacBrowserBucket) }, + defaultRootObject: "index.html", + domainNames: [props.stacBrowserCustomDomainName], + certificate: acm.Certificate.fromCertificateArn( + this, + "stacBrowserCustomDomainNameCertificate", + props.stacBrowserCertificateArn, + ), + enableLogging: true, + logBucket: maapLoggingBucket, + logFilePrefix: "stac-browser", + }, + ); + new StacBrowser(this, "stac-browser", { bucketArn: stacBrowserBucket.bucketArn, - stacCatalogUrl: props.stacApiCustomDomainName.startsWith('https://') ? - props.stacApiCustomDomainName : `https://${props.stacApiCustomDomainName}/`, + stacCatalogUrl: props.stacApiCustomDomainName.startsWith("https://") + ? 
props.stacApiCustomDomainName + : `https://${props.stacApiCustomDomainName}/`, githubRepoTag: props.stacBrowserRepoTag, websiteIndexDocument: "index.html", }); @@ -252,31 +291,35 @@ export class PgStacInfra extends Stack { const accountId = Aws.ACCOUNT_ID; const distributionArn = `arn:aws:cloudfront::${accountId}:distribution/${stacBrowserOrigin.distributionId}`; - stacBrowserBucket.addToResourcePolicy(new iam.PolicyStatement({ - sid: 'AllowCloudFrontServicePrincipal', - effect: iam.Effect.ALLOW, - actions: ['s3:GetObject'], - principals: [new iam.ServicePrincipal('cloudfront.amazonaws.com')], - resources: [stacBrowserBucket.arnForObjects('*')], - conditions: { - 'StringEquals': { - 'aws:SourceArn': distributionArn, - } - } - })); - - maapLoggingBucket.addToResourcePolicy(new iam.PolicyStatement({ - sid: 'AllowCloudFrontServicePrincipal', - effect: iam.Effect.ALLOW, - actions: ['s3:PutObject'], - resources: [maapLoggingBucket.arnForObjects('AWSLogs/*')], - principals: [new iam.ServicePrincipal('cloudfront.amazonaws.com')], - conditions: { - 'StringEquals': { - 'aws:SourceArn': distributionArn, - }, - }, - })); + stacBrowserBucket.addToResourcePolicy( + new iam.PolicyStatement({ + sid: "AllowCloudFrontServicePrincipal", + effect: iam.Effect.ALLOW, + actions: ["s3:GetObject"], + principals: [new iam.ServicePrincipal("cloudfront.amazonaws.com")], + resources: [stacBrowserBucket.arnForObjects("*")], + conditions: { + StringEquals: { + "aws:SourceArn": distributionArn, + }, + }, + }), + ); + + maapLoggingBucket.addToResourcePolicy( + new iam.PolicyStatement({ + sid: "AllowCloudFrontServicePrincipal", + effect: iam.Effect.ALLOW, + actions: ["s3:PutObject"], + resources: [maapLoggingBucket.arnForObjects("AWSLogs/*")], + principals: [new iam.ServicePrincipal("cloudfront.amazonaws.com")], + conditions: { + StringEquals: { + "aws:SourceArn": distributionArn, + }, + }, + }), + ); } } @@ -294,6 +337,11 @@ export interface Props extends StackProps { */ version: string; + /** + 
 * RDS Instance type + */ + dbInstanceType: ec2.InstanceType; + /** * Flag to control whether database should be deployed into a * public subnet. diff --git a/cdk/app.ts b/cdk/app.ts index 86964e3..083bda0 100644 --- a/cdk/app.ts +++ b/cdk/app.ts @@ -8,6 +8,7 @@ import { PgStacInfra } from "./PgStacInfra"; const { stage, version, + dbInstanceType, buildStackName, tags, jwksUrl, @@ -21,7 +22,7 @@ const { titilerPgStacApiCustomDomainName, stacBrowserRepoTag, stacBrowserCustomDomainName, - stacBrowserCertificateArn + stacBrowserCertificateArn, } = new Config(); export const app = new cdk.App({}); @@ -38,6 +39,7 @@ new PgStacInfra(app, buildStackName("pgSTAC"), { stage, version, jwksUrl, + dbInstanceType, terminationProtection: false, bastionIpv4AllowList: [ "66.17.119.38/32", // Jamison diff --git a/cdk/config.ts b/cdk/config.ts index cda9938..5925029 100644 --- a/cdk/config.ts +++ b/cdk/config.ts @@ -1,6 +1,9 @@ +import * as aws_ec2 from "aws-cdk-lib/aws-ec2"; + export class Config { readonly stage: string; readonly version: string; + readonly dbInstanceType: aws_ec2.InstanceType; readonly tags: Record<string, string>; readonly jwksUrl: string; readonly dataAccessRoleArn: string; @@ -18,32 +21,61 @@ constructor() { // These are required environment variables and cannot be undefined const requiredVariables = [ - { name: 'STAGE', value: process.env.STAGE }, - { name: 'JWKS_URL', value: process.env.JWKS_URL }, - { name: 'DATA_ACCESS_ROLE_ARN', value: process.env.DATA_ACCESS_ROLE_ARN }, - { name: 'STAC_API_INTEGRATION_API_ARN', value: process.env.STAC_API_INTEGRATION_API_ARN }, - { name: 'DB_ALLOCATED_STORAGE', value: process.env.DB_ALLOCATED_STORAGE }, - { name: 'MOSAIC_HOST', value: process.env.MOSAIC_HOST }, - { name: 'STAC_BROWSER_REPO_TAG', value: process.env.STAC_BROWSER_REPO_TAG }, - { name: 'STAC_BROWSER_CUSTOM_DOMAIN_NAME', value: process.env.STAC_BROWSER_CUSTOM_DOMAIN_NAME }, - { name: 'STAC_BROWSER_CERTIFICATE_ARN', value: 
process.env.STAC_BROWSER_CERTIFICATE_ARN }, - { name: 'STAC_API_CUSTOM_DOMAIN_NAME', value: process.env.STAC_API_CUSTOM_DOMAIN_NAME }, + { name: "STAGE", value: process.env.STAGE }, + { name: "DB_INSTANCE_TYPE", value: process.env.DB_INSTANCE_TYPE }, + { name: "JWKS_URL", value: process.env.JWKS_URL }, + { name: "DATA_ACCESS_ROLE_ARN", value: process.env.DATA_ACCESS_ROLE_ARN }, + { + name: "STAC_API_INTEGRATION_API_ARN", + value: process.env.STAC_API_INTEGRATION_API_ARN, + }, + { name: "DB_ALLOCATED_STORAGE", value: process.env.DB_ALLOCATED_STORAGE }, + { name: "MOSAIC_HOST", value: process.env.MOSAIC_HOST }, + { + name: "STAC_BROWSER_REPO_TAG", + value: process.env.STAC_BROWSER_REPO_TAG, + }, + { + name: "STAC_BROWSER_CUSTOM_DOMAIN_NAME", + value: process.env.STAC_BROWSER_CUSTOM_DOMAIN_NAME, + }, + { + name: "STAC_BROWSER_CERTIFICATE_ARN", + value: process.env.STAC_BROWSER_CERTIFICATE_ARN, + }, + { + name: "STAC_API_CUSTOM_DOMAIN_NAME", + value: process.env.STAC_API_CUSTOM_DOMAIN_NAME, + }, ]; for (const variable of requiredVariables) { if (!variable.value) { - throw new Error(`Must provide ${variable.name}`); + throw new Error(`Must provide ${variable.name}`); } } this.stage = process.env.STAGE!; + this.jwksUrl = process.env.JWKS_URL!; this.dataAccessRoleArn = process.env.DATA_ACCESS_ROLE_ARN!; this.stacApiIntegrationApiArn = process.env.STAC_API_INTEGRATION_API_ARN!; + + try { + this.dbInstanceType = new aws_ec2.InstanceType( + process.env.DB_INSTANCE_TYPE!, + ); + } catch (error) { + throw new Error( + `Invalid DB_INSTANCE_TYPE: ${process.env.DB_INSTANCE_TYPE!}. 
Error: ${error}`, + ); + } + this.dbAllocatedStorage = Number(process.env.DB_ALLOCATED_STORAGE!); this.mosaicHost = process.env.MOSAIC_HOST!; this.stacBrowserRepoTag = process.env.STAC_BROWSER_REPO_TAG!; - this.stacBrowserCustomDomainName = process.env.STAC_BROWSER_CUSTOM_DOMAIN_NAME!; + this.stacBrowserCustomDomainName = + process.env.STAC_BROWSER_CUSTOM_DOMAIN_NAME!; this.stacBrowserCertificateArn = process.env.STAC_BROWSER_CERTIFICATE_ARN!; this.stacApiCustomDomainName = process.env.STAC_API_CUSTOM_DOMAIN_NAME!; diff --git a/cdk/handlers/raster_handler.py b/cdk/handlers/raster_handler.py index c03737a..aab126f 100644 --- a/cdk/handlers/raster_handler.py +++ b/cdk/handlers/raster_handler.py @@ -8,11 +8,12 @@ # Update postgres env variables before importing titiler.pgstac.main pgstac_secret_arn = os.environ["PGSTAC_SECRET_ARN"] +pgbouncer_host = os.getenv("PGBOUNCER_HOST") secret = get_secret_dict(pgstac_secret_arn) os.environ.update( { - "postgres_host": secret["host"], + "postgres_host": pgbouncer_host or secret["host"], "postgres_dbname": secret["dbname"], "postgres_user": secret["username"], "postgres_pass": secret["password"], @@ -20,11 +21,10 @@ } ) +from eoapi.raster.main import app from mangum import Mangum from titiler.pgstac.db import connect_to_db -from eoapi.raster.main import app - logging.getLogger("mangum.lifespan").setLevel(logging.ERROR) logging.getLogger("mangum.http").setLevel(logging.ERROR) diff --git a/cdk/scripts/pgbouncer-setup.sh b/cdk/scripts/pgbouncer-setup.sh new file mode 100644 index 0000000..bcbcb1b --- /dev/null +++ b/cdk/scripts/pgbouncer-setup.sh @@ -0,0 +1,258 @@ +#!/bin/bash +set -euxo pipefail + +# These variables will be replaced by the TypeScript code +SECRET_ARN=${SECRET_ARN} +REGION=${REGION} +POOL_MODE=${POOL_MODE} +MAX_CLIENT_CONN=${MAX_CLIENT_CONN} +DEFAULT_POOL_SIZE=${DEFAULT_POOL_SIZE} +MIN_POOL_SIZE=${MIN_POOL_SIZE} +RESERVE_POOL_SIZE=${RESERVE_POOL_SIZE} +RESERVE_POOL_TIMEOUT=${RESERVE_POOL_TIMEOUT} 
+MAX_DB_CONNECTIONS=${MAX_DB_CONNECTIONS} +MAX_USER_CONNECTIONS=${MAX_USER_CONNECTIONS} +CLOUDWATCH_CONFIG="/opt/aws/amazon-cloudwatch-agent/bin/config.json" + +# Add the postgres repository +curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - +sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' + +# Install required packages +apt-get update + +# Function that makes sure we don't hit a dpkg lock error +wait_for_dpkg_lock() { + while fuser /var/lib/dpkg/lock-frontend /var/lib/dpkg/lock >/dev/null 2>&1; do + echo "Waiting for dpkg lock to be released..." + sleep 2 + done +} + +wait_for_dpkg_lock +DEBIAN_FRONTEND=noninteractive apt-get install -y pgbouncer jq awscli + +echo "Fetching secret from ARN: ${SECRET_ARN}" + +# Before handling secrets, turn off command tracing +set +x +SECRET=$(aws secretsmanager get-secret-value --secret-id ${SECRET_ARN} --region ${REGION} --query SecretString --output text) + +# Parse database credentials without echoing +DB_HOST=$(echo "$SECRET" | jq -r '.host') +DB_PORT=$(echo "$SECRET" | jq -r '.port') +DB_NAME=$(echo "$SECRET" | jq -r '.dbname') +DB_USER=$(echo "$SECRET" | jq -r '.username') +DB_PASSWORD=$(echo "$SECRET" | jq -r '.password') + +echo 'Creating PgBouncer configuration...' 
+ +# Create pgbouncer.ini +cat <<EOC > /etc/pgbouncer/pgbouncer.ini +[databases] +* = host=$DB_HOST port=$DB_PORT dbname=$DB_NAME + +[pgbouncer] +listen_addr = 0.0.0.0 +listen_port = 5432 +auth_type = md5 +auth_file = /etc/pgbouncer/userlist.txt +pool_mode = ${POOL_MODE} +max_client_conn = ${MAX_CLIENT_CONN} +default_pool_size = ${DEFAULT_POOL_SIZE} +min_pool_size = ${MIN_POOL_SIZE} +reserve_pool_size = ${RESERVE_POOL_SIZE} +reserve_pool_timeout = ${RESERVE_POOL_TIMEOUT} +max_db_connections = ${MAX_DB_CONNECTIONS} +max_user_connections = ${MAX_USER_CONNECTIONS} +max_prepared_statements = 10 +ignore_startup_parameters = application_name,search_path +logfile = /var/log/pgbouncer/pgbouncer.log +pidfile = /var/run/pgbouncer/pgbouncer.pid +admin_users = $DB_USER +stats_users = $DB_USER +log_connections = 1 +log_disconnections = 1 +log_pooler_errors = 1 +log_stats = 1 +stats_period = 60 +EOC + +# Create userlist.txt without echoing sensitive info +{ + echo "\"$DB_USER\" \"$DB_PASSWORD\"" +} > /etc/pgbouncer/userlist.txt + +# Turn command tracing back on +set -x + +# Set correct permissions +chown postgres:postgres /etc/pgbouncer/pgbouncer.ini /etc/pgbouncer/userlist.txt +chmod 600 /etc/pgbouncer/pgbouncer.ini /etc/pgbouncer/userlist.txt + +# Configure logging +# ensure /var/run/pgbouncer gets created on boot +cat <<EOC > /etc/tmpfiles.d/pgbouncer.conf +d /var/run/pgbouncer 0755 postgres postgres - +EOC + +mkdir -p /var/log/pgbouncer /var/run/pgbouncer +chown postgres:postgres /var/log/pgbouncer /var/run/pgbouncer +chmod 755 /var/log/pgbouncer /var/run/pgbouncer + +touch /var/log/pgbouncer/pgbouncer.log +chown postgres:postgres /var/log/pgbouncer/pgbouncer.log +chmod 640 /var/log/pgbouncer/pgbouncer.log + +# Enable and start pgbouncer service +systemctl enable pgbouncer +systemctl restart pgbouncer + + +cat <<EOC > /etc/logrotate.d/pgbouncer +/var/log/pgbouncer/pgbouncer.log { + daily + rotate 7 + compress + delaycompress + missingok + copytruncate + create 640 postgres postgres +} +EOC + 
+# Create monitoring scripts directory +mkdir -p /opt/pgbouncer/scripts + +# Create the health check script +cat <<'EOC' > /opt/pgbouncer/scripts/check.sh +#!/bin/bash +echo $(/bin/systemctl is-active pgbouncer) +if ! /bin/systemctl is-active --quiet pgbouncer; then + # If it's not active, attempt to start it + echo "$(date): PgBouncer is not running, attempting to restart" | logger -t pgbouncer-monitor + /bin/systemctl start pgbouncer + + # Check if the restart was successful + if /bin/systemctl is-active --quiet pgbouncer; then + echo "$(date): PgBouncer successfully restarted" | logger -t pgbouncer-monitor + else + echo "$(date): Failed to restart PgBouncer" | logger -t pgbouncer-monitor + fi +else + # If it's already active, no action is needed + echo "$(date): PgBouncer is running; no action needed" | logger -t pgbouncer-monitor +fi +EOC +chmod +x /opt/pgbouncer/scripts/check.sh + +# enable cron job +cat <<'EOC' > /opt/pgbouncer/scripts/crontab.txt +# PgBouncer health check - run every minute +* * * * * /opt/pgbouncer/scripts/check.sh +EOC + +crontab /opt/pgbouncer/scripts/crontab.txt + +if ! crontab -l; then + echo 'Failed to install crontab' | logger -t pgbouncer-setup + exit 1 +fi + +# Create CloudWatch configuration directory +mkdir -p /opt/pgbouncer/cloudwatch + +# Install CloudWatch agent +if ! wget -q https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/amd64/latest/amazon-cloudwatch-agent.deb; then + echo 'Failed to download CloudWatch agent' | logger -t pgbouncer-setup + exit 1 +fi + +if ! 
dpkg -i amazon-cloudwatch-agent.deb; then + echo 'Failed to install CloudWatch agent' | logger -t pgbouncer-setup + exit 1 +fi + +# Create CloudWatch config +cat <<EOC > ${CLOUDWATCH_CONFIG} +{ + "agent": { + "metrics_collection_interval": 60, + "run_as_user": "root" + }, + "logs": { + "logs_collected": { + "files": { + "collect_list": [ + { + "file_path": "/var/log/pgbouncer/pgbouncer.log", + "log_group_name": "/pgbouncer/logs", + "log_stream_name": "{instance_id}", + "timestamp_format": "%Y-%m-%d %H:%M:%S", + "multi_line_start_pattern": "{timestamp_format}", + "retention_in_days": 14 + }, + { + "file_path": "/var/log/syslog", + "log_group_name": "/pgbouncer/system-logs", + "log_stream_name": "{instance_id}", + "timestamp_format": "%b %d %H:%M:%S", + "retention_in_days": 14 + } + ] + } + } + }, + "metrics": { + "metrics_collected": { + "procstat": [ + { + "pattern": "pgbouncer", + "measurement": [ + "cpu_usage", + "memory_rss", + "read_bytes", + "write_bytes", + "read_count", + "write_count", + "num_fds" + ] + } + ], + "mem": { + "measurement": [ + "mem_used_percent" + ] + }, + "disk": { + "measurement": [ + "used_percent" + ] + } + }, + "aggregation_dimensions": [["InstanceId"]] + } +} +EOC + +# Verify the config file exists +if [ ! -f ${CLOUDWATCH_CONFIG} ]; then + echo 'CloudWatch config file not created' | logger -t pgbouncer-setup + exit 1 +fi + +# Start CloudWatch agent +if ! /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -s -c file:${CLOUDWATCH_CONFIG}; then + echo 'Failed to configure CloudWatch agent' | logger -t pgbouncer-setup + exit 1 +fi + +systemctl enable amazon-cloudwatch-agent +systemctl start amazon-cloudwatch-agent + +# Verify CloudWatch agent is running +if ! 
systemctl is-active amazon-cloudwatch-agent; then + echo 'CloudWatch agent failed to start' | logger -t pgbouncer-setup + exit 1 +fi + diff --git a/package.json b/package.json index 94b8dab..99659fc 100644 --- a/package.json +++ b/package.json @@ -24,7 +24,7 @@ }, "dependencies": { "aws-cdk-lib": "^2.130.0", - "eoapi-cdk": "7.2.1", + "eoapi-cdk": "7.3.0", "constructs": "^10.3.0", "source-map-support": "^0.5.16" }