Prerequisites
This article uses skopeo to copy container images between registries. Install it for your host OS by following the skopeo installation documentation.
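The exact install command depends on your OS and package manager; the lines below are common cases and are illustrative only.
# Install skopeo (pick the line matching your OS/package manager)
brew install skopeo             # macOS (Homebrew)
sudo dnf install -y skopeo      # Fedora / RHEL
sudo apt-get install -y skopeo  # Debian / Ubuntu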
Deployment Steps
Create Immuta image repositories in ECR
AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
AWS_REGION=us-east-1
for image in audit-service audit-export-cronjob cache classify-service detect-temporal-worker immuta-service temporal-admin-tools temporal-proxy temporal-server; do
  aws ecr create-repository \
    --repository-name immuta/$image \
    --region ${AWS_REGION};
done
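As an optional check, list the repositories that were just created:
# List repository names in the current registry and region
aws ecr describe-repositories --region ${AWS_REGION} \
  --query "repositories[].repositoryName" --output table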
Authenticate to ECR
aws ecr get-login-password --region ${AWS_REGION} | skopeo login --username AWS --password-stdin ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com
Copy images to ECR
export IMMUTA_VERSION=2024.3.0
for image in audit-service audit-export-cronjob cache classify-service detect-temporal-worker immuta-service temporal-admin-tools temporal-proxy temporal-server; do
  skopeo copy docker://ocir.immuta.com/stable/$image:${IMMUTA_VERSION} docker://${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/immuta/$image:${IMMUTA_VERSION};
done
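As an optional check, confirm the expected tag is present for one of the copied images (immuta-service is used here as an example):
# Show the tags present in one of the new ECR repositories
aws ecr describe-images --region ${AWS_REGION} \
  --repository-name immuta/immuta-service \
  --query "imageDetails[].imageTags" --output json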
Deploy EKS Cluster
Run eksctl create cluster with a config file like the one below, updated with values appropriate for the destination environment:
eksctl create cluster -f immuta-lts.yaml
Cluster Config
apiVersion: eksctl.io/v1alpha5
iam:
  withOIDC: true
kind: ClusterConfig
kubernetesNetworkConfig:
  ipFamily: IPv4
managedNodeGroups:
  - name: immuta-lts
    amiFamily: AmazonLinux2
    desiredCapacity: 3
    disableIMDSv1: true
    disablePodIMDS: false
    instanceType: m5.xlarge
    iam:
      withAddonPolicies:
        albIngress: true
        awsLoadBalancerController: true
        certManager: true
        cloudWatch: true
        ebs: true
        externalDNS: true
    labels:
      alpha.eksctl.io/cluster-name: immuta-lts
      alpha.eksctl.io/nodegroup-name: immuta-lts
    maxSize: 3
    minSize: 3
    privateNetworking: true
    tags:
      alpha.eksctl.io/nodegroup-name: immuta-lts
      alpha.eksctl.io/nodegroup-type: managed
    volumeIOPS: 3000
    volumeSize: 80
    volumeThroughput: 125
    volumeType: gp3
metadata:
  name: immuta-lts
  region: us-east-1
  version: "1.29"
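eksctl adds the new cluster to your kubeconfig when it finishes. Since the nodegroup above requests three m5.xlarge nodes, a quick sanity check is:
# Expect three Ready nodes once cluster creation completes
kubectl get nodes -o wide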
Create service account for the EBS CSI Driver and AWS Load Balancer Controller
This creates an IAM role and associates it with a Kubernetes service account. For the ebs-csi-controller, we create only the IAM role and allow the addon to create the service account itself.
Prerequisites
If this is the first time running an EKS cluster with the AWS Load Balancer Controller, the IAM policy must be created first:
curl -o iam-policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.7.2/docs/install/iam_policy.json
aws iam create-policy \
  --policy-name AWSLoadBalancerControllerIAMPolicy \
  --policy-document file://iam-policy.json
EKS_CLUSTER_NAME=immuta-lts
eksctl create iamserviceaccount --cluster ${EKS_CLUSTER_NAME} \
  --name ebs-csi-controller-sa \
  --namespace kube-system \
  --role-name ${EKS_CLUSTER_NAME}-ebs-csi-driver-role \
  --role-only \
  --attach-policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy \
  --approve
eksctl create iamserviceaccount --cluster ${EKS_CLUSTER_NAME} \
  --name aws-load-balancer-controller \
  --namespace kube-system \
  --attach-policy-arn arn:aws:iam::${AWS_ACCOUNT_ID}:policy/AWSLoadBalancerControllerIAMPolicy \
  --approve
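As an optional check, list the IAM service accounts eksctl now manages for the cluster:
# Both the ebs-csi-driver role and the aws-load-balancer-controller service account should appear
eksctl get iamserviceaccount --cluster ${EKS_CLUSTER_NAME} --namespace kube-system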
Enable the EBS CSI Driver Addon
eksctl create addon --name aws-ebs-csi-driver \
  --cluster ${EKS_CLUSTER_NAME} \
  --service-account-role-arn arn:aws:iam::${AWS_ACCOUNT_ID}:role/${EKS_CLUSTER_NAME}-ebs-csi-driver-role
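As an optional check, confirm the addon becomes active:
# Shows the addon's status and the IAM role it is bound to
eksctl get addon --cluster ${EKS_CLUSTER_NAME} --name aws-ebs-csi-driver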
Deploy the AWS Load Balancer Controller
helm repo add eks https://aws.github.io/eks-charts
helm install aws-load-balancer-controller eks/aws-load-balancer-controller \
  --namespace kube-system \
  --set clusterName=${EKS_CLUSTER_NAME} \
  --set serviceAccount.create=false \
  --set serviceAccount.name=aws-load-balancer-controller
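As an optional check, verify the controller deployment is available before creating any Ingress resources:
# The aws-load-balancer-controller deployment should report ready replicas
kubectl get deployment -n kube-system aws-load-balancer-controller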
Deploy external-dns
If the DNS zone your application is being deployed to is controlled from the same AWS account, you can automate DNS updates by installing external-dns:
helm repo add external-dns https://kubernetes-sigs.github.io/external-dns
helm upgrade --install --create-namespace --namespace=external-dns external-dns external-dns/external-dns
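The command above installs external-dns with default values. In practice you will usually want to limit it to the zone you control and prevent it from deleting records it does not own. The flags below are a sketch: the domainFilters and policy value names come from the kubernetes-sigs chart, and the immuta.us zone matches the example hostname used later in this article, so adjust both for your environment and chart version.
# Illustrative only -- verify these value names against your chart version's values.yaml
helm upgrade --install --create-namespace --namespace=external-dns external-dns external-dns/external-dns \
  --set "domainFilters[0]=immuta.us" \
  --set policy=upsert-only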
Create RDS Instance in EKS VPC
EKS_VPC_ID=$(aws ec2 describe-vpcs --filters "Name=tag:Name,Values=eksctl-${EKS_CLUSTER_NAME}-cluster/VPC" --query "Vpcs[*].VpcId" --output text)
EKS_SUBNET_IDS=$(aws ec2 describe-subnets --filters "Name=tag:alpha.eksctl.io/cluster-name,Values=${EKS_CLUSTER_NAME}" --query "Subnets[*].SubnetId" --output json)
EKS_SHARED_SG_ID=$(aws ec2 describe-security-groups --filters "Name=tag:Name,Values=eksctl-${EKS_CLUSTER_NAME}-cluster/ClusterSharedNodeSecurityGroup" --query "SecurityGroups[*].GroupId" --output text)
DB_STORAGE=150
DB_USER=postgres
DB_PASSWORD=immuta-lts-postgres-password
DB_INSTANCE_CLASS=db.m5.large
aws rds create-db-subnet-group --db-subnet-group-name ${EKS_CLUSTER_NAME}-group \
  --db-subnet-group-description "Subnet Group for ${EKS_CLUSTER_NAME} RDS" \
  --subnet-ids "${EKS_SUBNET_IDS}"
aws rds create-db-instance --db-instance-identifier ${EKS_CLUSTER_NAME} \
  --db-instance-class ${DB_INSTANCE_CLASS} \
  --engine postgres \
  --master-username ${DB_USER} \
  --master-user-password ${DB_PASSWORD} \
  --allocated-storage ${DB_STORAGE} \
  --vpc-security-group-ids ${EKS_SHARED_SG_ID} \
  --db-subnet-group-name ${EKS_CLUSTER_NAME}-group
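The RDS instance takes several minutes to provision. To block until it is ready before moving on:
# Waits until the DB instance reaches the "available" state
aws rds wait db-instance-available --db-instance-identifier ${EKS_CLUSTER_NAME}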
Create Bastion EC2 to configure RDS
Allow inbound SSH to the shared node security group. We recommend limiting the CIDR range to something narrower than in this example:
aws ec2 authorize-security-group-ingress --group-id ${EKS_SHARED_SG_ID} \
  --protocol tcp \
  --port 22 \
  --cidr 0.0.0.0/0
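For example, a narrower rule scoped to your current public IP (a sketch using AWS's checkip endpoint):
# Look up your current public IP and allow SSH from it only
MY_IP=$(curl -s https://checkip.amazonaws.com)
aws ec2 authorize-security-group-ingress --group-id ${EKS_SHARED_SG_ID} \
  --protocol tcp \
  --port 22 \
  --cidr ${MY_IP}/32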
Launch an instance using the latest Amazon Linux 2023 image
PUBLIC_SUBNET_ID=$(aws ec2 describe-subnets --filters "Name=tag:Name,Values=eksctl-${EKS_CLUSTER_NAME}*Public*" --query "Subnets[0].SubnetId" --output text)
SSH_KEY_NAME=immuta-ps-shared
BASTION_INSTANCE_TYPE=t3.micro
RDS_ENDPOINT=$(aws rds describe-db-instances --db-instance-identifier ${EKS_CLUSTER_NAME} --query "DBInstances[*].Endpoint.Address" --output text)
aws ec2 run-instances --image-id resolve:ssm:/aws/service/ami-amazon-linux-latest/al2023-ami-kernel-default-x86_64 \
  --instance-type ${BASTION_INSTANCE_TYPE} \
  --region ${AWS_REGION} \
  --key-name ${SSH_KEY_NAME} \
  --security-group-ids ${EKS_SHARED_SG_ID} \
  --subnet-id ${PUBLIC_SUBNET_ID} \
  --associate-public-ip-address \
  --tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=${EKS_CLUSTER_NAME}-bastion}]"
SSH into the EC2 instance and install the PostgreSQL client
BASTION_PUBLIC_IP=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=${EKS_CLUSTER_NAME}-bastion" --query "Reservations[*].Instances[*].PublicIpAddress" --output text)
RDS_ENDPOINT=$(aws rds describe-db-instances --db-instance-identifier ${EKS_CLUSTER_NAME} --query "DBInstances[*].Endpoint.Address" --output text)
echo $RDS_ENDPOINT
ssh -i ~/.ssh/immuta-ps-shared.pem ec2-user@${BASTION_PUBLIC_IP}
sudo yum install -y postgresql15
Connect to the RDS endpoint using the credentials set when creating the instance
psql postgresql://postgres:immuta-lts-postgres-password@immuta-lts.cfzynskvahpp.us-east-1.rds.amazonaws.com
Complete the steps in the Immuta documentation for first-time database setup.
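As a rough illustration only: first-time setup generally amounts to creating the roles and databases that the Helm values reference. The sketch below (run from the bastion) uses the role and database names that appear in the example values later in this article; the authoritative statements, including any required extensions and grants, are in the Immuta documentation.
# Hypothetical sketch -- defer to the Immuta first-time database setup docs
psql postgresql://postgres:immuta-lts-postgres-password@immuta-lts.cfzynskvahpp.us-east-1.rds.amazonaws.com <<'SQL'
CREATE ROLE immuta_temporal WITH LOGIN PASSWORD 'immuta_temporal';
CREATE ROLE immuta WITH LOGIN PASSWORD 'immuta';
-- On RDS the master user must be a member of a role to create a database owned by it
GRANT immuta_temporal TO postgres;
GRANT immuta TO postgres;
CREATE DATABASE immuta_temporal OWNER immuta_temporal;
CREATE DATABASE temporal OWNER immuta;
CREATE DATABASE temporal_visibility OWNER immuta;
SQL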
Create an Opensearch Domain
EKS_SUBNET_IDS_TEXT=$(aws ec2 describe-subnets --filters "Name=tag:alpha.eksctl.io/cluster-name,Values=${EKS_CLUSTER_NAME}" --query "Subnets[*].SubnetId" --output text | awk -v OFS="," '$1=$1')
PRIVATE_SUBNET_ID=$(aws ec2 describe-subnets --filters "Name=tag:Name,Values=eksctl-${EKS_CLUSTER_NAME}*Private*" --query "Subnets[0].SubnetId" --output text)
OPENSEARCH_INSTANCE_TYPE=m5.xlarge.search
OPENSEARCH_INSTANCE_COUNT=1
OPENSEARCH_VOLUME_SIZE=100
OPENSEARCH_USERNAME=immuta-audit-service
OPENSEARCH_PASSWORD=RandomPassword123!
aws opensearch create-domain --domain-name ${EKS_CLUSTER_NAME} \
  --cluster-config "InstanceType=${OPENSEARCH_INSTANCE_TYPE},InstanceCount=${OPENSEARCH_INSTANCE_COUNT}" \
  --ebs-options "EBSEnabled=true,VolumeType=gp2,VolumeSize=${OPENSEARCH_VOLUME_SIZE}" \
  --vpc-options "SecurityGroupIds=${EKS_SHARED_SG_ID},SubnetIds=${PRIVATE_SUBNET_ID}" \
  --advanced-security-options "Enabled=true,InternalUserDatabaseEnabled=true,MasterUserOptions={MasterUserName=${OPENSEARCH_USERNAME},MasterUserPassword=${OPENSEARCH_PASSWORD}}" \
  --node-to-node-encryption-options "Enabled=true" \
  --encryption-at-rest-options "Enabled=true" \
  --domain-endpoint-options "EnforceHTTPS=true" \
  --access-policies "{\"Version\": \"2012-10-17\",\"Statement\": [{\"Effect\": \"Allow\",\"Principal\": {\"AWS\": \"*\"},\"Action\": \"es:*\",\"Resource\": \"arn:aws:es:${AWS_REGION}:${AWS_ACCOUNT_ID}:domain/${EKS_CLUSTER_NAME}/*\"}]}"
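Domain creation typically takes a while (often ten minutes or more). You can poll until processing finishes and then retrieve the VPC endpoint the audit service will use:
# Processing should report false once the domain is ready
aws opensearch describe-domain --domain-name ${EKS_CLUSTER_NAME} --query "DomainStatus.Processing"
# For a VPC domain, the endpoint is exposed under Endpoints.vpc
aws opensearch describe-domain --domain-name ${EKS_CLUSTER_NAME} --query "DomainStatus.Endpoints.vpc" --output text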
Install Immuta
NAMESPACE=immuta
helm upgrade --install -n ${NAMESPACE} immuta \
  ./immuta-enterprise-2024.3.0.tgz -f immuta-values.yaml
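As an optional check, watch the release come up and confirm the ALB Ingress was created:
# Pods should reach Running/Completed; the ingress ADDRESS column shows the ALB hostname
kubectl get pods -n ${NAMESPACE}
kubectl get ingress -n ${NAMESPACE}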
Example values
global:
  imageRegistry: 231431240278.dkr.ecr.us-east-1.amazonaws.com
  imageTag: 2024.3.0
  imageRepositoryMap:
    stable/audit-service: immuta/audit-service
    stable/audit-export-cronjob: immuta/audit-export-cronjob
    stable/cache: immuta/cache
    stable/classify-service: immuta/classify-service
    stable/detect-temporal-worker: immuta/detect-temporal-worker
    stable/immuta-service: immuta/immuta-service
    stable/temporal-admin-tools: immuta/temporal-admin-tools
    stable/temporal-proxy: immuta/temporal-proxy
    stable/temporal-server: immuta/temporal-server
  postgresql:
    host: dbhostname.dns.com
    port: 5432
    username: immuta_temporal
    password: immuta_temporal
    ssl: true
audit:
  enabled: true
  postgresql:
    database: immuta_temporal
  config:
    elasticsearchEndpoint: https://my.elastic-cloud.com/
    elasticsearchUsername: temporal
    elasticsearchPassword: abcd1234
secure:
  extraConfig:
    publicImmutaUrl: https://temporal.immuta.us
  postgresql:
    database: immuta_temporal
  ingress:
    annotations:
      alb.ingress.kubernetes.io/backend-protocol: HTTP
      alb.ingress.kubernetes.io/group.name: immuta
      alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]'
      alb.ingress.kubernetes.io/scheme: internet-facing
      alb.ingress.kubernetes.io/ssl-redirect: "443"
      alb.ingress.kubernetes.io/target-type: ip
    ingressClassName: alb
    hostname: temporal.immuta.us
    tls: true
temporal:
  enabled: true
  server:
    additionalVolumes:
      - name: secret-with-certs
        secret:
          secretName: secret-with-certs
    extraVolumeMounts:
      - name: secret-with-certs
        mountPath: /certs/
    config:
      persistence:
        default:
          sql:
            database: temporal
            tls:
              caFile: /certs/global-bundle.pem
              enabled: true
        visibility:
          sql:
            database: temporal_visibility
            tls:
              caFile: /certs/global-bundle.pem
              enabled: true
            username: immuta
            password: immuta
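The temporal section above mounts a secret named secret-with-certs and expects the RDS CA bundle at /certs/global-bundle.pem. A sketch of creating that secret before running the install command above, assuming the immuta namespace used there:
# Download the RDS CA bundle and create the secret referenced in the example values
curl -O https://truststore.pki.rds.amazonaws.com/global/global-bundle.pem
kubectl create namespace ${NAMESPACE}   # skip if the namespace already exists
kubectl create secret generic secret-with-certs \
  --namespace ${NAMESPACE} \
  --from-file=global-bundle.pem=./global-bundle.pem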
Additional Annotations for ALB
All available annotations for the AWS Load Balancer Controller are listed in its documentation. It may be worth enabling deletion protection via:
alb.ingress.kubernetes.io/load-balancer-attributes: deletion_protection.enabled=true