in testbed/stack.ts [14:92]
constructor(scope: cdk.Construct, id: string = "testbed", props: TestbedProps) {
  super(scope, id, props)
  const vpc = new ec2.Vpc(this, id, {
    cidr: '10.0.0.0/16',
    maxAzs: 99, // CDK caps this at the number of AZs available in the region, so 99 means "use them all"
    subnetConfiguration: [
      {
        name: 'pub-subnet-1',
        subnetType: ec2.SubnetType.PUBLIC,
        cidrMask: 24,
      },
      {
        name: 'priv-subnet-1',
        subnetType: ec2.SubnetType.PRIVATE_WITH_NAT,
        cidrMask: 24,
      },
    ],
  });
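  // The Vpc construct above creates one /24 public and one /24 private subnet per AZ.
  // The loops below add much larger subnets, presumably backed by secondary CIDR blocks
  // attached to the VPC by the createPrivateSubnetForVPC helper (see the TODO below).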
  // TODO: revisit once https://github.com/aws/aws-cdk/issues/5927 is resolved.
  // Create private subnets for the KIT operator control-plane nodes/pods in all AZs,
  // picking CIDRs that do not overlap with those of the KIT operator data-plane nodes.
  for (let index = 0; index < cdk.Stack.of(this).availabilityZones.length; index++) {
    let privateSubnet = this.createPrivateSubnetForVPC(id, vpc, `10.${index + 20}.0.0/16`, cdk.Stack.of(this).availabilityZones[index])
    // Tag private subnets for the KIT control plane
    Tags.of(privateSubnet).add('kit/hostcluster', `${id}-controlplane`)
    let natSubnet = this.createPublicSubnetForVPC(id, vpc, `10.0.80.${index * 16}/28`, cdk.Stack.of(this).availabilityZones[index])
    this.configureNatProviderForPrivateSubnet(vpc, natSubnet, privateSubnet)
  }
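  // Resulting control-plane layout, derived from the loop above: one private /16 per AZ
  // (10.20.0.0/16, 10.21.0.0/16, ...), each paired with a public NAT /28 carved out of 10.0.80.0/24.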
  // index <= 8 yields nine additional /16 CIDRs, making a mega VPC for the DP nodes.
  for (let index = 0; index <= 8; index++) {
    let privateSubnet = this.createPrivateSubnetForVPC(id, vpc, `10.${index + 1}.0.0/16`, cdk.Stack.of(this).availabilityZones[index % cdk.Stack.of(this).availabilityZones.length])
    // Tag private subnets for the KIT DP
    Tags.of(privateSubnet).add('kit/hostcluster', `${id}-dataplane`)
    let natSubnet = this.createPublicSubnetForVPC(id, vpc, `10.0.64.${index * 16}/28`, cdk.Stack.of(this).availabilityZones[index % cdk.Stack.of(this).availabilityZones.length])
    this.configureNatProviderForPrivateSubnet(vpc, natSubnet, privateSubnet)
  }
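  // Resulting data-plane layout, derived from the loop above: private /16s 10.1.0.0/16 through
  // 10.9.0.0/16, spread round-robin across AZs, with NAT /28s at 10.0.64.0/28 ... 10.0.64.128/28
  // inside the primary 10.0.0.0/16.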
  const cluster = new eks.Cluster(this, 'cluster', {
    clusterName: id,
    vpc: vpc,
    role: new iam.Role(this, 'cluster-role', {
      assumedBy: new iam.ServicePrincipal('eks.amazonaws.com'),
      managedPolicies: [
        iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonEKSClusterPolicy'),
        iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonEKSVPCResourceController'),
      ],
    }),
    version: eks.KubernetesVersion.V1_19,
    defaultCapacity: 0, // no default capacity; the managed node group below provides the nodes
  })
  cluster.addNodegroupCapacity('node-group', {
    nodegroupName: 'default',
    subnets: vpc.selectSubnets({
      subnetType: ec2.SubnetType.PRIVATE_WITH_NAT,
    }),
    nodeRole: new iam.Role(this, 'node-role', {
      assumedBy: new iam.ServicePrincipal('ec2.amazonaws.com'),
      managedPolicies: [
        iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonEKSWorkerNodePolicy'),
        iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonEC2ContainerRegistryReadOnly'),
        iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonEKS_CNI_Policy'),
        iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonEKSVPCResourceController'), // allows us to use security groups for pods
      ],
    }),
  })
  // Service account used by Tekton workflows; mapping it into system:masters grants cluster-admin.
  let sa = cluster.addServiceAccount('test-executor', { name: 'test-executor' })
  sa.role.addManagedPolicy({ managedPolicyArn: 'arn:aws:iam::aws:policy/AdministratorAccess' })
  cluster.awsAuth.addRoleMapping(sa.role, {
    username: sa.role.roleName,
    groups: ['system:masters'],
  })
  new Addons(this, `${id}-addons`, { cluster: cluster, repositories: props.repositories })
  // Tag all resources in the stack for discovery by Karpenter
  Tags.of(this).add(`kubernetes.io/cluster/${id}`, "owned")
}
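
For orientation, here is a minimal sketch of a CDK v1 entrypoint that could synthesize this stack. The Testbed class name, the testbed/app.ts path, and the empty repositories value are illustrative assumptions, not taken from the excerpt above:

in testbed/app.ts (hypothetical)
import * as cdk from '@aws-cdk/core'
import { Testbed } from './stack' // assumed export name for the class containing the constructor above

const app = new cdk.App()
// id defaults to "testbed" in the constructor, but TestbedProps must still be passed explicitly.
new Testbed(app, 'testbed', { repositories: [] }) // repositories: placeholder value
app.synth()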