in pkg/nitro_enclaves_device_plugin/device_plugin.go [251:279]
func NewNitroEnclavesDevicePlugin(config *config.PluginConfig) *NitroEnclavesDevicePlugin {
    if err := config.Validate(); err != nil {
        glog.Errorf("invalid plugin config: %v", err)
    }
    glog.V(0).Infof("Initializing Nitro Enclaves device plugin with the following params: %v", config)
    // The devs slice determines the pluginapi.ListAndWatchResponse, which tells the kubelet how many
    // "aws.ec2.nitro/nitro_enclaves" devices are available/allocatable on a k8s worker node.
    // The number of devices in this context does not represent the number of "nitro_enclaves" device
    // files present on the host; it is the number of pods that can share the same host device file.
    // The single host device file "nitro_enclaves" can be mounted into multiple pods, each of which
    // can use it to run an enclave. This lets the scheduler place two or more pods that require the
    // nitro_enclaves device on the same k8s node/EC2 instance.
    devs := []*pluginapi.Device{}
    for i := 0; i < config.MaxEnclavesPerNode; i++ {
        devs = append(devs, &pluginapi.Device{
            ID:     generateDeviceID(deviceName),
            Health: pluginapi.Healthy,
        })
    }
    glog.V(0).Infof("Enclave devices added: %v", config.MaxEnclavesPerNode)
    return &NitroEnclavesDevicePlugin{
        dev:    devs,
        pdef:   &NEPluginDefinitions{},
        stop:   make(chan interface{}),
        health: make(chan *pluginapi.Device),
    }
}
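
// A minimal sketch (an assumption for illustration, not this file's actual
// implementation) of how the devs slice built above typically reaches the
// kubelet: ListAndWatch sends the full device list once on connection and
// re-sends it whenever a device is reported unhealthy on the health channel.
// The dev, stop, and health fields mirror the struct literal in the
// constructor; the handler body itself is hypothetical.
func (p *NitroEnclavesDevicePlugin) ListAndWatch(_ *pluginapi.Empty, s pluginapi.DevicePlugin_ListAndWatchServer) error {
    // Advertise all MaxEnclavesPerNode virtual devices up front so the kubelet
    // can schedule that many pods sharing the single nitro_enclaves device file.
    if err := s.Send(&pluginapi.ListAndWatchResponse{Devices: p.dev}); err != nil {
        return err
    }
    for {
        select {
        case <-p.stop:
            return nil
        case d := <-p.health:
            // Mark the reported device unhealthy and push the updated list.
            d.Health = pluginapi.Unhealthy
            if err := s.Send(&pluginapi.ListAndWatchResponse{Devices: p.dev}); err != nil {
                return err
            }
        }
    }
}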