in pkg/authorizer/token_retriever.go [34:72]
// ARMTokenForBinding exchanges the binding's configured identity for an Azure
// Resource Manager (ARM) access token scoped to the cloud configured on the
// binding's ACR spec.
//
// For managed identity, the token is fetched with the client ID or resource ID
// from the spec; when neither is set, the system-assigned identity is used.
// For workload identity, the supplied Kubernetes service account token is
// presented as a client assertion against the cloud's AAD authority.
//
// Returns an error if no auth method is configured on the spec, if the
// credential cannot be constructed, or if the token request itself fails.
func ARMTokenForBinding(ctx context.Context, spec msiacrpullv1beta2.AcrPullBindingSpec, tenantId, clientId, serviceAccountToken string) (azcore.AccessToken, error) {
	env := environment(spec.ACR.Environment, spec.ACR.CloudConfig)
	var credential azcore.TokenCredential
	var err error
	switch {
	case spec.Auth.ManagedIdentity != nil:
		// Select the user-assigned identity by client ID or resource ID;
		// leaving id nil selects the system-assigned identity.
		var id azidentity.ManagedIDKind
		if spec.Auth.ManagedIdentity.ClientID != "" {
			id = azidentity.ClientID(spec.Auth.ManagedIdentity.ClientID)
		} else if spec.Auth.ManagedIdentity.ResourceID != "" {
			id = azidentity.ResourceID(spec.Auth.ManagedIdentity.ResourceID)
		}
		credential, err = azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{ID: id})
	case spec.Auth.WorkloadIdentity != nil:
		// n.b. the built-in azidentity.WorkloadIdentityCredential assumes we're loading a service account token
		// from a file in a Pod, where the Kubernetes API server is rotating it, etc. Unfortunately that is not
		// our use-case here, and we certainly don't want to centralize every service account token we ever mint
		// in the filesystem of this controller, so we can use the lower-level client assertion credential instead.
		credential, err = azidentity.NewClientAssertionCredential(tenantId, clientId, func(ctx context.Context) (string, error) {
			return serviceAccountToken, nil
		}, &azidentity.ClientAssertionCredentialOptions{
			ClientOptions: azcore.ClientOptions{
				Cloud: cloud.Configuration{
					ActiveDirectoryAuthorityHost: env.ActiveDirectoryAuthorityHost,
				},
			},
			DisableInstanceDiscovery: true,
		})
	default:
		// CRD validation should make this unreachable, but return an error
		// rather than panicking: a panic here would crash the entire
		// controller process over a single malformed binding.
		return azcore.AccessToken{}, fmt.Errorf("programmer error: ACRPullBinding.Spec.Auth has no method: %#v", spec.Auth)
	}
	if err != nil {
		return azcore.AccessToken{}, fmt.Errorf("failed to build credential: %w", err)
	}
	// Request an ARM-audience token for the resolved cloud environment.
	return credential.GetToken(ctx, policy.TokenRequestOptions{Scopes: []string{env.Services[cloud.ResourceManager].Audience + "/.default"}})
}