in agent/main.go [56:175]
func main() {
	var (
		port    int
		debug   bool
		version bool
	)

	flag.IntVar(&port, "port", defaultPort, "Vsock port to listen to")
	flag.BoolVar(&debug, "debug", false, "Turn on debug mode")
	flag.BoolVar(&version, "version", false, "Show the version")
	flag.Parse()

	if debug {
		logrus.SetLevel(logrus.DebugLevel)
	}

	if version {
		showVersion()
		return
	}
	signals := make(chan os.Signal, 32)
	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, unix.SIGCHLD)

	shimCtx, shimCancel := context.WithCancel(namespaces.WithNamespace(context.Background(), defaultNamespace))
	group, shimCtx := errgroup.WithContext(shimCtx)

	// Ensure this process is a subreaper or else containers created via runc will
	// not be its children.
	if err := system.SetSubreaper(enableSubreaper); err != nil {
		log.G(shimCtx).WithError(err).Fatal("failed to set shim as subreaper")
	}
	// Create a runc task service that can be exposed via ttrpc.
	// This can be wrapped to add missing functionality (like
	// running multiple containers inside one Firecracker VM).
	log.G(shimCtx).Info("creating task service")

	server, err := ttrpc.NewServer()
	if err != nil {
		log.G(shimCtx).WithError(err).Fatal("failed to create ttrpc server")
	}
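	// Container events published by the task service flow through this exchange;
	// the eventbridge getter service exposes them over ttrpc so the host side can
	// retrieve them.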
	eventExchange := &event.ExchangeCloser{Exchange: exchange.NewExchange()}
	eventbridge.RegisterGetterService(server, eventbridge.NewGetterService(shimCtx, eventExchange))

	taskService, err := NewTaskService(shimCtx, shimCancel, eventExchange)
	if err != nil {
		log.G(shimCtx).WithError(err).Fatal("failed to create task service")
	}
	taskAPI.RegisterTaskService(server, taskService)
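	// The DriveMounter service handles requests from the host to mount block
	// devices attached to the VM at paths inside the guest.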
	dh, err := newDriveHandler(blockPath, drivePath)
	if err != nil {
		log.G(shimCtx).WithError(err).Fatal("failed to create drive handler")
	}
	drivemount.RegisterDriveMounterService(server, dh)
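	// The IOProxy service lets the host side attach to the stdio streams of tasks
	// managed by the runc service (it shares the same task manager).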
	ioproxy.RegisterIOProxyService(server, &ioProxyHandler{
		runcService: taskService.runcService,
		taskManager: taskService.taskManager,
	})

	// Run ttrpc over vsock
	vsockLogger := log.G(shimCtx).WithField("port", port)
	listener, err := vm.VSockListener(shimCtx, vsockLogger, uint32(port))
	if err != nil {
		log.G(shimCtx).WithError(err).Fatalf("failed to listen to vsock on port %d", port)
	}
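	// Both goroutines below run in the same errgroup: a server error cancels
	// shimCtx and unwinds the signal handler, while the signal handler's deferred
	// Shutdown stops the server when it exits.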
	group.Go(func() error {
		err := server.Serve(shimCtx, listener)
		if err == ttrpc.ErrServerClosed {
			// Calling server.Shutdown() from another goroutine will cause ErrServerClosed, which is fine.
			return nil
		}
		return err
	})
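	// Reap exited children on SIGCHLD; treat SIGINT/SIGTERM as a shutdown request
	// by canceling shimCtx.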
	group.Go(func() error {
		defer func() {
			log.G(shimCtx).Info("stopping ttrpc server")

			// Use context.Background() instead of the canceled shimCtx, which should allow shutdown to
			// flush any pending responses before actually stopping the server. We're choosing not to
			// use any timeouts here, deferring instead to the higher-level runtime shim's shutdown
			// timeouts. If we get blocked here, the runtime shim will eventually force the VM to shut
			// down after its timeouts.
			if err := server.Shutdown(context.Background()); err != nil {
				log.G(shimCtx).WithError(err).Errorf("failed to close ttrpc server")
			}
		}()

		for {
			select {
			case s := <-signals:
				switch s {
				case unix.SIGCHLD:
					if err := reaper.Reap(); err != nil {
						log.G(shimCtx).WithError(err).Error("reap error")
					}
				case syscall.SIGINT, syscall.SIGTERM:
					shimCancel()
					return nil
				}
			case <-shimCtx.Done():
				return shimCtx.Err()
			}
		}
	})
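	// Wait for both goroutines; context.Canceled is the normal shutdown path, any
	// other error is unexpected and escalated to a panic.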
	err = group.Wait()

	log.G(shimCtx).Info("shutting down agent")
	if err != nil && err != context.Canceled {
		log.G(shimCtx).WithError(err).Error("shim error")
		panic(err)
	}
}