def __init__()

in azure_functions_worker/dispatcher.py


    def __init__(self, loop: BaseEventLoop, host: str, port: int,
                 worker_id: str, request_id: str,
                 grpc_connect_timeout: float,
                 grpc_max_msg_len: int = -1) -> None:
        self._loop = loop
        self._host = host
        self._port = port
        self._request_id = request_id
        self._worker_id = worker_id
        self._function_data_cache_enabled = False
        self._functions = functions.Registry()
        self._shmem_mgr = SharedMemoryManager()
        self._old_task_factory = None

        # Used to store function metadata results and exceptions
        self._function_metadata_result = None
        self._function_metadata_exception = None

        # Used for checking if Application Insights (Azure Monitor) is enabled
        self._azure_monitor_available = False
        # Used for checking if OpenTelemetry is enabled
        self._otel_libs_available = False
        self._context_api = None
        self._trace_context_propagator = None

        # We allow the customer to change synchronous thread pool max worker
        # count by setting the PYTHON_THREADPOOL_THREAD_COUNT app setting.
        #   For 3.[6|7|8] the default value is 1.
        #   For 3.9, we don't set this value by default, but we honor the
        #     incoming app setting.
        self._sync_call_tp: concurrent.futures.Executor = (
            self._create_sync_call_tp(self._get_sync_tp_max_workers())
        )

        self._grpc_connect_timeout: float = grpc_connect_timeout
        # Set to -1 by default to remove the gRPC message size limit
        self._grpc_max_msg_len: int = grpc_max_msg_len
        self._grpc_resp_queue: queue.Queue = queue.Queue()
        self._grpc_connected_fut = loop.create_future()
        self._grpc_thread: threading.Thread = threading.Thread(
            name='grpc-thread', target=self.__poll_grpc)