in quickstarts/whereami/app.py [0:0]
def grpc_serve():
    """Configure and run the gRPC server, blocking until termination.

    Wires up the Whereami servicer, a non-blocking health-check servicer,
    server reflection, and a Prometheus metrics endpoint, then binds the
    serving port and parks the main thread.
    """
    # HACK: the extra 5 workers pad the pool to dodge thread starvation;
    # a proper fix is still pending.
    worker_count = multiprocessing.cpu_count() + 5
    srv = grpc.server(
        futures.ThreadPoolExecutor(max_workers=worker_count),
        interceptors=(PromServerInterceptor(),))  # Prometheus metrics interceptor

    # Register the application servicer.
    whereami_pb2_grpc.add_WhereamiServicer_to_server(WhereamigRPC(), srv)

    # Health checks run on their own single-worker pool in non-blocking
    # mode so they cannot be starved by application traffic.
    health_servicer = health.HealthServicer(
        experimental_non_blocking=True,
        experimental_thread_pool=futures.ThreadPoolExecutor(max_workers=1))
    health_pb2_grpc.add_HealthServicer_to_server(health_servicer, srv)

    # Every service we expose via reflection: the app's own services plus
    # the reflection and health services themselves.
    app_services = tuple(
        svc.full_name
        for svc in whereami_pb2.DESCRIPTOR.services_by_name.values())
    services = app_services + (reflection.SERVICE_NAME, health.SERVICE_NAME)

    # Expose metrics at host:$grpc_metrics_port/metrics (flask server).
    start_http_server(port=grpc_metrics_port)

    # Enable reflection, bind the serving address, and go live.
    reflection.enable_server_reflection(services, srv)
    srv.add_insecure_port(host_ip + ':' + str(grpc_serving_port))
    srv.start()

    # Flag every service — plus the empty-string overall-server entry —
    # as SERVING.
    overall_server_health = ""
    for svc_name in services + (overall_server_health,):
        health_servicer.set(svc_name, health_pb2.HealthCheckResponse.SERVING)

    # Park the main application thread.
    srv.wait_for_termination()