fbthrift: An Analysis of ThriftServer

2020-09-01  zlcook

Usage Example

GraphDaemon.cpp

/*
DEFINE_int32(num_netio_threads, 0,
                "Number of networking threads, 0 for number of physical CPU cores");
DEFINE_int32(num_accept_threads, 1, "Number of threads to accept incoming connections");
DEFINE_int32(num_worker_threads, 0, "Number of threads to execute user queries");
DEFINE_bool(reuse_port, true, "Whether to turn on the SO_REUSEPORT option");
DEFINE_int32(listen_backlog, 1024, "Backlog of the listen socket");
*/

    auto threadFactory = std::make_shared<folly::NamedThreadFactory>("graph-netio");
    auto ioThreadPool = std::make_shared<folly::IOThreadPoolExecutor>(
                            FLAGS_num_netio_threads, std::move(threadFactory));
    gServer = std::make_unique<apache::thrift::ThriftServer>();
    gServer->setIOThreadPool(ioThreadPool);

    auto interface = std::make_shared<GraphService>();
    status = interface->init(ioThreadPool);
    if (!status.ok()) {
        LOG(ERROR) << status;
        return EXIT_FAILURE;
    }

    gServer->setInterface(std::move(interface));
    gServer->setAddress(localIP, FLAGS_port);
    // fbthrift-2018.08.20 always enabled SO_REUSEPORT once setReusePort() was
    // called, whatever the argument; this was fixed in a later version.
    if (FLAGS_reuse_port) {
        gServer->setReusePort(FLAGS_reuse_port);
    }
    gServer->setIdleTimeout(std::chrono::seconds(FLAGS_client_idle_timeout_secs));
    gServer->setNumCPUWorkerThreads(FLAGS_num_worker_threads);
    gServer->setCPUWorkerThreadName("executor");
    gServer->setNumAcceptThreads(FLAGS_num_accept_threads);
    gServer->setListenBacklog(FLAGS_listen_backlog);
    gServer->setThreadStackSizeMB(5);

    FLOG_INFO("Starting nebula-graphd on %s:%d\n", localIP.c_str(), FLAGS_port);
    try {
        gServer->serve();  // Blocking wait until shut down via gServer->stop()
    } catch (const std::exception &e) {
        FLOG_ERROR("Exception thrown while starting the RPC server: %s", e.what());
        return EXIT_FAILURE;
    }
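
serve() blocks until stop() is called from elsewhere. The article does not show that part, so here is a minimal sketch (my addition, not from GraphDaemon.cpp) of one common way to trigger shutdown, assuming gServer is a global as above. Note that ThriftServer::stop() is not guaranteed to be async-signal-safe, so a production daemon would typically defer the call rather than make it directly in the handler.

    #include <csignal>

    // Sketch: unblock gServer->serve() on SIGINT/SIGTERM.
    static void signalHandler(int sig) {
        (void)sig;
        if (gServer != nullptr) {
            gServer->stop();  // makes the blocking serve() call return
        }
    }

    // Installed before gServer->serve() is called:
    std::signal(SIGTERM, signalHandler);
    std::signal(SIGINT, signalHandler);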

The Worker Model in Thrift

Relevant option settings in ThriftServer.h

Optional: instead of calling setInterface(), a processor factory can be set explicitly:
auto handler = std::make_shared<ServiceHandler>(kvStore);
auto proc_factory = std::make_shared<ThriftServerAsyncProcessorFactory<ServiceHandler>>(
                           handler);
server->setProcessorFactory(proc_factory);
Inside serve(): based on the value set by setNumCPUWorkerThreads() (retrieved via
getNumCPUWorkerThreads()), the server creates a ThreadManager and then calls setThreadManager():

void ThriftServer::setupThreadManager() {
  if (!threadManager_) {
    std::shared_ptr<apache::thrift::concurrency::ThreadManager> threadManager(
        PriorityThreadManager::newPriorityThreadManager(
            getNumCPUWorkerThreads(), true /*stats*/));
    threadManager->enableCodel(getEnableCodel());
    // If a thread factory has been specified, use it.
    if (threadFactory_) {
      threadManager->threadFactory(threadFactory_);
    }
    auto poolThreadName = getCPUWorkerThreadName();
    if (!poolThreadName.empty()) {
      threadManager->setNamePrefix(poolThreadName);
    }
    threadManager->start();
    setThreadManager(threadManager);
  }
}
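
Note the if (!threadManager_) guard above: setupThreadManager() only builds its default PriorityThreadManager when no ThreadManager has been installed yet. That means the caller can supply its own before serve() runs. A sketch of that, reusing only the calls already shown above (the thread count and name prefix mirror the flags used earlier):

    auto threadManager = apache::thrift::concurrency::PriorityThreadManager::
        newPriorityThreadManager(FLAGS_num_worker_threads, true /*stats*/);
    threadManager->setNamePrefix("executor");
    threadManager->start();
    gServer->setThreadManager(threadManager);  // setupThreadManager() now becomes a no-op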
// The number of threads to execute user queries, 0 for # of CPU cores
gServer->setNumCPUWorkerThreads(FLAGS_num_worker_threads);
gServer->setCPUWorkerThreadName("executor");

// The number of threads to accept incoming connections
gServer->setNumAcceptThreads(FLAGS_num_accept_threads);
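
Both flag descriptions say 0 means the number of CPU cores, but the thread pools ultimately need a concrete count. A sketch of how the caller might resolve that convention up front (my assumption; the article does not show this resolution step):

    #include <thread>

    // Resolve "0 means number of CPU cores" before sizing the pools.
    if (FLAGS_num_worker_threads == 0) {
        FLAGS_num_worker_threads = static_cast<int32_t>(std::thread::hardware_concurrency());
    }
    if (FLAGS_num_netio_threads == 0) {
        FLAGS_num_netio_threads = static_cast<int32_t>(std::thread::hardware_concurrency());
    }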