From ef5d0b5e97ec8e6fa395d377b09aa7755e345b4f Mon Sep 17 00:00:00 2001
From: Dimitry Andric <dim@FreeBSD.org>
Date: Mon, 18 Dec 2017 20:12:36 +0000
Subject: Vendor import of lldb trunk r321017:
  https://llvm.org/svn/llvm-project/lldb/trunk@321017

---
 source/Host/common/TaskPool.cpp | 120 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 120 insertions(+)
 create mode 100644 source/Host/common/TaskPool.cpp

(limited to 'source/Host/common/TaskPool.cpp')

diff --git a/source/Host/common/TaskPool.cpp b/source/Host/common/TaskPool.cpp
new file mode 100644
index 000000000000..156a07942998
--- /dev/null
+++ b/source/Host/common/TaskPool.cpp
@@ -0,0 +1,120 @@
+//===--------------------- TaskPool.cpp -------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lldb/Host/TaskPool.h"
+#include "lldb/Host/ThreadLauncher.h"
+
+#include <cstdint> // for uint32_t
+#include <queue>   // for queue
+#include <thread>  // for thread
+
+namespace lldb_private {
+
+namespace {
+class TaskPoolImpl {
+public:
+  static TaskPoolImpl &GetInstance();
+
+  void AddTask(std::function<void()> &&task_fn);
+
+private:
+  TaskPoolImpl();
+
+  static lldb::thread_result_t WorkerPtr(void *pool);
+
+  static void Worker(TaskPoolImpl *pool);
+
+  std::queue<std::function<void()>> m_tasks;
+  std::mutex m_tasks_mutex;
+  uint32_t m_thread_count;
+};
+
+} // end of anonymous namespace
+
+TaskPoolImpl &TaskPoolImpl::GetInstance() {
+  static TaskPoolImpl g_task_pool_impl;
+  return g_task_pool_impl;
+}
+
+void TaskPool::AddTaskImpl(std::function<void()> &&task_fn) {
+  TaskPoolImpl::GetInstance().AddTask(std::move(task_fn));
+}
+
+TaskPoolImpl::TaskPoolImpl() : m_thread_count(0) {}
+
+unsigned GetHardwareConcurrencyHint() {
+  // std::thread::hardware_concurrency may return 0 if the value is not well
+  // defined or not computable.
+  static const unsigned g_hardware_concurrency =
+      std::max(1u, std::thread::hardware_concurrency());
+  return g_hardware_concurrency;
+}
+
+void TaskPoolImpl::AddTask(std::function<void()> &&task_fn) {
+  const size_t min_stack_size = 8 * 1024 * 1024;
+
+  std::unique_lock<std::mutex> lock(m_tasks_mutex);
+  m_tasks.emplace(std::move(task_fn));
+  if (m_thread_count < GetHardwareConcurrencyHint()) {
+    m_thread_count++;
+    // Note that this detach call needs to happen with the m_tasks_mutex held.
+    // This prevents the thread from exiting prematurely and triggering a linux
+    // libc bug (https://sourceware.org/bugzilla/show_bug.cgi?id=19951).
+    lldb_private::ThreadLauncher::LaunchThread("task-pool.worker", WorkerPtr,
+                                               this, nullptr, min_stack_size)
+        .Release();
+  }
+}
+
+lldb::thread_result_t TaskPoolImpl::WorkerPtr(void *pool) {
+  Worker((TaskPoolImpl *)pool);
+  return 0;
+}
+
+void TaskPoolImpl::Worker(TaskPoolImpl *pool) {
+  while (true) {
+    std::unique_lock<std::mutex> lock(pool->m_tasks_mutex);
+    if (pool->m_tasks.empty()) {
+      pool->m_thread_count--;
+      break;
+    }
+
+    std::function<void()> f = std::move(pool->m_tasks.front());
+    pool->m_tasks.pop();
+    lock.unlock();
+
+    f();
+  }
+}
+
+void TaskMapOverInt(size_t begin, size_t end,
+                    const llvm::function_ref<void(size_t)> &func) {
+  const size_t num_workers =
+      std::min<size_t>(end, GetHardwareConcurrencyHint());
+  std::atomic<size_t> idx{begin};
+
+  auto wrapper = [&idx, end, &func]() {
+    while (true) {
+      size_t i = idx.fetch_add(1);
+      if (i >= end)
+        break;
+      func(i);
+    }
+  };
+
+  std::vector<std::future<void>> futures;
+  futures.reserve(num_workers);
+  for (size_t i = 0; i < num_workers; i++)
+    futures.push_back(TaskPool::AddTask(wrapper));
+  for (size_t i = 0; i < num_workers; i++)
+    futures[i].wait();
+}
+
+} // namespace lldb_private
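
For context, a minimal caller-side sketch of the interface this file implements
(not part of the patch). It assumes the declarations in lldb/Host/TaskPool.h at
this revision, namely that TaskPool::AddTask returns a std::future for the
task's result and that TaskMapOverInt blocks until every index has been
processed; the function ComputeSquares is hypothetical.

// Hypothetical caller, for illustration only; assumes the TaskPool.h
// interface imported by this patch (AddTask returning a std::future,
// TaskMapOverInt taking an llvm::function_ref<void(size_t)>).
#include "lldb/Host/TaskPool.h"

#include <cassert>
#include <future> // for std::future
#include <vector>

static void ComputeSquares() {
  std::vector<int> input{1, 2, 3, 4};
  std::vector<int> output(input.size());

  // Enqueue a single task; the returned future yields the lambda's result.
  std::future<int> sum = lldb_private::TaskPool::AddTask([&input] {
    int total = 0;
    for (int v : input)
      total += v;
    return total;
  });
  assert(sum.get() == 10);

  // Fan work out over an index range; each worker pulls the next index from
  // a shared atomic counter, and the call returns once all indices are done.
  lldb_private::TaskMapOverInt(0, input.size(), [&](size_t i) {
    output[i] = input[i] * input[i];
  });
}

Note the design choice visible in the patch itself: worker threads are spawned
lazily in AddTask, capped at GetHardwareConcurrencyHint(), and each worker
exits as soon as it finds the queue empty, so an idle pool holds no threads.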