Diffstat (limited to 'contrib/llvm-project/llvm/lib/Support/Parallel.cpp')
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/Parallel.cpp  113
1 file changed, 48 insertions(+), 65 deletions(-)
diff --git a/contrib/llvm-project/llvm/lib/Support/Parallel.cpp b/contrib/llvm-project/llvm/lib/Support/Parallel.cpp
index 523665d14b02..621bccbf2a4c 100644
--- a/contrib/llvm-project/llvm/lib/Support/Parallel.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/Parallel.cpp
@@ -8,17 +8,14 @@
#include "llvm/Support/Parallel.h"
#include "llvm/Config/llvm-config.h"
-#include "llvm/Support/ManagedStatic.h"
#if LLVM_ENABLE_THREADS
#include "llvm/Support/Threading.h"
#include <atomic>
-#include <future>
#include <stack>
#include <thread>
-#include <vector>
namespace llvm {
namespace parallel {
@@ -35,57 +32,62 @@ public:
static Executor *getDefaultExecutor();
};
+#if defined(_MSC_VER)
+/// An Executor that runs tasks via ConcRT.
+class ConcRTExecutor : public Executor {
+ struct Taskish {
+ Taskish(std::function<void()> Task) : Task(Task) {}
+
+ std::function<void()> Task;
+
+ static void run(void *P) {
+ Taskish *Self = static_cast<Taskish *>(P);
+ Self->Task();
+ concurrency::Free(Self);
+ }
+ };
+
+public:
+ virtual void add(std::function<void()> F) {
+ Concurrency::CurrentScheduler::ScheduleTask(
+ Taskish::run, new (concurrency::Alloc(sizeof(Taskish))) Taskish(F));
+ }
+};
+
+Executor *Executor::getDefaultExecutor() {
+ static ConcRTExecutor exec;
+ return &exec;
+}
+
+#else
/// An implementation of an Executor that runs closures on a thread pool
/// in filo order.
class ThreadPoolExecutor : public Executor {
public:
- explicit ThreadPoolExecutor(unsigned ThreadCount = hardware_concurrency()) {
+ explicit ThreadPoolExecutor(unsigned ThreadCount = hardware_concurrency())
+ : Done(ThreadCount) {
// Spawn all but one of the threads in another thread as spawning threads
// can take a while.
- Threads.reserve(ThreadCount);
- Threads.resize(1);
- std::lock_guard<std::mutex> Lock(Mutex);
- Threads[0] = std::thread([&, ThreadCount] {
- for (unsigned i = 1; i < ThreadCount; ++i) {
- Threads.emplace_back([=] { work(); });
- if (Stop)
- break;
+ std::thread([&, ThreadCount] {
+ for (size_t i = 1; i < ThreadCount; ++i) {
+ std::thread([=] { work(); }).detach();
}
- ThreadsCreated.set_value();
work();
- });
- }
-
- void stop() {
- {
- std::lock_guard<std::mutex> Lock(Mutex);
- if (Stop)
- return;
- Stop = true;
- }
- Cond.notify_all();
- ThreadsCreated.get_future().wait();
+ }).detach();
}
~ThreadPoolExecutor() override {
- stop();
- std::thread::id CurrentThreadId = std::this_thread::get_id();
- for (std::thread &T : Threads)
- if (T.get_id() == CurrentThreadId)
- T.detach();
- else
- T.join();
+ std::unique_lock<std::mutex> Lock(Mutex);
+ Stop = true;
+ Lock.unlock();
+ Cond.notify_all();
+ // Wait for ~Latch.
}
- struct Deleter {
- static void call(void *Ptr) { ((ThreadPoolExecutor *)Ptr)->stop(); }
- };
-
void add(std::function<void()> F) override {
- {
- std::lock_guard<std::mutex> Lock(Mutex);
- WorkStack.push(F);
- }
+ std::unique_lock<std::mutex> Lock(Mutex);
+ WorkStack.push(F);
+ Lock.unlock();
Cond.notify_one();
}
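
In this version the constructor uses a detached helper thread that spawns ThreadCount-1 detached workers and then runs work() itself, while add() pushes a closure onto WorkStack and signals Cond. Only the tail of the matching work() loop appears in the next hunk; the following is a minimal sketch of what the full loop likely looks like, reconstructed from the surrounding context (WorkStack, Cond, Stop, and Done are the members shown in this diff; the exact wait predicate is an assumption):

    void work() {
      while (true) {
        std::unique_lock<std::mutex> Lock(Mutex);
        // Sleep until shutdown is requested or a task is available.
        Cond.wait(Lock, [&] { return Stop || !WorkStack.empty(); });
        if (Stop)
          break;
        // FILO: run the most recently pushed closure first.
        auto Task = WorkStack.top();
        WorkStack.pop();
        Lock.unlock();
        Task();
      }
      // Tell the destructor this worker has fully exited.
      Done.dec();
    }
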
@@ -101,41 +103,22 @@ private:
Lock.unlock();
Task();
}
+ Done.dec();
}
std::atomic<bool> Stop{false};
std::stack<std::function<void()>> WorkStack;
std::mutex Mutex;
std::condition_variable Cond;
- std::promise<void> ThreadsCreated;
- std::vector<std::thread> Threads;
+ parallel::detail::Latch Done;
};
Executor *Executor::getDefaultExecutor() {
- // The ManagedStatic enables the ThreadPoolExecutor to be stopped via
- // llvm_shutdown() which allows a "clean" fast exit, e.g. via _exit(). This
- // stops the thread pool and waits for any worker thread creation to complete
- // but does not wait for the threads to finish. The wait for worker thread
- // creation to complete is important as it prevents intermittent crashes on
- // Windows due to a race condition between thread creation and process exit.
- //
- // The ThreadPoolExecutor will only be destroyed when the static unique_ptr to
- // it is destroyed, i.e. in a normal full exit. The ThreadPoolExecutor
- // destructor ensures it has been stopped and waits for worker threads to
- // finish. The wait is important as it prevents intermittent crashes on
- // Windows when the process is doing a full exit.
- //
- // The Windows crashes appear to only occur with the MSVC static runtimes and
- // are more frequent with the debug static runtime.
- //
- // This also prevents intermittent deadlocks on exit with the MinGW runtime.
- static ManagedStatic<ThreadPoolExecutor, object_creator<ThreadPoolExecutor>,
- ThreadPoolExecutor::Deleter>
- ManagedExec;
- static std::unique_ptr<ThreadPoolExecutor> Exec(&(*ManagedExec));
- return Exec.get();
+ static ThreadPoolExecutor exec;
+ return &exec;
+}
+#endif
}
-} // namespace
static std::atomic<int> TaskGroupInstances;
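
The rewritten destructor relies on parallel::detail::Latch (declared in llvm/Support/Parallel.h, not part of this diff): Done is constructed with ThreadCount, each worker calls Done.dec() as it exits, and the latch's destructor blocks until the count reaches zero, which is what the "// Wait for ~Latch." comment refers to. A minimal sketch of such a latch, assuming the inc()/dec()/sync() interface implied by the usage above:

    // Requires <mutex>, <condition_variable>, <cstdint>.
    class Latch {
      uint32_t Count;
      mutable std::mutex Mutex;
      mutable std::condition_variable Cond;

    public:
      explicit Latch(uint32_t Count = 0) : Count(Count) {}
      // Destroying the latch waits for the count to drop to zero,
      // i.e. for every registered worker to have called dec().
      ~Latch() { sync(); }

      void inc() {
        std::lock_guard<std::mutex> lock(Mutex);
        ++Count;
      }

      void dec() {
        std::lock_guard<std::mutex> lock(Mutex);
        if (--Count == 0)
          Cond.notify_all();
      }

      void sync() const {
        std::unique_lock<std::mutex> lock(Mutex);
        Cond.wait(lock, [&] { return Count == 0; });
      }
    };
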