erebus

view liberebus/src/threadpool.cc @ 32:b1fc96c71bcc

- Lambert BRDF importance sampling - UI + command-line arguments - font rendering for showing status/progress
author John Tsiombikas <nuclear@member.fsf.org>
date Sat, 07 Jun 2014 13:36:36 +0300
parents 53a98c148bf8
children d15ee526daa6
line source
1 #include <algorithm>
2 #include <chrono>
3 #include "threadpool.h"
5 using namespace std::chrono;
7 ThreadPool::ThreadPool(int num_threads)
8 {
9 quit = false;
10 qsize = 0;
11 nactive = 0;
13 if(num_threads == -1) {
14 num_threads = std::thread::hardware_concurrency();
15 }
17 printf("creating thread pool with %d threads\n", num_threads);
19 thread = new std::thread[num_threads];
20 for(int i=0; i<num_threads; i++) {
21 thread[i] = std::thread(&ThreadPool::thread_func, this);
23 #ifdef _MSC_VER
24 /* detach the thread to avoid having to join them in the destructor, which
25 * causes a deadlock in msvc implementation when called after main returns
26 */
27 thread[i].detach();
28 #endif
29 }
30 this->num_threads = num_threads;
31 }
33 ThreadPool::~ThreadPool()
34 {
35 #ifdef _MSC_VER
36 workq_mutex.lock();
37 workq.clear();
38 qsize = 0;
39 workq_mutex.unlock();
40 #endif
42 quit = true;
43 workq_condvar.notify_all();
45 printf("ThreadPool: waiting for %d worker threads to stop ", num_threads);
46 fflush(stdout);
47 #ifndef _MSC_VER
48 for(int i=0; i<num_threads; i++) {
49 thread[i].join();
50 putchar('.');
51 fflush(stdout);
52 }
53 #else
54 // spin until all threads are done...
55 std::unique_lock<std::mutex> lock(workq_mutex);
56 while(nactive > 0) {
57 lock.unlock();
58 std::this_thread::sleep_for(std::chrono::milliseconds(128));
59 putchar('.');
60 fflush(stdout);
61 lock.lock();
62 }
63 #endif // _MSC_VER
65 putchar('\n');
66 delete [] thread;
67 }
69 void ThreadPool::add_work(std::function<void ()> func)
70 {
71 add_work(func, std::function<void ()>{});
72 }
74 void ThreadPool::add_work(std::function<void ()> work_func, std::function<void ()> done_func)
75 {
76 std::unique_lock<std::mutex> lock(workq_mutex);
77 workq.push_back(WorkItem{work_func, done_func});
78 ++qsize;
79 workq_condvar.notify_all();
80 }
82 int ThreadPool::queued() const
83 {
84 std::unique_lock<std::mutex> lock(workq_mutex);
85 return qsize;
86 }
88 int ThreadPool::active() const
89 {
90 std::unique_lock<std::mutex> lock(workq_mutex);
91 return nactive;
92 }
94 int ThreadPool::pending() const
95 {
96 std::unique_lock<std::mutex> lock(workq_mutex);
97 return nactive + qsize;
98 }
100 long ThreadPool::wait()
101 {
102 auto start_time = steady_clock::now();
104 std::unique_lock<std::mutex> lock(workq_mutex);
105 done_condvar.wait(lock, [this](){ return nactive == 0 && workq.empty(); });
107 auto dur = steady_clock::now() - start_time;
108 return duration_cast<milliseconds>(dur).count();
109 }
111 long ThreadPool::wait(long timeout)
112 {
113 auto start_time = steady_clock::now();
114 duration<long, std::milli> dur, timeout_dur(std::max(timeout, 5L));
116 std::unique_lock<std::mutex> lock(workq_mutex);
117 while(timeout_dur.count() > 0 && (nactive > 0 || !workq.empty())) {
118 if(done_condvar.wait_for(lock, timeout_dur) == std::cv_status::timeout) {
119 break;
120 }
121 dur = duration_cast<milliseconds>(steady_clock::now() - start_time);
122 timeout_dur = milliseconds(std::max(timeout, 5L)) - dur;
123 }
125 /*printf("waited for: %ld ms (%ld req) (na %d,qs %d,em %s)\n", dur.count(), timeout,
126 nactive, qsize, workq.empty() ? "true" : "false");*/
127 return dur.count();
128 }
130 void ThreadPool::thread_func()
131 {
132 std::unique_lock<std::mutex> lock(workq_mutex);
133 for(;;) {
134 if(quit) break;
136 workq_condvar.wait(lock);
138 while(!quit && !workq.empty()) {
139 WorkItem witem = workq.front();
140 workq.pop_front();
141 ++nactive;
142 --qsize;
143 lock.unlock();
145 witem.work();
146 if(witem.done) {
147 witem.done();
148 }
150 lock.lock();
151 --nactive;
152 done_condvar.notify_all();
153 }
154 }
155 }