libresman
diff src/threadpool.c @ 6:410c19c735b2
- removed the glew dependency
- initial thread pool implementation
author:   John Tsiombikas <nuclear@member.fsf.org>
date:     Mon, 03 Feb 2014 05:22:09 +0200
parents:  (none — file added in this changeset)
children: 03f3de659c32
line diff
1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/threadpool.c Mon Feb 03 05:22:09 2014 +0200 1.3 @@ -0,0 +1,189 @@ 1.4 +#include <stdio.h> 1.5 +#include <stdlib.h> 1.6 +#include <string.h> 1.7 +#include <pthread.h> 1.8 +#include "threadpool.h" 1.9 + 1.10 +struct work_item { 1.11 + void *data; 1.12 + struct work_item *next; 1.13 +}; 1.14 + 1.15 +struct thread_pool { 1.16 + pthread_t *workers; 1.17 + int num_workers; 1.18 + 1.19 + pthread_mutex_t work_lock; 1.20 + pthread_cond_t work_cond; 1.21 + 1.22 + tpool_work_func work_func; 1.23 + void *cls; 1.24 + 1.25 + struct work_item *work_list, *work_list_tail; 1.26 + int work_count; 1.27 +}; 1.28 + 1.29 + 1.30 +static void *thread_func(void *tp); 1.31 +static struct work_item *alloc_node(void); 1.32 +static void free_node(struct work_item *node); 1.33 +static int get_processor_count(void); 1.34 + 1.35 + 1.36 + 1.37 +int tpool_init(struct thread_pool *tpool, int num_threads) 1.38 +{ 1.39 + int i; 1.40 + 1.41 + memset(tpool, 0, sizeof *tpool); 1.42 + 1.43 + if(num_threads <= 0) { 1.44 + num_threads = get_processor_count(); 1.45 + } 1.46 + tpool->num_workers = num_threads; 1.47 + 1.48 + printf("initializing thread pool with %d worker threads\n", num_threads); 1.49 + 1.50 + for(i=0; i<num_threads; i++) { 1.51 + if(pthread_create(tpool->workers + i, 0, thread_func, tpool) == -1) { 1.52 + fprintf(stderr, "%s: failed to create thread %d\n", __FUNCTION__, i); 1.53 + tpool_destroy(tpool); 1.54 + return -1; 1.55 + } 1.56 + } 1.57 + 1.58 + pthread_mutex_init(&tpool->work_lock, 0); 1.59 + pthread_cond_init(&tpool->work_cond, 0); 1.60 + return 0; 1.61 +} 1.62 + 1.63 +void tpool_destroy(struct thread_pool *tpool) 1.64 +{ 1.65 + int i; 1.66 + for(i=0; i<tpool->num_workers; i++) { 1.67 + void *ret; 1.68 + pthread_join(tpool->workers[i], &ret); 1.69 + } 1.70 + 1.71 + pthread_mutex_destroy(&tpool->work_lock); 1.72 + pthread_cond_destroy(&tpool->work_cond); 1.73 +} 1.74 + 1.75 +void tpool_set_work_func(struct thread_pool 
*tpool, tpool_work_func func, void *cls) 1.76 +{ 1.77 + tpool->work_func = func; 1.78 + tpool->cls = cls; 1.79 +} 1.80 + 1.81 +int tpool_add_work(struct thread_pool *tpool, void *data) 1.82 +{ 1.83 + struct work_item *node; 1.84 + 1.85 + if(!(node = alloc_node())) { 1.86 + fprintf(stderr, "%s: failed to allocate new work item node\n", __FUNCTION__); 1.87 + return -1; 1.88 + } 1.89 + node->data = data; 1.90 + node->next = 0; 1.91 + 1.92 + pthread_mutex_lock(&tpool->work_lock); 1.93 + 1.94 + if(!tpool->work_list) { 1.95 + tpool->work_list = tpool->work_list_tail = node; 1.96 + } else { 1.97 + tpool->work_list_tail->next = node; 1.98 + tpool->work_list_tail = node; 1.99 + } 1.100 + 1.101 + pthread_mutex_unlock(&tpool->work_lock); 1.102 + return 0; 1.103 +} 1.104 + 1.105 + 1.106 +static void *thread_func(void *tp) 1.107 +{ 1.108 + struct work_item *job; 1.109 + struct thread_pool *tpool = tp; 1.110 + 1.111 + pthread_mutex_lock(&tpool->work_lock); 1.112 + for(;;) { 1.113 + /* while there aren't any work items to do go to sleep on the condvar */ 1.114 + pthread_cond_wait(&tpool->work_cond, &tpool->work_lock); 1.115 + if(!tpool->work_list) { 1.116 + continue; /* spurious wakeup, go back to sleep */ 1.117 + } 1.118 + 1.119 + job = tpool->work_list; 1.120 + tpool->work_list = tpool->work_list->next; 1.121 + 1.122 + tpool->work_func(job->data, tpool->cls); 1.123 + } 1.124 + pthread_mutex_unlock(&tpool->work_lock); 1.125 + return 0; 1.126 +} 1.127 + 1.128 +/* TODO: custom allocator */ 1.129 +static struct work_item *alloc_node(void) 1.130 +{ 1.131 + return malloc(sizeof(struct work_item)); 1.132 +} 1.133 + 1.134 +static void free_node(struct work_item *node) 1.135 +{ 1.136 + free(node); 1.137 +} 1.138 + 1.139 +/* The following highly platform-specific code detects the number 1.140 + * of processors available in the system. It's used by the thread pool 1.141 + * to autodetect how many threads to spawn. 1.142 + * Currently works on: Linux, BSD, Darwin, and Windows. 
1.143 + */ 1.144 + 1.145 +#if defined(__APPLE__) && defined(__MACH__) 1.146 +# ifndef __unix__ 1.147 +# define __unix__ 1 1.148 +# endif /* unix */ 1.149 +# ifndef __bsd__ 1.150 +# define __bsd__ 1 1.151 +# endif /* bsd */ 1.152 +#endif /* apple */ 1.153 + 1.154 +#if defined(unix) || defined(__unix__) 1.155 +#include <unistd.h> 1.156 + 1.157 +# ifdef __bsd__ 1.158 +# include <sys/sysctl.h> 1.159 +# endif 1.160 +#endif 1.161 + 1.162 +#if defined(WIN32) || defined(__WIN32__) 1.163 +#include <windows.h> 1.164 +#endif 1.165 + 1.166 + 1.167 +static int get_processor_count(void) 1.168 +{ 1.169 +#if defined(unix) || defined(__unix__) 1.170 +# if defined(__bsd__) 1.171 + /* BSD systems provide the num.processors through sysctl */ 1.172 + int num, mib[] = {CTL_HW, HW_NCPU}; 1.173 + size_t len = sizeof num; 1.174 + 1.175 + sysctl(mib, 2, &num, &len, 0, 0); 1.176 + return num; 1.177 + 1.178 +# elif defined(__sgi) 1.179 + /* SGI IRIX flavour of the _SC_NPROC_ONLN sysconf */ 1.180 + return sysconf(_SC_NPROC_ONLN); 1.181 +# else 1.182 + /* Linux (and others?) have the _SC_NPROCESSORS_ONLN sysconf */ 1.183 + return sysconf(_SC_NPROCESSORS_ONLN); 1.184 +# endif /* bsd/sgi/other */ 1.185 + 1.186 +#elif defined(WIN32) || defined(__WIN32__) 1.187 + /* under windows we need to call GetSystemInfo */ 1.188 + SYSTEM_INFO info; 1.189 + GetSystemInfo(&info); 1.190 + return info.dwNumberOfProcessors; 1.191 +#endif 1.192 +}