libresman
changeset 6:410c19c735b2
- removed the glew dependency
- initial thread pool implementation
author   | John Tsiombikas <nuclear@member.fsf.org>
date     | Mon, 03 Feb 2014 05:22:09 +0200
parents  | bd9b4ff19c93
children | 04362221f12d
files    | examples/imgthumbs/src/main.c examples/imgthumbs/src/opengl.h examples/imgthumbs/src/thumbs.c src/resman.h src/threadpool.c src/threadpool.h
diffstat | 6 files changed, 204 insertions(+), 7 deletions(-)
line diff
--- a/examples/imgthumbs/src/main.c	Sat Feb 01 08:02:08 2014 +0200
+++ b/examples/imgthumbs/src/main.c	Mon Feb 03 05:22:09 2014 +0200
@@ -57,8 +57,6 @@

 static int init(void)
 {
-	glewInit();
-
 	thumbs = create_thumbs(path);
 	return 0;
 }
--- a/examples/imgthumbs/src/opengl.h	Sat Feb 01 08:02:08 2014 +0200
+++ b/examples/imgthumbs/src/opengl.h	Mon Feb 03 05:22:09 2014 +0200
@@ -1,8 +1,6 @@
 #ifndef OPENGL_H_
 #define OPENGL_H_

-#include <GL/glew.h>
-
 #ifdef __APPLE__
 #include <GLUT/glut.h>
 #else
--- a/examples/imgthumbs/src/thumbs.c	Sat Feb 01 08:02:08 2014 +0200
+++ b/examples/imgthumbs/src/thumbs.c	Mon Feb 03 05:22:09 2014 +0200
@@ -7,6 +7,10 @@
 #include "opengl.h"
 #include "thumbs.h"

+#ifndef GL_COMPRESSED_RGB
+#define GL_COMPRESSED_RGB 0x84ed
+#endif
+
 struct thumbnail *create_thumbs(const char *dirpath)
 {
 	DIR *dir;
@@ -14,7 +18,7 @@
 	struct thumbnail *list = 0;

 	unsigned int intfmt = GL_COMPRESSED_RGB;
-	if(!GLEW_ARB_texture_compression) {
+	if(!strstr(glGetString(GL_EXTENSIONS), "GL_ARB_texture_compression")) {
 		printf("warning, no texture compression available.\n");
 		intfmt = GL_RGB;
 	}
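With GLEW gone, the example now detects GL_ARB_texture_compression by searching the GL_EXTENSIONS string directly. A plain strstr can also match a longer extension whose name merely begins with the queried one; the sketch below (illustrative only, not part of this changeset, and the helper name is made up) shows a token-wise variant of the same check.

#include <string.h>
#include "opengl.h"

/* hypothetical helper: accepts a match only if it is a whole
 * space-delimited token of the GL_EXTENSIONS string */
static int have_gl_extension(const char *name)
{
	const char *str = (const char*)glGetString(GL_EXTENSIONS);
	const char *found = str;
	size_t len = strlen(name);

	if(!str) {
		return 0;
	}

	while((found = strstr(found, name))) {
		int starts_token = (found == str || found[-1] == ' ');
		int ends_token = (found[len] == ' ' || found[len] == '\0');
		if(starts_token && ends_token) {
			return 1;
		}
		found += len;
	}
	return 0;
}

create_thumbs() could then call have_gl_extension("GL_ARB_texture_compression") where the diff above uses the raw strstr test.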
--- a/src/resman.h	Sat Feb 01 08:02:08 2014 +0200
+++ b/src/resman.h	Mon Feb 03 05:22:09 2014 +0200
@@ -12,6 +12,10 @@

 struct resman;

+#ifdef __cplusplus
+extern "C" {
+#endif
+
 struct resman *resman_create(void);
 void resman_free(struct resman *rman);

@@ -31,5 +35,9 @@
 void resman_set_res_data(struct resman *rman, int res_id, void *data);
 void *resman_get_res_data(struct resman *rman, int res_id);

+#ifdef __cplusplus
+}
+#endif
+

 #endif /* RESOURCE_MANAGER_H_ */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/threadpool.c	Mon Feb 03 05:22:09 2014 +0200
@@ -0,0 +1,189 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+#include "threadpool.h"
+
+struct work_item {
+	void *data;
+	struct work_item *next;
+};
+
+struct thread_pool {
+	pthread_t *workers;
+	int num_workers;
+
+	pthread_mutex_t work_lock;
+	pthread_cond_t work_cond;
+
+	tpool_work_func work_func;
+	void *cls;
+
+	struct work_item *work_list, *work_list_tail;
+	int work_count;
+};
+
+
+static void *thread_func(void *tp);
+static struct work_item *alloc_node(void);
+static void free_node(struct work_item *node);
+static int get_processor_count(void);
+
+
+
+int tpool_init(struct thread_pool *tpool, int num_threads)
+{
+	int i;
+
+	memset(tpool, 0, sizeof *tpool);
+
+	if(num_threads <= 0) {
+		num_threads = get_processor_count();
+	}
+	tpool->num_workers = num_threads;
+
+	printf("initializing thread pool with %d worker threads\n", num_threads);
+
+	for(i=0; i<num_threads; i++) {
+		if(pthread_create(tpool->workers + i, 0, thread_func, tpool) == -1) {
+			fprintf(stderr, "%s: failed to create thread %d\n", __FUNCTION__, i);
+			tpool_destroy(tpool);
+			return -1;
+		}
+	}
+
+	pthread_mutex_init(&tpool->work_lock, 0);
+	pthread_cond_init(&tpool->work_cond, 0);
+	return 0;
+}
+
+void tpool_destroy(struct thread_pool *tpool)
+{
+	int i;
+	for(i=0; i<tpool->num_workers; i++) {
+		void *ret;
+		pthread_join(tpool->workers[i], &ret);
+	}
+
+	pthread_mutex_destroy(&tpool->work_lock);
+	pthread_cond_destroy(&tpool->work_cond);
+}
+
+void tpool_set_work_func(struct thread_pool *tpool, tpool_work_func func, void *cls)
+{
+	tpool->work_func = func;
+	tpool->cls = cls;
+}
+
+int tpool_add_work(struct thread_pool *tpool, void *data)
+{
+	struct work_item *node;
+
+	if(!(node = alloc_node())) {
+		fprintf(stderr, "%s: failed to allocate new work item node\n", __FUNCTION__);
+		return -1;
+	}
+	node->data = data;
+	node->next = 0;
+
+	pthread_mutex_lock(&tpool->work_lock);
+
+	if(!tpool->work_list) {
+		tpool->work_list = tpool->work_list_tail = node;
+	} else {
+		tpool->work_list_tail->next = node;
+		tpool->work_list_tail = node;
+	}
+
+	pthread_mutex_unlock(&tpool->work_lock);
+	return 0;
+}
+
+
+static void *thread_func(void *tp)
+{
+	struct work_item *job;
+	struct thread_pool *tpool = tp;
+
+	pthread_mutex_lock(&tpool->work_lock);
+	for(;;) {
+		/* while there aren't any work items to do go to sleep on the condvar */
+		pthread_cond_wait(&tpool->work_cond, &tpool->work_lock);
+		if(!tpool->work_list) {
+			continue;	/* spurious wakeup, go back to sleep */
+		}
+
+		job = tpool->work_list;
+		tpool->work_list = tpool->work_list->next;
+
+		tpool->work_func(job->data, tpool->cls);
+	}
+	pthread_mutex_unlock(&tpool->work_lock);
+	return 0;
+}
+
+/* TODO: custom allocator */
+static struct work_item *alloc_node(void)
+{
+	return malloc(sizeof(struct work_item));
+}
+
+static void free_node(struct work_item *node)
+{
+	free(node);
+}
+
+/* The following highly platform-specific code detects the number
+ * of processors available in the system. It's used by the thread pool
+ * to autodetect how many threads to spawn.
+ * Currently works on: Linux, BSD, Darwin, and Windows.
+ */
+
+#if defined(__APPLE__) && defined(__MACH__)
+# ifndef __unix__
+# define __unix__ 1
+# endif	/* unix */
+# ifndef __bsd__
+# define __bsd__ 1
+# endif	/* bsd */
+#endif	/* apple */
+
+#if defined(unix) || defined(__unix__)
+#include <unistd.h>
+
+# ifdef __bsd__
+# include <sys/sysctl.h>
+# endif
+#endif
+
+#if defined(WIN32) || defined(__WIN32__)
+#include <windows.h>
+#endif
+
+
+static int get_processor_count(void)
+{
+#if defined(unix) || defined(__unix__)
+# if defined(__bsd__)
+	/* BSD systems provide the num.processors through sysctl */
+	int num, mib[] = {CTL_HW, HW_NCPU};
+	size_t len = sizeof num;
+
+	sysctl(mib, 2, &num, &len, 0, 0);
+	return num;
+
+# elif defined(__sgi)
+	/* SGI IRIX flavour of the _SC_NPROC_ONLN sysconf */
+	return sysconf(_SC_NPROC_ONLN);
+# else
+	/* Linux (and others?) have the _SC_NPROCESSORS_ONLN sysconf */
+	return sysconf(_SC_NPROCESSORS_ONLN);
+# endif	/* bsd/sgi/other */
+
+#elif defined(WIN32) || defined(__WIN32__)
+	/* under windows we need to call GetSystemInfo */
+	SYSTEM_INFO info;
+	GetSystemInfo(&info);
+	return info.dwNumberOfProcessors;
+#endif
+}
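For reference, the queue hand-off this thread pool is built around is the standard POSIX condition-variable pattern: a producer appends under the mutex and signals the condvar, and a consumer waits on the condvar and re-checks the queue so that spurious wakeups are harmless. Below is a minimal, self-contained sketch of that general idiom; it is not taken from this changeset, and a trivial LIFO list stands in for the pool's FIFO work list.

#include <pthread.h>

struct node { struct node *next; };

static struct node *queue;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

void produce(struct node *n)
{
	pthread_mutex_lock(&lock);
	n->next = queue;
	queue = n;
	pthread_cond_signal(&cond);	/* wake one sleeping consumer */
	pthread_mutex_unlock(&lock);
}

struct node *consume(void)
{
	struct node *n;

	pthread_mutex_lock(&lock);
	while(!queue) {
		/* releases the lock while sleeping, re-acquires it on wakeup */
		pthread_cond_wait(&cond, &lock);
	}
	n = queue;
	queue = n->next;
	pthread_mutex_unlock(&lock);
	return n;
}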
--- a/src/threadpool.h	Sat Feb 01 08:02:08 2014 +0200
+++ b/src/threadpool.h	Mon Feb 03 05:22:09 2014 +0200
@@ -3,7 +3,7 @@

 struct thread_pool;

-typedef void (*tpool_work_func)(void*);
+typedef void (*tpool_work_func)(void *data, void *cls);

 #define TPOOL_AUTO 0
 int tpool_init(struct thread_pool *tpool, int num_threads);
@@ -11,6 +11,6 @@

 void tpool_set_work_func(struct thread_pool *tpool, tpool_work_func func, void *cls);

-/* TODO cont. */
+int tpool_add_work(struct thread_pool *tpool, void *data);

 #endif /* THREAD_POOL_H_ */
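Putting the new header together, the intended call sequence is roughly: initialize the pool (TPOOL_AUTO lets it pick one worker per processor), register a work function plus closure pointer with tpool_set_work_func, then queue items with tpool_add_work. The sketch below only illustrates that flow; since the header forward-declares struct thread_pool without exposing an allocator, the get_pool() helper is hypothetical, and tpool_destroy is assumed to be declared in the unchanged part of the header (it is defined in threadpool.c).

#include <stdio.h>
#include "threadpool.h"

struct thread_pool *get_pool(void);	/* hypothetical, see note above */

/* matches the new two-argument tpool_work_func signature */
static void process_item(void *data, void *cls)
{
	printf("[%s] processing %s\n", (const char*)cls, (const char*)data);
}

int main(void)
{
	struct thread_pool *tp = get_pool();

	tpool_init(tp, TPOOL_AUTO);	/* TPOOL_AUTO: autodetect processor count */
	tpool_set_work_func(tp, process_item, "thumbs");

	tpool_add_work(tp, "foo.jpg");	/* picked up by a worker thread */
	tpool_add_work(tp, "bar.jpg");

	/* ... */
	tpool_destroy(tp);
	return 0;
}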