$FreeBSD$ This patch works around POSIX thread implementation differences between FreeBSD's libthr and Linux's NPTL: - We do not support static allocation of mutexes and condition variables. Instead, they are allocated dynamically on first use, provided they were properly initialized with PTHREAD_MUTEX_INITIALIZER and PTHREAD_COND_INITIALIZER. To be safe, we therefore initialize and destroy them explicitly. - We must initialize the mutex before calling pthread_cond_wait(3). Otherwise, it fails with EINVAL. - We must lock the mutex before calling pthread_cond_wait(3). Otherwise, it fails with EPERM. This is a POSIX requirement. - We must join threads via pthread_join(3) after calling pthread_cancel(3). Otherwise, we may destroy a mutex or condition variable that is still in use. --- icedtea6-1.9.1/plugin/icedteanp/IcedTeaNPPlugin.cc.orig 2010-10-04 17:26:45.636097000 -0400 +++ icedtea6-1.9.1/plugin/icedteanp/IcedTeaNPPlugin.cc 2010-10-13 14:23:10.000000000 -0400 @@ -2355,6 +2355,10 @@ NP_Shutdown (void) pthread_cancel(plugin_request_processor_thread2); pthread_cancel(plugin_request_processor_thread3); + pthread_join(plugin_request_processor_thread1, NULL); + pthread_join(plugin_request_processor_thread2, NULL); + pthread_join(plugin_request_processor_thread3, NULL); + java_to_plugin_bus->unSubscribe(plugin_req_proc); plugin_to_java_bus->unSubscribe(java_req_proc); //internal_bus->unSubscribe(java_req_proc); --- icedtea6-1.9.1/plugin/icedteanp/IcedTeaPluginRequestProcessor.cc.orig 2010-08-06 07:05:21.996828000 -0400 +++ icedtea6-1.9.1/plugin/icedteanp/IcedTeaPluginRequestProcessor.cc 2010-10-13 14:23:10.000000000 -0400 @@ -63,6 +63,12 @@ PluginRequestProcessor::PluginRequestPro this->pendingRequests = new std::map(); internal_req_ref_counter = 0; + + pthread_mutex_init(&message_queue_mutex, NULL); + pthread_mutex_init(&syn_write_mutex, NULL); + pthread_mutex_init(&tc_mutex, NULL); + + pthread_cond_init(&cond_message_available, NULL); } /** @@ -77,6 +83,12 @@ 
PluginRequestProcessor::~PluginRequestPr if (pendingRequests) delete pendingRequests; + + pthread_mutex_destroy(&message_queue_mutex); + pthread_mutex_destroy(&syn_write_mutex); + pthread_mutex_destroy(&tc_mutex); + + pthread_cond_destroy(&cond_message_available); } /** @@ -701,6 +713,14 @@ PluginRequestProcessor::finalize(std::ve plugin_to_java_bus->post(response.c_str()); } +static void +queue_cleanup(void* data) +{ + + pthread_mutex_destroy((pthread_mutex_t*) data); + + PLUGIN_DEBUG("Queue processing stopped.\n"); +} void* queue_processor(void* data) @@ -709,10 +729,14 @@ queue_processor(void* data) PluginRequestProcessor* processor = (PluginRequestProcessor*) data; std::vector* message_parts = NULL; std::string command; - pthread_mutex_t wait_mutex = PTHREAD_MUTEX_INITIALIZER; // This is needed for API compat. and is unused + pthread_mutex_t wait_mutex = PTHREAD_MUTEX_INITIALIZER; PLUGIN_DEBUG("Queue processor initialized. Queue = %p\n", message_queue); + pthread_mutex_init(&wait_mutex, NULL); + + pthread_cleanup_push(queue_cleanup, (void*) &wait_mutex); + while (true) { pthread_mutex_lock(&message_queue_mutex); @@ -780,14 +804,17 @@ queue_processor(void* data) } else { - pthread_cond_wait(&cond_message_available, &wait_mutex); - pthread_testcancel(); + pthread_mutex_lock(&wait_mutex); + pthread_cond_wait(&cond_message_available, &wait_mutex); + pthread_mutex_unlock(&wait_mutex); } message_parts = NULL; + + pthread_testcancel(); } - PLUGIN_DEBUG("Queue processing stopped.\n"); + pthread_cleanup_pop(1); } /******************************************