Formatting fixes

Removing extraneous { } bracket sections.  Unindenting blocks of
code as a result.  Also removing empty #ifdef KMP_STUB

llvm-svn: 258986
Jonathan Peyton
2016-01-27 21:02:04 +00:00
parent bf0cc3a241
commit 749b4d51ed
4 changed files with 84 additions and 146 deletions
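
To make the scope of the change concrete, here is a minimal sketch of the pattern being cleaned up (illustrative code, not an excerpt from the commit; the helper names are invented): a brace scope that adds nothing is dropped and its body unindented, and a preprocessor guard with an empty body is deleted outright.

#include <pthread.h>

/* Defined here only so the guard is taken in this sketch; the real macro
   comes from the runtime's build configuration. */
#define KMP_THREAD_ATTR 1

/* Before: an extra brace scope and an empty stub guard. */
static int init_attr_before( pthread_attr_t *thread_attr ) {
    int status = 0;
#ifdef KMP_THREAD_ATTR
    {
        status = pthread_attr_init( thread_attr );
    }
#endif /* KMP_THREAD_ATTR */
#ifndef KMP_STUB
#endif // KMP_STUB
    return status;
}

/* After: same behavior, braces removed, body unindented, empty guard gone. */
static int init_attr_after( pthread_attr_t *thread_attr ) {
    int status = 0;
#ifdef KMP_THREAD_ATTR
    status = pthread_attr_init( thread_attr );
#endif /* KMP_THREAD_ATTR */
    return status;
}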

View File

@@ -6757,7 +6757,7 @@ __kmp_parallel_initialize( void )
     __kmp_suspend_initialize();
-# if defined(USE_LOAD_BALANCE)
+#if defined(USE_LOAD_BALANCE)
     if ( __kmp_global.g.g_dynamic_mode == dynamic_default ) {
         __kmp_global.g.g_dynamic_mode = dynamic_load_balance;
     }

View File

@@ -203,9 +203,6 @@ __kmp_print_version_2( void ) {
         return;
     }; // if
     __kmp_version_2_printed = TRUE;
-#ifndef KMP_STUB
-#endif // KMP_STUB
 } // __kmp_print_version_2
 // end of file //

View File

@@ -992,117 +992,77 @@ __kmp_create_worker( int gtid, kmp_info_t *th, size_t stack_size )
     KMP_MB(); /* Flush all pending memory write invalidates. */
 #ifdef KMP_THREAD_ATTR
-    {
-        status = pthread_attr_init( &thread_attr );
-        if ( status != 0 ) {
-            __kmp_msg(
-                kmp_ms_fatal,
-                KMP_MSG( CantInitThreadAttrs ),
-                KMP_ERR( status ),
-                __kmp_msg_null
-            );
-        }; // if
-        status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
-        if ( status != 0 ) {
-            __kmp_msg(
-                kmp_ms_fatal,
-                KMP_MSG( CantSetWorkerState ),
-                KMP_ERR( status ),
-                __kmp_msg_null
-            );
-        }; // if
+    status = pthread_attr_init( &thread_attr );
+    if ( status != 0 ) {
+        __kmp_msg(kmp_ms_fatal, KMP_MSG( CantInitThreadAttrs ), KMP_ERR( status ), __kmp_msg_null);
+    }; // if
+    status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
+    if ( status != 0 ) {
+        __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetWorkerState ), KMP_ERR( status ), __kmp_msg_null);
+    }; // if
-        /* Set stack size for this thread now.
-         * The multiple of 2 is there because on some machines, requesting an unusual stacksize
-         * causes the thread to have an offset before the dummy alloca() takes place to create the
-         * offset. Since we want the user to have a sufficient stacksize AND support a stack offset, we
-         * alloca() twice the offset so that the upcoming alloca() does not eliminate any premade
-         * offset, and also gives the user the stack space they requested for all threads */
-        stack_size += gtid * __kmp_stkoffset * 2;
+    /* Set stack size for this thread now.
+     * The multiple of 2 is there because on some machines, requesting an unusual stacksize
+     * causes the thread to have an offset before the dummy alloca() takes place to create the
+     * offset. Since we want the user to have a sufficient stacksize AND support a stack offset, we
+     * alloca() twice the offset so that the upcoming alloca() does not eliminate any premade
+     * offset, and also gives the user the stack space they requested for all threads */
+    stack_size += gtid * __kmp_stkoffset * 2;
-        KA_TRACE( 10, ( "__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
-                        "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
-                        gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size ) );
+    KA_TRACE( 10, ( "__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
+                    "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
+                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size ) );
 # ifdef _POSIX_THREAD_ATTR_STACKSIZE
-        status = pthread_attr_setstacksize( & thread_attr, stack_size );
+    status = pthread_attr_setstacksize( & thread_attr, stack_size );
 # ifdef KMP_BACKUP_STKSIZE
-        if ( status != 0 ) {
-            if ( ! __kmp_env_stksize ) {
-                stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
-                __kmp_stksize = KMP_BACKUP_STKSIZE;
-                KA_TRACE( 10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
-                               "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
-                               "bytes\n",
-                               gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size )
-                          );
-                status = pthread_attr_setstacksize( &thread_attr, stack_size );
-            }; // if
-        }; // if
+    if ( status != 0 ) {
+        if ( ! __kmp_env_stksize ) {
+            stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
+            __kmp_stksize = KMP_BACKUP_STKSIZE;
+            KA_TRACE( 10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
+                           "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
+                           "bytes\n",
+                           gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size )
+                      );
+            status = pthread_attr_setstacksize( &thread_attr, stack_size );
+        }; // if
+    }; // if
 # endif /* KMP_BACKUP_STKSIZE */
-        if ( status != 0 ) {
-            __kmp_msg(
-                kmp_ms_fatal,
-                KMP_MSG( CantSetWorkerStackSize, stack_size ),
-                KMP_ERR( status ),
-                KMP_HNT( ChangeWorkerStackSize ),
-                __kmp_msg_null
-            );
-        }; // if
+    if ( status != 0 ) {
+        __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetWorkerStackSize, stack_size ), KMP_ERR( status ),
+                  KMP_HNT( ChangeWorkerStackSize ), __kmp_msg_null);
+    }; // if
 # endif /* _POSIX_THREAD_ATTR_STACKSIZE */
-    }
 #endif /* KMP_THREAD_ATTR */
-    {
-        status = pthread_create( & handle, & thread_attr, __kmp_launch_worker, (void *) th );
-        if ( status != 0 || ! handle ) { // ??? Why do we check handle??
+    status = pthread_create( & handle, & thread_attr, __kmp_launch_worker, (void *) th );
+    if ( status != 0 || ! handle ) { // ??? Why do we check handle??
 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
-            if ( status == EINVAL ) {
-                __kmp_msg(
-                    kmp_ms_fatal,
-                    KMP_MSG( CantSetWorkerStackSize, stack_size ),
-                    KMP_ERR( status ),
-                    KMP_HNT( IncreaseWorkerStackSize ),
-                    __kmp_msg_null
-                );
-            };
-            if ( status == ENOMEM ) {
-                __kmp_msg(
-                    kmp_ms_fatal,
-                    KMP_MSG( CantSetWorkerStackSize, stack_size ),
-                    KMP_ERR( status ),
-                    KMP_HNT( DecreaseWorkerStackSize ),
-                    __kmp_msg_null
-                );
-            };
+        if ( status == EINVAL ) {
+            __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetWorkerStackSize, stack_size ), KMP_ERR( status ),
+                      KMP_HNT( IncreaseWorkerStackSize ), __kmp_msg_null);
+        };
+        if ( status == ENOMEM ) {
+            __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetWorkerStackSize, stack_size ), KMP_ERR( status ),
+                      KMP_HNT( DecreaseWorkerStackSize ), __kmp_msg_null);
+        };
 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
-            if ( status == EAGAIN ) {
-                __kmp_msg(
-                    kmp_ms_fatal,
-                    KMP_MSG( NoResourcesForWorkerThread ),
-                    KMP_ERR( status ),
-                    KMP_HNT( Decrease_NUM_THREADS ),
-                    __kmp_msg_null
-                );
-            }; // if
-            KMP_SYSFAIL( "pthread_create", status );
-        }; // if
+        if ( status == EAGAIN ) {
+            __kmp_msg(kmp_ms_fatal, KMP_MSG( NoResourcesForWorkerThread ), KMP_ERR( status ),
+                      KMP_HNT( Decrease_NUM_THREADS ), __kmp_msg_null);
+        }; // if
+        KMP_SYSFAIL( "pthread_create", status );
+    }; // if
-        th->th.th_info.ds.ds_thread = handle;
-    }
+    th->th.th_info.ds.ds_thread = handle;
 #ifdef KMP_THREAD_ATTR
-    {
-        status = pthread_attr_destroy( & thread_attr );
-        if ( status ) {
-            __kmp_msg(
-                kmp_ms_warning,
-                KMP_MSG( CantDestroyThreadAttrs ),
-                KMP_ERR( status ),
-                __kmp_msg_null
-            );
-        }; // if
-    }
+    status = pthread_attr_destroy( & thread_attr );
+    if ( status ) {
+        __kmp_msg(kmp_ms_warning, KMP_MSG( CantDestroyThreadAttrs ), KMP_ERR( status ), __kmp_msg_null);
+    }; // if
 #endif /* KMP_THREAD_ATTR */
     KMP_MB(); /* Flush all pending memory write invalidates. */
@@ -1350,35 +1310,24 @@ __kmp_reap_worker( kmp_info_t *th )
        but if the worker dies after the pthread_kill call and before the pthread_join
        call, it will still hang. */
-    {
-        status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
-        if (status == ESRCH) {
-            KA_TRACE( 10, ("__kmp_reap_worker: worker T#%d does not exist, returning\n",
-                           th->th.th_info.ds.ds_gtid ) );
-        }
-        else {
-            KA_TRACE( 10, ("__kmp_reap_worker: try to join with worker T#%d\n",
-                           th->th.th_info.ds.ds_gtid ) );
-            status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
+    status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
+    if (status == ESRCH) {
+        KA_TRACE( 10, ("__kmp_reap_worker: worker T#%d does not exist, returning\n", th->th.th_info.ds.ds_gtid ) );
+    }
+    else {
+        KA_TRACE( 10, ("__kmp_reap_worker: try to join with worker T#%d\n", th->th.th_info.ds.ds_gtid ) );
+        status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
 #ifdef KMP_DEBUG
-            /* Don't expose these to the user until we understand when they trigger */
-            if ( status != 0 ) {
-                __kmp_msg(
-                    kmp_ms_fatal,
-                    KMP_MSG( ReapWorkerError ),
-                    KMP_ERR( status ),
-                    __kmp_msg_null
-                );
-            }
-            if ( exit_val != th ) {
-                KA_TRACE( 10, ( "__kmp_reap_worker: worker T#%d did not reap properly, "
-                                "exit_val = %p\n",
-                                th->th.th_info.ds.ds_gtid, exit_val ) );
-            }
-#endif /* KMP_DEBUG */
-        }
+        /* Don't expose these to the user until we understand when they trigger */
+        if ( status != 0 ) {
+            __kmp_msg(kmp_ms_fatal, KMP_MSG( ReapWorkerError ), KMP_ERR( status ), __kmp_msg_null);
+        }
+        if ( exit_val != th ) {
+            KA_TRACE( 10, ( "__kmp_reap_worker: worker T#%d did not reap properly, exit_val = %p\n",
+                            th->th.th_info.ds.ds_gtid, exit_val ) );
+        }
+#endif /* KMP_DEBUG */
     }
     KA_TRACE( 10, ("__kmp_reap_worker: done reaping T#%d\n", th->th.th_info.ds.ds_gtid ) );
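
Since these hunks only reflow the POSIX worker-creation and reaping code, here is a minimal, self-contained sketch of the same pthread pattern for reference: plain pthreads with the runtime's KMP_* reporting macros replaced by stderr output. The names worker_main, MY_STACK_SIZE, gtid and stkoffset are illustrative values, not taken from the runtime.

#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

#define MY_STACK_SIZE (4 * 1024 * 1024)     /* illustrative 4 MB request */

static void *worker_main( void *arg ) {
    /* The real worker body runs here; returning arg lets the reaper verify it. */
    return arg;
}

int main( void ) {
    pthread_t handle;
    pthread_attr_t attr;
    void *exit_val = NULL;
    int gtid = 1;                           /* illustrative thread id */
    size_t stkoffset = 8192;                /* illustrative per-thread stack offset */
    int status;

    /* Mirrors "stack_size += gtid * __kmp_stkoffset * 2": the offset is doubled so a
       later alloca() of the offset cannot eat into the stack the caller asked for. */
    size_t stack_size = MY_STACK_SIZE + (size_t) gtid * stkoffset * 2;

    /* Configure the thread: joinable, with an explicit stack size. */
    status = pthread_attr_init( &attr );
    if ( status != 0 ) { fprintf( stderr, "pthread_attr_init: %d\n", status ); exit( 1 ); }
    status = pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
    if ( status != 0 ) { fprintf( stderr, "setdetachstate: %d\n", status ); exit( 1 ); }
    status = pthread_attr_setstacksize( &attr, stack_size );
    if ( status != 0 ) { fprintf( stderr, "setstacksize: %d\n", status ); exit( 1 ); }

    /* In the runtime, EINVAL/ENOMEM map to "adjust the stack size" hints and EAGAIN to
       a "lower the thread count" hint; here any failure just aborts the sketch. */
    status = pthread_create( &handle, &attr, worker_main, &gtid );
    if ( status != 0 ) { fprintf( stderr, "pthread_create: %d\n", status ); exit( 1 ); }
    pthread_attr_destroy( &attr );          /* attribute object is no longer needed */

    /* Reap: pthread_kill with signal 0 only checks that the thread still exists. */
    status = pthread_kill( handle, 0 );
    if ( status != ESRCH ) {
        status = pthread_join( handle, &exit_val );
        if ( status != 0 || exit_val != &gtid ) {
            fprintf( stderr, "worker did not reap properly\n" );
        }
    }
    return 0;
}

Built with a plain "cc -pthread", this exercises the same sequence of calls that the hunk above merely reformats.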

View File

@@ -1373,11 +1373,9 @@ __kmp_create_worker( int gtid, kmp_info_t *th, size_t stack_size )
                 (LPTHREAD_START_ROUTINE) & __kmp_launch_worker,
                 (LPVOID) th, &idThread ) );
-    {
-        handle = CreateThread( NULL, (SIZE_T) stack_size,
-            (LPTHREAD_START_ROUTINE) __kmp_launch_worker,
-            (LPVOID) th, STACK_SIZE_PARAM_IS_A_RESERVATION, &idThread );
-    }
+    handle = CreateThread( NULL, (SIZE_T) stack_size,
+        (LPTHREAD_START_ROUTINE) __kmp_launch_worker,
+        (LPVOID) th, STACK_SIZE_PARAM_IS_A_RESERVATION, &idThread );
     KA_TRACE( 10, ( "__kmp_create_worker: (after) stack_size = %"
                     KMP_SIZE_T_SPEC
@@ -1387,19 +1385,13 @@ __kmp_create_worker( int gtid, kmp_info_t *th, size_t stack_size )
                 (LPTHREAD_START_ROUTINE) & __kmp_launch_worker,
                 (LPVOID) th, idThread, handle ) );
-    {
-        if ( handle == 0 ) {
-            DWORD error = GetLastError();
-            __kmp_msg(
-                kmp_ms_fatal,
-                KMP_MSG( CantCreateThread ),
-                KMP_ERR( error ),
-                __kmp_msg_null
-            );
-        } else {
-            th->th.th_info.ds.ds_thread = handle;
-        }
-    }
+    if ( handle == 0 ) {
+        DWORD error = GetLastError();
+        __kmp_msg(kmp_ms_fatal, KMP_MSG( CantCreateThread ), KMP_ERR( error ), __kmp_msg_null);
+    } else {
+        th->th.th_info.ds.ds_thread = handle;
+    }
     KMP_MB(); /* Flush all pending memory write invalidates. */
 }
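
The Windows side above does the equivalent job with CreateThread. A minimal standalone sketch of that call pattern follows (plain Win32; the KMP_* reporting macros are replaced with stderr output, and worker_main plus the 4 MB figure are illustrative):

#include <windows.h>
#include <stdio.h>

/* Illustrative thread entry point; the runtime passes a kmp_info_t* instead. */
static DWORD WINAPI worker_main( LPVOID param ) {
    (void) param;
    return 0;
}

int main( void ) {
    DWORD idThread = 0;
    SIZE_T stack_size = 4 * 1024 * 1024;    /* illustrative 4 MB reservation */

    /* STACK_SIZE_PARAM_IS_A_RESERVATION makes stack_size the reserved stack size
       rather than the initial commit, matching the CreateThread call above. */
    HANDLE handle = CreateThread( NULL, stack_size,
                                  worker_main, NULL,
                                  STACK_SIZE_PARAM_IS_A_RESERVATION, &idThread );
    if ( handle == 0 ) {
        DWORD error = GetLastError();
        fprintf( stderr, "CreateThread failed: error %lu\n", (unsigned long) error );
        return 1;
    }

    /* Reap the worker: wait for it to finish, then release the handle. */
    WaitForSingleObject( handle, INFINITE );
    CloseHandle( handle );
    return 0;
}

The flag matters when large stacks are requested: without it, the size parameter sets the initially committed stack rather than the reservation.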