Fix use of --gpu-max-threads-per-block

Branch: pull/2883/head
Author: Jens Steube, 3 years ago
Parent: 674ca7d88f
Commit: 219bed457f

@@ -10498,9 +10498,8 @@ static bool load_kernel (hashcat_ctx_t *hashcat_ctx, hc_device_param_t *device_param
     // TODO HIP
     // no -offload-arch= aka --gpu-architecture because hiprtc gets native arch from hip_context

-    hc_asprintf (&hiprtc_options[0], "--gpu-max-threads-per-block=%u", device_param->kernel_threads);
-    hiprtc_options[1] = "";
+    hiprtc_options[0] = "--gpu-max-threads-per-block=64";
+    hiprtc_options[1] = "-O3";

     hiprtc_options[2] = "";
     hiprtc_options[3] = "";
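
The hunk above stops formatting the per-device kernel_threads value into the option string and passes a fixed --gpu-max-threads-per-block=64 instead. For context, the sketch below shows where such an option string ends up: it is handed to hiprtcCompileProgram, so the limit is baked into the compiled code and must not be smaller than the thread count used at launch time. Kernel source, names and error handling here are invented for illustration and are not hashcat code.

    #include <hip/hiprtc.h>
    #include <stddef.h>

    /* toy kernel source, stands in for hashcat's real kernels */
    static const char *kernel_src =
      "extern \"C\" __global__ void dummy (int *out) { out[0] = 1; }";

    int compile_with_block_limit (void)
    {
      hiprtcProgram prog;

      if (hiprtcCreateProgram (&prog, kernel_src, "dummy.cpp", 0, NULL, NULL) != HIPRTC_SUCCESS) return -1;

      /* the same fixed limit as in the diff; launching with more than 64
         threads per block would then be invalid, hence the clamping below */
      const char *options[] = { "--gpu-max-threads-per-block=64", "-O3" };

      const hiprtcResult rc = hiprtcCompileProgram (prog, 2, options);

      hiprtcDestroyProgram (&prog);

      return (rc == HIPRTC_SUCCESS) ? 0 : -1;
    }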
@@ -11589,7 +11588,7 @@ int backend_session_begin (hashcat_ctx_t *hashcat_ctx)
       char device_name_chksum_amp_mp[HCBUFSIZ_TINY] = { 0 };

-      const size_t dnclen_amp_mp = snprintf (device_name_chksum_amp_mp, HCBUFSIZ_TINY, "%d-%d-%d-%d-%u-%s-%s-%s-%d",
+      const size_t dnclen_amp_mp = snprintf (device_name_chksum_amp_mp, HCBUFSIZ_TINY, "%d-%d-%d-%d-%u-%s-%s-%s",
         backend_ctx->comptime,
         backend_ctx->cuda_driver_version,
         backend_ctx->hip_driver_version,
@@ -11597,8 +11596,7 @@ int backend_session_begin (hashcat_ctx_t *hashcat_ctx)
         device_param->opencl_platform_vendor_id,
         device_param->device_name,
         device_param->opencl_device_version,
-        device_param->opencl_driver_version,
-        device_param->kernel_threads);
+        device_param->opencl_driver_version);

       md5_ctx_t md5_ctx;
@@ -11889,7 +11887,7 @@ int backend_session_begin (hashcat_ctx_t *hashcat_ctx)
       const u32 extra_value = (user_options->attack_mode == ATTACK_MODE_ASSOCIATION) ? ATTACK_MODE_ASSOCIATION : ATTACK_MODE_NONE;

-      const size_t dnclen = snprintf (device_name_chksum, HCBUFSIZ_TINY, "%d-%d-%d-%d-%u-%s-%s-%s-%d-%u-%d-%u-%s",
+      const size_t dnclen = snprintf (device_name_chksum, HCBUFSIZ_TINY, "%d-%d-%d-%d-%u-%s-%s-%s-%d-%u-%u-%s",
         backend_ctx->comptime,
         backend_ctx->cuda_driver_version,
         backend_ctx->hip_driver_version,
@@ -11899,7 +11897,6 @@ int backend_session_begin (hashcat_ctx_t *hashcat_ctx)
         device_param->opencl_device_version,
         device_param->opencl_driver_version,
         device_param->vector_width,
-        device_param->kernel_threads,
         hashconfig->kern_type,
         extra_value,
         build_options_module_buf);
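
Both checksum hunks drop device_param->kernel_threads from the string that names the cached kernel binary. The point of the key is to capture exactly the inputs that can change the compiled code; with the block limit now a constant, the thread count no longer belongs in it, and leaving it in would only fragment the kernel cache. A minimal sketch of that idea (struct, field list and function invented for illustration, not hashcat's API):

    #include <stdio.h>

    /* stand-in for the device/toolchain properties that really do influence the
       compiled kernel; hashcat MD5s a string like this and uses the digest in
       the cached-kernel filename */
    typedef struct
    {
      int comptime;
      int cuda_driver_version;
      int hip_driver_version;
      unsigned vendor_id;
      const char *device_name;
      const char *device_version;
      const char *driver_version;
    } cache_key_inputs_t;

    static size_t build_cache_key (char *buf, size_t sz, const cache_key_inputs_t *in)
    {
      /* note: no kernel_threads field -- it no longer affects the binary */
      return snprintf (buf, sz, "%d-%d-%d-%u-%s-%s-%s",
        in->comptime,
        in->cuda_driver_version,
        in->hip_driver_version,
        in->vendor_id,
        in->device_name,
        in->device_version,
        in->driver_version);
    }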
@@ -14617,11 +14614,20 @@ int backend_session_begin (hashcat_ctx_t *hashcat_ctx)
           }
           else
           {
-            device_param->kernel_threads_min = MIN (device_param->kernel_threads_min, 64);
             device_param->kernel_threads_max = MIN (device_param->kernel_threads_max, 64);
           }
         }
       }

+      // we
+      if (device_param->opencl_device_vendor_id == VENDOR_ID_AMD_USE_HIP)
+      {
+        device_param->kernel_threads_min = MIN (device_param->kernel_threads_min, 64);
+        device_param->kernel_threads_max = MIN (device_param->kernel_threads_max, 64);
+      }
+
       /**
        * now everything that depends on threads and accel, basically dynamic workload
        */
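
The last hunk makes the launch configuration agree with the new compile-time limit: for devices driven through HIP (VENDOR_ID_AMD_USE_HIP) both kernel_threads_min and kernel_threads_max are clamped to 64, while the min clamp in the earlier else branch goes away. A minimal sketch of that invariant, with invented types rather than hashcat's hc_device_param_t:

    #define MIN(a,b) (((a) < (b)) ? (a) : (b))

    typedef struct
    {
      unsigned kernel_threads_min;
      unsigned kernel_threads_max;
      int      is_amd_hip;   /* stands in for the VENDOR_ID_AMD_USE_HIP check */
    } thread_limits_t;

    /* keep the launch thread range within the block-size limit baked into the
       HIP kernel via --gpu-max-threads-per-block */
    static void clamp_hip_threads (thread_limits_t *d, const unsigned block_limit)
    {
      if (d->is_amd_hip == 0) return;

      /* both bounds must respect the limit: a module requesting a minimum above
         it would otherwise ask for more threads than the binary allows */
      d->kernel_threads_min = MIN (d->kernel_threads_min, block_limit);
      d->kernel_threads_max = MIN (d->kernel_threads_max, block_limit);
    }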
