Begin renaming API-specific variables in backend section

Branch: pull/2022/head
Author: Jens Steube, 5 years ago
Parent: d73c0ac8a9
Commit: d862458ab5

@@ -76,13 +76,13 @@ typedef struct timespec hc_timer_t;
#endif
#if defined (_WIN)
typedef HANDLE hc_thread_t;
typedef HANDLE hc_thread_mutex_t;
typedef HANDLE hc_thread_semaphore_t;
typedef HANDLE hc_thread_t;
typedef HANDLE hc_thread_mutex_t;
typedef HANDLE hc_thread_semaphore_t;
#else
typedef pthread_t hc_thread_t;
typedef pthread_mutex_t hc_thread_mutex_t;
typedef sem_t hc_thread_semaphore_t;
typedef pthread_t hc_thread_t;
typedef pthread_mutex_t hc_thread_mutex_t;
typedef sem_t hc_thread_semaphore_t;
#endif
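These typedefs give the rest of the code one thread, mutex, and semaphore vocabulary across Win32 and POSIX. A minimal sketch of what the abstraction buys, assuming hypothetical hc_lock/hc_unlock helpers that are not part of this commit:

#if defined (_WIN)
// HANDLE-based mutex: Win32 wait/release
static void hc_lock   (hc_thread_mutex_t *m) { WaitForSingleObject (*m, INFINITE); }
static void hc_unlock (hc_thread_mutex_t *m) { ReleaseMutex (*m); }
#else
// pthread mutex: POSIX lock/unlock
static void hc_lock   (hc_thread_mutex_t *m) { pthread_mutex_lock   (m); }
static void hc_unlock (hc_thread_mutex_t *m) { pthread_mutex_unlock (m); }
#endif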
// enums
@@ -995,27 +995,17 @@ typedef struct link_speed
typedef struct hc_device_param
{
CUdevice device_cuda;
u32 device_id;
cl_device_id device;
cl_device_type device_type;
u8 pcie_bus;
u8 pcie_device;
u8 pcie_function;
u32 device_id;
u32 platform_devices_id; // for mapping with hms devices
bool skipped; // permanent
bool skipped_warning; // iteration
st_status_t st_status;
int sm_major;
int sm_minor;
u32 kernel_exec_timeout;
u8 pcie_bus;
u8 pcie_device;
u8 pcie_function;
u32 device_processors;
u64 device_maxmem_alloc;
u64 device_global_mem;
@@ -1023,7 +1013,13 @@ typedef struct hc_device_param
u32 device_maxclock_frequency;
size_t device_maxworkgroup_size;
u64 device_local_mem_size;
cl_device_local_mem_type device_local_mem_type;
int device_local_mem_type;
int sm_major;
int sm_minor;
u32 kernel_exec_timeout;
st_status_t st_status;
u32 vector_width;
@@ -1223,6 +1219,50 @@ typedef struct hc_device_param
double spin_damp;
void *kernel_params[PARAMCNT];
void *kernel_params_mp[PARAMCNT];
void *kernel_params_mp_r[PARAMCNT];
void *kernel_params_mp_l[PARAMCNT];
void *kernel_params_amp[PARAMCNT];
void *kernel_params_tm[PARAMCNT];
void *kernel_params_memset[PARAMCNT];
void *kernel_params_atinit[PARAMCNT];
void *kernel_params_decompress[PARAMCNT];
u32 kernel_params_buf32[PARAMCNT];
u64 kernel_params_buf64[PARAMCNT];
u32 kernel_params_mp_buf32[PARAMCNT];
u64 kernel_params_mp_buf64[PARAMCNT];
u32 kernel_params_mp_r_buf32[PARAMCNT];
u64 kernel_params_mp_r_buf64[PARAMCNT];
u32 kernel_params_mp_l_buf32[PARAMCNT];
u64 kernel_params_mp_l_buf64[PARAMCNT];
u32 kernel_params_amp_buf32[PARAMCNT];
u64 kernel_params_amp_buf64[PARAMCNT];
u32 kernel_params_memset_buf32[PARAMCNT];
u64 kernel_params_memset_buf64[PARAMCNT];
u32 kernel_params_atinit_buf32[PARAMCNT];
u64 kernel_params_atinit_buf64[PARAMCNT];
u32 kernel_params_decompress_buf32[PARAMCNT];
u64 kernel_params_decompress_buf64[PARAMCNT];
// API: cuda
CUdevice cuda_device;
// API: opencl
cl_device_id device;
cl_device_type opencl_device_type;
cl_platform_id platform;
cl_uint device_vendor_id;
@@ -1296,40 +1336,6 @@ typedef struct hc_device_param
cl_mem d_st_salts_buf;
cl_mem d_st_esalts_buf;
void *kernel_params[PARAMCNT];
void *kernel_params_mp[PARAMCNT];
void *kernel_params_mp_r[PARAMCNT];
void *kernel_params_mp_l[PARAMCNT];
void *kernel_params_amp[PARAMCNT];
void *kernel_params_tm[PARAMCNT];
void *kernel_params_memset[PARAMCNT];
void *kernel_params_atinit[PARAMCNT];
void *kernel_params_decompress[PARAMCNT];
u32 kernel_params_buf32[PARAMCNT];
u64 kernel_params_buf64[PARAMCNT];
u32 kernel_params_mp_buf32[PARAMCNT];
u64 kernel_params_mp_buf64[PARAMCNT];
u32 kernel_params_mp_r_buf32[PARAMCNT];
u64 kernel_params_mp_r_buf64[PARAMCNT];
u32 kernel_params_mp_l_buf32[PARAMCNT];
u64 kernel_params_mp_l_buf64[PARAMCNT];
u32 kernel_params_amp_buf32[PARAMCNT];
u64 kernel_params_amp_buf64[PARAMCNT];
u32 kernel_params_memset_buf32[PARAMCNT];
u64 kernel_params_memset_buf64[PARAMCNT];
u32 kernel_params_atinit_buf32[PARAMCNT];
u64 kernel_params_atinit_buf64[PARAMCNT];
u32 kernel_params_decompress_buf32[PARAMCNT];
u64 kernel_params_decompress_buf64[PARAMCNT];
} hc_device_param_t;
typedef struct backend_ctx
@@ -1352,18 +1358,6 @@ typedef struct backend_ctx
int opencl_devices_cnt;
int opencl_devices_active;
int cuda_driver_version;
cl_uint platforms_cnt;
cl_platform_id *platforms;
char **platforms_vendor;
char **platforms_name;
char **platforms_version;
bool *platforms_skipped;
cl_uint platform_devices_cnt;
cl_device_id *platform_devices;
u32 devices_cnt;
u32 devices_active;
@@ -1374,9 +1368,7 @@ typedef struct backend_ctx
u64 kernel_power_all;
u64 kernel_power_final; // we save that so that all divisions are done from the same base
u64 opencl_platforms_filter;
u64 devices_filter;
cl_device_type device_types_filter;
double target_msec;
@@ -1389,6 +1381,25 @@ typedef struct backend_ctx
int force_jit_compilation;
// cuda
int cuda_driver_version;
// opencl
cl_uint platforms_cnt;
cl_platform_id *platforms;
char **platforms_vendor;
char **platforms_name;
char **platforms_version;
bool *platforms_skipped;
cl_uint platform_devices_cnt;
cl_device_id *platform_devices;
u64 opencl_platforms_filter;
cl_device_type opencl_device_types_filter;
} backend_ctx_t;
typedef enum kernel_workload

@@ -191,9 +191,9 @@ static bool setup_devices_filter (hashcat_ctx_t *hashcat_ctx, const char *opencl
return true;
}
static bool setup_device_types_filter (hashcat_ctx_t *hashcat_ctx, const char *opencl_device_types, cl_device_type *out)
static bool setup_opencl_device_types_filter (hashcat_ctx_t *hashcat_ctx, const char *opencl_device_types, cl_device_type *out)
{
cl_device_type device_types_filter = 0;
cl_device_type opencl_device_types_filter = 0;
if (opencl_device_types)
{
@@ -211,14 +211,14 @@ static bool setup_device_types_filter (hashcat_ctx_t *hashcat_ctx, const char *o
if (device_type < 1 || device_type > 3)
{
event_log_error (hashcat_ctx, "Invalid device_type %d specified.", device_type);
event_log_error (hashcat_ctx, "Invalid OpenCL device-type %d specified.", device_type);
hcfree (device_types);
return false;
}
device_types_filter |= 1u << device_type;
opencl_device_types_filter |= 1u << device_type;
} while ((next = strtok_r (NULL, ",", &saveptr)) != NULL);
@@ -229,10 +229,10 @@ static bool setup_device_types_filter (hashcat_ctx_t *hashcat_ctx, const char *o
// Do not use CPU by default, this often reduces GPU performance because
// the CPU is too busy to handle GPU synchronization
device_types_filter = CL_DEVICE_TYPE_ALL & ~CL_DEVICE_TYPE_CPU;
opencl_device_types_filter = CL_DEVICE_TYPE_ALL & ~CL_DEVICE_TYPE_CPU;
}
*out = device_types_filter;
*out = opencl_device_types_filter;
return true;
}
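The renamed filter is a plain bitmask, and the user-facing numbers line up with the OpenCL constants: CL_DEVICE_TYPE_CPU is 1 << 1, CL_DEVICE_TYPE_GPU is 1 << 2, and CL_DEVICE_TYPE_ACCELERATOR is 1 << 3, so "1u << device_type" with device_type in 1..3 produces the matching constant directly. A small illustration, assuming the standard cl.h values:

// "--opencl-device-types 2,3" selects GPUs and accelerators
cl_device_type opencl_device_types_filter = (1u << 2) | (1u << 3);

// later membership tests reduce to a single AND
const bool gpu_passes = (opencl_device_types_filter & CL_DEVICE_TYPE_GPU) != 0; // true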
@@ -330,7 +330,7 @@ static bool write_kernel_binary (hashcat_ctx_t *hashcat_ctx, char *kernel_file,
return true;
}
static bool test_instruction (hashcat_ctx_t *hashcat_ctx, cl_context context, cl_device_id device, const char *kernel_buf)
static bool opencl_test_instruction (hashcat_ctx_t *hashcat_ctx, cl_context context, cl_device_id device, const char *kernel_buf)
{
int CL_rc;
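The hunk shows only the head of opencl_test_instruction; the idea is to JIT-compile a one-line kernel containing the inline-asm instruction and report whether the build succeeds. A self-contained sketch of that probing pattern, using raw OpenCL calls rather than hashcat's hc_cl* wrappers:

#include <CL/cl.h>
#include <stdbool.h>

static bool probe_kernel_builds (cl_context context, cl_device_id device, const char *src)
{
  cl_int err;

  cl_program program = clCreateProgramWithSource (context, 1, &src, NULL, &err);

  if (err != CL_SUCCESS) return false;

  // a failed build means the device/driver rejects the instruction
  err = clBuildProgram (program, 1, &device, NULL, NULL, NULL);

  clReleaseProgram (program);

  return (err == CL_SUCCESS);
}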
@@ -1040,7 +1040,6 @@ int hc_cuDriverGetVersion (hashcat_ctx_t *hashcat_ctx, int *driverVersion)
return 0;
}
// OpenCL
int ocl_init (hashcat_ctx_t *hashcat_ctx)
@@ -1744,6 +1743,8 @@ int hc_clReleaseEvent (hashcat_ctx_t *hashcat_ctx, cl_event event)
return 0;
}
// Backend
int gidd_to_pw_t (hashcat_ctx_t *hashcat_ctx, hc_device_param_t *device_param, const u64 gidd, pw_t *pw)
{
pw_idx_t pw_idx;
@@ -3555,13 +3556,13 @@ int backend_ctx_init (hashcat_ctx_t *hashcat_ctx)
* OpenCL device type selection
*/
cl_device_type device_types_filter;
cl_device_type opencl_device_types_filter;
const bool rc_device_types_filter = setup_device_types_filter (hashcat_ctx, user_options->opencl_device_types, &device_types_filter);
const bool rc_opencl_device_types_filter = setup_opencl_device_types_filter (hashcat_ctx, user_options->opencl_device_types, &opencl_device_types_filter);
if (rc_device_types_filter == false) return -1;
if (rc_opencl_device_types_filter == false) return -1;
backend_ctx->device_types_filter = device_types_filter;
backend_ctx->opencl_device_types_filter = opencl_device_types_filter;
/**
* Backend structures
@@ -3701,7 +3702,7 @@ int backend_ctx_init (hashcat_ctx_t *hashcat_ctx)
if ((device_types_all & (CL_DEVICE_TYPE_GPU | CL_DEVICE_TYPE_ACCELERATOR)) == 0)
{
device_types_filter |= CL_DEVICE_TYPE_CPU;
opencl_device_types_filter |= CL_DEVICE_TYPE_CPU;
}
// In another case, when the user uses --stdout, using CPU devices is much faster to setup
@@ -3711,11 +3712,11 @@ int backend_ctx_init (hashcat_ctx_t *hashcat_ctx)
{
if (device_types_all & CL_DEVICE_TYPE_CPU)
{
device_types_filter = CL_DEVICE_TYPE_CPU;
opencl_device_types_filter = CL_DEVICE_TYPE_CPU;
}
}
backend_ctx->device_types_filter = device_types_filter;
backend_ctx->opencl_device_types_filter = opencl_device_types_filter;
}
backend_ctx->enabled = true;
@@ -3820,21 +3821,21 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
backend_ctx->backend_device_from_cuda[cuda_devices_idx] = backend_devices_idx;
backend_ctx->backend_device_to_cuda[backend_devices_idx] = cuda_devices_idx;
CUdevice device_cuda;
CUdevice cuda_device;
int CU_rc;
CU_rc = hc_cuDeviceGet (hashcat_ctx, &device_cuda, cuda_devices_idx);
CU_rc = hc_cuDeviceGet (hashcat_ctx, &cuda_device, cuda_devices_idx);
if (CU_rc == -1) return -1;
device_param->device_cuda = device_cuda;
device_param->cuda_device = cuda_device;
// device_name
char *device_name = (char *) hcmalloc (HCBUFSIZ_TINY);
CU_rc = hc_cuDeviceGetName (hashcat_ctx, device_name, HCBUFSIZ_TINY, device_cuda);
CU_rc = hc_cuDeviceGetName (hashcat_ctx, device_name, HCBUFSIZ_TINY, cuda_device);
if (CU_rc == -1) return -1;
@@ -3844,26 +3845,28 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
hc_string_trim_trailing (device_name);
// max_compute_units
// device_processors
int device_processors;
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &device_processors, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device_cuda);
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &device_processors, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, cuda_device);
if (CU_rc == -1) return -1;
device_param->device_processors = device_processors;
// device_global_mem
// device_global_mem, device_maxmem_alloc, device_available_mem
size_t bytes;
CU_rc = hc_cuDeviceTotalMem (hashcat_ctx, &bytes, device_cuda);
CU_rc = hc_cuDeviceTotalMem (hashcat_ctx, &bytes, cuda_device);
if (CU_rc == -1) return -1;
device_param->device_global_mem = (u64) bytes;
device_param->device_maxmem_alloc = (u64) bytes;
device_param->device_available_mem = 0;
// sm_minor, sm_major
@@ -3871,11 +3874,11 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
int sm_major = 0;
int sm_minor = 0;
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &sm_major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, device_cuda);
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &sm_major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuda_device);
if (CU_rc == -1) return -1;
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &sm_minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, device_cuda);
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &sm_minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuda_device);
if (CU_rc == -1) return -1;
@@ -3886,7 +3889,7 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
int device_maxworkgroup_size;
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &device_maxworkgroup_size, CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK, device_cuda);
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &device_maxworkgroup_size, CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK, cuda_device);
if (CU_rc == -1) return -1;
@@ -3896,7 +3899,7 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
int device_maxclock_frequency;
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &device_maxclock_frequency, CU_DEVICE_ATTRIBUTE_CLOCK_RATE, device_cuda);
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &device_maxclock_frequency, CU_DEVICE_ATTRIBUTE_CLOCK_RATE, cuda_device);
if (CU_rc == -1) return -1;
@@ -3907,11 +3910,11 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
int pci_bus_id_nv; // is cl_uint the right type for them??
int pci_slot_id_nv;
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &pci_bus_id_nv, CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, device_cuda);
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &pci_bus_id_nv, CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, cuda_device);
if (CU_rc == -1) return -1;
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &pci_slot_id_nv, CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, device_cuda);
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &pci_slot_id_nv, CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, cuda_device);
if (CU_rc == -1) return -1;
@@ -3923,7 +3926,7 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
int kernel_exec_timeout;
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &kernel_exec_timeout, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, device_cuda);
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &kernel_exec_timeout, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, cuda_device);
if (CU_rc == -1) return -1;
@@ -3933,7 +3936,7 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
int max_shared_memory_per_block;
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &max_shared_memory_per_block, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK, device_cuda);
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &max_shared_memory_per_block, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK, cuda_device);
if (CU_rc == -1) return -1;
@@ -3948,7 +3951,7 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
int device_max_constant_buffer_size;
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &device_max_constant_buffer_size, CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY, device_cuda);
CU_rc = hc_cuDeviceGetAttribute (hashcat_ctx, &device_max_constant_buffer_size, CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY, cuda_device);
if (CU_rc == -1) return -1;
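Every CUDA property above arrives one attribute at a time through the driver API. Stripped of the hc_cu* wrappers, the same queries look roughly like this standalone sketch:

#include <stdio.h>
#include <cuda.h>

int main (void)
{
  CUdevice cuda_device;

  int device_processors = 0;
  int sm_major = 0;
  int sm_minor = 0;

  if (cuInit (0) != CUDA_SUCCESS) return 1;
  if (cuDeviceGet (&cuda_device, 0) != CUDA_SUCCESS) return 1;

  // same attributes as queried in the hunks above
  cuDeviceGetAttribute (&device_processors, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT,     cuda_device);
  cuDeviceGetAttribute (&sm_major,          CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuda_device);
  cuDeviceGetAttribute (&sm_minor,          CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuda_device);

  printf ("SM count %d, compute capability %d.%d\n", device_processors, sm_major, sm_minor);

  return 0;
}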
@@ -4138,17 +4141,17 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
device_param->platform = platform;
// device_type
// opencl_device_type
cl_device_type device_type;
cl_device_type opencl_device_type;
CL_rc = hc_clGetDeviceInfo (hashcat_ctx, device_param->device, CL_DEVICE_TYPE, sizeof (device_type), &device_type, NULL);
CL_rc = hc_clGetDeviceInfo (hashcat_ctx, device_param->device, CL_DEVICE_TYPE, sizeof (opencl_device_type), &opencl_device_type, NULL);
if (CL_rc == -1) return -1;
device_type &= ~CL_DEVICE_TYPE_DEFAULT;
opencl_device_type &= ~CL_DEVICE_TYPE_DEFAULT;
device_param->device_type = device_type;
device_param->opencl_device_type = opencl_device_type;
// device_name
@@ -4461,7 +4464,7 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
// However, Intel has much better SIMD control over their own hardware
// It makes sense to give them full control over their own hardware
if (device_type & CL_DEVICE_TYPE_CPU)
if (opencl_device_type & CL_DEVICE_TYPE_CPU)
{
if (device_param->device_vendor_id == VENDOR_ID_AMD_USE_INTEL)
{
@@ -4481,7 +4484,7 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
// Disable such devices unless the user forces to use it
#if !defined (__APPLE__)
if (device_type & CL_DEVICE_TYPE_GPU)
if (opencl_device_type & CL_DEVICE_TYPE_GPU)
{
if ((device_param->device_vendor_id == VENDOR_ID_INTEL_SDK) || (device_param->device_vendor_id == VENDOR_ID_INTEL_BEIGNET))
{
@@ -4504,7 +4507,7 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
device_param->skipped = true;
}
if ((backend_ctx->device_types_filter & (device_type)) == 0)
if ((backend_ctx->opencl_device_types_filter & (opencl_device_type)) == 0)
{
device_param->skipped = true;
}
@@ -4525,7 +4528,7 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
// vendor specific
if (device_param->device_type & CL_DEVICE_TYPE_GPU)
if (device_param->opencl_device_type & CL_DEVICE_TYPE_GPU)
{
if ((device_param->platform_vendor_id == VENDOR_ID_AMD) && (device_param->device_vendor_id == VENDOR_ID_AMD))
{
@@ -4546,7 +4549,7 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
}
}
if (device_param->device_type & CL_DEVICE_TYPE_GPU)
if (device_param->opencl_device_type & CL_DEVICE_TYPE_GPU)
{
if ((device_param->platform_vendor_id == VENDOR_ID_AMD) && (device_param->device_vendor_id == VENDOR_ID_AMD))
{
@@ -4614,7 +4617,7 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
{
if ((user_options->force == false) && (user_options->opencl_info == false))
{
if (device_type & CL_DEVICE_TYPE_CPU)
if (opencl_device_type & CL_DEVICE_TYPE_CPU)
{
if (device_param->platform_vendor_id == VENDOR_ID_INTEL_SDK)
{
@@ -4662,7 +4665,7 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
}
}
}
else if (device_type & CL_DEVICE_TYPE_GPU)
else if (opencl_device_type & CL_DEVICE_TYPE_GPU)
{
if (device_param->platform_vendor_id == VENDOR_ID_AMD)
{
@@ -4811,36 +4814,36 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
if (CL_rc == -1) return -1;
if ((device_param->device_type & CL_DEVICE_TYPE_GPU) && (device_param->platform_vendor_id == VENDOR_ID_AMD))
if ((device_param->opencl_device_type & CL_DEVICE_TYPE_GPU) && (device_param->platform_vendor_id == VENDOR_ID_AMD))
{
const bool has_vadd3 = test_instruction (hashcat_ctx, context, device_param->device, "__kernel void test () { uint r; __asm__ __volatile__ (\"V_ADD3_U32 %0, 0, 0, 0;\" : \"=v\"(r)); }");
const bool has_vadd3 = opencl_test_instruction (hashcat_ctx, context, device_param->device, "__kernel void test () { uint r; __asm__ __volatile__ (\"V_ADD3_U32 %0, 0, 0, 0;\" : \"=v\"(r)); }");
device_param->has_vadd3 = has_vadd3;
const bool has_vbfe = test_instruction (hashcat_ctx, context, device_param->device, "__kernel void test () { uint r; __asm__ __volatile__ (\"V_BFE_U32 %0, 0, 0, 0;\" : \"=v\"(r)); }");
const bool has_vbfe = opencl_test_instruction (hashcat_ctx, context, device_param->device, "__kernel void test () { uint r; __asm__ __volatile__ (\"V_BFE_U32 %0, 0, 0, 0;\" : \"=v\"(r)); }");
device_param->has_vbfe = has_vbfe;
const bool has_vperm = test_instruction (hashcat_ctx, context, device_param->device, "__kernel void test () { uint r; __asm__ __volatile__ (\"V_PERM_B32 %0, 0, 0, 0;\" : \"=v\"(r)); }");
const bool has_vperm = opencl_test_instruction (hashcat_ctx, context, device_param->device, "__kernel void test () { uint r; __asm__ __volatile__ (\"V_PERM_B32 %0, 0, 0, 0;\" : \"=v\"(r)); }");
device_param->has_vperm = has_vperm;
}
if ((device_param->device_type & CL_DEVICE_TYPE_GPU) && (device_param->platform_vendor_id == VENDOR_ID_NV))
if ((device_param->opencl_device_type & CL_DEVICE_TYPE_GPU) && (device_param->platform_vendor_id == VENDOR_ID_NV))
{
const bool has_bfe = test_instruction (hashcat_ctx, context, device_param->device, "__kernel void test () { uint r; asm volatile (\"bfe.u32 %0, 0, 0, 0;\" : \"=r\"(r)); }");
const bool has_bfe = opencl_test_instruction (hashcat_ctx, context, device_param->device, "__kernel void test () { uint r; asm volatile (\"bfe.u32 %0, 0, 0, 0;\" : \"=r\"(r)); }");
device_param->has_bfe = has_bfe;
const bool has_lop3 = test_instruction (hashcat_ctx, context, device_param->device, "__kernel void test () { uint r; asm volatile (\"lop3.b32 %0, 0, 0, 0, 0;\" : \"=r\"(r)); }");
const bool has_lop3 = opencl_test_instruction (hashcat_ctx, context, device_param->device, "__kernel void test () { uint r; asm volatile (\"lop3.b32 %0, 0, 0, 0, 0;\" : \"=r\"(r)); }");
device_param->has_lop3 = has_lop3;
const bool has_mov64 = test_instruction (hashcat_ctx, context, device_param->device, "__kernel void test () { ulong r; uint a; uint b; asm volatile (\"mov.b64 %0, {%1, %2};\" : \"=l\"(r) : \"r\"(a), \"r\"(b)); }");
const bool has_mov64 = opencl_test_instruction (hashcat_ctx, context, device_param->device, "__kernel void test () { ulong r; uint a; uint b; asm volatile (\"mov.b64 %0, {%1, %2};\" : \"=l\"(r) : \"r\"(a), \"r\"(b)); }");
device_param->has_mov64 = has_mov64;
const bool has_prmt = test_instruction (hashcat_ctx, context, device_param->device, "__kernel void test () { uint r; asm volatile (\"prmt.b32 %0, 0, 0, 0;\" : \"=r\"(r)); }");
const bool has_prmt = opencl_test_instruction (hashcat_ctx, context, device_param->device, "__kernel void test () { uint r; asm volatile (\"prmt.b32 %0, 0, 0, 0;\" : \"=r\"(r)); }");
device_param->has_prmt = has_prmt;
}
@@ -4853,9 +4856,9 @@ int backend_ctx_devices_init (hashcat_ctx_t *hashcat_ctx, const int comptime)
device_param->device_available_mem = device_param->device_global_mem - MAX_ALLOC_CHECKS_SIZE;
#if defined (_WIN)
if ((device_param->device_type & CL_DEVICE_TYPE_GPU) && (device_param->platform_vendor_id == VENDOR_ID_NV))
if ((device_param->opencl_device_type & CL_DEVICE_TYPE_GPU) && (device_param->platform_vendor_id == VENDOR_ID_NV))
#else
if ((device_param->device_type & CL_DEVICE_TYPE_GPU) && ((device_param->platform_vendor_id == VENDOR_ID_NV) || (device_param->platform_vendor_id == VENDOR_ID_AMD)))
if ((device_param->opencl_device_type & CL_DEVICE_TYPE_GPU) && ((device_param->platform_vendor_id == VENDOR_ID_NV) || (device_param->platform_vendor_id == VENDOR_ID_AMD)))
#endif
{
// OK, so the problem here is the following:
@@ -5246,7 +5249,7 @@ static u32 get_kernel_threads (hashcat_ctx_t *hashcat_ctx, const hc_device_param
// for CPU we just do 1 ...
if (device_param->device_type & CL_DEVICE_TYPE_CPU)
if (device_param->opencl_device_type & CL_DEVICE_TYPE_CPU)
{
if ((1 >= kernel_threads_min) && (1 <= kernel_threads_max))
{
@@ -5420,11 +5423,11 @@ int backend_session_begin (hashcat_ctx_t *hashcat_ctx)
if (user_options->slow_candidates == true)
{
tuningdb_entry = tuning_db_search (hashcat_ctx, device_param->device_name, device_param->device_type, 0, hashconfig->hash_mode);
tuningdb_entry = tuning_db_search (hashcat_ctx, device_param->device_name, device_param->opencl_device_type, 0, hashconfig->hash_mode);
}
else
{
tuningdb_entry = tuning_db_search (hashcat_ctx, device_param->device_name, device_param->device_type, user_options->attack_mode, hashconfig->hash_mode);
tuningdb_entry = tuning_db_search (hashcat_ctx, device_param->device_name, device_param->opencl_device_type, user_options->attack_mode, hashconfig->hash_mode);
}
if (tuningdb_entry == NULL || tuningdb_entry->vector_width == -1)
@@ -5458,7 +5461,7 @@ int backend_session_begin (hashcat_ctx_t *hashcat_ctx)
if ((hashconfig->opti_type & OPTI_TYPE_OPTIMIZED_KERNEL) == 0)
{
if (device_param->device_type & CL_DEVICE_TYPE_GPU)
if (device_param->opencl_device_type & CL_DEVICE_TYPE_GPU)
{
vector_width = 1;
}
@@ -5483,11 +5486,11 @@ int backend_session_begin (hashcat_ctx_t *hashcat_ctx)
if (user_options->slow_candidates == true)
{
tuningdb_entry = tuning_db_search (hashcat_ctx, device_param->device_name, device_param->device_type, 0, hashconfig->hash_mode);
tuningdb_entry = tuning_db_search (hashcat_ctx, device_param->device_name, device_param->opencl_device_type, 0, hashconfig->hash_mode);
}
else
{
tuningdb_entry = tuning_db_search (hashcat_ctx, device_param->device_name, device_param->device_type, user_options->attack_mode, hashconfig->hash_mode);
tuningdb_entry = tuning_db_search (hashcat_ctx, device_param->device_name, device_param->opencl_device_type, user_options->attack_mode, hashconfig->hash_mode);
}
// user commandline option override tuning db
@@ -5738,15 +5741,15 @@ int backend_session_begin (hashcat_ctx_t *hashcat_ctx)
// we don't have sm_* on vendors not NV but it doesn't matter
#if defined (DEBUG)
build_options_len += snprintf (build_options_buf + build_options_len, build_options_sz - build_options_len, "-D LOCAL_MEM_TYPE=%u -D VENDOR_ID=%u -D CUDA_ARCH=%u -D HAS_VPERM=%u -D HAS_VADD3=%u -D HAS_VBFE=%u -D HAS_BFE=%u -D HAS_LOP3=%u -D HAS_MOV64=%u -D HAS_PRMT=%u -D VECT_SIZE=%u -D DEVICE_TYPE=%u -D DGST_R0=%u -D DGST_R1=%u -D DGST_R2=%u -D DGST_R3=%u -D DGST_ELEM=%u -D KERN_TYPE=%u -D _unroll ", device_param->device_local_mem_type, device_param->platform_vendor_id, (device_param->sm_major * 100) + (device_param->sm_minor * 10), device_param->has_vperm, device_param->has_vadd3, device_param->has_vbfe, device_param->has_bfe, device_param->has_lop3, device_param->has_mov64, device_param->has_prmt, device_param->vector_width, (u32) device_param->device_type, hashconfig->dgst_pos0, hashconfig->dgst_pos1, hashconfig->dgst_pos2, hashconfig->dgst_pos3, hashconfig->dgst_size / 4, kern_type);
build_options_len += snprintf (build_options_buf + build_options_len, build_options_sz - build_options_len, "-D LOCAL_MEM_TYPE=%u -D VENDOR_ID=%u -D CUDA_ARCH=%u -D HAS_VPERM=%u -D HAS_VADD3=%u -D HAS_VBFE=%u -D HAS_BFE=%u -D HAS_LOP3=%u -D HAS_MOV64=%u -D HAS_PRMT=%u -D VECT_SIZE=%u -D DEVICE_TYPE=%u -D DGST_R0=%u -D DGST_R1=%u -D DGST_R2=%u -D DGST_R3=%u -D DGST_ELEM=%u -D KERN_TYPE=%u -D _unroll ", device_param->device_local_mem_type, device_param->platform_vendor_id, (device_param->sm_major * 100) + (device_param->sm_minor * 10), device_param->has_vperm, device_param->has_vadd3, device_param->has_vbfe, device_param->has_bfe, device_param->has_lop3, device_param->has_mov64, device_param->has_prmt, device_param->vector_width, (u32) device_param->opencl_device_type, hashconfig->dgst_pos0, hashconfig->dgst_pos1, hashconfig->dgst_pos2, hashconfig->dgst_pos3, hashconfig->dgst_size / 4, kern_type);
#else
build_options_len += snprintf (build_options_buf + build_options_len, build_options_sz - build_options_len, "-D LOCAL_MEM_TYPE=%u -D VENDOR_ID=%u -D CUDA_ARCH=%u -D HAS_VPERM=%u -D HAS_VADD3=%u -D HAS_VBFE=%u -D HAS_BFE=%u -D HAS_LOP3=%u -D HAS_MOV64=%u -D HAS_PRMT=%u -D VECT_SIZE=%u -D DEVICE_TYPE=%u -D DGST_R0=%u -D DGST_R1=%u -D DGST_R2=%u -D DGST_R3=%u -D DGST_ELEM=%u -D KERN_TYPE=%u -D _unroll -w ", device_param->device_local_mem_type, device_param->platform_vendor_id, (device_param->sm_major * 100) + (device_param->sm_minor * 10), device_param->has_vperm, device_param->has_vadd3, device_param->has_vbfe, device_param->has_bfe, device_param->has_lop3, device_param->has_mov64, device_param->has_prmt, device_param->vector_width, (u32) device_param->device_type, hashconfig->dgst_pos0, hashconfig->dgst_pos1, hashconfig->dgst_pos2, hashconfig->dgst_pos3, hashconfig->dgst_size / 4, kern_type);
build_options_len += snprintf (build_options_buf + build_options_len, build_options_sz - build_options_len, "-D LOCAL_MEM_TYPE=%u -D VENDOR_ID=%u -D CUDA_ARCH=%u -D HAS_VPERM=%u -D HAS_VADD3=%u -D HAS_VBFE=%u -D HAS_BFE=%u -D HAS_LOP3=%u -D HAS_MOV64=%u -D HAS_PRMT=%u -D VECT_SIZE=%u -D DEVICE_TYPE=%u -D DGST_R0=%u -D DGST_R1=%u -D DGST_R2=%u -D DGST_R3=%u -D DGST_ELEM=%u -D KERN_TYPE=%u -D _unroll -w ", device_param->device_local_mem_type, device_param->platform_vendor_id, (device_param->sm_major * 100) + (device_param->sm_minor * 10), device_param->has_vperm, device_param->has_vadd3, device_param->has_vbfe, device_param->has_bfe, device_param->has_lop3, device_param->has_mov64, device_param->has_prmt, device_param->vector_width, (u32) device_param->opencl_device_type, hashconfig->dgst_pos0, hashconfig->dgst_pos1, hashconfig->dgst_pos2, hashconfig->dgst_pos3, hashconfig->dgst_size / 4, kern_type);
#endif
build_options_buf[build_options_len] = 0;
/*
if (device_param->device_type & CL_DEVICE_TYPE_CPU)
if (device_param->opencl_device_type & CL_DEVICE_TYPE_CPU)
{
if (device_param->platform_vendor_id == VENDOR_ID_INTEL_SDK)
{
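The -D defines assembled above are how the instruction probes (HAS_VADD3, HAS_LOP3, ...), the vector width, and the renamed device type reach the kernel sources at JIT time. A hypothetical kernel-side consumer; the macro names match the -D flags, but the surrounding logic is invented:

#if VECT_SIZE == 4
typedef uint4 u32x;   // vectorized path
#else
typedef uint  u32x;   // scalar path
#endif

#if HAS_LOP3 == 1
// take the lop3.b32 inline-asm fast path
#else
// fall back to portable bitwise expressions
#endif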
@@ -5826,7 +5829,7 @@ int backend_session_begin (hashcat_ctx_t *hashcat_ctx)
if (device_param->platform_vendor_id == VENDOR_ID_APPLE)
{
if (device_param->device_type & CL_DEVICE_TYPE_CPU)
if (device_param->opencl_device_type & CL_DEVICE_TYPE_CPU)
{
cache_disable = true;
}

@@ -1351,7 +1351,7 @@ int hm_get_threshold_slowdown_with_device_id (hashcat_ctx_t *hashcat_ctx, const
if (hwmon_ctx->hm_device[device_id].threshold_slowdown_get_supported == false) return -1;
if ((backend_ctx->devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if ((backend_ctx->devices_param[device_id].opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if (backend_ctx->devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
{
@@ -1411,7 +1411,7 @@ int hm_get_threshold_shutdown_with_device_id (hashcat_ctx_t *hashcat_ctx, const
if (hwmon_ctx->hm_device[device_id].threshold_shutdown_get_supported == false) return -1;
if ((backend_ctx->devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if ((backend_ctx->devices_param[device_id].opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if (backend_ctx->devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
{
@@ -1459,7 +1459,7 @@ int hm_get_temperature_with_device_id (hashcat_ctx_t *hashcat_ctx, const u32 dev
if (hwmon_ctx->hm_device[device_id].temperature_get_supported == false) return -1;
if ((backend_ctx->devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if ((backend_ctx->devices_param[device_id].opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if (backend_ctx->devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
{
@@ -1542,7 +1542,7 @@ int hm_get_fanpolicy_with_device_id (hashcat_ctx_t *hashcat_ctx, const u32 devic
if (hwmon_ctx->hm_device[device_id].fanpolicy_get_supported == false) return -1;
if ((backend_ctx->devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if ((backend_ctx->devices_param[device_id].opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if (backend_ctx->devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
{
@@ -1600,7 +1600,7 @@ int hm_get_fanspeed_with_device_id (hashcat_ctx_t *hashcat_ctx, const u32 device
if (hwmon_ctx->hm_device[device_id].fanspeed_get_supported == false) return -1;
if ((backend_ctx->devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if ((backend_ctx->devices_param[device_id].opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if (backend_ctx->devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
{
@@ -1689,7 +1689,7 @@ int hm_get_buslanes_with_device_id (hashcat_ctx_t *hashcat_ctx, const u32 device
if (hwmon_ctx->hm_device[device_id].buslanes_get_supported == false) return -1;
if ((backend_ctx->devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if ((backend_ctx->devices_param[device_id].opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if (backend_ctx->devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
{
@@ -1755,7 +1755,7 @@ int hm_get_utilization_with_device_id (hashcat_ctx_t *hashcat_ctx, const u32 dev
if (hwmon_ctx->hm_device[device_id].utilization_get_supported == false) return -1;
if ((backend_ctx->devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if ((backend_ctx->devices_param[device_id].opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if (backend_ctx->devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
{
@@ -1807,7 +1807,7 @@ int hm_get_memoryspeed_with_device_id (hashcat_ctx_t *hashcat_ctx, const u32 dev
if (hwmon_ctx->hm_device[device_id].memoryspeed_get_supported == false) return -1;
if ((backend_ctx->devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if ((backend_ctx->devices_param[device_id].opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if (backend_ctx->devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
{
@@ -1873,7 +1873,7 @@ int hm_get_corespeed_with_device_id (hashcat_ctx_t *hashcat_ctx, const u32 devic
if (hwmon_ctx->hm_device[device_id].corespeed_get_supported == false) return -1;
if ((backend_ctx->devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if ((backend_ctx->devices_param[device_id].opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if (backend_ctx->devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
{
@@ -1939,7 +1939,7 @@ int hm_get_throttle_with_device_id (hashcat_ctx_t *hashcat_ctx, const u32 device
if (hwmon_ctx->hm_device[device_id].throttle_get_supported == false) return -1;
if ((backend_ctx->devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if ((backend_ctx->devices_param[device_id].opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) return -1;
if (backend_ctx->devices_param[device_id].device_vendor_id == VENDOR_ID_AMD)
{
@@ -2108,7 +2108,7 @@ int hwmon_ctx_init (hashcat_ctx_t *hashcat_ctx)
if (device_param->skipped == true) continue;
if ((device_param->device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
if ((device_param->opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
if (device_param->device_vendor_id != VENDOR_ID_NV) continue;
@@ -2158,7 +2158,7 @@ int hwmon_ctx_init (hashcat_ctx_t *hashcat_ctx)
if (device_param->skipped == true) continue;
if ((device_param->device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
if ((device_param->opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
if (device_param->device_vendor_id != VENDOR_ID_NV) continue;
@@ -2227,7 +2227,7 @@ int hwmon_ctx_init (hashcat_ctx_t *hashcat_ctx)
if (device_param->skipped == true) continue;
if ((device_param->device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
if ((device_param->opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
if (device_param->device_vendor_id != VENDOR_ID_AMD) continue;
@@ -2275,7 +2275,7 @@ int hwmon_ctx_init (hashcat_ctx_t *hashcat_ctx)
{
hc_device_param_t *device_param = &backend_ctx->devices_param[device_id];
if ((device_param->device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
if ((device_param->opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
hm_adapters_sysfs[hm_adapters_id].sysfs = device_id;
@@ -2320,7 +2320,7 @@ int hwmon_ctx_init (hashcat_ctx_t *hashcat_ctx)
if (device_param->skipped == true) continue;
if ((device_param->device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
if ((device_param->opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
const u32 platform_devices_id = device_param->platform_devices_id;
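Every hwmon entry point above repeats the same guard against non-GPU devices. Restated as a compact hypothetical helper (not something this commit introduces):

static bool is_opencl_gpu (const hc_device_param_t *device_param)
{
  return ((device_param->opencl_device_type & CL_DEVICE_TYPE_GPU) != 0);
}

// usage: if (is_opencl_gpu (device_param) == false) return -1;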

@@ -88,7 +88,7 @@ char *module_jit_build_options (MAYBE_UNUSED const hashconfig_t *hashconfig, MAY
u32 fixed_local_size = 0;
if (device_param->device_type & CL_DEVICE_TYPE_CPU)
if (device_param->opencl_device_type & CL_DEVICE_TYPE_CPU)
{
fixed_local_size = 1;
}

@@ -289,7 +289,7 @@ bool module_unstable_warning (MAYBE_UNUSED const hashconfig_t *hashconfig, MAYBE
if (device_param->platform_vendor_id == VENDOR_ID_APPLE)
{
// trap 6
if ((device_param->device_vendor_id == VENDOR_ID_INTEL_SDK) && (device_param->device_type & CL_DEVICE_TYPE_GPU))
if ((device_param->device_vendor_id == VENDOR_ID_INTEL_SDK) && (device_param->opencl_device_type & CL_DEVICE_TYPE_GPU))
{
return true;
}

@@ -76,7 +76,7 @@ char *module_jit_build_options (MAYBE_UNUSED const hashconfig_t *hashconfig, MAY
u32 fixed_local_size = 0;
if (device_param->device_type & CL_DEVICE_TYPE_CPU)
if (device_param->opencl_device_type & CL_DEVICE_TYPE_CPU)
{
fixed_local_size = 1;
}

@@ -68,7 +68,7 @@ char *module_jit_build_options (MAYBE_UNUSED const hashconfig_t *hashconfig, MAY
u32 fixed_local_size = 0;
if (device_param->device_type & CL_DEVICE_TYPE_CPU)
if (device_param->opencl_device_type & CL_DEVICE_TYPE_CPU)
{
fixed_local_size = 1;
}
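Each module_jit_build_options hunk above pins CPU devices to fixed_local_size = 1, i.e. one work-item per work-group. The value presumably ends up as a JIT define; a sketch of that step, where the hc_asprintf call and the FIXED_LOCAL_SIZE name are assumptions not shown in this diff:

char *jit_build_options = NULL;

hc_asprintf (&jit_build_options, "-D FIXED_LOCAL_SIZE=%u", fixed_local_size);

return jit_build_options;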

@@ -120,7 +120,7 @@ static int monitor (hashcat_ctx_t *hashcat_ctx)
if (device_param->skipped == true) continue;
if ((backend_ctx->devices_param[device_id].device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
if ((backend_ctx->devices_param[device_id].opencl_device_type & CL_DEVICE_TYPE_GPU) == 0) continue;
const int temperature = hm_get_temperature_with_device_id (hashcat_ctx, device_id);

@@ -687,7 +687,7 @@ void opencl_info (hashcat_ctx_t *hashcat_ctx)
if (device_param->platform != platform_id) continue;
cl_device_type device_type = device_param->device_type;
cl_device_type opencl_device_type = device_param->opencl_device_type;
cl_uint device_vendor_id = device_param->device_vendor_id;
char *device_vendor = device_param->device_vendor;
char *device_name = device_param->device_name;
@@ -700,7 +700,7 @@ void opencl_info (hashcat_ctx_t *hashcat_ctx)
char *driver_version = device_param->driver_version;
event_log_info (hashcat_ctx, " Device ID #%u", devices_idx + 1);
event_log_info (hashcat_ctx, " Type : %s", ((device_type & CL_DEVICE_TYPE_CPU) ? "CPU" : ((device_type & CL_DEVICE_TYPE_GPU) ? "GPU" : "Accelerator")));
event_log_info (hashcat_ctx, " Type : %s", ((opencl_device_type & CL_DEVICE_TYPE_CPU) ? "CPU" : ((opencl_device_type & CL_DEVICE_TYPE_GPU) ? "GPU" : "Accelerator")));
event_log_info (hashcat_ctx, " Vendor ID : %u", device_vendor_id);
event_log_info (hashcat_ctx, " Vendor : %s", device_vendor);
event_log_info (hashcat_ctx, " Name : %s", device_name);
