1
0
mirror of https://github.com/hashcat/hashcat.git synced 2024-12-23 07:08:19 +00:00

More nvapi preparations and display skip reason in benchmark

This commit is contained in:
jsteube 2016-06-03 11:33:59 +02:00
parent 34bcbb3091
commit 040bbd416a
5 changed files with 94 additions and 43 deletions

View File

@ -115,8 +115,8 @@ static inline int CPU_ISSET (int num, cpu_set_t *cs) { return (cs->count & (1 <
*/ */
#include "ext_ADL.h" #include "ext_ADL.h"
#include "ext_nvml.h"
#include "ext_nvapi.h" #include "ext_nvapi.h"
#include "ext_nvml.h"
/** /**
* shared stuff * shared stuff
@ -1430,12 +1430,14 @@ void fsync (int fd);
#ifdef HAVE_HWMON #ifdef HAVE_HWMON
int hm_get_adapter_index_nvml (HM_ADAPTER_NVML nvGPUHandle[DEVICES_MAX]);
int get_adapters_num_adl (void *adl, int *iNumberAdapters); int get_adapters_num_adl (void *adl, int *iNumberAdapters);
int hm_get_adapter_index_adl (hm_attrs_t *hm_device, u32 *valid_adl_device_list, int num_adl_adapters, LPAdapterInfo lpAdapterInfo); int hm_get_adapter_index_adl (hm_attrs_t *hm_device, u32 *valid_adl_device_list, int num_adl_adapters, LPAdapterInfo lpAdapterInfo);
int hm_get_adapter_index_nvapi (HM_ADAPTER_NVAPI nvapiGPUHandle[DEVICES_MAX]);
int hm_get_adapter_index_nvml (HM_ADAPTER_NVML nvmlGPUHandle[DEVICES_MAX]);
LPAdapterInfo hm_get_adapter_info_adl (void *adl, int iNumberAdapters); LPAdapterInfo hm_get_adapter_info_adl (void *adl, int iNumberAdapters);
u32 *hm_get_list_valid_adl_adapters (int iNumberAdapters, int *num_adl_adapters, LPAdapterInfo lpAdapterInfo); u32 *hm_get_list_valid_adl_adapters (int iNumberAdapters, int *num_adl_adapters, LPAdapterInfo lpAdapterInfo);

View File

@ -152,11 +152,12 @@ NATIVE_OBJS := obj/ext_OpenCL.NATIVE.o obj/shared.NATIVE.o obj/rp_k
ifeq ($(UNAME),Linux) ifeq ($(UNAME),Linux)
NATIVE_OBJS += obj/ext_ADL.NATIVE.o NATIVE_OBJS += obj/ext_ADL.NATIVE.o
NATIVE_OBJS += obj/ext_nvapi.NATIVE.o
NATIVE_OBJS += obj/ext_nvml.NATIVE.o NATIVE_OBJS += obj/ext_nvml.NATIVE.o
endif endif
LINUX_32_OBJS := obj/ext_OpenCL.LINUX.32.o obj/shared.LINUX.32.o obj/rp_kernel_on_cpu.LINUX.32.o obj/ext_ADL.LINUX.32.o obj/ext_nvml.LINUX.32.o LINUX_32_OBJS := obj/ext_OpenCL.LINUX.32.o obj/shared.LINUX.32.o obj/rp_kernel_on_cpu.LINUX.32.o obj/ext_ADL.LINUX.32.o obj/ext_nvml.LINUX.32.o obj/ext_nvapi.LINUX.32.o
LINUX_64_OBJS              := obj/ext_OpenCL.LINUX.64.o obj/shared.LINUX.64.o obj/rp_kernel_on_cpu.LINUX.64.o obj/ext_ADL.LINUX.64.o obj/ext_nvml.LINUX.64.o LINUX_64_OBJS              := obj/ext_OpenCL.LINUX.64.o obj/shared.LINUX.64.o obj/rp_kernel_on_cpu.LINUX.64.o obj/ext_ADL.LINUX.64.o obj/ext_nvml.LINUX.64.o obj/ext_nvapi.LINUX.64.o
WIN_32_OBJS := obj/ext_OpenCL.WIN.32.o obj/shared.WIN.32.o obj/rp_kernel_on_cpu.WIN.32.o obj/ext_ADL.WIN.32.o obj/ext_nvml.WIN.32.o obj/ext_nvapi.WIN.32.o WIN_32_OBJS := obj/ext_OpenCL.WIN.32.o obj/shared.WIN.32.o obj/rp_kernel_on_cpu.WIN.32.o obj/ext_ADL.WIN.32.o obj/ext_nvml.WIN.32.o obj/ext_nvapi.WIN.32.o
WIN_64_OBJS := obj/ext_OpenCL.WIN.64.o obj/shared.WIN.64.o obj/rp_kernel_on_cpu.WIN.64.o obj/ext_ADL.WIN.64.o obj/ext_nvml.WIN.64.o obj/ext_nvapi.WIN.64.o WIN_64_OBJS := obj/ext_OpenCL.WIN.64.o obj/shared.WIN.64.o obj/rp_kernel_on_cpu.WIN.64.o obj/ext_ADL.WIN.64.o obj/ext_nvml.WIN.64.o obj/ext_nvapi.WIN.64.o

View File

@ -13,11 +13,15 @@ int nvapi_init (NVAPI_PTR *nvapi)
memset (nvapi, 0, sizeof (NVAPI_PTR)); memset (nvapi, 0, sizeof (NVAPI_PTR));
#ifdef _WIN
#if __x86_64__ #if __x86_64__
nvapi->lib = hc_dlopen ("nvapi64.dll"); nvapi->lib = hc_dlopen ("nvapi64.dll");
#elif __x86__ #elif __x86__
nvapi->lib = hc_dlopen ("nvapi.dll"); nvapi->lib = hc_dlopen ("nvapi.dll");
#endif #endif
#else
nvapi->lib = hc_dlopen ("nvapi.so", RTLD_NOW); // uhm yes, but .. yeah
#endif
if (!nvapi->lib) if (!nvapi->lib)
{ {

View File

@ -13319,6 +13319,7 @@ int main (int argc, char **argv)
*/ */
int need_adl = 0; int need_adl = 0;
int need_nvapi = 0;
int need_nvml = 0; int need_nvml = 0;
hc_device_param_t *devices_param = (hc_device_param_t *) mycalloc (DEVICES_MAX, sizeof (hc_device_param_t)); hc_device_param_t *devices_param = (hc_device_param_t *) mycalloc (DEVICES_MAX, sizeof (hc_device_param_t));
@ -13577,7 +13578,7 @@ int main (int argc, char **argv)
if (device_endian_little == CL_FALSE) if (device_endian_little == CL_FALSE)
{ {
if (data.quiet == 0) log_info ("Device #%u: WARNING: not little endian device", device_id + 1); log_info ("Device #%u: WARNING: not little endian device", device_id + 1);
device_param->skipped = 1; device_param->skipped = 1;
} }
@ -13590,7 +13591,7 @@ int main (int argc, char **argv)
if (device_available == CL_FALSE) if (device_available == CL_FALSE)
{ {
if (data.quiet == 0) log_info ("Device #%u: WARNING: device not available", device_id + 1); log_info ("Device #%u: WARNING: device not available", device_id + 1);
device_param->skipped = 1; device_param->skipped = 1;
} }
@ -13603,7 +13604,7 @@ int main (int argc, char **argv)
if (device_compiler_available == CL_FALSE) if (device_compiler_available == CL_FALSE)
{ {
if (data.quiet == 0) log_info ("Device #%u: WARNING: device no compiler available", device_id + 1); log_info ("Device #%u: WARNING: device no compiler available", device_id + 1);
device_param->skipped = 1; device_param->skipped = 1;
} }
@ -13616,7 +13617,7 @@ int main (int argc, char **argv)
if ((device_execution_capabilities & CL_EXEC_KERNEL) == 0) if ((device_execution_capabilities & CL_EXEC_KERNEL) == 0)
{ {
if (data.quiet == 0) log_info ("Device #%u: WARNING: device does not support executing kernels", device_id + 1); log_info ("Device #%u: WARNING: device does not support executing kernels", device_id + 1);
device_param->skipped = 1; device_param->skipped = 1;
} }
@ -13633,14 +13634,14 @@ int main (int argc, char **argv)
if (strstr (device_extensions, "base_atomics") == 0) if (strstr (device_extensions, "base_atomics") == 0)
{ {
if (data.quiet == 0) log_info ("Device #%u: WARNING: device does not support base atomics", device_id + 1); log_info ("Device #%u: WARNING: device does not support base atomics", device_id + 1);
device_param->skipped = 1; device_param->skipped = 1;
} }
if (strstr (device_extensions, "byte_addressable_store") == 0) if (strstr (device_extensions, "byte_addressable_store") == 0)
{ {
if (data.quiet == 0) log_info ("Device #%u: WARNING: device does not support byte addressable store", device_id + 1); log_info ("Device #%u: WARNING: device does not support byte addressable store", device_id + 1);
device_param->skipped = 1; device_param->skipped = 1;
} }
@ -13655,7 +13656,7 @@ int main (int argc, char **argv)
if (device_local_mem_size < 32768) if (device_local_mem_size < 32768)
{ {
if (data.quiet == 0) log_info ("Device #%u: WARNING: device local mem size is too small", device_id + 1); log_info ("Device #%u: WARNING: device local mem size is too small", device_id + 1);
device_param->skipped = 1; device_param->skipped = 1;
} }
@ -13670,9 +13671,10 @@ int main (int argc, char **argv)
{ {
if (device_param->device_vendor_id == VENDOR_ID_AMD_USE_INTEL) if (device_param->device_vendor_id == VENDOR_ID_AMD_USE_INTEL)
{ {
if (data.quiet == 0) log_info ("Device #%u: WARNING: not native intel opencl platform", device_id + 1); log_info ("Device #%u: WARNING: not native intel opencl runtime, expect massive speed loss", device_id + 1);
log_info (" You can use --force to override this but do not post error reports if you do so");
device_param->skipped = 1; if (data.force == 0) device_param->skipped = 1;
} }
} }
@ -13721,6 +13723,10 @@ int main (int argc, char **argv)
if ((device_param->platform_vendor_id == VENDOR_ID_NV) && (device_param->device_vendor_id == VENDOR_ID_NV)) if ((device_param->platform_vendor_id == VENDOR_ID_NV) && (device_param->device_vendor_id == VENDOR_ID_NV))
{ {
need_nvml = 1; need_nvml = 1;
#ifdef _WIN
need_nvapi = 1;
#endif
} }
} }
@ -14042,6 +14048,28 @@ int main (int argc, char **argv)
} }
} }
if ((need_nvapi == 1) && (nvapi_init (nvapi) == 0))
{
data.hm_nvapi = nvapi;
}
if (data.hm_nvapi)
{
if (hm_NvAPI_Initialize (data.hm_nvapi) == NVAPI_OK)
{
HM_ADAPTER_NVAPI nvGPUHandle[DEVICES_MAX] = { 0 };
int tmp_in = hm_get_adapter_index_nvapi (nvGPUHandle);
int tmp_out = 0;
for (int i = 0; i < tmp_in; i++)
{
hm_adapters_nvapi[tmp_out++].adapter_index.nvapi = nvGPUHandle[i];
}
}
}
if ((need_adl == 1) && (adl_init (adl) == 0)) if ((need_adl == 1) && (adl_init (adl) == 0))
{ {
data.hm_adl = adl; data.hm_adl = adl;

View File

@ -2672,31 +2672,6 @@ void fsync (int fd)
#ifdef HAVE_HWMON #ifdef HAVE_HWMON
// Enumerates NVML devices by index and stores one handle per device in nvGPUHandle[].
// Returns the number of adapters found; logs a warning and returns 0 when none exist.
int hm_get_adapter_index_nvml (HM_ADAPTER_NVML nvGPUHandle[DEVICES_MAX])
{
int pGpuCount = 0;
for (uint i = 0; i < DEVICES_MAX; i++)
{
// stop at the first index NVML does not recognize
if (hm_NVML_nvmlDeviceGetHandleByIndex (data.hm_nvml, 1, i, &nvGPUHandle[i]) != NVML_SUCCESS) break;
// can be used to determine if the device by index matches the cuda device by index
// char name[100]; memset (name, 0, sizeof (name));
// hm_NVML_nvmlDeviceGetName (data.hm_nvml, nvGPUHandle[i], name, sizeof (name) - 1);
pGpuCount++;
}
if (pGpuCount == 0)
{
log_info ("WARN: No NVML adapters found");
return (0);
}
return (pGpuCount);
}
int get_adapters_num_adl (void *adl, int *iNumberAdapters) int get_adapters_num_adl (void *adl, int *iNumberAdapters)
{ {
if (hm_ADL_Adapter_NumberOfAdapters_Get ((ADL_PTR *) adl, iNumberAdapters) != ADL_OK) return -1; if (hm_ADL_Adapter_NumberOfAdapters_Get ((ADL_PTR *) adl, iNumberAdapters) != ADL_OK) return -1;
@ -2758,6 +2733,47 @@ LPAdapterInfo hm_get_adapter_info_adl (void *adl, int iNumberAdapters)
return lpAdapterInfo; return lpAdapterInfo;
} }
// Enumerates physical NVIDIA GPUs through NvAPI into nvapiGPUHandle[].
// Returns the adapter count, or 0 when the enumeration fails or finds no adapters
// (a warning is logged only in the latter case).
int hm_get_adapter_index_nvapi (HM_ADAPTER_NVAPI nvapiGPUHandle[DEVICES_MAX])
{
NvU32 gpu_count = 0;

if (hm_NvAPI_EnumPhysicalGPUs (data.hm_nvapi, nvapiGPUHandle, &gpu_count) == NVAPI_OK)
{
if (gpu_count > 0) return (gpu_count);

log_info ("WARN: No NvAPI adapters found");
}

return (0);
}
// Fills nvmlGPUHandle[] with one handle per NVML device, probing indices in order
// until NVML rejects one. Returns the number of adapters found; logs a warning
// and returns 0 when none were found.
int hm_get_adapter_index_nvml (HM_ADAPTER_NVML nvmlGPUHandle[DEVICES_MAX])
{
int found = 0;

while (found < DEVICES_MAX)
{
if (hm_NVML_nvmlDeviceGetHandleByIndex (data.hm_nvml, 1, found, &nvmlGPUHandle[found]) != NVML_SUCCESS) break;

// can be used to determine if the device by index matches the cuda device by index
// char name[100]; memset (name, 0, sizeof (name));
// hm_NVML_nvmlDeviceGetName (data.hm_nvml, nvmlGPUHandle[found], name, sizeof (name) - 1);

found++;
}

if (found == 0)
{
log_info ("WARN: No NVML adapters found");
}

return (found);
}
/* /*
// //
// does not help at all, since ADL does not assign different bus id, device id when we have multi GPU setups // does not help at all, since ADL does not assign different bus id, device id when we have multi GPU setups