Fix for -m 1500, -m 3000: Missing "defined" keyword

Fix for -m 1000: MD4_H1 copy/paste error
Fix for -m 8900, -m 9300: Invalid value for device_processor_cores for CPU devices
Fix for -m 9100: Variable salt2 initializer needed some clarifying parentheses
Temporarily limit gpu_accel for CPU devices to 1 for development phase
Mark pocl as too bleeding edge for production use, recommend native drivers
Remove workarounds for pocl
Rename VENDOR_ID_UNKNOWN to VENDOR_ID_GENERIC in host code
Rename IS_UNKNOWN to IS_GENERIC in kernel code
pull/66/head
jsteube 8 years ago
parent 6dab39e69c
commit e8229af09b

@ -9,7 +9,7 @@
static void switch_buffer_by_offset (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 offset)
{
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_mod_4 = offset & 3;
const int offset_minus_4 = 4 - offset;

@ -144,7 +144,7 @@ static void make_unicode (const u32 in[4], u32 out1[4], u32 out2[4])
out1[0] = __byte_perm (in[0], 0, 0x7170);
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
out2[3] = ((in[3] >> 8) & 0x00FF0000) | ((in[3] >> 16) & 0x000000FF);
out2[2] = ((in[3] << 8) & 0x00FF0000) | ((in[3] >> 0) & 0x000000FF);
out2[1] = ((in[2] >> 8) & 0x00FF0000) | ((in[2] >> 16) & 0x000000FF);
@ -165,7 +165,7 @@ static void undo_unicode (const u32 in1[4], const u32 in2[4], u32 out[4])
out[3] = __byte_perm (in2[2], in2[3], 0x6420);
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
out[0] = ((in1[0] & 0x000000ff) >> 0) | ((in1[0] & 0x00ff0000) >> 8)
| ((in1[1] & 0x000000ff) << 16) | ((in1[1] & 0x00ff0000) << 8);
out[1] = ((in1[2] & 0x000000ff) >> 0) | ((in1[2] & 0x00ff0000) >> 8)
@ -3987,7 +3987,7 @@ static void append_0x80_1x16 (u32 w[16], const u32 offset)
static void switch_buffer_by_offset (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 offset)
{
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_mod_4 = offset & 3;
const int offset_minus_4 = 4 - offset;
@ -4701,7 +4701,7 @@ static void switch_buffer_by_offset (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4],
static void switch_buffer_by_offset_be (u32 w0[4], u32 w1[4], u32 w2[4], u32 w3[4], const u32 offset)
{
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
switch (offset / 4)
{
case 0:

@ -130,7 +130,7 @@ static void memcat16 (u32 block0[4], u32 block1[4], u32 block2[4], u32 block3[4]
u32 tmp3;
u32 tmp4;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - (block_len & 3);
@ -242,7 +242,7 @@ static void memcat16_x80 (u32 block0[4], u32 block1[4], u32 block2[4], u32 block
u32 tmp3;
u32 tmp4;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - (block_len & 3);
@ -352,7 +352,7 @@ static void memcat8 (u32 block0[4], u32 block1[4], u32 block2[4], u32 block3[4],
u32 tmp1;
u32 tmp2;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - (block_len & 3);

@ -893,7 +893,7 @@ static void s8 (const u32 a1, const u32 a2, const u32 a3, const u32 a4, const u3
#endif
#endif
#if defined IS_AMD || IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
/*
* Bitslice DES S-boxes making use of a vector conditional select operation
@ -1467,7 +1467,7 @@ s8(u32 a1, u32 a2, u32 a3, u32 a4, u32 a5, u32 a6,
#define sXXX_DECL volatile
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
#define KXX_DECL
#define sXXX_DECL
#endif

@ -131,7 +131,7 @@ static void memcat16 (u32 block0[4], u32 block1[4], u32 block2[4], u32 block3[4]
u32 tmp3;
u32 tmp4;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - (block_len & 3);
@ -243,7 +243,7 @@ static void memcat16_x80 (u32 block0[4], u32 block1[4], u32 block2[4], u32 block
u32 tmp3;
u32 tmp4;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - (block_len & 3);
@ -353,7 +353,7 @@ static void memcat8 (u32 block0[4], u32 block1[4], u32 block2[4], u32 block3[4],
u32 tmp1;
u32 tmp2;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - (block_len & 3);

@ -253,7 +253,7 @@ static void sha512_update (sha512_ctx_t *sha512_ctx, const u64 *buf, int len)
sha512_transform (sha512_ctx->buf, sha512_ctx->state);
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
sha512_transform (sha512_ctx->buf, sha512_ctx->state);
#endif

@ -29,7 +29,7 @@
#define KXX_DECL volatile
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
#define KXX_DECL
#endif
@ -902,7 +902,7 @@ static void s8 (const u32 a1, const u32 a2, const u32 a3, const u32 a4, const u3
#endif
#endif
#if defined IS_AMD || IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
/*
* Bitslice DES S-boxes making use of a vector conditional select operation
@ -1488,7 +1488,7 @@ static void DES (const u32 K00, const u32 K01, const u32 K02, const u32 K03, con
#pragma unroll
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
#pragma unroll 1
#endif

@ -324,7 +324,7 @@ __constant u32 c_sbox3[256] =
}
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
#define BF_ROUND(L,R,N) \
{ \
uchar4 c = as_uchar4 ((L)); \

@ -1098,7 +1098,7 @@ static void append_salt (u32 w0[4], u32 w1[4], u32 w2[4], const u32 append[5], c
u32 tmp4;
u32 tmp5;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - (offset & 3);

@ -128,7 +128,7 @@ static void memcat16 (u32 block0[4], u32 block1[4], u32 block2[4], u32 block3[4]
u32 tmp3;
u32 tmp4;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - (block_len & 3);
@ -240,7 +240,7 @@ static void memcat16_x80 (u32 block0[4], u32 block1[4], u32 block2[4], u32 block
u32 tmp3;
u32 tmp4;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - (block_len & 3);
@ -350,7 +350,7 @@ static void memcat8 (u32 block0[4], u32 block1[4], u32 block2[4], u32 block3[4],
u32 tmp1;
u32 tmp2;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - (block_len & 3);

@ -205,7 +205,7 @@ static u32 memcat16 (u32 block[16], const u32 block_len, const u32 append[4], co
u32 tmp3;
u32 tmp4;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - block_len;
tmp0 = amd_bytealign (append[0], 0, offset_minus_4);
@ -342,7 +342,7 @@ static u32 memcat16c (u32 block[16], const u32 block_len, const u32 append[4], c
u32 tmp3;
u32 tmp4;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - block_len;
tmp0 = amd_bytealign (append[0], 0, offset_minus_4);
@ -505,7 +505,7 @@ static u32 memcat20 (u32 block[20], const u32 block_len, const u32 append[4], co
u32 tmp3;
u32 tmp4;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - block_len;
tmp0 = amd_bytealign (append[0], 0, offset_minus_4);
@ -650,7 +650,7 @@ static u32 memcat20_x80 (u32 block[20], const u32 block_len, const u32 append[4]
u32 tmp3;
u32 tmp4;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - block_len;
tmp0 = amd_bytealign (append[0], 0, offset_minus_4);

@ -324,7 +324,7 @@ __constant u32 c_sbox3[256] =
}
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
#define BF_ROUND(L,R,N) \
{ \
uchar4 c = as_uchar4 ((L)); \

@ -705,7 +705,7 @@ __kernel void __attribute__((reqd_work_group_size (64, 1, 1))) m09100_init (__gl
salt_buf3[3] = 0;
const u32 salt0 = salt_buf0[0];
const u32 salt1 = salt_buf0[1] & 0xff | '(' << 8;
const u32 salt1 = (salt_buf0[1] & 0xff) | ('(' << 8);
/**
* Lotus 6 hash - SEC_pwddigest_V2

@ -1198,7 +1198,7 @@ static void make_sc (u32 *sc, const u32 *pw, const u32 pw_len, const u32 *bl, co
u32 i;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
for (i = 0; i < pd; i++) sc[idx++] = pw[i];
sc[idx++] = pw[i]
| amd_bytealign (bl[0], 0, pm4);
@ -1229,7 +1229,7 @@ static void make_pt_with_offset (u32 *pt, const u32 offset, const u32 *sc, const
const u32 om = m % 4;
const u32 od = m / 4;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
pt[0] = amd_bytealign (sc[od + 1], sc[od + 0], om);
pt[1] = amd_bytealign (sc[od + 2], sc[od + 1], om);
pt[2] = amd_bytealign (sc[od + 3], sc[od + 2], om);

@ -29,7 +29,7 @@ static u32 memcat32 (u32 block0[16], u32 block1[16], const u32 block_len, const
const u32 mod = block_len & 3;
const u32 div = block_len / 4;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - mod;
u32 append0_t[4];

@ -27,7 +27,7 @@ static u32 memcat32 (u32 block0[16], u32 block1[16], const u32 block_len, const
const u32 mod = block_len & 3;
const u32 div = block_len / 4;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - mod;
u32 append0_t[4];

@ -27,7 +27,7 @@ static u32 memcat32 (u32 block0[16], u32 block1[16], const u32 block_len, const
const u32 mod = block_len & 3;
const u32 div = block_len / 4;
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - mod;
u32 append0_t[4];

@ -1139,7 +1139,7 @@ static u32 memcat8c (u32 block[16], const u32 block_len, const u32 append[2], co
tmp2 = __byte_perm (append[1], 0, selector);
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - block_len;
tmp0 = amd_bytealign (append[0], 0, offset_minus_4);
@ -1272,7 +1272,7 @@ static u32 memcat32c (u32 block[16], const u32 block_len, const u32 append[8], c
tmp8 = __byte_perm (append[7], 0, selector);
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const int offset_minus_4 = 4 - block_len;
tmp0 = amd_bytealign (append[0], 0, offset_minus_4);

@ -133,7 +133,7 @@ static void lshift_block (const u32 in0[4], const u32 in1[4], u32 out0[4], u32 o
out1[3] = __byte_perm (in1[3], 0, 0x4321);
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
out0[0] = amd_bytealign (in0[1], in0[0], 1);
out0[1] = amd_bytealign (in0[2], in0[1], 1);
out0[2] = amd_bytealign (in0[3], in0[2], 1);
@ -158,7 +158,7 @@ static void rshift_block (const u32 in0[4], const u32 in1[4], u32 out0[4], u32 o
out0[0] = __byte_perm ( 0, in0[0], 0x6543);
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
out1[3] = amd_bytealign (in1[3], in1[2], 3);
out1[2] = amd_bytealign (in1[2], in1[1], 3);
out1[1] = amd_bytealign (in1[1], in1[0], 3);
@ -466,7 +466,7 @@ static void lshift_block_N (const u32 in0[4], const u32 in1[4], u32 out0[4], u32
}
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
switch (num)
{
case 0: out0[0] = in0[0];
@ -1058,7 +1058,7 @@ static void rshift_block_N (const u32 in0[4], const u32 in1[4], u32 out0[4], u32
}
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
switch (num)
{
case 0: out1[3] = in1[3];
@ -1648,7 +1648,7 @@ static void append_block8 (const u32 offset, u32 dst0[4], u32 dst1[4], const u32
}
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
switch (offset)
{
case 0:
@ -2578,7 +2578,7 @@ static u32 rule_op_mangle_replace (const u32 p0, const u32 p1, u32 buf0[4], u32
}
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
const uchar4 tmp0 = (uchar4) (p0);
const uchar4 tmp1 = (uchar4) (p1);
@ -2793,7 +2793,7 @@ static u32 rule_op_mangle_dupechar_first (const u32 p0, const u32 p1, u32 buf0[4
}
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
switch (p0)
{
case 1: buf0[0] |= tmp << 0;
@ -3036,7 +3036,7 @@ static u32 rule_op_mangle_dupechar_all (const u32 p0, const u32 p1, u32 buf0[4],
buf1[3] = tib41[3];
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
tib40[0] = ((buf0[0] & 0x000000FF) << 0) | ((buf0[0] & 0x0000FF00) << 8);
tib40[1] = ((buf0[0] & 0x00FF0000) >> 16) | ((buf0[0] & 0xFF000000) >> 8);
tib40[2] = ((buf0[1] & 0x000000FF) << 0) | ((buf0[1] & 0x0000FF00) << 8);
@ -3069,7 +3069,7 @@ static u32 rule_op_mangle_switch_first (const u32 p0, const u32 p1, u32 buf0[4],
buf0[0] = __byte_perm (buf0[0], 0, 0x3201);
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
buf0[0] = (buf0[0] & 0xFFFF0000) | ((buf0[0] << 8) & 0x0000FF00) | ((buf0[0] >> 8) & 0x000000FF);
#endif
@ -3160,7 +3160,7 @@ static u32 rule_op_mangle_switch_last (const u32 p0, const u32 p1, u32 buf0[4],
}
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
switch (in_len)
{
case 2: buf0[0] = ((buf0[0] << 8) & 0x0000FF00) | ((buf0[0] >> 8) & 0x000000FF);
@ -3489,7 +3489,7 @@ static u32 rule_op_mangle_switch_at (const u32 p0, const u32 p1, u32 buf0[4], u3
}
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
switch (p0)
{
case 0: tmp0 = (buf0[0] >> 0) & 0xFF;
@ -4065,7 +4065,7 @@ static u32 rule_op_mangle_title (const u32 p0, const u32 p1, u32 buf0[4], u32 bu
}
#endif
#if defined IS_AMD || defined IS_UNKNOWN
#if defined IS_AMD || defined IS_GENERIC
u32 tib40[4];
u32 tib41[4];

@ -81,7 +81,7 @@ static inline u64 swap64 (const u64 v)
}
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
static inline u32 swap32 (const u32 v)
{
return (as_uint (as_uchar4 (v).s3210));
@ -135,7 +135,7 @@ static inline u32 amd_bytealign (const u32 a, const u32 b, const u32 c)
#endif
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
static inline u32 __bfe (const u32 a, const u32 b, const u32 c)
{
#define BIT(x) (1 << (x))
@ -299,7 +299,7 @@ static inline u64 rotl64 (const u64 a, const u32 n)
#endif
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
static inline u32 rotr32 (const u32 a, const u32 n)
{
return rotate (a, 32 - n);

@ -33,12 +33,12 @@
#define MD4_Go(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
#define MD4_F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
#define MD4_G(x,y,z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
#define MD4_H(x,y,z) ((x) ^ (y) ^ (z))
#define MD5_H1(x,y,z) (MD4_H((x), (y), (z)))
#define MD5_H2(x,y,z) (MD4_H((x), (y), (z)))
#define MD4_H1(x,y,z) (MD4_H((x), (y), (z)))
#define MD4_H2(x,y,z) (MD4_H((x), (y), (z)))
#define MD4_Fo(x,y,z) (MD4_F((x), (y), (z)))
#define MD4_Go(x,y,z) (MD4_G((x), (y), (z)))
#endif
@ -93,7 +93,7 @@
#define MD5_Go(x,y,z) (bitselect ((y), (x), (z)))
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
#define MD5_F(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define MD5_G(x,y,z) ((y) ^ ((z) & ((x) ^ (y))))
#define MD5_H(x,y,z) ((x) ^ (y) ^ (z))
@ -146,16 +146,12 @@
#define SHA1_F2o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
#define SHA1_F0(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA1_F1(x,y,z) ((x) ^ (y) ^ (z))
#define SHA1_F2(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
// either pocl or llvm fails and produces invalid optimized code
//#define SHA1_F0o(x,y,z) (SHA1_F0 ((x), (y), (z)))
//#define SHA1_F2o(x,y,z) (SHA1_F2 ((x), (y), (z)))
// luckily we can use bitselect as a workaround
#define SHA1_F0o(x,y,z) (bitselect ((z), (y), (x)))
#define SHA1_F2o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#define SHA1_F0o(x,y,z) (SHA1_F0 ((x), (y), (z)))
#define SHA1_F2o(x,y,z) (SHA1_F2 ((x), (y), (z)))
#endif
#define SHA1_STEP(f,a,b,c,d,e,x) \
@ -225,7 +221,7 @@
#define SHA256_F1o(x,y,z) (bitselect ((z), (y), (x)))
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
#define SHA256_F0(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
#define SHA256_F1(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define SHA256_F0o(x,y,z) (SHA256_F0 ((x), (y), (z)))
@ -269,7 +265,7 @@
#define SHA384_F1o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
#define SHA384_F0o(x,y,z) (SHA384_F0 ((x), (y), (z)))
#define SHA384_F1o(x,y,z) (SHA384_F1 ((x), (y), (z)))
#endif
@ -313,7 +309,7 @@
#define SHA512_F1o(x,y,z) (bitselect ((x), (y), ((x) ^ (z))))
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
#define SHA512_F0o(x,y,z) (SHA512_F0 ((x), (y), (z)))
#define SHA512_F1o(x,y,z) (SHA512_F1 ((x), (y), (z)))
#endif
@ -370,7 +366,7 @@
#define RIPEMD160_Io(x,y,z) (bitselect ((y), (x), (z)))
#endif
#ifdef IS_UNKNOWN
#ifdef IS_GENERIC
#define RIPEMD160_F(x,y,z) ((x) ^ (y) ^ (z))
#define RIPEMD160_G(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) /* x ? y : z */
#define RIPEMD160_H(x,y,z) (((x) | ~(y)) ^ (z))

@ -18,11 +18,11 @@
#endif
#if VENDOR_ID == 9998 // temporary for dev
#define IS_UNKNOWN
#define IS_GENERIC
#endif
#if VENDOR_ID == 9999
#define IS_UNKNOWN
#define IS_GENERIC
#endif
/**

@ -87,14 +87,11 @@
#define CL_VENDOR_NV "NVIDIA Corporation"
#define CL_VENDOR_AMD "Advanced Micro Devices, Inc."
#define CL_VENDOR_SDS "Shiloh Distributed Solutions"
#define CL_VENDOR_APPLE "Apple"
#define CL_VENDOR_POCL "The pocl project"
#define VENDOR_ID_AMD 4098
#define VENDOR_ID_NV 4318
#define VENDOR_ID_POCL 9998
#define VENDOR_ID_UNKNOWN 9999
#define VENDOR_ID_GENERIC 9999
#define BLOCK_SIZE 64

@ -2537,30 +2537,7 @@ static void run_kernel_bzero (hc_device_param_t *device_param, cl_mem buf, const
myfree (tmp);
}
if (data.vendor_id == VENDOR_ID_POCL)
{
// NOTE: clEnqueueFillBuffer () segfaults with Ubuntu 15.04 pocl
// We need to workaround...
#define FILLSZ 0x100000
char *tmp = (char *) mymalloc (FILLSZ);
memset (tmp, 0, FILLSZ);
for (uint i = 0; i < size; i += FILLSZ)
{
const int left = size - i;
const int fillsz = MIN (FILLSZ, left);
hc_clEnqueueWriteBuffer (device_param->command_queue, buf, CL_TRUE, i, fillsz, tmp, 0, NULL, NULL);
}
myfree (tmp);
}
if (data.vendor_id == VENDOR_ID_UNKNOWN)
if (data.vendor_id == VENDOR_ID_GENERIC)
{
const cl_uchar zero = 0;
@ -12300,7 +12277,7 @@ int main (int argc, char **argv)
return (-1);
}
uint CL_platform_sel = 1;
int CL_platform_sel = 1;
if (opencl_platform != NULL)
{
@ -12342,7 +12319,7 @@ int main (int argc, char **argv)
return (-1);
}
if (CL_platform_sel > CL_platforms_cnt)
if (CL_platform_sel > (int) CL_platforms_cnt)
{
log_error ("ERROR: invalid OpenCL platforms selected");
@ -12373,7 +12350,7 @@ int main (int argc, char **argv)
hc_clGetPlatformInfo (CL_platform, CL_PLATFORM_VENDOR, sizeof (CL_platform_vendor), CL_platform_vendor, NULL);
cl_device_type device_type;
cl_device_type device_type_filter;
uint vendor_id;
@ -12381,13 +12358,13 @@ int main (int argc, char **argv)
{
vendor_id = VENDOR_ID_AMD;
device_type = CL_DEVICE_TYPE_GPU;
device_type_filter = CL_DEVICE_TYPE_GPU;
}
else if (strcmp (CL_platform_vendor, CL_VENDOR_NV) == 0)
{
vendor_id = VENDOR_ID_NV;
device_type = CL_DEVICE_TYPE_GPU;
device_type_filter = CL_DEVICE_TYPE_GPU;
// make sure that we do not directly control the fan for NVidia
@ -12397,20 +12374,28 @@ int main (int argc, char **argv)
}
else if (strcmp (CL_platform_vendor, CL_VENDOR_POCL) == 0)
{
vendor_id = VENDOR_ID_POCL;
if (force == 0)
{
log_error ("");
log_error ("ATTENTION! All pocl drivers are known to be broken due to broken LLVM <= 3.7");
log_error ("You are STRONGLY encouraged not to use it");
log_error ("You can use --force to override this but do not post error reports if you do so");
return (-1);
}
device_type = CL_DEVICE_TYPE_CPU;
vendor_id = VENDOR_ID_GENERIC;
gpu_temp_disable = 1;
device_type_filter = CL_DEVICE_TYPE_DEFAULT;
}
else
{
vendor_id = VENDOR_ID_UNKNOWN;
vendor_id = VENDOR_ID_GENERIC;
device_type = CL_DEVICE_TYPE_DEFAULT;
device_type_filter = CL_DEVICE_TYPE_DEFAULT;
}
if (vendor_id == VENDOR_ID_UNKNOWN)
if (vendor_id == VENDOR_ID_GENERIC)
{
log_error ("Warning: unknown OpenCL vendor '%s' detected", CL_platform_vendor);
@ -12446,7 +12431,7 @@ int main (int argc, char **argv)
uint devices_all_cnt = 0;
hc_clGetDeviceIDs (CL_platform, device_type, DEVICES_MAX, devices_all, (uint *) &devices_all_cnt);
hc_clGetDeviceIDs (CL_platform, device_type_filter, DEVICES_MAX, devices_all, (uint *) &devices_all_cnt);
int hm_adapters_all = devices_all_cnt;
@ -12801,14 +12786,14 @@ int main (int argc, char **argv)
device_param->device_name_chksum = mystrdup (tmp);
if (device_type == CL_DEVICE_TYPE_CPU)
if (device_type & CL_DEVICE_TYPE_CPU)
{
cl_uint device_processor_cores = 1;
device_param->device_processor_cores = device_processor_cores;
}
if (device_type == CL_DEVICE_TYPE_GPU)
if (device_type & CL_DEVICE_TYPE_GPU)
{
if (vendor_id == VENDOR_ID_AMD)
{
@ -12857,7 +12842,7 @@ int main (int argc, char **argv)
* common driver check
*/
if (device_type == CL_DEVICE_TYPE_GPU)
if (device_type & CL_DEVICE_TYPE_GPU)
{
if (vendor_id == VENDOR_ID_NV)
{
@ -12995,6 +12980,8 @@ int main (int argc, char **argv)
uint device_processor_cores = device_param->device_processor_cores;
cl_device_type device_type = device_param->device_type;
/**
* create context for each device
*/
@ -13020,11 +13007,12 @@ int main (int argc, char **argv)
if (hash_mode == 3200) kernel_threads = 8;
if (hash_mode == 9000) kernel_threads = 8;
if (device_type == CL_DEVICE_TYPE_CPU)
if (device_type & CL_DEVICE_TYPE_CPU)
{
// CPU still need lots of workitems, don't know why...
// for testing phase, lets start with this
kernel_accel = (kernel_accel >= 8) ? kernel_accel / 8 : 1;
kernel_accel = 1;
}
uint kernel_power = device_processors * kernel_threads * kernel_accel;

Loading…
Cancel
Save