You cannot select more than 25 topics.
Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
1584 lines
120 KiB
1584 lines
120 KiB
6 years ago
|
import gc
|
||
|
|
||
|
from trezor import utils
|
||
|
from trezor.utils import memcpy as _memcpy
|
||
|
|
||
|
from apps.monero.xmr import crypto
|
||
|
from apps.monero.xmr.serialize.int_serialize import dump_uvarint_b_into, uvarint_size
|
||
|
|
||
|
# Constants
|
||
|
|
||
|
BP_LOG_N = 6
|
||
|
BP_N = 64 # 1 << BP_LOG_N
|
||
|
BP_M = 16 # maximal number of bulletproofs
|
||
|
|
||
|
ZERO = b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
|
||
|
ONE = b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
|
||
|
TWO = b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
|
||
|
EIGHT = b"\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
|
||
|
INV_EIGHT = b"\x79\x2f\xdc\xe2\x29\xe5\x06\x61\xd0\xda\x1c\x7d\xb3\x9d\xd3\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06"
|
||
|
MINUS_ONE = b"\xec\xd3\xf5\x5c\x1a\x63\x12\x58\xd6\x9c\xf7\xa2\xde\xf9\xde\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10"
|
||
|
MINUS_INV_EIGHT = b"\x74\xa4\x19\x7a\xf0\x7d\x0b\xf7\x05\xc2\xda\x25\x2b\x5c\x0b\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a"
|
||
|
|
||
|
# Monero H point
|
||
|
XMR_H = b"\x8b\x65\x59\x70\x15\x37\x99\xaf\x2a\xea\xdc\x9f\xf1\xad\xd0\xea\x6c\x72\x51\xd5\x41\x54\xcf\xa9\x2c\x17\x3a\x0d\xd3\x9c\x1f\x94"
|
||
|
XMR_HP = crypto.xmr_H()
|
||
|
|
||
|
# get_exponent(Gi[i], XMR_H, i * 2 + 1)
|
||
|
BP_GI_PRE = b"\x0b\x48\xbe\x50\xe4\x9c\xad\x13\xfb\x3e\x01\x4f\x3f\xa7\xd6\x8b\xac\xa7\xc8\xa9\x10\x83\xdc\x9c\x59\xb3\x79\xaa\xab\x21\x8f\x15\xdf\x01\xa5\xd6\x3b\x3e\x3a\x38\x38\x2a\xfb\xd7\xbc\x68\x5f\x34\x3d\x61\x92\xda\x16\xed\x4b\x45\x1f\x15\xfd\xda\xb1\x70\xe2\x2d\x73\x69\xc8\xd5\xa7\x45\x42\x3d\x26\x06\x23\xa1\xf7\x5f\xae\x1f\xb1\xf8\x1b\x16\x9d\x42\x2a\xcd\x85\x58\xe9\xd5\x74\x25\x48\xbd\x81\xc0\x7d\x2b\xd8\x77\x1e\xb4\xbd\x84\x15\x5d\x38\xd7\x05\x31\xfe\x66\x2b\x78\xf0\xc4\x4a\x9a\xea\xea\x2e\xd2\xd6\xf0\xeb\xe1\x08\x96\xc5\xc2\x2f\x00\x70\xeb\xf0\x55\xdf\xe8\xdc\x1c\xb2\x05\x42\xef\x29\x15\x1a\xa0\x77\x1e\x58\x1e\x68\xfe\x78\x18\xef\x42\x35\xc8\xdf\x1a\x32\xae\xce\xed\xef\xcb\xdf\x6d\x91\xd5\x24\x92\x9b\x84\x02\xa0\x26\xcb\x85\x74\xe0\xe3\xa3\x34\x2c\xe2\x11\xbc\xd9\x67\xbc\x14\xe7\xab\xda\x6c\x17\xc2\xf2\x2a\x38\x1b\x84\xc2\x49\x75\x78\x52\xe9\x9d\x62\xc4\x5f\x16\x0e\x89\x15\xec\x21\xd4\xc8\xa3\x83\x1d\x7c\x2f\x24\x58\x1e\xc9\xd1\x50\x13\xdf\xcc\xb5\xeb\xa6\x9d\xf6\x91\xa0\x80\x02\xb3\x3d\x4f\x2f\xb0\x6c\xa9\xf2\x9c\xfb\xc7\x0d\xb0\x23\xa4\x8e\x45\x35\xf5\x83\x8f\x5e\xa2\x7f\x70\x98\x0d\x11\xec\xd9\x35\xb4\x78\x25\x8e\x2a\x4f\x10\x06\xb3\x2d\xa6\x38\x72\x92\x25\x9e\x69\xac\x0a\x82\x9e\xf3\x47\x69\x98\x96\x72\x8c\x0c\xc0\xca\xdc\x74\x6d\xae\x46\xfb\x31\x86\x4a\x59\xa5\xb9\xa1\x54\x9c\x77\xe4\xcf\x8a\xb8\xb2\x55\xa3\xa0\xae\xfa\xa4\xca\xd1\x25\xd2\x19\x94\x9c\x0a\xef\xf0\xc3\x56\x0a\xb1\x58\xed\x67\x17\x48\xa1\x75\x56\x41\x9e\xc9\x42\xe1\x6b\x90\x1d\xbb\x2f\xc6\xdf\x96\x60\x32\x4f\xcb\xcd\x6e\x40\xf2\x35\xd7\x5b\x76\x4f\xaf\xf6\x1c\x19\x05\x22\x2b\xaf\x87\xd5\x1d\x45\xf3\x55\x81\x38\xc8\x7c\xe5\x4c\x46\x4c\xc6\x40\xb9\x55\xe7\xfa\x33\x10\xf8\x3b\x13\xdd\x7b\x24\x73\x19\xe1\x3c\xe6\x19\x95\xbc\x77\x1e\xe1\xed\xe7\x36\x35\x99\xf0\x8f\xc5\xcf\xda\x89\x0e\xa8\x03\xe0\xec\xa7\x0a\x97\x70\x7e\x90\x56\x29\xa5\xe0\x6d\x18\x6a\x96\x4f\x32\x2f\xff\xba\xa7\xed\x2e\x78\x1d\x4d\x3f\xed\xe0\x74\x61\xf4\x4b\x2d\x98\xdb\xcc\x0c\xaa\x20\x55\x14\x6e\x13\xf5\x0e\xcf\x75\x49\x
1d\xad\xd3\x6a\xd2\xba\xac\x56\xbc\x08\x56\x2e\xc6\x6c\xe1\x10\xb5\x44\x83\x1d\xbd\x34\xc6\xc2\x52\x95\x81\x51\xc4\x9a\x73\x4c\x6e\x62\x5e\x42\x60\x8c\x00\x5e\x79\x7e\xdb\x6d\x0a\x89\x34\xb3\x24\xa0\xe4\xd3\x1c\xba\x01\x57\x83\x50\x1e\xcd\xfa\x7a\x8e\xba\xe3\xa6\xbf\xd3\x2e\x6d\x1a\x36\x14\xb1\x11\x83\xc8\x09\x80\xd4\x54\x6c\xc3\xee\x5d\xb4\x7b\xfe\x97\x05\xaa\x95\xe2\xda\x29\xf2\x28\x23\x03\x53\x91\x7e\x5d\x2b\x19\x32\xfe\x48\x2f\xbc\xfe\xd7\x13\x4d\x55\x6d\x0c\x27\xf6\xcc\x6b\xf3\x01\x5c\x06\x61\x16\x25\x73\x9d\x88\x9c\x57\x89\xfa\x75\xb3\xc8\x39\x69\xcb\x88\xb1\xdf\x01\xc0\xac\xa4\x70\xf6\x65\xeb\x71\x82\xe0\x72\xbc\xa8\x9b\xc6\x69\xff\xe5\xb0\x29\x6f\xe2\x13\x43\xa8\xc3\x27\xc8\xa8\x41\x75\x02\x85\x5a\x25\xcc\xb7\x5b\x2f\x8e\xea\xc5\xd1\xdb\x25\x04\x4b\x0a\xea\xd2\xcf\x77\x02\x1e\xd9\x4f\x79\xf3\x00\x1e\x7b\x8e\x9d\xb7\x31\x1d\xb2\x8c\x45\xc9\x0d\x80\xa1\xe3\xd5\xb2\x7b\x43\xf8\xe3\x80\x21\x4d\x6a\x2c\x40\x46\xc8\xd4\x0f\x52\x4d\x47\x83\x53\x20\x4d\x01\xa1\x7c\x4f\xb7\xb1\x8c\x2f\x48\x27\x01\x50\xdb\x67\xd4\xb0\xb9\xce\x87\x86\xe0\x3c\x95\x50\xc5\x47\xfb\x18\x02\x9e\xf1\x6e\x56\x29\xe9\xa1\xc6\x68\xe1\xaa\x79\xc7\x88\x73\x55\xf5\xf5\x1b\x0c\xbb\x1f\x08\x35\xe0\x4e\x7a\xcc\x53\xac\x55\xa3\x57\x41\x97\xb5\x4c\x5a\xaa\xad\x47\xbe\x24\xdb\xbc\x11\xc1\xbd\x3e\xeb\x62\x46\x54\x2d\x2f\x5a\xe5\xf4\x39\x8d\xd4\xa7\x60\x17\x03\xcb\xbf\xd5\x9b\xad\xdd\x3a\x7c\xe6\xe3\x75\xe7\xd9\x00\x50\xe2\x71\xb1\x3f\x13\x2d\xf8\x5e\x1c\x12\xbe\x54\xfe\x66\xde\x81\xf6\x8a\x1c\x8f\x69\x6f\x3e\x77\x3c\x7e\xef\x57\xac\x13\x89\xbd\x02\x80\xd5\x58\xea\x78\x62\xf0\x1b\x64\x1e\xc6\xda\x0e\xfe\xfb\xee\xd0\x50\x9c\x53\x8a\x8c\x36\x16\x68\x1d\x76\x1a\xe5\xc6\xf9\xd2\xaa\xde\xd7\x18\x90\xda\x24\x96\x15\x60\x43\x08\x21\x82\xec\x85\x9c\x3a\xe4\x86\x93\xf9\x13\x43\xd0\xa5\xf0\xec\xbb\x7d\xec\x9b\x97\x3b\xf2\x13\x67\x8a\x65\x3b\x0d\x9d\xf5\x10\x65\x2a\x23\xc0\xb8\x06\x53\x67\x92\x4a\x4c\xfc\x78\x60\x36\xc0\x66\xca\xa7\x38\x34\x9c\xf1\xcd\xa7\x0d\xbf\xa8\x5c\xce\xb4\xa0\x9f\x85\x03\x9b\x6f\x77\x27\x4f\x
a6\xe2\x79\x35\xbf\x89\xae\x37\x3a\x3b\x5a\xda\x58\x24\xbd\x4b\x2a\xec\x22\x2a\xeb\xd7\xfe\xe7\
|
||
|
|
||
|
# get_exponent(Hi[i], XMR_H, i * 2)
|
||
|
BP_HI_PRE = b"\x42\xba\x66\x8a\x00\x7d\x0f\xcd\x6f\xea\x40\x09\xde\x8a\x64\x37\x24\x8f\x2d\x44\x52\x30\xaf\x00\x4a\x89\xfd\x04\x27\x9b\xc2\x97\xe5\x22\x4e\xf8\x71\xee\xb8\x72\x11\x51\x1d\x2a\x5c\xb8\x1e\xea\xa1\x60\xa8\xa5\x40\x8e\xab\x5d\xea\xeb\x9d\x45\x58\x78\x09\x47\x8f\xc5\x47\xc0\xc5\x2e\x90\xe0\x1e\xcd\x2c\xe4\x1b\xfc\x62\x40\x86\xf0\xec\xdc\x26\x0c\xf3\x0e\x1b\x9c\xae\x3b\x18\xed\x6b\x2c\x9f\x11\x04\x41\x45\xda\x98\xe3\x11\x1b\x40\xa1\x07\x8e\xa9\x04\x57\xb2\x8b\x01\x46\x2c\x90\xe3\xd8\x47\x94\x9e\xd8\xc1\xd3\x1d\x17\x96\x37\xec\x75\x65\xf7\x6f\xa2\x0a\xcc\x47\x1b\x16\x94\xb7\x95\xca\x44\x61\x8e\x4c\xc6\x8e\x0a\x46\xb2\x0f\x91\xe8\x67\x77\x25\x1d\xad\x91\xf0\xd5\xd4\x51\xd7\xe9\x4b\xfc\xd4\x13\x93\x4c\x1d\xa1\x73\xa9\x2d\xdc\x0d\x5e\x0e\x4c\x2c\xfb\xe5\x92\x5b\x0b\x88\x9c\x80\x22\xf3\xa7\xe4\x2f\xcf\xd4\xea\xcd\x06\x31\x63\x15\xc8\xc0\x6c\xb6\x67\x17\x6e\x8f\xd6\x75\xe1\x8a\x22\x96\x10\x0a\xd3\x42\x06\xfc\xf4\x44\x35\x7b\xe1\xe9\x87\x2f\x59\xd7\x1c\x4e\x66\xaf\xdf\x7c\x19\x6b\x6a\x59\x6b\xe2\x89\x0c\x0a\xea\x92\x8a\x9c\x69\xd2\xc4\xdf\x3b\x9c\x52\x8b\xce\x2c\x0c\x30\x6b\x62\x91\xde\xa2\x8d\xe1\xc0\x23\x32\x87\x19\xe9\xa1\xba\x1d\x84\x9c\x1b\xb4\x46\xbc\x0b\x0d\x37\x76\x25\x0d\xd6\x6d\x97\x27\xc2\x5d\x0e\xfe\xb0\xf9\x31\xfc\x53\x7a\xb2\xbd\x9f\x89\x78\x21\x6f\x6e\xb6\xe4\x23\xfa\xe0\xd3\x74\xd3\x4a\x20\x69\x4e\x39\x7a\x70\xb8\x4b\x75\xe3\xbe\x14\xb2\xcf\x53\x01\xc7\xcb\xc6\x62\x50\x96\x71\xa5\xe5\x93\x73\x6f\x61\x13\xc3\xf2\x88\xec\x00\xa1\xcc\x2f\xc7\x15\x6f\x4f\xff\xa1\x74\x8e\x9b\x2c\x2d\xdf\x2f\x43\x03\xbb\xfe\x7f\xfc\xee\x5e\x57\xb3\xb8\x42\x06\xa9\x1b\xcf\x32\xf7\x12\xc7\x5e\x5f\xa5\x10\x87\x85\xb8\xcc\x24\x47\x99\x83\x12\xca\x31\xab\x85\x00\xc8\x2c\x62\x68\x45\x39\xa2\x70\x01\xfb\x17\xf2\xa5\x64\x9d\xb2\xe2\xd6\x4b\x6b\x88\xf0\xd6\x81\x00\x9a\xe7\x8e\xae\xce\x9c\x73\x57\x80\x2c\x6c\x1c\xd8\x1e\xf6\x24\x86\x89\x85\x40\x89\xaa\xd6\x94\x47\x33\x91\xba\xd6\x18\xef\x01\xdf\xd6\x80\x98\x1a\x78\x97\x18\xe9\xd7\xca\xef\x06\x3d\xeb\x2d\x67\x5f\xe8\x43\xea\x63\x
4d\xcf\x96\x77\xc1\xd3\xee\x92\x51\x39\x71\xb7\x24\xc7\x88\xe4\x10\x7a\x42\x40\xfe\x26\xe5\xfb\x36\xcc\x00\x7e\x76\x58\x96\x48\x82\xf7\x69\xf1\x8c\x78\x6a\xb1\x52\xf2\x5c\x5d\x2a\xe4\x72\xf7\x1e\x40\x13\xc4\xb0\xc5\x78\x7d\xc1\xd7\x8b\xdc\x8d\x52\x33\x10\x39\xaf\x41\x24\x11\x2e\xe9\x34\x6f\x11\x0a\x4e\x81\x18\xe8\x64\x11\x5d\x49\xb0\x82\xc8\x38\x51\xd4\xd5\xe1\x10\xa4\xab\xda\xdd\xbd\xa9\xb0\x22\x7f\x5b\x26\xbf\x52\xd5\xa2\x25\x25\x23\x59\x72\x84\x3d\xe9\x1d\x99\xd0\x09\x1f\x17\xf4\x78\x2d\x4f\xeb\x2b\x76\x0c\xd5\x8b\x6f\x24\x76\xe8\xb0\x2d\x90\x8a\x15\x15\x07\x8a\xa8\x08\xaa\x3a\x56\x5e\xfc\xb7\x16\x9f\xe0\xcb\xf7\x2c\x12\xce\x17\x50\xf2\x86\x1f\xb6\xc6\x85\x16\x13\xcb\xe9\x74\xef\xc1\x68\x4a\xeb\xbe\x8b\x8a\x52\x2a\xbb\xe7\x82\x77\xd0\xda\xa7\x89\x2d\x9d\xa8\x7c\x27\xbe\xcd\x3e\xc0\x38\x95\x23\x3a\xd4\x66\x31\x8c\x44\x3c\x4d\x6d\x5c\xf1\x2e\xba\x7d\xbd\x3e\x84\x32\x9d\xf6\x1a\xfc\x9b\x7e\x08\xfc\x13\x32\xa6\x82\x34\x42\x73\x39\x6e\xc7\xdc\xdc\xbe\xae\x48\xff\x70\xa1\x9a\x31\xd6\x62\x44\x3c\xce\x57\xf7\x7a\xfe\x05\x0b\x81\x22\x48\x60\x25\x5b\xcb\xc8\xf4\x80\xc4\x3c\xfd\xeb\xb1\xb2\xa6\x89\x72\xb7\xd3\x32\x3b\x03\x61\xf3\xa1\x14\x2f\x8b\x45\x2e\x92\x98\x77\x3d\xef\x56\x35\xc2\xe2\xef\xa3\x70\x0e\x4c\xc9\xe5\xd8\xde\x78\x96\x7e\x57\x35\x82\xcf\x7c\x74\x97\x7c\x30\xb5\x46\x9b\x2c\x0b\xac\xe8\xec\x25\x9f\x71\xba\x25\xc8\xdd\x1c\x51\xe5\xb0\x24\x1c\xca\x7c\x86\xf7\x18\xb7\xd2\xc3\xd4\x57\xa6\xe5\xe0\xb3\x9f\x1f\x39\xeb\xaf\xbb\x08\x83\xd4\x27\xd9\x36\x47\x60\x15\xad\x88\xb7\x92\xa0\x31\xe4\xdd\x98\x37\x57\xc9\x9a\xea\x39\x12\xe8\xf8\xc2\xf6\x59\xde\x4b\xc1\xa2\x20\x4c\xea\x13\x2e\x4f\x9e\xf7\x17\x77\x11\x91\x53\x63\x9a\x71\xff\x24\x17\xf5\x22\xfe\x41\xb8\x7e\x9c\x1c\xb7\x66\x9f\x40\xf9\xd6\x85\x88\x7d\xff\x81\x92\x7a\xa4\x2e\xda\x7f\x2a\x69\x67\x89\x09\x10\x33\xcf\x5b\xe2\xfc\x1f\x5f\x3a\x2d\xe2\x27\x15\xeb\x33\xd6\x28\x28\x92\x2d\xac\x86\x2e\xfc\x7f\xc6\xd5\x4c\x99\xe6\xec\x6e\x58\xc0\xb6\x4d\xa9\x57\xe7\x36\xd3\x00\x93\xc8\x67\xa1\x20\xd5\xdb\xfc\x55\x03\xca\x27\x64\x
05\xdf\x4b\x2d\xbe\x6c\xfe\x7c\x2c\x56\xbc\xd2\x66\x9f\x1b\x7d\x82\xc9\xf9\x29\x91\xbf\x41\x02\
|
||
|
|
||
|
# twoN = vector_powers(TWO, BP_N);
|
||
|
BP_TWO_N = b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x0
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
|
||
|
|
||
|
# ip12 = inner_product(oneN, twoN);
|
||
|
BP_IP12 = b"\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
|
||
|
|
||
|
|
||
|
#
|
||
|
# Rct keys operations
|
||
|
# tmp_x are global working registers to minimize memory allocations / heap fragmentation.
|
||
|
# Caution has to be exercised when using the registers and operations using the registers
|
||
|
#
|
||
|
|
||
|
tmp_bf_0 = bytearray(32)
|
||
|
tmp_bf_1 = bytearray(32)
|
||
|
tmp_bf_2 = bytearray(32)
|
||
|
tmp_bf_exp = bytearray(11 + 32 + 4)
|
||
|
tmp_bf_exp_mv = memoryview(tmp_bf_exp)
|
||
|
|
||
|
tmp_pt_1 = crypto.new_point()
|
||
|
tmp_pt_2 = crypto.new_point()
|
||
|
tmp_pt_3 = crypto.new_point()
|
||
|
tmp_pt_4 = crypto.new_point()
|
||
|
|
||
|
tmp_sc_1 = crypto.new_scalar()
|
||
|
tmp_sc_2 = crypto.new_scalar()
|
||
|
tmp_sc_3 = crypto.new_scalar()
|
||
|
tmp_sc_4 = crypto.new_scalar()
|
||
|
|
||
|
|
||
|
def _ensure_dst_key(dst=None):
|
||
|
if dst is None:
|
||
|
dst = bytearray(32)
|
||
|
return dst
|
||
|
|
||
|
|
||
|
def memcpy(dst, dst_off, src, src_off, len):
    """
    Copy `len` bytes from src[src_off:] into dst[dst_off:] and return dst.

    A None destination is a no-op (returns None), which lets callers pass an
    optional output buffer straight through.
    NOTE(review): the parameter name `len` shadows the builtin; kept as-is
    for interface compatibility with existing callers.
    """
    if dst is not None:
        _memcpy(dst, dst_off, src, src_off, len)
    return dst
|
||
|
|
||
|
|
||
|
def alloc_scalars(num=1):
    """
    Return a generator yielding `num` freshly allocated crypto scalars.

    Note: the result is lazy — scalars are allocated as the generator is
    consumed (typically by tuple-unpacking at the call site).
    """
    return (crypto.new_scalar() for _ in range(num))
|
||
|
|
||
|
|
||
|
def copy_key(dst, src):
    """Copy the 32-byte key `src` into `dst` in place; return `dst`."""
    i = 0
    while i < 32:
        dst[i] = src[i]
        i += 1
    return dst
|
||
|
|
||
|
|
||
|
def init_key(val, dst=None):
    """Allocate `dst` when needed and fill it with the 32-byte key `val`."""
    target = _ensure_dst_key(dst)
    copy_key(target, val)
    return target
|
||
|
|
||
|
|
||
|
def gc_iter(i):
    """Run a GC pass every 128th iteration to limit heap fragmentation in long loops."""
    if i % 128 == 0:
        gc.collect()
|
||
|
|
||
|
|
||
|
def invert(dst, x):
    """
    Modular inversion: dst = x^-1 (mod l), 32B serialized scalars.

    Uses the shared tmp_sc_* registers; not reentrant.
    """
    dst = _ensure_dst_key(dst)
    crypto.decodeint_into_noreduce(tmp_sc_1, x)
    crypto.sc_inv_into(tmp_sc_2, tmp_sc_1)
    crypto.encodeint_into(dst, tmp_sc_2)
    return dst
|
||
|
|
||
|
|
||
|
def scalarmult_key(dst, P, s):
    """
    Point-scalar multiplication: dst = s * P, all serialized 32B.

    Uses the shared tmp_pt_* / tmp_sc_* registers; not reentrant.
    """
    dst = _ensure_dst_key(dst)
    crypto.decodepoint_into(tmp_pt_1, P)
    crypto.decodeint_into_noreduce(tmp_sc_1, s)
    crypto.scalarmult_into(tmp_pt_2, tmp_pt_1, tmp_sc_1)
    crypto.encodepoint_into(dst, tmp_pt_2)
    return dst
|
||
|
|
||
|
|
||
|
def scalarmultH(dst, x):
    """
    Multiply the Monero H point by a scalar: dst = x * H.

    Note: uses decodeint_into (with reduction), unlike most helpers here
    which use the noreduce variant.
    """
    dst = _ensure_dst_key(dst)
    crypto.decodeint_into(tmp_sc_1, x)
    crypto.scalarmult_into(tmp_pt_1, XMR_HP, tmp_sc_1)
    crypto.encodepoint_into(dst, tmp_pt_1)
    return dst
|
||
|
|
||
|
|
||
|
def scalarmult_base(dst, x):
    """
    Base-point multiplication: dst = x * G.

    Uses the shared tmp_pt_1 / tmp_sc_1 registers; not reentrant.
    """
    dst = _ensure_dst_key(dst)
    crypto.decodeint_into_noreduce(tmp_sc_1, x)
    crypto.scalarmult_base_into(tmp_pt_1, tmp_sc_1)
    crypto.encodepoint_into(dst, tmp_pt_1)
    return dst
|
||
|
|
||
|
|
||
|
def sc_gen(dst=None):
    """Generate a fresh random scalar into dst (32B serialized)."""
    dst = _ensure_dst_key(dst)
    crypto.random_scalar(tmp_sc_1)
    crypto.encodeint_into(dst, tmp_sc_1)
    return dst
|
||
|
|
||
|
|
||
|
def sc_add(dst, a, b):
    """
    Scalar addition: dst = a + b (mod l), 32B serialized.

    Uses the shared tmp_sc_* registers; not reentrant.
    """
    dst = _ensure_dst_key(dst)
    crypto.decodeint_into_noreduce(tmp_sc_1, a)
    crypto.decodeint_into_noreduce(tmp_sc_2, b)
    crypto.sc_add_into(tmp_sc_3, tmp_sc_1, tmp_sc_2)
    crypto.encodeint_into(dst, tmp_sc_3)
    return dst
|
||
|
|
||
|
|
||
|
def sc_sub(dst, a, b):
    """
    Scalar subtraction: dst = a - b (mod l), 32B serialized.

    Uses the shared tmp_sc_* registers; not reentrant.
    """
    dst = _ensure_dst_key(dst)
    crypto.decodeint_into_noreduce(tmp_sc_1, a)
    crypto.decodeint_into_noreduce(tmp_sc_2, b)
    crypto.sc_sub_into(tmp_sc_3, tmp_sc_1, tmp_sc_2)
    crypto.encodeint_into(dst, tmp_sc_3)
    return dst
|
||
|
|
||
|
|
||
|
def sc_mul(dst, a, b):
    """
    Scalar multiplication: dst = a * b (mod l), 32B serialized.

    Uses the shared tmp_sc_* registers; not reentrant.
    """
    dst = _ensure_dst_key(dst)
    crypto.decodeint_into_noreduce(tmp_sc_1, a)
    crypto.decodeint_into_noreduce(tmp_sc_2, b)
    crypto.sc_mul_into(tmp_sc_3, tmp_sc_1, tmp_sc_2)
    crypto.encodeint_into(dst, tmp_sc_3)
    return dst
|
||
|
|
||
|
|
||
|
def sc_muladd(dst, a, b, c):
    """
    Fused multiply-add on scalars: dst = a * b + c (mod l) — per Monero's
    sc_muladd convention; confirm against the crypto module.
    """
    dst = _ensure_dst_key(dst)
    crypto.decodeint_into_noreduce(tmp_sc_1, a)
    crypto.decodeint_into_noreduce(tmp_sc_2, b)
    crypto.decodeint_into_noreduce(tmp_sc_3, c)
    crypto.sc_muladd_into(tmp_sc_4, tmp_sc_1, tmp_sc_2, tmp_sc_3)
    crypto.encodeint_into(dst, tmp_sc_4)
    return dst
|
||
|
|
||
|
|
||
|
def sc_mulsub(dst, a, b, c):
    """
    Fused multiply-subtract on scalars: dst = c - a * b (mod l) — per Monero's
    sc_mulsub convention; confirm against the crypto module.
    """
    dst = _ensure_dst_key(dst)
    crypto.decodeint_into_noreduce(tmp_sc_1, a)
    crypto.decodeint_into_noreduce(tmp_sc_2, b)
    crypto.decodeint_into_noreduce(tmp_sc_3, c)
    crypto.sc_mulsub_into(tmp_sc_4, tmp_sc_1, tmp_sc_2, tmp_sc_3)
    crypto.encodeint_into(dst, tmp_sc_4)
    return dst
|
||
|
|
||
|
|
||
|
def add_keys(dst, A, B):
    """
    Point addition: dst = A + B, all serialized 32B points.

    Uses the shared tmp_pt_* registers; not reentrant.
    """
    dst = _ensure_dst_key(dst)
    crypto.decodepoint_into(tmp_pt_1, A)
    crypto.decodepoint_into(tmp_pt_2, B)
    crypto.point_add_into(tmp_pt_3, tmp_pt_1, tmp_pt_2)
    crypto.encodepoint_into(dst, tmp_pt_3)
    return dst
|
||
|
|
||
|
|
||
|
def sub_keys(dst, A, B):
    """
    Point subtraction: dst = A - B, all serialized 32B points.

    Uses the shared tmp_pt_* registers; not reentrant.
    """
    dst = _ensure_dst_key(dst)
    crypto.decodepoint_into(tmp_pt_1, A)
    crypto.decodepoint_into(tmp_pt_2, B)
    crypto.point_sub_into(tmp_pt_3, tmp_pt_1, tmp_pt_2)
    crypto.encodepoint_into(dst, tmp_pt_3)
    return dst
|
||
|
|
||
|
|
||
|
def add_keys2(dst, a, b, B):
    """
    Double-scalar multiplication with the base point: dst = a*G + b*B —
    per Monero's addKeys2 convention; confirm against the crypto module.
    """
    dst = _ensure_dst_key(dst)
    crypto.decodeint_into_noreduce(tmp_sc_1, a)
    crypto.decodeint_into_noreduce(tmp_sc_2, b)
    crypto.decodepoint_into(tmp_pt_1, B)
    crypto.add_keys2_into(tmp_pt_2, tmp_sc_1, tmp_sc_2, tmp_pt_1)
    crypto.encodepoint_into(dst, tmp_pt_2)
    return dst
|
||
|
|
||
|
|
||
|
def add_keys3(dst, a, A, b, B):
    """
    Double-scalar multiplication: dst = a*A + b*B, serialized 32B in/out.

    Uses the shared tmp_pt_* / tmp_sc_* registers; not reentrant.
    """
    dst = _ensure_dst_key(dst)
    crypto.decodeint_into_noreduce(tmp_sc_1, a)
    crypto.decodeint_into_noreduce(tmp_sc_2, b)
    crypto.decodepoint_into(tmp_pt_1, A)
    crypto.decodepoint_into(tmp_pt_2, B)
    crypto.add_keys3_into(tmp_pt_3, tmp_sc_1, tmp_pt_1, tmp_sc_2, tmp_pt_2)
    crypto.encodepoint_into(dst, tmp_pt_3)
    return dst
|
||
|
|
||
|
|
||
|
def hash_to_scalar(dst, data):
    """Hash `data` to a scalar: dst = H_s(data), serialized 32B."""
    dst = _ensure_dst_key(dst)
    crypto.hash_to_scalar_into(tmp_sc_1, data)
    crypto.encodeint_into(dst, tmp_sc_1)
    return dst
|
||
|
|
||
|
|
||
|
def hash_vct_to_scalar(dst, data):
    """
    Hash the concatenation of the byte strings in `data` and reduce to a scalar:
    dst = Keccak(data[0] || data[1] || ...) mod l.
    """
    dst = _ensure_dst_key(dst)
    ctx = crypto.get_keccak()
    for x in data:
        ctx.update(x)
    hsh = ctx.digest()

    # decode + re-encode canonicalizes the raw digest as a reduced scalar
    crypto.decodeint_into(tmp_sc_1, hsh)
    crypto.encodeint_into(tmp_bf_1, tmp_sc_1)
    copy_key(dst, tmp_bf_1)
    return dst
|
||
|
|
||
|
|
||
|
def get_exponent(dst, base, idx):
    """
    Derive a bulletproof generator point:
    dst = hash_to_point(keccak(base || "bulletproof" || varint(idx))).

    Used to build the Gi / Hi generator vectors (see BP_GI_PRE / BP_HI_PRE).
    """
    dst = _ensure_dst_key(dst)
    salt = b"bulletproof"
    idx_size = uvarint_size(idx)
    final_size = len(salt) + 32 + idx_size
    # tmp_bf_exp is preallocated as 11 (salt) + 32 (base) + 4 (max varint) bytes
    buff = tmp_bf_exp_mv
    memcpy(buff, 0, base, 0, 32)
    memcpy(buff, 32, salt, 0, len(salt))
    dump_uvarint_b_into(idx, buff, 32 + len(salt))
    crypto.keccak_hash_into(tmp_bf_1, buff[:final_size])
    crypto.hash_to_point_into(tmp_pt_1, tmp_bf_1)
    crypto.encodepoint_into(dst, tmp_pt_1)
    return dst
|
||
|
|
||
|
|
||
|
#
|
||
|
# Key Vectors
|
||
|
#
|
||
|
|
||
|
|
||
|
class KeyVBase:
    """
    Base KeyVector object.

    Defines the common key-vector protocol: indexing with negative-index
    support, iteration, copy-out (`to`), copy-in (`read`) and slicing.
    Concrete element storage is provided by subclasses.
    """

    def __init__(self, elems=64):
        self.current_idx = 0  # iterator cursor for __iter__ / __next__
        self.size = elems

    def idxize(self, idx):
        """
        Normalize a possibly negative index into [0, size).

        Raises IndexError for any index outside [-size, size).
        """
        if idx < 0:
            idx = self.size + idx
        # Bugfix: also reject indices still negative after wrapping
        # (idx < -size), which previously leaked through as a negative
        # index and wrapped a second time on element access.
        if idx < 0 or idx >= self.size:
            raise IndexError("Index out of bounds")
        return idx

    def __getitem__(self, item):
        raise ValueError("Not supported")

    def __setitem__(self, key, value):
        raise ValueError("Not supported")

    def __iter__(self):
        self.current_idx = 0
        return self

    def __next__(self):
        if self.current_idx >= self.size:
            raise StopIteration
        else:
            self.current_idx += 1
            return self[self.current_idx - 1]

    def __len__(self):
        return self.size

    def to(self, idx, buff, offset=0):
        """Copy element `idx` into `buff` at `offset`; returns `buff`."""
        return memcpy(buff, offset, self[self.idxize(idx)], 0, 32)

    def read(self, idx, buff, offset=0):
        """Store 32 bytes from `buff` into element `idx` (subclass-provided)."""
        raise ValueError()

    def slice(self, res, start, stop):
        """Copy elements [start, stop) into `res` and return it."""
        for i in range(start, stop):
            res[i - start] = self[i]
        return res

    def slice_view(self, start, stop):
        """Return a remapping view over [start, stop) without copying."""
        return KeyVSliced(self, start, stop)
|
||
|
|
||
|
|
||
|
class KeyV(KeyVBase):
    """
    KeyVector abstraction
    Constant precomputed buffers = bytes, frozen. Same operation as normal.

    Non-constant KeyVector is separated to 64 elements chunks to avoid problems with
    the heap fragmentation. In this way the chunks are more probable to be correctly
    allocated as smaller chunk of continuous memory is required. Chunk is assumed to
    have 64 elements at all times to minimize corner cases handling. BP require either
    multiple of 64 elements vectors or less than 64.

    Some chunk-dependent cases are not implemented as they are currently not needed in the BP.
    """

    def __init__(self, elems=64, buffer=None, const=False, no_init=False):
        super().__init__(elems)
        self.d = None  # backing store: bytearray, bytes (const) or list of 64-elem chunks
        self.mv = None  # memoryview over self.d (non-chunked only)
        self.const = const  # True forbids __setitem__
        self.cur = _ensure_dst_key()  # scratch output buffer for to()
        self.chunked = False
        if no_init:
            pass
        elif buffer:
            self.d = buffer  # can be immutable (bytes)
            self.size = len(buffer) // 32
        else:
            self._set_d(elems)

        if not no_init:
            self._set_mv()

    def _set_d(self, elems):
        # Vectors bigger than one chunk are split into 64-element chunks
        # (32 * 64 bytes each) to reduce heap-fragmentation pressure.
        if elems > 64 and elems % 64 == 0:
            self.chunked = True
            self.d = [bytearray(32 * 64) for _ in range(elems // 64)]

        else:
            self.chunked = False
            self.d = bytearray(32 * elems)

    def _set_mv(self):
        # Chunked storage has no single contiguous buffer to view.
        if not self.chunked:
            self.mv = memoryview(self.d)

    def __getitem__(self, item):
        """
        Returns corresponding 32 byte array.
        Creates new memoryview on access.
        """
        if self.chunked:
            raise ValueError("Not supported")  # not needed
        item = self.idxize(item)
        return self.mv[item * 32 : (item + 1) * 32]

    def __setitem__(self, key, value):
        if self.chunked:
            raise ValueError("Not supported")  # not needed
        if self.const:
            raise ValueError("Constant KeyV")
        ck = self[key]
        for i in range(32):
            ck[i] = value[i]

    def to(self, idx, buff=None, offset=0):
        # Copy element `idx` out into buff (or the internal scratch self.cur).
        idx = self.idxize(idx)
        if self.chunked:
            # idx >> 6 selects the chunk, (idx & 63) << 5 is the byte offset inside it
            memcpy(
                buff if buff else self.cur,
                offset,
                self.d[idx >> 6],
                (idx & 63) << 5,
                32,
            )
        else:
            memcpy(buff if buff else self.cur, offset, self.d, idx << 5, 32)
        return buff if buff else self.cur

    def read(self, idx, buff, offset=0):
        # Store 32 bytes from buff into element `idx` (copy-in direction).
        idx = self.idxize(idx)
        if self.chunked:
            memcpy(self.d[idx >> 6], (idx & 63) << 5, buff, offset, 32)
        else:
            memcpy(self.d, idx << 5, buff, offset, 32)

    def resize(self, nsize, chop=False, realloc=False):
        # NOTE(review): returns self only on the no-op path; the other paths
        # return None. Callers must not rely on the return value.
        if self.size == nsize:
            return self

        if self.chunked and nsize <= 64:
            self.chunked = False  # de-chunk
            if self.size > nsize and realloc:
                self.d = bytearray(self.d[0][: nsize << 5])
            elif self.size > nsize and not chop:
                # keeps a (possibly larger) slice object without reallocating
                self.d = self.d[0][: nsize << 5]
            else:
                # fresh zeroed buffer; previous contents are discarded
                self.d = bytearray(nsize << 5)

        elif self.chunked:
            raise ValueError("Unsupported")  # not needed

        else:
            if self.size > nsize and realloc:
                self.d = bytearray(self.d[: nsize << 5])
            elif self.size > nsize and not chop:
                self.d = self.d[: nsize << 5]
            else:
                self.d = bytearray(nsize << 5)

        self.size = nsize
        self._set_mv()

    def realloc(self, nsize, collect=False):
        # Drop the old buffers first so the allocator can reuse the space.
        self.d = None
        self.mv = None
        if collect:
            gc.collect()  # gc collect prev. allocation

        self._set_d(nsize)
        self.size = nsize
        self._set_mv()

    def realloc_init_from(self, nsize, src, offset=0, collect=False):
        """Reallocate to `nsize` elements and copy them from `src` starting at `offset`."""
        if not isinstance(src, KeyV):
            raise ValueError("KeyV supported only")
        self.realloc(nsize, collect)

        if not self.chunked and not src.chunked:
            memcpy(self.d, 0, src.d, offset << 5, nsize << 5)

        elif self.chunked and not src.chunked:
            raise ValueError("Unsupported")  # not needed

        elif self.chunked and src.chunked:
            raise ValueError("Unsupported")  # not needed

        elif not self.chunked and src.chunked:
            # Copy chunk by chunk; only the first chunk needs the intra-chunk
            # source offset applied.
            for i in range(nsize >> 6):
                memcpy(
                    self.d,
                    i << 11,
                    src.d[i + (offset >> 6)],
                    (offset & 63) << 5 if i == 0 else 0,
                    nsize << 5 if i <= nsize >> 6 else (nsize & 64) << 5,
                )
|
||
|
|
||
|
|
||
|
class KeyVEval(KeyVBase):
    """
    KeyVector computed / evaluated on demand.

    Elements are produced by the callback src(index, out_buffer); a single
    shared 32-byte buffer is reused for every access, so values must be
    consumed (or copied) before the next access.
    """

    def __init__(self, elems=64, src=None):
        super().__init__(elems)
        self.fnc = src
        self.buff = _ensure_dst_key()
        self.mv = memoryview(self.buff)

    def __getitem__(self, item):
        idx = self.idxize(item)
        return self.fnc(idx, self.buff)

    def to(self, idx, buff=None, offset=0):
        self.fnc(self.idxize(idx), self.buff)
        memcpy(buff, offset, self.buff, 0, 32)
        if buff:
            return buff
        return self.buff
|
||
|
|
||
|
|
||
|
class KeyVSized(KeyVBase):
    """
    Resized vector wrapping a possibly larger vector
    (e.g., precomputed, but has to have exact size for further computations).
    """

    def __init__(self, wrapped, new_size):
        super().__init__(new_size)
        self.wrapped = wrapped

    def __getitem__(self, item):
        idx = self.idxize(item)
        return self.wrapped[idx]

    def __setitem__(self, key, value):
        idx = self.idxize(key)
        self.wrapped[idx] = value
|
||
|
|
||
|
|
||
|
class KeyVConst(KeyVBase):
    """Vector whose every element is the same 32-byte key."""

    def __init__(self, size, elem, copy=True):
        super().__init__(size)
        if copy:
            self.elem = init_key(elem)
        else:
            self.elem = elem

    def __getitem__(self, item):
        return self.elem

    def to(self, idx, buff=None, offset=0):
        memcpy(buff, offset, self.elem, 0, 32)
        if buff:
            return buff
        return self.elem
|
||
|
|
||
|
|
||
|
class KeyVPrecomp(KeyVBase):
    """
    Vector with possibly large size and some precomputed prefix.
    Usable for Gi vector with precomputed usual sizes (i.e., 2 output transactions)
    but possible to compute further.
    """

    def __init__(self, size, precomp_prefix, aux_comp_fnc):
        super().__init__(size)
        self.precomp_prefix = precomp_prefix
        self.aux_comp_fnc = aux_comp_fnc
        self.buff = _ensure_dst_key()

    def __getitem__(self, item):
        idx = self.idxize(item)
        if idx < len(self.precomp_prefix):
            return self.precomp_prefix[idx]
        return self.aux_comp_fnc(idx, self.buff)

    def to(self, idx, buff=None, offset=0):
        pos = self.idxize(idx)
        if pos < len(self.precomp_prefix):
            target = buff if buff else self.buff
            return self.precomp_prefix.to(pos, target, offset)
        self.aux_comp_fnc(pos, self.buff)
        memcpy(buff, offset, self.buff, 0, 32)
        return buff if buff else self.buff
|
||
|
|
||
|
|
||
|
class KeyVSliced(KeyVBase):
    """
    Sliced in-memory vector version, remapping indices into a
    sub-range of the wrapped source vector.
    """

    def __init__(self, src, start, stop):
        super().__init__(stop - start)
        self.wrapped = src
        self.offset = start

    def _map(self, idx):
        # Translate a slice-local index into the wrapped vector's index space.
        return self.offset + self.idxize(idx)

    def __getitem__(self, item):
        return self.wrapped[self._map(item)]

    def __setitem__(self, key, value):
        self.wrapped[self._map(key)] = value

    def resize(self, nsize, chop=False):
        raise ValueError("Not supported")

    def to(self, idx, buff=None, offset=0):
        return self.wrapped.to(self._map(idx), buff, offset)

    def read(self, idx, buff, offset=0):
        return self.wrapped.read(self._map(idx), buff, offset)
|
||
|
|
||
|
|
||
|
class KeyVPowers(KeyVBase):
    """
    Vector of x^i. Allows only sequential access (no jumping). Resets on [0,1] access.
    """

    def __init__(self, size, x, **kwargs):
        super().__init__(size)
        self.x = x
        self.cur = bytearray(32)  # holds the current power; reused on every access
        self.last_idx = 0

    def __getitem__(self, item):
        """
        Return x^item. Valid accesses are index 0, index 1, or last index + 1;
        any other pattern raises IndexError.
        """
        prev = self.last_idx
        item = self.idxize(item)
        self.last_idx = item

        if item == 0:
            return copy_key(self.cur, ONE)
        elif item == 1:
            return copy_key(self.cur, self.x)
        elif item == prev + 1:
            return sc_mul(self.cur, self.cur, self.x)
        else:
            # Bugfix: the exception was previously constructed but never
            # raised, so invalid (non-linear) access silently returned None.
            raise IndexError("Only linear scan allowed")
|
||
|
|
||
|
|
||
|
class KeyVZtwo(KeyVBase):
    """
    Ztwo vector - see vector_z_two_i

    Elements are evaluated on demand via vector_z_two_i (defined elsewhere
    in this file); one scalar register is reused for every access.
    """

    def __init__(self, N, logN, M, zpow, twoN, raw=False):
        super().__init__(N * M)
        self.N = N
        self.logN = logN
        self.M = M
        self.zpow = zpow  # vector of powers of z
        self.twoN = twoN  # vector of powers of two
        self.raw = raw  # True: return the raw scalar object instead of 32B encoding
        self.sc = crypto.new_scalar()
        self.cur = bytearray(32) if not raw else None

    def __getitem__(self, item):
        vector_z_two_i(self.logN, self.zpow, self.twoN, self.idxize(item), self.sc)
        if self.raw:
            return self.sc

        crypto.encodeint_into(self.cur, self.sc)
        return self.cur
|
||
|
|
||
|
|
||
|
def _ensure_dst_keyvect(dst=None, size=None):
|
||
|
if dst is None:
|
||
|
dst = KeyV(elems=size)
|
||
|
return dst
|
||
|
if size is not None and size != len(dst):
|
||
|
dst.resize(size)
|
||
|
return dst
|
||
|
|
||
|
|
||
|
def const_vector(val, elems=BP_N, copy=True):
    """Return a constant vector of `elems` elements, each equal to `val`."""
    return KeyVConst(elems, val, copy)
|
||
|
|
||
|
|
||
|
def vector_exponent_custom(A, B, a, b, dst=None):
    """
    Multi-exponentiation: dst = \\sum_i (a_i * A_i + b_i * B_i), encoded point.

    a, b are vectors of 32B scalars; A, B are vectors of 32B points.
    Uses the module-level temporary registers, so it is not reentrant.
    """
    dst = _ensure_dst_key(dst)
    crypto.identity_into(tmp_pt_2)  # running accumulator

    for i in range(len(a)):
        crypto.decodeint_into_noreduce(tmp_sc_1, a.to(i))
        crypto.decodepoint_into(tmp_pt_3, A.to(i))
        crypto.decodeint_into_noreduce(tmp_sc_2, b.to(i))
        crypto.decodepoint_into(tmp_pt_4, B.to(i))
        # tmp_pt_1 = a_i * A_i + b_i * B_i
        crypto.add_keys3_into(tmp_pt_1, tmp_sc_1, tmp_pt_3, tmp_sc_2, tmp_pt_4)
        crypto.point_add_into(tmp_pt_2, tmp_pt_2, tmp_pt_1)
        gc_iter(i)
    crypto.encodepoint_into(dst, tmp_pt_2)
    return dst
|
||
|
|
||
|
|
||
|
def vector_powers(x, n, dst=None, dynamic=False, **kwargs):
    """
    Return the vector (1, x, x^2, ..., x^(n-1)).

    With dynamic=True a lazily-evaluated KeyVPowers is returned (sequential
    access only); otherwise all powers are materialized into dst.
    """
    if dynamic:
        return KeyVPowers(n, x, **kwargs)
    dst = _ensure_dst_keyvect(dst, n)
    if n == 0:
        return dst
    dst.read(0, ONE)  # read() stores the buffer at the given index
    if n == 1:
        return dst
    dst.read(1, x)

    crypto.decodeint_into_noreduce(tmp_sc_1, x)  # running power
    crypto.decodeint_into_noreduce(tmp_sc_2, x)  # constant multiplier
    for i in range(2, n):
        crypto.sc_mul_into(tmp_sc_1, tmp_sc_1, tmp_sc_2)
        crypto.encodeint_into(tmp_bf_0, tmp_sc_1)
        dst.read(i, tmp_bf_0)
        gc_iter(i)
    return dst
|
||
|
|
||
|
|
||
|
def vector_power_sum(x, n, dst=None):
    """
    Return the scalar sum 1 + x + x^2 + ... + x^(n-1) as a 32B key.

    n == 0 yields ZERO; n == 1 yields ONE.
    """
    dst = _ensure_dst_key(dst)
    if n == 0:
        return copy_key(dst, ZERO)

    copy_key(dst, ONE)
    if n == 1:
        return dst

    prev = init_key(x)  # current power of x, starts at x^1
    for i in range(1, n):
        if i > 1:
            sc_mul(prev, prev, x)
        sc_add(dst, dst, prev)
        gc_iter(i)
    return dst
|
||
|
|
||
|
|
||
|
def inner_product(a, b, dst=None):
    """
    Scalar inner product dst = \\sum_i a_i * b_i (mod l), encoded into 32B.

    Raises ValueError when the vectors differ in length.
    """
    if len(a) != len(b):
        raise ValueError("Incompatible sizes of a and b")
    dst = _ensure_dst_key(dst)
    crypto.sc_init_into(tmp_sc_1, 0)  # accumulator

    for i in range(len(a)):
        crypto.decodeint_into_noreduce(tmp_sc_2, a.to(i))
        crypto.decodeint_into_noreduce(tmp_sc_3, b.to(i))
        crypto.sc_muladd_into(tmp_sc_1, tmp_sc_2, tmp_sc_3, tmp_sc_1)
        gc_iter(i)

    crypto.encodeint_into(dst, tmp_sc_1)
    return dst
|
||
|
|
||
|
|
||
|
def hadamard(a, b, dst=None):
    """Element-wise scalar product dst[i] = a[i] * b[i]; dst may alias a or b."""
    dst = _ensure_dst_keyvect(dst, len(a))
    for i in range(len(a)):
        sc_mul(tmp_bf_1, a.to(i), b.to(i))
        dst.read(i, tmp_bf_1)
        gc_iter(i)
    return dst
|
||
|
|
||
|
|
||
|
def hadamard_fold(v, a, b, into=None, into_offset=0):
    """
    Folds a curvepoint array using a two way scaled Hadamard product

    ln = len(v); h = ln // 2
    v[i] = a * v[i] + b * v[h + i]

    Writes the h folded points into `into` (default: v itself) starting at
    `into_offset`.
    """
    h = len(v) // 2
    crypto.decodeint_into_noreduce(tmp_sc_1, a)
    crypto.decodeint_into_noreduce(tmp_sc_2, b)
    into = into if into else v

    for i in range(h):
        crypto.decodepoint_into(tmp_pt_1, v.to(i))
        crypto.decodepoint_into(tmp_pt_2, v.to(h + i))
        # tmp_pt_3 = a * v[i] + b * v[h+i]
        crypto.add_keys3_into(tmp_pt_3, tmp_sc_1, tmp_pt_1, tmp_sc_2, tmp_pt_2)
        crypto.encodepoint_into(tmp_bf_0, tmp_pt_3)
        into.read(i + into_offset, tmp_bf_0)
        gc_iter(i)

    return into
|
||
|
|
||
|
|
||
|
def scalar_fold(v, a, b, into=None, into_offset=0):
    """
    Scalar-vector fold, the scalar analogue of hadamard_fold:

    ln = len(v); h = ln // 2
    v[i] = v[i] * a + v[h+i] * b

    Writes the h folded scalars into `into` (default: v) at `into_offset`.
    """
    h = len(v) // 2
    crypto.decodeint_into_noreduce(tmp_sc_1, a)
    crypto.decodeint_into_noreduce(tmp_sc_2, b)
    into = into if into else v

    for i in range(h):
        crypto.decodeint_into_noreduce(tmp_sc_3, v.to(i))
        crypto.decodeint_into_noreduce(tmp_sc_4, v.to(h + i))
        crypto.sc_mul_into(tmp_sc_3, tmp_sc_3, tmp_sc_1)
        crypto.sc_mul_into(tmp_sc_4, tmp_sc_4, tmp_sc_2)
        crypto.sc_add_into(tmp_sc_3, tmp_sc_3, tmp_sc_4)
        crypto.encodeint_into(tmp_bf_0, tmp_sc_3)
        into.read(i + into_offset, tmp_bf_0)
        gc_iter(i)

    return into
|
||
|
|
||
|
|
||
|
def cross_inner_product(l0, r0, l1, r1):
    """
    t1_1 = l0 . r1, t1_2 = l1 . r0
    t1   = t1_1 + t1_2, t2 = l1 . r1

    Computes all three inner products in one pass and returns (t1, t2)
    as encoded 32B scalars.
    """
    sc_t1_1, sc_t1_2, sc_t2 = alloc_scalars(3)
    cl0, cr0, cl1, cr1 = alloc_scalars(4)

    for i in range(len(l0)):
        crypto.decodeint_into_noreduce(cl0, l0.to(i))
        crypto.decodeint_into_noreduce(cr0, r0.to(i))
        crypto.decodeint_into_noreduce(cl1, l1.to(i))
        crypto.decodeint_into_noreduce(cr1, r1.to(i))

        crypto.sc_muladd_into(sc_t1_1, cl0, cr1, sc_t1_1)
        crypto.sc_muladd_into(sc_t1_2, cl1, cr0, sc_t1_2)
        crypto.sc_muladd_into(sc_t2, cl1, cr1, sc_t2)
        gc_iter(i)

    crypto.sc_add_into(sc_t1_1, sc_t1_1, sc_t1_2)
    return crypto.encodeint(sc_t1_1), crypto.encodeint(sc_t2)
|
||
|
|
||
|
|
||
|
def vector_gen(dst, size, op):
    """
    In-place element transform: for each i, load dst[i] into a temp buffer,
    apply op(i, buff) which mutates the buffer, and store it back.
    """
    dst = _ensure_dst_keyvect(dst, size)
    for i in range(size):
        dst.to(i, tmp_bf_0)  # load current value
        op(i, tmp_bf_0)  # mutate in place
        dst.read(i, tmp_bf_0)  # store back
        gc_iter(i)
    return dst
|
||
|
|
||
|
|
||
|
def vector_add(a, b, dst=None):
    """Element-wise scalar addition dst[i] = a[i] + b[i] (mod l)."""
    dst = _ensure_dst_keyvect(dst, len(a))
    for i in range(len(a)):
        sc_add(tmp_bf_1, a.to(i), b.to(i))
        dst.read(i, tmp_bf_1)
        gc_iter(i)
    return dst
|
||
|
|
||
|
|
||
|
def vector_subtract(a, b, dst=None):
    """Element-wise scalar subtraction dst[i] = a[i] - b[i] (mod l)."""
    dst = _ensure_dst_keyvect(dst, len(a))
    for i in range(len(a)):
        sc_sub(tmp_bf_1, a.to(i), b.to(i))
        dst.read(i, tmp_bf_1)
        gc_iter(i)
    return dst
|
||
|
|
||
|
|
||
|
def vector_dup(x, n, dst=None):
    """Return a vector of n copies of the key x."""
    dst = _ensure_dst_keyvect(dst, n)
    for idx in range(n):
        dst[idx] = x
        gc_iter(idx)
    return dst
|
||
|
|
||
|
|
||
|
def vector_z_two_i(logN, zpow, twoN, i, dst_sc=None):
    """
    0...N|N+1...2N|2N+1...3N|....
    zt[i] = z^b 2^c, where
    b = 2 + blockNumber. BlockNumber is idx of N block
    c = i % N = i - N * blockNumber

    Result is written into the raw scalar dst_sc and returned.
    """
    j = i >> logN  # block number
    crypto.decodeint_into_noreduce(tmp_sc_1, zpow.to(j + 2))
    crypto.decodeint_into_noreduce(tmp_sc_2, twoN.to(i & ((1 << logN) - 1)))
    crypto.sc_mul_into(dst_sc, tmp_sc_1, tmp_sc_2)
    return dst_sc
|
||
|
|
||
|
|
||
|
def vector_z_two(N, logN, M, zpow, twoN, zero_twos=None, dynamic=False, **kwargs):
    """Return the lazily-evaluated z^b * 2^c vector; only dynamic mode is implemented."""
    if not dynamic:
        raise NotImplementedError()
    return KeyVZtwo(N, logN, M, zpow, twoN, **kwargs)
|
||
|
|
||
|
|
||
|
def hash_cache_mash(dst, hash_cache, *args):
    """
    Fiat-Shamir transcript update: dst = H(hash_cache || args...) reduced
    to a scalar. The hash_cache buffer is overwritten with the same result,
    chaining the transcript.

    Iteration over args stops at the first None, allowing a fixed-arity
    call site to mash a variable-length prefix.
    """
    dst = _ensure_dst_key(dst)
    ctx = crypto.get_keccak()
    ctx.update(hash_cache)

    for x in args:
        if x is None:
            break
        ctx.update(x)
    hsh = ctx.digest()

    # Reduce the digest to a canonical scalar encoding.
    crypto.decodeint_into(tmp_sc_1, hsh)
    crypto.encodeint_into(tmp_bf_1, tmp_sc_1)

    copy_key(dst, tmp_bf_1)
    copy_key(hash_cache, tmp_bf_1)
    return dst
|
||
|
|
||
|
|
||
|
def is_reduced(sc):
    """Return True iff sc is a canonical (fully reduced) scalar encoding."""
    canonical = crypto.encodeint(crypto.decodeint(sc))
    return canonical == sc
|
||
|
|
||
|
|
||
|
class MultiExpSequential:
    """
    MultiExp object similar to MultiExp array of [(scalar, point), ]
    MultiExp computes simply: res = \\sum_i scalar_i * point_i
    Straus / Pippenger algorithms are implemented in the original Monero C++ code for the speed
    but the memory cost is around 1 MB which is not affordable here in HW devices.

    Moreover, Monero needs speed for very fast verification for blockchain verification which is not
    priority in this use case.

    MultiExp holder with sequential evaluation
    """

    def __init__(self, size=None, points=None, point_fnc=None):
        self.current_idx = 0
        self.points = points if points else []
        self.point_fnc = point_fnc
        # BUGFIX: the original assigned self.size twice and the second
        # if/else reset an explicitly passed `size` back to 0. Honor the
        # caller's size; otherwise derive it from the points list.
        if size is not None:
            self.size = size
        else:
            self.size = len(points) if points else 0

        self.acc = crypto.identity()  # running point accumulator
        self.tmp = _ensure_dst_key()

    def get_point(self, idx):
        # Fall back to the generator function past the materialized points.
        return (
            self.point_fnc(idx, None) if idx >= len(self.points) else self.points[idx]
        )

    def add_pair(self, scalar, point):
        """Accumulate scalar * point."""
        self._acc(scalar, point)

    def add_scalar(self, scalar):
        """Accumulate scalar * (next implicit point)."""
        self._acc(scalar, self.get_point(self.current_idx))

    def _acc(self, scalar, point):
        crypto.decodeint_into_noreduce(tmp_sc_1, scalar)
        crypto.decodepoint_into(tmp_pt_2, point)
        crypto.scalarmult_into(tmp_pt_3, tmp_pt_2, tmp_sc_1)
        crypto.point_add_into(self.acc, self.acc, tmp_pt_3)
        self.current_idx += 1
        self.size += 1

    def eval(self, dst, GiHi=False):
        """Encode the accumulated point into dst."""
        dst = _ensure_dst_key(dst)
        return crypto.encodepoint_into(dst, self.acc)
|
||
|
|
||
|
|
||
|
def multiexp(dst=None, data=None, GiHi=False):
    """Evaluate a MultiExp-like accumulator `data` into dst."""
    result = data.eval(dst, GiHi)
    return result
|
||
|
|
||
|
|
||
|
class BulletProofBuilder:
|
||
|
def __init__(self):
    """Set up precomputed generator tables and scratch buffers."""
    self.use_det_masks = True  # deterministic sL/sR masks derived from proof_sec
    self.proof_sec = None  # per-proof secret seed, set in prove_setup

    # Precomputed Gi/Hi generator tables and constants (read-only buffers).
    self.Gprec = KeyV(buffer=BP_GI_PRE, const=True)
    self.Hprec = KeyV(buffer=BP_HI_PRE, const=True)
    self.oneN = const_vector(ONE, 64)
    self.twoN = KeyV(buffer=BP_TWO_N, const=True)
    self.ip12 = BP_IP12
    self.fnc_det_mask = None  # optional override for _det_mask

    self.tmp_sc_1 = crypto.new_scalar()
    self.tmp_det_buff = bytearray(64 + 1 + 4)  # seed || is_sL flag || varint(i)

    self.gc_fnc = gc.collect
    self.gc_trace = None  # optional tracing hook for gc checkpoints
|
||
|
|
||
|
def gc(self, *args):
    """Optional trace hook followed by optional garbage collection."""
    trace = self.gc_trace
    if trace:
        trace(*args)
    collect = self.gc_fnc
    if collect:
        collect()
|
||
|
|
||
|
def aX_vcts(self, sv, MN):
    """
    Build the lazy bit-decomposition vectors aL, aR of the amounts `sv`.

    aL[j*N + i] is bit i of amount j (ONE/ZERO); aR = aL - 1 element-wise
    (ZERO/MINUS_ONE). Indices beyond the number of inputs pad with zeros.
    """
    num_inp = len(sv)

    def e_xL(idx, d=None, is_a=True):
        j, i = idx // BP_N, idx % BP_N
        r = None
        if j >= num_inp:
            # Padding rows beyond provided amounts.
            r = ZERO if is_a else MINUS_ONE
        elif sv[j][i // 8] & (1 << i % 8):
            r = ONE if is_a else ZERO
        else:
            r = ZERO if is_a else MINUS_ONE
        if d:
            memcpy(d, 0, r, 0, 32)
        return r

    aL = KeyVEval(MN, lambda i, d: e_xL(i, d, True))
    aR = KeyVEval(MN, lambda i, d: e_xL(i, d, False))
    return aL, aR
|
||
|
|
||
|
def _det_mask_init(self):
    """Seed the deterministic-mask buffer with the per-proof secret."""
    memcpy(self.tmp_det_buff, 0, self.proof_sec, 0, len(self.proof_sec))
|
||
|
|
||
|
def _det_mask(self, i, is_sL=True, dst=None):
    """
    Deterministic mask scalar: H(proof_sec || is_sL || varint(i)).

    Used to derive the sL/sR blinding vectors without storing them.
    """
    dst = _ensure_dst_key(dst)
    if self.fnc_det_mask:
        return self.fnc_det_mask(i, is_sL, dst)
    self.tmp_det_buff[64] = int(is_sL)
    # Zero the varint region first (varint may be shorter than 4 bytes).
    memcpy(self.tmp_det_buff, 65, ZERO, 0, 4)
    dump_uvarint_b_into(i, self.tmp_det_buff, 65)
    crypto.hash_to_scalar_into(self.tmp_sc_1, self.tmp_det_buff)
    crypto.encodeint_into(dst, self.tmp_sc_1)
    return dst
|
||
|
|
||
|
def _gprec_aux(self, size):
    """Gi table of `size` elems: precomputed prefix, odd H-exponents past it."""
    return KeyVPrecomp(
        size, self.Gprec, lambda i, d: get_exponent(d, XMR_H, i * 2 + 1)
    )
|
||
|
|
||
|
def _hprec_aux(self, size):
    """Hi table of `size` elems: precomputed prefix, even H-exponents past it."""
    return KeyVPrecomp(size, self.Hprec, lambda i, d: get_exponent(d, XMR_H, i * 2))
|
||
|
|
||
|
def _two_aux(self, size):
    """
    Powers-of-two table of `size` elements, extending the precomputed
    self.twoN by recursive squaring: 2^i = 2^(i//2) * 2^(i - i//2).
    """
    # Simple recursive exponentiation from precomputed results
    lx = len(self.twoN)

    def pow_two(i, d=None):
        if i < lx:
            return self.twoN[i]

        d = _ensure_dst_key(d)
        flr = i // 2

        lw = pow_two(flr)
        # BUGFIX: the conditional must select the whole recursive call, not
        # the index argument; the original `pow_two(flr + 1 if ... else lw)`
        # passed a 32B key as an index whenever i was even.
        rw = pow_two(flr + 1) if flr != i / 2.0 else lw
        return sc_mul(d, lw, rw)

    return KeyVPrecomp(size, self.twoN, pow_two)
|
||
|
|
||
|
def sL_vct(self, ln=BP_N):
    """Left blinding vector: deterministic lazy masks, or fresh random scalars."""
    if self.use_det_masks:
        return KeyVEval(ln, lambda i, dst: self._det_mask(i, True, dst))
    return self.sX_gen(ln)
|
||
|
|
||
|
def sR_vct(self, ln=BP_N):
    """Right blinding vector: deterministic lazy masks, or fresh random scalars."""
    if self.use_det_masks:
        return KeyVEval(ln, lambda i, dst: self._det_mask(i, False, dst))
    return self.sX_gen(ln)
|
||
|
|
||
|
def sX_gen(self, ln=BP_N):
    """Generate a KeyV of `ln` random scalars in one preallocated buffer."""
    buff = bytearray(ln * 32)
    buff_mv = memoryview(buff)  # zero-copy 32B slices
    sc = crypto.new_scalar()
    for i in range(ln):
        crypto.random_scalar(sc)
        crypto.encodeint_into(buff_mv[i * 32 : (i + 1) * 32], sc)
        gc_iter(i)
    return KeyV(buffer=buff)
|
||
|
|
||
|
def vector_exponent(self, a, b, dst=None):
    """dst = sum(a_i * Gprec_i + b_i * Hprec_i) over the precomputed tables."""
    return vector_exponent_custom(self.Gprec, self.Hprec, a, b, dst)
|
||
|
|
||
|
def prove_testnet(self, sv, gamma):
    """Prove with the legacy v8 (testnet) format, i.e. without 1/8 scaling."""
    return self.prove(sv, gamma, proof_v8=True)
|
||
|
|
||
|
def prove(self, sv, gamma, proof_v8=False):
    """Single-output convenience wrapper around prove_batch."""
    return self.prove_batch([sv], [gamma], proof_v8=proof_v8)
|
||
|
|
||
|
def prove_setup(self, sv, gamma, proof_v8=False):
    """
    Prepare proof inputs: derive the deterministic-mask seed, encode
    amounts/masks, compute the aggregation size M (power of two) and the
    Pedersen commitments V, and build the lazy bit vectors aL, aR.

    :param sv: list of amount scalars
    :param gamma: list of commitment masks (same length as sv)
    :param proof_v8: legacy format without the 1/8 commitment scaling
    :return: (M, logM, aL, aR, V, gamma) with sv/gamma as 32B encodings
    """
    utils.ensure(len(sv) == len(gamma), "|sv| != |gamma|")
    utils.ensure(len(sv) > 0, "sv empty")

    self.proof_sec = crypto.random_bytes(64)
    self._det_mask_init()
    sv = [crypto.encodeint(x) for x in sv]
    gamma = [crypto.encodeint(x) for x in gamma]

    # Smallest power of two M >= len(sv), capped by BP_M.
    M, logM = 1, 0
    while M <= BP_M and M < len(sv):
        logM += 1
        M = 1 << logM
    MN = M * BP_N

    # V_i = gamma_i * G + sv_i * H (scaled by 1/8 in the current format).
    V = _ensure_dst_keyvect(None, len(sv))
    for i in range(len(sv)):
        add_keys2(tmp_bf_0, gamma[i], sv[i], XMR_H)
        if not proof_v8:
            scalarmult_key(tmp_bf_0, tmp_bf_0, INV_EIGHT)
        V.read(i, tmp_bf_0)

    aL, aR = self.aX_vcts(sv, MN)
    return M, logM, aL, aR, V, gamma
|
||
|
|
||
|
def prove_batch(self, sv, gamma, proof_v8=False):
    """
    Produce an aggregated Bulletproof for amounts `sv` with masks `gamma`.

    Retries the main prover when a zero Fiat-Shamir challenge is hit
    (the main routine then returns a falsy status in r[0]).
    """
    M, logM, aL, aR, V, gamma = self.prove_setup(sv, gamma, proof_v8)
    hash_cache = _ensure_dst_key()
    while True:
        self.gc(10)
        r = self._prove_batch_main(
            V, gamma, aL, aR, hash_cache, logM, BP_LOG_N, M, BP_N, proof_v8
        )
        if r[0]:
            break
    return r[1]
|
||
|
|
||
|
def _prove_batch_main(
    self, V, gamma, aL, aR, hash_cache, logM, logN, M, N, proof_v8=False
):
    """
    Core Bulletproof prover (line numbers refer to the Bulletproofs paper).

    Returns (0,) / (0, None) when a Fiat-Shamir challenge is zero (caller
    retries), otherwise (1, Bulletproof).
    """
    logMN = logM + logN
    MN = M * N
    hash_vct_to_scalar(hash_cache, V)

    # Extended precomputed GiHi
    Gprec = self._gprec_aux(MN)
    Hprec = self._hprec_aux(MN)

    # PAPER LINES 38-39
    alpha = sc_gen()
    ve = _ensure_dst_key()
    A = _ensure_dst_key()
    vector_exponent_custom(Gprec, Hprec, aL, aR, ve)
    add_keys(A, ve, scalarmult_base(tmp_bf_1, alpha))
    if not proof_v8:
        scalarmult_key(A, A, INV_EIGHT)
    self.gc(11)

    # PAPER LINES 40-42
    sL = self.sL_vct(MN)
    sR = self.sR_vct(MN)
    rho = sc_gen()
    vector_exponent_custom(Gprec, Hprec, sL, sR, ve)
    S = _ensure_dst_key()
    add_keys(S, ve, scalarmult_base(tmp_bf_1, rho))
    if not proof_v8:
        scalarmult_key(S, S, INV_EIGHT)
    del ve
    self.gc(12)

    # PAPER LINES 43-45
    y = _ensure_dst_key()
    hash_cache_mash(y, hash_cache, A, S)
    if y == ZERO:
        return (0,)

    z = _ensure_dst_key()
    hash_to_scalar(hash_cache, y)
    copy_key(z, hash_cache)
    if z == ZERO:
        return (0,)

    # Polynomial construction by coefficients
    zMN = const_vector(z, MN)
    l0 = _ensure_dst_keyvect(None, MN)
    vector_subtract(aL, zMN, l0)
    l1 = sL
    self.gc(13)

    # This computes the ugly sum/concatenation from PAPER LINE 65
    # r0 = aR + z
    r0 = vector_add(aR, zMN)
    del zMN
    self.gc(14)

    # r0 = r0 \odot yMN => r0[i] = r0[i] * y^i
    # r1 = sR \odot yMN => r1[i] = sR[i] * y^i
    yMN = vector_powers(y, MN, dynamic=False)
    hadamard(r0, yMN, dst=r0)
    self.gc(15)

    # r0 = r0 + zero_twos
    zpow = vector_powers(z, M + 2)
    twoN = self._two_aux(MN)
    zero_twos = vector_z_two(N, logN, M, zpow, twoN, dynamic=True, raw=True)
    vector_gen(
        r0,
        len(r0),
        lambda i, d: crypto.encodeint_into(
            d,
            crypto.sc_add_into(
                tmp_sc_1,
                zero_twos[i],  # noqa: F821
                crypto.decodeint_into_noreduce(tmp_sc_2, r0.to(i)),  # noqa: F821
            ),
        ),
    )

    del (zero_twos, twoN)
    self.gc(15)

    # Polynomial construction before PAPER LINE 46
    # r1 = KeyVEval(MN, lambda i, d: sc_mul(d, yMN[i], sR[i]))
    # r1 optimization possible, but has clashing sc registers.
    # Moreover, max memory complexity is 4MN as below (while loop).
    r1 = hadamard(yMN, sR, yMN)  # re-use yMN vector for r1
    del (yMN, sR)
    self.gc(16)

    # Inner products
    # l0 = aL - z           r0 = ((aR + z) \cdot ypow) + zt
    # l1 = sL               r1 = sR \cdot ypow
    # t1_1 = l0 . r1, t1_2 = l1 . r0
    # t1 = t1_1 + t1_2, t2 = l1 . r1
    # l = l0 \odot x*l1     r = r0 \odot x*r1
    t1, t2 = cross_inner_product(l0, r0, l1, r1)
    self.gc(17)

    # PAPER LINES 47-48
    tau1, tau2 = sc_gen(), sc_gen()
    T1, T2 = _ensure_dst_key(), _ensure_dst_key()

    add_keys(T1, scalarmultH(tmp_bf_1, t1), scalarmult_base(tmp_bf_2, tau1))
    if not proof_v8:
        scalarmult_key(T1, T1, INV_EIGHT)

    add_keys(T2, scalarmultH(tmp_bf_1, t2), scalarmult_base(tmp_bf_2, tau2))
    if not proof_v8:
        scalarmult_key(T2, T2, INV_EIGHT)
    del (t1, t2)
    self.gc(17)

    # PAPER LINES 49-51
    x = _ensure_dst_key()
    hash_cache_mash(x, hash_cache, z, T1, T2)
    if x == ZERO:
        return (0,)

    # PAPER LINES 52-53
    taux = _ensure_dst_key()
    copy_key(taux, ZERO)
    sc_mul(taux, tau1, x)
    xsq = _ensure_dst_key()
    sc_mul(xsq, x, x)
    sc_muladd(taux, tau2, xsq, taux)
    del (xsq, tau1, tau2)
    # Fold the commitment masks into taux, weighted by z^(j+1).
    for j in range(1, len(V) + 1):
        sc_muladd(taux, zpow.to(j + 1), gamma[j - 1], taux)
    del zpow

    self.gc(18)
    mu = _ensure_dst_key()
    sc_muladd(mu, x, rho, alpha)
    del (rho, alpha)

    # PAPER LINES 54-57
    # l = l0 \odot x*l1, has to evaluated as it becomes aprime in the loop
    l = vector_gen(
        l0,
        len(l0),
        lambda i, d: sc_add(d, d, sc_mul(tmp_bf_1, l1.to(i), x)),  # noqa: F821
    )
    del (l0, l1, sL)
    self.gc(19)

    # r = r0 \odot x*r1, has to evaluated as it becomes bprime in the loop
    r = vector_gen(
        r0,
        len(r0),
        lambda i, d: sc_add(d, d, sc_mul(tmp_bf_1, r1.to(i), x)),  # noqa: F821
    )
    t = inner_product(l, r)
    del (r1, r0)
    self.gc(19)

    # PAPER LINES 32-33
    x_ip = hash_cache_mash(None, hash_cache, x, taux, mu, t)
    if x_ip == ZERO:
        return 0, None

    # PHASE 2
    # These are used in the inner product rounds
    nprime = MN
    Gprime = _ensure_dst_keyvect(None, MN)
    Hprime = _ensure_dst_keyvect(None, MN)
    aprime = l
    bprime = r
    yinv = invert(None, y)
    yinvpow = init_key(ONE)
    self.gc(20)

    # Hprime_i = Hprec_i * y^-i; Gprime starts as a copy of Gprec.
    for i in range(0, MN):
        Gprime.read(i, Gprec.to(i))
        scalarmult_key(tmp_bf_0, Hprec.to(i), yinvpow)
        Hprime.read(i, tmp_bf_0)
        sc_mul(yinvpow, yinvpow, yinv)
        gc_iter(i)
    self.gc(21)

    L = _ensure_dst_keyvect(None, logMN)
    R = _ensure_dst_keyvect(None, logMN)
    cL = _ensure_dst_key()
    cR = _ensure_dst_key()
    winv = _ensure_dst_key()
    w_round = _ensure_dst_key()
    tmp = _ensure_dst_key()

    round = 0
    _tmp_k_1 = _ensure_dst_key()

    # PAPER LINE 13
    while nprime > 1:
        # PAPER LINE 15
        npr2 = nprime
        nprime >>= 1
        self.gc(22)

        # PAPER LINES 16-17
        inner_product(
            aprime.slice_view(0, nprime), bprime.slice_view(nprime, npr2), cL
        )

        inner_product(
            aprime.slice_view(nprime, npr2), bprime.slice_view(0, nprime), cR
        )
        self.gc(23)

        # PAPER LINES 18-19
        vector_exponent_custom(
            Gprime.slice_view(nprime, npr2),
            Hprime.slice_view(0, nprime),
            aprime.slice_view(0, nprime),
            bprime.slice_view(nprime, npr2),
            tmp_bf_0,
        )

        sc_mul(tmp, cL, x_ip)
        add_keys(tmp_bf_0, tmp_bf_0, scalarmultH(_tmp_k_1, tmp))
        if not proof_v8:
            scalarmult_key(tmp_bf_0, tmp_bf_0, INV_EIGHT)
        L.read(round, tmp_bf_0)
        self.gc(24)

        vector_exponent_custom(
            Gprime.slice_view(0, nprime),
            Hprime.slice_view(nprime, npr2),
            aprime.slice_view(nprime, npr2),
            bprime.slice_view(0, nprime),
            tmp_bf_0,
        )

        sc_mul(tmp, cR, x_ip)
        add_keys(tmp_bf_0, tmp_bf_0, scalarmultH(_tmp_k_1, tmp))
        if not proof_v8:
            scalarmult_key(tmp_bf_0, tmp_bf_0, INV_EIGHT)
        R.read(round, tmp_bf_0)
        self.gc(25)

        # PAPER LINES 21-22
        hash_cache_mash(w_round, hash_cache, L.to(round), R.to(round))
        if w_round == ZERO:
            return (0,)

        # PAPER LINES 24-25
        invert(winv, w_round)
        self.gc(26)

        hadamard_fold(Gprime, winv, w_round)
        self.gc(27)

        # Hprime is folded into the upper half of Gprime (scratch reuse),
        # then copied back into a reallocated Hprime.
        hadamard_fold(Hprime, w_round, winv, Gprime, nprime)
        Hprime.realloc_init_from(nprime, Gprime, nprime, round < 2)
        self.gc(28)

        # PAPER LINES 28-29
        scalar_fold(aprime, w_round, winv, Gprime, nprime)
        aprime.realloc_init_from(nprime, Gprime, nprime, round < 2)
        self.gc(29)

        scalar_fold(bprime, winv, w_round, Gprime, nprime)
        bprime.realloc_init_from(nprime, Gprime, nprime, round < 2)
        self.gc(30)

        # Finally resize Gprime which was buffer for all ops
        Gprime.resize(nprime, realloc=True)
        round += 1

    from apps.monero.xmr.serialize_messages.tx_rsig_bulletproof import Bulletproof

    return (
        1,
        Bulletproof(
            V=V,
            A=A,
            S=S,
            T1=T1,
            T2=T2,
            taux=taux,
            mu=mu,
            L=L,
            R=R,
            a=aprime.to(0),
            b=bprime.to(0),
            t=t,
        ),
    )
|
||
|
|
||
|
def verify_testnet(self, proof):
    """Verify a legacy v8 (testnet) proof, i.e. without 1/8 scaling."""
    return self.verify(proof, proof_v8=True)
|
||
|
|
||
|
def verify(self, proof, proof_v8=False):
    """Single-proof convenience wrapper around verify_batch."""
    return self.verify_batch([proof], proof_v8=proof_v8)
|
||
|
|
||
|
def verify_batch(self, proofs, single_optim=True, proof_v8=False):
    """
    BP batch verification
    :param proofs:
    :param single_optim: single proof memory optimization
    :param proof_v8: previous testnet version
    :return: True on success; raises ValueError on verification failure
    """
    # Range/shape sanity checks on every proof.
    max_length = 0
    for proof in proofs:
        utils.ensure(is_reduced(proof.taux), "Input scalar not in range")
        utils.ensure(is_reduced(proof.mu), "Input scalar not in range")
        utils.ensure(is_reduced(proof.a), "Input scalar not in range")
        utils.ensure(is_reduced(proof.b), "Input scalar not in range")
        utils.ensure(is_reduced(proof.t), "Input scalar not in range")
        utils.ensure(len(proof.V) >= 1, "V does not have at least one element")
        utils.ensure(len(proof.L) == len(proof.R), "|L| != |R|")
        utils.ensure(len(proof.L) > 0, "Empty proof")
        max_length = max(max_length, len(proof.L))

    utils.ensure(max_length < 32, "At least one proof is too large")

    maxMN = 1 << max_length
    logN = 6
    N = 1 << logN
    tmp = _ensure_dst_key()

    # setup weighted aggregates
    is_single = len(proofs) == 1 and single_optim  # ph4
    z1 = init_key(ZERO)
    z3 = init_key(ZERO)
    m_z4 = vector_dup(ZERO, maxMN) if not is_single else None
    m_z5 = vector_dup(ZERO, maxMN) if not is_single else None
    m_y0 = init_key(ZERO)
    y1 = init_key(ZERO)
    muex_acc = init_key(ONE)

    Gprec = self._gprec_aux(maxMN)
    Hprec = self._hprec_aux(maxMN)

    for proof in proofs:
        # Recover aggregation size M from |V|.
        M = 1
        logM = 0
        while M <= BP_M and M < len(proof.V):
            logM += 1
            M = 1 << logM

        utils.ensure(len(proof.L) == 6 + logM, "Proof is not the expected size")
        MN = M * N
        # Random batching weights so independent proofs cannot cancel out.
        weight_y = crypto.encodeint(crypto.random_scalar())
        weight_z = crypto.encodeint(crypto.random_scalar())

        # Reconstruct the challenges
        hash_cache = hash_vct_to_scalar(None, proof.V)
        y = hash_cache_mash(None, hash_cache, proof.A, proof.S)
        utils.ensure(y != ZERO, "y == 0")
        z = hash_to_scalar(None, y)
        copy_key(hash_cache, z)
        utils.ensure(z != ZERO, "z == 0")

        x = hash_cache_mash(None, hash_cache, z, proof.T1, proof.T2)
        utils.ensure(x != ZERO, "x == 0")
        x_ip = hash_cache_mash(None, hash_cache, x, proof.taux, proof.mu, proof.t)
        utils.ensure(x_ip != ZERO, "x_ip == 0")

        # PAPER LINE 61
        sc_mulsub(m_y0, proof.taux, weight_y, m_y0)
        zpow = vector_powers(z, M + 3)

        k = _ensure_dst_key()
        ip1y = vector_power_sum(y, MN)
        sc_mulsub(k, zpow[2], ip1y, ZERO)
        for j in range(1, M + 1):
            utils.ensure(j + 2 < len(zpow), "invalid zpow index")
            sc_mulsub(k, zpow.to(j + 2), BP_IP12, k)

        # VERIFY_line_61rl_new
        sc_muladd(tmp, z, ip1y, k)
        sc_sub(tmp, proof.t, tmp)

        sc_muladd(y1, tmp, weight_y, y1)
        # Current format stores commitments scaled by 1/8 -> multiply weight by 8.
        weight_y8 = init_key(weight_y)
        if not proof_v8:
            weight_y8 = sc_mul(None, weight_y, EIGHT)

        muex = MultiExpSequential(points=[pt for pt in proof.V])
        for j in range(len(proof.V)):
            sc_mul(tmp, zpow[j + 2], weight_y8)
            muex.add_scalar(init_key(tmp))

        sc_mul(tmp, x, weight_y8)
        muex.add_pair(init_key(tmp), proof.T1)

        xsq = _ensure_dst_key()
        sc_mul(xsq, x, x)

        sc_mul(tmp, xsq, weight_y8)
        muex.add_pair(init_key(tmp), proof.T2)

        weight_z8 = init_key(weight_z)
        if not proof_v8:
            weight_z8 = sc_mul(None, weight_z, EIGHT)

        muex.add_pair(weight_z8, proof.A)
        sc_mul(tmp, x, weight_z8)
        muex.add_pair(init_key(tmp), proof.S)

        multiexp(tmp, muex, False)
        add_keys(muex_acc, muex_acc, tmp)
        del muex

        # Compute the number of rounds for the inner product
        rounds = logM + logN
        utils.ensure(rounds > 0, "Zero rounds")

        # PAPER LINES 21-22
        # The inner product challenges are computed per round
        w = _ensure_dst_keyvect(None, rounds)
        for i in range(rounds):
            hash_cache_mash(tmp_bf_0, hash_cache, proof.L[i], proof.R[i])
            w.read(i, tmp_bf_0)
            utils.ensure(w[i] != ZERO, "w[i] == 0")

        # Basically PAPER LINES 24-25
        # Compute the curvepoints from G[i] and H[i]
        yinvpow = init_key(ONE)
        ypow = init_key(ONE)
        yinv = invert(None, y)
        self.gc(61)

        winv = _ensure_dst_keyvect(None, rounds)
        for i in range(rounds):
            invert(tmp_bf_0, w.to(i))
            winv.read(i, tmp_bf_0)
        self.gc(62)

        g_scalar = _ensure_dst_key()
        h_scalar = _ensure_dst_key()
        twoN = self._two_aux(N)
        for i in range(MN):
            copy_key(g_scalar, proof.a)
            sc_mul(h_scalar, proof.b, yinvpow)

            # Walk the challenge tree: bit j of i selects w or w^-1.
            for j in range(rounds - 1, -1, -1):
                J = len(w) - j - 1

                if (i & (1 << j)) == 0:
                    sc_mul(g_scalar, g_scalar, winv.to(J))
                    sc_mul(h_scalar, h_scalar, w.to(J))
                else:
                    sc_mul(g_scalar, g_scalar, w.to(J))
                    sc_mul(h_scalar, h_scalar, winv.to(J))

            # Adjust the scalars using the exponents from PAPER LINE 62
            sc_add(g_scalar, g_scalar, z)
            utils.ensure(2 + i // N < len(zpow), "invalid zpow index")
            utils.ensure(i % N < len(twoN), "invalid twoN index")
            sc_mul(tmp, zpow.to(2 + i // N), twoN.to(i % N))
            sc_muladd(tmp, z, ypow, tmp)
            sc_mulsub(h_scalar, tmp, yinvpow, h_scalar)

            if not is_single:  # ph4
                # Accumulate into batched Gi/Hi scalar vectors.
                sc_mulsub(m_z4[i], g_scalar, weight_z, m_z4[i])
                sc_mulsub(m_z5[i], h_scalar, weight_z, m_z5[i])
            else:
                # Single-proof optimization: apply to accumulator directly.
                sc_mul(tmp, g_scalar, weight_z)
                sub_keys(muex_acc, muex_acc, scalarmult_key(tmp, Gprec.to(i), tmp))

                sc_mul(tmp, h_scalar, weight_z)
                sub_keys(muex_acc, muex_acc, scalarmult_key(tmp, Hprec.to(i), tmp))

            if i != MN - 1:
                sc_mul(yinvpow, yinvpow, yinv)
                sc_mul(ypow, ypow, y)
            if i & 15 == 0:
                self.gc(62)

        del (g_scalar, h_scalar, twoN)
        self.gc(63)

        sc_muladd(z1, proof.mu, weight_z, z1)
        # Interleaved L/R points: even indices from L, odd from R.
        muex = MultiExpSequential(
            point_fnc=lambda i, d: proof.L[i // 2]
            if i & 1 == 0
            else proof.R[i // 2]
        )
        for i in range(rounds):
            sc_mul(tmp, w[i], w[i])
            sc_mul(tmp, tmp, weight_z8)
            muex.add_scalar(tmp)
            sc_mul(tmp, winv[i], winv[i])
            sc_mul(tmp, tmp, weight_z8)
            muex.add_scalar(tmp)

        acc = multiexp(None, muex, False)
        add_keys(muex_acc, muex_acc, acc)

        sc_mulsub(tmp, proof.a, proof.b, proof.t)
        sc_mul(tmp, tmp, x_ip)
        sc_muladd(z3, tmp, weight_z, z3)

    # Final check: all weighted terms must cancel to the identity.
    sc_sub(tmp, m_y0, z1)
    z3p = sc_sub(None, z3, y1)

    check2 = crypto.encodepoint(
        crypto.ge25519_double_scalarmult_base_vartime(
            crypto.decodeint(z3p), crypto.xmr_H(), crypto.decodeint(tmp)
        )
    )
    add_keys(muex_acc, muex_acc, check2)

    if not is_single:  # ph4
        # Interleaved Gi/Hi generators: even indices Gi, odd Hi.
        muex = MultiExpSequential(
            point_fnc=lambda i, d: Gprec.to(i // 2)
            if i & 1 == 0
            else Hprec.to(i // 2)
        )
        for i in range(maxMN):
            muex.add_scalar(m_z4[i])
            muex.add_scalar(m_z5[i])
        add_keys(muex_acc, muex_acc, multiexp(None, muex, True))

    if muex_acc != ONE:
        raise ValueError("Verification failure at step 2")
    return True
|