Dependencies: Updated LZMA SDK to 22.01

pull/3721/head
Gabriele Gristina 12 months ago
parent bdedb609d0
commit a983957b21

@ -1,5 +1,5 @@
/* 7zTypes.h -- Basic types
2021-04-25 : Igor Pavlov : Public domain */
2022-04-01 : Igor Pavlov : Public domain */
#ifndef __7Z_TYPES_H
#define __7Z_TYPES_H
@ -62,6 +62,8 @@ typedef int SRes;
typedef unsigned WRes;
#define MY_SRes_HRESULT_FROM_WRes(x) HRESULT_FROM_WIN32(x)
// #define MY_HRES_ERROR__INTERNAL_ERROR MY_SRes_HRESULT_FROM_WRes(ERROR_INTERNAL_ERROR)
#else // _WIN32
// #define ENV_HAVE_LSTAT
@ -95,6 +97,7 @@ typedef int WRes;
#define ERROR_DIRECTORY 267L
#define ERROR_TOO_MANY_POSTS 298L
#define ERROR_INTERNAL_ERROR 1359L
#define ERROR_INVALID_REPARSE_DATA 4392L
#define ERROR_REPARSE_TAG_INVALID 4393L
#define ERROR_REPARSE_TAG_MISMATCH 4394L
@ -102,6 +105,7 @@ typedef int WRes;
// we use errno equivalents for some WIN32 errors:
#define ERROR_INVALID_PARAMETER EINVAL
#define ERROR_INVALID_FUNCTION EINVAL
#define ERROR_ALREADY_EXISTS EEXIST
#define ERROR_FILE_EXISTS EEXIST
@ -129,10 +133,6 @@ typedef int WRes;
#define MY__E_ERROR_NEGATIVE_SEEK MY_HRESULT_FROM_errno_CONST_ERROR(EINVAL)
*/
// gcc / clang : (sizeof(long) == sizeof(void*)) in 32/64 bits
typedef long INT_PTR;
typedef unsigned long UINT_PTR;
#define TEXT(quote) quote
#define FILE_ATTRIBUTE_READONLY 0x0001
@ -206,6 +206,8 @@ typedef size_t SIZE_T;
#endif // _WIN32
#define MY_HRES_ERROR__INTERNAL_ERROR ((HRESULT)0x8007054FL)
#ifdef _SZ_NO_INT_64
@ -514,6 +516,14 @@ struct ISzAlloc
#endif
#define k_PropVar_TimePrec_0 0
#define k_PropVar_TimePrec_Unix 1
#define k_PropVar_TimePrec_DOS 2
#define k_PropVar_TimePrec_HighPrec 3
#define k_PropVar_TimePrec_Base 16
#define k_PropVar_TimePrec_100ns (k_PropVar_TimePrec_Base + 7)
#define k_PropVar_TimePrec_1ns (k_PropVar_TimePrec_Base + 9)
EXTERN_C_END
#endif

@ -1,7 +1,7 @@
#define MY_VER_MAJOR 21
#define MY_VER_MINOR 02
#define MY_VER_MAJOR 22
#define MY_VER_MINOR 01
#define MY_VER_BUILD 0
#define MY_VERSION_NUMBERS "21.02 alpha"
#define MY_VERSION_NUMBERS "22.01"
#define MY_VERSION MY_VERSION_NUMBERS
#ifdef MY_CPU_NAME
@ -10,12 +10,12 @@
#define MY_VERSION_CPU MY_VERSION
#endif
#define MY_DATE "2021-05-06"
#define MY_DATE "2022-07-15"
#undef MY_COPYRIGHT
#undef MY_VERSION_COPYRIGHT_DATE
#define MY_AUTHOR_NAME "Igor Pavlov"
#define MY_COPYRIGHT_PD "Igor Pavlov : Public domain"
#define MY_COPYRIGHT_CR "Copyright (c) 1999-2021 Igor Pavlov"
#define MY_COPYRIGHT_CR "Copyright (c) 1999-2022 Igor Pavlov"
#ifdef USE_COPYRIGHT_CR
#define MY_COPYRIGHT MY_COPYRIGHT_CR

@ -5,6 +5,7 @@ MY_ASM = jwasm
MY_ASM = asmc
PROGPATH = $(O)/$(PROG)
PROGPATH_STATIC = $(O)/$(PROG)s
# for object file
@ -15,12 +16,32 @@ CFLAGS_BASE = $(MY_ARCH_2) -O2 $(CFLAGS_BASE_LIST) -Wall -Werror -Wextra $(CFLAG
-DNDEBUG -D_REENTRANT -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE
LDFLAGS_STATIC = -DNDEBUG
# -static
ifdef SystemDrive
IS_MINGW = 1
else
ifdef SYSTEMDRIVE
# ifdef OS
IS_MINGW = 1
endif
endif
ifdef IS_MINGW
LDFLAGS_STATIC_2 = -static
else
ifndef DEF_FILE
ifndef IS_NOT_STANDALONE
ifndef MY_DYNAMIC_LINK
ifneq ($(CC), clang)
LDFLAGS_STATIC_2 =
# -static
# -static-libstdc++ -static-libgcc
endif
endif
endif
endif
endif
LDFLAGS_STATIC = -DNDEBUG $(LDFLAGS_STATIC_2)
ifdef DEF_FILE
@ -53,7 +74,7 @@ endif
PROGPATH = $(O)/$(PROG)$(SHARED_EXT)
PROGPATH_STATIC = $(O)/$(PROG)s$(SHARED_EXT)
ifndef O
O=_o
@ -61,15 +82,22 @@ endif
ifdef IS_MINGW
ifdef MSYSTEM
RM = rm -f
MY_MKDIR=mkdir -p
DEL_OBJ_EXE = -$(RM) $(PROGPATH) $(PROGPATH_STATIC) $(OBJS)
else
RM = del
MY_MKDIR=mkdir
LIB2 = -loleaut32 -luuid -ladvapi32 -lUser32
DEL_OBJ_EXE = -$(RM) $(O)\*.o $(O)\$(PROG).exe $(O)\$(PROG).dll
endif
LIB2 = -lOle32 -loleaut32 -luuid -ladvapi32 -lUser32
CXXFLAGS_EXTRA = -DUNICODE -D_UNICODE
# -Wno-delete-non-virtual-dtor
DEL_OBJ_EXE = -$(RM) $(O)\*.o $(O)\$(PROG).exe $(O)\$(PROG).dll
else
@ -82,7 +110,7 @@ MY_MKDIR=mkdir -p
# LOCAL_LIBS_DLL=$(LOCAL_LIBS) -ldl
LIB2 = -lpthread -ldl
DEL_OBJ_EXE = -$(RM) $(PROGPATH) $(OBJS)
DEL_OBJ_EXE = -$(RM) $(PROGPATH) $(PROGPATH_STATIC) $(OBJS)
endif
@ -108,14 +136,23 @@ CXX_WARN_FLAGS =
CXXFLAGS = $(LOCAL_FLAGS) $(CXXFLAGS_BASE2) $(CFLAGS_BASE) $(CXXFLAGS_EXTRA) $(CC_SHARED) -o $@ $(CXX_WARN_FLAGS)
all: $(O) $(PROGPATH)
STATIC_TARGET=
ifdef COMPL_STATIC
STATIC_TARGET=$(PROGPATH_STATIC)
endif
all: $(O) $(PROGPATH) $(STATIC_TARGET)
$(O):
$(MY_MKDIR) $(O)
LFLAGS_ALL = -s $(MY_ARCH_2) $(LDFLAGS) $(LD_arch) $(OBJS) $(MY_LIBS) $(LIB2)
$(PROGPATH): $(OBJS)
$(CXX) -s -o $(PROGPATH) $(MY_ARCH_2) $(LDFLAGS) $(OBJS) $(MY_LIBS) $(LIB2)
$(CXX) -o $(PROGPATH) $(LFLAGS_ALL)
$(PROGPATH_STATIC): $(OBJS)
$(CXX) -static -o $(PROGPATH_STATIC) $(LFLAGS_ALL)
ifndef NO_DEFAULT_RES
@ -174,6 +211,8 @@ $O/LzFind.o: ../../../C/LzFind.c
# ifdef MT_FILES
$O/LzFindMt.o: ../../../C/LzFindMt.c
$(CC) $(CFLAGS) $<
$O/LzFindOpt.o: ../../../C/LzFindOpt.c
$(CC) $(CFLAGS) $<
$O/Threads.o: ../../../C/Threads.c
$(CC) $(CFLAGS) $<
@ -294,7 +333,10 @@ $O/7zMain.o: ../../../C/Util/7z/7zMain.c
$(CC) $(CFLAGS) $<
$O/LzmaUtil.o: ../../../C/Util/Lzma/LzmaUtil.c
$(CC) $(CFLAGS) $<
$O/7zipInstall.o: ../../../C/Util/7zipInstall/7zipInstall.c
$(CC) $(CFLAGS) $<
$O/7zipUninstall.o: ../../../C/Util/7zipUninstall/7zipUninstall.c
$(CC) $(CFLAGS) $<
clean:

@ -1,5 +1,5 @@
/* Aes.c -- AES encryption / decryption
2021-04-01 : Igor Pavlov : Public domain */
2021-05-13 : Igor Pavlov : Public domain */
#include "Precomp.h"
@ -365,10 +365,10 @@ void MY_FAST_CALL AesCtr_Code(UInt32 *p, Byte *data, size_t numBlocks)
#ifdef MY_CPU_LE_UNALIGN
*((UInt32 *)(void *)data) ^= t;
#else
data[0] ^= (t & 0xFF);
data[1] ^= ((t >> 8) & 0xFF);
data[2] ^= ((t >> 16) & 0xFF);
data[3] ^= ((t >> 24));
data[0] = (Byte)(data[0] ^ (t & 0xFF));
data[1] = (Byte)(data[1] ^ ((t >> 8) & 0xFF));
data[2] = (Byte)(data[2] ^ ((t >> 16) & 0xFF));
data[3] = (Byte)(data[3] ^ ((t >> 24)));
#endif
}
}

@ -1,12 +1,12 @@
/* Alloc.c -- Memory allocation functions
2020-10-29 : Igor Pavlov : Public domain */
2021-07-13 : Igor Pavlov : Public domain */
#include "Precomp.h"
#include <stdio.h>
#ifdef _WIN32
#include <windows.h>
#include <Windows.h>
#endif
#include <stdlib.h>
@ -247,14 +247,14 @@ static void *SzAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p); return MyAlloc
static void SzFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p); MyFree(address); }
const ISzAlloc g_Alloc = { SzAlloc, SzFree };
#ifdef _WIN32
static void *SzMidAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p); return MidAlloc(size); }
static void SzMidFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p); MidFree(address); }
const ISzAlloc g_MidAlloc = { SzMidAlloc, SzMidFree };
static void *SzBigAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p); return BigAlloc(size); }
static void SzBigFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p); BigFree(address); }
const ISzAlloc g_MidAlloc = { SzMidAlloc, SzMidFree };
const ISzAlloc g_BigAlloc = { SzBigAlloc, SzBigFree };
#endif
/*
uintptr_t : <stdint.h> C99 (optional)

@ -1,5 +1,5 @@
/* Alloc.h -- Memory allocation functions
2021-02-08 : Igor Pavlov : Public domain */
2021-07-13 : Igor Pavlov : Public domain */
#ifndef __COMMON_ALLOC_H
#define __COMMON_ALLOC_H
@ -30,8 +30,15 @@ void BigFree(void *address);
#endif
extern const ISzAlloc g_Alloc;
#ifdef _WIN32
extern const ISzAlloc g_BigAlloc;
extern const ISzAlloc g_MidAlloc;
#else
#define g_BigAlloc g_AlignedAlloc
#define g_MidAlloc g_AlignedAlloc
#endif
extern const ISzAlloc g_AlignedAlloc;

@ -1,5 +1,5 @@
/* CpuArch.c -- CPU specific code
2021-04-28 : Igor Pavlov : Public domain */
2021-07-13 : Igor Pavlov : Public domain */
#include "Precomp.h"
@ -217,7 +217,7 @@ BoolInt CPU_Is_InOrder()
}
#if !defined(MY_CPU_AMD64) && defined(_WIN32)
#include <windows.h>
#include <Windows.h>
static BoolInt CPU_Sys_Is_SSE_Supported()
{
OSVERSIONINFO vi;
@ -275,9 +275,33 @@ BoolInt CPU_IsSupported_SHA()
// #include <stdio.h>
#ifdef _WIN32
#include <windows.h>
#include <Windows.h>
#endif
BoolInt CPU_IsSupported_AVX2()
{
Cx86cpuid p;
CHECK_SYS_SSE_SUPPORT
#ifdef _WIN32
#define MY__PF_XSAVE_ENABLED 17
if (!IsProcessorFeaturePresent(MY__PF_XSAVE_ENABLED))
return False;
#endif
if (!x86cpuid_CheckAndRead(&p))
return False;
if (p.maxFunc < 7)
return False;
{
UInt32 d[4] = { 0 };
MyCPUID(7, &d[0], &d[1], &d[2], &d[3]);
// printf("\ncpuid(7): ebx=%8x ecx=%8x\n", d[1], d[2]);
return 1
& (d[1] >> 5); // avx2
}
}
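For reference, the bit tested above is the AVX2 flag (EBX bit 5) of CPUID leaf 7, sub-leaf 0. A minimal standalone sketch of the same check, assuming a GCC/clang toolchain with <cpuid.h> (this helper is not part of the SDK or of this commit, and it omits the Windows XSAVE check performed above):

#include <cpuid.h>

static int has_avx2(void)   /* hypothetical helper mirroring CPU_IsSupported_AVX2() */
{
  unsigned a, b, c, d;
  if (!__get_cpuid_count(7, 0, &a, &b, &c, &d))
    return 0;               /* CPUID leaf 7 not available */
  return (b >> 5) & 1;      /* EBX bit 5 = AVX2 */
}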
BoolInt CPU_IsSupported_VAES_AVX2()
{
Cx86cpuid p;
@ -327,12 +351,11 @@ BoolInt CPU_IsSupported_PageGB()
#ifdef _WIN32
#include <windows.h>
#include <Windows.h>
BoolInt CPU_IsSupported_CRC32()
{ return IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
BoolInt CPU_IsSupported_CRYPTO()
{ return IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
BoolInt CPU_IsSupported_CRC32() { return IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
BoolInt CPU_IsSupported_CRYPTO() { return IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
BoolInt CPU_IsSupported_NEON() { return IsProcessorFeaturePresent(PF_ARM_NEON_INSTRUCTIONS_AVAILABLE) ? 1 : 0; }
#else
@ -356,17 +379,27 @@ static void Print_sysctlbyname(const char *name)
}
*/
BoolInt CPU_IsSupported_CRC32(void)
static BoolInt My_sysctlbyname_Get_BoolInt(const char *name)
{
UInt32 val = 0;
if (My_sysctlbyname_Get_UInt32(name, &val) == 0 && val == 1)
return 1;
return 0;
}
/*
Print_sysctlbyname("hw.pagesize");
Print_sysctlbyname("machdep.cpu.brand_string");
*/
UInt32 val = 0;
if (My_sysctlbyname_Get_UInt32("hw.optional.armv8_crc32", &val) == 0 && val == 1)
return 1;
return 0;
BoolInt CPU_IsSupported_CRC32(void)
{
return My_sysctlbyname_Get_BoolInt("hw.optional.armv8_crc32");
}
BoolInt CPU_IsSupported_NEON(void)
{
return My_sysctlbyname_Get_BoolInt("hw.optional.neon");
}
#ifdef MY_CPU_ARM64
@ -390,18 +423,25 @@ BoolInt CPU_IsSupported_AES (void) { return APPLE_CRYPTO_SUPPORT_VAL; }
#include <asm/hwcap.h>
#define MY_HWCAP_CHECK_FUNC_2(name1, name2) \
BoolInt CPU_IsSupported_ ## name1() { return (getauxval(AT_HWCAP) & (HWCAP_ ## name2)) ? 1 : 0; }
#ifdef MY_CPU_ARM64
#define MY_HWCAP_CHECK_FUNC(name) \
BoolInt CPU_IsSupported_ ## name() { return (getauxval(AT_HWCAP) & (HWCAP_ ## name)) ? 1 : 0; }
MY_HWCAP_CHECK_FUNC_2(name, name)
MY_HWCAP_CHECK_FUNC_2(NEON, ASIMD)
// MY_HWCAP_CHECK_FUNC (ASIMD)
#elif defined(MY_CPU_ARM)
#define MY_HWCAP_CHECK_FUNC(name) \
BoolInt CPU_IsSupported_ ## name() { return (getauxval(AT_HWCAP2) & (HWCAP2_ ## name)) ? 1 : 0; }
MY_HWCAP_CHECK_FUNC_2(NEON, NEON)
#endif
#else // USE_HWCAP
#define MY_HWCAP_CHECK_FUNC(name) \
BoolInt CPU_IsSupported_ ## name() { return 0; }
MY_HWCAP_CHECK_FUNC(NEON)
#endif // USE_HWCAP
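For clarity, on arm64 Linux builds the invocation MY_HWCAP_CHECK_FUNC_2(NEON, ASIMD) above expands (per the macro definition) to a getauxval() feature probe, roughly:

BoolInt CPU_IsSupported_NEON() { return (getauxval(AT_HWCAP) & (HWCAP_ASIMD)) ? 1 : 0; }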

@ -1,5 +1,5 @@
/* CpuArch.h -- CPU specific code
2021-04-25 : Igor Pavlov : Public domain */
2022-07-15 : Igor Pavlov : Public domain */
#ifndef __CPU_ARCH_H
#define __CPU_ARCH_H
@ -123,12 +123,15 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
#endif
#if defined(__sparc64__)
#define MY_CPU_NAME "sparc64"
#define MY_CPU_64BIT
#elif defined(__sparc__)
#define MY_CPU_NAME "sparc"
/* #define MY_CPU_32BIT */
#if defined(__riscv) \
|| defined(__riscv__)
#if __riscv_xlen == 32
#define MY_CPU_NAME "riscv32"
#elif __riscv_xlen == 64
#define MY_CPU_NAME "riscv64"
#else
#define MY_CPU_NAME "riscv"
#endif
#endif
@ -225,7 +228,6 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
#endif
#else
#ifdef __xlC__
// for XLC compiler:
#define MY_CPU_pragma_pack_push_1 _Pragma("pack(1)")
#define MY_CPU_pragma_pop _Pragma("pack()")
#else
@ -253,8 +255,12 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
#ifdef MY_CPU_LE
#if defined(MY_CPU_X86_OR_AMD64) \
|| defined(MY_CPU_ARM64) \
|| defined(__ARM_FEATURE_UNALIGNED)
|| defined(MY_CPU_ARM64)
#define MY_CPU_LE_UNALIGN
#define MY_CPU_LE_UNALIGN_64
#elif defined(__ARM_FEATURE_UNALIGNED)
/* gcc9 for 32-bit arm can use LDRD instruction that requires 32-bit alignment.
So we can't use unaligned 64-bit operations. */
#define MY_CPU_LE_UNALIGN
#endif
#endif
@ -264,11 +270,15 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
#define GetUi16(p) (*(const UInt16 *)(const void *)(p))
#define GetUi32(p) (*(const UInt32 *)(const void *)(p))
#ifdef MY_CPU_LE_UNALIGN_64
#define GetUi64(p) (*(const UInt64 *)(const void *)(p))
#endif
#define SetUi16(p, v) { *(UInt16 *)(void *)(p) = (v); }
#define SetUi32(p, v) { *(UInt32 *)(void *)(p) = (v); }
#ifdef MY_CPU_LE_UNALIGN_64
#define SetUi64(p, v) { *(UInt64 *)(void *)(p) = (v); }
#endif
#else
@ -282,8 +292,6 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
((UInt32)((const Byte *)(p))[2] << 16) | \
((UInt32)((const Byte *)(p))[3] << 24))
#define GetUi64(p) (GetUi32(p) | ((UInt64)GetUi32(((const Byte *)(p)) + 4) << 32))
#define SetUi16(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
_ppp_[0] = (Byte)_vvv_; \
_ppp_[1] = (Byte)(_vvv_ >> 8); }
@ -294,12 +302,22 @@ MY_CPU_64BIT means that processor can work with 64-bit registers.
_ppp_[2] = (Byte)(_vvv_ >> 16); \
_ppp_[3] = (Byte)(_vvv_ >> 24); }
#endif
#ifndef MY_CPU_LE_UNALIGN_64
#define GetUi64(p) (GetUi32(p) | ((UInt64)GetUi32(((const Byte *)(p)) + 4) << 32))
#define SetUi64(p, v) { Byte *_ppp2_ = (Byte *)(p); UInt64 _vvv2_ = (v); \
SetUi32(_ppp2_ , (UInt32)_vvv2_); \
SetUi32(_ppp2_ + 4, (UInt32)(_vvv2_ >> 32)); }
#endif
#ifdef __has_builtin
#define MY__has_builtin(x) __has_builtin(x)
#else
@ -392,6 +410,7 @@ int x86cpuid_GetFirm(const Cx86cpuid *p);
BoolInt CPU_Is_InOrder(void);
BoolInt CPU_IsSupported_AES(void);
BoolInt CPU_IsSupported_AVX2(void);
BoolInt CPU_IsSupported_VAES_AVX2(void);
BoolInt CPU_IsSupported_SSSE3(void);
BoolInt CPU_IsSupported_SSE41(void);
@ -401,6 +420,7 @@ BoolInt CPU_IsSupported_PageGB(void);
#elif defined(MY_CPU_ARM_OR_ARM64)
BoolInt CPU_IsSupported_CRC32(void);
BoolInt CPU_IsSupported_NEON(void);
#if defined(_WIN32)
BoolInt CPU_IsSupported_CRYPTO(void);

@ -1,16 +1,20 @@
/* DllSecur.c -- DLL loading security
2018-02-21 : Igor Pavlov : Public domain */
2022-07-15 : Igor Pavlov : Public domain */
#include "Precomp.h"
#ifdef _WIN32
#include <windows.h>
#include <Windows.h>
#include "DllSecur.h"
#ifndef UNDER_CE
#if defined(__GNUC__) && (__GNUC__ >= 8)
#pragma GCC diagnostic ignored "-Wcast-function-type"
#endif
typedef BOOL (WINAPI *Func_SetDefaultDllDirectories)(DWORD DirectoryFlags);
#define MY_LOAD_LIBRARY_SEARCH_USER_DIRS 0x400
@ -33,17 +37,19 @@ static const char * const g_Dlls =
#endif
// #define MY_CAST_FUNC (void(*)())
#define MY_CAST_FUNC
void My_SetDefaultDllDirectories()
{
#ifndef UNDER_CE
OSVERSIONINFO vi;
vi.dwOSVersionInfoSize = sizeof(vi);
GetVersionEx(&vi);
if (!GetVersionEx(&vi) || vi.dwMajorVersion != 6 || vi.dwMinorVersion != 0)
{
Func_SetDefaultDllDirectories setDllDirs = (Func_SetDefaultDllDirectories)
GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), "SetDefaultDllDirectories");
MY_CAST_FUNC GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), "SetDefaultDllDirectories");
if (setDllDirs)
if (setDllDirs(MY_LOAD_LIBRARY_SEARCH_SYSTEM32 | MY_LOAD_LIBRARY_SEARCH_USER_DIRS))
return;
@ -66,7 +72,7 @@ void LoadSecurityDlls()
if (!GetVersionEx(&vi) || vi.dwMajorVersion != 6 || vi.dwMinorVersion != 0)
{
Func_SetDefaultDllDirectories setDllDirs = (Func_SetDefaultDllDirectories)
GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), "SetDefaultDllDirectories");
MY_CAST_FUNC GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), "SetDefaultDllDirectories");
if (setDllDirs)
if (setDllDirs(MY_LOAD_LIBRARY_SEARCH_SYSTEM32 | MY_LOAD_LIBRARY_SEARCH_USER_DIRS))
return;

File diff suppressed because it is too large.

@ -1,5 +1,5 @@
/* LzFind.h -- Match finder for LZ algorithms
2021-02-09 : Igor Pavlov : Public domain */
2021-07-13 : Igor Pavlov : Public domain */
#ifndef __LZ_FIND_H
#define __LZ_FIND_H
@ -15,7 +15,7 @@ typedef struct _CMatchFinder
Byte *buffer;
UInt32 pos;
UInt32 posLimit;
UInt32 streamPos;
UInt32 streamPos; /* wrap over Zero is allowed (streamPos < pos). Use (UInt32)(streamPos - pos) */
UInt32 lenLimit;
UInt32 cyclicBufferPos;
@ -51,17 +51,19 @@ typedef struct _CMatchFinder
UInt64 expectedDataSize;
} CMatchFinder;
#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((p)->buffer)
#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((const Byte *)(p)->buffer)
#define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos)
#define Inline_MatchFinder_GetNumAvailableBytes(p) ((UInt32)((p)->streamPos - (p)->pos))
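The unsigned wrap-over noted in the streamPos comment above is why the new (UInt32) cast matters: even after streamPos wraps past zero, the 32-bit subtraction still yields the correct count. A tiny standalone illustration with hypothetical values (not part of the SDK):

#include <stdio.h>
typedef unsigned int UInt32;
int main(void)
{
  UInt32 pos = 0xFFFFFFF0u;        /* near the top of the 32-bit range */
  UInt32 streamPos = 0x00000005u;  /* already wrapped past zero */
  printf("%u\n", (UInt32)(streamPos - pos));  /* prints 21, the correct distance */
  return 0;
}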
/*
#define Inline_MatchFinder_IsFinishedOK(p) \
((p)->streamEndWasReached \
&& (p)->streamPos == (p)->pos \
&& (!(p)->directInput || (p)->directInputRem == 0))
*/
int MatchFinder_NeedMove(CMatchFinder *p);
// Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p);
/* Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p); */
void MatchFinder_MoveBlock(CMatchFinder *p);
void MatchFinder_ReadIfRequired(CMatchFinder *p);
@ -76,10 +78,21 @@ int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
ISzAllocPtr alloc);
void MatchFinder_Free(CMatchFinder *p, ISzAllocPtr alloc);
void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, size_t numItems);
void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);
// void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);
/*
#define Inline_MatchFinder_InitPos(p, val) \
(p)->pos = (val); \
(p)->streamPos = (val);
*/
#define Inline_MatchFinder_ReduceOffsets(p, subValue) \
(p)->pos -= (subValue); \
(p)->streamPos -= (subValue);
UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son,
UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue,
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue,
UInt32 *distances, UInt32 maxLen);
/*
@ -91,7 +104,7 @@ Conditions:
typedef void (*Mf_Init_Func)(void *object);
typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object);
typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object);
typedef UInt32 (*Mf_GetMatches_Func)(void *object, UInt32 *distances);
typedef UInt32 * (*Mf_GetMatches_Func)(void *object, UInt32 *distances);
typedef void (*Mf_Skip_Func)(void *object, UInt32);
typedef struct _IMatchFinder
@ -101,21 +114,23 @@ typedef struct _IMatchFinder
Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos;
Mf_GetMatches_Func GetMatches;
Mf_Skip_Func Skip;
} IMatchFinder;
} IMatchFinder2;
void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable);
void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder2 *vTable);
void MatchFinder_Init_LowHash(CMatchFinder *p);
void MatchFinder_Init_HighHash(CMatchFinder *p);
void MatchFinder_Init_3(CMatchFinder *p, int readData);
void MatchFinder_Init_4(CMatchFinder *p);
void MatchFinder_Init(CMatchFinder *p);
UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
UInt32* Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
UInt32* Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
void LzFindPrepare(void);
EXTERN_C_END
#endif

File diff suppressed because it is too large.

@ -1,5 +1,5 @@
/* LzFindMt.h -- multithreaded Match finder for LZ algorithms
2019-11-05 : Igor Pavlov : Public domain */
2021-07-12 : Igor Pavlov : Public domain */
#ifndef __LZ_FIND_MT_H
#define __LZ_FIND_MT_H
@ -11,22 +11,24 @@ EXTERN_C_BEGIN
typedef struct _CMtSync
{
UInt32 numProcessedBlocks;
CThread thread;
UInt64 affinity;
BoolInt wasCreated;
BoolInt needStart;
BoolInt csWasInitialized;
BoolInt csWasEntered;
BoolInt exit;
BoolInt stopWriting;
CThread thread;
CAutoResetEvent canStart;
CAutoResetEvent wasStarted;
CAutoResetEvent wasStopped;
CSemaphore freeSemaphore;
CSemaphore filledSemaphore;
BoolInt csWasInitialized;
BoolInt csWasEntered;
CCriticalSection cs;
UInt32 numProcessedBlocks;
UInt64 affinity;
// UInt32 numBlocks_Sent;
} CMtSync;
typedef UInt32 * (*Mf_Mix_Matches)(void *p, UInt32 matchMinPos, UInt32 *distances);
@ -42,8 +44,8 @@ typedef struct _CMatchFinderMt
/* LZ */
const Byte *pointerToCurPos;
UInt32 *btBuf;
UInt32 btBufPos;
UInt32 btBufPosLimit;
const UInt32 *btBufPos;
const UInt32 *btBufPosLimit;
UInt32 lzPos;
UInt32 btNumAvailBytes;
@ -54,6 +56,10 @@ typedef struct _CMatchFinderMt
const UInt32 *crc;
Mf_Mix_Matches MixMatchesFunc;
UInt32 failure_LZ_BT; // failure in BT transferred to LZ
// UInt32 failure_LZ_LZ; // failure in LZ tables
UInt32 failureBuf[1];
// UInt32 crc[256];
/* LZ + BT */
CMtSync btSync;
@ -64,6 +70,8 @@ typedef struct _CMatchFinderMt
UInt32 hashBufPos;
UInt32 hashBufPosLimit;
UInt32 hashNumAvail;
UInt32 failure_BT;
CLzRef *son;
UInt32 matchMaxLen;
@ -71,7 +79,7 @@ typedef struct _CMatchFinderMt
UInt32 pos;
const Byte *buffer;
UInt32 cyclicBufferPos;
UInt32 cyclicBufferSize; /* it must be historySize + 1 */
UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */
UInt32 cutValue;
/* BT + Hash */
@ -81,13 +89,19 @@ typedef struct _CMatchFinderMt
/* Hash */
Mf_GetHeads GetHeadsFunc;
CMatchFinder *MatchFinder;
// CMatchFinder MatchFinder;
} CMatchFinderMt;
// only for Mt part
void MatchFinderMt_Construct(CMatchFinderMt *p);
void MatchFinderMt_Destruct(CMatchFinderMt *p, ISzAllocPtr alloc);
SRes MatchFinderMt_Create(CMatchFinderMt *p, UInt32 historySize, UInt32 keepAddBufferBefore,
UInt32 matchMaxLen, UInt32 keepAddBufferAfter, ISzAllocPtr alloc);
void MatchFinderMt_CreateVTable(CMatchFinderMt *p, IMatchFinder *vTable);
void MatchFinderMt_CreateVTable(CMatchFinderMt *p, IMatchFinder2 *vTable);
/* call MatchFinderMt_InitMt() before IMatchFinder::Init() */
SRes MatchFinderMt_InitMt(CMatchFinderMt *p);
void MatchFinderMt_ReleaseStream(CMatchFinderMt *p);
EXTERN_C_END

@ -0,0 +1,578 @@
/* LzFindOpt.c -- multithreaded Match finder for LZ algorithms
2021-07-13 : Igor Pavlov : Public domain */
#include "Precomp.h"
#include "CpuArch.h"
#include "LzFind.h"
// #include "LzFindMt.h"
// #define LOG_ITERS
// #define LOG_THREAD
#ifdef LOG_THREAD
#include <stdio.h>
#define PRF(x) x
#else
// #define PRF(x)
#endif
#ifdef LOG_ITERS
#include <stdio.h>
UInt64 g_NumIters_Tree;
UInt64 g_NumIters_Loop;
UInt64 g_NumIters_Bytes;
#define LOG_ITER(x) x
#else
#define LOG_ITER(x)
#endif
// ---------- BT THREAD ----------
#define USE_SON_PREFETCH
#define USE_LONG_MATCH_OPT
#define kEmptyHashValue 0
// #define CYC_TO_POS_OFFSET 0
// #define CYC_TO_POS_OFFSET 1 // for debug
/*
MY_NO_INLINE
UInt32 * MY_FAST_CALL GetMatchesSpecN_1(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
UInt32 _cutValue, UInt32 *d, size_t _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size, UInt32 *posRes)
{
do
{
UInt32 delta;
if (hash == size)
break;
delta = *hash++;
if (delta == 0 || delta > (UInt32)pos)
return NULL;
lenLimit++;
if (delta == (UInt32)pos)
{
CLzRef *ptr1 = son + ((size_t)pos << 1) - CYC_TO_POS_OFFSET * 2;
*d++ = 0;
ptr1[0] = kEmptyHashValue;
ptr1[1] = kEmptyHashValue;
}
else
{
UInt32 *_distances = ++d;
CLzRef *ptr0 = son + ((size_t)(pos) << 1) - CYC_TO_POS_OFFSET * 2 + 1;
CLzRef *ptr1 = son + ((size_t)(pos) << 1) - CYC_TO_POS_OFFSET * 2;
const Byte *len0 = cur, *len1 = cur;
UInt32 cutValue = _cutValue;
const Byte *maxLen = cur + _maxLen;
for (LOG_ITER(g_NumIters_Tree++);;)
{
LOG_ITER(g_NumIters_Loop++);
{
const ptrdiff_t diff = (ptrdiff_t)0 - (ptrdiff_t)delta;
CLzRef *pair = son + ((size_t)(((ptrdiff_t)pos - CYC_TO_POS_OFFSET) + diff) << 1);
const Byte *len = (len0 < len1 ? len0 : len1);
#ifdef USE_SON_PREFETCH
const UInt32 pair0 = *pair;
#endif
if (len[diff] == len[0])
{
if (++len != lenLimit && len[diff] == len[0])
while (++len != lenLimit)
{
LOG_ITER(g_NumIters_Bytes++);
if (len[diff] != len[0])
break;
}
if (maxLen < len)
{
maxLen = len;
*d++ = (UInt32)(len - cur);
*d++ = delta - 1;
if (len == lenLimit)
{
const UInt32 pair1 = pair[1];
*ptr1 =
#ifdef USE_SON_PREFETCH
pair0;
#else
pair[0];
#endif
*ptr0 = pair1;
_distances[-1] = (UInt32)(d - _distances);
#ifdef USE_LONG_MATCH_OPT
if (hash == size || *hash != delta || lenLimit[diff] != lenLimit[0] || d >= limit)
break;
{
for (;;)
{
hash++;
pos++;
cur++;
lenLimit++;
{
CLzRef *ptr = son + ((size_t)(pos) << 1) - CYC_TO_POS_OFFSET * 2;
#if 0
*(UInt64 *)(void *)ptr = ((const UInt64 *)(const void *)ptr)[diff];
#else
const UInt32 p0 = ptr[0 + (diff * 2)];
const UInt32 p1 = ptr[1 + (diff * 2)];
ptr[0] = p0;
ptr[1] = p1;
// ptr[0] = ptr[0 + (diff * 2)];
// ptr[1] = ptr[1 + (diff * 2)];
#endif
}
// PrintSon(son + 2, pos - 1);
// printf("\npos = %x delta = %x\n", pos, delta);
len++;
*d++ = 2;
*d++ = (UInt32)(len - cur);
*d++ = delta - 1;
if (hash == size || *hash != delta || lenLimit[diff] != lenLimit[0] || d >= limit)
break;
}
}
#endif
break;
}
}
}
{
const UInt32 curMatch = (UInt32)pos - delta; // (UInt32)(pos + diff);
if (len[diff] < len[0])
{
delta = pair[1];
if (delta >= curMatch)
return NULL;
*ptr1 = curMatch;
ptr1 = pair + 1;
len1 = len;
}
else
{
delta = *pair;
if (delta >= curMatch)
return NULL;
*ptr0 = curMatch;
ptr0 = pair;
len0 = len;
}
delta = (UInt32)pos - delta;
if (--cutValue == 0 || delta >= pos)
{
*ptr0 = *ptr1 = kEmptyHashValue;
_distances[-1] = (UInt32)(d - _distances);
break;
}
}
}
} // for (tree iterations)
}
pos++;
cur++;
}
while (d < limit);
*posRes = (UInt32)pos;
return d;
}
*/
/* define cbs if you use 2 functions.
GetMatchesSpecN_1() : (pos < _cyclicBufferSize)
GetMatchesSpecN_2() : (pos >= _cyclicBufferSize)
do not define cbs if you use 1 function:
GetMatchesSpecN_2()
*/
// #define cbs _cyclicBufferSize
/*
we use size_t for (pos) and (_cyclicBufferPos) instead of UInt32
to eliminate "movsx" BUG in old MSVC x64 compiler.
*/
UInt32 * MY_FAST_CALL GetMatchesSpecN_2(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
UInt32 _cutValue, UInt32 *d, size_t _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size,
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize,
UInt32 *posRes);
MY_NO_INLINE
UInt32 * MY_FAST_CALL GetMatchesSpecN_2(const Byte *lenLimit, size_t pos, const Byte *cur, CLzRef *son,
UInt32 _cutValue, UInt32 *d, size_t _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size,
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize,
UInt32 *posRes)
{
do // while (hash != size)
{
UInt32 delta;
#ifndef cbs
UInt32 cbs;
#endif
if (hash == size)
break;
delta = *hash++;
if (delta == 0)
return NULL;
lenLimit++;
#ifndef cbs
cbs = _cyclicBufferSize;
if ((UInt32)pos < cbs)
{
if (delta > (UInt32)pos)
return NULL;
cbs = (UInt32)pos;
}
#endif
if (delta >= cbs)
{
CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
*d++ = 0;
ptr1[0] = kEmptyHashValue;
ptr1[1] = kEmptyHashValue;
}
else
{
UInt32 *_distances = ++d;
CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1;
CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
UInt32 cutValue = _cutValue;
const Byte *len0 = cur, *len1 = cur;
const Byte *maxLen = cur + _maxLen;
// if (cutValue == 0) { *ptr0 = *ptr1 = kEmptyHashValue; } else
for (LOG_ITER(g_NumIters_Tree++);;)
{
LOG_ITER(g_NumIters_Loop++);
{
// SPEC code
CLzRef *pair = son + ((size_t)((ptrdiff_t)_cyclicBufferPos - (ptrdiff_t)delta
+ (ptrdiff_t)(UInt32)(_cyclicBufferPos < delta ? cbs : 0)
) << 1);
const ptrdiff_t diff = (ptrdiff_t)0 - (ptrdiff_t)delta;
const Byte *len = (len0 < len1 ? len0 : len1);
#ifdef USE_SON_PREFETCH
const UInt32 pair0 = *pair;
#endif
if (len[diff] == len[0])
{
if (++len != lenLimit && len[diff] == len[0])
while (++len != lenLimit)
{
LOG_ITER(g_NumIters_Bytes++);
if (len[diff] != len[0])
break;
}
if (maxLen < len)
{
maxLen = len;
*d++ = (UInt32)(len - cur);
*d++ = delta - 1;
if (len == lenLimit)
{
const UInt32 pair1 = pair[1];
*ptr1 =
#ifdef USE_SON_PREFETCH
pair0;
#else
pair[0];
#endif
*ptr0 = pair1;
_distances[-1] = (UInt32)(d - _distances);
#ifdef USE_LONG_MATCH_OPT
if (hash == size || *hash != delta || lenLimit[diff] != lenLimit[0] || d >= limit)
break;
{
for (;;)
{
*d++ = 2;
*d++ = (UInt32)(lenLimit - cur);
*d++ = delta - 1;
cur++;
lenLimit++;
// SPEC
_cyclicBufferPos++;
{
// SPEC code
CLzRef *dest = son + ((size_t)(_cyclicBufferPos) << 1);
const CLzRef *src = dest + ((diff
+ (ptrdiff_t)(UInt32)((_cyclicBufferPos < delta) ? cbs : 0)) << 1);
// CLzRef *ptr = son + ((size_t)(pos) << 1) - CYC_TO_POS_OFFSET * 2;
#if 0
*(UInt64 *)(void *)dest = *((const UInt64 *)(const void *)src);
#else
const UInt32 p0 = src[0];
const UInt32 p1 = src[1];
dest[0] = p0;
dest[1] = p1;
#endif
}
pos++;
hash++;
if (hash == size || *hash != delta || lenLimit[diff] != lenLimit[0] || d >= limit)
break;
} // for() end for long matches
}
#endif
break; // break from TREE iterations
}
}
}
{
const UInt32 curMatch = (UInt32)pos - delta; // (UInt32)(pos + diff);
if (len[diff] < len[0])
{
delta = pair[1];
*ptr1 = curMatch;
ptr1 = pair + 1;
len1 = len;
if (delta >= curMatch)
return NULL;
}
else
{
delta = *pair;
*ptr0 = curMatch;
ptr0 = pair;
len0 = len;
if (delta >= curMatch)
return NULL;
}
delta = (UInt32)pos - delta;
if (--cutValue == 0 || delta >= cbs)
{
*ptr0 = *ptr1 = kEmptyHashValue;
_distances[-1] = (UInt32)(d - _distances);
break;
}
}
}
} // for (tree iterations)
}
pos++;
_cyclicBufferPos++;
cur++;
}
while (d < limit);
*posRes = (UInt32)pos;
return d;
}
/*
typedef UInt32 uint32plus; // size_t
UInt32 * MY_FAST_CALL GetMatchesSpecN_3(uint32plus lenLimit, size_t pos, const Byte *cur, CLzRef *son,
UInt32 _cutValue, UInt32 *d, uint32plus _maxLen, const UInt32 *hash, const UInt32 *limit, const UInt32 *size,
size_t _cyclicBufferPos, UInt32 _cyclicBufferSize,
UInt32 *posRes)
{
do // while (hash != size)
{
UInt32 delta;
#ifndef cbs
UInt32 cbs;
#endif
if (hash == size)
break;
delta = *hash++;
if (delta == 0)
return NULL;
#ifndef cbs
cbs = _cyclicBufferSize;
if ((UInt32)pos < cbs)
{
if (delta > (UInt32)pos)
return NULL;
cbs = (UInt32)pos;
}
#endif
if (delta >= cbs)
{
CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
*d++ = 0;
ptr1[0] = kEmptyHashValue;
ptr1[1] = kEmptyHashValue;
}
else
{
CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1;
CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
UInt32 *_distances = ++d;
uint32plus len0 = 0, len1 = 0;
UInt32 cutValue = _cutValue;
uint32plus maxLen = _maxLen;
// lenLimit++; // const Byte *lenLimit = cur + _lenLimit;
for (LOG_ITER(g_NumIters_Tree++);;)
{
LOG_ITER(g_NumIters_Loop++);
{
// const ptrdiff_t diff = (ptrdiff_t)0 - (ptrdiff_t)delta;
CLzRef *pair = son + ((size_t)((ptrdiff_t)_cyclicBufferPos - delta
+ (ptrdiff_t)(UInt32)(_cyclicBufferPos < delta ? cbs : 0)
) << 1);
const Byte *pb = cur - delta;
uint32plus len = (len0 < len1 ? len0 : len1);
#ifdef USE_SON_PREFETCH
const UInt32 pair0 = *pair;
#endif
if (pb[len] == cur[len])
{
if (++len != lenLimit && pb[len] == cur[len])
while (++len != lenLimit)
if (pb[len] != cur[len])
break;
if (maxLen < len)
{
maxLen = len;
*d++ = (UInt32)len;
*d++ = delta - 1;
if (len == lenLimit)
{
{
const UInt32 pair1 = pair[1];
*ptr0 = pair1;
*ptr1 =
#ifdef USE_SON_PREFETCH
pair0;
#else
pair[0];
#endif
}
_distances[-1] = (UInt32)(d - _distances);
#ifdef USE_LONG_MATCH_OPT
if (hash == size || *hash != delta || pb[lenLimit] != cur[lenLimit] || d >= limit)
break;
{
const ptrdiff_t diff = (ptrdiff_t)0 - (ptrdiff_t)delta;
for (;;)
{
*d++ = 2;
*d++ = (UInt32)lenLimit;
*d++ = delta - 1;
_cyclicBufferPos++;
{
CLzRef *dest = son + ((size_t)_cyclicBufferPos << 1);
const CLzRef *src = dest + ((diff +
(ptrdiff_t)(UInt32)(_cyclicBufferPos < delta ? cbs : 0)) << 1);
#if 0
*(UInt64 *)(void *)dest = *((const UInt64 *)(const void *)src);
#else
const UInt32 p0 = src[0];
const UInt32 p1 = src[1];
dest[0] = p0;
dest[1] = p1;
#endif
}
hash++;
pos++;
cur++;
pb++;
if (hash == size || *hash != delta || pb[lenLimit] != cur[lenLimit] || d >= limit)
break;
}
}
#endif
break;
}
}
}
{
const UInt32 curMatch = (UInt32)pos - delta;
if (pb[len] < cur[len])
{
delta = pair[1];
*ptr1 = curMatch;
ptr1 = pair + 1;
len1 = len;
}
else
{
delta = *pair;
*ptr0 = curMatch;
ptr0 = pair;
len0 = len;
}
{
if (delta >= curMatch)
return NULL;
delta = (UInt32)pos - delta;
if (delta >= cbs
// delta >= _cyclicBufferSize || delta >= pos
|| --cutValue == 0)
{
*ptr0 = *ptr1 = kEmptyHashValue;
_distances[-1] = (UInt32)(d - _distances);
break;
}
}
}
}
} // for (tree iterations)
}
pos++;
_cyclicBufferPos++;
cur++;
}
while (d < limit);
*posRes = (UInt32)pos;
return d;
}
*/

@ -1,5 +1,5 @@
/* LzmaEnc.c -- LZMA Encoder
2021-04-01: Igor Pavlov : Public domain */
2022-07-15: Igor Pavlov : Public domain */
#include "Precomp.h"
@ -12,6 +12,7 @@
#include <stdio.h>
#endif
#include "CpuArch.h"
#include "LzmaEnc.h"
#include "LzFind.h"
@ -36,8 +37,8 @@ void LzmaEnc_RestoreState(CLzmaEncHandle pp);
static unsigned g_STAT_OFFSET = 0;
#endif
#define kLzmaMaxHistorySize ((UInt32)3 << 29)
/* #define kLzmaMaxHistorySize ((UInt32)7 << 29) */
/* for good normalization speed we still reserve 256 MB before 4 GB range */
#define kLzmaMaxHistorySize ((UInt32)15 << 28)
#define kNumTopBits 24
#define kTopValue ((UInt32)1 << kNumTopBits)
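For reference, the new history limit above works out to (UInt32)15 << 28 = 15 * 2^28 = 4,026,531,840 bytes = 3840 MiB, i.e. 4 GiB minus the 256 MiB reserve mentioned in the comment; the previous limit, (UInt32)3 << 29, was 1536 MiB.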
@ -78,13 +79,12 @@ void LzmaEncProps_Normalize(CLzmaEncProps *p)
if (p->dictSize > p->reduceSize)
{
unsigned i;
UInt32 reduceSize = (UInt32)p->reduceSize;
for (i = 11; i <= 30; i++)
{
if (reduceSize <= ((UInt32)2 << i)) { p->dictSize = ((UInt32)2 << i); break; }
if (reduceSize <= ((UInt32)3 << i)) { p->dictSize = ((UInt32)3 << i); break; }
}
UInt32 v = (UInt32)p->reduceSize;
const UInt32 kReduceMin = ((UInt32)1 << 12);
if (v < kReduceMin)
v = kReduceMin;
if (p->dictSize > v)
p->dictSize = v;
}
if (p->lc < 0) p->lc = 3;
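Worked example with hypothetical values: for reduceSize = 100 000 bytes and a requested 64 MB dictionary, the old loop rounded the dictionary to 131 072 (the next 2^k / 3*2^k step above reduceSize), while the new code clamps it directly to 100 000, subject to the 4 KB (kReduceMin) floor.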
@ -113,18 +113,85 @@ UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
return props.dictSize;
}
#if defined(_MSC_VER) && (_MSC_VER >= 1400)
/* BSR code is fast for some new CPUs */
/* #define LZMA_LOG_BSR */
/*
x86/x64:
BSR:
IF (SRC == 0) ZF = 1, DEST is undefined;
AMD : DEST is unchanged;
IF (SRC != 0) ZF = 0; DEST is index of top non-zero bit
BSR is slow in some processors
LZCNT:
IF (SRC == 0) CF = 1, DEST is size_in_bits_of_register(src) (32 or 64)
IF (SRC != 0) CF = 0, DEST = num_lead_zero_bits
IF (DEST == 0) ZF = 1;
LZCNT works only in new processors starting from Haswell.
if LZCNT is not supported by processor, then it's executed as BSR.
LZCNT can be faster than BSR, if supported.
*/
// #define LZMA_LOG_BSR
#if defined(MY_CPU_ARM_OR_ARM64) /* || defined(MY_CPU_X86_OR_AMD64) */
#if (defined(__clang__) && (__clang_major__ >= 6)) \
|| (defined(__GNUC__) && (__GNUC__ >= 6))
#define LZMA_LOG_BSR
#elif defined(_MSC_VER) && (_MSC_VER >= 1300)
// #if defined(MY_CPU_ARM_OR_ARM64)
#define LZMA_LOG_BSR
// #endif
#endif
#endif
// #include <intrin.h>
#ifdef LZMA_LOG_BSR
#define kDicLogSizeMaxCompress 32
#if defined(__clang__) \
|| defined(__GNUC__)
/*
C code : (30 - __builtin_clz(x))
gcc9/gcc10 for x64 /x86 : 30 - (bsr(x) xor 31)
clang10 for x64 : 31 + (bsr(x) xor -32)
*/
#define MY_clz(x) ((unsigned)__builtin_clz(x))
// __lzcnt32
// __builtin_ia32_lzcnt_u32
#else // #if defined(_MSC_VER)
#ifdef MY_CPU_ARM_OR_ARM64
#define MY_clz _CountLeadingZeros
#else // if defined(MY_CPU_X86_OR_AMD64)
// #define MY_clz __lzcnt // we can use lzcnt (unsupported by old CPU)
// _BitScanReverse code is not optimal for some MSVC compilers
#define BSR2_RET(pos, res) { unsigned long zz; _BitScanReverse(&zz, (pos)); zz--; \
res = (zz + zz) + (pos >> zz); }
#endif // MY_CPU_X86_OR_AMD64
#endif // _MSC_VER
#ifndef BSR2_RET
#define BSR2_RET(pos, res) { unsigned long zz; _BitScanReverse(&zz, (pos)); res = (zz + zz) + ((pos >> (zz - 1)) & 1); }
#define BSR2_RET(pos, res) { unsigned zz = 30 - MY_clz(pos); \
res = (zz + zz) + (pos >> zz); }
static unsigned GetPosSlot1(UInt32 pos)
#endif
unsigned GetPosSlot1(UInt32 pos);
unsigned GetPosSlot1(UInt32 pos)
{
unsigned res;
BSR2_RET(pos, res);
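A standalone sketch of the same slot computation, assuming GCC/clang for __builtin_clz (names here are illustrative, not part of the SDK); as in the SDK, the formula is only applied for pos >= 2, and GetPosSlot() handles smaller values separately:

#include <assert.h>
static unsigned PosSlot(unsigned pos)               /* mirrors BSR2_RET / GetPosSlot1 */
{
  unsigned zz = 30 - (unsigned)__builtin_clz(pos);  /* index of the top bit, minus 1 */
  return (zz + zz) + (pos >> zz);                   /* 2*floor(log2(pos)) + next bit */
}
int main(void)
{
  assert(PosSlot(1000) == 19);  /* top bit 9, next bit 1 -> slot 2*9 + 1 */
  assert(PosSlot(4) == 4);      /* top bit 2, next bit 0 -> slot 2*2 + 0 */
  return 0;
}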
@ -133,10 +200,10 @@ static unsigned GetPosSlot1(UInt32 pos)
#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); }
#else
#define kNumLogBits (9 + sizeof(size_t) / 2)
/* #define kNumLogBits (11 + sizeof(size_t) / 8 * 3) */
#else // ! LZMA_LOG_BSR
#define kNumLogBits (11 + sizeof(size_t) / 8 * 3)
#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7)
@ -183,7 +250,7 @@ static void LzmaEnc_FastPosInit(Byte *g_FastPos)
#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos & (kNumFullDistances - 1)]; else BSR2_RET(pos, res); }
#endif
#endif // LZMA_LOG_BSR
#define LZMA_NUM_REPS 4
@ -319,7 +386,7 @@ typedef UInt32 CProbPrice;
typedef struct
{
void *matchFinderObj;
IMatchFinder matchFinder;
IMatchFinder2 matchFinder;
unsigned optCur;
unsigned optEnd;
@ -364,10 +431,14 @@ typedef struct
// begin of CMatchFinderMt is used in LZ thread
CMatchFinderMt matchFinderMt;
// end of CMatchFinderMt is used in BT and HASH threads
// #else
// CMatchFinder matchFinderBase;
#endif
CMatchFinder matchFinderBase;
// we suppose that we have 8-bytes alignment after CMatchFinder
#ifndef _7ZIP_ST
Byte pad[128];
#endif
@ -375,8 +446,10 @@ typedef struct
// LZ thread
CProbPrice ProbPrices[kBitModelTotal >> kNumMoveReducingBits];
UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2 + 1];
// we want {len , dist} pairs to be 8-bytes aligned in matches array
UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2];
// we want 8-bytes alignment here
UInt32 alignPrices[kAlignTableSize];
UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax];
UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances];
@ -405,12 +478,19 @@ typedef struct
CSaveState saveState;
// BoolInt mf_Failure;
#ifndef _7ZIP_ST
Byte pad2[128];
#endif
} CLzmaEnc;
#define MFB (p->matchFinderBase)
/*
#ifndef _7ZIP_ST
#define MFB (p->matchFinderMt.MatchFinder)
#endif
*/
#define COPY_ARR(dest, src, arr) memcpy(dest->arr, src->arr, sizeof(src->arr));
@ -475,11 +555,21 @@ SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
if (props.lc > LZMA_LC_MAX
|| props.lp > LZMA_LP_MAX
|| props.pb > LZMA_PB_MAX
|| props.dictSize > ((UInt64)1 << kDicLogSizeMaxCompress)
|| props.dictSize > kLzmaMaxHistorySize)
|| props.pb > LZMA_PB_MAX)
return SZ_ERROR_PARAM;
if (props.dictSize > kLzmaMaxHistorySize)
props.dictSize = kLzmaMaxHistorySize;
#ifndef LZMA_LOG_BSR
{
const UInt64 dict64 = props.dictSize;
if (dict64 > ((UInt64)1 << kDicLogSizeMaxCompress))
return SZ_ERROR_PARAM;
}
#endif
p->dictSize = props.dictSize;
{
unsigned fb = (unsigned)props.fb;
@ -494,7 +584,7 @@ SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
p->pb = (unsigned)props.pb;
p->fastMode = (props.algo == 0);
// p->_maxMode = True;
p->matchFinderBase.btMode = (Byte)(props.btMode ? 1 : 0);
MFB.btMode = (Byte)(props.btMode ? 1 : 0);
{
unsigned numHashBytes = 4;
if (props.btMode)
@ -504,10 +594,10 @@ SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
}
if (props.numHashBytes >= 5) numHashBytes = 5;
p->matchFinderBase.numHashBytes = numHashBytes;
MFB.numHashBytes = numHashBytes;
}
p->matchFinderBase.cutValue = props.mc;
MFB.cutValue = props.mc;
p->writeEndMark = (BoolInt)props.writeEndMark;
@ -531,7 +621,7 @@ SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
void LzmaEnc_SetDataSize(CLzmaEncHandle pp, UInt64 expectedDataSiize)
{
CLzmaEnc *p = (CLzmaEnc *)pp;
p->matchFinderBase.expectedDataSize = expectedDataSiize;
MFB.expectedDataSize = expectedDataSiize;
}
@ -578,12 +668,11 @@ static int RangeEnc_Alloc(CRangeEnc *p, ISzAllocPtr alloc)
static void RangeEnc_Free(CRangeEnc *p, ISzAllocPtr alloc)
{
ISzAlloc_Free(alloc, p->bufBase);
p->bufBase = 0;
p->bufBase = NULL;
}
static void RangeEnc_Init(CRangeEnc *p)
{
/* Stream.Init(); */
p->range = 0xFFFFFFFF;
p->cache = 0;
p->low = 0;
@ -597,12 +686,12 @@ static void RangeEnc_Init(CRangeEnc *p)
MY_NO_INLINE static void RangeEnc_FlushStream(CRangeEnc *p)
{
size_t num;
if (p->res != SZ_OK)
return;
num = (size_t)(p->buf - p->bufBase);
if (num != ISeqOutStream_Write(p->outStream, p->bufBase, num))
p->res = SZ_ERROR_WRITE;
const size_t num = (size_t)(p->buf - p->bufBase);
if (p->res == SZ_OK)
{
if (num != ISeqOutStream_Write(p->outStream, p->bufBase, num))
p->res = SZ_ERROR_WRITE;
}
p->processed += num;
p->buf = p->bufBase;
}
@ -1007,7 +1096,11 @@ static unsigned ReadMatchDistances(CLzmaEnc *p, unsigned *numPairsRes)
p->additionalOffset++;
p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
numPairs = p->matchFinder.GetMatches(p->matchFinderObj, p->matches);
{
const UInt32 *d = p->matchFinder.GetMatches(p->matchFinderObj, p->matches);
// if (!d) { p->mf_Failure = True; *numPairsRes = 0; return 0; }
numPairs = (unsigned)(d - p->matches);
}
*numPairsRes = numPairs;
#ifdef SHOW_STAT
@ -1023,7 +1116,7 @@ static unsigned ReadMatchDistances(CLzmaEnc *p, unsigned *numPairsRes)
if (numPairs == 0)
return 0;
{
unsigned len = p->matches[(size_t)numPairs - 2];
const unsigned len = p->matches[(size_t)numPairs - 2];
if (len != p->numFastBytes)
return len;
{
@ -1033,7 +1126,7 @@ static unsigned ReadMatchDistances(CLzmaEnc *p, unsigned *numPairsRes)
{
const Byte *p1 = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
const Byte *p2 = p1 + len;
ptrdiff_t dif = (ptrdiff_t)-1 - (ptrdiff_t)p->matches[(size_t)numPairs - 1];
const ptrdiff_t dif = (ptrdiff_t)-1 - (ptrdiff_t)p->matches[(size_t)numPairs - 1];
const Byte *lim = p1 + numAvail;
for (; p2 != lim && *p2 == p2[dif]; p2++)
{}
@ -1189,6 +1282,8 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
repLens[i] = len;
if (len > repLens[repMaxIndex])
repMaxIndex = i;
if (len == LZMA_MATCH_LEN_MAX) // 21.03 : optimization
break;
}
if (repLens[repMaxIndex] >= p->numFastBytes)
@ -1201,10 +1296,12 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
}
matches = p->matches;
#define MATCHES matches
// #define MATCHES p->matches
if (mainLen >= p->numFastBytes)
{
p->backRes = matches[(size_t)numPairs - 1] + LZMA_NUM_REPS;
p->backRes = MATCHES[(size_t)numPairs - 1] + LZMA_NUM_REPS;
MOVE_POS(p, mainLen - 1)
return mainLen;
}
@ -1298,13 +1395,13 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
if (len < 2)
len = 2;
else
while (len > matches[offs])
while (len > MATCHES[offs])
offs += 2;
for (; ; len++)
{
COptimal *opt;
UInt32 dist = matches[(size_t)offs + 1];
UInt32 dist = MATCHES[(size_t)offs + 1];
UInt32 price = normalMatchPrice + GET_PRICE_LEN(&p->lenEnc, posState, len);
unsigned lenToPosState = GetLenToPosState(len);
@ -1328,7 +1425,7 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
opt->extra = 0;
}
if (len == matches[offs])
if (len == MATCHES[offs])
{
offs += 2;
if (offs == numPairs)
@ -1749,8 +1846,8 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
if (newLen > numAvail)
{
newLen = numAvail;
for (numPairs = 0; newLen > matches[numPairs]; numPairs += 2);
matches[numPairs] = (UInt32)newLen;
for (numPairs = 0; newLen > MATCHES[numPairs]; numPairs += 2);
MATCHES[numPairs] = (UInt32)newLen;
numPairs += 2;
}
@ -1769,9 +1866,9 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
}
offs = 0;
while (startLen > matches[offs])
while (startLen > MATCHES[offs])
offs += 2;
dist = matches[(size_t)offs + 1];
dist = MATCHES[(size_t)offs + 1];
// if (dist >= kNumFullDistances)
GetPosSlot2(dist, posSlot);
@ -1798,7 +1895,7 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
}
}
if (len == matches[offs])
if (len == MATCHES[offs])
{
// if (p->_maxMode) {
// MATCH : LIT : REP_0
@ -1863,7 +1960,7 @@ static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
offs += 2;
if (offs == numPairs)
break;
dist = matches[(size_t)offs + 1];
dist = MATCHES[(size_t)offs + 1];
// if (dist >= kNumFullDistances)
GetPosSlot2(dist, posSlot);
}
@ -2081,8 +2178,23 @@ static SRes CheckErrors(CLzmaEnc *p)
return p->result;
if (p->rc.res != SZ_OK)
p->result = SZ_ERROR_WRITE;
if (p->matchFinderBase.result != SZ_OK)
#ifndef _7ZIP_ST
if (
// p->mf_Failure ||
(p->mtMode &&
( // p->matchFinderMt.failure_LZ_LZ ||
p->matchFinderMt.failure_LZ_BT))
)
{
p->result = MY_HRES_ERROR__INTERNAL_ERROR;
// printf("\nCheckErrors p->matchFinderMt.failureLZ\n");
}
#endif
if (MFB.result != SZ_OK)
p->result = SZ_ERROR_READ;
if (p->result != SZ_OK)
p->finished = True;
return p->result;
@ -2223,11 +2335,11 @@ MY_NO_INLINE static void FillDistancesPrices(CLzmaEnc *p)
static void LzmaEnc_Construct(CLzmaEnc *p)
{
RangeEnc_Construct(&p->rc);
MatchFinder_Construct(&p->matchFinderBase);
MatchFinder_Construct(&MFB);
#ifndef _7ZIP_ST
p->matchFinderMt.MatchFinder = &MFB;
MatchFinderMt_Construct(&p->matchFinderMt);
p->matchFinderMt.MatchFinder = &p->matchFinderBase;
#endif
{
@ -2243,7 +2355,6 @@ static void LzmaEnc_Construct(CLzmaEnc *p)
LzmaEnc_InitPriceTables(p->ProbPrices);
p->litProbs = NULL;
p->saveState.litProbs = NULL;
}
CLzmaEncHandle LzmaEnc_Create(ISzAllocPtr alloc)
@ -2269,7 +2380,7 @@ static void LzmaEnc_Destruct(CLzmaEnc *p, ISzAllocPtr alloc, ISzAllocPtr allocBi
MatchFinderMt_Destruct(&p->matchFinderMt, allocBig);
#endif
MatchFinder_Free(&p->matchFinderBase, allocBig);
MatchFinder_Free(&MFB, allocBig);
LzmaEnc_FreeLits(p, alloc);
RangeEnc_Free(&p->rc, alloc);
}
@ -2287,6 +2398,12 @@ static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, UInt32 maxPackSize, UInt32 maxUnpa
UInt32 nowPos32, startPos32;
if (p->needInit)
{
#ifndef _7ZIP_ST
if (p->mtMode)
{
RINOK(MatchFinderMt_InitMt(&p->matchFinderMt));
}
#endif
p->matchFinder.Init(p->matchFinderObj);
p->needInit = 0;
}
@ -2582,11 +2699,13 @@ static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, UInt32 maxPackSize, UInt32 maxUnpa
static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig)
{
UInt32 beforeSize = kNumOpts;
UInt32 dictSize;
if (!RangeEnc_Alloc(&p->rc, alloc))
return SZ_ERROR_MEM;
#ifndef _7ZIP_ST
p->mtMode = (p->multiThread && !p->fastMode && (p->matchFinderBase.btMode != 0));
p->mtMode = (p->multiThread && !p->fastMode && (MFB.btMode != 0));
#endif
{
@ -2605,30 +2724,50 @@ static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAllocPtr alloc,
}
}
p->matchFinderBase.bigHash = (Byte)(p->dictSize > kBigHashDicLimit ? 1 : 0);
MFB.bigHash = (Byte)(p->dictSize > kBigHashDicLimit ? 1 : 0);
dictSize = p->dictSize;
if (dictSize == ((UInt32)2 << 30) ||
dictSize == ((UInt32)3 << 30))
{
/* 21.03 : here we reduce the dictionary for 2 reasons:
1) we don't want 32-bit back_distance matches in decoder for 2 GB dictionary.
2) we want to eliminate the useless last MatchFinder_Normalize3() for corner cases,
where the data size is aligned to 1 GB: 5/6/8 GB.
That reduction must be >= 1 for such corner cases. */
dictSize -= 1;
}
if (beforeSize + dictSize < keepWindowSize)
beforeSize = keepWindowSize - dictSize;
if (beforeSize + p->dictSize < keepWindowSize)
beforeSize = keepWindowSize - p->dictSize;
/* in the worst case we can look ahead for
max(LZMA_MATCH_LEN_MAX, numFastBytes + 1 + numFastBytes) bytes.
we send a larger value for (keepAfter) to MatchFinder_Create():
(numFastBytes + LZMA_MATCH_LEN_MAX + 1)
*/
#ifndef _7ZIP_ST
if (p->mtMode)
{
RINOK(MatchFinderMt_Create(&p->matchFinderMt, p->dictSize, beforeSize, p->numFastBytes,
LZMA_MATCH_LEN_MAX
+ 1 /* 18.04 */
RINOK(MatchFinderMt_Create(&p->matchFinderMt, dictSize, beforeSize,
p->numFastBytes, LZMA_MATCH_LEN_MAX + 1 /* 18.04 */
, allocBig));
p->matchFinderObj = &p->matchFinderMt;
p->matchFinderBase.bigHash = (Byte)(
(p->dictSize > kBigHashDicLimit && p->matchFinderBase.hashMask >= 0xFFFFFF) ? 1 : 0);
MFB.bigHash = (Byte)(
(p->dictSize > kBigHashDicLimit && MFB.hashMask >= 0xFFFFFF) ? 1 : 0);
MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder);
}
else
#endif
{
if (!MatchFinder_Create(&p->matchFinderBase, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig))
if (!MatchFinder_Create(&MFB, dictSize, beforeSize,
p->numFastBytes, LZMA_MATCH_LEN_MAX + 1 /* 21.03 */
, allocBig))
return SZ_ERROR_MEM;
p->matchFinderObj = &p->matchFinderBase;
MatchFinder_CreateVTable(&p->matchFinderBase, &p->matchFinder);
p->matchFinderObj = &MFB;
MatchFinder_CreateVTable(&MFB, &p->matchFinder);
}
return SZ_OK;
@ -2700,6 +2839,8 @@ static void LzmaEnc_Init(CLzmaEnc *p)
p->pbMask = ((unsigned)1 << p->pb) - 1;
p->lpMask = ((UInt32)0x100 << p->lp) - ((unsigned)0x100 >> p->lc);
// p->mf_Failure = False;
}
@ -2742,7 +2883,7 @@ static SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInS
ISzAllocPtr alloc, ISzAllocPtr allocBig)
{
CLzmaEnc *p = (CLzmaEnc *)pp;
p->matchFinderBase.stream = inStream;
MFB.stream = inStream;
p->needInit = 1;
p->rc.outStream = outStream;
return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig);
@ -2753,16 +2894,16 @@ SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp,
ISzAllocPtr alloc, ISzAllocPtr allocBig)
{
CLzmaEnc *p = (CLzmaEnc *)pp;
p->matchFinderBase.stream = inStream;
MFB.stream = inStream;
p->needInit = 1;
return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
}
static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen)
{
p->matchFinderBase.directInput = 1;
p->matchFinderBase.bufferBase = (Byte *)src;
p->matchFinderBase.directInputRem = srcLen;
MFB.directInput = 1;
MFB.bufferBase = (Byte *)src;
MFB.directInputRem = srcLen;
}
SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
@ -2804,9 +2945,12 @@ static size_t SeqOutStreamBuf_Write(const ISeqOutStream *pp, const void *data, s
size = p->rem;
p->overflow = True;
}
memcpy(p->data, data, size);
p->rem -= size;
p->data += size;
if (size != 0)
{
memcpy(p->data, data, size);
p->rem -= size;
p->data += size;
}
return size;
}
@ -2826,6 +2970,7 @@ const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp)
}
// (desiredPackSize == 0) is not allowed
SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, BoolInt reInit,
Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize)
{
@ -2846,14 +2991,10 @@ SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, BoolInt reInit,
if (reInit)
LzmaEnc_Init(p);
LzmaEnc_InitPrices(p);
nowPos64 = p->nowPos64;
RangeEnc_Init(&p->rc);
p->rc.outStream = &outStream.vt;
if (desiredPackSize == 0)
return SZ_ERROR_OUTPUT_EOF;
nowPos64 = p->nowPos64;
res = LzmaEnc_CodeOneBlock(p, desiredPackSize, *unpackSize);
*unpackSize = (UInt32)(p->nowPos64 - nowPos64);
@ -2895,7 +3036,7 @@ static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress)
LzmaEnc_Finish(p);
/*
if (res == SZ_OK && !Inline_MatchFinder_IsFinishedOK(&p->matchFinderBase))
if (res == SZ_OK && !Inline_MatchFinder_IsFinishedOK(&MFB))
res = SZ_ERROR_FAIL;
}
*/
@ -2914,29 +3055,37 @@ SRes LzmaEnc_Encode(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *i
SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size)
{
CLzmaEnc *p = (CLzmaEnc *)pp;
unsigned i;
UInt32 dictSize = p->dictSize;
if (*size < LZMA_PROPS_SIZE)
return SZ_ERROR_PARAM;
*size = LZMA_PROPS_SIZE;
props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc);
if (dictSize >= ((UInt32)1 << 22))
{
const UInt32 kDictMask = ((UInt32)1 << 20) - 1;
if (dictSize < (UInt32)0xFFFFFFFF - kDictMask)
dictSize = (dictSize + kDictMask) & ~kDictMask;
}
else for (i = 11; i <= 30; i++)
{
if (dictSize <= ((UInt32)2 << i)) { dictSize = ((UInt32)2 << i); break; }
if (dictSize <= ((UInt32)3 << i)) { dictSize = ((UInt32)3 << i); break; }
}
const CLzmaEnc *p = (const CLzmaEnc *)pp;
const UInt32 dictSize = p->dictSize;
UInt32 v;
props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc);
// we write aligned dictionary value to properties for lzma decoder
if (dictSize >= ((UInt32)1 << 21))
{
const UInt32 kDictMask = ((UInt32)1 << 20) - 1;
v = (dictSize + kDictMask) & ~kDictMask;
if (v < dictSize)
v = dictSize;
}
else
{
unsigned i = 11 * 2;
do
{
v = (UInt32)(2 + (i & 1)) << (i >> 1);
i++;
}
while (v < dictSize);
}
for (i = 0; i < 4; i++)
props[1 + i] = (Byte)(dictSize >> (8 * i));
return SZ_OK;
SetUi32(props + 1, v);
return SZ_OK;
}
}
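A standalone sketch of the new props rounding with two worked values (the helper name is illustrative, not part of the SDK): dictionaries below 2 MB are rounded up to the nearest 2^k or 3*2^k, larger ones up to a 1 MB multiple, so the value written to the header is never smaller than the dictionary the encoder actually used.

#include <stdio.h>
typedef unsigned int UInt32;
static UInt32 PropsDictValue(UInt32 dictSize)   /* mirrors the logic above */
{
  UInt32 v;
  if (dictSize >= ((UInt32)1 << 21))
  {
    const UInt32 kDictMask = ((UInt32)1 << 20) - 1;
    v = (dictSize + kDictMask) & ~kDictMask;    /* round up to a 1 MB multiple */
    if (v < dictSize)                           /* overflow guard near 4 GB */
      v = dictSize;
  }
  else
  {
    unsigned i = 11 * 2;
    do { v = (UInt32)(2 + (i & 1)) << (i >> 1); i++; } while (v < dictSize);
  }
  return v;
}
int main(void)
{
  printf("%u\n", PropsDictValue((1u << 20) + 1));  /* 1572864 = 3 << 19 */
  printf("%u\n", PropsDictValue((3u << 20) + 1));  /* 4194304 = rounded up to 4 MB */
  return 0;
}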

@ -1,5 +1,5 @@
/* MtCoder.c -- Multi-thread Coder
2021-02-09 : Igor Pavlov : Public domain */
2021-12-21 : Igor Pavlov : Public domain */
#include "Precomp.h"
@ -44,7 +44,7 @@ static WRes ArEvent_OptCreate_And_Reset(CEvent *p)
}
static THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE ThreadFunc(void *pp);
static THREAD_FUNC_DECL ThreadFunc(void *pp);
static SRes MtCoderThread_CreateAndStart(CMtCoderThread *t)
@ -335,7 +335,7 @@ static SRes ThreadFunc2(CMtCoderThread *t)
}
static THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE ThreadFunc(void *pp)
static THREAD_FUNC_DECL ThreadFunc(void *pp)
{
CMtCoderThread *t = (CMtCoderThread *)pp;
for (;;)
@ -495,12 +495,7 @@ SRes MtCoder_Code(CMtCoder *p)
{
RINOK_THREAD(ArEvent_OptCreate_And_Reset(&p->readEvent));
if (Semaphore_IsCreated(&p->blocksSemaphore))
{
RINOK_THREAD(Semaphore_Close(&p->blocksSemaphore));
}
RINOK_THREAD(Semaphore_Create(&p->blocksSemaphore, numBlocksMax, numBlocksMax));
RINOK_THREAD(Semaphore_OptCreateInit(&p->blocksSemaphore, numBlocksMax, numBlocksMax));
}
for (i = 0; i < MTCODER__BLOCKS_MAX - 1; i++)

@ -1,5 +1,5 @@
/* MtDec.c -- Multi-thread Decoder
2021-02-27 : Igor Pavlov : Public domain */
2021-12-21 : Igor Pavlov : Public domain */
#include "Precomp.h"
@ -102,7 +102,7 @@ typedef struct __CMtDecBufLink CMtDecBufLink;
static THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE ThreadFunc(void *pp);
static THREAD_FUNC_DECL ThreadFunc(void *pp);
static WRes MtDecThread_CreateEvents(CMtDecThread *t)
@ -836,7 +836,7 @@ static WRes ThreadFunc2(CMtDecThread *t)
#endif
static THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE ThreadFunc1(void *pp)
static THREAD_FUNC_DECL ThreadFunc1(void *pp)
{
WRes res;
@ -862,7 +862,7 @@ static THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE ThreadFunc1(void *pp)
return (THREAD_FUNC_RET_TYPE)(UINT_PTR)res;
}
static MY_NO_INLINE THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE ThreadFunc(void *pp)
static MY_NO_INLINE THREAD_FUNC_DECL ThreadFunc(void *pp)
{
#ifdef USE_ALLOCA
CMtDecThread *t = (CMtDecThread *)pp;

@ -1,11 +1,11 @@
/* Threads.c -- multithreading library
2021-04-25 : Igor Pavlov : Public domain */
2021-12-21 : Igor Pavlov : Public domain */
#include "Precomp.h"
#ifdef _WIN32
#ifndef UNDER_CE
#ifndef USE_THREADS_CreateThread
#include <process.h>
#endif
@ -63,10 +63,10 @@ WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param)
{
/* Windows Me/98/95: threadId parameter may not be NULL in _beginthreadex/CreateThread functions */
#ifdef UNDER_CE
#ifdef USE_THREADS_CreateThread
DWORD threadId;
*p = CreateThread(0, 0, func, param, 0, &threadId);
*p = CreateThread(NULL, 0, func, param, 0, &threadId);
#else
@ -82,7 +82,7 @@ WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param)
WRes Thread_Create_With_Affinity(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, CAffinityMask affinity)
{
#ifdef UNDER_CE
#ifdef USE_THREADS_CreateThread
UNUSED_VAR(affinity)
return Thread_Create(p, func, param);
@ -150,6 +150,17 @@ WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
return HandleToWRes(*p);
}
WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
{
// if (Semaphore_IsCreated(p))
{
WRes wres = Semaphore_Close(p);
if (wres != 0)
return wres;
}
return Semaphore_Create(p, initCount, maxCount);
}
static WRes Semaphore_Release(CSemaphore *p, LONG releaseCount, LONG *previousCount)
{ return BOOLToWRes(ReleaseSemaphore(*p, releaseCount, previousCount)); }
WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num)
@ -158,7 +169,9 @@ WRes Semaphore_Release1(CSemaphore *p) { return Semaphore_ReleaseN(p, 1); }
WRes CriticalSection_Init(CCriticalSection *p)
{
/* InitializeCriticalSection can raise only STATUS_NO_MEMORY exception */
/* InitializeCriticalSection() can raise exception:
Windows XP, 2003 : can raise a STATUS_NO_MEMORY exception
Windows Vista+ : no exceptions */
#ifdef _MSC_VER
__try
#endif
@ -167,7 +180,7 @@ WRes CriticalSection_Init(CCriticalSection *p)
/* InitializeCriticalSectionAndSpinCount(p, 0); */
}
#ifdef _MSC_VER
__except (EXCEPTION_EXECUTE_HANDLER) { return 1; }
__except (EXCEPTION_EXECUTE_HANDLER) { return ERROR_NOT_ENOUGH_MEMORY; }
#endif
return 0;
}
@ -406,6 +419,27 @@ WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
return 0;
}
WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
{
if (Semaphore_IsCreated(p))
{
/*
WRes wres = Semaphore_Close(p);
if (wres != 0)
return wres;
*/
if (initCount > maxCount || maxCount < 1)
return EINVAL;
// return EINVAL; // for debug
p->_count = initCount;
p->_maxCount = maxCount;
return 0;
}
return Semaphore_Create(p, initCount, maxCount);
}
WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 releaseCount)
{
UInt32 newCount;

@ -1,21 +1,25 @@
/* Threads.h -- multithreading library
2021-04-25 : Igor Pavlov : Public domain */
2021-12-21 : Igor Pavlov : Public domain */
#ifndef __7Z_THREADS_H
#define __7Z_THREADS_H
#ifdef _WIN32
#include <windows.h>
#include <Windows.h>
#else
#if !defined(__APPLE__) && !defined(_AIX)
#if defined(__linux__)
#if !defined(__APPLE__) && !defined(_AIX) && !defined(__ANDROID__)
#ifndef _7ZIP_AFFINITY_DISABLE
#define _7ZIP_AFFINITY_SUPPORTED
// #pragma message(" ==== _7ZIP_AFFINITY_SUPPORTED")
// #define _GNU_SOURCE
#endif
#endif
#endif
#include <pthread.h>
#endif
#include "7zTypes.h"
@ -34,8 +38,14 @@ typedef HANDLE CThread;
#define Thread_Close(p) HandlePtr_Close(p)
// #define Thread_Wait(p) Handle_WaitObject(*(p))
#ifdef UNDER_CE
// if (USE_THREADS_CreateThread is defined), we use CreateThread()
// if (USE_THREADS_CreateThread is not defined), we use _beginthreadex()
#define USE_THREADS_CreateThread
#endif
typedef
#ifdef UNDER_CE
#ifdef USE_THREADS_CreateThread
DWORD
#else
unsigned
@ -86,7 +96,30 @@ typedef UInt64 CCpuSet;
#define THREAD_FUNC_CALL_TYPE MY_STD_CALL
#define THREAD_FUNC_DECL THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE
#if defined(_WIN32) && defined(__GNUC__)
/* GCC compiler for x86 32-bit uses the rule:
the stack is 16-byte aligned before a CALL instruction for a function call.
But only the root function main() contains instructions that
set 16-byte alignment for the stack pointer. Other functions
just keep the alignment, if it was set in some parent function.
The problem:
if we create a new thread in MinGW (GCC) 32-bit x86 via _beginthreadex() or CreateThread(),
the root function of the thread doesn't set 16-byte alignment.
And stack frames in all child functions will also be unaligned in that case.
Here we set the (force_align_arg_pointer) attribute for the root function of the new thread.
Do we need (force_align_arg_pointer) also for other systems? */
#define THREAD_FUNC_ATTRIB_ALIGN_ARG __attribute__((force_align_arg_pointer))
// #define THREAD_FUNC_ATTRIB_ALIGN_ARG // for debug : bad alignment in SSE functions
#else
#define THREAD_FUNC_ATTRIB_ALIGN_ARG
#endif
#define THREAD_FUNC_DECL THREAD_FUNC_ATTRIB_ALIGN_ARG THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE
typedef THREAD_FUNC_RET_TYPE (THREAD_FUNC_CALL_TYPE * THREAD_FUNC_TYPE)(void *);
WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param);
WRes Thread_Create_With_Affinity(CThread *p, THREAD_FUNC_TYPE func, LPVOID param, CAffinityMask affinity);
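A minimal sketch of a thread entry point using the macro (assumption: MyJob and MyJob_Run are hypothetical caller names; THREAD_FUNC_DECL and Thread_Create are the SDK's, declared in Threads.h):

#include "Threads.h"   /* THREAD_FUNC_DECL, CThread, Thread_Create */

static THREAD_FUNC_DECL WorkerThreadFunc(void *param)
{
  /* on MinGW 32-bit x86, THREAD_FUNC_DECL now forces a 16-byte aligned
     stack here, so SSE code in callees stays safe */
  MyJob *job = (MyJob *)param;   /* hypothetical job type */
  MyJob_Run(job);                /* hypothetical worker */
  return 0;
}

/* started as usual:  CThread t;  Thread_Create(&t, WorkerThreadFunc, job); */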
@ -122,6 +155,7 @@ typedef HANDLE CSemaphore;
#define Semaphore_Close(p) HandlePtr_Close(p)
#define Semaphore_Wait(p) Handle_WaitObject(*(p))
WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num);
WRes Semaphore_Release1(CSemaphore *p);
@ -172,6 +206,7 @@ typedef struct _CSemaphore
#define Semaphore_IsCreated(p) ((p)->_created)
WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
WRes Semaphore_OptCreateInit(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num);
#define Semaphore_Release1(p) Semaphore_ReleaseN(p, 1)
WRes Semaphore_Wait(CSemaphore *p);

@ -1,5 +1,5 @@
/* LzmaUtil.c -- Test application for LZMA compression
2021-02-15 : Igor Pavlov : Public domain */
2021-11-01 : Igor Pavlov : Public domain */
#include "../../Precomp.h"
@ -12,6 +12,7 @@
#include "../../Alloc.h"
#include "../../7zFile.h"
#include "../../7zVersion.h"
#include "../../LzFind.h"
#include "../../LzmaDec.h"
#include "../../LzmaEnc.h"
@ -195,6 +196,8 @@ static int main2(int numArgs, const char *args[], char *rs)
int encodeMode;
BoolInt useOutFile = False;
LzFindPrepare();
FileSeqInStream_CreateVTable(&inStream);
File_Construct(&inStream.file);
inStream.wres = 0;
@ -276,7 +279,7 @@ static int main2(int numArgs, const char *args[], char *rs)
int MY_CDECL main(int numArgs, const char *args[])
{
char rs[800] = { 0 };
char rs[1000] = { 0 };
int res = main2(numArgs, args, rs);
fputs(rs, stdout);
return res;

@ -134,6 +134,10 @@ SOURCE=..\..\LzFindMt.h
# End Source File
# Begin Source File
SOURCE=..\..\LzFindOpt.c
# End Source File
# Begin Source File
SOURCE=..\..\LzHash.h
# End Source File
# Begin Source File

@ -8,8 +8,10 @@ LIB_OBJS = \
C_OBJS = \
$O\Alloc.obj \
$O\CpuArch.obj \
$O\LzFind.obj \
$O\LzFindMt.obj \
$O\LzFindOpt.obj \
$O\LzmaDec.obj \
$O\LzmaEnc.obj \
$O\7zFile.obj \

@ -8,8 +8,10 @@ OBJS = \
$O/7zFile.o \
$O/7zStream.o \
$O/Alloc.o \
$O/CpuArch.o \
$O/LzFind.o \
$O/LzFindMt.o \
$O/LzFindOpt.o \
$O/LzmaDec.o \
$O/LzmaEnc.o \
$O/LzmaUtil.o \

@ -136,6 +136,10 @@ SOURCE=..\..\LzFindMt.h
# End Source File
# Begin Source File
SOURCE=..\..\LzFindOpt.c
# End Source File
# Begin Source File
SOURCE=..\..\LzHash.h
# End Source File
# Begin Source File

@ -11,8 +11,10 @@ LIB_OBJS = \
C_OBJS = \
$O\Alloc.obj \
$O\CpuArch.obj \
$O\LzFind.obj \
$O\LzFindMt.obj \
$O\LzFindOpt.obj \
$O\LzmaDec.obj \
$O\LzmaEnc.obj \
$O\LzmaLib.obj \

@ -1,5 +1,5 @@
/* XzDec.c -- Xz Decode
2021-04-01 : Igor Pavlov : Public domain */
2021-09-04 : Igor Pavlov : Public domain */
#include "Precomp.h"
@ -773,7 +773,8 @@ static BoolInt Xz_CheckFooter(CXzStreamFlags flags, UInt64 indexSize, const Byte
#define READ_VARINT_AND_CHECK(buf, pos, size, res) \
{ unsigned s = Xz_ReadVarInt(buf + pos, size - pos, res); \
if (s == 0) return SZ_ERROR_ARCHIVE; pos += s; }
if (s == 0) return SZ_ERROR_ARCHIVE; \
pos += s; }
static BoolInt XzBlock_AreSupportedFilters(const CXzBlock *p)
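For context, a sketch of the variable-length integer format that Xz_ReadVarInt() parses (assumption: this decoder is illustrative, not the SDK's implementation): each byte carries 7 data bits, least-significant group first, and a set high bit means another byte follows.

#include "7zTypes.h"   /* Byte, UInt64 */

static unsigned Demo_ReadVarInt(const Byte *buf, size_t size, UInt64 *value)
{
  unsigned i;
  *value = 0;
  for (i = 0; i < size && i < 9; i++)
  {
    *value |= (UInt64)(buf[i] & 0x7F) << (7 * i);
    if ((buf[i] & 0x80) == 0)
      return i + 1;   /* number of bytes consumed */
  }
  return 0;           /* truncated or overlong: the macro maps 0 to SZ_ERROR_ARCHIVE */
}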

@ -1,5 +1,5 @@
/* XzIn.c - Xz input
2021-04-01 : Igor Pavlov : Public domain */
2021-09-04 : Igor Pavlov : Public domain */
#include "Precomp.h"
@ -26,7 +26,8 @@ SRes Xz_ReadHeader(CXzStreamFlags *p, ISeqInStream *inStream)
#define READ_VARINT_AND_CHECK(buf, pos, size, res) \
{ unsigned s = Xz_ReadVarInt(buf + pos, size - pos, res); \
if (s == 0) return SZ_ERROR_ARCHIVE; pos += s; }
if (s == 0) return SZ_ERROR_ARCHIVE; \
pos += s; }
SRes XzBlock_ReadHeader(CXzBlock *p, ISeqInStream *inStream, BoolInt *isIndex, UInt32 *headerSizeRes)
{

@ -9,4 +9,3 @@ USE_ASM=1
CC=$(CROSS_COMPILE)clang
CXX=$(CROSS_COMPILE)clang++
USE_CLANG=1

@ -9,4 +9,3 @@ USE_ASM=1
CC=$(CROSS_COMPILE)clang
CXX=$(CROSS_COMPILE)clang++
USE_CLANG=1

@ -8,4 +8,3 @@ MY_ARCH=-m32
USE_ASM=1
CC=$(CROSS_COMPILE)gcc
CXX=$(CROSS_COMPILE)g++

@ -49,5 +49,3 @@ CFLAGS_WARN_GCC_PPMD_UNALIGNED = \
CFLAGS_WARN = $(CFLAGS_WARN_GCC_9) \
# $(CFLAGS_WARN_GCC_PPMD_UNALIGNED)

@ -1,6 +1,42 @@
HISTORY of the LZMA SDK
-----------------------
21.07 2021-12-26
-------------------------
- New switches: -spm and -im!{file_path} to exclude directories from processing
for specified paths that do not end with a path separator character.
- The sorting order of files in archives was slightly changed to be more consistent
for cases where a directory name is identical to the prefix part of the name
of another directory or file.
21.06 2021-11-24
-------------------------
- Bug in the LZMA encoder in file LzmaEnc.c was fixed:
LzmaEnc_MemEncode(), LzmaEncode() and LzmaCompress() could work incorrectly,
if the size of the output buffer is smaller than the size required for all compressed data.
LzmaEnc_Encode() could work incorrectly,
if the callback ISeqOutStream::Write() doesn't write all compressed data.
NCompress::NLzma::CEncoder::Code() could work incorrectly,
if the callback ISequentialOutStream::Write() returns an error code.
(A caller-side sketch of the output-buffer case follows this entry.)
- Bug in versions 21.00-21.05 was fixed:
7-Zip didn't set attributes of directories during archive extraction.
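A caller-side sketch of the output-buffer case (assumption: illustrative use of LzmaCompress() from LzmaLib.h; the buffer names and parameter values are hypothetical, and the documented defaults are passed explicitly):

#include "LzmaLib.h"   /* LzmaCompress, SZ_* codes via 7zTypes.h */

static int Demo_Compress(unsigned char *outBuf, size_t outCapacity,
                         const unsigned char *inBuf, size_t inSize)
{
  unsigned char props[5];
  size_t propsSize = sizeof(props);
  size_t destLen = outCapacity;   /* in: capacity of outBuf, out: bytes written */
  int res = LzmaCompress(outBuf, &destLen, inBuf, inSize, props, &propsSize,
                         5, 1 << 24, 3, 0, 2, 32, 1);
                      /* level, dictSize, lc, lp, pb, fb, numThreads */
  /* with the 21.06 fix, a too-small outBuf is reported as SZ_ERROR_OUTPUT_EOF
     instead of producing inconsistent output */
  return res;
}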
21.04 beta 2021-11-02
-------------------------
- 7-Zip now reduces the number of working CPU threads for compression,
if there is not enough RAM for compression with a big LZMA2 dictionary.
- 7-Zip now can create and check "file.sha256" text files that contain the list
of file names and SHA-256 checksums in a format compatible with the sha256sum program.
21.03 beta 2021-07-20
-------------------------
- The maximum dictionary size for LZMA/LZMA2 compression was increased to 4 GB (3840 MiB).
- Minor speed optimizations in LZMA/LZMA2 compression.
21.02 alpha 2021-05-06
-------------------------
- The command line version of 7-Zip for macOS was released.

@ -1,4 +1,4 @@
LZMA SDK 21.02
LZMA SDK 22.01
--------------
LZMA SDK provides the documentation, samples, header files,
@ -62,14 +62,61 @@ LZMA SDK Contents
UNIX/Linux version
------------------
To compile C++ version of file->file LZMA encoding, go to directory
CPP/7zip/Bundles/LzmaCon
and call make to recompile it:
make -f makefile.gcc clean all
In some UNIX/Linux versions you must compile LZMA with static libraries.
To compile with static libraries, you can use
LIB = -lm -static
There are several options to compile 7-Zip with different compilers: gcc and clang.
Also the 7-Zip code contains two versions of some critical parts of the code: in C and in Assembler.
So if you compile the version with Assembler code, you will get a faster 7-Zip binary.
7-Zip's assembler code uses the following syntax for different platforms:
1) x86 and x86-64 (AMD64): MASM syntax.
There are 2 programs that support MASM syntax in Linux:
Asmc Macro Assembler and JWasm. But JWasm currently doesn't support some
CPU instructions used in 7-Zip.
So you must install Asmc Macro Assembler in Linux, if you want to compile the fastest version
of 7-Zip for x86 and x86-64:
https://github.com/nidud/asmc
2) arm64: GNU assembler for ARM64 with preprocessor.
The syntax of the arm64 assembler code in 7-Zip is supported by GCC and CLANG for ARM64.
There are different binaries that can be compiled from 7-Zip source.
There are 2 main files in the folder for compiling:
makefile     - can be used to compile the Windows version of 7-Zip with the nmake command
makefile.gcc - can be used to compile the Linux/macOS versions of 7-Zip with the make command
First, change the current folder to the folder that contains `makefile.gcc`:
cd CPP/7zip/Bundles/Alone7z
Then you can build with `makefile.gcc` using the command:
make -j -f makefile.gcc
Also there are additional "*.mak" files in the folder "CPP/7zip/" that can be used to compile
7-Zip binaries with optimized code and optimizing options.
To compile with GCC without assembler:
cd CPP/7zip/Bundles/Alone7z
make -j -f ../../cmpl_gcc.mak
To compile with CLANG without assembler:
make -j -f ../../cmpl_clang.mak
To compile 7-Zip for x86-64 with asmc assembler:
make -j -f ../../cmpl_gcc_x64.mak
To compile 7-Zip for arm64 with assembler:
make -j -f ../../cmpl_gcc_arm64.mak
To compile 7-Zip for arm64 for macOS:
make -j -f ../../cmpl_mac_arm64.mak
Also you can change some compiler options in the mak files:
cmpl_gcc.mak
var_gcc.mak
warn_gcc.mak
Also you can use p7zip (a port of 7-Zip for POSIX systems like Unix or Linux):

@ -77,6 +77,7 @@
- Apple Driver: Updated requirements to use Apple OpenCL API to macOS 13.0 - use
- Backend Checks: Describe workaround in error message when detecting more than 64 backend devices
- Brain: Added sanity check and corresponding error message for invalid --brain-port values
- Dependencies: Updated LZMA SDK to 22.01
- Modules: Added support for non-zero IVs for -m 6800 (Lastpass). Also added `tools/lastpass2hashcat.py`
- Open Document Format: Added support for small documents with content length < 1024
- Status Code: Add specific return code for self-test fail (-11)
