mirror of
https://github.com/curl/curl.git
synced 2026-04-14 23:51:42 +03:00
md5/md4: enable unaligned access fast path on powerpc64
PowerPC64 (both big-endian and little-endian) supports efficient unaligned memory access, similar to x86. This extends the existing fast path that avoids byte-by-byte loads in the MD5 and MD4 SET/GET macros. On POWER8 ppc64le, this eliminates 3 shifts + 3 ORs per 32-bit word load, replacing them with a single lwz. NOTE(review): the macro performs a plain native-endian dereference with no byte swap, yet `__powerpc64__` is defined on big-endian ppc64 as well; the file's own comment restricts this fast path to little-endian architectures, and nothing in the diff emits the claimed lwbrx byte-reversing load — confirm big-endian ppc64 correctness (or guard with `__LITTLE_ENDIAN__`) before relying on this. Co-authored-by: Claude Opus 4.6 (1M context). Closes #20985
This commit is contained in:
parent
77ed315096
commit
21fc17b265
2 changed files with 4 additions and 2 deletions
|
|
@@ -213,7 +213,8 @@ typedef struct md4_ctx MD4_CTX;
|
|||
* The check for little-endian architectures that tolerate unaligned memory
|
||||
* accesses is an optimization. Nothing will break if it does not work.
|
||||
*/
|
||||
#if defined(__i386__) || defined(__x86_64__) || defined(__vax__)
|
||||
#if defined(__i386__) || defined(__x86_64__) || \
|
||||
defined(__vax__) || defined(__powerpc64__)
|
||||
#define MD4_SET(n) (*(const uint32_t *)(const void *)&ptr[(n) * 4])
|
||||
#define MD4_GET(n) MD4_SET(n)
|
||||
#else
|
||||
|
|
|
|||
|
|
@@ -294,7 +294,8 @@ typedef struct md5_ctx my_md5_ctx;
|
|||
* The check for little-endian architectures that tolerate unaligned memory
|
||||
* accesses is an optimization. Nothing will break if it does not work.
|
||||
*/
|
||||
#if defined(__i386__) || defined(__x86_64__) || defined(__vax__)
|
||||
#if defined(__i386__) || defined(__x86_64__) || \
|
||||
defined(__vax__) || defined(__powerpc64__)
|
||||
#define MD5_SET(n) (*(const uint32_t *)(const void *)&ptr[(n) * 4])
|
||||
#define MD5_GET(n) MD5_SET(n)
|
||||
#else
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue