author     Linus Torvalds <torvalds@linux-foundation.org>   2025-09-29 15:55:20 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2025-09-29 15:55:20 -0700
commit     1896ce8eb6c61824f6c1125d69d8fda1f44a22f8 (patch)
tree       b05121e615b6f063b0544a0e27db5d1c65ca23e6 /include
parent     Merge tag 'libcrypto-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/... (diff)
parent     fsverity: Use 2-way interleaved SHA-256 hashing when supported (diff)
Merge tag 'fsverity-for-linus' of git://git.kernel.org/pub/scm/fs/fsverity/linux
Pull interleaved SHA-256 hashing support from Eric Biggers:
 "Optimize fsverity with 2-way interleaved hashing

  Add support for 2-way interleaved SHA-256 hashing to lib/crypto/, and
  make fsverity use it for faster file data verification. This improves
  fsverity performance on many x86_64 and arm64 processors.

  Later, I plan to make dm-verity use this too"

* tag 'fsverity-for-linus' of git://git.kernel.org/pub/scm/fs/fsverity/linux:
  fsverity: Use 2-way interleaved SHA-256 hashing when supported
  fsverity: Remove inode parameter from fsverity_hash_block()
  lib/crypto: tests: Add tests and benchmark for sha256_finup_2x()
  lib/crypto: x86/sha256: Add support for 2-way interleaved hashing
  lib/crypto: arm64/sha256: Add support for 2-way interleaved hashing
  lib/crypto: sha256: Add support for 2-way interleaved hashing
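As a rough, hypothetical illustration of the kind of caller the pull message describes (the fsverity plumbing itself is not part of this include/ diff), the sketch below hashes data blocks two at a time from a shared prefix context when the interleaved path is available, and one at a time otherwise. Only the <crypto/sha2.h> declarations shown in the diff further down are real; every other name here is invented for illustration.

#include <crypto/sha2.h>

/* Hypothetical caller: @prefix_ctx already holds any shared prefix (e.g. a salt). */
static void hash_blocks(const struct sha256_ctx *prefix_ctx,
			const u8 *blocks, size_t block_size, size_t nr_blocks,
			u8 (*digests)[SHA256_DIGEST_SIZE])
{
	size_t i = 0;

	/*
	 * Pair up blocks while the 2-way interleaved path is worthwhile.
	 * (sha256_finup_2x() also works without the optimization; it then
	 * simply hashes the two messages sequentially.)
	 */
	if (sha256_finup_2x_is_optimized()) {
		for (; i + 2 <= nr_blocks; i += 2)
			sha256_finup_2x(prefix_ctx,
					blocks + i * block_size,
					blocks + (i + 1) * block_size,
					block_size,
					digests[i], digests[i + 1]);
	}

	/* Leftover block, or all blocks on the fallback path. */
	for (; i < nr_blocks; i++) {
		struct sha256_ctx ctx = *prefix_ctx;

		sha256_update(&ctx, blocks + i * block_size, block_size);
		sha256_final(&ctx, digests[i]);
	}
}

Gating on sha256_finup_2x_is_optimized() is not needed for correctness; it only matters when a caller would restructure its work to feed pairs of messages, as fsverity does for file data blocks.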
Diffstat (limited to 'include')
-rw-r--r--   include/crypto/sha2.h   28
1 file changed, 28 insertions, 0 deletions
diff --git a/include/crypto/sha2.h b/include/crypto/sha2.h
index 15e461e568cc..e5dafb935cc8 100644
--- a/include/crypto/sha2.h
+++ b/include/crypto/sha2.h
@@ -376,6 +376,34 @@ void sha256_final(struct sha256_ctx *ctx, u8 out[SHA256_DIGEST_SIZE]);
 void sha256(const u8 *data, size_t len, u8 out[SHA256_DIGEST_SIZE]);
 
 /**
+ * sha256_finup_2x() - Compute two SHA-256 digests from a common initial
+ * context. On some CPUs, this is faster than sequentially
+ * computing each digest.
+ * @ctx: an optional initial context, which may have already processed data. If
+ * NULL, a default initial context is used (equivalent to sha256_init()).
+ * @data1: data for the first message
+ * @data2: data for the second message
+ * @len: the length of each of @data1 and @data2, in bytes
+ * @out1: (output) the first SHA-256 message digest
+ * @out2: (output) the second SHA-256 message digest
+ *
+ * Context: Any context.
+ */
+void sha256_finup_2x(const struct sha256_ctx *ctx, const u8 *data1,
+ const u8 *data2, size_t len, u8 out1[SHA256_DIGEST_SIZE],
+ u8 out2[SHA256_DIGEST_SIZE]);
+
+/**
+ * sha256_finup_2x_is_optimized() - Check if sha256_finup_2x() is using a real
+ * interleaved implementation, as opposed to a
+ * sequential fallback
+ * @return: true if optimized
+ *
+ * Context: Any context.
+ */
+bool sha256_finup_2x_is_optimized(void);
+
+/**
  * struct hmac_sha256_key - Prepared key for HMAC-SHA256
  * @key: private
  */
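To make the new kerneldoc above concrete, here is a minimal reference sketch of what sha256_finup_2x() is documented to compute. This is not the kernel's implementation (which can interleave the two hash computations on CPUs with suitable SHA-256 support, per the pull message); it is just the equivalent sequential form expressed with the existing struct sha256_ctx API, and the helper name is invented for illustration.

#include <crypto/sha2.h>

/*
 * Reference semantics only: each output is the digest of the common
 * context's data followed by data1 or data2 respectively.
 */
static void sha256_finup_2x_reference(const struct sha256_ctx *ctx,
				      const u8 *data1, const u8 *data2,
				      size_t len,
				      u8 out1[SHA256_DIGEST_SIZE],
				      u8 out2[SHA256_DIGEST_SIZE])
{
	struct sha256_ctx tmp;

	if (ctx)
		tmp = *ctx;		/* resume from the shared prefix */
	else
		sha256_init(&tmp);	/* NULL means a default initial context */
	sha256_update(&tmp, data1, len);
	sha256_final(&tmp, out1);

	if (ctx)
		tmp = *ctx;
	else
		sha256_init(&tmp);
	sha256_update(&tmp, data2, len);
	sha256_final(&tmp, out2);
}

sha256_finup_2x_is_optimized() only reports whether the underlying implementation actually interleaves the two computations; the digests produced by sha256_finup_2x() are the same either way.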