bk://herbert.bkbits.net/cryptodev-2.6
herbert@gondor.apana.org.au|ChangeSet|20050331104212|27419 herbert

# This is a BitKeeper generated diff -Nru style patch.
#
# ChangeSet
#   2005/03/31 20:42:12+10:00 herbert@gondor.apana.org.au 
#   [CRYPTO] Add partial compression interface
#   
#   Some applications need to compress as much data as they can into a buffer of a fixed
#   size.  That is, they need a partial compression interface.  Within the
#   kernel this is currently needed by JFFS.
#   
#   Compressing part of the input could be significantly different from
#   compressing all of the input, depending on the algorithm.  In particular,
#   it could be more costly to do so and/or result in worse compression.
#   
#   So while such an interface is needed, it needs to be kept separate from
#   the full compression interface.
#   
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# include/linux/crypto.h
#   2005/03/31 20:41:35+10:00 herbert@gondor.apana.org.au +13 -0
#   [CRYPTO] Add partial compression interface
#   
#   Add partial compression interface with crypto_comp_pcompress, cot_pcompress for
#   compress_tfm, and coa_pcompress in compress_alg.
# 
# crypto/compress.c
#   2005/03/31 20:41:35+10:00 herbert@gondor.apana.org.au +12 -0
#   [CRYPTO] Add partial compression interface
#   
#   Add crypto_pcompress, which simply invokes the algorithm's coa_pcompress function
#   if it exists.  Expose it to all compression tfms as cot_pcompress.
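#   
#   For illustration only (not part of this patch), a caller of the new
#   interface might look roughly like the sketch below.  The helper name is
#   hypothetical, "deflate" is just an example algorithm, and the in/out
#   meaning of slen (amount of input actually consumed) is an assumption
#   based on the prototype.  Requires <linux/crypto.h>:
#   
#   static int try_pcompress(const u8 *src, unsigned int src_len,
#   			 u8 *dst, unsigned int dst_len)
#   {
#   	struct crypto_tfm *tfm = crypto_alloc_tfm("deflate", 0);
#   	unsigned int slen = src_len;	/* out: input consumed (assumed) */
#   	unsigned int dlen = dst_len;	/* in: capacity, out: bytes produced */
#   	int err;
#   
#   	if (!tfm)
#   		return -ENOMEM;
#   	err = crypto_comp_pcompress(tfm, src, &slen, dst, &dlen);
#   	crypto_free_tfm(tfm);
#   	return err;	/* -ENOSYS if the algorithm has no coa_pcompress */
#   }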
# 
# ChangeSet
#   2005/03/31 20:27:29+10:00 dedekind@infradead.org 
#   [CRYPTO] Call zlib end functions on deflate exit path
#   
#   One more thing I wanted to offer: in the
#   deflate_[compress|uncompress|pcompress] functions we call
#   zlib_[in|de]flateReset at the beginning.  That is fine.  But when we
#   unload the deflate module we never call zlib_[in|de]flateEnd to free the
#   zlib internal data.  That looks like a bug to me.  Please consider the
#   attached patch.
#   
#   Signed-off-by: Artem B. Bityuckiy <dedekind@infradead.org>
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/deflate.c
#   2005/03/31 20:23:52+10:00 dedekind@infradead.org +2 -0
#   [CRYPTO] Call zlib end functions on deflate exit path
# 
# ChangeSet
#   2005/03/27 16:38:52-08:00 akpm@bix.(none) 
#   Merge bix.(none):/usr/src/bk25 into bix.(none):/usr/src/bk-cryptodev
# 
# crypto/scatterwalk.h
#   2005/03/27 16:38:46-08:00 akpm@bix.(none) +0 -17
#   Auto merged
# 
# crypto/scatterwalk.c
#   2005/03/27 16:38:46-08:00 akpm@bix.(none) +0 -20
#   Auto merged
# 
# crypto/cipher.c
#   2005/03/27 16:38:46-08:00 akpm@bix.(none) +0 -84
#   Auto merged
# 
# ChangeSet
#   2005/03/22 22:19:24+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Remap when walk_out crosses page in crypt()
#   
#   This is needed so that we can keep the in_place assignment outside the inner loop.
#   Without this, in pathological situations we can start out with walk_out different
#   from walk_in, only to have them converge once walk_out crosses a page.
#   
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/cipher.c
#   2005/03/22 22:18:51+11:00 herbert@gondor.apana.org.au +3 -1
#   [CRYPTO]: Remap when walk_out crosses page in crypt()
# 
# ChangeSet
#   2005/03/22 21:56:53+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Split cbc_process into encrypt/decrypt
#   
#   Rather than taking a branch on the fast path, we might as well split cbc_process
#   into encrypt and decrypt since the two paths have nothing in common.
#   
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/cipher.c
#   2005/03/22 21:56:21+11:00 herbert@gondor.apana.org.au +25 -21
#   [CRYPTO]: Split cbc_process into encrypt/decrypt
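#   
#   As a standalone sketch of the idea (not the actual cipher.c code), the two
#   directions look like this once separated; note there is no per-block
#   encrypt-vs-decrypt branch left.  The block cipher callback type is
#   hypothetical, dst != src is assumed (the in-place case is dealt with by
#   crypt() itself), and memcpy comes from <linux/string.h>:
#   
#   typedef void (*blkfn_t)(u8 *dst, const u8 *src);
#   
#   static void cbc_encrypt_one(blkfn_t fn, u8 *iv, u8 *dst, const u8 *src,
#   			    unsigned int bsize)
#   {
#   	unsigned int i;
#   
#   	for (i = 0; i < bsize; i++)
#   		iv[i] ^= src[i];	/* chain plaintext into the IV */
#   	fn(dst, iv);			/* encrypt the chained block */
#   	memcpy(iv, dst, bsize);		/* ciphertext becomes the next IV */
#   }
#   
#   static void cbc_decrypt_one(blkfn_t fn, u8 *iv, u8 *dst, const u8 *src,
#   			    unsigned int bsize)
#   {
#   	unsigned int i;
#   
#   	fn(dst, src);			/* decrypt first */
#   	for (i = 0; i < bsize; i++)
#   		dst[i] ^= iv[i];	/* then undo the chaining */
#   	memcpy(iv, src, bsize);		/* this ciphertext chains the next block */
#   }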
# 
# ChangeSet
#   2005/03/22 21:34:04+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Kill obsolete iv check in cbc_process()
#   
#   We have long since stopped using a null cit_iv as a means of doing null encryption.
#   In fact it doesn't work here anyway, since we would need to copy src into dst to
#   achieve null encryption.
#   
#   No user of cbc_encrypt_iv/cbc_decrypt_iv does this either, so let's just get rid of
#   this check, which is sitting in the fast path.
#   
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/cipher.c
#   2005/03/22 21:33:25+11:00 herbert@gondor.apana.org.au +1 -5
#   [CRYPTO]: Kill obsolete iv check in cbc_process()
# 
# ChangeSet
#   2005/03/22 20:23:48+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Advance walk->data per block in crypt()
#   
#   Now that crypt() no longer calls scatterwalk_done for each block, we need to use
#   other methods to ensure that walk->data gets updated per block.  Without this we'll
#   keep on reading/writing the same block over and over again until we move to the next
#   page.
#   
#   The solution is to update walk->data in scatterwalk_advance.
#   
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/scatterwalk.h
#   2005/03/22 20:23:10+11:00 herbert@gondor.apana.org.au +1 -0
#   [CRYPTO]: Advance walk->data per block in crypt()
# 
# crypto/scatterwalk.c
#   2005/03/22 20:23:10+11:00 herbert@gondor.apana.org.au +11 -4
#   [CRYPTO]: Advance walk->data per block in crypt()
#   
#   As walk->data is advanced after each operation, it may now point to the first byte of
#   the next page.  So we need to take that into account when using it to unmap the page.
#   
#   Check sg->length to make sure that we can transfer at least one byte.
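#   
#   With that, scatterwalk_advance plausibly ends up along these lines; the
#   walk->data update is the one line this changeset adds, the rest is the
#   walk's existing bookkeeping:
#   
#   static inline void scatterwalk_advance(struct scatter_walk *walk,
#   				       unsigned int nbytes)
#   {
#   	walk->data += nbytes;		/* keep the mapped pointer in step */
#   	walk->offset += nbytes;
#   	walk->len_this_page -= nbytes;
#   	walk->len_this_segment -= nbytes;
#   }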
# 
# ChangeSet
#   2005/03/21 18:42:12+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Optimise kmap calls in crypt()
#   
#   Perform kmap once (or twice if the buffer is not aligned correctly) per page in
#   crypt() instead of the current code, which does it once per block.  Consequently
#   it will yield once per page instead of once per block.
#   
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/cipher.c
#   2005/03/21 18:41:41+11:00 herbert@gondor.apana.org.au +11 -7
#   [CRYPTO]: Optimise kmap calls in crypt()
#   
#   Perform kmap once (or twice if the buffer is not aligned correctly) per page in
#   crypt() instead of the current code, which does it once per block.  Consequently
#   it will yield once per page instead of once per block.
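#   
#   The resulting shape of the outer loop is roughly the following skeleton
#   (not the literal cipher.c code; the per-block body is summarised as a
#   comment):
#   
#   while (nbytes) {
#   	scatterwalk_map(&walk_in, 0);
#   	scatterwalk_map(&walk_out, 1);
#   
#   	do {
#   		/* per-block work: pick src/dst buffers, run the
#   		 * processing function, advance both walks */
#   		nbytes -= bsize;
#   	} while (nbytes && walk_in.len_this_page >= bsize &&
#   		 walk_out.len_this_page >= bsize);
#   
#   	scatterwalk_done(&walk_in, 0, nbytes);	/* kunmap, move on */
#   	scatterwalk_done(&walk_out, 1, nbytes);
#   
#   	crypto_yield(tfm);		/* once per page, not per block */
#   }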
# 
# ChangeSet
#   2005/03/20 22:19:30+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Eliminate most calls to scatterwalk_copychunks from crypt()
#   
#   Only call scatterwalk_copychunks when the block straddles a page boundary.  This
#   allows crypt() to skip the out-of-line call most of the time.
#   
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/scatterwalk.h
#   2005/03/20 22:18:58+11:00 herbert@gondor.apana.org.au +8 -0
#   [CRYPTO]: Eliminate most calls to scatterwalk_copychunks from crypt()
#   
#   Add scatterwalk_advance.
# 
# crypto/scatterwalk.c
#   2005/03/20 22:18:58+11:00 herbert@gondor.apana.org.au +10 -16
#   [CRYPTO]: Eliminate most calls to scatterwalk_copychunks from crypt()
#   
#   Only call scatterwalk_copychunks when the block straddles a page boundary.  Also let
#   the caller do the final walk update.
# 
# crypto/cipher.c
#   2005/03/20 22:18:58+11:00 herbert@gondor.apana.org.au +13 -3
#   [CRYPTO]: Eliminate most calls to scatterwalk_copychunks from crypt()
#   
#   Only call scatterwalk_copychunks from crypt() when the block straddles a page
#   boundary.
# 
# ChangeSet
#   2005/03/20 22:06:18+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Split src/dst handling out from crypt()
#   
#   Move src/dst handling from crypt() into the helpers prepare_src, prepare_dst,
#   complete_src and complete_dst.  complete_src doesn't actually do anything at the
#   moment but is included for completeness.
#   
#   This sets the stage for further optimisations down the track without polluting
#   crypt() itself.
#   
#   These helpers don't belong in scatterwalk.[ch] since they only help the particular
#   way that crypt() is walking the scatter lists.
#   
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/cipher.c
#   2005/03/20 22:05:46+11:00 herbert@gondor.apana.org.au +35 -11
#   [CRYPTO]: Split src/dst handling out from crypt()
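#   
#   Combined with the page-straddle condition from the changeset above (and
#   scatterwalk_across_pages from an earlier changeset, listed further below),
#   the dst-side helpers plausibly reduce to something like this sketch; it
#   uses only the names mentioned in this series and is not the literal
#   cipher.c code:
#   
#   static inline u8 *prepare_dst(struct scatter_walk *walk,
#   			      unsigned int bsize, u8 *tmp)
#   {
#   	/* Use the mapped page directly unless the block straddles a page
#   	 * boundary, in which case fall back to the bounce buffer. */
#   	return scatterwalk_across_pages(walk, bsize) ? tmp : walk->data;
#   }
#   
#   static inline void complete_dst(struct scatter_walk *walk, u8 *dst_p,
#   				unsigned int bsize)
#   {
#   	/* Copy out of the bounce buffer only if we had to use it;
#   	 * either way the walk moves forward by one block. */
#   	if (dst_p == walk->data)
#   		scatterwalk_advance(walk, bsize);
#   	else
#   		scatterwalk_copychunks(dst_p, walk, bsize, 1);
#   }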
# 
# ChangeSet
#   2005/03/20 21:21:56+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Handle in_place flag in crypt()
#   
#   Move the handling of in_place into crypt() itself.  This means that we only need two
#   temporary buffers instead of three.  It also allows us to simplify the check in
#   scatterwalk_samebuf.
#   
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/scatterwalk.h
#   2005/03/20 21:21:26+11:00 herbert@gondor.apana.org.au +2 -4
#   [CRYPTO]: Handle in_place flag in crypt()
#   
#   Since in_place is now handled together with the page boundary check, it is no longer
#   necessary to optimise for the page boundary case in scatterwalk_samebuf.
# 
# crypto/cipher.c
#   2005/03/20 21:21:26+11:00 herbert@gondor.apana.org.au +10 -15
#   [CRYPTO]: Handle in_place flag in crypt()
#   
#   Move the handling of in_place into crypt() itself.
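#   
#   With the page-boundary case handled in crypt(), the simplified check is
#   presumably just a page-and-offset comparison, along these lines:
#   
#   static inline int scatterwalk_samebuf(struct scatter_walk *walk_in,
#   				      struct scatter_walk *walk_out)
#   {
#   	return walk_in->page == walk_out->page &&
#   	       walk_in->offset == walk_out->offset;
#   }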
# 
# ChangeSet
#   2005/03/20 21:18:42+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Do scatterwalk_whichbuf inline
#   
#   scatterwalk_whichbuf is called once for each block, which could be as small as 8/16
#   bytes.  So it makes sense to do that work inline.
#   
#   It's also a bit inflexible since we may want to use the temporary buffer even if the
#   block doesn't cross page boundaries.  In particular, we want to do that when the
#   source and destination are the same.
#   
#   So let's replace it with scatterwalk_across_pages.
#   
#   I've also simplified the check in scatterwalk_across_pages.  It is sufficient to only
#   check len_this_page.
#   
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/scatterwalk.h
#   2005/03/20 21:18:09+11:00 herbert@gondor.apana.org.au +6 -1
#   [CRYPTO]: Do scatterwalk_whichbuf inline
#   
#   Remove scatterwalk_whichbuf and add scatterwalk_across_pages.
# 
# crypto/scatterwalk.c
#   2005/03/20 21:18:09+11:00 herbert@gondor.apana.org.au +0 -10
#   [CRYPTO]: Do scatterwalk_whichbuf inline
#   
#   Remove scatterwalk_whichbuf.
# 
# crypto/cipher.c
#   2005/03/20 21:18:09+11:00 herbert@gondor.apana.org.au +10 -2
#   [CRYPTO]: Do scatterwalk_whichbuf inline
#   
#   Use scatterwalk_across_pages instead of scatterwalk_whichbuf for better performance
#   and flexibility.
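#   
#   Given that len_this_page is all that matters, the new inline helper
#   presumably reduces to a single comparison; crypt() then falls back to the
#   temporary block buffer only when it returns true (or when forcing the
#   in-place path):
#   
#   static inline int scatterwalk_across_pages(struct scatter_walk *walk,
#   					   unsigned int nbytes)
#   {
#   	/* True if the next nbytes do not fit within the current page. */
#   	return nbytes > walk->len_this_page;
#   }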
# 
diff -Nru a/crypto/compress.c b/crypto/compress.c
--- a/crypto/compress.c	2005-04-04 18:40:35 -07:00
+++ b/crypto/compress.c	2005-04-04 18:40:35 -07:00
@@ -18,6 +18,17 @@
 #include <linux/string.h>
 #include "internal.h"
 
+static int crypto_pcompress(struct crypto_tfm *tfm,
+			    const u8 *src, unsigned int *slen,
+			    u8 *dst, unsigned int *dlen)
+{
+	if (!tfm->__crt_alg->cra_compress.coa_pcompress)
+		return -ENOSYS;
+	return tfm->__crt_alg->cra_compress.coa_pcompress(crypto_tfm_ctx(tfm),
+							  src, slen, dst,
+							  dlen);
+}
+
 static int crypto_compress(struct crypto_tfm *tfm,
                             const u8 *src, unsigned int slen,
                             u8 *dst, unsigned int *dlen)
@@ -50,6 +61,7 @@
 	if (ret)
 		goto out;
 
+	ops->cot_pcompress = crypto_pcompress;
 	ops->cot_compress = crypto_compress;
 	ops->cot_decompress = crypto_decompress;
 	
diff -Nru a/crypto/deflate.c b/crypto/deflate.c
--- a/crypto/deflate.c	2005-04-04 18:40:35 -07:00
+++ b/crypto/deflate.c	2005-04-04 18:40:35 -07:00
@@ -93,11 +93,13 @@
 
 static void deflate_comp_exit(struct deflate_ctx *ctx)
 {
+	zlib_deflateEnd(&ctx->comp_stream);
 	vfree(ctx->comp_stream.workspace);
 }
 
 static void deflate_decomp_exit(struct deflate_ctx *ctx)
 {
+	zlib_inflateEnd(&ctx->decomp_stream);
 	kfree(ctx->decomp_stream.workspace);
 }
 
diff -Nru a/include/linux/crypto.h b/include/linux/crypto.h
--- a/include/linux/crypto.h	2005-04-04 18:40:35 -07:00
+++ b/include/linux/crypto.h	2005-04-04 18:40:35 -07:00
@@ -87,6 +87,8 @@
 struct compress_alg {
 	int (*coa_init)(void *ctx);
 	void (*coa_exit)(void *ctx);
+	int (*coa_pcompress)(void *ctx, const u8 *src, unsigned int *slen,
+			     u8 *dst, unsigned int *dlen);
 	int (*coa_compress)(void *ctx, const u8 *src, unsigned int slen,
 	                    u8 *dst, unsigned int *dlen);
 	int (*coa_decompress)(void *ctx, const u8 *src, unsigned int slen,
@@ -178,6 +180,9 @@
 };
 
 struct compress_tfm {
+	int (*cot_pcompress)(struct crypto_tfm *tfm,
+			     const u8 *src, unsigned int *slen,
+			     u8 *dst, unsigned int *dlen);
 	int (*cot_compress)(struct crypto_tfm *tfm,
 	                    const u8 *src, unsigned int slen,
 	                    u8 *dst, unsigned int *dlen);
@@ -363,6 +368,14 @@
 {
 	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
 	memcpy(dst, tfm->crt_cipher.cit_iv, len);
+}
+
+static inline int crypto_comp_pcompress(struct crypto_tfm *tfm,
+					const u8 *src, unsigned int *slen,
+					u8 *dst, unsigned int *dlen)
+{
+	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS);
+	return tfm->crt_compress.cot_pcompress(tfm, src, slen, dst, dlen);
 }
 
 static inline int crypto_comp_compress(struct crypto_tfm *tfm,