 typedef struct {
   int derive_key;
   EVP_CIPHER *evp_cipher;
+  EVP_CIPHER_CTX ectx;
+  HMAC_CTX hctx;
   int kdf_iter;
   int key_sz;
   int iv_sz;
@@ -427,18 +429,17 @@ void sqlcipher_codec_ctx_free(codec_ctx **iCtx) {
 }

 int sqlcipher_page_hmac(cipher_ctx *ctx, Pgno pgno, unsigned char *in, int in_sz, unsigned char *out) {
-  HMAC_CTX hctx;
-  HMAC_CTX_init(&hctx);
+  HMAC_CTX_init(&ctx->hctx);

-  HMAC_Init_ex(&hctx, ctx->hmac_key, ctx->key_sz, EVP_sha1(), NULL);
+  HMAC_Init_ex(&ctx->hctx, ctx->hmac_key, ctx->key_sz, EVP_sha1(), NULL);

   /* include the encrypted page data, initialization vector, and page number in HMAC. This will
      prevent both tampering with the ciphertext, manipulation of the IV, or resequencing otherwise
      valid pages out of order in a database */
-  HMAC_Update(&hctx, in, in_sz);
-  HMAC_Update(&hctx, (const unsigned char *) &pgno, sizeof(Pgno));
-  HMAC_Final(&hctx, out, NULL);
-  HMAC_CTX_cleanup(&hctx);
+  HMAC_Update(&ctx->hctx, in, in_sz);
+  HMAC_Update(&ctx->hctx, (const unsigned char *) &pgno, sizeof(Pgno));
+  HMAC_Final(&ctx->hctx, out, NULL);
+  HMAC_CTX_cleanup(&ctx->hctx);
   return SQLITE_OK;
 }

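For context, a minimal standalone sketch of the pattern the hunk above moves to: the HMAC_CTX lives in a long-lived context struct and is re-initialized for each page instead of being declared on the stack. This assumes the OpenSSL 1.0.x HMAC API used in the diff (HMAC_CTX_init/HMAC_CTX_cleanup were removed in OpenSSL 1.1.0); the page_ctx and page_hmac names are illustrative, not part of SQLCipher.

/* Sketch only: hypothetical names, OpenSSL 1.0.x HMAC API as used in the diff. */
#include <openssl/evp.h>
#include <openssl/hmac.h>

typedef unsigned int Pgno_t;          /* stand-in for SQLite's Pgno */

typedef struct {
  HMAC_CTX hctx;                      /* reused for every page, like cipher_ctx.hctx */
  unsigned char hmac_key[32];
  int key_sz;
} page_ctx;

/* Compute HMAC-SHA1 over the page data followed by the page number. */
static int page_hmac(page_ctx *ctx, Pgno_t pgno,
                     const unsigned char *in, int in_sz, unsigned char *out) {
  HMAC_CTX_init(&ctx->hctx);
  HMAC_Init_ex(&ctx->hctx, ctx->hmac_key, ctx->key_sz, EVP_sha1(), NULL);
  HMAC_Update(&ctx->hctx, in, in_sz);
  HMAC_Update(&ctx->hctx, (const unsigned char *)&pgno, sizeof(pgno));
  HMAC_Final(&ctx->hctx, out, NULL);
  HMAC_CTX_cleanup(&ctx->hctx);
  return 0;
}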
@@ -452,7 +453,6 @@ int sqlcipher_page_hmac(cipher_ctx *ctx, Pgno pgno, unsigned char *in, int in_sz
 */
 int sqlcipher_page_cipher(codec_ctx *ctx, int for_ctx, Pgno pgno, int mode, int page_sz, unsigned char *in, unsigned char *out) {
   cipher_ctx *c_ctx = for_ctx ? ctx->write_ctx : ctx->read_ctx;
-  EVP_CIPHER_CTX ectx;
   unsigned char *iv_in, *iv_out, *hmac_in, *hmac_out, *out_start;
   int tmp_csz, csz, size;

@@ -501,15 +501,15 @@ int sqlcipher_page_cipher(codec_ctx *ctx, int for_ctx, Pgno pgno, int mode, int
     }
   }

-  EVP_CipherInit(&ectx, c_ctx->evp_cipher, NULL, NULL, mode);
-  EVP_CIPHER_CTX_set_padding(&ectx, 0);
-  EVP_CipherInit(&ectx, NULL, c_ctx->key, iv_out, mode);
-  EVP_CipherUpdate(&ectx, out, &tmp_csz, in, size);
+  EVP_CipherInit(&c_ctx->ectx, c_ctx->evp_cipher, NULL, NULL, mode);
+  EVP_CIPHER_CTX_set_padding(&c_ctx->ectx, 0);
+  EVP_CipherInit(&c_ctx->ectx, NULL, c_ctx->key, iv_out, mode);
+  EVP_CipherUpdate(&c_ctx->ectx, out, &tmp_csz, in, size);
   csz = tmp_csz;
   out += tmp_csz;
-  EVP_CipherFinal(&ectx, out, &tmp_csz);
+  EVP_CipherFinal(&c_ctx->ectx, out, &tmp_csz);
   csz += tmp_csz;
-  EVP_CIPHER_CTX_cleanup(&ectx);
+  EVP_CIPHER_CTX_cleanup(&c_ctx->ectx);
   assert(size == csz);

   if(c_ctx->use_hmac && (mode == CIPHER_ENCRYPT)) {
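Similarly, a minimal sketch of the cipher-side pattern: an EVP_CIPHER_CTX embedded in a long-lived context, with the cipher selected first, padding disabled, then key and IV applied, and the context cleaned up per page. It assumes the OpenSSL 1.0.x EVP API used in the diff, where EVP_CIPHER_CTX is still a plain struct that can be embedded by value (in OpenSSL 1.1.0+ it is opaque and must be allocated with EVP_CIPHER_CTX_new); crypt_ctx and crypt_page are illustrative names, not SQLCipher's.

/* Sketch only: hypothetical names, OpenSSL 1.0.x EVP API as used in the diff. */
#include <assert.h>
#include <openssl/evp.h>

typedef struct {
  EVP_CIPHER_CTX ectx;            /* reused for every page, like cipher_ctx.ectx */
  const EVP_CIPHER *evp_cipher;   /* e.g. EVP_aes_256_cbc() */
  unsigned char key[32];
  unsigned char iv[16];
} crypt_ctx;

/* mode: 1 = encrypt, 0 = decrypt; size must be a multiple of the cipher block size. */
static int crypt_page(crypt_ctx *c, int mode,
                      const unsigned char *in, int size, unsigned char *out) {
  int tmp_csz = 0, csz = 0;

  EVP_CipherInit(&c->ectx, c->evp_cipher, NULL, NULL, mode);  /* select cipher first */
  EVP_CIPHER_CTX_set_padding(&c->ectx, 0);                    /* no block padding    */
  EVP_CipherInit(&c->ectx, NULL, c->key, c->iv, mode);        /* then key and IV     */
  EVP_CipherUpdate(&c->ectx, out, &tmp_csz, in, size);
  csz = tmp_csz;
  EVP_CipherFinal(&c->ectx, out + csz, &tmp_csz);
  csz += tmp_csz;
  EVP_CIPHER_CTX_cleanup(&c->ectx);

  assert(csz == size);            /* with padding off, output length equals input */
  return csz;
}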