exynos9810: reset: crypto and f2fs from android-4.9-q

This commit is contained in:
krazey 2021-11-02 21:08:31 +01:00 committed by xxmustafacooTR
parent 8a9b2212b1
commit 1272e07f87
No known key found for this signature in database
GPG key ID: 520B6FE385CBF5C9
29 changed files with 272 additions and 1647 deletions

View file

@ -8,9 +8,6 @@
# published by the Free Software Foundation.
#
#Keep this at the top
obj-$(CONFIG_CRYPTO_FIPS) += first_file_asm.o
obj-$(CONFIG_CRYPTO_SHA1_ARM64_CE) += sha1-ce.o
sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o
@ -44,9 +41,3 @@ CFLAGS_crc32-arm64.o := -mcpu=generic+crc
$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
$(call if_changed_rule,cc_o_c)
# KASAN White list
KASAN_SANITIZE_sha2-ce-glue.o := n
#Keep this at the bottom
obj-$(CONFIG_CRYPTO_FIPS) += last_file_asm.o

View file

@ -23,6 +23,8 @@ comment "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
depends on MODULE_SIG
help
This option enables the fips boot option which is
required if you want the system to operate in a FIPS 200
@ -100,15 +102,6 @@ config CRYPTO_KPP
select CRYPTO_ALGAPI
select CRYPTO_KPP2
config CRYPTO_ACOMP2
tristate
select CRYPTO_ALGAPI2
config CRYPTO_ACOMP
tristate
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
config CRYPTO_RSA
tristate "RSA algorithm"
select CRYPTO_AKCIPHER
@ -145,7 +138,6 @@ config CRYPTO_MANAGER2
select CRYPTO_BLKCIPHER2
select CRYPTO_AKCIPHER2
select CRYPTO_KPP2
select CRYPTO_ACOMP2
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
@ -157,7 +149,7 @@ config CRYPTO_USER
config CRYPTO_MANAGER_DISABLE_TESTS
bool "Disable run-time self tests"
default n
default y
depends on CRYPTO_MANAGER2
help
Disable run-time self tests that normally take place at
@ -235,7 +227,7 @@ config CRYPTO_AUTHENC
config CRYPTO_TEST
tristate "Testing module"
default n
depends on m
select CRYPTO_MANAGER
help
Quick & dirty crypto test module.
@ -1760,36 +1752,6 @@ config CRYPTO_USER_API_AEAD
config CRYPTO_HASH_INFO
bool
config CRYPTO_POST_DEFERRED_INIT
bool "FIPS POST via deferred_module_init"
default n
help
This is FIPS 140-2 selftest timing indication,
i.e. tcrypt_mod_init. FIPS requires automatic
selftest without user's intervention.
Only one CONFIG should be selected from following.
POST_DEFERRED_INIT / POST_LATE_INIT_SYNC / POST_LATE_INIT / none
config CRYPTO_POST_LATE_INIT_SYNC
bool "FIPS POST via late_initcall_sync"
default n
help
This is FIPS 140-2 selftest timing indication,
i.e. tcrypt_mod_init. FIPS requires automatic
selftest without user's intervention.
Only one CONFIG should be selected from following.
POST_DEFERRED_INIT / POST_LATE_INIT_SYNC / POST_LATE_INIT / none
config CRYPTO_POST_LATE_INIT
bool "FIPS POST via late_initcall"
default n
help
This is FIPS 140-2 selftest timing indication,
i.e. tcrypt_mod_init. FIPS requires automatic
selftest without user's intervention.
Only one CONFIG should be selected from following.
POST_DEFERRED_INIT / POST_LATE_INIT_SYNC / POST_LATE_INIT / none
source "drivers/crypto/Kconfig"
source crypto/asymmetric_keys/Kconfig
source certs/Kconfig

View file

@ -2,16 +2,12 @@
# Cryptographic API
#
# ---------------------------------------
# Keep this at the top of FIPS boundary
obj-$(CONFIG_CRYPTO_FIPS) += first_file.o
obj-$(CONFIG_CRYPTO_FIPS) += fips_integrity.o
obj-$(CONFIG_CRYPTO) += crypto.o
crypto-y := api.o cipher.o compress.o memneq.o
obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o
obj-$(CONFIG_CRYPTO_FIPS) += fips.o
crypto_algapi-$(CONFIG_PROC_FS) += proc.o
@ -31,36 +27,6 @@ crypto_hash-y += ahash.o
crypto_hash-y += shash.o
obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
CFLAGS_testmgr.o = -fno-merge-constants
cryptomgr-y := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
obj-$(CONFIG_CRYPTO_CTR) += ctr.o
obj-$(CONFIG_CRYPTO_GCM) += gcm.o
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_DRBG) += drbg.o
obj-$(CONFIG_CRYPTO_JITTERENTROPY) += jitterentropy_rng.o
CFLAGS_jitterentropy.o = -O0
jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
# Keep this at the bottom of FIPS boundary
obj-$(CONFIG_CRYPTO_FIPS) += last_file.o
obj-$(CONFIG_CRYPTO_FIPS) += fips_out.o
# ---------------------------------------
obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o
obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
obj-$(CONFIG_CRYPTO_KPP2) += kpp.o
@ -85,11 +51,12 @@ rsa_generic-y += rsa_helper.o
rsa_generic-y += rsa-pkcs1pad.o
obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
obj-$(CONFIG_CRYPTO_ACOMP2) += scompress.o
cryptomgr-y := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
@ -99,15 +66,21 @@ obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o
obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
obj-$(CONFIG_CRYPTO_RMD256) += rmd256.o
obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
obj-$(CONFIG_CRYPTO_CTS) += cts.o
obj-$(CONFIG_CRYPTO_LRW) += lrw.o
obj-$(CONFIG_CRYPTO_XTS) += xts.o
obj-$(CONFIG_CRYPTO_CTR) += ctr.o
obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o
obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o
obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o
@ -124,6 +97,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
obj-$(CONFIG_CRYPTO_CAST5) += cast5_generic.o
@ -141,11 +115,19 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
obj-$(CONFIG_CRYPTO_842) += 842.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
obj-$(CONFIG_CRYPTO_DRBG) += drbg.o
obj-$(CONFIG_CRYPTO_JITTERENTROPY) += jitterentropy_rng.o
CFLAGS_jitterentropy.o = -O0
jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o

View file

@ -45,11 +45,6 @@ void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
struct ablkcipher_buffer *p, *tmp;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return;
#endif
list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
ablkcipher_buffer_write(p);
list_del(&p->entry);
@ -108,11 +103,6 @@ int ablkcipher_walk_done(struct ablkcipher_request *req,
unsigned int n; /* bytes processed */
bool more;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
if (unlikely(err < 0))
goto finish;

View file

@ -86,11 +86,6 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
unsigned int alignmask = walk->alignmask;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
walk->data -= walk->offset;
if (walk->entrylen && (walk->offset & alignmask) && !err) {
@ -138,12 +133,6 @@ EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk)
{
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
walk->total = req->nbytes;
if (!walk->total) {
@ -379,11 +368,6 @@ static int crypto_ahash_op(struct ahash_request *req,
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
if ((unsigned long)req->result & alignmask)
return ahash_op_unaligned(req, op);
@ -494,11 +478,6 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
hash->setkey = ahash_nosetkey;
hash->export = ahash_no_export;
hash->import = ahash_no_import;
@ -636,11 +615,6 @@ int ahash_register_instance(struct crypto_template *tmpl,
{
int err;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
err = ahash_prepare_alg(&inst->alg);
if (err)
return err;

View file

@ -51,13 +51,7 @@ static inline void crypto_check_module_sig(struct module *mod)
static int crypto_check_alg(struct crypto_alg *alg)
{
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err())) {
pr_err("crypto_check_alg failed due to FIPS error: %s",
alg->cra_name);
return -EACCES;
}
#endif
crypto_check_module_sig(alg->cra_module);
if (alg->cra_alignmask & (alg->cra_alignmask + 1))
return -EINVAL;
@ -374,14 +368,6 @@ int crypto_register_alg(struct crypto_alg *alg)
struct crypto_larval *larval;
int err;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err())) {
pr_err("Unable to registrer alg: %s because of FIPS ERROR\n"
, alg->cra_name);
return -EACCES;
}
#endif
alg->cra_flags &= ~CRYPTO_ALG_DEAD;
err = crypto_check_alg(alg);
if (err)
@ -474,11 +460,6 @@ int crypto_register_template(struct crypto_template *tmpl)
struct crypto_template *q;
int err = -EEXIST;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
down_write(&crypto_alg_sem);
crypto_check_module_sig(tmpl->module);
@ -512,6 +493,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
list = &tmpl->instances;
hlist_for_each_entry(inst, list, list) {
int err = crypto_remove_alg(&inst->alg, &users);
BUG_ON(err);
}
@ -548,12 +530,6 @@ static struct crypto_template *__crypto_lookup_template(const char *name)
struct crypto_template *crypto_lookup_template(const char *name)
{
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err())) {
pr_err("crypto_lookup failed due to FIPS error: %s", name);
return ERR_PTR(-EACCES);
}
#endif
return try_then_request_module(__crypto_lookup_template(name),
"crypto-%s", name);
}
@ -565,11 +541,6 @@ int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_larval *larval;
int err;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
err = crypto_check_alg(&inst->alg);
if (err)
return err;
@ -631,11 +602,6 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
{
int err = -EAGAIN;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
spawn->inst = inst;
spawn->mask = mask;
@ -872,11 +838,6 @@ void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
char *p;
int err;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return ERR_PTR(-EACCES);
#endif
p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn),
GFP_KERNEL);
if (!p)
@ -903,11 +864,6 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
struct crypto_spawn *spawn;
int err;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return ERR_PTR(-EACCES);
#endif
inst = crypto_alloc_instance2(name, alg, 0);
if (IS_ERR(inst))
goto out;
@ -944,11 +900,6 @@ int crypto_enqueue_request(struct crypto_queue *queue,
{
int err = -EINPROGRESS;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
if (unlikely(queue->qlen >= queue->max_qlen)) {
err = -EBUSY;
if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
@ -1068,21 +1019,13 @@ EXPORT_SYMBOL_GPL(crypto_type_has_alg);
static int __init crypto_algapi_init(void)
{
#ifndef CONFIG_CRYPTO_FIPS
crypto_init_proc();
#else
//Moved to testmgr
#endif
return 0;
}
static void __exit crypto_algapi_exit(void)
{
#ifndef CONFIG_CRYPTO_FIPS
crypto_exit_proc();
#else
//Moved to testmgr
#endif
}
module_init(crypto_algapi_init);

View file

@ -67,20 +67,9 @@ static int cryptomgr_probe(void *data)
int err;
tmpl = crypto_lookup_template(param->template);
#ifndef CONFIG_CRYPTO_FIPS
if (!tmpl)
goto out;
#else
/* change@dtl.ksingh
* Below if condition needs to test for valid point
* but instead it was testing for NULL. Crypto APIs never
* return NULL, hence in failure case this was causing
* kernel panic
*/
if (!tmpl || IS_ERR(tmpl))
goto out;
#endif
do {
if (tmpl->create) {
err = tmpl->create(tmpl, param->tb);
@ -225,8 +214,9 @@ static int cryptomgr_test(void *data)
u32 type = param->type;
int err = 0;
// skip test procedure if it's not from POST.
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
goto skiptest;
#endif
if (type & CRYPTO_ALG_TESTED)
goto skiptest;

View file

@ -370,11 +370,6 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
unsigned int tfm_size;
int err = -ENOMEM;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return ERR_PTR(-EACCES);
#endif
tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
tfm = kzalloc(tfm_size, GFP_KERNEL);
if (tfm == NULL)
@ -431,11 +426,6 @@ struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
struct crypto_tfm *tfm;
int err;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return ERR_PTR(-EACCES);
#endif
for (;;) {
struct crypto_alg *alg;
@ -474,12 +464,6 @@ void *crypto_create_tfm(struct crypto_alg *alg,
unsigned int total;
int err = -ENOMEM;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err())) {
pr_err("Fail %s due to fips error state.\n", __func__);
return ERR_PTR(-EACCES);
}
#endif
tfmsize = frontend->tfmsize;
total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
@ -559,11 +543,6 @@ void *crypto_alloc_tfm(const char *alg_name,
void *tfm;
int err;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return ERR_PTR(-EACCES);
#endif
for (;;) {
struct crypto_alg *alg;
@ -593,14 +572,6 @@ err:
}
EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
#ifdef CONFIG_CRYPTO_FIPS_FUNC_TEST
/*
 * hexdump - dump @len bytes of @buf to the kernel log for FIPS
 * functional (zeroization) testing.  Output lines are prefixed with
 * "FIPS FUNC : " and show offsets; ASCII column is disabled.
 * Only built under CONFIG_CRYPTO_FIPS_FUNC_TEST.
 */
static void hexdump(unsigned char *buf, unsigned int len)
{
print_hex_dump(KERN_INFO, "FIPS FUNC : ", DUMP_PREFIX_OFFSET,
16, 1, buf, len, false);
}
#endif
/*
* crypto_destroy_tfm - Free crypto transform
* @mem: Start of tfm slab
@ -622,21 +593,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
alg->cra_exit(tfm);
crypto_exit_ops(tfm);
crypto_mod_put(alg);
#ifdef CONFIG_CRYPTO_FIPS_FUNC_TEST
if (!strcmp("zeroization", get_fips_functest_mode())) {
int t = ksize(mem);
pr_err("FIPS FUNC : Zeroization %s %d\n", __func__, t);
hexdump(mem, t);
kzfree(mem);
pr_err("FIPS FUNC : Zeroization %s %d\n", __func__, t);
hexdump(mem, t);
} else {
kzfree(mem);
}
#else
kzfree(mem);
#endif
}
EXPORT_SYMBOL_GPL(crypto_destroy_tfm);

View file

@ -103,11 +103,6 @@ int blkcipher_walk_done(struct blkcipher_desc *desc,
unsigned int n; /* bytes processed */
bool more;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return (-EACCES);
#endif
if (unlikely(err < 0))
goto finish;
@ -326,11 +321,6 @@ EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return (-EACCES);
#endif
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
@ -434,10 +424,6 @@ static int async_encrypt(struct ablkcipher_request *req)
.flags = req->base.flags,
};
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return (-EACCES);
#endif
return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}
@ -452,11 +438,6 @@ static int async_decrypt(struct ablkcipher_request *req)
.flags = req->base.flags,
};
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return (-EACCES);
#endif
return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

View file

@ -70,11 +70,6 @@ static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
u8 buffer[size + alignmask];
u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return;
#endif
memcpy(tmp, src, size);
fn(tfm, tmp, tmp);
memcpy(dst, tmp, size);
@ -86,11 +81,6 @@ static void cipher_encrypt_unaligned(struct crypto_tfm *tfm,
unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return;
#endif
if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
cipher_crypt_unaligned(cipher->cia_encrypt, tfm, dst, src);
return;
@ -105,11 +95,6 @@ static void cipher_decrypt_unaligned(struct crypto_tfm *tfm,
unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return;
#endif
if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
cipher_crypt_unaligned(cipher->cia_decrypt, tfm, dst, src);
return;

View file

@ -115,21 +115,6 @@ nla_put_failure:
return -EMSGSIZE;
}
/*
 * crypto_report_acomp - emit a CRYPTOCFGA_REPORT_ACOMP netlink attribute
 * describing an asynchronous-compression algorithm for the crypto_user
 * reporting interface.
 *
 * Returns 0 on success, -EMSGSIZE if the attribute does not fit in @skb.
 *
 * NOTE(review): strncpy() does not guarantee NUL termination when the
 * source fills racomp.type exactly, and the rest of the struct is not
 * zeroed before being copied to userspace — confirm racomp.type is large
 * enough for "acomp" + NUL and whether a memset is needed here.
 */
static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_acomp racomp;
strncpy(racomp.type, "acomp", sizeof(racomp.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
sizeof(struct crypto_report_acomp), &racomp))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_akcipher rakcipher;
@ -204,11 +189,7 @@ static int crypto_report_one(struct crypto_alg *alg,
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_ACOMPRESS:
if (crypto_report_acomp(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_AKCIPHER:
if (crypto_report_akcipher(skb, alg))
goto nla_put_failure;

View file

@ -100,11 +100,6 @@
#include <crypto/drbg.h>
#include <linux/kernel.h>
#ifdef CONFIG_CRYPTO_FIPS
#include "internal.h"
#define ENTROPY_BLOCK_LEN 20
#endif
/***************************************************************
* Backend cipher definitions available to DRBG
***************************************************************/
@ -552,13 +547,6 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
int ret;
int len = min_t(int, buflen, INT_MAX);
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err())) {
pr_err("FIPS : drbg.c:%s FIPS in Error!!!\n", __func__);
return -EACCES;
}
#endif
/* 10.2.1.5.2 step 2 */
if (addtl && !list_empty(addtl)) {
ret = drbg_ctr_update(drbg, addtl, 2);
@ -675,13 +663,6 @@ static int drbg_hmac_generate(struct drbg_state *drbg,
struct drbg_string data;
LIST_HEAD(datalist);
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err())) {
pr_err("FIPS : drbg.c:%s FIPS in Error!!!\n", __func__);
return -EACCES;
}
#endif
/* 10.1.2.5 step 2 */
if (addtl && !list_empty(addtl)) {
ret = drbg_hmac_update(drbg, addtl, 1);
@ -912,13 +893,6 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
struct drbg_string data;
LIST_HEAD(datalist);
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err())) {
pr_err("FIPS : drbg.c:%s FIPS in Error!!!\n", __func__);
return -EACCES;
}
#endif
/* 10.1.1.4 step hashgen 2 */
memcpy(src, drbg->V, drbg_statelen(drbg));
@ -1011,71 +985,6 @@ static const struct drbg_state_ops drbg_hash_ops = {
* Functions common for DRBG implementations
******************************************************************/
#ifdef CONFIG_CRYPTO_FIPS
#define NUM_MAX_READ_COUNT 16
/*
 * get_blocking_random_bytes - fill @entropy with @len bytes read from the
 * blocking pool (/dev/random), retrying short reads up to
 * NUM_MAX_READ_COUNT times.
 *
 * Returns 0 on success, -EINVAL on a NULL buffer or zero length, -ENOENT
 * if /dev/random cannot be opened, -EAGAIN if the retry budget is
 * exhausted before @len bytes were collected.
 */
static int get_blocking_random_bytes(u8 *entropy, unsigned int len)
{
struct file *filp = NULL;
u8 *buf = entropy;
int length_req = len;
int length_read = 0;
int length_ret = 0;
int i = NUM_MAX_READ_COUNT;
mm_segment_t oldfs;
/* Reject a NULL destination or a zero-length request up front. */
if (!buf || length_req == 0)
return -EINVAL;
filp = filp_open("/dev/random", O_RDONLY, 0);
if (IS_ERR(filp)) {
pr_info("FIPS : DRBG cannot open blocking pool as entropy\n");
return -ENOENT;
}
oldfs = get_fs();
/* set_fs(KERNEL_DS); */
/*
 * NOTE(review): widens the user-access limit by writing addr_limit
 * directly instead of calling set_fs() (see the commented-out call
 * above) so that ->read() accepts the kernel buffer — confirm this
 * is equivalent to set_fs(KERNEL_DS) on this kernel/arch.
 */
current_thread_info()->addr_limit = KERNEL_DS;
memset((void *)buf, 0, length_req);
do {
/*
 * Read the remaining bytes; a short or failed read consumes one
 * unit of the retry budget.  NOTE(review): a negative return from
 * ->read() is not treated as fatal here — the loop just retries
 * until i reaches 0.
 */
length_ret = (int)filp->f_op->read(filp, &(buf[length_read]), length_req-length_read,
&filp->f_pos);
if (length_ret > 0)
length_read += length_ret;
if (length_read < length_req)
i--;
else
break;
} while (i);
/* set_fs(oldfs); */
current_thread_info()->addr_limit = oldfs;
/* NOTE(review): filp is always valid here (IS_ERR path returned
 * earlier), so this guard is redundant. */
if (filp)
filp_close(filp, NULL);
if (length_read < length_req) {
pr_info("FIPS : DRBG cannot collect enough entropy\n");
return -EAGAIN;
}
return 0;
}
/*
 * drbg_read_entropy - fill @entropy with @len bytes for DRBG seeding.
 *
 * Prefers the blocking pool via get_blocking_random_bytes(); on any
 * failure falls back to get_random_bytes() (non-blocking pool) and
 * records the source in drbg->hw_entropy so callers (e.g.
 * crypto_rng_check_entropy()) can tell whether the seed came from the
 * blocking pool (true) or the fallback (false).
 */
static void drbg_read_entropy(struct drbg_state *drbg, u8 *entropy, unsigned int len)
{
int ret = -1;
// Try /dev/random first
ret = get_blocking_random_bytes(entropy, len);
if (ret < 0) {
// Reading in kernel /dev/urandom, never fails.
pr_info("FIPS : DRBG uses non-blocking pool\n");
get_random_bytes(entropy, len);
drbg->hw_entropy = false;
} else
drbg->hw_entropy = true;
}
#endif
static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed,
int reseed)
{
@ -1102,11 +1011,7 @@ static void drbg_async_seed(struct work_struct *work)
BUG_ON(!entropylen);
BUG_ON(entropylen > sizeof(entropy));
#ifdef CONFIG_CRYPTO_FIPS
drbg_read_entropy(drbg, entropy, entropylen);
#else
get_random_bytes(entropy, entropylen);
#endif
drbg_string_fill(&data, entropy, entropylen);
list_add_tail(&data.list, &seedlist);
@ -1148,11 +1053,6 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
{
int ret;
unsigned char entropy[((32 + 16) * 2)];
#ifdef CONFIG_CRYPTO_FIPS
unsigned char *p;
unsigned char buf[((32 + 16) * 2) + 4 + ENTROPY_BLOCK_LEN];
unsigned int buflen;
#endif
unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
struct drbg_string data1;
LIST_HEAD(seedlist);
@ -1182,25 +1082,8 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
entropylen = ((entropylen + 1) / 2) * 3;
BUG_ON((entropylen * 2) > sizeof(entropy));
#ifdef CONFIG_CRYPTO_FIPS
buflen = (((entropylen + ENTROPY_BLOCK_LEN - 1) / ENTROPY_BLOCK_LEN) + 1) * ENTROPY_BLOCK_LEN;
/* Get seed from /dev/random if available, o.w /dev/urandom */
drbg_read_entropy(drbg, buf, buflen);
#ifdef CONFIG_CRYPTO_FIPS_FUNC_TEST
if (!strcmp("ndrng_crngt", get_fips_functest_mode()))
memcpy(buf, buf + ENTROPY_BLOCK_LEN, ENTROPY_BLOCK_LEN);
#endif
for (p = buf; p < buf + buflen - ENTROPY_BLOCK_LEN; p += ENTROPY_BLOCK_LEN) {
if (!memcmp(p, p + ENTROPY_BLOCK_LEN, ENTROPY_BLOCK_LEN)) {
pr_err("FIPS : DRBG - CRNGT failures on reading entropy\n");
return -EINVAL;
}
}
memcpy(entropy, buf + ENTROPY_BLOCK_LEN, entropylen);
#else
/* Get seed from in-kernel /dev/urandom */
get_random_bytes(entropy, entropylen);
#endif
if (!drbg->jent) {
drbg_string_fill(&data1, entropy, entropylen);
@ -1250,24 +1133,14 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
{
if (!drbg)
return;
if (drbg->Vbuf) {
kzfree(drbg->Vbuf);
drbg->Vbuf = NULL;
}
kzfree(drbg->Vbuf);
drbg->Vbuf = NULL;
drbg->V = NULL;
if (drbg->Cbuf) {
kzfree(drbg->Cbuf);
drbg->Cbuf = NULL;
}
kzfree(drbg->Cbuf);
drbg->Cbuf = NULL;
drbg->C = NULL;
if (drbg->scratchpadbuf) {
kzfree(drbg->scratchpadbuf);
drbg->scratchpadbuf = NULL;
}
kzfree(drbg->scratchpadbuf);
drbg->scratchpadbuf = NULL;
drbg->reseed_ctr = 0;
drbg->d_ops = NULL;
drbg->core = NULL;
@ -1605,9 +1478,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
drbg->core = &drbg_cores[coreref];
drbg->pr = pr;
drbg->seeded = false;
#ifdef CONFIG_CRYPTO_FIPS
drbg->hw_entropy = false;
#endif
drbg->reseed_threshold = drbg_max_requests(drbg);
ret = drbg_alloc_state(drbg);

View file

@ -10,13 +10,14 @@
*
*/
#include "internal.h"
#include <linux/export.h>
#include <linux/fips.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sysctl.h>
#ifdef CONFIG_CRYPTO_FIPS
int fips_enabled = 1;
#else
int fips_enabled;
#endif
EXPORT_SYMBOL_GPL(fips_enabled);
/* Process kernel command-line parameter at boot time. fips=0 or fips=1 */
@ -29,3 +30,49 @@ static int fips_enable(char *str)
}
__setup("fips=", fips_enable);
/*
 * Sysctl leaf: exposes the fips_enabled flag read-only (mode 0444) at
 * /proc/sys/crypto/fips_enabled.
 */
static struct ctl_table crypto_sysctl_table[] = {
{
.procname = "fips_enabled",
.data = &fips_enabled,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = proc_dointvec
},
{}
};
/* Parent directory entry: /proc/sys/crypto/. */
static struct ctl_table crypto_dir_table[] = {
{
.procname = "crypto",
.mode = 0555,
.child = crypto_sysctl_table
},
{}
};
/* Handle returned by register_sysctl_table(), needed for unregistration. */
static struct ctl_table_header *crypto_sysctls;
/* Register the crypto sysctl directory tree. */
static void crypto_proc_fips_init(void)
{
crypto_sysctls = register_sysctl_table(crypto_dir_table);
}
/*
 * Unregister the crypto sysctl tree.
 * NOTE(review): no NULL check on crypto_sysctls here, unlike the variant
 * in proc.c — confirm unregister_sysctl_table() tolerates NULL if
 * registration failed.
 */
static void crypto_proc_fips_exit(void)
{
unregister_sysctl_table(crypto_sysctls);
}
/* Module init: publish the fips_enabled sysctl. */
static int __init fips_init(void)
{
crypto_proc_fips_init();
return 0;
}
/* Module exit: tear the sysctl down again. */
static void __exit fips_exit(void)
{
crypto_proc_fips_exit();
}
module_init(fips_init);
module_exit(fips_exit);

View file

@ -25,21 +25,6 @@
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/fips.h>
#define SKC_VERSION_TEXT "SKC v1.9"
#define FIPS_HMAC_SIZE (32)
#define FIPS_CRYPTO_ADDRS_SIZE (1000)
struct first_last {
aligned_u64 first;
aligned_u64 last;
};
extern const __u64 crypto_buildtime_address;
extern const struct first_last integrity_crypto_addrs[FIPS_CRYPTO_ADDRS_SIZE];
extern const __s8 builtime_crypto_hmac[FIPS_HMAC_SIZE];
/* Crypto notification events. */
enum {
@ -65,26 +50,7 @@ extern struct rw_semaphore crypto_alg_sem;
extern struct blocking_notifier_head crypto_chain;
#ifdef CONFIG_PROC_FS
#ifdef CONFIG_CRYPTO_FIPS
bool in_fips_err(void);
void set_in_fips_err(void);
#ifdef CONFIG_CRYPTO_FIPS_FUNC_TEST
void reset_in_fips_err(void);
void set_fips_functest_KAT_mode(const int num);
void set_fips_functest_conditional_mode(const int num);
char *get_fips_functest_mode(void);
#define SKC_FUNCTEST_KAT_CASE_NUM 24
#define SKC_FUNCTEST_CONDITIONAL_CASE_NUM 2
#define SKC_FUNCTEST_NO_TEST "NO_TEST"
#endif
void crypto_init_proc(int *fips_error);
int do_integrity_check(void);
int testmgr_crypto_proc_init(void);
const char *get_builtime_crypto_hmac(void);
#else
void __init crypto_init_proc(void);
#endif
void __exit crypto_exit_proc(void);
#else
static inline void crypto_init_proc(void)

View file

@ -20,46 +20,8 @@
#include <linux/rwsem.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include "internal.h"
#ifdef CONFIG_CRYPTO_FIPS
/*
 * Sysctl leaf: /proc/sys/crypto/fips_status, read-only (mode 0444).
 * .data is intentionally left unset here; it is filled in at runtime by
 * crypto_init_proc(), which points it at the caller's fips_error flag.
 */
static struct ctl_table crypto_sysctl_table[] = {
{
.procname = "fips_status",
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = proc_dointvec
},
{}
};
/* Parent directory entry: /proc/sys/crypto/. */
static struct ctl_table crypto_dir_table[] = {
{
.procname = "crypto",
.mode = 0555,
.child = crypto_sysctl_table
},
{}
};
/* Handle returned by register_sysctl_table(), needed for unregistration. */
static struct ctl_table_header *crypto_sysctls;
/* Register the crypto sysctl directory tree. */
static void crypto_proc_fips_init(void)
{
crypto_sysctls = register_sysctl_table(crypto_dir_table);
}
/* Unregister the sysctl tree; guarded in case registration failed. */
static void crypto_proc_fips_exit(void)
{
if (crypto_sysctls)
unregister_sysctl_table(crypto_sysctls);
}
#else
/* No FIPS: the init/exit hooks compile away to nothing. */
#define crypto_proc_fips_init()
#define crypto_proc_fips_exit()
#endif
static void *c_start(struct seq_file *m, loff_t *pos)
{
down_read(&crypto_alg_sem);
@ -144,23 +106,12 @@ static const struct file_operations proc_crypto_ops = {
.release = seq_release
};
#ifdef CONFIG_CRYPTO_FIPS
void crypto_init_proc(int *fips_error)
{
proc_create("crypto", 0444, NULL, &proc_crypto_ops);
crypto_sysctl_table[0].data = fips_error;
crypto_proc_fips_init();
}
#else
void __init crypto_init_proc(void)
{
proc_create("crypto", 0444, NULL, &proc_crypto_ops);
crypto_proc_fips_init();
proc_create("crypto", 0, NULL, &proc_crypto_ops);
}
#endif
void __exit crypto_exit_proc(void)
{
crypto_proc_fips_exit();
remove_proc_entry("crypto", NULL);
}

View file

@ -15,9 +15,6 @@
#include <linux/atomic.h>
#include <crypto/internal/rng.h>
#ifdef CONFIG_CRYPTO_FIPS
#include <crypto/drbg.h>
#endif
#include <linux/err.h>
#include <linux/module.h>
#include <linux/mutex.h>
@ -235,28 +232,5 @@ void crypto_unregister_rngs(struct rng_alg *algs, int count)
}
EXPORT_SYMBOL_GPL(crypto_unregister_rngs);
#ifdef CONFIG_CRYPTO_FIPS
/*
 * crypto_rng_check_entropy - report whether @rng was seeded from the
 * blocking (hardware-backed) entropy pool.
 *
 * Only DRBG-backed RNGs (driver name prefixed "drbg_") can report this:
 * their drbg_state carries the hw_entropy flag set during seeding.
 *
 * Returns 0 if the DRBG was seeded from the blocking pool, -EINVAL on a
 * NULL rng or missing driver name, and -1 otherwise.
 * NOTE(review): -1 is not a standard errno value — callers presumably
 * only test for non-zero; confirm before changing.
 */
int crypto_rng_check_entropy(struct crypto_rng *rng)
{
struct drbg_state *drbg;
const char *algo = NULL;
if (!rng)
return -EINVAL;
algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(rng));
if (!algo)
return -EINVAL;
/* Only DRBG instances track their entropy source. */
if (!memcmp(algo, "drbg_", 5)) {
drbg = crypto_rng_ctx(rng);
if (drbg->hw_entropy)
return 0;
}
return -1;
}
EXPORT_SYMBOL_GPL(crypto_rng_check_entropy);
#endif
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Random Number Generator");

View file

@ -22,16 +22,13 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct rsa_key *key = context;
#ifndef CONFIG_CRYPTO_FIPS
const u8 *ptr = value;
size_t n_sz = vlen;
#endif
/* invalid key provided */
if (!value || !vlen)
return -EINVAL;
#ifndef CONFIG_CRYPTO_FIPS
if (fips_enabled) {
while (n_sz && !*ptr) {
ptr++;
@ -44,7 +41,6 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
return -EINVAL;
}
}
#endif
key->n = value;
key->n_sz = vlen;

View file

@ -119,11 +119,6 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
if ((unsigned long)data & alignmask)
return shash_update_unaligned(desc, data, len);
@ -159,11 +154,6 @@ int crypto_shash_final(struct shash_desc *desc, u8 *out)
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
if ((unsigned long)out & alignmask)
return shash_final_unaligned(desc, out);
@ -185,11 +175,6 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
if (((unsigned long)data | (unsigned long)out) & alignmask)
return shash_finup_unaligned(desc, data, len, out);
@ -211,11 +196,6 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
@ -251,11 +231,6 @@ static int shash_async_init(struct ahash_request *req)
struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct shash_desc *desc = ahash_request_ctx(req);
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
desc->tfm = *ctx;
desc->flags = req->base.flags;
@ -267,11 +242,6 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
struct crypto_hash_walk walk;
int nbytes;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
nbytes = crypto_hash_walk_done(&walk, nbytes))
nbytes = crypto_shash_update(desc, walk.data, nbytes);
@ -295,11 +265,6 @@ int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
struct crypto_hash_walk walk;
int nbytes;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
nbytes = crypto_hash_walk_first(req, &walk);
if (!nbytes)
return crypto_shash_final(desc, req->result);
@ -334,11 +299,6 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
unsigned int offset;
int err;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
if (nbytes &&
(sg = req->src, offset = sg->offset,
nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
@ -399,11 +359,6 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
struct crypto_shash *shash;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
if (!crypto_mod_get(calg))
return -EAGAIN;
@ -539,11 +494,6 @@ int crypto_register_shash(struct shash_alg *alg)
struct crypto_alg *base = &alg->base;
int err;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
err = shash_prepare_alg(alg);
if (err)
return err;
@ -599,11 +549,6 @@ int shash_register_instance(struct crypto_template *tmpl,
{
int err;
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
err = shash_prepare_alg(&inst->alg);
if (err)
return err;
@ -623,11 +568,6 @@ int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
struct shash_alg *alg,
struct crypto_instance *inst)
{
#ifdef CONFIG_CRYPTO_FIPS
if (unlikely(in_fips_err()))
return -EACCES;
#endif
return crypto_init_spawn2(&spawn->base, &alg->base, inst,
&crypto_shash_type);
}

View file

@ -24,7 +24,6 @@
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/rng.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/fips.h>
@ -38,7 +37,6 @@
#include <linux/timex.h>
#include <linux/interrupt.h>
#include "tcrypt.h"
#include "internal.h"
/*
* Need slab memory for testing (size in number of pages).
@ -291,7 +289,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
}
init_completion(&result.completion);
pr_info("\ntesting speed of %s (%s) %s\n", algo,
printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
get_driver_name(crypto_aead, tfm), e);
req = aead_request_alloc(tfm, GFP_KERNEL);
@ -333,7 +331,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
memset(iv, 0xff, iv_len);
crypto_aead_clear_flags(tfm, ~0);
pr_info("test %u (%d bit key, %d byte blocks): ",
printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
i, *keysize * 8, *b_size);
@ -700,7 +698,7 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
return;
}
pr_info("\ntesting speed of async %s (%s)\n", algo,
printk(KERN_INFO "\ntesting speed of async %s (%s)\n", algo,
get_driver_name(crypto_ahash, tfm));
if (crypto_ahash_digestsize(tfm) > MAX_DIGEST_SIZE) {
@ -2057,64 +2055,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
case 1000:
test_available();
break;
#ifdef CONFIG_CRYPTO_FIPS
case 1402:
#ifdef CONFIG_CRYPTO_AES
/* AES */
ret += alg_test("ecb(aes-generic)", "ecb(aes)", 0, 0);
ret += alg_test("cbc(aes-generic)", "cbc(aes)", 0, 0);
#ifdef CONFIG_CRYPTO_GCM
ret += alg_test("gcm(aes-generic)", "gcm(aes)", 0, 0);
#endif
#endif
#ifdef CONFIG_CRYPTO_AES_ARM64_CE
ret += alg_test("ecb(aes-ce)", "ecb(aes)", 0, 0);
ret += alg_test("cbc(aes-ce)", "cbc(aes)", 0, 0);
#ifdef CONFIG_CRYPTO_GCM
ret += alg_test("gcm(aes-ce)", "gcm(aes)", 0, 0);
#endif
#endif
/* SHA */
#ifdef CONFIG_CRYPTO_SHA1
ret += alg_test("sha1-generic", "sha1", 0, 0);
ret += alg_test("hmac(sha1-generic)", "hmac(sha1)", 0, 0);
#endif
#ifdef CONFIG_CRYPTO_SHA1_ARM64_CE
ret += alg_test("sha1-ce", "sha1", 0, 0);
ret += alg_test("hmac(sha1-ce)", "hmac(sha1)", 0, 0);
#endif
#ifdef CONFIG_CRYPTO_SHA256
ret += alg_test("sha224-generic", "sha224", 0, 0);
ret += alg_test("sha256-generic", "sha256", 0, 0);
ret += alg_test("hmac(sha224-generic)", "hmac(sha224)", 0, 0);
ret += alg_test("hmac(sha256-generic)", "hmac(sha256)", 0, 0);
#endif
#ifdef CONFIG_CRYPTO_SHA2_ARM64_CE
ret += alg_test("sha224-ce", "sha224", 0, 0);
ret += alg_test("sha256-ce", "sha256", 0, 0);
ret += alg_test("hmac(sha224-ce)", "hmac(sha224)", 0, 0);
ret += alg_test("hmac(sha256-ce)", "hmac(sha256)", 0, 0);
#endif
#ifdef CONFIG_CRYPTO_SHA512
ret += alg_test("sha384-generic", "sha384", 0, 0);
ret += alg_test("sha512-generic", "sha512", 0, 0);
ret += alg_test("hmac(sha384-generic)", "hmac(sha384)", 0, 0);
ret += alg_test("hmac(sha512-generic)", "hmac(sha512)", 0, 0);
#endif
#ifdef CONFIG_CRYPTO_DRBG
ret += alg_test("drbg_nopr_hmac_sha256", "stdrng", 0, 0);
ret += alg_test("drbg_pr_hmac_sha256", "stdrng", 0, 0);
#endif
break;
#endif //CONFIG_CRYPTO_FIPS
}
return ret;
@ -2131,50 +2071,6 @@ static int __init tcrypt_mod_init(void)
goto err_free_tv;
}
#ifdef CONFIG_CRYPTO_FIPS
#ifdef CONFIG_CRYPTO_FIPS_FUNC_TEST
if (!strcmp(SKC_FUNCTEST_NO_TEST, get_fips_functest_mode()))
testmgr_crypto_proc_init();
#else
testmgr_crypto_proc_init();
#endif //CONFIG_CRYPTO_FIPS_FUNC_TEST
mode = 1402;
pr_info("FIPS : POST (%s)\n", SKC_VERSION_TEXT);
err = do_test(alg, type, mask, mode);
if (err) {
pr_err("FIPS : POST - one or more algorithm tests failed\n");
set_in_fips_err();
goto err_free_tv;
} else {
pr_info("FIPS : POST - Algorithm Tests Passed\n");
if (do_integrity_check() != 0) {
#ifndef CONFIG_FUNCTION_TRACER
pr_err("FIPS : POST - Integrity Check Failed\n");
set_in_fips_err();
#else
pr_err("FIPS : POST - Integrity Check bypassed due to ftrace debug mode\n");
#endif
} else {
pr_info("FIPS : POST - Integrity Check Passed\n");
}
if (in_fips_err())
pr_err("FIPS : POST - CRYPTO API in FIPS Error\n");
else
pr_info("FIPS : POST - CRYPTO API started in FIPS approved mode\n");
}
if (!fips_enabled)
err = -EAGAIN;
err_free_tv:
for (i = 0; i < TVMEMSIZE && tvmem[i]; i++)
free_page((unsigned long)tvmem[i]);
return err;
#else
err = do_test(alg, type, mask, mode);
if (err) {
@ -2197,7 +2093,6 @@ err_free_tv:
free_page((unsigned long)tvmem[i]);
return err;
#endif
}
/*
@ -2206,86 +2101,7 @@ err_free_tv:
*/
static void __exit tcrypt_mod_fini(void) { }
// When SKC_FUNC_TEST is defined, this function will be called instead of tcrypt_mode_init
// tcyprt_mode_init will be called as test case number
// after all tests are done, the normal POST test will start
#ifdef CONFIG_CRYPTO_FIPS_FUNC_TEST
static int __init fips_func_test(void)
{
int i;
struct crypto_ahash *tfm;
struct crypto_rng *rng;
pr_info("FIPS FUNC : Functional test start\n");
for (i = 0; i < SKC_FUNCTEST_KAT_CASE_NUM; i++) {
set_fips_functest_KAT_mode(i);
pr_info("FIPS FUNC : --------------------------------------------------\n");
pr_info("FIPS FUNC : Failure inducement case %d - [%s]\n", i + 1, get_fips_functest_mode());
pr_info("FIPS FUNC : --------------------------------------------------\n");
tcrypt_mod_init();
pr_info("FIPS FUNC : (%d-1) POST done. SKC module FIPS status : %s\n",
i+1, in_fips_err()?"failed":"passed");
pr_info("FIPS FUNC : (%d-2) Try to use crypto\n", i + 1);
// Check the module is not working in FIPS failure
tfm = crypto_alloc_ahash("sha256", 0, 0);
if (IS_ERR(tfm))
pr_info("FIPS FUNC : (%d-3) alloc hash is failed as expected\n", i + 1);
else {
pr_info("FIPS FUNC : (%d-3) crypto allocation is success\n", i + 1);
crypto_free_ahash(tfm);
}
// reset the fips err flag to prepare the next test
pr_err("FIPS FUNC : (%d-4) revert FIPS status to no error\n", i + 1);
reset_in_fips_err();
}
for (i = 0; i < SKC_FUNCTEST_CONDITIONAL_CASE_NUM; i++) {
set_fips_functest_conditional_mode(i);
pr_info("FIPS FUNC : --------------------------------------------------\n");
pr_info("FIPS FUNC : conditional test case %d - [%s]\n", i + 1, get_fips_functest_mode());
pr_info("FIPS FUNC : --------------------------------------------------\n");
rng = crypto_alloc_rng("drbg_pr_hmac_sha256", 0, 0);
if (IS_ERR(rng)) {
pr_err("FIPS FUNC : rng alloc was failed\n");
continue;
}
if (crypto_rng_reset(rng, NULL, 0))
pr_err("FIPS FUNC : DRBG instantiate failed as expected\n");
crypto_free_rng(rng);
}
set_fips_functest_conditional_mode(-1);
pr_info("FIPS FUNC : Functional test end\n");
pr_info("FIPS FUNC : Normal POST start\n");
return tcrypt_mod_init();
}
#endif
#ifdef CONFIG_CRYPTO_FIPS_FUNC_TEST
#if defined(CONFIG_CRYPTO_POST_DEFERRED_INIT)
deferred_module_init(fips_func_test);
#elif defined(CONFIG_CRYPTO_POST_LATE_INIT_SYNC)
late_initcall_sync(fips_func_test);
#elif defined(CONFIG_CRYPTO_POST_LATE_INIT)
late_initcall(fips_func_test);
#else
module_init(fips_func_test);
#endif// CONFIG_CRYPTO_POST_DEFERRED_INIT
#else
#if defined(CONFIG_CRYPTO_POST_DEFERRED_INIT)
deferred_module_init(tcrypt_mod_init);
#elif defined(CONFIG_CRYPTO_POST_LATE_INIT_SYNC)
late_initcall_sync(tcrypt_mod_init);
#elif defined(CONFIG_CRYPTO_POST_LATE_INIT)
late_initcall(tcrypt_mod_init);
#else
module_init(tcrypt_mod_init);
#endif // CONFIG_CRYPTO_POST_DEFERRED_INIT
#endif // CONFIG_CRYPTO_FIPS_FUNC_TEST
module_exit(tcrypt_mod_fini);
module_param(alg, charp, 0);

View file

@ -36,11 +36,9 @@
#include "internal.h"
#if 0
static bool notests;
module_param(notests, bool, 0644);
MODULE_PARM_DESC(notests, "disable crypto self-tests");
#endif
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
@ -50,14 +48,6 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
return 0;
}
#ifdef CONFIG_CRYPTO_FIPS
bool in_fips_err(void)
{
return false;
}
EXPORT_SYMBOL_GPL(in_fips_err);
#endif
#else
#include "testmgr.h"
@ -85,12 +75,6 @@ EXPORT_SYMBOL_GPL(in_fips_err);
#define ENCRYPT 1
#define DECRYPT 0
#ifdef CONFIG_CRYPTO_FIPS
#define FIPS_ERR 1
#define FIPS_NO_ERR 0
static int IN_FIPS_ERROR = FIPS_NO_ERR;
#endif
struct tcrypt_result {
struct completion completion;
int err;
@ -162,88 +146,6 @@ struct alg_test_desc {
static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
#ifdef CONFIG_CRYPTO_FIPS
bool in_fips_err(void)
{
return (IN_FIPS_ERROR == FIPS_ERR);
}
EXPORT_SYMBOL_GPL(in_fips_err);
void set_in_fips_err(void)
{
IN_FIPS_ERROR = FIPS_ERR;
}
EXPORT_SYMBOL_GPL(set_in_fips_err);
#ifdef CONFIG_CRYPTO_FIPS_FUNC_TEST
static char *fips_functest_mode;
static char *fips_functest_KAT_list[] = {
"ecb(aes-generic)",
"cbc(aes-generic)",
"gcm_base(ctr(aes-generic),ghash-generic)",
"ecb(aes-ce)",
"cbc(aes-ce)",
"gcm_base(ctr(aes-ce),ghash-generic)",
"sha1-generic",
"hmac(sha1-generic)",
"sha1-ce",
"hmac(sha1-ce)",
"sha224-generic",
"sha256-generic",
"hmac(sha224-generic)",
"hmac(sha256-generic)",
"sha224-ce",
"sha256-ce",
"hmac(sha224-ce)",
"hmac(sha256-ce)",
"sha384-generic",
"sha512-generic",
"hmac(sha384-generic)",
"hmac(sha512-generic)",
"drbg_nopr_hmac_sha256",
"drbg_pr_hmac_sha256",
"integrity"
};
static char *fips_functest_conditional_list[] = {
"ndrng_crngt",
"zeroization"
};
void reset_in_fips_err(void)
{
IN_FIPS_ERROR = FIPS_NO_ERR;
}
EXPORT_SYMBOL_GPL(reset_in_fips_err);
// This function is added to change fips_functest_KAT_num from tcrypt.c
void set_fips_functest_KAT_mode(const int num)
{
if (num >= 0 && num < SKC_FUNCTEST_KAT_CASE_NUM)
fips_functest_mode = fips_functest_KAT_list[num];
else
fips_functest_mode = SKC_FUNCTEST_NO_TEST;
}
EXPORT_SYMBOL_GPL(set_fips_functest_KAT_mode);
void set_fips_functest_conditional_mode(const int num)
{
if (num >= 0 && num < SKC_FUNCTEST_CONDITIONAL_CASE_NUM)
fips_functest_mode = fips_functest_conditional_list[num];
else
fips_functest_mode = SKC_FUNCTEST_NO_TEST;
}
EXPORT_SYMBOL_GPL(set_fips_functest_conditional_mode);
char *get_fips_functest_mode(void)
{
if (fips_functest_mode)
return fips_functest_mode;
else
return SKC_FUNCTEST_NO_TEST;
}
EXPORT_SYMBOL_GPL(get_fips_functest_mode);
#endif // CONFIG_CRYPTO_FIPS_FUNC_TEST
#endif
static void hexdump(unsigned char *buf, unsigned int len)
{
print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
@ -457,44 +359,14 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
}
}
// Pass wrong digest for functional tests
// Test case : hmac(sha1), sha1
#ifdef CONFIG_CRYPTO_FIPS_FUNC_TEST
if (!strcmp(algo, get_fips_functest_mode())) {
unsigned char func_buf[1024];
strcpy(func_buf, template[i].digest);
func_buf[0] += 1;
set_fips_functest_KAT_mode(0);
if (memcmp(result, func_buf,
crypto_ahash_digestsize(tfm))) {
pr_err("alg: hash: Test %d failed for %s\n",
j, algo);
hexdump(result, crypto_ahash_digestsize(tfm));
ret = -EINVAL;
goto out;
}
} else {
if (memcmp(result, template[i].digest,
crypto_ahash_digestsize(tfm))) {
pr_err("alg: hash: Test %d failed for %s\n",
j, algo);
hexdump(result, crypto_ahash_digestsize(tfm));
ret = -EINVAL;
goto out;
}
if (memcmp(result, template[i].digest,
crypto_ahash_digestsize(tfm))) {
printk(KERN_ERR "alg: hash: Test %d failed for %s\n",
j, algo);
hexdump(result, crypto_ahash_digestsize(tfm));
ret = -EINVAL;
goto out;
}
#else
if (memcmp(result, template[i].digest,
crypto_ahash_digestsize(tfm))) {
pr_err("alg: hash: Test %d failed for %s\n",
j, algo);
hexdump(result, crypto_ahash_digestsize(tfm));
ret = -EINVAL;
goto out;
}
#endif
}
j = 0;
@ -793,23 +665,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
ret = -EINVAL;
goto out;
}
// Pass wrong key for functional tests
// Test case : gcm(aes)
#ifdef CONFIG_CRYPTO_FIPS_FUNC_TEST
if (!strcmp(algo, get_fips_functest_mode())) {
unsigned char func_buf[1024];
strcpy(func_buf, template[i].key);
func_buf[0] += 1;
memcpy(key, func_buf, template[i].klen);
set_fips_functest_KAT_mode(0);
} else {
memcpy(key, template[i].key, template[i].klen);
}
#else
memcpy(key, template[i].key, template[i].klen);
#endif
ret = crypto_aead_setkey(tfm, key, template[i].klen);
if (template[i].fail == !ret) {
@ -1284,23 +1140,8 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
crypto_skcipher_set_flags(tfm,
CRYPTO_TFM_REQ_WEAK_KEY);
// Pass wrong key for functional tests
// Test case : ecb(aes)
#ifdef CONFIG_CRYPTO_FIPS_FUNC_TEST
if (!strcmp(algo, get_fips_functest_mode())) {
unsigned char func_buf[1024];
strcpy(func_buf, template[i].key);
func_buf[0] += 1;
ret = crypto_skcipher_setkey(tfm, func_buf, template[i].klen);
set_fips_functest_KAT_mode(0);
} else {
ret = crypto_skcipher_setkey(tfm, template[i].key, template[i].klen);
}
#else
ret = crypto_skcipher_setkey(tfm, template[i].key,
template[i].klen);
#endif
if (template[i].fail == !ret) {
pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
d, j, algo, crypto_skcipher_get_flags(tfm));
@ -1921,22 +1762,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr,
goto outbuf;
}
// Pass wrong entropy for functional tests
// Test case : drbg
#ifdef CONFIG_CRYPTO_FIPS_FUNC_TEST
if (!strcmp(driver, get_fips_functest_mode())) {
unsigned char func_buf[1024];
strcpy(func_buf, test->expected);
func_buf[0] += 1;
set_fips_functest_KAT_mode(0);
ret = memcmp(func_buf, buf, test->expectedlen);
} else {
ret = memcmp(test->expected, buf, test->expectedlen);
}
#else
ret = memcmp(test->expected, buf, test->expectedlen);
#endif
outbuf:
crypto_free_rng(drng);
@ -2259,12 +2085,6 @@ static int alg_test_akcipher(const struct alg_test_desc *desc,
static int alg_test_null(const struct alg_test_desc *desc,
const char *driver, u32 type, u32 mask)
{
#ifdef CONFIG_CRYPTO_FIPS
if (desc && desc->fips_allowed) {
if (unlikely(in_fips_err()))
return -1;
}
#endif
return 0;
}
@ -2586,6 +2406,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha512),cbc(aes))",
.fips_allowed = 1,
.test = alg_test_aead,
.suite = {
.aead = {
@ -2613,6 +2434,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "authenc(hmac(sha512),cbc(des3_ede))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = {
@ -2623,6 +2445,14 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "authenc(hmac(sha512),ctr(aes))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "cbc(aes)",
.test = alg_test_skcipher,
@ -4390,12 +4220,10 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
int j;
int rc;
#if 0
if (!fips_enabled && notests) {
printk_once(KERN_INFO "alg: self-tests disabled\n");
return 0;
}
#endif
alg_test_descs_check_order();
@ -4435,46 +4263,21 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
type, mask);
test_done:
if (fips_enabled && rc) {
pr_err("FIPS : %s: %s alg self test failed\n",
driver, alg);
#ifdef CONFIG_CRYPTO_FIPS
#ifdef CONFIG_CRYPTO_FIPS_FUNC_TEST
if (!strcmp("", get_fips_functest_mode()))
IN_FIPS_ERROR = FIPS_ERR;
#else
IN_FIPS_ERROR = FIPS_ERR;
#endif // CONFIG_CRYPTO_FIPS_FUNC_TEST
#else
if (fips_enabled && rc)
panic("%s: %s alg self test failed in fips mode!\n", driver, alg);
#endif //CONFIG_CRYPTO_FIPS
return rc;
}
if (fips_enabled && !rc)
pr_info("FIPS : self-tests for %s (%s) passed\n", driver, alg);
pr_info("alg: self-tests for %s (%s) passed\n", driver, alg);
return rc;
notest:
pr_info("FIPS : No test for %s (%s)\n", alg, driver);
printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver);
return 0;
non_fips_alg:
return -EINVAL;
}
int testmgr_crypto_proc_init(void)
{
#ifdef CONFIG_CRYPTO_FIPS
crypto_init_proc(&IN_FIPS_ERROR);
#else
crypto_init_proc();
#endif
return 0;
}
EXPORT_SYMBOL_GPL(testmgr_crypto_proc_init);
#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
EXPORT_SYMBOL_GPL(alg_test);

View file

@ -20,7 +20,6 @@
#include <linux/net.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>
#include <crypto/internal/scompress.h>
#define ZSTD_DEF_LEVEL 1
@ -111,24 +110,6 @@ static int __zstd_init(void *ctx)
return ret;
}
static void *zstd_alloc_ctx(struct crypto_scomp *tfm)
{
int ret;
struct zstd_ctx *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ret = __zstd_init(ctx);
if (ret) {
kfree(ctx);
return ERR_PTR(ret);
}
return ctx;
}
static int zstd_init(struct crypto_tfm *tfm)
{
struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
@ -142,12 +123,6 @@ static void __zstd_exit(void *ctx)
zstd_decomp_exit(ctx);
}
static void zstd_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
__zstd_exit(ctx);
kzfree(ctx);
}
static void zstd_exit(struct crypto_tfm *tfm)
{
struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
@ -177,13 +152,6 @@ static int zstd_compress(struct crypto_tfm *tfm, const u8 *src,
return __zstd_compress(src, slen, dst, dlen, ctx);
}
static int zstd_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __zstd_compress(src, slen, dst, dlen, ctx);
}
static int __zstd_decompress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@ -205,13 +173,6 @@ static int zstd_decompress(struct crypto_tfm *tfm, const u8 *src,
return __zstd_decompress(src, slen, dst, dlen, ctx);
}
static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __zstd_decompress(src, slen, dst, dlen, ctx);
}
static struct crypto_alg alg = {
.cra_name = "zstd",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
@ -224,18 +185,6 @@ static struct crypto_alg alg = {
.coa_decompress = zstd_decompress } }
};
static struct scomp_alg scomp = {
.alloc_ctx = zstd_alloc_ctx,
.free_ctx = zstd_free_ctx,
.compress = zstd_scompress,
.decompress = zstd_sdecompress,
.base = {
.cra_name = "zstd",
.cra_driver_name = "zstd-scomp",
.cra_module = THIS_MODULE,
}
};
static int __init zstd_mod_init(void)
{
int ret;
@ -244,17 +193,12 @@ static int __init zstd_mod_init(void)
if (ret)
return ret;
ret = crypto_register_scomp(&scomp);
if (ret)
crypto_unregister_alg(&alg);
return ret;
}
static void __exit zstd_mod_fini(void)
{
crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
module_init(zstd_mod_init);

View file

@ -554,6 +554,5 @@ config CRYPTO_DEV_ROCKCHIP
Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
source "drivers/crypto/chelsio/Kconfig"
source "drivers/crypto/fmp/Kconfig"
endif # CRYPTO_HW

View file

@ -32,6 +32,3 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
ifneq (,$(filter $(CONFIG_EXYNOS_SMU) $(CONFIG_EXYNOS_FMP),y))
obj-y += fmp/
endif

View file

@ -23,30 +23,17 @@
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <crypto/skcipher.h>
#include <crypto/fmp.h>
#include <linux/device-mapper.h>
#include "dm-crypt-fmp.h"
#define DM_MSG_PREFIX "crypt"
uint8_t fmp_disk_key[FMP_MAX_KEY_SIZE * 2];
struct exynos_fmp_data {
struct exynos_fmp_variant_ops *vops;
struct platform_device *pdev;
};
/*
* context holding the current state of a multi-part conversion
*/
@ -74,7 +61,6 @@ struct dm_crypt_io {
atomic_t io_pending;
int error;
sector_t sector;
unsigned long id; /* bio_trace id */
struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;
@ -129,14 +115,11 @@ struct iv_tcw_private {
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
struct dm_crypt_bio_trace;
/*
* The fields in here must be read only after initialization.
*/
struct crypt_config {
struct dm_dev *dev;
struct exynos_fmp_data fmp;
sector_t start;
/*
@ -158,12 +141,6 @@ struct crypt_config {
char *cipher;
char *cipher_string;
/* hardware acceleration. 0 : no, 1 : yes */
unsigned int hw_fmp;
/* to trace bios used by hw_fmp */
struct dm_crypt_bio_trace *bio_trace;
struct crypt_iv_operations *iv_gen_ops;
union {
struct iv_essiv_private essiv;
@ -203,118 +180,6 @@ struct crypt_config {
u8 key[0];
};
/*
* To trace submitted and ended bio
*/
#define DM_CRYPT_BIO_TRACE_SLOT_SIZE (64)
struct dm_crypt_btrace_slot {
unsigned long id;
unsigned long time;
sector_t sec;
struct bio *bio;
struct dm_crypt_io *io;
union {
pid_t pid;
int err;
};
};
struct dm_crypt_bio_trace {
unsigned int start_idx; /* protected by start_lock */
unsigned int end_idx; /* protected by end_lock */
unsigned long next_id; /* protected by start_lock */
struct crypt_config *cc; /* just for fast debugging */
spinlock_t start_lock;
spinlock_t end_lock;
struct dm_crypt_btrace_slot start[DM_CRYPT_BIO_TRACE_SLOT_SIZE];
struct dm_crypt_btrace_slot end[DM_CRYPT_BIO_TRACE_SLOT_SIZE];
};
static inline bool dm_crypt_need_bio_trace(struct crypt_config *cc,
struct bio *bio, const char *fn_name)
{
#ifdef CONFIG_JOURNAL_DATA_TAG
if (!(bio->bi_flags & (1 << BIO_JOURNAL)))
return false;
if (!cc->bio_trace)
return false;
if (!cc->hw_fmp)
printk( KERN_WARNING
"%s: hw_fmp is not set, is possible?", fn_name);
return true;
#else
return false;
#endif
}
static void dm_crypt_bio_trace_start(struct crypt_config *cc, struct bio *bio)
{
unsigned long flags;
unsigned long id;
unsigned int idx;
struct dm_crypt_bio_trace *btrace;
struct dm_crypt_io *io;
if (!dm_crypt_need_bio_trace(cc, bio, __func__))
return;
btrace = cc->bio_trace;
io = (struct dm_crypt_io *)bio->bi_private;
spin_lock_irqsave(&btrace->start_lock, flags);
id = btrace->next_id++;
idx = btrace->start_idx++;
BUG_ON(idx >= DM_CRYPT_BIO_TRACE_SLOT_SIZE);
if (idx == (DM_CRYPT_BIO_TRACE_SLOT_SIZE - 1))
btrace->start_idx = 0;
io->id = id;
btrace->start[idx].id = id;
btrace->start[idx].time = jiffies;
btrace->start[idx].sec = bio->bi_iter.bi_sector;
btrace->start[idx].bio = bio;
btrace->start[idx].io = io;
btrace->start[idx].pid = current->pid;
spin_unlock_irqrestore(&btrace->start_lock, flags);
}
static void dm_crypt_bio_trace_end(struct crypt_config *cc, struct bio *bio)
{
unsigned long flags;
unsigned long id;
unsigned int idx;
struct dm_crypt_bio_trace *btrace;
struct dm_crypt_io *io;
if (!dm_crypt_need_bio_trace(cc, bio, __func__))
return;
btrace = cc->bio_trace;
io = (struct dm_crypt_io *)bio->bi_private;
spin_lock_irqsave(&btrace->end_lock, flags);
id = io->id;
idx = btrace->end_idx++;
BUG_ON(idx >= DM_CRYPT_BIO_TRACE_SLOT_SIZE);
if (idx == (DM_CRYPT_BIO_TRACE_SLOT_SIZE - 1))
btrace->end_idx = 0;
btrace->end[idx].id = id;
btrace->end[idx].time = jiffies;
btrace->end[idx].sec = bio->bi_iter.bi_sector;
btrace->end[idx].bio = bio;
btrace->end[idx].io = io;
btrace->end[idx].err = bio->bi_error;
spin_unlock_irqrestore(&btrace->end_lock, flags);
}
static struct dm_crypt_bio_trace dm_crypt_btrace;
#define MIN_IOS 64
static void clone_init(struct dm_crypt_io *, struct bio *);
@ -1243,24 +1108,18 @@ static void crypt_endio(struct bio *clone)
unsigned rw = bio_data_dir(clone);
int error;
dm_crypt_bio_trace_end(cc, clone);
if (cc->hw_fmp) {
error = clone->bi_error;
bio_put(clone);
} else {
/*
* free the processed pages
*/
if (rw == WRITE)
crypt_free_buffer_pages(cc, clone);
/*
* free the processed pages
*/
if (rw == WRITE)
crypt_free_buffer_pages(cc, clone);
error = clone->bi_error;
bio_put(clone);
error = clone->bi_error;
bio_put(clone);
if (rw == READ && !error) {
kcryptd_queue_crypt(io);
return;
}
if (rw == READ && !error) {
kcryptd_queue_crypt(io);
return;
}
if (unlikely(error))
@ -1277,38 +1136,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio));
#ifdef CONFIG_JOURNAL_DATA_TAG
clone->bi_flags |= io->base_bio->bi_flags & (1 << BIO_JOURNAL);
#endif
}
static int kcryptd_io_rw(struct dm_crypt_io *io, gfp_t gfp)
{
struct crypt_config *cc = io->cc;
struct bio *clone;
/*
* The block layer might modify the bvec array, so always
* copy the required bvecs because we need the original
* one in order to decrypt the whole bio data *afterwards*.
*/
clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
if (!clone)
return 1;
crypt_inc_pending(io);
clone_init(io, clone);
clone->fmp_ci.private_enc_mode = EXYNOS_FMP_DISK_ENC;
clone->fmp_ci.private_algo_mode = EXYNOS_FMP_ALGO_MODE_AES_XTS;
clone->fmp_ci.key = cc->key;
clone->fmp_ci.key_length = cc->key_size;
clone->bi_iter.bi_sector = cc->start + io->sector;
dm_crypt_bio_trace_start(cc, clone);
generic_make_request(clone);
return 0;
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@ -1573,27 +1400,12 @@ static void kcryptd_crypt(struct work_struct *work)
kcryptd_crypt_write_convert(io);
}
static void kcryptd_fmp_io(struct work_struct *work)
{
struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
crypt_inc_pending(io);
if (kcryptd_io_rw(io, GFP_NOIO))
io->error = -ENOMEM;
crypt_dec_pending(io);
}
static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->cc;
if (cc->hw_fmp) {
INIT_WORK(&io->work, kcryptd_fmp_io);
queue_work(cc->io_queue, &io->work);
} else {
INIT_WORK(&io->work, kcryptd_crypt);
queue_work(cc->crypt_queue, &io->work);
}
INIT_WORK(&io->work, kcryptd_crypt);
queue_work(cc->crypt_queue, &io->work);
}
/*
@ -1667,40 +1479,12 @@ static int crypt_setkey_allcpus(struct crypt_config *cc)
/* Ignore extra keys (which are used for IV etc) */
subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
if (cc->hw_fmp) {
struct fmp_data_setting fmp_data;
if (!cc->fmp.vops || !cc->fmp.pdev) {
err = -ENODEV;
return err;
}
if (cc->key_size > FMP_MAX_KEY_SIZE) {
pr_err("dm-crypt: Invalid key size(%d)\n", cc->key_size);
err = -ENODEV;
return err;
}
memcpy(fmp_disk_key, cc->key, cc->key_size);
memset(fmp_data.disk.key, 0, FMP_MAX_KEY_SIZE);
memcpy(fmp_data.disk.key, fmp_disk_key, cc->key_size);
fmp_data.disk.key_size = cc->key_size;
r = cc->fmp.vops->set_disk_key(cc->fmp.pdev, &fmp_data);
if (r) {
pr_err("dm-crypt: Fail to set fmp disk key. r(%d)\n", r);
for (i = 0; i < cc->tfms_count; i++) {
r = crypto_skcipher_setkey(cc->tfms[i],
cc->key + (i * subkey_size),
subkey_size);
if (r)
err = r;
return err;
}
pr_info("%s: fmp disk key is set\n", __func__);
} else {
for (i = 0; i < cc->tfms_count; i++) {
r = crypto_skcipher_setkey(cc->tfms[i],
cc->key + (i * subkey_size),
subkey_size);
if (r)
err = r;
}
}
return err;
@ -1744,29 +1528,8 @@ static int crypt_wipe_key(struct crypt_config *cc)
return crypt_setkey_allcpus(cc);
}
static int crypt_clear_key(struct crypt_config *cc)
{
int ret = 0;
char *disk_key = (char *)fmp_disk_key;
if (!cc->fmp.vops || !cc->fmp.pdev) {
ret = -ENODEV;
goto out;
}
memset(disk_key, 0, cc->key_size);
ret = cc->fmp.vops->clear_disk_key(cc->fmp.pdev);
if (ret)
pr_err("dm-crypt: Fail to clear disk key. ret(%d)\n", ret);
pr_info("%s: fmp disk key is clear\n", __func__);
out:
return ret;
}
static void crypt_dtr(struct dm_target *ti)
{
int r;
struct crypt_config *cc = ti->private;
ti->private = NULL;
@ -1774,30 +1537,24 @@ static void crypt_dtr(struct dm_target *ti)
if (!cc)
return;
if (!cc->hw_fmp) {
if (cc->write_thread)
kthread_stop(cc->write_thread);
}
if (cc->write_thread)
kthread_stop(cc->write_thread);
if (cc->io_queue)
destroy_workqueue(cc->io_queue);
if (!cc->hw_fmp) {
if (cc->crypt_queue)
destroy_workqueue(cc->crypt_queue);
}
if (cc->crypt_queue)
destroy_workqueue(cc->crypt_queue);
crypt_free_tfms(cc);
if (cc->bs)
bioset_free(cc->bs);
if (!cc->hw_fmp) {
mempool_destroy(cc->page_pool);
mempool_destroy(cc->req_pool);
mempool_destroy(cc->page_pool);
mempool_destroy(cc->req_pool);
if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
cc->iv_gen_ops->dtr(cc);
}
if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
cc->iv_gen_ops->dtr(cc);
if (cc->dev)
dm_put_device(ti, cc->dev);
@ -1805,88 +1562,10 @@ static void crypt_dtr(struct dm_target *ti)
kzfree(cc->cipher);
kzfree(cc->cipher_string);
if (cc->hw_fmp) {
r = crypt_clear_key(cc);
if (r)
pr_err("dm-crypt: Fail to clear fmp key.(%d)\n", r);
}
/* Must zero key material before freeing */
kzfree(cc);
}
static struct exynos_fmp_variant_ops *dm_exynos_fmp_get_vops(void)
{
struct exynos_fmp_variant_ops *fmp_vops = NULL;
struct device_node *node;
node = of_find_compatible_node(NULL, NULL, "samsung,exynos-fmp");
if (!node) {
pr_err("%s: Fail to find exynos fmp device node\n", __func__);
goto out;
}
fmp_vops = exynos_fmp_get_variant_ops(node);
if (!fmp_vops)
pr_err("%s: Fail to get fmp_vops\n", __func__);
of_node_put(node);
out:
return fmp_vops;
}
static struct platform_device *dm_exynos_fmp_get_pdevice(void)
{
struct device_node *node;
struct platform_device *fmp_pdev = NULL;
node = of_find_compatible_node(NULL, NULL, "samsung,exynos-fmp");
if (!node) {
pr_err("%s: Fail to find exynos fmp device node\n", __func__);
goto out;
}
fmp_pdev = exynos_fmp_get_pdevice(node);
if (!fmp_pdev)
pr_err("%s: Fail to get fmp platform device\n", __func__);
out:
return fmp_pdev;
}
static int req_crypt_fmp_get_dev(struct crypt_config *cc)
{
int ret = 0;
if (!cc) {
pr_err("%s: Invalid crypt config.\n", __func__);
ret = -EINVAL;
return ret;
}
cc->fmp.vops = dm_exynos_fmp_get_vops();
cc->fmp.pdev = dm_exynos_fmp_get_pdevice();
if (cc->fmp.pdev == ERR_PTR(-EPROBE_DEFER)) {
pr_err("%s: FMP device not probed yet\n", __func__);
ret = -EPROBE_DEFER;
goto err;
}
if (!cc->fmp.pdev || !cc->fmp.vops) {
pr_err("%s: Invalid platform device %p or vops %p\n",
__func__, cc->fmp.pdev, cc->fmp.vops);
ret = -ENODEV;
goto err;
}
return 0;
err:
cc->fmp.pdev = NULL;
cc->fmp.vops = NULL;
return ret;
}
static int crypt_ctr_cipher(struct dm_target *ti,
char *cipher_in, char *key)
{
@ -1960,125 +1639,82 @@ static int crypt_ctr_cipher(struct dm_target *ti,
goto bad_mem;
}
if (!cipher) {
ti->error = "Error Invalid cipher";
ret = -EINVAL;
/* Allocate cipher */
ret = crypt_alloc_tfms(cc, cipher_api);
if (ret < 0) {
ti->error = "Error allocating crypto tfm";
goto bad;
}
if ((strcmp(chainmode, "xts") == 0) && ivmode &&
(strcmp(cipher, "aes") == 0) &&
(!strcmp(ivmode, "fmp") || !strcmp(ivmode, "disk"))) {
ret = req_crypt_fmp_get_dev(cc);
if (ret) {
ti->error = "Cannot get FMP device";
goto bad;
}
pr_info("%s: H/W FMP disk encryption\n", __func__);
cc->hw_fmp = 1;
/* link to global bio_trace info */
memset(&dm_crypt_btrace, 0, sizeof(struct dm_crypt_bio_trace));
spin_lock_init(&dm_crypt_btrace.start_lock);
spin_lock_init(&dm_crypt_btrace.end_lock);
dm_crypt_btrace.cc = cc;
cc->bio_trace = &dm_crypt_btrace;
/* Initialize max_io_len */
ret = dm_set_target_max_io_len(ti, 1024);
if (ret < 0)
goto bad;
/* Initialize and set key */
ret = crypt_set_key(cc, key);
if (ret < 0) {
ti->error = "Error decoding and setting key";
goto bad;
/* Initialize IV */
cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
if (cc->iv_size)
/* at least a 64 bit sector number should fit in our buffer */
cc->iv_size = max(cc->iv_size,
(unsigned int)(sizeof(u64) / sizeof(u8)));
else if (ivmode) {
DMWARN("Selected cipher does not support IVs");
ivmode = NULL;
}
/* Choose ivmode, see comments at iv code. */
if (ivmode == NULL)
cc->iv_gen_ops = NULL;
else if (strcmp(ivmode, "plain") == 0)
cc->iv_gen_ops = &crypt_iv_plain_ops;
else if (strcmp(ivmode, "plain64") == 0)
cc->iv_gen_ops = &crypt_iv_plain64_ops;
else if (strcmp(ivmode, "essiv") == 0)
cc->iv_gen_ops = &crypt_iv_essiv_ops;
else if (strcmp(ivmode, "benbi") == 0)
cc->iv_gen_ops = &crypt_iv_benbi_ops;
else if (strcmp(ivmode, "null") == 0)
cc->iv_gen_ops = &crypt_iv_null_ops;
else if (strcmp(ivmode, "lmk") == 0) {
cc->iv_gen_ops = &crypt_iv_lmk_ops;
/*
* Version 2 and 3 is recognised according
* to length of provided multi-key string.
* If present (version 3), last key is used as IV seed.
* All keys (including IV seed) are always the same size.
*/
if (cc->key_size % cc->key_parts) {
cc->key_parts++;
cc->key_extra_size = cc->key_size / cc->key_parts;
}
} else if (strcmp(ivmode, "tcw") == 0) {
cc->iv_gen_ops = &crypt_iv_tcw_ops;
cc->key_parts += 2; /* IV + whitening */
cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
} else {
pr_info("%s: S/W disk encryption\n", __func__);
ret = -EINVAL;
ti->error = "Invalid IV mode";
goto bad;
}
/* do not link to global bio_trace info */
cc->bio_trace = NULL;
/* Initialize and set key */
ret = crypt_set_key(cc, key);
if (ret < 0) {
ti->error = "Error decoding and setting key";
goto bad;
}
/* Allocate cipher */
ret = crypt_alloc_tfms(cc, cipher_api);
/* Allocate IV */
if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
if (ret < 0) {
ti->error = "Error allocating crypto tfm";
ti->error = "Error creating IV";
goto bad;
}
}
/* Initialize IV */
cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
if (cc->iv_size)
/* at least a 64 bit sector number should fit in our buffer */
cc->iv_size = max(cc->iv_size,
(unsigned int)(sizeof(u64) / sizeof(u8)));
else if (ivmode) {
DMWARN("Selected cipher does not support IVs");
ivmode = NULL;
}
/* Choose ivmode, see comments at iv code. */
if (ivmode == NULL)
cc->iv_gen_ops = NULL;
else if (strcmp(ivmode, "plain") == 0)
cc->iv_gen_ops = &crypt_iv_plain_ops;
else if (strcmp(ivmode, "plain64") == 0)
cc->iv_gen_ops = &crypt_iv_plain64_ops;
else if (strcmp(ivmode, "essiv") == 0)
cc->iv_gen_ops = &crypt_iv_essiv_ops;
else if (strcmp(ivmode, "benbi") == 0)
cc->iv_gen_ops = &crypt_iv_benbi_ops;
else if (strcmp(ivmode, "null") == 0)
cc->iv_gen_ops = &crypt_iv_null_ops;
else if (strcmp(ivmode, "lmk") == 0) {
cc->iv_gen_ops = &crypt_iv_lmk_ops;
/*
* Version 2 and 3 is recognised according
* to length of provided multi-key string.
* If present (version 3), last key is used as IV seed.
* All keys (including IV seed) are always the same size.
*/
if (cc->key_size % cc->key_parts) {
cc->key_parts++;
cc->key_extra_size = cc->key_size / cc->key_parts;
}
} else if (strcmp(ivmode, "tcw") == 0) {
cc->iv_gen_ops = &crypt_iv_tcw_ops;
cc->key_parts += 2; /* IV + whitening */
cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
} else {
ret = -EINVAL;
ti->error = "Invalid IV mode";
goto bad;
}
/* Initialize and set key */
ret = crypt_set_key(cc, key);
/* Initialize IV (set keys for ESSIV etc) */
if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
ret = cc->iv_gen_ops->init(cc);
if (ret < 0) {
ti->error = "Error decoding and setting key";
ti->error = "Error initialising IV";
goto bad;
}
/* Allocate IV */
if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
if (ret < 0) {
ti->error = "Error creating IV";
goto bad;
}
}
/* Initialize IV (set keys for ESSIV etc) */
if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
ret = cc->iv_gen_ops->init(cc);
if (ret < 0) {
ti->error = "Error initialising IV";
goto bad;
}
}
}
ret = 0;
@ -2129,45 +1765,40 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (ret < 0)
goto bad;
if (cc->hw_fmp) {
cc->per_bio_data_size = ti->per_io_data_size =
ALIGN(sizeof(struct dm_crypt_io), ARCH_KMALLOC_MINALIGN);
cc->dmreq_start = sizeof(struct skcipher_request);
cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
if (crypto_skcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
/* Allocate the padding exactly */
iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
& crypto_skcipher_alignmask(any_tfm(cc));
} else {
cc->dmreq_start = sizeof(struct skcipher_request);
cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
/*
* If the cipher requires greater alignment than kmalloc
* alignment, we don't know the exact position of the
* initialization vector. We must assume worst case.
*/
iv_size_padding = crypto_skcipher_alignmask(any_tfm(cc));
}
if (crypto_skcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
/* Allocate the padding exactly */
iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
& crypto_skcipher_alignmask(any_tfm(cc));
} else {
/*
* If the cipher requires greater alignment than kmalloc
* alignment, we don't know the exact position of the
* initialization vector. We must assume worst case.
*/
iv_size_padding = crypto_skcipher_alignmask(any_tfm(cc));
}
ret = -ENOMEM;
cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
if (!cc->req_pool) {
ti->error = "Cannot allocate crypt request mempool";
goto bad;
}
ret = -ENOMEM;
cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
if (!cc->req_pool) {
ti->error = "Cannot allocate crypt request mempool";
goto bad;
}
cc->per_bio_data_size = ti->per_io_data_size =
ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
ARCH_KMALLOC_MINALIGN);
cc->per_bio_data_size = ti->per_io_data_size =
ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
ARCH_KMALLOC_MINALIGN);
cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
if (!cc->page_pool) {
ti->error = "Cannot allocate page mempool";
goto bad;
}
cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
if (!cc->page_pool) {
ti->error = "Cannot allocate page mempool";
goto bad;
}
cc->bs = bioset_create(MIN_IOS, 0);
@ -2178,14 +1809,13 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
mutex_init(&cc->bio_alloc_lock);
if (!cc->hw_fmp) {
ret = -EINVAL;
if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
ti->error = "Invalid iv_offset sector";
goto bad;
}
cc->iv_offset = tmpll;
ret = -EINVAL;
if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
(tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
ti->error = "Invalid iv_offset sector";
goto bad;
}
cc->iv_offset = tmpll;
ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
if (ret) {
@ -2237,55 +1867,44 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -ENOMEM;
if (cc->hw_fmp) {
cc->io_queue = alloc_workqueue("kcryptd_fmp_io",
WQ_HIGHPRI |
WQ_MEM_RECLAIM,
1);
} else {
cc->io_queue = alloc_workqueue("kcryptd_io",
WQ_HIGHPRI |
WQ_MEM_RECLAIM,
1);
}
cc->io_queue = alloc_workqueue("kcryptd_io",
WQ_HIGHPRI |
WQ_MEM_RECLAIM,
1);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
if (!cc->hw_fmp) {
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
cc->crypt_queue = alloc_workqueue("kcryptd",
WQ_HIGHPRI |
WQ_MEM_RECLAIM, 1);
else
cc->crypt_queue = alloc_workqueue("kcryptd",
WQ_HIGHPRI |
WQ_MEM_RECLAIM |
WQ_UNBOUND,
num_online_cpus());
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
goto bad;
}
init_waitqueue_head(&cc->write_thread_wait);
cc->write_tree = RB_ROOT;
cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
if (IS_ERR(cc->write_thread)) {
ret = PTR_ERR(cc->write_thread);
cc->write_thread = NULL;
ti->error = "Couldn't spawn write thread";
goto bad;
}
wake_up_process(cc->write_thread);
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
cc->crypt_queue = alloc_workqueue("kcryptd",
WQ_HIGHPRI |
WQ_MEM_RECLAIM, 1);
else
cc->crypt_queue = alloc_workqueue("kcryptd",
WQ_HIGHPRI |
WQ_MEM_RECLAIM |
WQ_UNBOUND,
num_online_cpus());
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
goto bad;
}
init_waitqueue_head(&cc->write_thread_wait);
cc->write_tree = RB_ROOT;
cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
if (IS_ERR(cc->write_thread)) {
ret = PTR_ERR(cc->write_thread);
cc->write_thread = NULL;
ti->error = "Couldn't spawn write thread";
goto bad;
}
wake_up_process(cc->write_thread);
ti->num_flush_bios = 1;
ti->discard_zeroes_data_unsupported = true;
ti->num_discard_bios = 1;
return 0;
@ -2305,7 +1924,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
* - for REQ_OP_DISCARD caller must use flush if IO ordering matters
*/
if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
bio_op(bio) == REQ_OP_DISCARD || bio->bi_sec_flags & SEC_BYPASS)) {
bio_op(bio) == REQ_OP_DISCARD)) {
bio->bi_bdev = cc->dev->bdev;
if (bio_sectors(bio))
bio->bi_iter.bi_sector = cc->start +
@ -2324,16 +1943,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
io->ctx.req = (struct skcipher_request *)(io + 1);
if (cc->hw_fmp) {
if (kcryptd_io_rw(io, GFP_NOWAIT))
kcryptd_queue_crypt(io);
} else {
if (bio_data_dir(io->base_bio) == READ) {
if (kcryptd_io_read(io, GFP_NOWAIT))
kcryptd_queue_read(io);
} else
kcryptd_queue_crypt(io);
}
if (bio_data_dir(io->base_bio) == READ) {
if (kcryptd_io_read(io, GFP_NOWAIT))
kcryptd_queue_read(io);
} else
kcryptd_queue_crypt(io);
return DM_MAPIO_SUBMITTED;
}
@ -2464,8 +2078,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
* bio that are not as physically contiguous as the original bio.
*/
limits->max_segment_size = PAGE_SIZE;
if (cc->hw_fmp)
limits->logical_block_size = PAGE_SIZE;
limits->logical_block_size = PAGE_SIZE;
}
static struct target_type crypt_target = {

View file

@ -14,11 +14,3 @@ config FS_ENCRYPTION
efficient since it avoids caching the encrypted and
decrypted pages in the page cache.
config FSCRYPT_SDP
bool "Sensitive Data Protection for Per-file-encryption"
depends on FS_ENCRYPTION
depends on SDP
help
Enable SDP functional to fscrypto module.
This feature is supporting "Data-At-Rest" for file systems using
Per-file-encryption being controlled by SDP user status.

View file

@ -2,4 +2,3 @@ obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o
fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o
fscrypto-$(CONFIG_BLOCK) += bio.o
fscrypto-$(CONFIG_FSCRYPT_SDP) += sdp/sdp_cache.o sdp/sdp_dek.o sdp/sdp_ioctl.o sdp/sdp_name.o sdp/sdp_xattr.o sdp/sdp_crypto.o

View file

@ -131,9 +131,6 @@ struct drbg_state {
bool seeded; /* DRBG fully seeded? */
bool pr; /* Prediction resistance enabled? */
#ifdef CONFIG_CRYPTO_FIPS
bool hw_entropy;
#endif
struct work_struct seed_work; /* asynchronous seeding support */
struct crypto_rng *jent;
const struct drbg_state_ops *d_ops;

View file

@ -197,14 +197,4 @@ static inline int crypto_rng_seedsize(struct crypto_rng *tfm)
return crypto_rng_alg(tfm)->seedsize;
}
/**
* crypto_rng_check_entropy() - check rng is DRBG and initialized from hw entropy
*
* This function returns 0 if stdrng is in the DRBG family and was initialized from the /dev/random pool
* Important : only available where CONFIG_CRYPTO_FIPS build
*
* Return: 0 (/dev/random); -1 (/dev/urandom); < -1 if an error occurred
*/
int crypto_rng_check_entropy(struct crypto_rng *rng);
#endif

View file

@ -250,9 +250,7 @@ struct fsxattr {
#define FS_IOC32_SETVERSION _IOW('v', 2, int)
#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
#if defined(CONFIG_SDP) && !defined(CONFIG_FSCRYPT_SDP)
#define FS_IOC_INVAL_MAPPING _IO('f', 13) /* CONFIG_EPM FMP */
#endif
/*
* File system encryption support
*/
@ -279,9 +277,6 @@ struct fsxattr {
#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
#define FS_ENCRYPTION_MODE_ADIANTUM 9
#define FS_PRIVATE_ENCRYPTION_MODE_AES_256_CBC 126
#define FS_PRIVATE_ENCRYPTION_MODE_AES_256_XTS 127
struct fscrypt_policy {
__u8 version;
__u8 contents_encryption_mode;