
crypto: Add support for x86 cpuid auto loading for x86 crypto drivers

Add support for auto-loading of crypto drivers based on cpuid features.
This enables auto-loading of the VIA- and Intel-specific drivers
for AES, hashing and CRCs.

Requires the earlier infrastructure patch to add x86 modinfo.
I kept it all in a single patch for now.

I dropped the printks for the case where the driver's cpuid doesn't match
(IMHO drivers should never print anything in that case).

One drawback is that udev doesn't know whether the drivers are actually
used, so they will be loaded unconditionally at boot. That is still better
than not loading them at all, as often happens today.

Cc: Dave Jones <davej@redhat.com>
Cc: Kay Sievers <kay.sievers@vrfy.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Huang Ying <ying.huang@intel.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Thomas Renninger <trenn@suse.de>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Andi Kleen 2012-01-26 00:09:06 +01:00 committed by Greg Kroah-Hartman
parent 644e9cbbe3
commit 3bd391f056
5 changed files with 41 additions and 19 deletions
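
For reference, the conversion applied to each driver below follows one pattern: declare a struct x86_cpu_id table with X86_FEATURE_MATCH(), export it with MODULE_DEVICE_TABLE(x86cpu, ...) so udev gets a modalias to match at boot, and gate module init on x86_match_cpu() instead of a cpu_has_* check plus printk. Here is a minimal sketch of that pattern; the module and symbol names (example_cpu_ids, example_init) and the feature bit chosen are illustrative only, not part of this commit.

#include <linux/module.h>
#include <asm/cpu_device_id.h>

/* CPUID feature(s) the driver needs; the table is zero-terminated. */
static const struct x86_cpu_id example_cpu_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),	/* illustrative feature bit */
	{}
};
/* Emits an x86cpu modalias so udev can auto-load the module. */
MODULE_DEVICE_TABLE(x86cpu, example_cpu_ids);

static int __init example_init(void)
{
	/* Bail out quietly if the CPU lacks the feature; no printk. */
	if (!x86_match_cpu(example_cpu_ids))
		return -ENODEV;
	/* ... register crypto algorithms here ... */
	return 0;
}

static void __exit example_exit(void)
{
	/* ... unregister crypto algorithms here ... */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Because the modalias describes only CPU features, udev loads any module whose table matches the running CPU, which is the unconditional-load drawback noted in the message above.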

arch/x86/crypto/aesni-intel_glue.c

@@ -28,6 +28,7 @@
 #include <crypto/aes.h>
 #include <crypto/cryptd.h>
 #include <crypto/ctr.h>
+#include <asm/cpu_device_id.h>
 #include <asm/i387.h>
 #include <asm/aes.h>
 #include <crypto/scatterwalk.h>
@@ -1253,14 +1254,19 @@ static struct crypto_alg __rfc4106_alg = {
 };
 #endif
+static const struct x86_cpu_id aesni_cpu_id[] = {
+	X86_FEATURE_MATCH(X86_FEATURE_AES),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
 static int __init aesni_init(void)
 {
 	int err;
-	if (!cpu_has_aes) {
-		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
+	if (!x86_match_cpu(aesni_cpu_id))
 		return -ENODEV;
-	}
 	if ((err = crypto_fpu_init()))
 		goto fpu_err;

arch/x86/crypto/crc32c-intel.c

@@ -31,6 +31,7 @@
 #include <crypto/internal/hash.h>
 #include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
 #define CHKSUM_BLOCK_SIZE 1
 #define CHKSUM_DIGEST_SIZE 4
@@ -173,13 +174,17 @@ static struct shash_alg alg = {
 	}
 };
+static const struct x86_cpu_id crc32c_cpu_id[] = {
+	X86_FEATURE_MATCH(X86_FEATURE_XMM4_2),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, crc32c_cpu_id);
 static int __init crc32c_intel_mod_init(void)
 {
-	if (cpu_has_xmm4_2)
-		return crypto_register_shash(&alg);
-	else
+	if (!x86_match_cpu(crc32c_cpu_id))
 		return -ENODEV;
+	return crypto_register_shash(&alg);
 }
 static void __exit crc32c_intel_mod_fini(void)
static void __exit crc32c_intel_mod_fini(void)

arch/x86/crypto/ghash-clmulni-intel_glue.c

@@ -20,6 +20,7 @@
 #include <crypto/gf128mul.h>
 #include <crypto/internal/hash.h>
 #include <asm/i387.h>
+#include <asm/cpu_device_id.h>
 #define GHASH_BLOCK_SIZE 16
 #define GHASH_DIGEST_SIZE 16
@@ -294,15 +295,18 @@ static struct ahash_alg ghash_async_alg = {
 	},
 };
+static const struct x86_cpu_id pcmul_cpu_id[] = {
+	X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ), /* Pickle-Mickle-Duck */
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id);
 static int __init ghash_pclmulqdqni_mod_init(void)
 {
 	int err;
-	if (!cpu_has_pclmulqdq) {
-		printk(KERN_INFO "Intel PCLMULQDQ-NI instructions are not"
-		       " detected.\n");
+	if (!x86_match_cpu(pcmul_cpu_id))
 		return -ENODEV;
-	}
 	err = crypto_register_shash(&ghash_alg);
 	if (err)

drivers/crypto/padlock-aes.c

@@ -19,6 +19,7 @@
 #include <linux/percpu.h>
 #include <linux/smp.h>
 #include <linux/slab.h>
+#include <asm/cpu_device_id.h>
 #include <asm/byteorder.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
@@ -503,12 +504,18 @@ static struct crypto_alg cbc_aes_alg = {
 	}
 };
+static struct x86_cpu_id padlock_cpu_id[] = {
+	X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);
 static int __init padlock_init(void)
 {
 	int ret;
 	struct cpuinfo_x86 *c = &cpu_data(0);
-	if (!cpu_has_xcrypt)
+	if (!x86_match_cpu(padlock_cpu_id))
 		return -ENODEV;
 	if (!cpu_has_xcrypt_enabled) {

drivers/crypto/padlock-sha.c

@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/scatterlist.h>
+#include <asm/cpu_device_id.h>
 #include <asm/i387.h>
 struct padlock_sha_desc {
@@ -526,6 +527,12 @@ static struct shash_alg sha256_alg_nano = {
 	}
 };
+static struct x86_cpu_id padlock_sha_ids[] = {
+	X86_FEATURE_MATCH(X86_FEATURE_PHE),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids);
 static int __init padlock_init(void)
 {
 	int rc = -ENODEV;
@@ -533,15 +540,8 @@ static int __init padlock_init(void)
 	struct shash_alg *sha1;
 	struct shash_alg *sha256;
-	if (!cpu_has_phe) {
-		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
+	if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled)
 		return -ENODEV;
-	}
-	if (!cpu_has_phe_enabled) {
-		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
-		return -ENODEV;
-	}
 	/* Register the newly added algorithm module if on *
 	 * VIA Nano processor, or else just do as before */