author     Mark Brown <broonie@kernel.org>  2020-12-10 13:30:11 +0000
committer  Mark Brown <broonie@kernel.org>  2020-12-10 13:30:11 +0000
commit     49ab19a4a51a31cb06992386cec4be82ebca5a2d (patch)
tree       de7d31ec7ded2c8ab8dbdfe5a55fa283068023d0 /lib/test_kasan.c
parent     b0dfd948379c79b8754e224e29b99d30ce0d79b8 (diff)
parent     3b25f337929e73232f0aa990cd68a129f53652e2 (diff)
Merge series "spi: spi-geni-qcom: Use gpio descriptors for CS" from Stephen Boyd <swboyd@chromium.org>:
Collected patches from the two series below and associated tags so they
can be merged in one pile through the spi tree. Merry December!

SPI: https://lore.kernel.org/r/20201202214935.1114381-1-swboyd@chromium.org
cros-ec: https://lore.kernel.org/r/20201203011649.1405292-1-swboyd@chromium.org

Cc: Akash Asthana <akashast@codeaurora.org>
Cc: Simon Glass <sjg@chromium.org>
Cc: Gwendal Grignou <gwendal@chromium.org>
Cc: Douglas Anderson <dianders@chromium.org>
Cc: Alexandru M Stan <amstan@chromium.org>

Stephen Boyd (3):
  platform/chrome: cros_ec_spi: Don't overwrite spi::mode
  platform/chrome: cros_ec_spi: Drop bits_per_word assignment
  spi: spi-geni-qcom: Use the new method of gpio CS control

 drivers/platform/chrome/cros_ec_spi.c | 2 --
 drivers/spi/spi-geni-qcom.c           | 1 +
 2 files changed, 1 insertion(+), 2 deletions(-)

base-commit: b65054597872ce3aefbc6a666385eabdf9e288da
--
https://chromeos.dev
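For readers unfamiliar with the "new method of gpio CS control" named in the last patch title: it refers to the SPI core's gpio-descriptor handling, where a controller driver sets use_gpio_descriptors on its struct spi_controller and the core then looks up the cs-gpios properties as GPIO descriptors and drives chip select itself. The sketch below illustrates that opt-in in a probe routine; it is only a hedged sketch, and the driver, function, and variable names here are hypothetical rather than taken from spi-geni-qcom (whose actual change is the single-line diffstat entry above).

/*
 * Illustrative sketch only: a SPI controller driver opting in to the
 * SPI core's gpio-descriptor chip-select handling. All example_* names
 * are hypothetical.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int example_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	int ret;

	ctlr = spi_alloc_master(&pdev->dev, 0);	/* no private data in this sketch */
	if (!ctlr)
		return -ENOMEM;

	/*
	 * With this flag set, the SPI core parses the controller's
	 * cs-gpios as GPIO descriptors and toggles them around each
	 * transfer, so the driver does not manage CS GPIOs by hand.
	 */
	ctlr->use_gpio_descriptors = true;

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret)
		spi_controller_put(ctlr);
	return ret;
}

Judging by their titles, the two cros_ec_spi patches in the series are independent cleanups (dropping a redundant spi->mode overwrite and a bits_per_word assignment); only the spi-geni-qcom patch concerns the chip-select opt-in sketched above.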
Diffstat (limited to 'lib/test_kasan.c')
-rw-r--r--  lib/test_kasan.c  149
1 file changed, 107 insertions(+), 42 deletions(-)
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 63c26171a791..662f862702fc 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -216,6 +216,12 @@ static void kmalloc_oob_16(struct kunit *test)
u64 words[2];
} *ptr1, *ptr2;
+ /* This test is specifically crafted for the generic mode. */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
+ return;
+ }
+
ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
@@ -227,6 +233,23 @@ static void kmalloc_oob_16(struct kunit *test)
kfree(ptr2);
}
+static void kmalloc_uaf_16(struct kunit *test)
+{
+ struct {
+ u64 words[2];
+ } *ptr1, *ptr2;
+
+ ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
+
+ ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+ kfree(ptr2);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
+ kfree(ptr1);
+}
+
static void kmalloc_oob_memset_2(struct kunit *test)
{
char *ptr;
@@ -429,6 +452,12 @@ static void kasan_global_oob(struct kunit *test)
volatile int i = 3;
char *p = &global_array[ARRAY_SIZE(global_array) + i];
+ /* Only generic mode instruments globals. */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ kunit_info(test, "CONFIG_KASAN_GENERIC required");
+ return;
+ }
+
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
@@ -467,6 +496,12 @@ static void kasan_alloca_oob_left(struct kunit *test)
char alloca_array[i];
char *p = alloca_array - 1;
+ /* Only generic mode instruments dynamic allocas. */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ kunit_info(test, "CONFIG_KASAN_GENERIC required");
+ return;
+ }
+
if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
return;
@@ -481,6 +516,12 @@ static void kasan_alloca_oob_right(struct kunit *test)
char alloca_array[i];
char *p = alloca_array + i;
+ /* Only generic mode instruments dynamic allocas. */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ kunit_info(test, "CONFIG_KASAN_GENERIC required");
+ return;
+ }
+
if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
return;
@@ -551,6 +592,9 @@ static void kasan_memchr(struct kunit *test)
return;
}
+ if (OOB_TAG_OFF)
+ size = round_up(size, OOB_TAG_OFF);
+
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -573,6 +617,9 @@ static void kasan_memcmp(struct kunit *test)
return;
}
+ if (OOB_TAG_OFF)
+ size = round_up(size, OOB_TAG_OFF);
+
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
memset(arr, 0, sizeof(arr));
@@ -619,13 +666,50 @@ static void kasan_strings(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}
-static void kasan_bitops(struct kunit *test)
+static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
+{
+ KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
+}
+
+static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
+{
+ KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
+
+#if defined(clear_bit_unlock_is_negative_byte)
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
+ clear_bit_unlock_is_negative_byte(nr, addr));
+#endif
+}
+
+static void kasan_bitops_generic(struct kunit *test)
{
+ long *bits;
+
+ /* This test is specifically crafted for the generic mode. */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
+ return;
+ }
+
/*
* Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
* this way we do not actually corrupt other memory.
*/
- long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
+ bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
/*
@@ -633,55 +717,34 @@ static void kasan_bitops(struct kunit *test)
* below accesses are still out-of-bounds, since bitops are defined to
* operate on the whole long the bit is in.
*/
- KUNIT_EXPECT_KASAN_FAIL(test, set_bit(BITS_PER_LONG, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(BITS_PER_LONG, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(BITS_PER_LONG, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(BITS_PER_LONG, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(BITS_PER_LONG, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(BITS_PER_LONG, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test, change_bit(BITS_PER_LONG, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(BITS_PER_LONG, bits));
+ kasan_bitops_modify(test, BITS_PER_LONG, bits);
/*
* Below calls try to access bit beyond allocated memory.
*/
- KUNIT_EXPECT_KASAN_FAIL(test,
- test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test,
- __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test,
- test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits));
+ kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
- KUNIT_EXPECT_KASAN_FAIL(test,
- test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
+ kfree(bits);
+}
- KUNIT_EXPECT_KASAN_FAIL(test,
- __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
+static void kasan_bitops_tags(struct kunit *test)
+{
+ long *bits;
- KUNIT_EXPECT_KASAN_FAIL(test,
- test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
+ /* This test is specifically crafted for the tag-based mode. */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ kunit_info(test, "CONFIG_KASAN_SW_TAGS required\n");
+ return;
+ }
- KUNIT_EXPECT_KASAN_FAIL(test,
- __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
+ /* Allocation size will be rounded up to granule size, which is 16. */
+ bits = kzalloc(sizeof(*bits), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
- KUNIT_EXPECT_KASAN_FAIL(test,
- kasan_int_result =
- test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
+ /* Do the accesses past the 16 allocated bytes. */
+ kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]);
+ kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]);
-#if defined(clear_bit_unlock_is_negative_byte)
- KUNIT_EXPECT_KASAN_FAIL(test,
- kasan_int_result = clear_bit_unlock_is_negative_byte(
- BITS_PER_LONG + BITS_PER_BYTE, bits));
-#endif
kfree(bits);
}
@@ -728,6 +791,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_oob_krealloc_more),
KUNIT_CASE(kmalloc_oob_krealloc_less),
KUNIT_CASE(kmalloc_oob_16),
+ KUNIT_CASE(kmalloc_uaf_16),
KUNIT_CASE(kmalloc_oob_in_memset),
KUNIT_CASE(kmalloc_oob_memset_2),
KUNIT_CASE(kmalloc_oob_memset_4),
@@ -751,7 +815,8 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kasan_memchr),
KUNIT_CASE(kasan_memcmp),
KUNIT_CASE(kasan_strings),
- KUNIT_CASE(kasan_bitops),
+ KUNIT_CASE(kasan_bitops_generic),
+ KUNIT_CASE(kasan_bitops_tags),
KUNIT_CASE(kmalloc_double_kzfree),
KUNIT_CASE(vmalloc_oob),
{}
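For context on how the KUNIT_CASE() entries above are consumed: lib/test_kasan.c registers them through a struct kunit_suite defined further down in the file, outside the hunks shown here. The following is a minimal, self-contained sketch of that registration pattern; the suite and test names are illustrative, not quoted from the file.

/*
 * Minimal sketch of the KUnit suite registration pattern (names here
 * are illustrative, not copied from lib/test_kasan.c).
 */
#include <kunit/test.h>

static void example_test(struct kunit *test)
{
	/* A trivial expectation, standing in for a real test body. */
	KUNIT_EXPECT_EQ(test, 1 + 1, 2);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_test),
	{}	/* sentinel terminating the array */
};

static struct kunit_suite example_test_suite = {
	.name = "example",
	.test_cases = example_test_cases,
};

kunit_test_suite(example_test_suite);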