btrfs-progs: kernel-lib: sync include/overflow.h

Sync current version with improved checks.

Signed-off-by: David Sterba <dsterba@suse.com>
Author: David Sterba
Date:   2022-05-12 13:13:31 +02:00
Parent: e49441a953
Commit: 5ad2aacd24

1 changed file with 137 additions and 156 deletions


@@ -2,8 +2,12 @@
 #ifndef __LINUX_OVERFLOW_H
 #define __LINUX_OVERFLOW_H
+#include <stdbool.h>
 /*
- * It would seem more obvious to do something like
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
  *
  * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
  * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
@@ -36,128 +40,57 @@
 #define is_non_negative(a) ((a) > 0 || (a) == 0)
 #define is_negative(a) (!(is_non_negative(a)))
-/* Checking for unsigned overflow is relatively easy without causing UB. */
-#define __unsigned_add_overflow(a, b, d) ({	\
+/* Copied from linux.git/include/compiler_attributes.h */
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warn_005funused_005fresult-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#nodiscard-warn-unused-result
+ */
+#define __must_check __attribute__((__warn_unused_result__))
+/*
+ * Allows for effectively applying __must_check to a macro so we can have
+ * both the type-agnostic benefits of the macros while also being able to
+ * enforce that the return value is, in fact, checked.
+ */
+static inline bool __must_check __must_check_overflow(bool overflow)
+{
+	return overflow;
+}
+/*
+ * For simplicity and code hygiene, the fallback code below insists on
+ * a, b and *d having the same type (similar to the min() and max()
+ * macros), whereas gcc's type-generic overflow checkers accept
+ * different types. Hence we don't just make check_add_overflow an
+ * alias for __builtin_add_overflow, but add type checks similar to
+ * below.
+ */
+#define check_add_overflow(a, b, d) __must_check_overflow(({	\
 	typeof(a) __a = (a);	\
 	typeof(b) __b = (b);	\
 	typeof(d) __d = (d);	\
 	(void) (&__a == &__b);	\
 	(void) (&__a == __d);	\
-	*__d = __a + __b;	\
-	*__d < __a;	\
-})
-#define __unsigned_sub_overflow(a, b, d) ({	\
+	__builtin_add_overflow(__a, __b, __d);	\
+}))
+#define check_sub_overflow(a, b, d) __must_check_overflow(({	\
 	typeof(a) __a = (a);	\
 	typeof(b) __b = (b);	\
 	typeof(d) __d = (d);	\
 	(void) (&__a == &__b);	\
 	(void) (&__a == __d);	\
-	*__d = __a - __b;	\
-	__a < __b;	\
-})
-/*
- * If one of a or b is a compile-time constant, this avoids a division.
- */
-#define __unsigned_mul_overflow(a, b, d) ({	\
-	typeof(a) __a = (a);	\
-	typeof(b) __b = (b);	\
-	typeof(d) __d = (d);	\
-	(void) (&__a == &__b);	\
-	(void) (&__a == __d);	\
-	*__d = __a * __b;	\
-	__builtin_constant_p(__b) ?	\
-	__b > 0 && __a > type_max(typeof(__a)) / __b :	\
-	__a > 0 && __b > type_max(typeof(__b)) / __a;	\
-})
-/*
- * For signed types, detecting overflow is much harder, especially if
- * we want to avoid UB. But the interface of these macros is such that
- * we must provide a result in *d, and in fact we must produce the
- * result promised by gcc's builtins, which is simply the possibly
- * wrapped-around value. Fortunately, we can just formally do the
- * operations in the widest relevant unsigned type (u64) and then
- * truncate the result - gcc is smart enough to generate the same code
- * with and without the (u64) casts.
- */
-/*
- * Adding two signed integers can overflow only if they have the same
- * sign, and overflow has happened iff the result has the opposite
- * sign.
- */
-#define __signed_add_overflow(a, b, d) ({	\
+	__builtin_sub_overflow(__a, __b, __d);	\
+}))
+#define check_mul_overflow(a, b, d) __must_check_overflow(({	\
 	typeof(a) __a = (a);	\
 	typeof(b) __b = (b);	\
 	typeof(d) __d = (d);	\
 	(void) (&__a == &__b);	\
 	(void) (&__a == __d);	\
-	*__d = (u64)__a + (u64)__b;	\
-	(((~(__a ^ __b)) & (*__d ^ __a))	\
-		& type_min(typeof(__a))) != 0;	\
-})
-/*
- * Subtraction is similar, except that overflow can now happen only
- * when the signs are opposite. In this case, overflow has happened if
- * the result has the opposite sign of a.
- */
-#define __signed_sub_overflow(a, b, d) ({	\
-	typeof(a) __a = (a);	\
-	typeof(b) __b = (b);	\
-	typeof(d) __d = (d);	\
-	(void) (&__a == &__b);	\
-	(void) (&__a == __d);	\
-	*__d = (u64)__a - (u64)__b;	\
-	((((__a ^ __b)) & (*__d ^ __a))	\
-		& type_min(typeof(__a))) != 0;	\
-})
-/*
- * Signed multiplication is rather hard. gcc always follows C99, so
- * division is truncated towards 0. This means that we can write the
- * overflow check like this:
- *
- * (a > 0 && (b > MAX/a || b < MIN/a)) ||
- * (a < -1 && (b > MIN/a || b < MAX/a) ||
- * (a == -1 && b == MIN)
- *
- * The redundant casts of -1 are to silence an annoying -Wtype-limits
- * (included in -Wextra) warning: When the type is u8 or u16, the
- * __b_c_e in check_mul_overflow obviously selects
- * __unsigned_mul_overflow, but unfortunately gcc still parses this
- * code and warns about the limited range of __b.
- */
-#define __signed_mul_overflow(a, b, d) ({	\
-	typeof(a) __a = (a);	\
-	typeof(b) __b = (b);	\
-	typeof(d) __d = (d);	\
-	typeof(a) __tmax = type_max(typeof(a));	\
-	typeof(a) __tmin = type_min(typeof(a));	\
-	(void) (&__a == &__b);	\
-	(void) (&__a == __d);	\
-	*__d = (u64)__a * (u64)__b;	\
-	(__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) ||	\
-	(__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) ||	\
-	(__b == (typeof(__b))-1 && __a == __tmin);	\
-})
-#define check_add_overflow(a, b, d)	\
-	__builtin_choose_expr(is_signed_type(typeof(a)),	\
-			__signed_add_overflow(a, b, d),	\
-			__unsigned_add_overflow(a, b, d))
-#define check_sub_overflow(a, b, d)	\
-	__builtin_choose_expr(is_signed_type(typeof(a)),	\
-			__signed_sub_overflow(a, b, d),	\
-			__unsigned_sub_overflow(a, b, d))
-#define check_mul_overflow(a, b, d)	\
-	__builtin_choose_expr(is_signed_type(typeof(a)),	\
-			__signed_mul_overflow(a, b, d),	\
-			__unsigned_mul_overflow(a, b, d))
+	__builtin_mul_overflow(__a, __b, __d);	\
+}))
 /** check_shl_overflow() - Calculate a left-shifted value and check overflow
  *
@@ -176,9 +109,9 @@
  * - 'a << s' sets the sign bit, if any, in '*d'.
  *
  * '*d' will hold the results of the attempted shift, but is not
- * considered "safe for use" if false is returned.
+ * considered "safe for use" if true is returned.
  */
-#define check_shl_overflow(a, s, d) ({	\
+#define check_shl_overflow(a, s, d) __must_check_overflow(({	\
 	typeof(a) _a = a;	\
 	typeof(s) _s = s;	\
 	typeof(d) _d = d;	\
@@ -188,7 +121,70 @@
 	*_d = (_a_full << _to_shift);	\
 	(_to_shift != _s || is_negative(*_d) || is_negative(_a) ||	\
 	(*_d >> _to_shift) != _a);	\
-})
+}))
+/**
+ * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX
+ *
+ * @factor1: first factor
+ * @factor2: second factor
+ *
+ * Returns: calculate @factor1 * @factor2, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. The
+ * lvalue must be size_t to avoid implicit type conversion.
+ */
+static inline size_t __must_check size_mul(size_t factor1, size_t factor2)
+{
+	size_t bytes;
+	if (check_mul_overflow(factor1, factor2, &bytes))
+		return SIZE_MAX;
+	return bytes;
+}
+/**
+ * size_add() - Calculate size_t addition with saturation at SIZE_MAX
+ *
+ * @addend1: first addend
+ * @addend2: second addend
+ *
+ * Returns: calculate @addend1 + @addend2, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. The
+ * lvalue must be size_t to avoid implicit type conversion.
+ */
+static inline size_t __must_check size_add(size_t addend1, size_t addend2)
+{
+	size_t bytes;
+	if (check_add_overflow(addend1, addend2, &bytes))
+		return SIZE_MAX;
+	return bytes;
+}
+/**
+ * size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX
+ *
+ * @minuend: value to subtract from
+ * @subtrahend: value to subtract from @minuend
+ *
+ * Returns: calculate @minuend - @subtrahend, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. For
+ * composition with the size_add() and size_mul() helpers, neither
+ * argument may be SIZE_MAX (or the result with be forced to SIZE_MAX).
+ * The lvalue must be size_t to avoid implicit type conversion.
+ */
+static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
+{
+	size_t bytes;
+	if (minuend == SIZE_MAX || subtrahend == SIZE_MAX ||
+	    check_sub_overflow(minuend, subtrahend, &bytes))
+		return SIZE_MAX;
+	return bytes;
+}
 /**
  * array_size() - Calculate size of 2-dimensional array.
@@ -201,15 +197,7 @@
  * Returns: number of bytes needed to represent the array or SIZE_MAX on
  * overflow.
  */
-static inline size_t array_size(size_t a, size_t b)
-{
-	size_t bytes;
-	if (check_mul_overflow(a, b, &bytes))
-		return SIZE_MAX;
-	return bytes;
-}
+#define array_size(a, b)	size_mul(a, b)
 /**
  * array3_size() - Calculate size of 3-dimensional array.
@@ -223,48 +211,41 @@ static inline size_t array_size(size_t a, size_t b)
  * Returns: number of bytes needed to represent the array or SIZE_MAX on
  * overflow.
  */
-static inline size_t array3_size(size_t a, size_t b, size_t c)
-{
-	size_t bytes;
-	if (check_mul_overflow(a, b, &bytes))
-		return SIZE_MAX;
-	if (check_mul_overflow(bytes, c, &bytes))
-		return SIZE_MAX;
-	return bytes;
-}
-/*
- * Compute a*b+c, returning SIZE_MAX on overflow. Internal helper for
- * struct_size() below.
- */
-static inline size_t __ab_c_size(size_t a, size_t b, size_t c)
-{
-	size_t bytes;
-	if (check_mul_overflow(a, b, &bytes))
-		return SIZE_MAX;
-	if (check_add_overflow(bytes, c, &bytes))
-		return SIZE_MAX;
-	return bytes;
-}
+#define array3_size(a, b, c)	size_mul(size_mul(a, b), c)
 /**
- * struct_size() - Calculate size of structure with trailing array.
- * @p: Pointer to the structure.
- * @member: Name of the array member.
- * @n: Number of elements in the array.
+ * flex_array_size() - Calculate size of a flexible array member
+ *                     within an enclosing structure.
  *
- * Calculates size of memory needed for structure @p followed by an
- * array of @n @member elements.
+ * @p: Pointer to the structure.
+ * @member: Name of the flexible array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of a flexible array of @count number of @member
+ * elements, at the end of structure @p.
  *
  * Return: number of bytes needed or SIZE_MAX on overflow.
  */
-#define struct_size(p, member, n)	\
-	__ab_c_size(n,	\
-		    sizeof(*(p)->member) + __must_be_array((p)->member),	\
-		    sizeof(*(p)))
+#define flex_array_size(p, member, count)	\
+	__builtin_choose_expr(__is_constexpr(count),	\
+		(count) * sizeof(*(p)->member) + __must_be_array((p)->member),	\
+		size_mul(count, sizeof(*(p)->member) + __must_be_array((p)->member)))
+/**
+ * struct_size() - Calculate size of structure with trailing flexible array.
+ *
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure @p followed by an
+ * array of @count number of @member elements.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size(p, member, count)	\
+	__builtin_choose_expr(__is_constexpr(count),	\
+		sizeof(*(p)) + flex_array_size(p, member, count),	\
+		size_add(sizeof(*(p)), flex_array_size(p, member, count)))
 #endif /* __LINUX_OVERFLOW_H */
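
For context, the sketch below shows how the synced helpers are meant to be called; it is not part of the commit. It assumes a build inside the btrfs-progs tree (kerncompat.h providing u32/u64 and the overflow header being reachable; the include path may differ), and struct item_batch and the element count are made-up examples.

/* Usage sketch only, not part of the commit. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "kerncompat.h"          /* assumed: provides u32/u64 as in btrfs-progs */
#include "kernel-lib/overflow.h" /* assumed path of the synced header */

/* Hypothetical structure with a trailing flexible array member. */
struct item_batch {
	u32 count;
	u64 items[];
};

int main(void)
{
	size_t nr = 1000, bytes;
	struct item_batch *batch;

	/*
	 * check_*_overflow() now wraps __builtin_*_overflow() and is
	 * __must_check: the boolean result (true on overflow) has to be used.
	 */
	if (check_mul_overflow(nr, sizeof(u64), &bytes))
		return 1;

	/*
	 * struct_size()/flex_array_size() saturate at SIZE_MAX instead of
	 * wrapping, so a huge 'nr' becomes a failing allocation rather than
	 * an undersized buffer.
	 */
	batch = malloc(struct_size(batch, items, nr));
	if (!batch)
		return 1;

	batch->count = nr;
	memset(batch->items, 0, flex_array_size(batch, items, nr));
	printf("allocated %zu bytes for %zu items\n",
	       struct_size(batch, items, nr), nr);

	free(batch);
	return 0;
}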