Consider the following code:
#define UINT_MAX (~0U)
#define INT_MAX (int)(UINT_MAX & (UINT_MAX >> 1))
#define INT_MIN (int)(UINT_MAX & ~(UINT_MAX >> 1))

int foo(unsigned int u, signed int s) {
  if (u > INT_MAX && u <= UINT_MAX / 2 + 4 && u != UINT_MAX / 2 + 2 &&
      u != UINT_MAX / 2 + 3 && s >= INT_MIN + 1 && s <= INT_MIN + 2) {
    // u: [INT_MAX+1, INT_MAX+1] U [INT_MAX+4, INT_MAX+4],
    // s: [INT_MIN+1, INT_MIN+2]
    return (u != s); // expected-warning{{TRUE}}
  }
  __builtin_unreachable();
}
The if branch is crafted in such a way that u != s is always true: by the usual arithmetic conversions, s is converted to unsigned int for the comparison, so the range [INT_MIN+1, INT_MIN+2] maps to [INT_MAX+2, INT_MAX+3], which never overlaps the two values left for u (INT_MAX+1 and INT_MAX+4). The function could therefore simply return 1. GCC catches this optimization, but clang misses it.
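As a quick sanity check (this harness is not part of the original example; it uses the standard limits.h constants rather than the hand-rolled macros above, and assumes the usual conversion of s to unsigned int), one can enumerate the only four (u, s) pairs the condition admits:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* Enumerate the only values of u and s that satisfy the branch condition
   and confirm that u != s holds for each pair. The comparison converts s
   to unsigned int, so s lands in [INT_MAX+2, INT_MAX+3]. */
int main(void) {
    unsigned int us[] = { (unsigned int)INT_MAX + 1u, (unsigned int)INT_MAX + 4u };
    int ss[] = { INT_MIN + 1, INT_MIN + 2 };

    for (int i = 0; i < 2; ++i)
        for (int j = 0; j < 2; ++j)
            assert(us[i] != (unsigned int)ss[j]); /* never equal */

    puts("u != s holds for every pair allowed by the condition");
    return 0;
}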
Moreover, consider this:
int foo(unsigned int u, signed int s) {
  if (u > INT_MAX && u <= UINT_MAX / 2 + 4 && u != UINT_MAX / 2 + 2 &&
      u != UINT_MAX / 2 + 3 && s >= INT_MIN + 1 && s <= INT_MIN + 2) {
    if (u != s) {
      return (u != s);
    }
  }
  __builtin_unreachable();
}
Now both clang and GCC can see that the nested if branch is always taken, and optimize accordingly.
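Since the only other path in foo runs into __builtin_unreachable(), the optimized function effectively reduces to the sketch below. This is a hand-written illustration of what the fold amounts to, not verified compiler output:

int foo(unsigned int u, signed int s) {
    /* The nested u != s check folds to true, and the path that falls
       through to __builtin_unreachable() can be assumed never taken,
       so nothing is left but the constant. */
    (void)u;
    (void)s;
    return 1;
}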