Commit e34266ac, authored 6 months ago by Stephen Rothwell

Merge branch 'bitmap-for-next' of https://github.com/norov/linux.git

Parents: 77c69f44, af206905
Showing 3 changed files with 34 additions and 13 deletions:

  arch/riscv/include/asm/bitops.h  +10 −10
  include/linux/bits.h              +2  −3
  include/linux/compiler.h         +22  −0

arch/riscv/include/asm/bitops.h  +10 −10
@@ -228,7 +228,7 @@ static __always_inline int variable_fls(unsigned int x)
  *
  * This operation may be reordered on other architectures than x86.
  */
-static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit(or, __NOP, nr, addr);
 }
@@ -240,7 +240,7 @@ static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
  *
  * This operation can be reordered on other architectures other than x86.
  */
-static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit(and, __NOT, nr, addr);
 }
@@ -253,7 +253,7 @@ static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit(xor, __NOP, nr, addr);
 }
@@ -270,7 +270,7 @@ static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void arch_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_set_bit(int nr, volatile unsigned long *addr)
 {
 	__op_bit(or, __NOP, nr, addr);
 }
@@ -284,7 +284,7 @@ static inline void arch_set_bit(int nr, volatile unsigned long *addr)
  * on non x86 architectures, so if you are writing portable code,
  * make sure not to rely on its reordering guarantees.
  */
-static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_clear_bit(int nr, volatile unsigned long *addr)
 {
 	__op_bit(and, __NOT, nr, addr);
 }
@@ -298,7 +298,7 @@ static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void arch_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_change_bit(int nr, volatile unsigned long *addr)
 {
 	__op_bit(xor, __NOP, nr, addr);
 }
@@ -311,7 +311,7 @@ static inline void arch_change_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and provides acquire barrier semantics.
  * It can be used to implement bit locks.
  */
-static inline int arch_test_and_set_bit_lock(
+static __always_inline int arch_test_and_set_bit_lock(
 	unsigned long nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
@@ -324,7 +324,7 @@ static inline int arch_test_and_set_bit_lock(
  *
  * This operation is atomic and provides release barrier semantics.
  */
-static inline void arch_clear_bit_unlock(
+static __always_inline void arch_clear_bit_unlock(
 	unsigned long nr, volatile unsigned long *addr)
 {
 	__op_bit_ord(and, __NOT, nr, addr, .rl);
@@ -345,13 +345,13 @@ static inline void arch_clear_bit_unlock(
  * non-atomic property here: it's a lot more instructions and we still have to
  * provide release semantics anyway.
  */
-static inline void arch___clear_bit_unlock(
+static __always_inline void arch___clear_bit_unlock(
 	unsigned long nr, volatile unsigned long *addr)
 {
 	arch_clear_bit_unlock(nr, addr);
 }
 
-static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
+static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
 		volatile unsigned long *addr)
 {
 	unsigned long res;
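
Every hunk above makes the same mechanical change: the arch_* bit helpers go from plain "inline" to "__always_inline". The distinction matters because "inline" is only a hint. A compile-only sketch of the difference; the __always_inline macro mirrors the kernel's definition in include/linux/compiler_attributes.h, while the bit_mask_* helpers are invented for illustration:

/* Sketch: inline vs. __always_inline.  The macro mirrors the kernel's
 * definition in include/linux/compiler_attributes.h; helpers are made up. */
#include <stdio.h>

#define __always_inline inline __attribute__((__always_inline__))

/* Plain "inline" is only a hint: at -O0 or under heavy instrumentation the
 * compiler may still emit an out-of-line copy and a real call, so the
 * result cannot reliably participate in compile-time evaluation. */
static inline unsigned long bit_mask_hint(int nr)
{
	return 1UL << nr;
}

/* __always_inline is a hard requirement: the body is expanded at every
 * call site regardless of optimization level. */
static __always_inline unsigned long bit_mask_forced(int nr)
{
	return 1UL << nr;
}

int main(void)
{
	printf("%lx %lx\n", bit_mask_hint(3), bit_mask_forced(3));
	return 0;
}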

include/linux/bits.h  +2 −3
@@ -20,9 +20,8 @@
  */
 #if !defined(__ASSEMBLY__)
 #include <linux/build_bug.h>
-#define GENMASK_INPUT_CHECK(h, l) \
-	(BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
-		__is_constexpr((l) > (h)), (l) > (h), 0)))
+#include <linux/compiler.h>
+#define GENMASK_INPUT_CHECK(h, l) BUILD_BUG_ON_ZERO(const_true((l) > (h)))
 #else
 /*
  * BUILD_BUG_ON_ZERO is not available in h files included from asm files,
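
With the new shape, a GENMASK() call whose bounds are provably swapped fails the build via a negative bitfield width, while non-constant bounds degrade gracefully to no check. A standalone, compile-only sketch: BUILD_BUG_ON_ZERO() and __is_constexpr() follow the kernel definitions, but __GENMASK() is a simplified stand-in that assumes a 64-bit unsigned long.

/* Standalone sketch of the new GENMASK_INPUT_CHECK() shape.
 * BUILD_BUG_ON_ZERO() and __is_constexpr() follow the kernel definitions;
 * __GENMASK() is a simplified stand-in assuming 64-bit unsigned long. */
#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
#define const_true(x)	__builtin_choose_expr(__is_constexpr(x), x, false)
#define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))

#define GENMASK_INPUT_CHECK(h, l) BUILD_BUG_ON_ZERO(const_true((l) > (h)))
#define __GENMASK(h, l)	((~0UL - (1UL << (l)) + 1) & (~0UL >> (63 - (h))))
#define GENMASK(h, l)	(GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))

static unsigned long ok = GENMASK(7, 0);	/* 0xff: bounds are sane */
/* static unsigned long bad = GENMASK(0, 7);
 * l > h is provably true at compile time, so BUILD_BUG_ON_ZERO() produces
 * a negative bitfield width and the build fails. */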

include/linux/compiler.h  +22 −0
@@ -292,6 +292,28 @@ static inline void *offset_to_ptr(const int *off)
  */
 #define statically_true(x) (__builtin_constant_p(x) && (x))
 
+/*
+ * Similar to statically_true() but produces a constant expression
+ *
+ * To be used in conjunction with macros, such as BUILD_BUG_ON_ZERO(),
+ * which require their input to be a constant expression and for which
+ * statically_true() would otherwise fail.
+ *
+ * This is a trade-off: const_true() requires all its operands to be
+ * compile time constants. Else, it would always returns false even on
+ * the most trivial cases like:
+ *
+ *   true || non_const_var
+ *
+ * On the opposite, statically_true() is able to fold more complex
+ * tautologies and will return true on expressions such as:
+ *
+ *   !(non_const_var * 8 % 4)
+ *
+ * For the general case, statically_true() is better.
+ */
+#define const_true(x) __builtin_choose_expr(__is_constexpr(x), x, false)
+
 /*
  * This is needed in functions which generate the stack canary, see
  * arch/x86/kernel/smpboot.c::start_secondary() for an example.
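
The trade-off described in the new comment can be seen directly. A compile-only sketch: the three macros follow the kernel definitions (__is_constexpr() is copied from the kernel, the other two from this diff), while check() is an invented driver.

/* Sketch of the const_true() vs. statically_true() trade-off; check()
 * is an invented driver, the macros follow the kernel. */
#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
#define statically_true(x)	(__builtin_constant_p(x) && (x))
#define const_true(x)		__builtin_choose_expr(__is_constexpr(x), x, false)

int check(int non_const_var)
{
	/* The optimizer can usually fold this tautology, so at -O2
	 * statically_true() evaluates to 1 -- but the result is not a
	 * constant expression and cannot feed BUILD_BUG_ON_ZERO(). */
	int a = statically_true(!(non_const_var * 8 % 4));

	/* const_true() is a constant expression, but it bails out to
	 * false as soon as any operand is non-constant, even in this
	 * trivially true case... */
	int b = const_true(1 || non_const_var);		/* 0 */

	/* ...and only evaluates the predicate when it is fully constant. */
	int c = const_true(10 > 4);			/* 1 */

	return a + b + c;
}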