// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
/* Copyright 2013-2019 IBM Corp. */

#ifndef __LOCK_H
#define __LOCK_H

#include <stdbool.h>
#include <processor.h>
#include <cmpxchg.h>
#include <ccan/list/list.h>
#include <ccan/str/str.h>

#ifdef DEBUG_LOCKS_BACKTRACE
#include <stack.h>

#define LOCKS_BACKTRACE_MAX_ENTS 60
#endif

struct lock {
	/* Lock value has bit 63 as the lock bit and the PIR of the
	 * owner in the top 32 bits
	 */
	uint64_t lock_val;

	/*
	 * Set to true if the lock is involved in the console flush
	 * path, in which case taking it will suspend console flushing
	 */
	bool in_con_path;

	/* file/line of lock owner */
	const char *owner;

#ifdef DEBUG_LOCKS_BACKTRACE
	struct bt_entry bt_buf[LOCKS_BACKTRACE_MAX_ENTS];
	struct bt_metadata bt_metadata;
#endif

	/* linkage in per-cpu list of owned locks */
	struct list_node list;
};
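
/*
 * Illustrative sketch of the lock_val encoding described above, not
 * code from this file. Assuming IBM bit numbering (bit 63 is the
 * least significant bit), a hypothetical owner PIR "pir" would be
 * packed and recovered as:
 *
 *	uint64_t val = ((uint64_t)pir << 32) | 1;	// lock bit set
 *	uint32_t owner_pir = val >> 32;			// owner's PIR
 */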

/* Initializer... not ideal but works for now. If we need different
 * values for the fields and/or start getting warnings we'll have to
 * play macro tricks
 */
#define LOCK_UNLOCKED { 0 }

/* Note vs. libc and locking:
 *
 * The printf() family of functions use stack-based temporary
 * buffers and call into the underlying skiboot read() and write(),
 * which use a console lock.
 *
 * The underlying FSP console code will thus operate within that
 * console lock.
 *
 * The libc does *NOT* lock stream buffer operations, so don't
 * try to scanf() from the same FILE from two different processors.
 *
 * FSP operations are locked using an FSP lock, so all processors
 * can safely call the FSP API.
 *
 * Note about ordering:
 *
 * lock() is a full memory barrier. unlock() is a lwsync.
 *
 */
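
/*
 * Illustrative sketch of what the ordering note above guarantees
 * ("shared_data" and "shared_lock" are hypothetical names, not part
 * of this API):
 *
 *	lock(&shared_lock);	// full barrier: critical-section
 *				// accesses can't be reordered
 *				// before the lock is taken
 *	shared_data++;
 *	unlock(&shared_lock);	// lwsync: the update is visible
 *				// before the lock is released
 */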

extern bool bust_locks;

static inline void init_lock(struct lock *l)
{
	*l = (struct lock)LOCK_UNLOCKED;
}
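
/*
 * Illustrative usage sketch (hypothetical names, not code from this
 * file): statically allocated locks can use the initializer
 * directly, while dynamically allocated ones go through init_lock().
 *
 *	static struct lock my_lock = LOCK_UNLOCKED;
 *
 *	struct foo *f = alloc_foo();
 *	init_lock(&f->lock);
 */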

#define LOCK_CALLER	__FILE__ ":" stringify(__LINE__)

#define try_lock(l)		try_lock_caller(l, LOCK_CALLER)
#define lock(l)			lock_caller(l, LOCK_CALLER)
#define lock_recursive(l)	lock_recursive_caller(l, LOCK_CALLER)
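
/*
 * Illustrative expansion sketch: with the wrappers above, a call
 * such as lock(&my_lock) at line 123 of a hypothetical core/foo.c
 * becomes:
 *
 *	lock_caller(&my_lock, "core/foo.c:123");
 *
 * so the owner field in struct lock records where the lock was
 * taken.
 */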

extern bool try_lock_caller(struct lock *l, const char *caller);
extern void lock_caller(struct lock *l, const char *caller);
extern void unlock(struct lock *l);

extern bool lock_held_by_me(struct lock *l);
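
/*
 * Sketch of lock_held_by_me() as a precondition check (hypothetical
 * names): helpers that require their caller to hold a lock can
 * assert it.
 *
 *	static void queue_push(struct queue *q, struct item *i)
 *	{
 *		assert(lock_held_by_me(&q->lock));
 *		list_add_tail(&q->items, &i->link);
 *	}
 */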

/* The debug output can happen while the FSP lock is held, so we need
 * some kind of recursive lock support here. I don't want all locks to
 * be recursive though, thus the caller needs to explicitly call
 * lock_recursive, which returns false if the lock was already held by
 * this CPU. If it returns true, then the caller shall release it when
 * done.
 */
extern bool lock_recursive_caller(struct lock *l, const char *caller);
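
/*
 * Illustrative recursive-lock pattern per the comment above
 * ("fsp_lock" and "do_debug_output" are hypothetical names): only
 * release the lock if this call actually took it.
 *
 *	bool taken = lock_recursive(&fsp_lock);
 *	do_debug_output();
 *	if (taken)
 *		unlock(&fsp_lock);
 */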

/* Called after per-cpu data structures are available */
extern void init_locks(void);

/* Dump the list of locks held by this CPU */
extern void dump_locks_list(void);

/* Clean all locks held by this CPU (and warn if any) */
extern void drop_my_locks(bool warn);

#endif /* __LOCK_H */