/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#define ATOMIC_PRIVATE 1
#define LOCK_PRIVATE 1

#include <stdint.h>
#include <kern/thread.h>
#include <machine/atomic.h>
#include <kern/locks.h>
#include <kern/lock_stat.h>
#include <machine/machine_cpu.h>
#include <os/atomic_private.h>
#include <kern/hvg_hypercall.h>

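/*
 * One bit per (v)cpu, set while that vcpu is halted waiting for a kick.
 * hw_lck_ticket_unlock_kick_pv() below scans this mask to find the
 * waiter whose ticket is now being served.
 */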
static _Atomic cpumap_t ticket_waitmask_pv;

/*
 * The lock has just been unlocked, i.e. we incremented cticket, so it's
 * ready for acquisition by an acquirer whose nticket == cticket.
 * Find the waiting vcpu and kick it out of its passive state.
 */
__attribute__((noinline))
void
hw_lck_ticket_unlock_kick_pv(hw_lck_ticket_t *lck, uint8_t ticket)
{
	const cpumap_t wmask = os_atomic_load(&ticket_waitmask_pv, acquire);
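	/*
	 * The mask is only a snapshot: a waiter that registers after this
	 * load gets no kick from us, but it re-checks cticket after setting
	 * its bit (see hw_lck_ticket_lock_wait_pv) and so never misses the
	 * unlock.
	 */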

	percpu_foreach_base(base) {
		const processor_t ps = PERCPU_GET_WITH_BASE(base, processor);
		const uint32_t tcpunum = ps->cpu_id;

		if (!bit_test(wmask, tcpunum)) {
			continue; // vcpu not currently waiting for a kick
		}
		const lck_tktlock_pv_info_t ltpi = PERCPU_GET_WITH_BASE(base,
		    lck_tktlock_pv_info);

		const hw_lck_ticket_t *wlck = os_atomic_load(&ltpi->ltpi_lck,
		    acquire);
		if (wlck != lck) {
			continue; // vcpu waiting on a different lock
		}

		const uint8_t wt = os_atomic_load(&ltpi->ltpi_wt, acquire);
		if (wt != ticket) {
			continue; // vcpu doesn't have the right ticket
		}

		hvg_hc_kick_cpu(tcpunum);
		PVTICKET_STATS_INC(kick_count);
		break;
	}
}
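
/*
 * Illustrative sketch only (not compiled): one plausible unlock slow path
 * that would end up calling hw_lck_ticket_unlock_kick_pv(). The function
 * name and the HW_LCK_TICKET_LOCK_INC constant below are hypothetical;
 * the real call site lives in the generic ticket lock unlock code.
 */
#if 0
static void
hw_lck_ticket_unlock_slow_pv(hw_lck_ticket_t *lck)
{
	/* Advance "now serving" to drop the lock. */
	const uint8_t cticket = os_atomic_add(&lck->cticket,
	    HW_LCK_TICKET_LOCK_INC, release);	/* hypothetical constant */

	/* A waiter published the PV wait flag before halting: kick it. */
	if (cticket & HW_LCK_TICKET_LOCK_PVWAITFLAG) {
		hw_lck_ticket_unlock_kick_pv(lck,
		    cticket & ~HW_LCK_TICKET_LOCK_PVWAITFLAG);
	}
}
#endif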


/*
 * The current vcpu wants 'lck' but the vcpu holding it may not be running.
 * Wait for it to kick us (above), just /after/ it increments cticket to
 * drop the lock.
 *
 * Other states are possible, e.g. the lock may have been unlocked just
 * before this routine ran, in which case no kick was sent because we
 * hadn't yet initialized the per-cpu wait data. Or we may be sent a kick
 * immediately after storing the wait data, but before halting.
 *
 * All we really know is that when we get here, spinning has been
 * unsuccessful.
 */
__attribute__((noinline))
void
hw_lck_ticket_lock_wait_pv(hw_lck_ticket_t *lck, uint8_t mt)
{
	/*
	 * Disable interrupts so we don't lose the kick.
	 * (Also prevents collisions with ticket lock
	 * acquisition in an interrupt handler.)
	 */

	const boolean_t istate = ml_set_interrupts_enabled(FALSE);

	/* Record the ticket + the lock this cpu is waiting for */

	assert(!preemption_enabled());
	lck_tktlock_pv_info_t ltpi = PERCPU_GET(lck_tktlock_pv_info);

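	/*
	 * Clear the lock pointer first: a concurrent kicker matching on the
	 * old pointer must never pair it with the new ticket value below.
	 */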
	os_atomic_store(&ltpi->ltpi_lck, NULL, release);
	os_atomic_store(&ltpi->ltpi_wt, mt, release);
	os_atomic_store(&ltpi->ltpi_lck, lck, release);

	/* Mark this cpu as eligible for kicking */

	const cpumap_t kickmask = BIT(cpu_number());
	os_atomic_or(&ticket_waitmask_pv, kickmask, acq_rel);

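	/*
	 * The caller's ticket ('mt') never carries the PV wait flag; cticket
	 * may, which is why it is masked off before the comparison below.
	 */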
	assert((mt & HW_LCK_TICKET_LOCK_PVWAITFLAG) == 0);

	/* Check the "now serving" field one last time */

	const uint8_t cticket = os_atomic_load(&lck->cticket, acquire);
	const uint8_t ccount = cticket & ~HW_LCK_TICKET_LOCK_PVWAITFLAG;

	if (__probable(ccount != mt)) {
		PVTICKET_STATS_INC(wait_count);
		assert(cticket & HW_LCK_TICKET_LOCK_PVWAITFLAG);

		/* wait for a kick (or any other interrupt) */
		hvg_hc_wait_for_kick(istate);
		/*
		 * Note: if interrupts were enabled at entry to this routine,
		 * they'll be re-enabled here even though we disabled them
		 * above.
		 */
	} else {
		/* just return to the caller to claim the ticket */
		PVTICKET_STATS_INC(already_count);
	}

	os_atomic_andnot(&ticket_waitmask_pv, kickmask, acq_rel);
	os_atomic_store(&ltpi->ltpi_lck, NULL, release);

	(void) ml_set_interrupts_enabled(istate);

	assert(!preemption_enabled());
}

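/*
 * Illustrative sketch only (not compiled): one plausible contended
 * acquisition path built around hw_lck_ticket_lock_wait_pv(). The helper
 * name and the PV_SPIN_TRIES bound are hypothetical; only the
 * wait-function contract is taken from the code above.
 */
#if 0
static void
hw_lck_ticket_lock_slow_pv(hw_lck_ticket_t *lck, uint8_t mt)
{
	for (;;) {
		/* Spin a bounded amount first; the holder may be running. */
		for (int i = 0; i < PV_SPIN_TRIES; i++) {	/* hypothetical bound */
			const uint8_t cticket = os_atomic_load(&lck->cticket,
			    acquire);
			if ((cticket & ~HW_LCK_TICKET_LOCK_PVWAITFLAG) == mt) {
				return;	/* now serving our ticket */
			}
			cpu_pause();
		}

		/*
		 * Spinning failed: publish the PV wait flag so the eventual
		 * unlocker knows to kick, then halt until kicked (or until
		 * any other interrupt).
		 */
		os_atomic_or(&lck->cticket, HW_LCK_TICKET_LOCK_PVWAITFLAG,
		    relaxed);
		hw_lck_ticket_lock_wait_pv(lck, mt);
	}
}
#endif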