Go runtime source listing: src/runtime/race_ppc64le.s
Race-detector support assembly for linux/ppc64le (Plan 9 Go assembler syntax).
Package: runtime
1 // Copyright 2018 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 //go:build race
6 // +build race
7
8 #include "go_asm.h"
9 #include "go_tls.h"
10 #include "funcdata.h"
11 #include "textflag.h"
12 #include "asm_ppc64x.h"
13
14 // The following functions allow calling the clang-compiled race runtime directly
15 // from Go code without going all the way through cgo.
16 // First, it's much faster (up to 50% speedup for real Go programs).
17 // Second, it eliminates race-related special cases from cgocall and scheduler.
18 // Third, in long-term it will allow to remove cyclic runtime/race dependency on cmd/go.
19
20 // A brief recap of the ppc64le calling convention.
21 // Arguments are passed in R3, R4, R5 ...
22 // SP must be 16-byte aligned.
23
24 // Note that for ppc64x, LLVM follows the standard ABI and
25 // expects arguments in registers, so these functions move
26 // the arguments from storage to the registers expected
27 // by the ABI.
28
29 // When calling from Go to Clang tsan code:
30 // R3 is the 1st argument and is usually the ThreadState*
31 // R4-? are the 2nd, 3rd, 4th, etc. arguments
32
33 // When calling racecalladdr:
34 // R8 is the call target address
35
36 // The race ctx is passed in R3 and loaded in
37 // racecalladdr.
38 //
39 // The sequence used to get the race ctx:
40 // MOVD runtime·tls_g(SB), R10 // Address of TLS variable
41 // MOVD 0(R10), g // g = R30
42 // MOVD g_racectx(g), R3 // racectx == ThreadState
43
// func runtime·raceread(addr uintptr)
// Called from instrumented Go code to report a read of addr.
// Tail-calls racecalladdr<>, which expects the tsan target in R8 and
// the accessed address in R4, and loads the race ctx into R3 itself.
TEXT runtime·raceread(SB), NOSPLIT, $0-8
	MOVD	addr+0(FP), R4
	MOVD	LR, R5	// pc inside the instrumented caller (its BL here set LR)
	// void __tsan_read(ThreadState *thr, void *addr, void *pc);
	MOVD	$__tsan_read(SB), R8
	BR	racecalladdr<>(SB)

// func runtime·RaceRead(addr uintptr)
// Exported alias. Tail-call (BR) leaves LR untouched, so raceread still
// reports the original caller's pc.
TEXT runtime·RaceRead(SB), NOSPLIT, $0-8
	BR	runtime·raceread(SB)
55
// func runtime·racereadpc(addr, callpc, pc uintptr)
// Like raceread, but the caller supplies both pcs explicitly instead of
// deriving the pc from LR.
TEXT runtime·racereadpc(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	callpc+8(FP), R5
	MOVD	pc+16(FP), R6
	// void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD	$__tsan_read_pc(SB), R8
	BR	racecalladdr<>(SB)
64
// func runtime·racewrite(addr uintptr)
// Called from instrumented Go code to report a write of addr.
// Mirror image of raceread; see racecalladdr<> for the register contract.
TEXT runtime·racewrite(SB), NOSPLIT, $0-8
	MOVD	addr+0(FP), R4
	MOVD	LR, R5	// caller has set LR via BL inst
	// void __tsan_write(ThreadState *thr, void *addr, void *pc);
	MOVD	$__tsan_write(SB), R8
	BR	racecalladdr<>(SB)

// func runtime·RaceWrite(addr uintptr)
// Exported alias. JMP is the generic Go-asm spelling of an unconditional
// branch (same effect as the BR used by RaceRead).
TEXT runtime·RaceWrite(SB), NOSPLIT, $0-8
	JMP	runtime·racewrite(SB)
76
// func runtime·racewritepc(addr, callpc, pc uintptr)
// Like racewrite, but the caller supplies both pcs explicitly.
TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	callpc+8(FP), R5
	MOVD	pc+16(FP), R6
	// void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD	$__tsan_write_pc(SB), R8
	BR	racecalladdr<>(SB)
85
// func runtime·racereadrange(addr, size uintptr)
// Called from instrumented Go code to report a read of [addr, addr+size).
TEXT runtime·racereadrange(SB), NOSPLIT, $0-16
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
	MOVD	LR, R6	// pc inside the instrumented caller
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_read_range(SB), R8
	BR	racecalladdr<>(SB)
95
// func runtime·racereadrangepc1(addr, size, pc uintptr)
// Range-read report with an explicitly supplied pc.
TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
	MOVD	pc+16(FP), R6
	// pc points at a call instruction; tsan wants a return address, so
	// advance past the 4-byte instruction.
	ADD	$4, R6
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_read_range(SB), R8
	BR	racecalladdr<>(SB)
105
// func runtime·RaceReadRange(addr, size uintptr)
// Exported alias; tail-calls the internal implementation.
TEXT runtime·RaceReadRange(SB), NOSPLIT, $0-16
	BR	runtime·racereadrange(SB)
108
// func runtime·racewriterange(addr, size uintptr)
// Called from instrumented Go code to report a write of [addr, addr+size).
TEXT runtime·racewriterange(SB), NOSPLIT, $0-16
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
	MOVD	LR, R6	// pc inside the instrumented caller
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_write_range(SB), R8
	BR	racecalladdr<>(SB)

// func runtime·RaceWriteRange(addr, size uintptr)
// Exported alias; tail-calls the internal implementation.
TEXT runtime·RaceWriteRange(SB), NOSPLIT, $0-16
	BR	runtime·racewriterange(SB)
121
// func runtime·racewriterangepc1(addr, size, pc uintptr)
// Range-write report with an explicitly supplied pc.
// Called from instrumented Go code.
TEXT runtime·racewriterangepc1(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
	MOVD	pc+16(FP), R6
	// pc points at a call instruction; advance past the 4-byte
	// instruction so tsan sees a return address (same as racereadrangepc1).
	ADD	$4, R6
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_write_range(SB), R8
	BR	racecalladdr<>(SB)
132
// Call a __tsan function from Go code, but only if the reported address
// lies in memory the race detector shadows.
// On entry:
//   R8 = tsan function address (call target)
//   R4 = address being accessed (checked below)
//   R5, R6 = remaining tsan arguments, already set by the caller
// R3 (*ThreadState, a.k.a. g_racectx from g) is loaded here, then control
// tail-branches into racecall<>.
TEXT racecalladdr<>(SB), NOSPLIT, $0-0
	MOVD	runtime·tls_g(SB), R10	// address of the TLS slot holding g
	MOVD	0(R10), g	// g = R30
	MOVD	g_racectx(g), R3	// goroutine context == ThreadState*
	// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
	MOVD	runtime·racearenastart(SB), R9
	CMP	R4, R9
	BLT	data	// addr below the heap arena: try the data range
	MOVD	runtime·racearenaend(SB), R9
	CMP	R4, R9
	BLT	call	// arenastart <= addr < arenaend: shadowed, make the call
data:
	MOVD	runtime·racedatastart(SB), R9
	CMP	R4, R9
	BLT	ret	// below the data range: not shadowed, skip the call
	MOVD	runtime·racedataend(SB), R9
	CMP	R4, R9
	BGT	ret	// NOTE(review): BGT admits addr == racedataend as in-range — confirm intended
call:
	// Careful!! racecall will save LR on its
	// stack, which is OK as long as racecalladdr
	// doesn't change in a way that generates a stack.
	// racecall should return to the caller of
	// racecalladdr.
	BR	racecall<>(SB)
ret:
	RET
166
// func runtime·racefuncenter(pc uintptr)
// Called from instrumented Go code on function entry.
TEXT runtime·racefuncenter(SB), NOSPLIT, $0-8
	MOVD	callpc+0(FP), R8	// R8 = caller pc, per racefuncenter<>'s contract
	BR	racefuncenter<>(SB)
172
// Common code for racefuncenter.
// On entry R8 = caller's pc (set by the caller; note the stale upstream
// comment mentioned R11, but the code below reads R8).
TEXT racefuncenter<>(SB), NOSPLIT, $0-0
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g
	MOVD	g_racectx(g), R3	// goroutine racectx aka *ThreadState
	MOVD	R8, R4	// caller pc set by caller in R8
	// void __tsan_func_enter(ThreadState *thr, void *pc);
	MOVD	$__tsan_func_enter(SB), R8
	BR	racecall<>(SB)
	RET	// unreachable: the BR above does not return here
184
// func runtime·racefuncexit()
// Called from instrumented Go code on function exit.
TEXT runtime·racefuncexit(SB), NOSPLIT, $0-0
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g
	MOVD	g_racectx(g), R3	// goroutine racectx aka *ThreadState
	// void __tsan_func_exit(ThreadState *thr);
	MOVD	$__tsan_func_exit(SB), R8
	BR	racecall<>(SB)
194
// Atomic operations for the sync/atomic package.
// Each stub loads the __tsan entry point into R8, points R6 at the
// caller's argument list on the stack, and branches to racecallatomic<>,
// which fills in R3 (ThreadState*), R4 and R5.

// Load atomic in tsan
TEXT sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
	GO_ARGS
	// void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_load(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)
	RET	// unreachable: BR above does not return

TEXT sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
	GO_ARGS
	// void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic64_load(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)
	RET	// unreachable: BR above does not return

// The unsigned/uintptr/pointer variants share the signed implementations:
// the in-memory representation is identical.
TEXT sync∕atomic·LoadUint32(SB), NOSPLIT, $0-12
	GO_ARGS
	BR	sync∕atomic·LoadInt32(SB)

TEXT sync∕atomic·LoadUint64(SB), NOSPLIT, $0-16
	GO_ARGS
	BR	sync∕atomic·LoadInt64(SB)

TEXT sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-16
	GO_ARGS
	BR	sync∕atomic·LoadInt64(SB)

TEXT sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
	GO_ARGS
	BR	sync∕atomic·LoadInt64(SB)
232
// Store atomic in tsan
TEXT sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
	GO_ARGS
	// void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_store(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
	GO_ARGS
	// void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic64_store(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)

// Unsigned/uintptr variants reuse the signed implementations (same
// in-memory representation).
TEXT sync∕atomic·StoreUint32(SB), NOSPLIT, $0-12
	GO_ARGS
	BR	sync∕atomic·StoreInt32(SB)

TEXT sync∕atomic·StoreUint64(SB), NOSPLIT, $0-16
	GO_ARGS
	BR	sync∕atomic·StoreInt64(SB)

TEXT sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
	GO_ARGS
	BR	sync∕atomic·StoreInt64(SB)
259
// Swap in tsan
TEXT sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	// void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_exchange(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	// void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a)
	MOVD	$__tsan_go_atomic64_exchange(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)

// Unsigned/uintptr variants reuse the signed implementations (same
// in-memory representation).
TEXT sync∕atomic·SwapUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	BR	sync∕atomic·SwapInt32(SB)

TEXT sync∕atomic·SwapUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	BR	sync∕atomic·SwapInt64(SB)

TEXT sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	BR	sync∕atomic·SwapInt64(SB)
286
// Add atomic in tsan
TEXT sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	// void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_fetch_add(SB), R8
	// NOTE(review): $64 here vs $32 in the BR-based stubs above —
	// presumably because the BL below makes this function non-leaf so it
	// gets its own fixed frame; confirm against FIXED_FRAME layout.
	ADD	$64, R1, R6	// addr of caller's 1st arg
	BL	racecallatomic<>(SB)
	// The tsan fetch_add result is not as expected by Go
	// (Go's Add returns the post-add value), so the 'add'
	// must be added to the result tsan stored in ret.
	MOVW	add+8(FP), R3
	MOVW	ret+16(FP), R4
	ADD	R3, R4, R3
	MOVW	R3, ret+16(FP)	// ret = tsan result + add
	RET

TEXT sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	// void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic64_fetch_add(SB), R8
	ADD	$64, R1, R6	// addr of caller's 1st arg (see note in AddInt32)
	BL	racecallatomic<>(SB)
	// The tsan fetch_add result is not as expected by Go,
	// so the 'add' must be added to the result.
	MOVD	add+8(FP), R3
	MOVD	ret+16(FP), R4
	ADD	R3, R4, R3
	MOVD	R3, ret+16(FP)
	RET

// Unsigned/uintptr variants reuse the signed implementations (same
// in-memory representation).
TEXT sync∕atomic·AddUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	BR	sync∕atomic·AddInt32(SB)

TEXT sync∕atomic·AddUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	BR	sync∕atomic·AddInt64(SB)

TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	BR	sync∕atomic·AddInt64(SB)
327
// CompareAndSwap in tsan
TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
	GO_ARGS
	// void __tsan_go_atomic32_compare_exchange(
	//   ThreadState *thr, uptr cpc, uptr pc, u8 *a)
	MOVD	$__tsan_go_atomic32_compare_exchange(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
	GO_ARGS
	// void __tsan_go_atomic64_compare_exchange(
	//   ThreadState *thr, uptr cpc, uptr pc, u8 *a)
	MOVD	$__tsan_go_atomic64_compare_exchange(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)

// Unsigned/uintptr variants reuse the signed implementations (same
// in-memory representation).
TEXT sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-17
	GO_ARGS
	BR	sync∕atomic·CompareAndSwapInt32(SB)

TEXT sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-25
	GO_ARGS
	BR	sync∕atomic·CompareAndSwapInt64(SB)

TEXT sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
	GO_ARGS
	BR	sync∕atomic·CompareAndSwapInt64(SB)
356
// Common function used to call tsan's atomic functions.
// On entry:
//   R8 = address of the target __tsan_go_atomic* function
//   R6 = address of the incoming Go argument list on the stack
// This routine fills in the remaining arguments per the tsan signature
//   void __tsan_go_atomic*_op(ThreadState *thr, uptr cpc, uptr pc, u8 *a):
//   R3 = *ThreadState (g_racectx from g)
//   R4 = cpc, the caller's pc (loaded from the stack)
//   R5 = pc, the atomic function being called (R8)
TEXT racecallatomic<>(SB), NOSPLIT, $0-0
	// Trigger SIGSEGV early if address passed to atomic function is bad.
	MOVD	(R6), R7	// 1st arg is addr
	MOVD	(R7), R9	// segv here if addr is bad
	// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
	MOVD	runtime·racearenastart(SB), R9
	CMP	R7, R9
	BLT	racecallatomic_data	// below the arena: try the data range
	MOVD	runtime·racearenaend(SB), R9
	CMP	R7, R9
	BLT	racecallatomic_ok	// inside the arena: shadowed
racecallatomic_data:
	MOVD	runtime·racedatastart(SB), R9
	CMP	R7, R9
	BLT	racecallatomic_ignore	// below data range: unshadowed
	MOVD	runtime·racedataend(SB), R9
	CMP	R7, R9
	BGE	racecallatomic_ignore	// at/above data end: unshadowed
racecallatomic_ok:
	// Addr is within the good range, call the atomic function.
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g
	MOVD	g_racectx(g), R3	// goroutine racectx aka *ThreadState
	MOVD	R8, R5	// pc = the tsan function being called
	MOVD	(R1), R4	// cpc = caller pc from stack
	BL	racecall<>(SB)	// BL (not BR) to maintain stack consistency
	RET
racecallatomic_ignore:
	// Addr is outside the good range.
	// Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op.
	// An attempt to synchronize on the address would cause crash.
	MOVD	R8, R15	// save the original function
	MOVD	R6, R17	// save the original arg list addr
	MOVD	$__tsan_go_ignore_sync_begin(SB), R8	// func addr to call
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g
	MOVD	g_racectx(g), R3	// goroutine context
	BL	racecall<>(SB)
	MOVD	R15, R8	// restore the original function
	MOVD	R17, R6	// restore arg list addr
	// Call the atomic function.
	// racecall calls LLVM race code which might clobber R30 (g),
	// so reload g from TLS first.
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g

	MOVD	g_racectx(g), R3
	// BUGFIX: match the register assignment on the _ok path above.
	// Previously R4/R5 were swapped here (function addr in R4, caller pc
	// in R5), passing the pcs to tsan in the wrong order on this path.
	MOVD	R8, R5	// pc = the tsan function being called
	MOVD	(R1), R4	// cpc = caller pc from stack
	BL	racecall<>(SB)
	// Call __tsan_go_ignore_sync_end.
	MOVD	$__tsan_go_ignore_sync_end(SB), R8
	MOVD	g_racectx(g), R3	// g was reloaded by racecall's epilogue, so still valid
	BL	racecall<>(SB)
	RET
417
// void runtime·racecall(void(*f)(...), ...)
// Calls C function f from race runtime and passes up to 4 arguments to it.
// The arguments are never heap-object-preserving pointers, so we pretend
// there are no arguments (frame declared $0-0; args read via FP offsets).
TEXT runtime·racecall(SB), NOSPLIT, $0-0
	MOVD	fn+0(FP), R8	// R8 = target function, per racecall<>'s contract
	MOVD	arg0+8(FP), R3
	MOVD	arg1+16(FP), R4
	MOVD	arg2+24(FP), R5
	MOVD	arg3+32(FP), R6
	JMP	racecall<>(SB)
428
// racecall<>: switch to g0's stack (unless already on it) and call the
// clang-compiled tsan function whose address is in R8. R3-R6 arguments
// were loaded by the caller; returns on the caller's stack.
TEXT racecall<>(SB), NOSPLIT, $0-0
	// Save LR in both the Go and C ABI slots of the current frame.
	MOVD	LR, R10
	MOVD	R10, 0(R1)	// Go expectation
	MOVD	R10, 16(R1)	// C ABI
	// Get info from the current goroutine
	MOVD	runtime·tls_g(SB), R10	// g offset in TLS
	MOVD	0(R10), g
	MOVD	g_m(g), R7	// m for g
	MOVD	R1, R16	// save SP; R16 is callee-saved, preserved across C call
	MOVD	m_g0(R7), R10	// g0 for m
	CMP	R10, g	// already on g0?
	BEQ	call	// yes: no stack switch needed
	MOVD	(g_sched+gobuf_sp)(R10), R1	// no: switch R1 to g0's saved SP
call:
	MOVD	R8, CTR	// R8 = address of function to call
	MOVD	R8, R12	// function entry address in R12, expected by PPC64 ABI
	BL	(CTR)
	XOR	R0, R0	// re-zero R0 on return from Clang code (Go asm relies on R0 == 0)
	MOVD	R16, R1	// restore R1; R16 nonvol in Clang
	MOVD	runtime·tls_g(SB), R10	// reload g: C code may have clobbered R30
	MOVD	0(R10), g
	MOVD	16(R1), R10	// LR was saved away, restore for return
	MOVD	R10, LR
	RET
456
// C->Go callback thunk that allows to call runtime·racesymbolize from C code.
// Direct Go->C race call has only switched SP, finish g->g0 switch by setting correct g.
// The overall effect of Go->C->Go call chain is similar to that of mcall.
// RARG0 (R3) contains command code. RARG1 (R4) contains command-specific context.
// See racecallback for command codes.
TEXT runtime·racecallbackthunk(SB), NOSPLIT, $-8
	// Handle command raceGetProcCmd (0) here.
	// First, code below assumes that we are on curg, while raceGetProcCmd
	// can be executed on g0. Second, it is called frequently, so will
	// benefit from this fast path.
	XOR	R0, R0	// clear R0 since we came from C code
	CMP	R3, $0
	BNE	rest	// any non-zero command takes the full save/restore path
	// Fast path: fetch p.raceprocctx and store it through the R4 pointer.
	// TODO: Don't modify g here since R30 is nonvolatile.
	MOVD	g, R9	// stash caller's R30 (g) so it can be restored below
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g
	MOVD	g_m(g), R3
	MOVD	m_p(R3), R3
	MOVD	p_raceprocctx(R3), R3
	MOVD	R3, (R4)	// *arg = p.raceprocctx
	MOVD	R9, g	// restore caller's R30
	RET

	// Slow path: full transition into Go, similar to what cgo does.
	// Save all C-ABI callee-saved registers, then run racecallback on g0.
rest:
	MOVD	LR, R10	// save link register
	MOVD	R10, 16(R1)
	MOVW	CR, R10	// save condition register
	MOVW	R10, 8(R1)
	MOVDU	R1, -336(R1)	// Allocate frame needed for outargs and register save area

	// Save C-ABI nonvolatile GPRs and FPRs. The offsets here just have to
	// match the restore sequence at 'ret' below (note R14 is parked at 328,
	// after the FP registers).
	MOVD	R14, 328(R1)
	MOVD	R15, 48(R1)
	MOVD	R16, 56(R1)
	MOVD	R17, 64(R1)
	MOVD	R18, 72(R1)
	MOVD	R19, 80(R1)
	MOVD	R20, 88(R1)
	MOVD	R21, 96(R1)
	MOVD	R22, 104(R1)
	MOVD	R23, 112(R1)
	MOVD	R24, 120(R1)
	MOVD	R25, 128(R1)
	MOVD	R26, 136(R1)
	MOVD	R27, 144(R1)
	MOVD	R28, 152(R1)
	MOVD	R29, 160(R1)
	MOVD	g, 168(R1)	// R30
	MOVD	R31, 176(R1)
	FMOVD	F14, 184(R1)
	FMOVD	F15, 192(R1)
	FMOVD	F16, 200(R1)
	FMOVD	F17, 208(R1)
	FMOVD	F18, 216(R1)
	FMOVD	F19, 224(R1)
	FMOVD	F20, 232(R1)
	FMOVD	F21, 240(R1)
	FMOVD	F22, 248(R1)
	FMOVD	F23, 256(R1)
	FMOVD	F24, 264(R1)
	FMOVD	F25, 272(R1)
	FMOVD	F26, 280(R1)
	FMOVD	F27, 288(R1)
	FMOVD	F28, 296(R1)
	FMOVD	F29, 304(R1)
	FMOVD	F30, 312(R1)
	FMOVD	F31, 320(R1)

	// Spill the two incoming arguments into racecallback's Go arg slots.
	MOVD	R3, FIXED_FRAME+0(R1)
	MOVD	R4, FIXED_FRAME+8(R1)

	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g

	MOVD	g_m(g), R7
	MOVD	m_g0(R7), R8
	CMP	g, R8
	BEQ	noswitch	// already on g0, no g switch needed

	MOVD	R8, g	// set g = m->g0

	BL	runtime·racecallback(SB)

	// All registers are clobbered after Go code, reload.
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R10), g

	MOVD	g_m(g), R7
	MOVD	m_curg(R7), g	// restore g = m->curg

	// Restore everything saved above and return to the C caller.
ret:
	MOVD	328(R1), R14
	MOVD	48(R1), R15
	MOVD	56(R1), R16
	MOVD	64(R1), R17
	MOVD	72(R1), R18
	MOVD	80(R1), R19
	MOVD	88(R1), R20
	MOVD	96(R1), R21
	MOVD	104(R1), R22
	MOVD	112(R1), R23
	MOVD	120(R1), R24
	MOVD	128(R1), R25
	MOVD	136(R1), R26
	MOVD	144(R1), R27
	MOVD	152(R1), R28
	MOVD	160(R1), R29
	MOVD	168(R1), g	// R30
	MOVD	176(R1), R31
	FMOVD	184(R1), F14
	FMOVD	192(R1), F15
	FMOVD	200(R1), F16
	FMOVD	208(R1), F17
	FMOVD	216(R1), F18
	FMOVD	224(R1), F19
	FMOVD	232(R1), F20
	FMOVD	240(R1), F21
	FMOVD	248(R1), F22
	FMOVD	256(R1), F23
	FMOVD	264(R1), F24
	FMOVD	272(R1), F25
	FMOVD	280(R1), F26
	FMOVD	288(R1), F27
	FMOVD	296(R1), F28
	FMOVD	304(R1), F29
	FMOVD	312(R1), F30
	FMOVD	320(R1), F31

	ADD	$336, R1	// pop the frame allocated by MOVDU above
	MOVD	8(R1), R10
	MOVFL	R10, $0xff	// restore CR (all fields)
	MOVD	16(R1), R10	// restore saved LR for return
	MOVD	R10, LR
	RET

noswitch:
	BL	runtime·racecallback(SB)
	JMP	ret
597
// tls_g: per-thread TLS slot holding the current g (R30) value,
// so the race-call thunks can recover g after running C code.
GLOBL runtime·tls_g+0(SB), TLSBSS+DUPOK, $8
600
(End of file listing.)