// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build mips64 || mips64le
// +build mips64 mips64le

#include "textflag.h"

// SYNC is the MIPS full memory barrier, emitted as a raw instruction word
// (0x0000000f encodes SYNC) because the assembler has no mnemonic for it.
#define SYNC	WORD $0xf

// bool cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
//
// Frame is $0-17: 8-byte ptr + 4-byte old + 4-byte new + 1-byte result.
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVV	ptr+0(FP), R1		// R1 = ptr
	MOVW	old+8(FP), R2		// R2 = expected value
	MOVW	new+12(FP), R5		// R5 = replacement value
	SYNC				// full barrier: order earlier accesses before the CAS
cas_again:
	MOVV	R5, R3			// SC consumes its source register, so copy each attempt
	LL	(R1), R4		// load-linked: R4 = *ptr, open reservation
	BNE	R2, R4, cas_fail	// *ptr != old: fail without storing
	SC	R3, (R1)		// store-conditional: R3 = 1 on success, 0 if reservation lost
	BEQ	R3, cas_again		// SC failed: retry the whole LL/SC sequence
	MOVV	$1, R1
	MOVB	R1, ret+16(FP)		// return true
	SYNC				// full barrier: order the CAS before later accesses
	RET
cas_fail:
	MOVV	$0, R1			// return false
	JMP	-4(PC)			// jump back to the MOVB that stores the result byte

// bool cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else {
//		return 0;
//	}
//
// 64-bit twin of ·Cas: identical structure, using LLV/SCV doubleword
// load-linked/store-conditional. Frame $0-25 = 8+8+8 args + 1-byte result.
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVV	ptr+0(FP), R1		// R1 = ptr
	MOVV	old+8(FP), R2		// R2 = expected value
	MOVV	new+16(FP), R5		// R5 = replacement value
	SYNC				// full barrier before the CAS
cas64_again:
	MOVV	R5, R3			// SCV consumes its source register; copy each attempt
	LLV	(R1), R4		// load-linked doubleword: R4 = *ptr
	BNE	R2, R4, cas64_fail	// *ptr != old: fail without storing
	SCV	R3, (R1)		// store-conditional: R3 = 1 on success, 0 on lost reservation
	BEQ	R3, cas64_again		// SCV failed: retry
	MOVV	$1, R1
	MOVB	R1, ret+24(FP)		// return true
	SYNC				// full barrier after the CAS
	RET
cas64_fail:
	MOVV	$0, R1			// return false
	JMP	-4(PC)			// jump back to the MOVB that stores the result byte

// The typed variants below are thin trampolines: on mips64, int32/int64/
// uintptr/uint share machine representation with the uint32/uint64
// primitives, so each simply tail-jumps to the shared implementation.

TEXT ·Casint32(SB), NOSPLIT, $0-17
	JMP	·Cas(SB)

TEXT ·Casint64(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)

TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)

// CasRel needs only release ordering; plain ·Cas issues full SYNC
// barriers on both sides, which is strictly stronger.
TEXT ·CasRel(SB), NOSPLIT, $0-17
	JMP	·Cas(SB)

TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16
	JMP	·Load64(SB)

TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16
	JMP	·Load64(SB)

TEXT ·Storeint32(SB), NOSPLIT, $0-12
	JMP	·Store(SB)

TEXT ·Storeint64(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)

TEXT ·Loadint32(SB), NOSPLIT, $0-12
	JMP	·Load(SB)

TEXT ·Loadint64(SB), NOSPLIT, $0-16
	JMP	·Load64(SB)

TEXT ·Xaddint32(SB), NOSPLIT, $0-20
	JMP	·Xadd(SB)

TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)

// bool casp(void **val, void *old, void *new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
//
// Pointers are 64-bit on mips64, so pointer CAS is exactly Cas64.
// (No write barrier here; callers are responsible for GC interaction.)
TEXT ·Casp1(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)

// uint32 xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R2		// R2 = ptr
	MOVW	delta+8(FP), R3		// R3 = delta
	SYNC				// full barrier before the RMW
	LL	(R2), R1		// load-linked: R1 = *ptr
	ADDU	R1, R3, R4		// R4 = *ptr + delta (32-bit, wraps, no trap)
	MOVV	R4, R1			// keep the new value; SC consumes R4
	SC	R4, (R2)		// store-conditional: R4 = 1 on success, 0 on failure
	BEQ	R4, -4(PC)		// SC failed: retry from the LL
	MOVW	R1, ret+16(FP)		// return the new value
	SYNC				// full barrier after the RMW
	RET

// uint64 Xadd64(uint64 volatile *ptr, int64 delta)
// Atomically:
//	*val += delta;
//	return *val;
//
// 64-bit twin of ·Xadd using LLV/SCV and doubleword add.
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R2		// R2 = ptr
	MOVV	delta+8(FP), R3		// R3 = delta
	SYNC				// full barrier before the RMW
	LLV	(R2), R1		// load-linked doubleword: R1 = *ptr
	ADDVU	R1, R3, R4		// R4 = *ptr + delta (64-bit, wraps)
	MOVV	R4, R1			// keep the new value; SCV consumes R4
	SCV	R4, (R2)		// store-conditional: R4 = 1 on success, 0 on failure
	BEQ	R4, -4(PC)		// SCV failed: retry from the LLV
	MOVV	R1, ret+16(FP)		// return the new value
	SYNC				// full barrier after the RMW
	RET

// uint32 Xchg(ptr *uint32, new uint32)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R2		// R2 = ptr
	MOVW	new+8(FP), R5		// R5 = new value

	SYNC				// full barrier before the exchange
	MOVV	R5, R3			// SC consumes its source register; copy each attempt
	LL	(R2), R1		// load-linked: R1 = old value
	SC	R3, (R2)		// store-conditional: write new value
	BEQ	R3, -3(PC)		// SC failed: retry from the MOVV copy
	MOVW	R1, ret+16(FP)		// return the old value
	SYNC				// full barrier after the exchange
	RET

// uint64 Xchg64(ptr *uint64, new uint64)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
//
// 64-bit twin of ·Xchg using LLV/SCV.
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R2		// R2 = ptr
	MOVV	new+8(FP), R5		// R5 = new value

	SYNC				// full barrier before the exchange
	MOVV	R5, R3			// SCV consumes its source register; copy each attempt
	LLV	(R2), R1		// load-linked doubleword: R1 = old value
	SCV	R3, (R2)		// store-conditional: write new value
	BEQ	R3, -3(PC)		// SCV failed: retry from the MOVV copy
	MOVV	R1, ret+16(FP)		// return the old value
	SYNC				// full barrier after the exchange
	RET

// More representation-sharing trampolines (see the Cas variants above).

TEXT ·Xchgint32(SB), NOSPLIT, $0-20
	JMP	·Xchg(SB)

TEXT ·Xchgint64(SB), NOSPLIT, $0-24
	JMP	·Xchg64(SB)

TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	JMP	·Xchg64(SB)

// StorepNoWB is a pointer store with no write barrier; on mips64 a
// pointer is 8 bytes, so it is just Store64.
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

// The StoreRel* variants need only release ordering; the plain stores
// below issue full SYNC barriers on both sides, which is stronger.
TEXT ·StoreRel(SB), NOSPLIT, $0-12
	JMP	·Store(SB)

TEXT ·StoreRel64(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

// void ·Store(uint32 volatile* ptr, uint32 val)
// Atomic 32-bit store: plain word store bracketed by full barriers.
TEXT ·Store(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R1		// R1 = ptr
	MOVW	val+8(FP), R2		// R2 = val
	SYNC				// order earlier accesses before the store
	MOVW	R2, 0(R1)		// *ptr = val
	SYNC				// order the store before later accesses
	RET

// void ·Store8(uint8 volatile* ptr, uint8 val)
// Atomic byte store: plain byte store bracketed by full barriers.
TEXT ·Store8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1		// R1 = ptr
	MOVB	val+8(FP), R2		// R2 = val
	SYNC				// order earlier accesses before the store
	MOVB	R2, 0(R1)		// *ptr = val
	SYNC				// order the store before later accesses
	RET

// void ·Store64(uint64 volatile* ptr, uint64 val)
// Atomic 64-bit store: plain doubleword store bracketed by full barriers.
TEXT ·Store64(SB), NOSPLIT, $0-16
	MOVV	ptr+0(FP), R1		// R1 = ptr
	MOVV	val+8(FP), R2		// R2 = val
	SYNC				// order earlier accesses before the store
	MOVV	R2, 0(R1)		// *ptr = val
	SYNC				// order the store before later accesses
	RET

// void Or8(byte volatile*, byte);
//
// MIPS LL/SC operate only on aligned 32/64-bit words, so the byte OR is
// done by ORing a shifted copy of val into the containing aligned word.
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1		// R1 = ptr
	MOVBU	val+8(FP), R2		// R2 = val, zero-extended
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R3
	AND	R1, R3			// R3 = ptr &^ 3 (aligned word address)
	// Compute val shift.
#ifdef GOARCH_mips64
	// Big endian. ptr = ptr ^ 3
	XOR	$3, R1			// big-endian: byte 0 is the high byte of the word
#endif
	// R4 = ((ptr & 3) * 8)
	AND	$3, R1, R4
	SLLV	$3, R4			// R4 = bit offset of the target byte within the word
	// Shift val for aligned ptr. R2 = val << R4
	SLLV	R4, R2

	SYNC				// full barrier before the RMW
	LL	(R3), R4		// load-linked: R4 = containing word
	OR	R2, R4			// OR in the shifted byte; other bytes unchanged
	SC	R4, (R3)		// store-conditional
	BEQ	R4, -4(PC)		// SC failed: retry from the LL
	SYNC				// full barrier after the RMW
	RET

// void And8(byte volatile*, byte);
//
// Like Or8, but the mask must leave the other three bytes of the word
// intact, so bits outside the target byte are forced to 1 before ANDing.
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1		// R1 = ptr
	MOVBU	val+8(FP), R2		// R2 = val, zero-extended
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R3
	AND	R1, R3			// R3 = ptr &^ 3 (aligned word address)
	// Compute val shift.
#ifdef GOARCH_mips64
	// Big endian. ptr = ptr ^ 3
	XOR	$3, R1			// big-endian: byte 0 is the high byte of the word
#endif
	// R4 = ((ptr & 3) * 8)
	AND	$3, R1, R4
	SLLV	$3, R4			// R4 = bit offset of the target byte within the word
	// Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4)
	MOVV	$0xFF, R5
	SLLV	R4, R2			// R2 = val << R4
	SLLV	R4, R5			// R5 = 0xFF << R4 (target-byte mask)
	NOR	R0, R5			// R5 = ^R5 (NOR with zero is bitwise NOT)
	OR	R5, R2			// R2 = shifted val with all other bits set

	SYNC				// full barrier before the RMW
	LL	(R3), R4		// load-linked: R4 = containing word
	AND	R2, R4			// AND the target byte; other bytes unchanged
	SC	R4, (R3)		// store-conditional
	BEQ	R4, -4(PC)		// SC failed: retry from the LL
	SYNC				// full barrier after the RMW
	RET

// func Or(addr *uint32, v uint32)
// Atomically: *addr |= v.
TEXT ·Or(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R1		// R1 = addr
	MOVW	val+8(FP), R2		// R2 = v

	SYNC				// full barrier before the RMW
	LL	(R1), R3		// load-linked: R3 = *addr
	OR	R2, R3			// R3 |= v
	SC	R3, (R1)		// store-conditional
	BEQ	R3, -4(PC)		// SC failed: retry from the LL
	SYNC				// full barrier after the RMW
	RET

// func And(addr *uint32, v uint32)
// Atomically: *addr &= v.
TEXT ·And(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R1		// R1 = addr
	MOVW	val+8(FP), R2		// R2 = v

	SYNC				// full barrier before the RMW
	LL	(R1), R3		// load-linked: R3 = *addr
	AND	R2, R3			// R3 &= v
	SC	R3, (R1)		// store-conditional
	BEQ	R3, -4(PC)		// SC failed: retry from the LL
	SYNC				// full barrier after the RMW
	RET

// uint32 ·Load(uint32 volatile* ptr)
// Atomic 32-bit load: plain load bracketed by full barriers.
TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12
	MOVV	ptr+0(FP), R1		// R1 = ptr
	SYNC				// order earlier accesses before the load
	MOVWU	0(R1), R1		// R1 = *ptr (zero-extended 32-bit load)
	SYNC				// order the load before later accesses
	MOVW	R1, ret+8(FP)
	RET

// uint8 ·Load8(uint8 volatile* ptr)
// Atomic byte load: plain load bracketed by full barriers.
TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9
	MOVV	ptr+0(FP), R1		// R1 = ptr
	SYNC				// order earlier accesses before the load
	MOVBU	0(R1), R1		// R1 = *ptr (zero-extended byte load)
	SYNC				// order the load before later accesses
	MOVB	R1, ret+8(FP)
	RET

// uint64 ·Load64(uint64 volatile* ptr)
// Atomic 64-bit load: plain doubleword load bracketed by full barriers.
TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16
	MOVV	ptr+0(FP), R1		// R1 = ptr
	SYNC				// order earlier accesses before the load
	MOVV	0(R1), R1		// R1 = *ptr
	SYNC				// order the load before later accesses
	MOVV	R1, ret+8(FP)
	RET

// void *·Loadp(void *volatile *ptr)
// Atomic pointer load; identical code to Load64 since pointers are
// 8 bytes on mips64, but kept separate for the distinct Go signature.
TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16
	MOVV	ptr+0(FP), R1		// R1 = ptr
	SYNC				// order earlier accesses before the load
	MOVV	0(R1), R1		// R1 = *ptr
	SYNC				// order the load before later accesses
	MOVV	R1, ret+8(FP)
	RET

// The LoadAcq* variants need only acquire ordering; the plain loads
// above issue a full SYNC after the load, which is strictly stronger,
// so each tail-jumps to the corresponding plain load.

// uint32 ·LoadAcq(uint32 volatile* ptr)
TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12
	JMP	atomic·Load(SB)

// uint64 ·LoadAcq64(uint64 volatile* ptr)
TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16
	JMP	atomic·Load64(SB)

// uintptr ·LoadAcquintptr(uintptr volatile* ptr)
TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16
	JMP	atomic·Load64(SB)