@@ -207,16 +207,16 @@ typedef char bool;
     __asm(zig_mangle_c(name) " = " zig_mangle_c(symbol))
 #endif
 
+#define zig_mangled_tentative zig_mangled
+#define zig_mangled_final zig_mangled
 #if _MSC_VER
-#define zig_mangled_tentative(mangled, unmangled)
-#define zig_mangled_final(mangled, unmangled) ; \
+#define zig_mangled(mangled, unmangled) ; \
     zig_export(#mangled, unmangled)
 #define zig_mangled_export(mangled, unmangled, symbol) \
     zig_export(unmangled, #mangled) \
     zig_export(symbol, unmangled)
 #else /* _MSC_VER */
-#define zig_mangled_tentative(mangled, unmangled) __asm(zig_mangle_c(unmangled))
-#define zig_mangled_final(mangled, unmangled) zig_mangled_tentative(mangled, unmangled)
+#define zig_mangled(mangled, unmangled) __asm(zig_mangle_c(unmangled))
 #define zig_mangled_export(mangled, unmangled, symbol) \
     zig_mangled_final(mangled, unmangled) \
     zig_export(symbol, unmangled)
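
The non-MSVC branch of the new unified zig_mangled macro relies on the GNU asm-label extension, which lets a declaration's object-file symbol differ from its C identifier. A minimal sketch of that technique with hypothetical names (nothing here is emitted by the Zig C backend, and zig_mangle_c is left out):

    #include <stdio.h>

    /* The C identifier plays the role of the mangled name; the asm label supplies
       the unmangled symbol, which may contain characters such as '.' that are not
       legal in a C identifier. */
    int zig_e_example_add_one(int x) __asm__("example.add_one");
    int zig_e_example_add_one(int x) { return x + 1; }

    int main(void) {
        printf("%d\n", zig_e_example_add_one(41)); /* prints 42 */
        return 0;
    }

On MSVC, where asm labels are not available, zig_mangled instead terminates the declaration with ';' and defers to zig_export(#mangled, unmangled) to alias the two names.
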
@@ -3636,7 +3636,7 @@ typedef int zig_memory_order;
 #define zig_atomicrmw_min(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_min_##Type(obj, arg)
 #define zig_atomicrmw_max(res, obj, arg, order, Type, ReprType) res = zig_msvc_atomicrmw_max_##Type(obj, arg)
 #define zig_atomic_store(obj, arg, order, Type, ReprType) zig_msvc_atomic_store_##Type(obj, arg)
-#define zig_atomic_load(res, obj, order, Type, ReprType) res = zig_msvc_atomic_load_##Type(obj)
+#define zig_atomic_load(res, obj, order, Type, ReprType) res = zig_msvc_atomic_load_##order##_##Type(obj)
 #if _M_X64
 #define zig_fence(order) __faststorefence()
 #else
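
Token-pasting the order argument into the helper name lets each memory order dispatch to a dedicated load routine. A hypothetical call site and its hand expansion (assuming the order is passed as the token zig_memory_order_acquire, which is what the new helper names imply):

    static uint32_t volatile some_counter;
    uint32_t counter_val;
    /* zig_atomic_load(res, obj, order, Type, ReprType) */
    zig_atomic_load(counter_val, &some_counter, zig_memory_order_acquire, u32, uint32_t);
    /* expands to: */
    counter_val = zig_msvc_atomic_load_zig_memory_order_acquire_u32(&some_counter);
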
@@ -3670,7 +3670,7 @@ typedef int zig_memory_order;
 
 /* TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64 */
 
-#define zig_msvc_atomics(ZigType, Type, SigType, suffix) \
+#define zig_msvc_atomics(ZigType, Type, SigType, suffix, iso_suffix) \
     static inline bool zig_msvc_cmpxchg_##ZigType(Type volatile* obj, Type* expected, Type desired) { \
         Type comparand = *expected; \
         Type initial = _InterlockedCompareExchange##suffix((SigType volatile*)obj, (SigType)desired, (SigType)comparand); \
@@ -3741,24 +3741,34 @@ typedef int zig_memory_order;
     } \
     static inline void zig_msvc_atomic_store_##ZigType(Type volatile* obj, Type value) { \
         (void)_InterlockedExchange##suffix((SigType volatile*)obj, (SigType)value); \
+    } \
+    static inline Type zig_msvc_atomic_load_zig_memory_order_relaxed_##ZigType(Type volatile* obj) { \
+        return __iso_volatile_load##iso_suffix((SigType volatile*)obj); \
     } \
-    static inline Type zig_msvc_atomic_load_##ZigType(Type volatile* obj) { \
-        return _InterlockedExchangeAdd##suffix((SigType volatile*)obj, (SigType)0); \
+    static inline Type zig_msvc_atomic_load_zig_memory_order_acquire_##ZigType(Type volatile* obj) { \
+        Type val = __iso_volatile_load##iso_suffix((SigType volatile*)obj); \
+        _ReadWriteBarrier(); \
+        return val; \
+    } \
+    static inline Type zig_msvc_atomic_load_zig_memory_order_seq_cst_##ZigType(Type volatile* obj) { \
+        Type val = __iso_volatile_load##iso_suffix((SigType volatile*)obj); \
+        _ReadWriteBarrier(); \
+        return val; \
     }
 
-zig_msvc_atomics( u8,  uint8_t, char,    8)
-zig_msvc_atomics( i8,   int8_t, char,    8)
-zig_msvc_atomics(u16, uint16_t, short,  16)
-zig_msvc_atomics(i16,  int16_t, short,  16)
-zig_msvc_atomics(u32, uint32_t, long,     )
-zig_msvc_atomics(i32,  int32_t, long,     )
+zig_msvc_atomics( u8,  uint8_t, char,    8,  8)
+zig_msvc_atomics( i8,   int8_t, char,    8,  8)
+zig_msvc_atomics(u16, uint16_t, short,  16, 16)
+zig_msvc_atomics(i16,  int16_t, short,  16, 16)
+zig_msvc_atomics(u32, uint32_t, long,     , 32)
+zig_msvc_atomics(i32,  int32_t, long,     , 32)
 
 #if _M_X64
-zig_msvc_atomics(u64, uint64_t, __int64, 64)
-zig_msvc_atomics(i64,  int64_t, __int64, 64)
+zig_msvc_atomics(u64, uint64_t, __int64, 64, 64)
+zig_msvc_atomics(i64,  int64_t, __int64, 64, 64)
 #endif
 
-#define zig_msvc_flt_atomics(Type, SigType, suffix) \
+#define zig_msvc_flt_atomics(Type, SigType, suffix, iso_suffix) \
     static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \
         SigType exchange; \
         SigType comparand; \
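
Addressing the TODO above, the added iso_suffix parameter routes plain loads through the matching __iso_volatile_loadN intrinsic instead of an interlocked read-modify-write: relaxed is a bare volatile load, while acquire and seq_cst follow it with _ReadWriteBarrier(), a compiler-only fence (x86/x64 loads already carry acquire ordering in hardware; as written, the seq_cst body is identical to the acquire one). A hand-expanded sketch of the load helpers produced by zig_msvc_atomics(u32, uint32_t, long, , 32), with whitespace added rather than literal preprocessor output:

    static inline uint32_t zig_msvc_atomic_load_zig_memory_order_relaxed_u32(uint32_t volatile* obj) {
        return __iso_volatile_load32((long volatile*)obj);
    }
    static inline uint32_t zig_msvc_atomic_load_zig_memory_order_acquire_u32(uint32_t volatile* obj) {
        uint32_t val = __iso_volatile_load32((long volatile*)obj);
        _ReadWriteBarrier(); /* keeps the compiler from moving later accesses above the load */
        return val;
    }
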
@@ -3776,15 +3786,30 @@ zig_msvc_atomics(i64, int64_t, __int64, 64)
         memcpy(&value, &arg, sizeof(value)); \
         (void)_InterlockedExchange##suffix((SigType volatile*)obj, value); \
     } \
-    static inline zig_##Type zig_msvc_atomic_load_##Type(zig_##Type volatile* obj) { \
+    static inline zig_##Type zig_msvc_atomic_load_zig_memory_order_relaxed_##Type(zig_##Type volatile* obj) { \
         zig_##Type result; \
-        SigType initial = _InterlockedExchangeAdd##suffix((SigType volatile*)obj, (SigType)0); \
+        SigType initial = __iso_volatile_load##iso_suffix((SigType volatile*)obj); \
         memcpy(&result, &initial, sizeof(result)); \
         return result; \
+    } \
+    static inline zig_##Type zig_msvc_atomic_load_zig_memory_order_acquire_##Type(zig_##Type volatile* obj) { \
+        zig_##Type result; \
+        SigType initial = __iso_volatile_load##iso_suffix((SigType volatile*)obj); \
+        _ReadWriteBarrier(); \
+        memcpy(&result, &initial, sizeof(result)); \
+        return result; \
+    } \
+    static inline zig_##Type zig_msvc_atomic_load_zig_memory_order_seq_cst_##Type(zig_##Type volatile* obj) { \
+        zig_##Type result; \
+        SigType initial = __iso_volatile_load##iso_suffix((SigType volatile*)obj); \
+        _ReadWriteBarrier(); \
+        memcpy(&result, &initial, sizeof(result)); \
+        return result; \
     }
-zig_msvc_flt_atomics(f32, long, )
+
+zig_msvc_flt_atomics(f32,    long,   , 32)
 #if _M_X64
-zig_msvc_flt_atomics(f64, int64_t, 64)
+zig_msvc_flt_atomics(f64, int64_t, 64, 64)
 #endif
 
 #if _M_IX86
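
The float variants follow the same shape but cannot cast the loaded bits directly, so they load through the same-width integer intrinsic and memcpy the bytes into the float result. A rough hand expansion of the relaxed f32 load from zig_msvc_flt_atomics(f32, long, , 32), assuming zig_f32 is the backend's 32-bit float typedef (defined elsewhere in this header, outside the diff):

    static inline zig_f32 zig_msvc_atomic_load_zig_memory_order_relaxed_f32(zig_f32 volatile* obj) {
        zig_f32 result;
        long initial = __iso_volatile_load32((long volatile*)obj);
        memcpy(&result, &initial, sizeof(result)); /* bit-cast the 32 loaded bits into the float */
        return result;
    }
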