Skip to content

Commit bb4162c

Browse files
authored
SWIFT-1418 Vendor libmongoc 1.20 (#709)
1 parent d64f5c1 commit bb4162c

File tree

88 files changed

+4952
-2024
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

88 files changed

+4952
-2024
lines changed

Sources/CLibMongoC/bson/bson-atomic.c

Lines changed: 193 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -17,79 +17,231 @@
1717

1818
#include "CLibMongoC_bson-atomic.h"
1919

20+
#ifdef BSON_OS_UNIX
21+
/* For sched_yield() */
22+
#include <sched.h>
23+
#endif
2024

21-
/*
22-
* We should only ever hit these on non-Windows systems, for which we require
23-
* pthread support. Therefore, we will avoid making a threading portability
24-
* for threads here and just use pthreads directly.
25-
*/
25+
int32_t
26+
bson_atomic_int_add (volatile int32_t *p, int32_t n)
27+
{
28+
return n + bson_atomic_int32_fetch_add (p, n, bson_memory_order_seq_cst);
29+
}
2630

31+
int64_t
32+
bson_atomic_int64_add (volatile int64_t *p, int64_t n)
33+
{
34+
return n + bson_atomic_int64_fetch_add (p, n, bson_memory_order_seq_cst);
35+
}
36+
37+
void
38+
bson_thrd_yield (void)
39+
{
40+
BSON_IF_WINDOWS (SwitchToThread ();)
41+
BSON_IF_POSIX (sched_yield ();)
42+
}
2743

/* Issue a full memory fence. Thin wrapper over the atomic thread-fence
 * intrinsic (replaces an older pthread-mutex-based barrier). */
void
bson_memory_barrier (void)
{
   bson_atomic_thread_fence ();
}
3849

50+
/**
 * Some platforms do not support compiler intrinsics for atomic operations.
 * We emulate them here using a spin lock and regular arithmetic operations.
 * 0 means unlocked, 1 means locked.
 */
static int8_t gEmulAtomicLock = 0;
3955

40-
#ifdef __BSON_NEED_ATOMIC_32
41-
#include <pthread.h>
42-
static pthread_mutex_t gSync32 = PTHREAD_MUTEX_INITIALIZER;
43-
int32_t
44-
bson_atomic_int_add (volatile int32_t *p, int32_t n)
56+
static void
57+
_lock_emul_atomic ()
4558
{
46-
int ret;
59+
int i;
60+
if (bson_atomic_int8_compare_exchange_weak (
61+
&gEmulAtomicLock, 0, 1, bson_memory_order_acquire) == 0) {
62+
/* Successfully took the spinlock */
63+
return;
64+
}
65+
/* Failed. Try taking ten more times, then begin sleeping. */
66+
for (i = 0; i < 10; ++i) {
67+
if (bson_atomic_int8_compare_exchange_weak (
68+
&gEmulAtomicLock, 0, 1, bson_memory_order_acquire) == 0) {
69+
/* Succeeded in taking the lock */
70+
return;
71+
}
72+
}
73+
/* Still don't have the lock. Spin and yield */
74+
while (bson_atomic_int8_compare_exchange_weak (
75+
&gEmulAtomicLock, 0, 1, bson_memory_order_acquire) != 0) {
76+
bson_thrd_yield ();
77+
}
78+
}
4779

48-
pthread_mutex_lock (&gSync32);
49-
*p += n;
50-
ret = *p;
51-
pthread_mutex_unlock (&gSync32);
80+
static void
81+
_unlock_emul_atomic ()
82+
{
83+
int64_t rv = bson_atomic_int8_exchange (
84+
&gEmulAtomicLock, 0, bson_memory_order_release);
85+
BSON_ASSERT (rv == 1 && "Released atomic lock while not holding it");
86+
}
5287

88+
int64_t
89+
_bson_emul_atomic_int64_fetch_add (volatile int64_t *p,
90+
int64_t n,
91+
enum bson_memory_order _unused)
92+
{
93+
int64_t ret;
94+
_lock_emul_atomic ();
95+
ret = *p;
96+
*p += n;
97+
_unlock_emul_atomic ();
5398
return ret;
5499
}
55-
#endif
56100

101+
int64_t
102+
_bson_emul_atomic_int64_exchange (volatile int64_t *p,
103+
int64_t n,
104+
enum bson_memory_order _unused)
105+
{
106+
int64_t ret;
107+
_lock_emul_atomic ();
108+
ret = *p;
109+
*p = n;
110+
_unlock_emul_atomic ();
111+
return ret;
112+
}
57113

58-
#ifdef __BSON_NEED_ATOMIC_64
59-
#include <pthread.h>
60-
static pthread_mutex_t gSync64 = PTHREAD_MUTEX_INITIALIZER;
61114
int64_t
62-
bson_atomic_int64_add (volatile int64_t *p, int64_t n)
115+
_bson_emul_atomic_int64_compare_exchange_strong (volatile int64_t *p,
116+
int64_t expect_value,
117+
int64_t new_value,
118+
enum bson_memory_order _unused)
63119
{
64120
int64_t ret;
121+
_lock_emul_atomic ();
122+
ret = *p;
123+
if (ret == expect_value) {
124+
*p = new_value;
125+
}
126+
_unlock_emul_atomic ();
127+
return ret;
128+
}
129+
130+
int64_t
131+
_bson_emul_atomic_int64_compare_exchange_weak (volatile int64_t *p,
132+
int64_t expect_value,
133+
int64_t new_value,
134+
enum bson_memory_order order)
135+
{
136+
/* We're emulating. We can't do a weak version. */
137+
return _bson_emul_atomic_int64_compare_exchange_strong (
138+
p, expect_value, new_value, order);
139+
}
65140

66-
pthread_mutex_lock (&gSync64);
141+
142+
int32_t
143+
_bson_emul_atomic_int32_fetch_add (volatile int32_t *p,
144+
int32_t n,
145+
enum bson_memory_order _unused)
146+
{
147+
int32_t ret;
148+
_lock_emul_atomic ();
149+
ret = *p;
67150
*p += n;
151+
_unlock_emul_atomic ();
152+
return ret;
153+
}
154+
155+
int32_t
156+
_bson_emul_atomic_int32_exchange (volatile int32_t *p,
157+
int32_t n,
158+
enum bson_memory_order _unused)
159+
{
160+
int32_t ret;
161+
_lock_emul_atomic ();
68162
ret = *p;
69-
pthread_mutex_unlock (&gSync64);
163+
*p = n;
164+
_unlock_emul_atomic ();
165+
return ret;
166+
}
70167

168+
int32_t
169+
_bson_emul_atomic_int32_compare_exchange_strong (volatile int32_t *p,
170+
int32_t expect_value,
171+
int32_t new_value,
172+
enum bson_memory_order _unused)
173+
{
174+
int32_t ret;
175+
_lock_emul_atomic ();
176+
ret = *p;
177+
if (ret == expect_value) {
178+
*p = new_value;
179+
}
180+
_unlock_emul_atomic ();
71181
return ret;
72182
}
73-
#endif
74183

184+
int32_t
185+
_bson_emul_atomic_int32_compare_exchange_weak (volatile int32_t *p,
186+
int32_t expect_value,
187+
int32_t new_value,
188+
enum bson_memory_order order)
189+
{
190+
/* We're emulating. We can't do a weak version. */
191+
return _bson_emul_atomic_int32_compare_exchange_strong (
192+
p, expect_value, new_value, order);
193+
}
75194

76-
/*
77-
* The logic in the header is such that __BSON_NEED_ATOMIC_WINDOWS should only
78-
* be defined if neither __BSON_NEED_ATOMIC_32 nor __BSON_NEED_ATOMIC_64 are.
79-
*/
80195

196+
int
197+
_bson_emul_atomic_int_fetch_add (volatile int *p,
198+
int n,
199+
enum bson_memory_order _unused)
200+
{
201+
int ret;
202+
_lock_emul_atomic ();
203+
ret = *p;
204+
*p += n;
205+
_unlock_emul_atomic ();
206+
return ret;
207+
}
81208

82-
#ifdef __BSON_NEED_ATOMIC_WINDOWS
83-
int32_t
84-
bson_atomic_int_add (volatile int32_t *p, int32_t n)
209+
int
210+
_bson_emul_atomic_int_exchange (volatile int *p,
211+
int n,
212+
enum bson_memory_order _unused)
85213
{
86-
return InterlockedExchangeAdd (p, n) + n;
214+
int ret;
215+
_lock_emul_atomic ();
216+
ret = *p;
217+
*p = n;
218+
_unlock_emul_atomic ();
219+
return ret;
87220
}
88221

222+
int
223+
_bson_emul_atomic_int_compare_exchange_strong (volatile int *p,
224+
int expect_value,
225+
int new_value,
226+
enum bson_memory_order _unused)
227+
{
228+
int ret;
229+
_lock_emul_atomic ();
230+
ret = *p;
231+
if (ret == expect_value) {
232+
*p = new_value;
233+
}
234+
_unlock_emul_atomic ();
235+
return ret;
236+
}
89237

90-
int64_t
91-
bson_atomic_int64_add (volatile int64_t *p, int64_t n)
238+
int
239+
_bson_emul_atomic_int_compare_exchange_weak (volatile int *p,
240+
int expect_value,
241+
int new_value,
242+
enum bson_memory_order order)
92243
{
93-
return InterlockedExchangeAdd (p, n) + n;
244+
/* We're emulating. We can't do a weak version. */
245+
return _bson_emul_atomic_int_compare_exchange_strong (
246+
p, expect_value, new_value, order);
94247
}
95-
#endif

Sources/CLibMongoC/bson/bson-context.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -107,8 +107,8 @@ static void
107107
_bson_context_set_oid_seq32_threadsafe (bson_context_t *context, /* IN */
108108
bson_oid_t *oid) /* OUT */
109109
{
110-
int32_t seq = bson_atomic_int_add (&context->seq32, 1);
111-
110+
int32_t seq = 1 + bson_atomic_int32_fetch_add (
111+
&context->seq32, 1, bson_memory_order_seq_cst);
112112
seq = BSON_UINT32_TO_BE (seq);
113113
memcpy (&oid->bytes[9], ((uint8_t *) &seq) + 1, 3);
114114
}
@@ -164,7 +164,8 @@ static void
164164
_bson_context_set_oid_seq64_threadsafe (bson_context_t *context, /* IN */
165165
bson_oid_t *oid) /* OUT */
166166
{
167-
int64_t seq = bson_atomic_int64_add (&context->seq64, 1);
167+
int64_t seq = 1 + bson_atomic_int64_fetch_add (
168+
&context->seq64, 1, bson_memory_order_seq_cst);
168169

169170
seq = BSON_UINT64_TO_BE (seq);
170171
memcpy (&oid->bytes[4], &seq, sizeof (seq));

Sources/CLibMongoC/bson/bson-memory.c

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -28,11 +28,7 @@
2828
static bson_mem_vtable_t gMemVtable = {
2929
malloc,
3030
calloc,
31-
#ifdef BSON_HAVE_REALLOCF
32-
reallocf,
33-
#else
3431
realloc,
35-
#endif
3632
free,
3733
};
3834

@@ -299,11 +295,7 @@ bson_mem_restore_vtable (void)
299295
bson_mem_vtable_t vtable = {
300296
malloc,
301297
calloc,
302-
#ifdef BSON_HAVE_REALLOCF
303-
reallocf,
304-
#else
305298
realloc,
306-
#endif
307299
free,
308300
};
309301

0 commit comments

Comments
 (0)