@@ -32,7 +32,7 @@ typedef unsigned long int ub4; /* unsigned 4-byte quantities */
 typedef unsigned char ub1;   /* unsigned 1-byte quantities */
 
 /* how many powers of 2's worth of buckets we use */
-static unsigned int hashpower = HASHPOWER_DEFAULT;
+unsigned int hashpower = HASHPOWER_DEFAULT;
 
 #define hashsize(n) ((ub4)1<<(n))
 #define hashmask(n) (hashsize(n)-1)
@@ -51,6 +51,7 @@ static unsigned int hash_items = 0;
 
 /* Flag: Are we in the middle of expanding now? */
 static bool expanding = false;
+static bool started_expanding = false;
 
 /*
  * During expansion we migrate values with bucket granularity; this is how
@@ -136,13 +137,19 @@ static void assoc_expand(void) {
         stats.hash_bytes += hashsize(hashpower) * sizeof(void *);
         stats.hash_is_expanding = 1;
         STATS_UNLOCK();
-        pthread_cond_signal(&maintenance_cond);
     } else {
         primary_hashtable = old_hashtable;
         /* Bad news, but we can keep running. */
     }
 }
 
+static void assoc_start_expand(void) {
+    if (started_expanding)
+        return;
+    started_expanding = true;
+    pthread_cond_signal(&maintenance_cond);
+}
+
 /* Note: this isn't an assoc_update. The key must not already exist to call this */
 int assoc_insert(item *it, const uint32_t hv) {
     unsigned int oldbucket;
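
The condition signal moves out of assoc_expand() and into the new assoc_start_expand() gate: a worker thread that notices the table is overfull only sets started_expanding and wakes the maintenance thread once; the resize itself happens on the maintenance thread, which can take the heavyweight locks. Below is a minimal, self-contained sketch of this wake-once pattern; the names (request_work, work_requested) are illustrative rather than memcached's, and unlike memcached this version takes the mutex around the flag:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  work_cond = PTHREAD_COND_INITIALIZER;
    static bool work_requested = false;  /* plays the role of started_expanding */

    /* Called from any number of worker threads; only the first caller signals. */
    static void request_work(void) {
        pthread_mutex_lock(&lock);
        if (!work_requested) {
            work_requested = true;
            pthread_cond_signal(&work_cond);
        }
        pthread_mutex_unlock(&lock);
    }

    /* The single maintenance thread consumes one request per wakeup. */
    static void *maintenance(void *arg) {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!work_requested)               /* tolerates spurious wakeups */
            pthread_cond_wait(&work_cond, &lock);
        work_requested = false;
        pthread_mutex_unlock(&lock);
        puts("expansion would run here");
        return NULL;
    }

    int main(void) {
        pthread_t t;
        pthread_create(&t, NULL, maintenance, NULL);
        request_work();   /* first request arms the flag and signals */
        request_work();   /* duplicate request is coalesced by the flag */
        pthread_join(t, NULL);
        return 0;
    }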
@@ -161,7 +168,7 @@ int assoc_insert(item *it, const uint32_t hv) {
 
     hash_items++;
     if (! expanding && hash_items > (hashsize(hashpower) * 3) / 2) {
-        assoc_expand();
+        assoc_start_expand();
     }
 
     MEMCACHED_ASSOC_INSERT(ITEM_key(it), it->nkey, hash_items);
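
The trigger is a load factor of 1.5. Assuming the default hashpower of 16 (HASHPOWER_DEFAULT is 16 in assoc.h of this era), hashsize(16) = 1 << 16 = 65536 buckets, so assoc_start_expand() first fires once hash_items exceeds 65536 * 3 / 2 = 98304 items, and the expansion doubles the table to 2^17 = 131072 buckets.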
@@ -201,6 +208,7 @@ static void *assoc_maintenance_thread(void *arg) {
 
         /* Lock the cache, and bulk move multiple buckets to the new
          * hash table. */
+        item_lock_global();
         mutex_lock(&cache_lock);
 
         for (ii = 0; ii < hash_bulk_move && expanding; ++ii) {
@@ -230,12 +238,24 @@ static void *assoc_maintenance_thread(void *arg) {
             }
         }
 
+        mutex_unlock(&cache_lock);
+        item_unlock_global();
+
         if (!expanding) {
+            /* finished expanding. tell all threads to use fine-grained locks */
+            switch_item_lock_type(ITEM_LOCK_GRANULAR);
+            started_expanding = false;
+            slabs_rebalancer_resume();
             /* We are done expanding.. just wait for next invocation */
             pthread_cond_wait(&maintenance_cond, &cache_lock);
+            /* Before doing anything, tell threads to use a global lock */
+            mutex_unlock(&cache_lock);
+            slabs_rebalancer_pause();
+            switch_item_lock_type(ITEM_LOCK_GLOBAL);
+            mutex_lock(&cache_lock);
+            assoc_expand();
+            mutex_unlock(&cache_lock);
         }
-
-        mutex_unlock(&cache_lock);
     }
     return NULL;
 }
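
Migrating buckets changes which table, and therefore which bucket, an item lives in while ordinary lookups are still running, so rather than coordinating with every striped item lock the maintenance thread switches all workers to a single global item lock for the duration of the resize and back to granular locks afterwards. Condensed, the expansion window is bracketed like this (a paraphrase of the loop above, not literal source; cache_lock handling and stats elided):

    /* expansion window, as orchestrated by the maintenance thread */
    slabs_rebalancer_pause();                  /* keep the slab rebalancer out   */
    switch_item_lock_type(ITEM_LOCK_GLOBAL);   /* all workers: one global lock   */
    assoc_expand();                            /* allocate the doubled table     */
    /* loop: item_lock_global(); move hash_bulk_move buckets; item_unlock_global(); */
    switch_item_lock_type(ITEM_LOCK_GRANULAR); /* workers: back to striped locks */
    slabs_rebalancer_resume();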