MySQL 5.6.14 Source Code Document
slabs.c
1 /* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
2 /*
3  * Slabs memory allocation, based on powers-of-N. Slabs are up to 1MB in size
4  * and are divided into chunks. The chunk sizes start off at the size of the
5  * "item" structure plus space for a small key and value. They increase by
6  * a multiplier factor from there, up to half the maximum slab size. The last
7  * slab size is always 1MB, since that's the maximum item size allowed by the
8  * memcached protocol.
9  */
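/*
 * Worked example (illustrative, not part of the original source): assuming a
 * first chunk size of 96 bytes, a growth factor of 1.25 and 8-byte alignment,
 * the class sizes computed by slabs_init() below would run
 *
 *   96, 120, 152, 192, 240, 304, 384, 480, 600, 752, 944, ...
 *
 * each value being the previous one multiplied by the factor and rounded up
 * to the next multiple of CHUNK_ALIGN_BYTES, with the final class pinned to
 * item_size_max (1MB by default).
 */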
10 #include "config.h"
11 
12 #include <fcntl.h>
13 #include <errno.h>
14 #include <stdlib.h>
15 #include <stdio.h>
16 #include <string.h>
17 #include <assert.h>
18 #include <pthread.h>
19 #include <inttypes.h>
20 #include <stdarg.h>
21 
22 #include "default_engine.h"
23 
24 /*
25  * Forward Declarations
26  */
27 static int do_slabs_newslab(struct default_engine *engine, const unsigned int id);
28 static void *memory_allocate(struct default_engine *engine, size_t size);
29 
30 #ifndef DONT_PREALLOC_SLABS
31 /* Preallocate as many slab pages as possible (called from slabs_init)
32  on start-up, so users don't get confusing out-of-memory errors when
33  they do have free (in-slab) space, but no space to make new slabs.
34  If maxslabs is 18 (POWER_LARGEST - POWER_SMALLEST + 1), then all
35  slab types can be made. If max memory is less than 18 MB, only the
36  smaller ones will be made. */
37 static void slabs_preallocate (struct default_engine *engine, const unsigned int maxslabs);
38 #endif
39 
40 /*
41  * Figures out which slab class (chunk size) is required to store an item of
42  * a given size.
43  *
44  * Given object size, return id to use when allocating/freeing memory for object
45  * 0 means error: can't store such a large object
46  */
47 
48 unsigned int slabs_clsid(struct default_engine *engine, const size_t size) {
49  int res = POWER_SMALLEST;
50 
51  if (size == 0)
52  return 0;
53  while (size > engine->slabs.slabclass[res].size)
54  if (res++ == engine->slabs.power_largest) /* won't fit in the biggest slab */
55  return 0;
56  return res;
57 }
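/*
 * Worked example (illustrative): with the class sizes sketched at the top of
 * the file (96, 120, 152, ...), slabs_clsid(engine, 100) returns the id of
 * the 120-byte class, i.e. the first class large enough to hold the object,
 * while a request for more than the largest class size (or for 0 bytes)
 * returns the error value 0.
 */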
58 
63 ENGINE_ERROR_CODE slabs_init(struct default_engine *engine,
64  const size_t limit,
65  const double factor,
66  const bool prealloc) {
67  int i = POWER_SMALLEST - 1;
68  unsigned int size = sizeof(hash_item) + engine->config.chunk_size;
69 
70  engine->slabs.mem_limit = limit;
71 
72  if (prealloc) {
73  /* Allocate everything in a big chunk with malloc */
74  engine->slabs.mem_base = malloc(engine->slabs.mem_limit);
75  if (engine->slabs.mem_base != NULL) {
76  engine->slabs.mem_current = engine->slabs.mem_base;
77  engine->slabs.mem_avail = engine->slabs.mem_limit;
78  } else {
79  return ENGINE_ENOMEM;
80  }
81  }
82 
83  memset(engine->slabs.slabclass, 0, sizeof(engine->slabs.slabclass));
84 
85  while (++i < POWER_LARGEST && size <= engine->config.item_size_max / factor) {
86  /* Make sure items are always n-byte aligned */
87  if (size % CHUNK_ALIGN_BYTES)
88  size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
89 
90  engine->slabs.slabclass[i].size = size;
91  engine->slabs.slabclass[i].perslab = engine->config.item_size_max / engine->slabs.slabclass[i].size;
92  size *= factor;
93  if (engine->config.verbose > 1) {
94  fprintf(stderr, "slab class %3d: chunk size %9u perslab %7u\n",
95  i, engine->slabs.slabclass[i].size, engine->slabs.slabclass[i].perslab);
96  }
97  }
98 
99  engine->slabs.power_largest = i;
100  engine->slabs.slabclass[engine->slabs.power_largest].size = engine->config.item_size_max;
101  engine->slabs.slabclass[engine->slabs.power_largest].perslab = 1;
102  if (engine->config.verbose > 1) {
103  fprintf(stderr, "slab class %3d: chunk size %9u perslab %7u\n",
104  i, engine->slabs.slabclass[i].size, engine->slabs.slabclass[i].perslab);
105  }
106 
107  /* for the test suite: fake how much we've already malloc'd */
108  {
109  char *t_initial_malloc = getenv("T_MEMD_INITIAL_MALLOC");
110  if (t_initial_malloc) {
111  engine->slabs.mem_malloced = (size_t)atol(t_initial_malloc);
112  }
113 
114  }
115 
116 #ifndef DONT_PREALLOC_SLABS
117  {
118  char *pre_alloc = getenv("T_MEMD_SLABS_ALLOC");
119 
120  if (pre_alloc == NULL || atoi(pre_alloc) != 0) {
121  slabs_preallocate(engine, engine->slabs.power_largest);
122  }
123  }
124 #endif
125 
126  return ENGINE_SUCCESS;
127 }
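/*
 * Caller sketch (assumed, not part of this file): the engine start-up path
 * is expected to forward its configured limits, along the lines of
 *
 *   ENGINE_ERROR_CODE ret = slabs_init(engine,
 *                                      engine->config.maxbytes,
 *                                      engine->config.factor,
 *                                      engine->config.preallocate);
 *   if (ret != ENGINE_SUCCESS) {
 *       return ret;
 *   }
 *
 * where maxbytes, factor and preallocate are the memory limit, growth factor
 * and preallocation flag from the engine configuration (field names assumed
 * here for illustration).
 */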
128 
129 #ifndef DONT_PREALLOC_SLABS
130 static void slabs_preallocate (struct default_engine *engine, const unsigned int maxslabs) {
131  int i;
132  unsigned int prealloc = 0;
133 
134  /* pre-allocate a 1MB slab in every size class so people don't get
135  confused by non-intuitive "SERVER_ERROR out of memory"
136  messages. this is the most common question on the mailing
137  list. if you really don't want this, you can rebuild without
138  these three lines. */
139 
140  for (i = POWER_SMALLEST; i <= POWER_LARGEST; i++) {
141  if (++prealloc > maxslabs)
142  return;
143  do_slabs_newslab(engine, i);
144  }
145 
146 }
147 #endif
148 
149 static int grow_slab_list (struct default_engine *engine, const unsigned int id) {
150  slabclass_t *p = &engine->slabs.slabclass[id];
151  if (p->slabs == p->list_size) {
152  size_t new_size = (p->list_size != 0) ? p->list_size * 2 : 16;
153  void *new_list = realloc(p->slab_list, new_size * sizeof(void *));
154  if (new_list == 0) return 0;
155  p->list_size = new_size;
156  p->slab_list = new_list;
157  }
158  return 1;
159 }
160 
161 static int do_slabs_newslab(struct default_engine *engine, const unsigned int id) {
162  slabclass_t *p = &engine->slabs.slabclass[id];
163  int len = p->size * p->perslab;
164  char *ptr;
165 
166  if ((engine->slabs.mem_limit && engine->slabs.mem_malloced + len > engine->slabs.mem_limit && p->slabs > 0) ||
167  (grow_slab_list(engine, id) == 0) ||
168  ((ptr = memory_allocate(engine, (size_t)len)) == 0)) {
169 
170  MEMCACHED_SLABS_SLABCLASS_ALLOCATE_FAILED(id);
171  return 0;
172  }
173 
174  memset(ptr, 0, (size_t)len);
175  p->end_page_ptr = ptr;
176  p->end_page_free = p->perslab;
177 
178  p->slab_list[p->slabs++] = ptr;
179  engine->slabs.mem_malloced += len;
180  MEMCACHED_SLABS_SLABCLASS_ALLOCATE(id);
181 
182  return 1;
183 }
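/*
 * Worked example (illustrative): with the default item_size_max of 1MB, a
 * class whose chunk size is 96 bytes has perslab = 1048576 / 96 = 10922, so
 * each page allocated here is len = 96 * 10922 = 1048512 bytes; the largest
 * class has perslab = 1, i.e. one 1MB chunk per page.
 */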
184 
185 /*@null@*/
186 static void *do_slabs_alloc(struct default_engine *engine, const size_t size, unsigned int id) {
187  slabclass_t *p;
188  void *ret = NULL;
189 
190  if (id < POWER_SMALLEST || id > engine->slabs.power_largest) {
191  MEMCACHED_SLABS_ALLOCATE_FAILED(size, 0);
192  return NULL;
193  }
194 
195  p = &engine->slabs.slabclass[id];
196 
197 #ifdef USE_SYSTEM_MALLOC
198  if (engine->slabs.mem_limit && engine->slabs.mem_malloced + size > engine->slabs.mem_limit) {
199  MEMCACHED_SLABS_ALLOCATE_FAILED(size, id);
200  return 0;
201  }
202  engine->slabs.mem_malloced += size;
203  ret = malloc(size);
204  MEMCACHED_SLABS_ALLOCATE(size, id, 0, ret);
205  return ret;
206 #endif
207 
208  /* fail unless we have space at the end of a recently allocated page,
209  we have something on our freelist, or we could allocate a new page */
210  if (! (p->end_page_ptr != 0 || p->sl_curr != 0 ||
211  do_slabs_newslab(engine, id) != 0)) {
212  /* We don't have more memory available */
213  ret = NULL;
214  } else if (p->sl_curr != 0) {
215  /* return off our freelist */
216  ret = p->slots[--p->sl_curr];
217  } else {
218  /* if we recently allocated a whole page, return from that */
219  assert(p->end_page_ptr != NULL);
220  ret = p->end_page_ptr;
221  if (--p->end_page_free != 0) {
222  p->end_page_ptr = ((caddr_t)p->end_page_ptr) + p->size;
223  } else {
224  p->end_page_ptr = 0;
225  }
226  }
227 
228  if (ret) {
229  p->requested += size;
230  MEMCACHED_SLABS_ALLOCATE(size, id, p->size, ret);
231  } else {
232  MEMCACHED_SLABS_ALLOCATE_FAILED(size, id);
233  }
234 
235  return ret;
236 }
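/*
 * Note on the branch order above (descriptive, added for clarity): the
 * freelist is consumed before the tail of the most recently allocated page,
 * and do_slabs_newslab() is only attempted, via short-circuit evaluation,
 * when both of those sources are empty.
 */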
237 
238 static void do_slabs_free(struct default_engine *engine, void *ptr, const size_t size, unsigned int id) {
239  slabclass_t *p;
240 
241  if (id < POWER_SMALLEST || id > engine->slabs.power_largest)
242  return;
243 
244  MEMCACHED_SLABS_FREE(size, id, ptr);
245  p = &engine->slabs.slabclass[id];
246 
247 #ifdef USE_SYSTEM_MALLOC
248  engine->slabs.mem_malloced -= size;
249  free(ptr);
250  return;
251 #endif
252 
253  if (p->sl_curr == p->sl_total) { /* need more space on the free list */
254  int new_size = (p->sl_total != 0) ? p->sl_total * 2 : 16; /* 16 is arbitrary */
255  void **new_slots = realloc(p->slots, new_size * sizeof(void *));
256  if (new_slots == 0)
257  return;
258  p->slots = new_slots;
259  p->sl_total = new_size;
260  }
261  p->slots[p->sl_curr++] = ptr;
262  p->requested -= size;
263  return;
264 }
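/*
 * Note (descriptive, added for clarity): outside of USE_SYSTEM_MALLOC builds
 * nothing is returned to the operating system here; the chunk is pushed onto
 * the class freelist so a later do_slabs_alloc() for the same class can
 * reuse it.
 */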
265 
266 void add_statistics(const void *cookie, ADD_STAT add_stats,
267  const char* prefix, int num, const char *key,
268  const char *fmt, ...) {
269  char name[80], val[80];
270  int klen = 0, vlen;
271  va_list ap;
272 
273  assert(cookie);
274  assert(add_stats);
275  assert(key);
276 
277  va_start(ap, fmt);
278  vlen = vsnprintf(val, sizeof(val) - 1, fmt, ap);
279  va_end(ap);
280 
281  if (prefix != NULL) {
282  klen = snprintf(name, sizeof(name), "%s:", prefix);
283  }
284 
285  if (num != -1) {
286  klen += snprintf(name + klen, sizeof(name) - klen, "%d:", num);
287  }
288 
289  klen += snprintf(name + klen, sizeof(name) - klen, "%s", key);
290 
291  add_stats(name, klen, val, vlen, cookie);
292 }
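/*
 * Example (illustrative): add_statistics(cookie, add_stats, NULL, 3,
 * "chunk_size", "%u", 152) emits the pair ("3:chunk_size", "152"); with a
 * non-NULL prefix such as "slabs" the key would become "slabs:3:chunk_size".
 */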
293 
294 /*@null@*/
295 static void do_slabs_stats(struct default_engine *engine, ADD_STAT add_stats, const void *cookie) {
296  int i, total;
297  /* Get the per-thread stats which contain some interesting aggregates */
298 #ifdef FUTURE
299  struct conn *conn = (struct conn*)cookie;
300  struct thread_stats thread_stats;
301  threadlocal_stats_aggregate(conn, &thread_stats);
302 #endif
303 
304  total = 0;
305  for(i = POWER_SMALLEST; i <= engine->slabs.power_largest; i++) {
306  slabclass_t *p = &engine->slabs.slabclass[i];
307  if (p->slabs != 0) {
308  uint32_t perslab, slabs;
309  slabs = p->slabs;
310  perslab = p->perslab;
311 
312  add_statistics(cookie, add_stats, NULL, i, "chunk_size", "%u",
313  p->size);
314  add_statistics(cookie, add_stats, NULL, i, "chunks_per_page", "%u",
315  perslab);
316  add_statistics(cookie, add_stats, NULL, i, "total_pages", "%u",
317  slabs);
318  add_statistics(cookie, add_stats, NULL, i, "total_chunks", "%u",
319  slabs * perslab);
320  add_statistics(cookie, add_stats, NULL, i, "used_chunks", "%u",
321  slabs*perslab - p->sl_curr - p->end_page_free);
322  add_statistics(cookie, add_stats, NULL, i, "free_chunks", "%u",
323  p->sl_curr);
324  add_statistics(cookie, add_stats, NULL, i, "free_chunks_end", "%u",
325  p->end_page_free);
326  add_statistics(cookie, add_stats, NULL, i, "mem_requested", "%zu",
327  p->requested);
328 #ifdef FUTURE
329  add_statistics(cookie, add_stats, NULL, i, "get_hits", "%"PRIu64,
330  thread_stats.slab_stats[i].get_hits);
331  add_statistics(cookie, add_stats, NULL, i, "cmd_set", "%"PRIu64,
332  thread_stats.slab_stats[i].set_cmds);
333  add_statistics(cookie, add_stats, NULL, i, "delete_hits", "%"PRIu64,
334  thread_stats.slab_stats[i].delete_hits);
335  add_statistics(cookie, add_stats, NULL, i, "cas_hits", "%"PRIu64,
336  thread_stats.slab_stats[i].cas_hits);
337  add_statistics(cookie, add_stats, NULL, i, "cas_badval", "%"PRIu64,
338  thread_stats.slab_stats[i].cas_badval);
339 #endif
340  total++;
341  }
342  }
343 
344  /* add overall slab stats and append terminator */
345 
346  add_statistics(cookie, add_stats, NULL, -1, "active_slabs", "%d", total);
347  add_statistics(cookie, add_stats, NULL, -1, "total_malloced", "%zu",
348  engine->slabs.mem_malloced);
349 }
350 
351 static void *memory_allocate(struct default_engine *engine, size_t size) {
352  void *ret;
353 
354  if (engine->slabs.mem_base == NULL) {
355  /* We are not using a preallocated large memory chunk */
356  ret = malloc(size);
357  } else {
358  ret = engine->slabs.mem_current;
359 
360  if (size > engine->slabs.mem_avail) {
361  return NULL;
362  }
363 
364  /* mem_current pointer _must_ be aligned!!! */
365  if (size % CHUNK_ALIGN_BYTES) {
366  size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
367  }
368 
369  engine->slabs.mem_current = ((char*)engine->slabs.mem_current) + size;
370  if (size < engine->slabs.mem_avail) {
371  engine->slabs.mem_avail -= size;
372  } else {
373  engine->slabs.mem_avail = 0;
374  }
375  }
376 
377  return ret;
378 }
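/*
 * Note (descriptive, added for clarity): in the preallocated case the pointer
 * returned is mem_current as it was before the size is rounded up, and
 * mem_current only ever advances by multiples of CHUNK_ALIGN_BYTES, so every
 * returned address keeps the alignment of mem_base itself.
 */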
379 
380 void *slabs_alloc(struct default_engine *engine, size_t size, unsigned int id) {
381  void *ret;
382 
383  pthread_mutex_lock(&engine->slabs.lock);
384  ret = do_slabs_alloc(engine, size, id);
385  pthread_mutex_unlock(&engine->slabs.lock);
386  return ret;
387 }
388 
389 void slabs_free(struct default_engine *engine, void *ptr, size_t size, unsigned int id) {
390  pthread_mutex_lock(&engine->slabs.lock);
391  do_slabs_free(engine, ptr, size, id);
392  pthread_mutex_unlock(&engine->slabs.lock);
393 }
394 
395 void slabs_stats(struct default_engine *engine, ADD_STAT add_stats, const void *c) {
396  pthread_mutex_lock(&engine->slabs.lock);
397  do_slabs_stats(engine, add_stats, c);
398  pthread_mutex_unlock(&engine->slabs.lock);
399 }
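/*
 * End-to-end sketch (hypothetical, not part of the original file): how a
 * caller is expected to pair the locking wrappers above.  The helper name
 * and the "ntotal" parameter are made up for illustration; "ntotal" stands
 * for the full item size (header + key + value).
 */
static bool slabs_roundtrip_example(struct default_engine *engine, size_t ntotal) {
    unsigned int id = slabs_clsid(engine, ntotal);
    if (id == 0) {
        return false;                          /* too large for any slab class */
    }
    void *chunk = slabs_alloc(engine, ntotal, id);      /* takes slabs.lock */
    if (chunk == NULL) {
        return false;                          /* out of memory */
    }
    /* ... a real caller would build the item in place here ... */
    slabs_free(engine, chunk, ntotal, id);               /* back onto the freelist */
    return true;
}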