MySQL 5.6.14 Source Code Document
slabs.c
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
 * Slabs memory allocation, based on powers-of-N. Slabs are up to 1MB in size
 * and are divided into chunks. The chunk sizes start off at the size of the
 * "item" structure plus space for a small key and value. They increase by
 * a multiplier factor from there, up to half the maximum slab size. The last
 * slab size is always 1MB, since that's the maximum item size allowed by the
 * memcached protocol.
 */
#include "config.h"

#include <fcntl.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>
#include <inttypes.h>
#include <stdarg.h>

#include "default_engine.h"

/*
 * Forward Declarations
 */
static int do_slabs_newslab(struct default_engine *engine, const unsigned int id);
static void *memory_allocate(struct default_engine *engine, size_t size);

#ifndef DONT_PREALLOC_SLABS
/* Preallocate as many slab pages as possible (called from slabs_init)
   on start-up, so users don't get confused out-of-memory errors when
   they do have free (in-slab) space, but no space to make new slabs.
   If maxslabs is 18 (POWER_LARGEST - POWER_SMALLEST + 1), then all
   slab types can be made. If max memory is less than 18 MB, only the
   smaller ones will be made. */
static void slabs_preallocate(struct default_engine *engine, const unsigned int maxslabs);
#endif

/*
 * Figures out which slab class (chunk size) is required to store an item of
 * a given size.
 *
 * Given object size, return id to use when allocating/freeing memory for object
 * 0 means error: can't store such a large object
 */

unsigned int slabs_clsid(struct default_engine *engine, const size_t size) {
    int res = POWER_SMALLEST;

    if (size == 0)
        return 0;
    while (size > engine->slabs.slabclass[res].size)
        if (res++ == engine->slabs.power_largest) /* won't fit in the biggest slab */
            return 0;
    return res;
}
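
A small illustration of the size-to-class mapping (a sketch only: `engine` is assumed to be an already-initialized struct default_engine, and the concrete ids depend on the configured chunk_size and growth factor):

    /* Hypothetical lookups; the comments describe typical outcomes, not fixed ids. */
    unsigned int small_id = slabs_clsid(engine, 100);             /* some low class id          */
    unsigned int big_id   = slabs_clsid(engine, 512 * 1024);      /* a higher class id          */
    unsigned int too_big  = slabs_clsid(engine, 2 * 1024 * 1024); /* 0: object can't be stored  */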

/**
 * Determines the chunk sizes and initializes the slab class descriptors
 * accordingly.
 */
ENGINE_ERROR_CODE slabs_init(struct default_engine *engine,
                             const size_t limit,
                             const double factor,
                             const bool prealloc) {
    int i = POWER_SMALLEST - 1;
    unsigned int size = sizeof(hash_item) + engine->config.chunk_size;

    engine->slabs.mem_limit = limit;

    if (prealloc) {
        /* Allocate everything in a big chunk with malloc */
        engine->slabs.mem_base = malloc(engine->slabs.mem_limit);
        if (engine->slabs.mem_base != NULL) {
            engine->slabs.mem_current = engine->slabs.mem_base;
            engine->slabs.mem_avail = engine->slabs.mem_limit;
        } else {
            return ENGINE_ENOMEM;
        }
    }

    memset(engine->slabs.slabclass, 0, sizeof(engine->slabs.slabclass));

    while (++i < POWER_LARGEST && size <= engine->config.item_size_max / factor) {
        /* Make sure items are always n-byte aligned */
        if (size % CHUNK_ALIGN_BYTES)
            size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);

        engine->slabs.slabclass[i].size = size;
        engine->slabs.slabclass[i].perslab = engine->config.item_size_max / engine->slabs.slabclass[i].size;
        size *= factor;
        if (engine->config.verbose > 1) {
            EXTENSION_LOGGER_DESCRIPTOR *logger;
            logger = (void*)engine->server.extension->get_extension(EXTENSION_LOGGER);
            logger->log(EXTENSION_LOG_INFO, NULL,
                        "slab class %3d: chunk size %9u perslab %7u\n",
                        i, engine->slabs.slabclass[i].size,
                        engine->slabs.slabclass[i].perslab);
        }
    }

    engine->slabs.power_largest = i;
    engine->slabs.slabclass[engine->slabs.power_largest].size = engine->config.item_size_max;
    engine->slabs.slabclass[engine->slabs.power_largest].perslab = 1;
    if (engine->config.verbose > 1) {
        EXTENSION_LOGGER_DESCRIPTOR *logger;
        logger = (void*)engine->server.extension->get_extension(EXTENSION_LOGGER);
        logger->log(EXTENSION_LOG_INFO, NULL,
                    "slab class %3d: chunk size %9u perslab %7u\n",
                    i, engine->slabs.slabclass[i].size,
                    engine->slabs.slabclass[i].perslab);
    }

    /* for the test suite: faking of how much we've already malloc'd */
    {
        char *t_initial_malloc = getenv("T_MEMD_INITIAL_MALLOC");
        if (t_initial_malloc) {
            engine->slabs.mem_malloced = (size_t)atol(t_initial_malloc);
        }
    }

#ifndef DONT_PREALLOC_SLABS
    {
        char *pre_alloc = getenv("T_MEMD_SLABS_ALLOC");

        if (pre_alloc == NULL || atoi(pre_alloc) != 0) {
            slabs_preallocate(engine, engine->slabs.power_largest);
        }
    }
#endif

    return ENGINE_SUCCESS;
}
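
The loop above builds a geometric ladder of chunk sizes: each class is the previous size multiplied by factor and rounded up to CHUNK_ALIGN_BYTES, and the final class is pinned to one item_size_max chunk per page. A stand-alone sketch of that computation follows; the starting size of 96 bytes, factor 1.25, 8-byte alignment and 1MB page are assumed values for illustration only, not the engine's actual configuration.

    /* Stand-alone sketch of the sizing loop (hypothetical numbers). */
    #include <stdio.h>

    int main(void) {
        unsigned int size = 96;                 /* assumed first chunk size        */
        const double factor = 1.25;             /* assumed growth factor           */
        const unsigned int align = 8;           /* stands in for CHUNK_ALIGN_BYTES */
        const unsigned int page = 1024 * 1024;  /* stands in for item_size_max     */

        for (int i = 1; size <= page / factor; i++) {
            if (size % align)
                size += align - (size % align); /* round up to the alignment       */
            printf("class %2d: chunk size %7u, %5u chunks per page\n",
                   i, size, page / size);
            size *= factor;                     /* next class is `factor` bigger   */
        }
        /* The last class would then be forced to one page-sized chunk (perslab 1). */
        return 0;
    }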

#ifndef DONT_PREALLOC_SLABS
static void slabs_preallocate(struct default_engine *engine, const unsigned int maxslabs) {
    int i;
    unsigned int prealloc = 0;

    /* pre-allocate a 1MB slab in every size class so people don't get
       confused by non-intuitive "SERVER_ERROR out of memory"
       messages. this is the most common question on the mailing
       list. if you really don't want this, you can rebuild without
       these three lines. */

    for (i = POWER_SMALLEST; i <= POWER_LARGEST; i++) {
        if (++prealloc > maxslabs)
            return;
        do_slabs_newslab(engine, i);
    }
}
#endif

static int grow_slab_list(struct default_engine *engine, const unsigned int id) {
    slabclass_t *p = &engine->slabs.slabclass[id];
    if (p->slabs == p->list_size) {
        size_t new_size = (p->list_size != 0) ? p->list_size * 2 : 16;
        void *new_list = realloc(p->slab_list, new_size * sizeof(void *));
        if (new_list == 0) return 0;
        p->list_size = new_size;
        p->slab_list = new_list;
    }
    return 1;
}

static int do_slabs_newslab(struct default_engine *engine, const unsigned int id) {
    slabclass_t *p = &engine->slabs.slabclass[id];
    int len = p->size * p->perslab;
    char *ptr;

    if ((engine->slabs.mem_limit && engine->slabs.mem_malloced + len > engine->slabs.mem_limit && p->slabs > 0) ||
        (grow_slab_list(engine, id) == 0) ||
        ((ptr = memory_allocate(engine, (size_t)len)) == 0)) {

        MEMCACHED_SLABS_SLABCLASS_ALLOCATE_FAILED(id);
        return 0;
    }

    memset(ptr, 0, (size_t)len);
    p->end_page_ptr = ptr;
    p->end_page_free = p->perslab;

    p->slab_list[p->slabs++] = ptr;
    engine->slabs.mem_malloced += len;
    MEMCACHED_SLABS_SLABCLASS_ALLOCATE(id);

    return 1;
}
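
For a sense of scale (hypothetical figures, assuming a 1MB item_size_max): a class with 120-byte chunks has perslab = 1048576 / 120 = 8738, so one new page requests len = 120 * 8738 = 1048560 bytes and then hands that memory out chunk by chunk through end_page_ptr/end_page_free; the largest class has perslab = 1 and requests exactly one 1MB chunk.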

/*@null@*/
static void *do_slabs_alloc(struct default_engine *engine, const size_t size, unsigned int id) {
    slabclass_t *p;
    void *ret = NULL;

    if (id < POWER_SMALLEST || id > engine->slabs.power_largest) {
        MEMCACHED_SLABS_ALLOCATE_FAILED(size, 0);
        return NULL;
    }

    p = &engine->slabs.slabclass[id];

#ifdef USE_SYSTEM_MALLOC
    if (engine->slabs.mem_limit && engine->slabs.mem_malloced + size > engine->slabs.mem_limit) {
        MEMCACHED_SLABS_ALLOCATE_FAILED(size, id);
        return 0;
    }
    engine->slabs.mem_malloced += size;
    ret = malloc(size);
    MEMCACHED_SLABS_ALLOCATE(size, id, 0, ret);
    return ret;
#endif

    /* fail unless we have space at the end of a recently allocated page,
       we have something on our freelist, or we could allocate a new page */
    if (! (p->end_page_ptr != 0 || p->sl_curr != 0 ||
           do_slabs_newslab(engine, id) != 0)) {
        /* We don't have more memory available */
        ret = NULL;
    } else if (p->sl_curr != 0) {
        /* return off our freelist */
        ret = p->slots[--p->sl_curr];
    } else {
        /* if we recently allocated a whole page, return from that */
        assert(p->end_page_ptr != NULL);
        ret = p->end_page_ptr;
        if (--p->end_page_free != 0) {
            p->end_page_ptr = ((caddr_t)p->end_page_ptr) + p->size;
        } else {
            p->end_page_ptr = 0;
        }
    }

    if (ret) {
        p->requested += size;
        MEMCACHED_SLABS_ALLOCATE(size, id, p->size, ret);
    } else {
        MEMCACHED_SLABS_ALLOCATE_FAILED(size, id);
    }

    return ret;
}

static void do_slabs_free(struct default_engine *engine, void *ptr, const size_t size, unsigned int id) {
    slabclass_t *p;

    if (id < POWER_SMALLEST || id > engine->slabs.power_largest)
        return;

    MEMCACHED_SLABS_FREE(size, id, ptr);
    p = &engine->slabs.slabclass[id];

#ifdef USE_SYSTEM_MALLOC
    engine->slabs.mem_malloced -= size;
    free(ptr);
    return;
#endif

    if (p->sl_curr == p->sl_total) { /* need more space on the free list */
        int new_size = (p->sl_total != 0) ? p->sl_total * 2 : 16; /* 16 is arbitrary */
        void **new_slots = realloc(p->slots, new_size * sizeof(void *));
        if (new_slots == 0)
            return;
        p->slots = new_slots;
        p->sl_total = new_size;
    }
    p->slots[p->sl_curr++] = ptr;
    p->requested -= size;
    return;
}

void add_statistics(const void *cookie, ADD_STAT add_stats,
                    const char *prefix, int num, const char *key,
                    const char *fmt, ...) {
    char name[80], val[80];
    int klen = 0, vlen;
    va_list ap;

    assert(cookie);
    assert(add_stats);
    assert(key);

    va_start(ap, fmt);
    vlen = vsnprintf(val, sizeof(val) - 1, fmt, ap);
    va_end(ap);

    if (prefix != NULL) {
        klen = snprintf(name, sizeof(name), "%s:", prefix);
    }

    if (num != -1) {
        klen += snprintf(name + klen, sizeof(name) - klen, "%d:", num);
    }

    klen += snprintf(name + klen, sizeof(name) - klen, "%s", key);

    add_stats(name, klen, val, vlen, cookie);
}
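
The helper above builds stat keys of the form "prefix:num:key", where the prefix and the numeric part are each optional. A minimal sketch of two calls and the keys they would produce (the values and the "slabs" prefix are purely illustrative; add_stats and cookie are assumed to come from the server's stats callback):

    /* Emits the pair "3:chunk_size" => "120" for slab class 3. */
    add_statistics(cookie, add_stats, NULL, 3, "chunk_size", "%u", 120u);

    /* With a prefix the key becomes "slabs:3:chunk_size". */
    add_statistics(cookie, add_stats, "slabs", 3, "chunk_size", "%u", 120u);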

/*@null@*/
static void do_slabs_stats(struct default_engine *engine, ADD_STAT add_stats, const void *cookie) {
    int i, total;
    /* Get the per-thread stats which contain some interesting aggregates */
#ifdef FUTURE
    struct conn *conn = (struct conn*)cookie;
    struct thread_stats thread_stats;
    threadlocal_stats_aggregate(conn, &thread_stats);
#endif

    total = 0;
    for (i = POWER_SMALLEST; i <= engine->slabs.power_largest; i++) {
        slabclass_t *p = &engine->slabs.slabclass[i];
        if (p->slabs != 0) {
            uint32_t perslab, slabs;
            slabs = p->slabs;
            perslab = p->perslab;

            add_statistics(cookie, add_stats, NULL, i, "chunk_size", "%u",
                           p->size);
            add_statistics(cookie, add_stats, NULL, i, "chunks_per_page", "%u",
                           perslab);
            add_statistics(cookie, add_stats, NULL, i, "total_pages", "%u",
                           slabs);
            add_statistics(cookie, add_stats, NULL, i, "total_chunks", "%u",
                           slabs * perslab);
            add_statistics(cookie, add_stats, NULL, i, "used_chunks", "%u",
                           slabs * perslab - p->sl_curr - p->end_page_free);
            add_statistics(cookie, add_stats, NULL, i, "free_chunks", "%u",
                           p->sl_curr);
            add_statistics(cookie, add_stats, NULL, i, "free_chunks_end", "%u",
                           p->end_page_free);
            add_statistics(cookie, add_stats, NULL, i, "mem_requested", "%zu",
                           p->requested);
#ifdef FUTURE
            add_statistics(cookie, add_stats, NULL, i, "get_hits", "%"PRIu64,
                           thread_stats.slab_stats[i].get_hits);
            add_statistics(cookie, add_stats, NULL, i, "cmd_set", "%"PRIu64,
                           thread_stats.slab_stats[i].set_cmds);
            add_statistics(cookie, add_stats, NULL, i, "delete_hits", "%"PRIu64,
                           thread_stats.slab_stats[i].delete_hits);
            add_statistics(cookie, add_stats, NULL, i, "cas_hits", "%"PRIu64,
                           thread_stats.slab_stats[i].cas_hits);
            add_statistics(cookie, add_stats, NULL, i, "cas_badval", "%"PRIu64,
                           thread_stats.slab_stats[i].cas_badval);
#endif
            total++;
        }
    }

    /* add overall slab stats and append terminator */

    add_statistics(cookie, add_stats, NULL, -1, "active_slabs", "%d", total);
    add_statistics(cookie, add_stats, NULL, -1, "total_malloced", "%zu",
                   engine->slabs.mem_malloced);
}

static void *memory_allocate(struct default_engine *engine, size_t size) {
    void *ret;

    if (engine->slabs.mem_base == NULL) {
        /* We are not using a preallocated large memory chunk */
        ret = malloc(size);
    } else {
        ret = engine->slabs.mem_current;

        if (size > engine->slabs.mem_avail) {
            return NULL;
        }

        /* mem_current pointer _must_ be aligned!!! */
        if (size % CHUNK_ALIGN_BYTES) {
            size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
        }

        engine->slabs.mem_current = ((char*)engine->slabs.mem_current) + size;
        if (size < engine->slabs.mem_avail) {
            engine->slabs.mem_avail -= size;
        } else {
            engine->slabs.mem_avail = 0;
        }
    }

    return ret;
}
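
The round-up in memory_allocate (the same expression appears in slabs_init) keeps the bump pointer aligned when carving pages out of the preallocated region. As a worked example, assuming CHUNK_ALIGN_BYTES is 8: a request of 1048560 bytes is already a multiple of 8 and advances mem_current by exactly that amount, while a request of 1048561 bytes has 1048561 % 8 = 1, so 8 - 1 = 7 padding bytes are added and the pointer advances by 1048568.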

void *slabs_alloc(struct default_engine *engine, size_t size, unsigned int id) {
    void *ret;

    pthread_mutex_lock(&engine->slabs.lock);
    ret = do_slabs_alloc(engine, size, id);
    pthread_mutex_unlock(&engine->slabs.lock);
    return ret;
}

void slabs_free(struct default_engine *engine, void *ptr, size_t size, unsigned int id) {
    pthread_mutex_lock(&engine->slabs.lock);
    do_slabs_free(engine, ptr, size, id);
    pthread_mutex_unlock(&engine->slabs.lock);
}
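
A minimal round trip through the public, lock-protected API (a sketch only: `engine` is assumed to be an initialized struct default_engine, and the key/value sizes are hypothetical):

    /* Pick a class for the item, grab a chunk, and later return it to the
     * same class with the same size. */
    const size_t nkey = 10, vlen = 100;              /* hypothetical item dimensions */
    size_t ntotal = sizeof(hash_item) + nkey + vlen;
    unsigned int id = slabs_clsid(engine, ntotal);
    if (id != 0) {
        void *chunk = slabs_alloc(engine, ntotal, id);
        if (chunk != NULL) {
            /* ... build the item in the chunk ... */
            slabs_free(engine, chunk, ntotal, id);
        }
    }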

void slabs_stats(struct default_engine *engine, ADD_STAT add_stats, const void *c) {
    pthread_mutex_lock(&engine->slabs.lock);
    do_slabs_stats(engine, add_stats, c);
    pthread_mutex_unlock(&engine->slabs.lock);
}

void slabs_adjust_mem_requested(struct default_engine *engine, unsigned int id, size_t old, size_t ntotal)
{
    pthread_mutex_lock(&engine->slabs.lock);
    slabclass_t *p;
    if (id < POWER_SMALLEST || id > engine->slabs.power_largest) {
        EXTENSION_LOGGER_DESCRIPTOR *logger;
        logger = (void*)engine->server.extension->get_extension(EXTENSION_LOGGER);
        logger->log(EXTENSION_LOG_WARNING, NULL,
                    "Internal error! Invalid slab class\n");
        abort();
    }

    p = &engine->slabs.slabclass[id];
    p->requested = p->requested - old + ntotal;
    pthread_mutex_unlock(&engine->slabs.lock);
}
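
slabs_adjust_mem_requested covers the case where an item already stored in a chunk changes size in place, so only the per-class mem_requested accounting has to move from the old byte count to the new one. A small sketch (the 150 and 180 byte figures are hypothetical, and the item is assumed to stay in the same slab class):

    /* The item accounted at 150 bytes now occupies 180 bytes of its chunk. */
    unsigned int id = slabs_clsid(engine, 180);
    slabs_adjust_mem_requested(engine, id, 150, 180);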