old_api.cc
/*
 * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 13    High Level Memory Pool Management */

#include "squid.h"
#include "base/PackableStream.h"
#include "ClientInfo.h"
#include "dlink.h"
#include "event.h"
#include "fs_io.h"
#include "icmp/net_db.h"
#include "md5.h"
#include "mem/Allocator.h"
#include "mem/Pool.h"
#include "mem/Stats.h"
#include "MemBuf.h"
#include "mgr/Registration.h"
#include "SquidConfig.h"
#include "Store.h"

#include <iomanip>

/* forward declarations */
static void memFree32B(void *);
static void memFree64B(void *);
static void memFree128B(void *);
static void memFree256B(void *);
static void memFree512B(void *);
static void memFree1K(void *);
static void memFree2K(void *);
static void memFree4K(void *);
static void memFree8K(void *);
static void memFree16K(void *);
static void memFree32K(void *);
static void memFree64K(void *);

/* module locals */
static double xm_time = 0;
static double xm_deltat = 0;

struct PoolMeta {
    const char *name;
    size_t obj_size;
};

static Mem::Meter HugeBufCountMeter;
static Mem::Meter HugeBufVolumeMeter;

/* local routines */

// XXX: refactor objects using these pools to use MEMPROXY classes instead
// then remove this function entirely
static Mem::Allocator *&
GetPool(size_t type)
{
    static Mem::Allocator *pools[MEM_MAX];
    static bool initialized = false;

    if (!initialized) {
        memset(pools, '\0', sizeof(pools));
        initialized = true;
        // Mem::Init() makes use of GetPool(type) to initialize
        // the actual pools. So must come after the flag is true
        Mem::Init();
    }

    return pools[type];
}

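// Illustrative sketch of the MEMPROXY_CLASS() API that the XXX above refers to: a class
// declares its own pool, so callers use plain new/delete instead of GetPool()-style lookups.
// The class and member names below are hypothetical examples, not part of this file.
//
//   class ExampleNode
//   {
//       MEMPROXY_CLASS(ExampleNode);   // operator new/delete served by a dedicated pool
//   public:
//       int value = 0;
//   };
//
//   auto *n = new ExampleNode;   // allocated from the ExampleNode pool
//   delete n;                    // returned to that pool, not the general heap
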
void
Mem::Stats(StoreEntry * sentry)
{
    PackableStream stream(*sentry);
    Report(stream);

    stream << "Large buffers: " <<
           HugeBufCountMeter.currentLevel() << " (" <<
           HugeBufVolumeMeter.currentLevel() / 1024 << " KB)\n";

#if WITH_VALGRIND
    if (RUNNING_ON_VALGRIND) {
        long int leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
        stream << "Valgrind Report:\n";
        stream << "Type\tAmount\n";
        debugs(13, DBG_IMPORTANT, "Asking valgrind for memleaks");
        VALGRIND_DO_LEAK_CHECK;
        debugs(13, DBG_IMPORTANT, "Getting valgrind statistics");
        VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
        stream << "Leaked\t" << leaked << "\n";
        stream << "Dubious\t" << dubious << "\n";
        stream << "Reachable\t" << reachable << "\n";
        stream << "Suppressed\t" << suppressed << "\n";
    }
#endif
    stream.flush();
}

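// Usage note (a sketch, not taken from this file): Mem::Init() below registers this handler
// as the "mem" cache manager action, so the report above is typically retrieved with
//   squidclient mgr:mem
// or via the cache manager's "Memory Utilization" page.
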
/*
 * we have a limit on _total_ amount of idle memory so we ignore max_pages for now.
 * Will ignore repeated calls for the same pool type.
 */
static void
memDataInit(mem_type type, const char *name, size_t size, int, bool doZero)
{
    assert(name && size);

    if (GetPool(type) != nullptr)
        return;

    GetPool(type) = memPoolCreate(name, size);
    GetPool(type)->zeroBlocks(doZero);
}

/* find appropriate pool and use it (pools always init buffer with 0s) */
void *
memAllocate(mem_type type)
{
    assert(GetPool(type));
    return GetPool(type)->alloc();
}

/* give memory back to the pool */
void
memFree(void *p, int type)
{
    assert(GetPool(type));
    GetPool(type)->freeOne(p);
}

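// Illustrative pairing of the two calls above (hypothetical caller code):
//   auto *digest = static_cast<unsigned char *>(memAllocate(MEM_MD5_DIGEST));
//   ... fill the SQUID_MD5_DIGEST_LENGTH bytes ...
//   memFree(digest, MEM_MD5_DIGEST);
// The same mem_type passed to memAllocate() must be passed back to memFree().
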
/* Find the best fit MEM_X_BUF type */
static mem_type
memFindBufSizeType(size_t net_size, size_t * gross_size)
{
    mem_type type;
    size_t size;

    if (net_size <= 32) {
        type = MEM_32B_BUF;
        size = 32;
    } else if (net_size <= 64) {
        type = MEM_64B_BUF;
        size = 64;
    } else if (net_size <= 128) {
        type = MEM_128B_BUF;
        size = 128;
    } else if (net_size <= 256) {
        type = MEM_256B_BUF;
        size = 256;
    } else if (net_size <= 512) {
        type = MEM_512B_BUF;
        size = 512;
    } else if (net_size <= 1024) {
        type = MEM_1K_BUF;
        size = 1024;
    } else if (net_size <= 2 * 1024) {
        type = MEM_2K_BUF;
        size = 2 * 1024;
    } else if (net_size <= 4 * 1024) {
        type = MEM_4K_BUF;
        size = 4 * 1024;
    } else if (net_size <= 8 * 1024) {
        type = MEM_8K_BUF;
        size = 8 * 1024;
    } else if (net_size <= 16 * 1024) {
        type = MEM_16K_BUF;
        size = 16 * 1024;
    } else if (net_size <= 32 * 1024) {
        type = MEM_32K_BUF;
        size = 32 * 1024;
    } else if (net_size <= 64 * 1024) {
        type = MEM_64K_BUF;
        size = 64 * 1024;
    } else {
        type = MEM_NONE;
        size = net_size;
    }

    if (gross_size)
        *gross_size = size;

    return type;
}

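// Worked examples of the mapping above (derived from the code, for illustration):
//   net_size = 300    -> MEM_512B_BUF, *gross_size = 512
//   net_size = 65536  -> MEM_64K_BUF,  *gross_size = 65536
//   net_size = 100000 -> MEM_NONE (too big for any pool), *gross_size = 100000
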
/* allocate a variable size buffer using best-fit pool */
void *
memAllocBuf(size_t net_size, size_t * gross_size)
{
    mem_type type = memFindBufSizeType(net_size, gross_size);

    if (type != MEM_NONE)
        return memAllocate(type);
    else {
        ++HugeBufCountMeter;
        HugeBufVolumeMeter += net_size;
        return xmalloc(net_size);
    }
}

/* resize a variable sized buffer using best-fit pool */
void *
memReallocBuf(void *oldbuf, size_t net_size, size_t * gross_size)
{
    /* XXX This can be optimized on very large buffers to use realloc() */
    /* TODO: if the existing gross size is >= new gross size, do nothing */
    size_t new_gross_size;
    void *newbuf = memAllocBuf(net_size, &new_gross_size);

    if (oldbuf) {
        size_t data_size = *gross_size;

        if (data_size > net_size)
            data_size = net_size;

        memcpy(newbuf, oldbuf, data_size);

        memFreeBuf(*gross_size, oldbuf);
    }

    *gross_size = new_gross_size;
    return newbuf;
}

/* free buffer allocated with memAllocBuf() */
void
memFreeBuf(size_t size, void *buf)
{
    mem_type type = memFindBufSizeType(size, nullptr);

    if (type != MEM_NONE)
        memFree(buf, type);
    else {
        xfree(buf);
        --HugeBufCountMeter;
        HugeBufVolumeMeter -= size;
    }
}

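// Lifecycle sketch for the three buffer calls above (hypothetical caller; the point is that
// the caller must keep the gross size and pass it back when resizing or freeing):
//   size_t gross = 0;
//   char *buf = static_cast<char *>(memAllocBuf(300, &gross));    // gross becomes 512
//   buf = static_cast<char *>(memReallocBuf(buf, 1200, &gross));  // gross becomes 2048
//   memFreeBuf(gross, buf);                                       // returns the 2 KB object to its pool
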
static double clean_interval = 15.0;    /* time to live of idle chunk before release */

void
Mem::CleanIdlePools(void *)
{
    MemPools::GetInstance().clean(static_cast<time_t>(clean_interval));
    eventAdd("memPoolCleanIdlePools", CleanIdlePools, nullptr, clean_interval, 1);
}

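// The call above re-adds itself to the event queue, so idle pool memory is trimmed roughly
// every clean_interval (15) seconds for the lifetime of the process; the first event is
// presumably scheduled elsewhere during startup.
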
void
memConfigure(void)
{
    int64_t new_pool_limit;

    if (!Config.onoff.mem_pools)
        new_pool_limit = 0;
    else if (Config.MemPools.limit > 0)
        new_pool_limit = Config.MemPools.limit;
    else {
        if (Config.MemPools.limit == 0)
            debugs(13, DBG_IMPORTANT, "memory_pools_limit 0 has been changed to memory_pools_limit none. Please update your config");
        new_pool_limit = -1;
    }

    MemPools::GetInstance().setIdleLimit(new_pool_limit);
}

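// squid.conf knobs behind the logic above (an illustrative sketch of the mapping):
//   memory_pools off          -> idle limit 0 (keep no idle pool objects)
//   memory_pools_limit 5 MB   -> keep up to 5 MB of idle pool objects
//   memory_pools_limit none   -> idle limit -1 (unlimited); "0" is the deprecated spelling of this
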
static mem_type &
operator++(mem_type &aMem)
{
    auto tmp = static_cast<int>(aMem);
    aMem = static_cast<mem_type>(++tmp);
    return aMem;
}

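// This pre-increment overload exists so plain mem_type enum values can be iterated, as in
// the sanity-check loop inside Mem::Init() below:
//   for (auto t = MEM_NONE; ++t < MEM_MAX;)
//       assert(GetPool(t));
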
void
Mem::Init(void)
{
    /* all pools are ready to be used */
    static bool MemIsInitialized = false;
    if (MemIsInitialized)
        return;

    memDataInit(MEM_32B_BUF, "32B Buffer", 32, 10, false);
    memDataInit(MEM_64B_BUF, "64B Buffer", 64, 10, false);
    memDataInit(MEM_128B_BUF, "128B Buffer", 128, 10, false);
    memDataInit(MEM_256B_BUF, "256B Buffer", 256, 10, false);
    memDataInit(MEM_512B_BUF, "512B Buffer", 512, 10, false);
    memDataInit(MEM_1K_BUF, "1K Buffer", 1024, 10, false);
    memDataInit(MEM_2K_BUF, "2K Buffer", 2048, 10, false);
    memDataInit(MEM_4K_BUF, "4K Buffer", 4096, 10, false);
    memDataInit(MEM_8K_BUF, "8K Buffer", 8192, 10, false);
    memDataInit(MEM_16K_BUF, "16K Buffer", 16384, 10, false);
    memDataInit(MEM_32K_BUF, "32K Buffer", 32768, 10, false);
    memDataInit(MEM_64K_BUF, "64K Buffer", 65536, 10, false);
    // TODO: Carefully stop zeroing these objects' memory and drop the doZero parameter
    memDataInit(MEM_MD5_DIGEST, "MD5 digest", SQUID_MD5_DIGEST_LENGTH, 0, true);
    GetPool(MEM_MD5_DIGEST)->setChunkSize(512 * 1024);

    // Test that all entries are initialized
    for (auto t = MEM_NONE; ++t < MEM_MAX;) {
        // If you hit this assertion, then you forgot to add a
        // memDataInit() line for type 't'.
        assert(GetPool(t));
    }

    MemIsInitialized = true;

    // finally register with the cache manager
    Mgr::RegisterAction("mem", "Memory Utilization", Mem::Stats, 0, 1);
}

void
memClean(void)
{
    if (Config.MemPools.limit > 0) // do not reset if disabled or same
        MemPools::GetInstance().setIdleLimit(0);
    MemPools::GetInstance().clean(0); // zero age is the only way to clean all pools

    Mem::PoolStats stats;
    const auto poolsInUse = Mem::GlobalStats(stats);
    if (stats.items_inuse) {
        debugs(13, 2, stats.items_inuse <<
               " items in " << stats.chunks_inuse << " chunks and " <<
               poolsInUse << " pools are left dirty");
    }
}

int
memInUse(mem_type type)
{
    return GetPool(type)->getInUseCount();
}

/* ick */

void
memFree32B(void *p)
{
    memFree(p, MEM_32B_BUF);
}

void
memFree64B(void *p)
{
    memFree(p, MEM_64B_BUF);
}

void
memFree128B(void *p)
{
    memFree(p, MEM_128B_BUF);
}

void
memFree256B(void *p)
{
    memFree(p, MEM_256B_BUF);
}

void
memFree512B(void *p)
{
    memFree(p, MEM_512B_BUF);
}

void
memFree1K(void *p)
{
    memFree(p, MEM_1K_BUF);
}

void
memFree2K(void *p)
{
    memFree(p, MEM_2K_BUF);
}

void
memFree4K(void *p)
{
    memFree(p, MEM_4K_BUF);
}

void
memFree8K(void *p)
{
    memFree(p, MEM_8K_BUF);
}

void
memFree16K(void *p)
{
    memFree(p, MEM_16K_BUF);
}

void
memFree32K(void *p)
{
    memFree(p, MEM_32K_BUF);
}

void
memFree64K(void *p)
{
    memFree(p, MEM_64K_BUF);
}

static void
cxx_xfree(void * ptr)
{
    xfree(ptr);
}

FREE *
memFreeBufFunc(size_t size)
{
    switch (size) {

    case 32:
        return memFree32B;

    case 64:
        return memFree64B;

    case 128:
        return memFree128B;

    case 256:
        return memFree256B;

    case 512:
        return memFree512B;

    case 1024:
        return memFree1K;

    case 2 * 1024:
        return memFree2K;

    case 4 * 1024:
        return memFree4K;

    case 8 * 1024:
        return memFree8K;

    case 16 * 1024:
        return memFree16K;

    case 32 * 1024:
        return memFree32K;

    case 64 * 1024:
        return memFree64K;

    default:
        --HugeBufCountMeter;
        HugeBufVolumeMeter -= size;
        return cxx_xfree;
    }
}

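// Illustrative use of memFreeBufFunc() (hypothetical caller): stash the matching deleter at
// allocation time so the buffer can be released later without remembering its gross size.
//   size_t gross = 0;
//   void *buf = memAllocBuf(1200, &gross);   // gross becomes 2048
//   FREE *deleter = memFreeBufFunc(gross);   // memFree2K for this size
//   ...
//   deleter(buf);
// Note that the size passed here must be the gross (pool) size, not the requested net size.
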
void
Mem::PoolReport(const PoolStats *mp_st, const PoolMeter *AllMeter, std::ostream &stream)
{
    int excess = 0;
    int needed = 0;
    PoolMeter *pm = mp_st->meter;
    const char *delim = "\t ";

    stream.setf(std::ios_base::fixed);
    stream << std::setw(20) << std::left << mp_st->label << delim;
    stream << std::setw(4) << std::right << mp_st->obj_size << delim;

    /* Chunks */
    if (mp_st->chunk_capacity) {
        stream << std::setw(4) << toKB(mp_st->obj_size * mp_st->chunk_capacity) << delim;
        stream << std::setw(4) << mp_st->chunk_capacity << delim;

        needed = mp_st->items_inuse / mp_st->chunk_capacity;

        if (mp_st->items_inuse % mp_st->chunk_capacity)
            ++needed;

        excess = mp_st->chunks_inuse - needed;

        stream << std::setw(4) << mp_st->chunks_alloc << delim;
        stream << std::setw(4) << mp_st->chunks_inuse << delim;
        stream << std::setw(4) << mp_st->chunks_free << delim;
        stream << std::setw(4) << mp_st->chunks_partial << delim;
        stream << std::setprecision(3) << xpercent(excess, needed) << delim;
    } else {
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
        stream << delim;
    }
    /*
     * Fragmentation calculation:
     *    needed = inuse.currentLevel() / chunk_capacity
     *    excess = used - needed
     *    fragmentation = excess / needed * 100%
     *
     *    Fragm = (alloced - (inuse / obj_ch)) / alloced
     */
    /* allocated */
    stream << mp_st->items_alloc << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->alloc.peak()) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->alloc.peakTime()) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(mp_st->obj_size * pm->alloc.currentLevel(), AllMeter->alloc.currentLevel()) << delim;
    /* in use */
    stream << mp_st->items_inuse << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->inuse.peak()) << delim;
    stream << std::setprecision(2) << ((squid_curtime - pm->inuse.peakTime()) / 3600.) << delim;
    stream << std::setprecision(3) << xpercent(pm->inuse.currentLevel(), pm->alloc.currentLevel()) << delim;
    /* idle */
    stream << mp_st->items_idle << delim;
    stream << toKB(mp_st->obj_size * pm->idle.currentLevel()) << delim;
    stream << toKB(mp_st->obj_size * pm->idle.peak()) << delim;
    /* saved */
    stream << (int)pm->gb_saved.count << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.count, AllMeter->gb_allocated.count) << delim;
    stream << std::setprecision(3) << xpercent(pm->gb_saved.bytes, AllMeter->gb_allocated.bytes) << delim;
    stream << std::setprecision(3) << xdiv(pm->gb_allocated.count - pm->gb_oallocated.count, xm_deltat) << "\n";

    pm->gb_oallocated.count = pm->gb_allocated.count;
}

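// Worked example of the %Frag column computed above (illustrative numbers):
//   chunk_capacity = 10, items_inuse = 25, chunks_inuse = 5
//   needed = ceil(25 / 10) = 3, excess = 5 - 3 = 2, %Frag = xpercent(2, 3) ~ 66.7
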
void
Mem::Report(std::ostream &stream)
{
    static char buf[64];
    int not_used = 0;

    /* caption */
    stream << "Current memory usage:\n";
    /* heading */
    stream << "Pool\t Obj Size\t"
           "Chunks\t\t\t\t\t\t\t"
           "Allocated\t\t\t\t\t"
           "In Use\t\t\t\t\t"
           "Idle\t\t\t"
           "Allocations Saved\t\t\t"
           "Rate\t"
           "\n"
           " \t (bytes)\t"
           "KB/ch\t obj/ch\t"
           "(#)\t used\t free\t part\t %Frag\t "
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %Tot\t"
           "(#)\t (KB)\t high (KB)\t high (hrs)\t %alloc\t"
           "(#)\t (KB)\t high (KB)\t"
           "(#)\t %cnt\t %vol\t"
           "(#)/sec\t"
           "\n";

    xm_deltat = current_dtime - xm_time;
    xm_time = current_dtime;

    /* Get stats for Totals report line */
    PoolStats mp_total;
    const auto poolsInUse = GlobalStats(mp_total);

    std::vector<PoolStats> usedPools;
    usedPools.reserve(poolsInUse);

    /* main table */
    for (const auto pool : MemPools::GetInstance().pools) {
        PoolStats mp_stats;
        pool->getStats(mp_stats);

        if (mp_stats.pool->meter.gb_allocated.count > 0)
            usedPools.emplace_back(mp_stats);
        else
            ++not_used;
    }

    // sort on %Total Allocated (largest first)
    std::sort(usedPools.begin(), usedPools.end(), [](const PoolStats &a, const PoolStats &b) {
        return (double(a.obj_size) * a.meter->alloc.currentLevel()) > (double(b.obj_size) * b.meter->alloc.currentLevel());
    });

    for (const auto &pool: usedPools) {
        PoolReport(&pool, mp_total.meter, stream);
    }

    PoolReport(&mp_total, mp_total.meter, stream);

    /* Cumulative */
    stream << "Cumulative allocated volume: " << double_to_str(buf, 64, mp_total.meter->gb_allocated.bytes) << "\n";
    /* overhead */
    stream << "Current overhead: " << mp_total.overhead << " bytes (" <<
           std::setprecision(3) << xpercent(mp_total.overhead, mp_total.meter->inuse.currentLevel()) << "%)\n";
    /* limits */
    if (MemPools::GetInstance().idleLimit() >= 0)
        stream << "Idle pool limit: " << std::setprecision(2) << toMB(MemPools::GetInstance().idleLimit()) << " MB\n";
    /* limits */
    auto poolCount = MemPools::GetInstance().pools.size();
    stream << "Total Pools created: " << poolCount << "\n";
    stream << "Pools ever used: " << poolCount - not_used << " (shown above)\n";
    stream << "Currently in use: " << poolsInUse << "\n";
}