RockSwapDir.cc
1 /*
2  * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
3  *
4  * Squid software is distributed under GPLv2+ license and includes
5  * contributions from numerous individuals and organizations.
6  * Please see the COPYING and CONTRIBUTORS files for details.
7  */
8 
9 /* DEBUG: section 47 Store Directory Routines */
10 
11 #include "squid.h"
12 #include "base/IoManip.h"
13 #include "cache_cf.h"
14 #include "CollapsedForwarding.h"
15 #include "ConfigOption.h"
16 #include "DiskIO/DiskIOModule.h"
17 #include "DiskIO/DiskIOStrategy.h"
18 #include "DiskIO/ReadRequest.h"
19 #include "DiskIO/WriteRequest.h"
20 #include "fs/rock/RockHeaderUpdater.h"
21 #include "fs/rock/RockIoRequests.h"
22 #include "fs/rock/RockIoState.h"
23 #include "fs/rock/RockSwapDir.h"
24 #include "globals.h"
25 #include "ipc/mem/Pages.h"
26 #include "MemObject.h"
27 #include "Parsing.h"
28 #include "SquidConfig.h"
29 #include "SquidMath.h"
30 #include "tools.h"
31 
32 #include <cstdlib>
33 #include <iomanip>
34 #include <limits>
35 
36 #if HAVE_SYS_STAT_H
37 #include <sys/stat.h>
38 #endif
39 
40 Rock::SwapDir::SwapDir(): ::SwapDir("rock"),
41  slotSize(HeaderSize), filePath(nullptr), map(nullptr), io(nullptr),
42  waitingForPage(nullptr)
43 {
44 }
45 
46 Rock::SwapDir::~SwapDir()
47 {
48  delete io;
49  delete map;
50  safe_free(filePath);
51 }
52 
53 // called when Squid core needs a StoreEntry with a given key
54 StoreEntry *
55 Rock::SwapDir::get(const cache_key *key)
56 {
57  if (!map || !theFile || !theFile->canRead())
58  return nullptr;
59 
60  sfileno filen;
61  const Ipc::StoreMapAnchor *const slot = map->openForReading(key, filen);
62  if (!slot)
63  return nullptr;
64 
65  // create a brand new store entry and initialize it with stored basics
66  StoreEntry *e = new StoreEntry();
67  e->createMemObject();
68  anchorEntry(*e, filen, *slot);
69  trackReferences(*e);
70  return e;
71 }
72 
73 bool
74 Rock::SwapDir::anchorToCache(StoreEntry &entry)
75 {
76  Assure(!entry.hasDisk());
77 
78  if (!map || !theFile || !theFile->canRead())
79  return false;
80 
81  sfileno filen;
82  const Ipc::StoreMapAnchor *const slot = map->openForReading(
83  reinterpret_cast<cache_key*>(entry.key), filen);
84  if (!slot)
85  return false;
86 
87  anchorEntry(entry, filen, *slot);
88  return true;
89 }
90 
91 bool
92 Rock::SwapDir::updateAnchored(StoreEntry &entry)
93 {
94  if (!map || !theFile || !theFile->canRead())
95  return false;
96 
97  assert(entry.hasDisk(index));
98 
99  const auto &anchor = map->readableEntry(entry.swap_filen);
100  entry.swap_file_sz = anchor.basics.swap_file_sz;
101  return true;
102 }
103 
104 void
105 Rock::SwapDir::anchorEntry(StoreEntry &e, const sfileno filen, const Ipc::StoreMapAnchor &anchor)
106 {
107  anchor.exportInto(e);
108 
109  const bool complete = anchor.complete();
110  e.store_status = complete ? STORE_OK : STORE_PENDING;
111  // SWAPOUT_WRITING: even though another worker writes?
112  e.attachToDisk(index, filen, complete ? SWAPOUT_DONE : SWAPOUT_WRITING);
113 
113 
114  e.ping_status = PING_NONE;
115 
116  EBIT_SET(e.flags, ENTRY_VALIDATED);
117 }
118 
119 void Rock::SwapDir::disconnect(StoreEntry &e)
120 {
121  assert(e.hasDisk(index));
122 
123  ignoreReferences(e);
124 
125  // do not rely on e.swap_status here because there is an async delay
126  // before it switches from SWAPOUT_WRITING to SWAPOUT_DONE.
127 
128  // since e has swap_filen, its slot is locked for reading and/or writing
129  // but it is difficult to know whether THIS worker is reading or writing e,
130  // especially since we may switch from writing to reading. This code relies
131  // on Rock::IoState::writeableAnchor_ being set when we locked for writing.
132  if (e.mem_obj && e.mem_obj->swapout.sio != nullptr &&
133  dynamic_cast<IoState&>(*e.mem_obj->swapout.sio).writeableAnchor_) {
134  map->abortWriting(e.swap_filen);
135  e.detachFromDisk();
136  dynamic_cast<IoState&>(*e.mem_obj->swapout.sio).writeableAnchor_ = nullptr;
138  e.storeWriterDone();
139  } else {
140  map->closeForReading(e.swap_filen);
141  e.detachFromDisk();
142  }
143 }
144 
145 uint64_t
146 Rock::SwapDir::currentSize() const
147 {
148  const uint64_t spaceSize = !freeSlots ?
149  maxSize() : (slotSize * freeSlots->size());
150  // everything that is not free is in use
151  return maxSize() - spaceSize;
152 }
153 
154 uint64_t
155 Rock::SwapDir::currentCount() const
156 {
157  return map ? map->entryCount() : 0;
158 }
159 
162 bool
163 Rock::SwapDir::doReportStat() const
164 {
165  return ::SwapDir::doReportStat() && (!UsingSmp() || IamDiskProcess());
166 }
167 
168 void
169 Rock::SwapDir::finalizeSwapoutSuccess(const StoreEntry &e)
170 {
171  // nothing to do; handleWriteCompletionSuccess() did everything for us
172  assert(!e.mem_obj ||
173  !e.mem_obj->swapout.sio ||
174  !dynamic_cast<IoState&>(*e.mem_obj->swapout.sio).writeableAnchor_);
175 }
176 
177 void
178 Rock::SwapDir::finalizeSwapoutFailure(StoreEntry &entry)
179 {
180  debugs(47, 5, entry);
181  disconnect(entry); // calls abortWriting() to free the disk entry
182 }
183 
184 int64_t
185 Rock::SwapDir::slotLimitAbsolute() const
186 {
187  // the max value is an invalid one; all values must be below the limit
188  assert(std::numeric_limits<Ipc::StoreMapSliceId>::max() ==
189  std::numeric_limits<SlotId>::max());
190  return std::numeric_limits<SlotId>::max();
191 }
192 
193 int64_t
194 Rock::SwapDir::slotLimitActual() const
195 {
196  const int64_t sWanted = (maxSize() - HeaderSize)/slotSize;
197  const int64_t sLimitLo = map ? map->sliceLimit() : 0; // dynamic shrinking unsupported
198  const int64_t sLimitHi = slotLimitAbsolute();
199  return min(max(sLimitLo, sWanted), sLimitHi);
200 }
201 
202 int64_t
203 Rock::SwapDir::entryLimitActual() const
204 {
205  return min(slotLimitActual(), entryLimitAbsolute());
206 }
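// Worked example (values assumed for illustration, not taken from this file):
// with a 1 GiB cache_dir and 16 KiB slots, slotLimitActual()'s sWanted term is
// roughly (2^30 - HeaderSize) / 2^14, i.e. about 65536 slots, and
// entryLimitActual() then caps the entry count at min(that, entryLimitAbsolute()),
// so small caches are slot-bound while very large ones hit the entry limit.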
207 
208 // TODO: encapsulate as a tool
209 void
210 Rock::SwapDir::create()
211 {
212  assert(path);
213  assert(filePath);
214 
215  if (UsingSmp() && !IamDiskProcess()) {
216  debugs (47,3, "disker will create in " << path);
217  return;
218  }
219 
220  debugs (47,3, "creating in " << path);
221 
222  struct stat dir_sb;
223  if (::stat(path, &dir_sb) == 0) {
224  struct stat file_sb;
225  if (::stat(filePath, &file_sb) == 0) {
226  debugs (47, DBG_IMPORTANT, "Skipping existing Rock db: " << filePath);
227  return;
228  }
229  // else the db file is not there or is not accessible, and we will try
230  // to create it later below, generating a detailed error on failures.
231  } else { // path does not exist or is inaccessible
232  // If path exists but is not accessible, mkdir() below will fail, and
233  // the admin should see the error and act accordingly, so there is
234  // no need to distinguish ENOENT from other possible stat() errors.
235  debugs (47, DBG_IMPORTANT, "Creating Rock db directory: " << path);
236  const int res = mkdir(path, 0700);
237  if (res != 0)
238  createError("mkdir");
239  }
240 
241  debugs (47, DBG_IMPORTANT, "Creating Rock db: " << filePath);
242  const int swap = open(filePath, O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, 0600);
243  if (swap < 0)
244  createError("create");
245 
246 #if SLOWLY_FILL_WITH_ZEROS
247  char block[1024];
248  Must(maxSize() % sizeof(block) == 0);
249  memset(block, '\0', sizeof(block));
250 
251  for (off_t offset = 0; offset < maxSize(); offset += sizeof(block)) {
252  if (write(swap, block, sizeof(block)) != sizeof(block))
253  createError("write");
254  }
255 #else
256  if (ftruncate(swap, maxSize()) != 0)
257  createError("truncate");
258 
259  char header[HeaderSize];
260  memset(header, '\0', sizeof(header));
261  if (write(swap, header, sizeof(header)) != sizeof(header))
262  createError("write");
263 #endif
264 
265  close(swap);
266 }
267 
268 // report Rock DB creation error and exit
269 void
270 Rock::SwapDir::createError(const char *const msg)
271 {
272  int xerrno = errno; // XXX: where does errno come from?
273  debugs(47, DBG_CRITICAL, "ERROR: Failed to initialize Rock Store db in " <<
274  filePath << "; " << msg << " error: " << xstrerr(xerrno));
275  fatal("Rock Store db creation error");
276 }
277 
278 void
279 Rock::SwapDir::init()
280 {
281  debugs(47,2, MYNAME);
282 
283  // XXX: SwapDirs aren't refcounted. We make IORequestor calls, which
284  // are refcounted. We up our count once to avoid implicit delete's.
285  lock();
286 
287  freeSlots = shm_old(Ipc::Mem::PageStack)(freeSlotsPath());
288 
289  Must(!map);
290  map = new DirMap(inodeMapPath());
291  map->cleaner = this;
292 
293  const char *ioModule = needsDiskStrand() ? "IpcIo" : "Blocking";
294  if (DiskIOModule *m = DiskIOModule::Find(ioModule)) {
295  debugs(47,2, "Using DiskIO module: " << ioModule);
296  io = m->createStrategy();
297  io->init();
298  } else {
299  debugs(47, DBG_CRITICAL, "FATAL: Rock store is missing DiskIO module: " <<
300  ioModule);
301  fatal("Rock Store missing a required DiskIO module");
302  }
303 
304  theFile = io->newFile(filePath);
305  theFile->configure(fileConfig);
306  theFile->open(O_RDWR, 0644, this);
307 }
308 
309 bool
310 Rock::SwapDir::needsDiskStrand() const
311 {
312  const bool wontEvenWorkWithoutDisker = Config.workers > 1;
313  const bool wouldWorkBetterWithDisker = DiskIOModule::Find("IpcIo");
314  return InDaemonMode() && (wontEvenWorkWithoutDisker ||
315  wouldWorkBetterWithDisker);
316 }
317 
318 void
319 Rock::SwapDir::parse(int anIndex, char *aPath)
320 {
321  index = anIndex;
322 
323  path = xstrdup(aPath);
324 
325  // cache store is located at path/db
326  String fname(path);
327  fname.append("/rock");
328  filePath = xstrdup(fname.termedBuf());
329 
330  parseSize(false);
331  parseOptions(0);
332 
333  // Current openForWriting() code overwrites the old slot if needed
334  // and possible, so proactively removing old slots is probably useless.
335  assert(!repl); // repl = createRemovalPolicy(Config.replPolicy);
336 
337  validateOptions();
338 }
339 
340 void
341 Rock::SwapDir::reconfigure()
342 {
343  parseSize(true);
344  parseOptions(1);
345  // TODO: can we reconfigure the replacement policy (repl)?
346  validateOptions();
347 }
348 
349 /// parses anonymous cache_dir size option
350 void
351 Rock::SwapDir::parseSize(const bool reconfig)
352 {
353  const int i = GetInteger();
354  if (i < 0)
355  fatal("negative Rock cache_dir size value");
356  const uint64_t new_max_size =
357  static_cast<uint64_t>(i) << 20; // MBytes to Bytes
358  if (!reconfig)
359  max_size = new_max_size;
360  else if (new_max_size != max_size) {
361  debugs(3, DBG_IMPORTANT, "WARNING: cache_dir '" << path << "' size "
362  "cannot be changed dynamically, value left unchanged (" <<
363  (max_size >> 20) << " MB)");
364  }
365 }
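// Illustrative example (the path and numbers are assumed): a configuration line
// such as
//   cache_dir rock /var/cache/squid/rock 1024 slot-size=16384
// reaches parseSize() with i == 1024, so max_size becomes 1024 << 20 bytes
// (1 GiB); on reconfigure, a different size only triggers the warning above.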
366 
367 ConfigOption *
368 Rock::SwapDir::getOptionTree() const
369 {
370  ConfigOption *copt = ::SwapDir::getOptionTree();
371  ConfigOptionVector *vector = dynamic_cast<ConfigOptionVector*>(copt);
372  if (vector) {
373  // if copt is actually a ConfigOptionVector
374  vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseSizeOption, &SwapDir::dumpSizeOption));
375  vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseTimeOption, &SwapDir::dumpTimeOption));
376  vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseRateOption, &SwapDir::dumpRateOption));
377  } else {
378  // we don't know how to handle copt, as it's not a ConfigOptionVector.
379  // free it (and return nullptr)
380  delete copt;
381  copt = nullptr;
382  }
383  return copt;
384 }
385 
386 bool
387 Rock::SwapDir::allowOptionReconfigure(const char *const option) const
388 {
389  return strcmp(option, "slot-size") != 0 &&
390  ::SwapDir::allowOptionReconfigure(option);
391 }
392 
393 /// parses time-specific options; mimics ::SwapDir::optionObjectSizeParse()
394 bool
395 Rock::SwapDir::parseTimeOption(char const *option, const char *value, int reconfig)
396 {
397  // TODO: ::SwapDir or, better, Config should provide time-parsing routines,
398  // including time unit handling. Same for size and rate.
399 
400  time_msec_t *storedTime;
401  if (strcmp(option, "swap-timeout") == 0)
402  storedTime = &fileConfig.ioTimeout;
403  else
404  return false;
405 
406  if (!value) {
407  self_destruct();
408  return false;
409  }
410 
411  // TODO: handle time units and detect parsing errors better
412  const int64_t parsedValue = strtoll(value, nullptr, 10);
413  if (parsedValue < 0) {
414  debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must not be negative but is: " << parsedValue);
415  self_destruct();
416  return false;
417  }
418 
419  const time_msec_t newTime = static_cast<time_msec_t>(parsedValue);
420 
421  if (!reconfig)
422  *storedTime = newTime;
423  else if (*storedTime != newTime) {
424  debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
425  << " cannot be changed dynamically, value left unchanged: " <<
426  *storedTime);
427  }
428 
429  return true;
430 }
431 
432 /// reports time-specific options; mimics ::SwapDir::optionObjectSizeDump()
433 void
434 Rock::SwapDir::dumpTimeOption(StoreEntry * e) const
435 {
436  if (fileConfig.ioTimeout)
437  storeAppendPrintf(e, " swap-timeout=%" PRId64,
438  static_cast<int64_t>(fileConfig.ioTimeout));
439 }
440 
441 /// parses rate-specific options; mimics ::SwapDir::optionObjectSizeParse()
442 bool
443 Rock::SwapDir::parseRateOption(char const *option, const char *value, int isaReconfig)
444 {
445  int *storedRate;
446  if (strcmp(option, "max-swap-rate") == 0)
447  storedRate = &fileConfig.ioRate;
448  else
449  return false;
450 
451  if (!value) {
452  self_destruct();
453  return false;
454  }
455 
456  // TODO: handle time units and detect parsing errors better
457  const int64_t parsedValue = strtoll(value, nullptr, 10);
458  if (parsedValue < 0) {
459  debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must not be negative but is: " << parsedValue);
460  self_destruct();
461  return false;
462  }
463 
464  const int newRate = static_cast<int>(parsedValue);
465 
466  if (newRate < 0) {
467  debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must not be negative but is: " << newRate);
468  self_destruct();
469  return false;
470  }
471 
472  if (!isaReconfig)
473  *storedRate = newRate;
474  else if (*storedRate != newRate) {
475  debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
476  << " cannot be changed dynamically, value left unchanged: " <<
477  *storedRate);
478  }
479 
480  return true;
481 }
482 
483 /// reports rate-specific options; mimics ::SwapDir::optionObjectSizeDump()
484 void
485 Rock::SwapDir::dumpRateOption(StoreEntry * e) const
486 {
487  if (fileConfig.ioRate >= 0)
488  storeAppendPrintf(e, " max-swap-rate=%d", fileConfig.ioRate);
489 }
490 
491 /// parses size-specific options; mimics ::SwapDir::optionObjectSizeParse()
492 bool
493 Rock::SwapDir::parseSizeOption(char const *option, const char *value, int reconfig)
494 {
495  uint64_t *storedSize;
496  if (strcmp(option, "slot-size") == 0)
497  storedSize = &slotSize;
498  else
499  return false;
500 
501  if (!value) {
502  self_destruct();
503  return false;
504  }
505 
506  // TODO: handle size units and detect parsing errors better
507  const uint64_t newSize = strtoll(value, nullptr, 10);
508  if (newSize <= 0) {
509  debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must be positive; got: " << newSize);
510  self_destruct();
511  return false;
512  }
513 
514  if (newSize <= sizeof(DbCellHeader)) {
515  debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must exceed " << sizeof(DbCellHeader) << "; got: " << newSize);
516  self_destruct();
517  return false;
518  }
519 
520  if (!reconfig)
521  *storedSize = newSize;
522  else if (*storedSize != newSize) {
523  debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
524  << " cannot be changed dynamically, value left unchanged: " <<
525  *storedSize);
526  }
527 
528  return true;
529 }
530 
531 /// reports size-specific options; mimics ::SwapDir::optionObjectSizeDump()
532 void
533 Rock::SwapDir::dumpSizeOption(StoreEntry * e) const
534 {
535  storeAppendPrintf(e, " slot-size=%" PRId64, slotSize);
536 }
537 
538 /// warns of configuration problems; may quit
539 void
540 Rock::SwapDir::validateOptions()
541 {
542  if (slotSize <= 0)
543  fatal("Rock store requires a positive slot-size");
544 
545  const int64_t maxSizeRoundingWaste = 1024 * 1024; // size is configured in MB
546  const int64_t slotSizeRoundingWaste = slotSize;
547  const int64_t maxRoundingWaste =
548  max(maxSizeRoundingWaste, slotSizeRoundingWaste);
549 
550  // an entry consumes at least one slot; round up to reduce false warnings
551  const int64_t blockSize = static_cast<int64_t>(slotSize);
552  const int64_t maxObjSize = max(blockSize,
553  ((maxObjectSize()+blockSize-1)/blockSize)*blockSize);
554 
555  // Does the "sfileno*max-size" limit match configured db capacity?
556  const double entriesMayOccupy = entryLimitAbsolute()*static_cast<double>(maxObjSize);
557  if (entriesMayOccupy + maxRoundingWaste < maxSize()) {
558  const int64_t diskWasteSize = maxSize() - static_cast<int64_t>(entriesMayOccupy);
559  debugs(47, DBG_CRITICAL, "WARNING: Rock cache_dir " << path << " wastes disk space due to entry limits:" <<
560  "\n\tconfigured db capacity: " << maxSize() << " bytes" <<
561  "\n\tconfigured db slot size: " << slotSize << " bytes" <<
562  "\n\tconfigured maximum entry size: " << maxObjectSize() << " bytes" <<
563  "\n\tmaximum number of cache_dir entries supported by Squid: " << entryLimitAbsolute() <<
564  "\n\tdisk space all entries may use: " << entriesMayOccupy << " bytes" <<
565  "\n\tdisk space wasted: " << diskWasteSize << " bytes");
566  }
567 
568  // Does the "absolute slot count" limit match configured db capacity?
569  const double slotsMayOccupy = slotLimitAbsolute()*static_cast<double>(slotSize);
570  if (slotsMayOccupy + maxRoundingWaste < maxSize()) {
571  const int64_t diskWasteSize = maxSize() - static_cast<int64_t>(entriesMayOccupy);
572  debugs(47, DBG_CRITICAL, "WARNING: Rock cache_dir " << path << " wastes disk space due to slot limits:" <<
573  "\n\tconfigured db capacity: " << maxSize() << " bytes" <<
574  "\n\tconfigured db slot size: " << slotSize << " bytes" <<
575  "\n\tmaximum number of rock cache_dir slots supported by Squid: " << slotLimitAbsolute() <<
576  "\n\tdisk space all slots may use: " << slotsMayOccupy << " bytes" <<
577  "\n\tdisk space wasted: " << diskWasteSize << " bytes");
578  }
579 }
580 
581 bool
582 Rock::SwapDir::canStore(const StoreEntry &e, int64_t diskSpaceNeeded, int &load) const
583 {
584  if (diskSpaceNeeded >= 0)
585  diskSpaceNeeded += sizeof(DbCellHeader);
586  if (!::SwapDir::canStore(e, diskSpaceNeeded, load))
587  return false;
588 
589  if (!theFile || !theFile->canWrite())
590  return false;
591 
592  if (!map)
593  return false;
594 
595  // Do not start I/O transaction if there are less than 10% free pages left.
596  // TODO: reserve page instead
597  if (needsDiskStrand() &&
598  Ipc::Mem::PageLevel(Ipc::Mem::PageId::ioPage) >= 0.9 * Ipc::Mem::PageLimit(Ipc::Mem::PageId::ioPage)) {
599  debugs(47, 5, "too few shared pages for IPC I/O left");
600  return false;
601  }
602 
603  if (io->shedLoad())
604  return false;
605 
606  load = io->load();
607  return true;
608 }
609 
610 StoreIOState::Pointer
611 Rock::SwapDir::createStoreIO(StoreEntry &e, StoreIOState::STIOCB * const cbIo, void * const cbData)
612 {
613  if (!theFile || theFile->error()) {
614  debugs(47,4, theFile);
615  return nullptr;
616  }
617 
618  sfileno filen;
619  Ipc::StoreMapAnchor *const slot =
620  map->openForWriting(reinterpret_cast<const cache_key *>(e.key), filen);
621  if (!slot) {
622  debugs(47, 5, "map->add failed");
623  return nullptr;
624  }
625 
626  assert(filen >= 0);
627  slot->set(e);
628 
629  // XXX: We rely on our caller, storeSwapOutStart(), to set e.fileno.
630  // If that does not happen, the entry will not decrement the read level!
631 
632  Rock::SwapDir::Pointer self(this);
633  IoState *sio = new IoState(self, &e, cbIo, cbData);
634 
635  sio->swap_dirn = index;
636  sio->swap_filen = filen;
637  sio->writeableAnchor_ = slot;
638 
639  debugs(47,5, "dir " << index << " created new filen " <<
640  asHex(sio->swap_filen).upperCase().minDigits(8) << " starting at " <<
641  diskOffset(sio->swap_filen));
642 
643  sio->file(theFile);
644 
645  trackReferences(e);
646  return sio;
647 }
648 
649 StoreIOState::Pointer
650 Rock::SwapDir::createUpdateIO(const Ipc::StoreMapUpdate &update, StoreIOState::STIOCB * const cbIo, void * const data)
651 {
652  if (!theFile || theFile->error()) {
653  debugs(47,4, theFile);
654  return nullptr;
655  }
656 
657  Must(update.fresh);
658  Must(update.fresh.fileNo >= 0);
659 
660  Rock::SwapDir::Pointer self(this);
661  IoState *sio = new IoState(self, update.entry, cbIo, data);
662 
663  sio->swap_dirn = index;
664  sio->swap_filen = update.fresh.fileNo;
665  sio->writeableAnchor_ = update.fresh.anchor;
666 
667  debugs(47,5, "dir " << index << " updating filen " <<
668  asHex(sio->swap_filen).upperCase().minDigits(8) << " starting at " <<
669  diskOffset(sio->swap_filen));
670 
671  sio->file(theFile);
672  return sio;
673 }
674 
675 int64_t
676 Rock::SwapDir::diskOffset(const SlotId sid) const
677 {
678  assert(sid >= 0);
679  return HeaderSize + slotSize*sid;
680 }
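// Illustrative note (16 KiB slot size assumed): diskOffset() maps slot IDs to
// byte offsets linearly, so slot 0 starts at HeaderSize and slot 100 starts at
// HeaderSize + 100*16384; nothing but the db header lives below HeaderSize.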
681 
682 int64_t
683 Rock::SwapDir::diskOffset(Ipc::Mem::PageId &pageId) const
684 {
685  assert(pageId);
686  return diskOffset(pageId.number - 1);
687 }
688 
689 int64_t
690 Rock::SwapDir::diskOffsetLimit() const
691 {
692  assert(map);
693  return diskOffset(map->sliceLimit());
694 }
695 
696 Rock::SlotId
697 Rock::SwapDir::reserveSlotForWriting()
698 {
699  Ipc::Mem::PageId pageId;
700 
701  if (freeSlots->pop(pageId)) {
702  const auto slotId = pageId.number - 1;
703  debugs(47, 5, "got a previously free slot: " << slotId);
704  map->prepFreeSlice(slotId);
705  return slotId;
706  }
707 
708  // catch free slots delivered to noteFreeMapSlice()
709  assert(!waitingForPage);
710  waitingForPage = &pageId;
711  if (map->purgeOne()) {
712  assert(!waitingForPage); // noteFreeMapSlice() should have cleared it
713  assert(pageId.set());
714  const auto slotId = pageId.number - 1;
715  debugs(47, 5, "got a previously busy slot: " << slotId);
716  map->prepFreeSlice(slotId);
717  return slotId;
718  }
719  assert(waitingForPage == &pageId);
720  waitingForPage = nullptr;
721 
722  // This may happen when the number of available db slots is close to the
723  // number of concurrent requests reading or writing those slots, which may
724  // happen when the db is "small" compared to the request traffic OR when we
725  // are rebuilding and have not loaded "many" entries or empty slots yet.
726  debugs(47, 3, "cannot get a slot; entries: " << map->entryCount());
727  throw TexcHere("ran out of free db slots");
728 }
729 
730 bool
731 Rock::SwapDir::validSlotId(const SlotId slotId) const
732 {
733  return 0 <= slotId && slotId < slotLimitActual();
734 }
735 
736 void
737 Rock::SwapDir::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
738 {
739  Ipc::Mem::PageId pageId;
740  pageId.pool = Ipc::Mem::PageStack::IdForSwapDirSpace(index);
741  pageId.number = sliceId+1;
742  if (waitingForPage) {
743  *waitingForPage = pageId;
744  waitingForPage = nullptr;
745  } else {
746  freeSlots->push(pageId);
747  }
748 }
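// Note on the two functions above: free slots travel through the shared
// PageStack as 1-based page numbers, so reserveSlotForWriting() converts a
// popped page with "pageId.number - 1" while noteFreeMapSlice() converts back
// with "sliceId+1"; the slot/slice IDs used by the map itself stay 0-based.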
749 
750 // tries to open an old entry with swap_filen for reading
751 StoreIOState::Pointer
752 Rock::SwapDir::openStoreIO(StoreEntry &e, StoreIOState::STIOCB * const cbIo, void * const cbData)
753 {
754  if (!theFile || theFile->error()) {
755  debugs(47,4, theFile);
756  return nullptr;
757  }
758 
759  if (!e.hasDisk()) {
760  debugs(47,4, e);
761  return nullptr;
762  }
763 
764  // Do not start I/O transaction if there are less than 10% free pages left.
765  // TODO: reserve page instead
766  if (needsDiskStrand() &&
767  Ipc::Mem::PageLevel(Ipc::Mem::PageId::ioPage) >= 0.9 * Ipc::Mem::PageLimit(Ipc::Mem::PageId::ioPage)) {
768  debugs(47, 5, "too few shared pages for IPC I/O left");
769  return nullptr;
770  }
771 
772  // There are two ways an entry can get swap_filen: our get() locked it for
773  // reading or our storeSwapOutStart() locked it for writing. Peeking at our
774  // locked entry is safe, but no support for reading the entry we swap out.
775  const Ipc::StoreMapAnchor *slot = map->peekAtReader(e.swap_filen);
776  if (!slot)
777  return nullptr; // we were writing after all
778 
779  Rock::SwapDir::Pointer self(this);
780  IoState *sio = new IoState(self, &e, cbIo, cbData);
781 
782  sio->swap_dirn = index;
783  sio->swap_filen = e.swap_filen;
784  sio->readableAnchor_ = slot;
785  sio->file(theFile);
786 
787  debugs(47,5, "dir " << index << " has old filen: " <<
788  asHex(sio->swap_filen).upperCase().minDigits(8));
789 
790  // When StoreEntry::swap_filen for e was set by our anchorEntry(), e had a
791  // public key, but it could have gone private since then (while keeping the
792  // anchor lock). The stale anchor key is not (and cannot be) erased (until
793  // the marked-for-deletion/release anchor/entry is unlocked and recycled).
794  const auto ourAnchor = [&]() {
795  if (const auto publicKey = e.publicKey())
796  return slot->sameKey(publicKey);
797  return true; // cannot check
798  };
799  assert(ourAnchor());
800 
801  // For collapsed disk hits: e.swap_file_sz and slot->basics.swap_file_sz
802  // may still be zero and basics.swap_file_sz may grow.
803  assert(slot->basics.swap_file_sz >= e.swap_file_sz);
804 
805  return sio;
806 }
807 
808 void
809 Rock::SwapDir::ioCompletedNotification()
810 {
811  if (!theFile)
812  fatalf("Rock cache_dir failed to initialize db file: %s", filePath);
813 
814  if (theFile->error()) {
815  int xerrno = errno; // XXX: where does errno come from
816  fatalf("Rock cache_dir at %s failed to open db file: %s", filePath,
817  xstrerr(xerrno));
818  }
819 
820  debugs(47, 2, "Rock cache_dir[" << index << "] limits: " <<
821  std::setw(12) << maxSize() << " disk bytes, " <<
822  std::setw(7) << map->entryLimit() << " entries, and " <<
823  std::setw(7) << map->sliceLimit() << " slots");
824 
825  if (!Rebuild::Start(*this))
826  storeRebuildComplete(nullptr);
827 }
828 
829 void
830 Rock::SwapDir::closeCompleted()
831 {
832  theFile = nullptr;
833 }
834 
835 void
836 Rock::SwapDir::readCompleted(const char *, int rlen, int errflag, RefCount< ::ReadRequest> r)
837 {
838  ReadRequest *request = dynamic_cast<Rock::ReadRequest*>(r.getRaw());
839  assert(request);
840  IoState::Pointer sio = request->sio;
841  sio->handleReadCompletion(*request, rlen, errflag);
842 }
843 
844 void
845 Rock::SwapDir::writeCompleted(int errflag, size_t, RefCount< ::WriteRequest> r)
846 {
847  // TODO: Move details into IoState::handleWriteCompletion() after figuring
848  // out how to deal with map access. See readCompleted().
849 
850  Rock::WriteRequest *request = dynamic_cast<Rock::WriteRequest*>(r.getRaw());
851  assert(request);
852  assert(request->sio != nullptr);
853  IoState &sio = *request->sio;
854 
855  // quit if somebody called IoState::close() while we were waiting
856  if (!sio.stillWaiting()) {
857  debugs(79, 3, "ignoring closed entry " << sio.swap_filen);
858  noteFreeMapSlice(request->sidCurrent);
859  return;
860  }
861 
862  debugs(79, 7, "errflag=" << errflag << " rlen=" << request->len << " eof=" << request->eof);
863 
864  if (errflag != DISK_OK)
865  handleWriteCompletionProblem(errflag, *request);
866  else if (!sio.expectedReply(request->id))
867  handleWriteCompletionProblem(DISK_ERROR, *request);
868  else
869  handleWriteCompletionSuccess(*request);
870 
871  if (sio.touchingStoreEntry())
872  CollapsedForwarding::Broadcast(*sio.e);
873 }
874 
875 /// code shared by writeCompleted() success handling cases
876 void
877 Rock::SwapDir::handleWriteCompletionSuccess(const WriteRequest &request)
878 {
879  auto &sio = *(request.sio);
880  sio.splicingPoint = request.sidCurrent;
881  // do not increment sio.offset_ because we do it in sio->write()
882 
883  assert(sio.writeableAnchor_);
884  if (sio.writeableAnchor_->start < 0) { // wrote the first slot
885  Must(request.sidPrevious < 0);
886  sio.writeableAnchor_->start = request.sidCurrent;
887  } else {
888  Must(request.sidPrevious >= 0);
889  map->writeableSlice(sio.swap_filen, request.sidPrevious).next = request.sidCurrent;
890  }
891 
892  // finalize the shared slice info after writing slice contents to disk;
893  // the chain gets possession of the slice we were writing
894  Ipc::StoreMap::Slice &slice =
895  map->writeableSlice(sio.swap_filen, request.sidCurrent);
896  slice.size = request.len - sizeof(DbCellHeader);
897  Must(slice.next < 0);
898 
899  if (request.eof) {
900  assert(sio.e);
901  if (sio.touchingStoreEntry()) {
902  sio.e->swap_file_sz = sio.writeableAnchor_->basics.swap_file_sz =
903  sio.offset_;
904 
905  map->switchWritingToReading(sio.swap_filen);
906  // sio.e keeps the (now read) lock on the anchor
907  // storeSwapOutFileClosed() sets swap_status and calls storeWriterDone()
908  }
909  sio.writeableAnchor_ = nullptr;
910  sio.finishedWriting(DISK_OK);
911  }
912 }
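// Note on the success path above: the first completed slice becomes the
// anchor's start slice, later slices are linked from their predecessor via
// writeableSlice(...).next, and only the final (eof) write publishes
// swap_file_sz and switches the map entry from writing to reading.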
913 
914 /// code shared by writeCompleted() error handling cases
915 void
916 Rock::SwapDir::handleWriteCompletionProblem(const int errflag, const WriteRequest &request)
917 {
918  auto &sio = *request.sio;
919 
920  noteFreeMapSlice(request.sidCurrent);
921 
922  writeError(sio);
923  sio.finishedWriting(errflag);
924  // and hope that Core will call disconnect() to close the map entry
925 }
926 
927 void
928 Rock::SwapDir::writeError(StoreIOState &sio)
929 {
930  // Do not abortWriting here. The entry should keep the write lock
931  // instead of losing association with the store and confusing core.
932  map->freeEntry(sio.swap_filen); // will mark as unusable, just in case
933 
934  if (sio.touchingStoreEntry())
935  CollapsedForwarding::Broadcast(*sio.e);
936  // else noop: a fresh entry update error does not affect stale entry readers
937 
938  // All callers must also call IoState callback, to propagate the error.
939 }
940 
941 void
942 Rock::SwapDir::updateHeaders(StoreEntry *updatedE)
943 {
944  if (!map)
945  return;
946 
947  Ipc::StoreMapUpdate update(updatedE);
948  if (!map->openForUpdating(update, updatedE->swap_filen))
949  return;
950 
951  try {
952  AsyncJob::Start(new HeaderUpdater(this, update));
953  } catch (const std::exception &ex) {
954  debugs(20, 2, "error starting to update entry " << *updatedE << ": " << ex.what());
955  map->abortUpdating(update);
956  }
957 }
958 
959 bool
960 Rock::SwapDir::full() const
961 {
962  return freeSlots != nullptr && !freeSlots->size();
963 }
964 
965 // storeSwapOutFileClosed calls this method on DISK_NO_SPACE_LEFT,
966 // but it should not happen for us
967 void
968 Rock::SwapDir::diskFull()
969 {
970  debugs(20, DBG_IMPORTANT, "ERROR: Squid BUG: No space left with rock cache_dir: " <<
971  filePath);
972 }
973 
974 /// purge while full(); it should be sufficient to purge just one
975 void
976 Rock::SwapDir::maintain()
977 {
978  // The Store calls this to free some db space, but there is nothing wrong
979  // with a full() db, except when db has to shrink after reconfigure, and
980  // we do not support shrinking yet (it would have to purge specific slots).
981  // TODO: Disable maintain() requests when they are pointless.
982 }
983 
984 void
985 Rock::SwapDir::reference(StoreEntry &e)
986 {
987  debugs(47, 5, &e << ' ' << e.swap_dirn << ' ' << e.swap_filen);
988  if (repl && repl->Referenced)
989  repl->Referenced(repl, &e, &e.repl);
990 }
991 
992 bool
993 Rock::SwapDir::dereference(StoreEntry &e)
994 {
995  debugs(47, 5, &e << ' ' << e.swap_dirn << ' ' << e.swap_filen);
996  if (repl && repl->Dereferenced)
997  repl->Dereferenced(repl, &e, &e.repl);
998 
999  // no need to keep e in the global store_table for us; we have our own map
1000  return false;
1001 }
1002 
1003 bool
1004 Rock::SwapDir::unlinkdUseful() const
1005 {
1006  // no entry-specific files to unlink
1007  return false;
1008 }
1009 
1010 void
1011 Rock::SwapDir::evictIfFound(const cache_key *key)
1012 {
1013  if (map)
1014  map->freeEntryByKey(key); // may not be there
1015 }
1016 
1017 void
1018 Rock::SwapDir::evictCached(StoreEntry &e)
1019 {
1020  debugs(47, 5, e);
1021  if (e.hasDisk(index)) {
1022  if (map->freeEntry(e.swap_filen))
1023  CollapsedForwarding::Broadcast(e);
1024  if (!e.locked())
1025  disconnect(e);
1026  } else if (const auto key = e.publicKey()) {
1027  evictIfFound(key);
1028  }
1029 }
1030 
1031 void
1032 Rock::SwapDir::trackReferences(StoreEntry &e)
1033 {
1034  debugs(47, 5, e);
1035  if (repl)
1036  repl->Add(repl, &e, &e.repl);
1037 }
1038 
1039 void
1040 Rock::SwapDir::ignoreReferences(StoreEntry &e)
1041 {
1042  debugs(47, 5, e);
1043  if (repl)
1044  repl->Remove(repl, &e, &e.repl);
1045 }
1046 
1047 void
1048 Rock::SwapDir::statfs(StoreEntry &e) const
1049 {
1050  storeAppendPrintf(&e, "\n");
1051  storeAppendPrintf(&e, "Maximum Size: %" PRIu64 " KB\n", maxSize() >> 10);
1052  storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
1053  currentSize() / 1024.0,
1054  Math::doublePercent(currentSize(), maxSize()));
1055 
1056  const int entryLimit = entryLimitActual();
1057  const int slotLimit = slotLimitActual();
1058  storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
1059  if (map && entryLimit > 0) {
1060  const int entryCount = map->entryCount();
1061  storeAppendPrintf(&e, "Current entries: %9d %.2f%%\n",
1062  entryCount, (100.0 * entryCount / entryLimit));
1063  }
1064 
1065  storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
1066  if (map && slotLimit > 0) {
1067  const unsigned int slotsFree = !freeSlots ? 0 : freeSlots->size();
1068  if (slotsFree <= static_cast<unsigned int>(slotLimit)) {
1069  const int usedSlots = slotLimit - static_cast<int>(slotsFree);
1070  storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
1071  usedSlots, (100.0 * usedSlots / slotLimit));
1072  }
1073  if (slotLimit < 100) { // XXX: otherwise too expensive to count
1074  Ipc::ReadWriteLockStats stats;
1075  map->updateStats(stats);
1076  stats.dump(e);
1077  }
1078  }
1079 
1080  storeAppendPrintf(&e, "Pending operations: %d out of %d\n",
1081  store_open_disk_fd, Config.max_open_disk_fds);
1082 
1083  storeAppendPrintf(&e, "Flags:");
1084 
1085  if (flags.selected)
1086  storeAppendPrintf(&e, " SELECTED");
1087 
1088  if (flags.read_only)
1089  storeAppendPrintf(&e, " READ-ONLY");
1090 
1091  storeAppendPrintf(&e, "\n");
1092 
1093 }
1094 
1095 SBuf
1096 Rock::SwapDir::inodeMapPath() const
1097 {
1098  return Ipc::Mem::Segment::Name(SBuf(path), "map");
1099 }
1100 
1101 const char *
1102 Rock::SwapDir::freeSlotsPath() const
1103 {
1104  static String spacesPath;
1105  spacesPath = path;
1106  spacesPath.append("_spaces");
1107  return spacesPath.termedBuf();
1108 }
1109 
1110 bool
1111 Rock::SwapDir::hasReadableEntry(const StoreEntry &e) const
1112 {
1113  return map->hasReadableEntry(reinterpret_cast<const cache_key*>(e.key));
1114 }
1115 
1117 
1118 void Rock::SwapDirRr::create()
1119 {
1120  Must(mapOwners.empty() && freeSlotsOwners.empty());
1121  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
1122  if (const Rock::SwapDir *const sd = dynamic_cast<Rock::SwapDir *>(INDEXSD(i))) {
1123  rebuildStatsOwners.push_back(Rebuild::Stats::Init(*sd));
1124 
1125  const int64_t capacity = sd->slotLimitActual();
1126 
1127  SwapDir::DirMap::Owner *const mapOwner =
1128  SwapDir::DirMap::Init(sd->inodeMapPath(), capacity);
1129  mapOwners.push_back(mapOwner);
1130 
1131  // TODO: somehow remove pool id and counters from PageStack?
1132  Ipc::Mem::PageStack::Config config;
1133  config.poolId = Ipc::Mem::PageStack::IdForSwapDirSpace(i);
1134  config.pageSize = 0; // this is an index of slots on _disk_
1135  config.capacity = capacity;
1136  config.createFull = false; // Rebuild finds and pushes free slots
1137  Ipc::Mem::Owner<Ipc::Mem::PageStack> *const freeSlotsOwner =
1138  shm_new(Ipc::Mem::PageStack)(sd->freeSlotsPath(), config);
1139  freeSlotsOwners.push_back(freeSlotsOwner);
1140  }
1141  }
1142 }
1143 
1144 Rock::SwapDirRr::~SwapDirRr()
1145 {
1146  for (size_t i = 0; i < mapOwners.size(); ++i) {
1147  delete rebuildStatsOwners[i];
1148  delete mapOwners[i];
1149  delete freeSlotsOwners[i];
1150  }
1151 }
1152 