// RockSwapDir.cc — Squid Rock cache_dir (Rock::SwapDir) implementation
// (listing extracted from generated documentation for this file)
1 /*
2  * Copyright (C) 1996-2025 The Squid Software Foundation and contributors
3  *
4  * Squid software is distributed under GPLv2+ license and includes
5  * contributions from numerous individuals and organizations.
6  * Please see the COPYING and CONTRIBUTORS files for details.
7  */
8 
9 /* DEBUG: section 47 Store Directory Routines */
10 
11 #include "squid.h"
12 #include "base/IoManip.h"
13 #include "cache_cf.h"
14 #include "CollapsedForwarding.h"
15 #include "compat/socket.h"
16 #include "compat/unistd.h"
17 #include "ConfigOption.h"
18 #include "DiskIO/DiskIOModule.h"
19 #include "DiskIO/DiskIOStrategy.h"
20 #include "DiskIO/ReadRequest.h"
21 #include "DiskIO/WriteRequest.h"
23 #include "fs/rock/RockIoRequests.h"
24 #include "fs/rock/RockIoState.h"
25 #include "fs/rock/RockSwapDir.h"
26 #include "globals.h"
27 #include "ipc/mem/Pages.h"
28 #include "MemObject.h"
29 #include "Parsing.h"
30 #include "SquidConfig.h"
31 #include "SquidMath.h"
32 #include "tools.h"
33 
34 #include <cstdlib>
35 #include <iomanip>
36 #include <limits>
37 
38 #if HAVE_SYS_STAT_H
39 #include <sys/stat.h>
40 #endif
41 
43  slotSize(HeaderSize), filePath(nullptr), map(nullptr), io(nullptr),
44  waitingForPage(nullptr)
45 {
46 }
47 
49 {
50  delete io;
51  delete map;
52  safe_free(filePath);
53 }
54 
55 // called when Squid core needs a StoreEntry with a given key
56 StoreEntry *
58 {
59  if (!map || !theFile || !theFile->canRead())
60  return nullptr;
61 
62  sfileno filen;
63  const Ipc::StoreMapAnchor *const slot = map->openForReading(key, filen);
64  if (!slot)
65  return nullptr;
66 
67  // create a brand new store entry and initialize it with stored basics
68  StoreEntry *e = new StoreEntry();
69  e->createMemObject();
70  anchorEntry(*e, filen, *slot);
71  trackReferences(*e);
72  return e;
73 }
74 
75 bool
77 {
78  Assure(!entry.hasDisk());
79 
80  if (!map || !theFile || !theFile->canRead())
81  return false;
82 
83  sfileno filen;
84  const Ipc::StoreMapAnchor *const slot = map->openForReading(
85  reinterpret_cast<cache_key*>(entry.key), filen);
86  if (!slot)
87  return false;
88 
89  anchorEntry(entry, filen, *slot);
90  return true;
91 }
92 
93 bool
95 {
96  if (!map || !theFile || !theFile->canRead())
97  return false;
98 
99  assert(entry.hasDisk(index));
100 
101  const auto &anchor = map->readableEntry(entry.swap_filen);
102  entry.swap_file_sz = anchor.basics.swap_file_sz;
103  return true;
104 }
105 
106 void
108 {
109  anchor.exportInto(e);
110 
111  const bool complete = anchor.complete();
112  e.store_status = complete ? STORE_OK : STORE_PENDING;
113  // SWAPOUT_WRITING: even though another worker writes?
114  e.attachToDisk(index, filen, complete ? SWAPOUT_DONE : SWAPOUT_WRITING);
115 
117 
119 }
120 
122 {
123  assert(e.hasDisk(index));
124 
125  ignoreReferences(e);
126 
127  // do not rely on e.swap_status here because there is an async delay
128  // before it switches from SWAPOUT_WRITING to SWAPOUT_DONE.
129 
130  // since e has swap_filen, its slot is locked for reading and/or writing
131  // but it is difficult to know whether THIS worker is reading or writing e,
132  // especially since we may switch from writing to reading. This code relies
133  // on Rock::IoState::writeableAnchor_ being set when we locked for writing.
134  if (e.mem_obj && e.mem_obj->swapout.sio != nullptr &&
135  dynamic_cast<IoState&>(*e.mem_obj->swapout.sio).writeableAnchor_) {
136  map->abortWriting(e.swap_filen);
137  e.detachFromDisk();
138  dynamic_cast<IoState&>(*e.mem_obj->swapout.sio).writeableAnchor_ = nullptr;
140  e.storeWriterDone();
141  } else {
142  map->closeForReading(e.swap_filen);
143  e.detachFromDisk();
144  }
145 }
146 
147 uint64_t
149 {
150  const uint64_t spaceSize = !freeSlots ?
151  maxSize() : (slotSize * freeSlots->size());
152  // everything that is not free is in use
153  return maxSize() - spaceSize;
154 }
155 
156 uint64_t
158 {
159  return map ? map->entryCount() : 0;
160 }
161 
164 bool
166 {
167  return ::SwapDir::doReportStat() && (!UsingSmp() || IamDiskProcess());
168 }
169 
170 void
172 {
173  // nothing to do; handleWriteCompletionSuccess() did everything for us
174  assert(!e.mem_obj ||
175  !e.mem_obj->swapout.sio ||
176  !dynamic_cast<IoState&>(*e.mem_obj->swapout.sio).writeableAnchor_);
177 }
178 
179 void
181 {
182  debugs(47, 5, entry);
183  disconnect(entry); // calls abortWriting() to free the disk entry
184 }
185 
186 int64_t
188 {
189  // the max value is an invalid one; all values must be below the limit
193 }
194 
195 int64_t
197 {
198  const int64_t sWanted = (maxSize() - HeaderSize)/slotSize;
199  const int64_t sLimitLo = map ? map->sliceLimit() : 0; // dynamic shrinking unsupported
200  const int64_t sLimitHi = slotLimitAbsolute();
201  return min(max(sLimitLo, sWanted), sLimitHi);
202 }
203 
204 int64_t
206 {
207  return min(slotLimitActual(), entryLimitAbsolute());
208 }
209 
210 // TODO: encapsulate as a tool
211 void
213 {
214  assert(path);
215  assert(filePath);
216 
217  if (UsingSmp() && !IamDiskProcess()) {
218  debugs (47,3, "disker will create in " << path);
219  return;
220  }
221 
222  debugs (47,3, "creating in " << path);
223 
224  struct stat dir_sb;
225  if (::stat(path, &dir_sb) == 0) {
226  struct stat file_sb;
227  if (::stat(filePath, &file_sb) == 0) {
228  debugs (47, DBG_IMPORTANT, "Skipping existing Rock db: " << filePath);
229  return;
230  }
231  // else the db file is not there or is not accessible, and we will try
232  // to create it later below, generating a detailed error on failures.
233  } else { // path does not exist or is inaccessible
234  // If path exists but is not accessible, mkdir() below will fail, and
235  // the admin should see the error and act accordingly, so there is
236  // no need to distinguish ENOENT from other possible stat() errors.
237  debugs (47, DBG_IMPORTANT, "Creating Rock db directory: " << path);
238  const int res = mkdir(path, 0700);
239  if (res != 0)
240  createError("mkdir");
241  }
242 
243  debugs (47, DBG_IMPORTANT, "Creating Rock db: " << filePath);
244  const auto swap = xopen(filePath, O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, 0600);
245  if (swap < 0)
246  createError("create");
247 
248 #if SLOWLY_FILL_WITH_ZEROS
249  char block[1024];
250  Must(maxSize() % sizeof(block) == 0);
251  memset(block, '\0', sizeof(block));
252 
253  for (off_t offset = 0; offset < maxSize(); offset += sizeof(block)) {
254  if (xwrite(swap, block, sizeof(block)) != sizeof(block))
255  createError("write");
256  }
257 #else
258  if (ftruncate(swap, maxSize()) != 0)
259  createError("truncate");
260 
261  char header[HeaderSize];
262  memset(header, '\0', sizeof(header));
263  if (xwrite(swap, header, sizeof(header)) != sizeof(header))
264  createError("write");
265 #endif
266 
267  xclose(swap);
268 }
269 
270 // report Rock DB creation error and exit
271 void
272 Rock::SwapDir::createError(const char *const msg)
273 {
274  int xerrno = errno; // XXX: where does errno come from?
275  debugs(47, DBG_CRITICAL, "ERROR: Failed to initialize Rock Store db in " <<
276  filePath << "; " << msg << " error: " << xstrerr(xerrno));
277  fatal("Rock Store db creation error");
278 }
279 
280 void
282 {
283  debugs(47,2, MYNAME);
284 
285  // XXX: SwapDirs aren't refcounted. We make IORequestor calls, which
286  // are refcounted. We up our count once to avoid implicit delete's.
287  lock();
288 
289  freeSlots = shm_old(Ipc::Mem::PageStack)(freeSlotsPath());
290 
291  Must(!map);
292  map = new DirMap(inodeMapPath());
293  map->cleaner = this;
294 
295  const char *ioModule = needsDiskStrand() ? "IpcIo" : "Blocking";
296  if (DiskIOModule *m = DiskIOModule::Find(ioModule)) {
297  debugs(47,2, "Using DiskIO module: " << ioModule);
298  io = m->createStrategy();
299  io->init();
300  } else {
301  debugs(47, DBG_CRITICAL, "FATAL: Rock store is missing DiskIO module: " <<
302  ioModule);
303  fatal("Rock Store missing a required DiskIO module");
304  }
305 
306  theFile = io->newFile(filePath);
307  theFile->configure(fileConfig);
308  theFile->open(O_RDWR, 0644, this);
309 }
310 
311 bool
313 {
314  const bool wontEvenWorkWithoutDisker = Config.workers > 1;
315  const bool wouldWorkBetterWithDisker = DiskIOModule::Find("IpcIo");
316  return InDaemonMode() && (wontEvenWorkWithoutDisker ||
317  wouldWorkBetterWithDisker);
318 }
319 
320 void
321 Rock::SwapDir::parse(int anIndex, char *aPath)
322 {
323  index = anIndex;
324 
325  path = xstrdup(aPath);
326 
327  // cache store is located at path/db
328  String fname(path);
329  fname.append("/rock");
330  filePath = xstrdup(fname.termedBuf());
331 
332  parseSize(false);
333  parseOptions(0);
334 
335  // Current openForWriting() code overwrites the old slot if needed
336  // and possible, so proactively removing old slots is probably useless.
337  assert(!repl); // repl = createRemovalPolicy(Config.replPolicy);
338 
339  validateOptions();
340 }
341 
342 void
344 {
345  parseSize(true);
346  parseOptions(1);
347  // TODO: can we reconfigure the replacement policy (repl)?
348  validateOptions();
349 }
350 
352 void
353 Rock::SwapDir::parseSize(const bool reconfig)
354 {
355  const int i = GetInteger();
356  if (i < 0)
357  fatal("negative Rock cache_dir size value");
358  const uint64_t new_max_size =
359  static_cast<uint64_t>(i) << 20; // MBytes to Bytes
360  if (!reconfig)
361  max_size = new_max_size;
362  else if (new_max_size != max_size) {
363  debugs(3, DBG_IMPORTANT, "WARNING: cache_dir '" << path << "' size "
364  "cannot be changed dynamically, value left unchanged (" <<
365  (max_size >> 20) << " MB)");
366  }
367 }
368 
369 ConfigOption *
371 {
373  ConfigOptionVector *vector = dynamic_cast<ConfigOptionVector*>(copt);
374  if (vector) {
375  // if copt is actually a ConfigOptionVector
376  vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseSizeOption, &SwapDir::dumpSizeOption));
377  vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseTimeOption, &SwapDir::dumpTimeOption));
378  vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseRateOption, &SwapDir::dumpRateOption));
379  } else {
380  // we don't know how to handle copt, as it's not a ConfigOptionVector.
381  // free it (and return nullptr)
382  delete copt;
383  copt = nullptr;
384  }
385  return copt;
386 }
387 
388 bool
389 Rock::SwapDir::allowOptionReconfigure(const char *const option) const
390 {
391  return strcmp(option, "slot-size") != 0 &&
393 }
394 
396 bool
397 Rock::SwapDir::parseTimeOption(char const *option, const char *value, int reconfig)
398 {
399  // TODO: ::SwapDir or, better, Config should provide time-parsing routines,
400  // including time unit handling. Same for size and rate.
401 
402  time_msec_t *storedTime;
403  if (strcmp(option, "swap-timeout") == 0)
404  storedTime = &fileConfig.ioTimeout;
405  else
406  return false;
407 
408  if (!value) {
409  self_destruct();
410  return false;
411  }
412 
413  // TODO: handle time units and detect parsing errors better
414  const int64_t parsedValue = strtoll(value, nullptr, 10);
415  if (parsedValue < 0) {
416  debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must not be negative but is: " << parsedValue);
417  self_destruct();
418  return false;
419  }
420 
421  const time_msec_t newTime = static_cast<time_msec_t>(parsedValue);
422 
423  if (!reconfig)
424  *storedTime = newTime;
425  else if (*storedTime != newTime) {
426  debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
427  << " cannot be changed dynamically, value left unchanged: " <<
428  *storedTime);
429  }
430 
431  return true;
432 }
433 
435 void
437 {
438  if (fileConfig.ioTimeout)
439  storeAppendPrintf(e, " swap-timeout=%" PRId64,
440  static_cast<int64_t>(fileConfig.ioTimeout));
441 }
442 
444 bool
445 Rock::SwapDir::parseRateOption(char const *option, const char *value, int isaReconfig)
446 {
447  int *storedRate;
448  if (strcmp(option, "max-swap-rate") == 0)
449  storedRate = &fileConfig.ioRate;
450  else
451  return false;
452 
453  if (!value) {
454  self_destruct();
455  return false;
456  }
457 
458  // TODO: handle time units and detect parsing errors better
459  const int64_t parsedValue = strtoll(value, nullptr, 10);
460  if (parsedValue < 0) {
461  debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must not be negative but is: " << parsedValue);
462  self_destruct();
463  return false;
464  }
465 
466  const int newRate = static_cast<int>(parsedValue);
467 
468  if (newRate < 0) {
469  debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must not be negative but is: " << newRate);
470  self_destruct();
471  return false;
472  }
473 
474  if (!isaReconfig)
475  *storedRate = newRate;
476  else if (*storedRate != newRate) {
477  debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
478  << " cannot be changed dynamically, value left unchanged: " <<
479  *storedRate);
480  }
481 
482  return true;
483 }
484 
486 void
488 {
489  if (fileConfig.ioRate >= 0)
490  storeAppendPrintf(e, " max-swap-rate=%d", fileConfig.ioRate);
491 }
492 
494 bool
495 Rock::SwapDir::parseSizeOption(char const *option, const char *value, int reconfig)
496 {
497  uint64_t *storedSize;
498  if (strcmp(option, "slot-size") == 0)
499  storedSize = &slotSize;
500  else
501  return false;
502 
503  if (!value) {
504  self_destruct();
505  return false;
506  }
507 
508  // TODO: handle size units and detect parsing errors better
509  const uint64_t newSize = strtoll(value, nullptr, 10);
510  if (newSize <= 0) {
511  debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must be positive; got: " << newSize);
512  self_destruct();
513  return false;
514  }
515 
516  if (newSize <= sizeof(DbCellHeader)) {
517  debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must exceed " << sizeof(DbCellHeader) << "; got: " << newSize);
518  self_destruct();
519  return false;
520  }
521 
522  if (!reconfig)
523  *storedSize = newSize;
524  else if (*storedSize != newSize) {
525  debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
526  << " cannot be changed dynamically, value left unchanged: " <<
527  *storedSize);
528  }
529 
530  return true;
531 }
532 
534 void
536 {
537  storeAppendPrintf(e, " slot-size=%" PRId64, slotSize);
538 }
539 
541 void
543 {
544  if (slotSize <= 0)
545  fatal("Rock store requires a positive slot-size");
546 
547  const int64_t maxSizeRoundingWaste = 1024 * 1024; // size is configured in MB
548  const int64_t slotSizeRoundingWaste = slotSize;
549  const int64_t maxRoundingWaste =
550  max(maxSizeRoundingWaste, slotSizeRoundingWaste);
551 
552  // an entry consumes at least one slot; round up to reduce false warnings
553  const int64_t blockSize = static_cast<int64_t>(slotSize);
554  const int64_t maxObjSize = max(blockSize,
555  ((maxObjectSize()+blockSize-1)/blockSize)*blockSize);
556 
557  // Does the "sfileno*max-size" limit match configured db capacity?
558  const double entriesMayOccupy = entryLimitAbsolute()*static_cast<double>(maxObjSize);
559  if (entriesMayOccupy + maxRoundingWaste < maxSize()) {
560  const int64_t diskWasteSize = maxSize() - static_cast<int64_t>(entriesMayOccupy);
561  debugs(47, DBG_CRITICAL, "WARNING: Rock cache_dir " << path << " wastes disk space due to entry limits:" <<
562  "\n\tconfigured db capacity: " << maxSize() << " bytes" <<
563  "\n\tconfigured db slot size: " << slotSize << " bytes" <<
564  "\n\tconfigured maximum entry size: " << maxObjectSize() << " bytes" <<
565  "\n\tmaximum number of cache_dir entries supported by Squid: " << entryLimitAbsolute() <<
566  "\n\tdisk space all entries may use: " << entriesMayOccupy << " bytes" <<
567  "\n\tdisk space wasted: " << diskWasteSize << " bytes");
568  }
569 
570  // Does the "absolute slot count" limit match configured db capacity?
571  const double slotsMayOccupy = slotLimitAbsolute()*static_cast<double>(slotSize);
572  if (slotsMayOccupy + maxRoundingWaste < maxSize()) {
573  const int64_t diskWasteSize = maxSize() - static_cast<int64_t>(entriesMayOccupy);
574  debugs(47, DBG_CRITICAL, "WARNING: Rock cache_dir " << path << " wastes disk space due to slot limits:" <<
575  "\n\tconfigured db capacity: " << maxSize() << " bytes" <<
576  "\n\tconfigured db slot size: " << slotSize << " bytes" <<
577  "\n\tmaximum number of rock cache_dir slots supported by Squid: " << slotLimitAbsolute() <<
578  "\n\tdisk space all slots may use: " << slotsMayOccupy << " bytes" <<
579  "\n\tdisk space wasted: " << diskWasteSize << " bytes");
580  }
581 }
582 
583 bool
584 Rock::SwapDir::canStore(const StoreEntry &e, int64_t diskSpaceNeeded, int &load) const
585 {
586  if (diskSpaceNeeded >= 0)
587  diskSpaceNeeded += sizeof(DbCellHeader);
588  if (!::SwapDir::canStore(e, diskSpaceNeeded, load))
589  return false;
590 
591  if (!theFile || !theFile->canWrite())
592  return false;
593 
594  if (!map)
595  return false;
596 
597  // Do not start I/O transaction if there are less than 10% free pages left.
598  // TODO: reserve page instead
599  if (needsDiskStrand() &&
601  debugs(47, 5, "too few shared pages for IPC I/O left");
602  return false;
603  }
604 
605  if (io->shedLoad())
606  return false;
607 
608  load = io->load();
609  return true;
610 }
611 
613 Rock::SwapDir::createStoreIO(StoreEntry &e, StoreIOState::STIOCB * const cbIo, void * const cbData)
614 {
615  if (!theFile || theFile->error()) {
616  debugs(47,4, theFile);
617  return nullptr;
618  }
619 
620  sfileno filen;
621  Ipc::StoreMapAnchor *const slot =
622  map->openForWriting(reinterpret_cast<const cache_key *>(e.key), filen);
623  if (!slot) {
624  debugs(47, 5, "map->add failed");
625  return nullptr;
626  }
627 
628  assert(filen >= 0);
629  slot->set(e);
630 
631  // XXX: We rely on our caller, storeSwapOutStart(), to set e.fileno.
632  // If that does not happen, the entry will not decrement the read level!
633 
634  Rock::SwapDir::Pointer self(this);
635  IoState *sio = new IoState(self, &e, cbIo, cbData);
636 
637  sio->swap_dirn = index;
638  sio->swap_filen = filen;
639  sio->writeableAnchor_ = slot;
640 
641  debugs(47,5, "dir " << index << " created new filen " <<
642  asHex(sio->swap_filen).upperCase().minDigits(8) << " starting at " <<
643  diskOffset(sio->swap_filen));
644 
645  sio->file(theFile);
646 
647  trackReferences(e);
648  return sio;
649 }
650 
653 {
654  if (!theFile || theFile->error()) {
655  debugs(47,4, theFile);
656  return nullptr;
657  }
658 
659  Must(update.fresh);
660  Must(update.fresh.fileNo >= 0);
661 
662  Rock::SwapDir::Pointer self(this);
663  IoState *sio = new IoState(self, update.entry, cbIo, data);
664 
665  sio->swap_dirn = index;
666  sio->swap_filen = update.fresh.fileNo;
667  sio->writeableAnchor_ = update.fresh.anchor;
668 
669  debugs(47,5, "dir " << index << " updating filen " <<
670  asHex(sio->swap_filen).upperCase().minDigits(8) << " starting at " <<
671  diskOffset(sio->swap_filen));
672 
673  sio->file(theFile);
674  return sio;
675 }
676 
677 int64_t
678 Rock::SwapDir::diskOffset(const SlotId sid) const
679 {
680  assert(sid >= 0);
681  return HeaderSize + slotSize*sid;
682 }
683 
684 int64_t
686 {
687  assert(pageId);
688  return diskOffset(pageId.number - 1);
689 }
690 
691 int64_t
693 {
694  assert(map);
695  return diskOffset(map->sliceLimit());
696 }
697 
700 {
701  Ipc::Mem::PageId pageId;
702 
703  if (freeSlots->pop(pageId)) {
704  const auto slotId = pageId.number - 1;
705  debugs(47, 5, "got a previously free slot: " << slotId);
706  map->prepFreeSlice(slotId);
707  return slotId;
708  }
709 
710  // catch free slots delivered to noteFreeMapSlice()
711  assert(!waitingForPage);
712  waitingForPage = &pageId;
713  if (map->purgeOne()) {
714  assert(!waitingForPage); // noteFreeMapSlice() should have cleared it
715  assert(pageId.set());
716  const auto slotId = pageId.number - 1;
717  debugs(47, 5, "got a previously busy slot: " << slotId);
718  map->prepFreeSlice(slotId);
719  return slotId;
720  }
721  assert(waitingForPage == &pageId);
722  waitingForPage = nullptr;
723 
724  // This may happen when the number of available db slots is close to the
725  // number of concurrent requests reading or writing those slots, which may
726  // happen when the db is "small" compared to the request traffic OR when we
727  // are rebuilding and have not loaded "many" entries or empty slots yet.
728  debugs(47, 3, "cannot get a slot; entries: " << map->entryCount());
729  throw TexcHere("ran out of free db slots");
730 }
731 
732 bool
734 {
735  return 0 <= slotId && slotId < slotLimitActual();
736 }
737 
738 void
740 {
741  Ipc::Mem::PageId pageId;
743  pageId.number = sliceId+1;
744  if (waitingForPage) {
745  *waitingForPage = pageId;
746  waitingForPage = nullptr;
747  } else {
748  freeSlots->push(pageId);
749  }
750 }
751 
752 // tries to open an old entry with swap_filen for reading
754 Rock::SwapDir::openStoreIO(StoreEntry &e, StoreIOState::STIOCB * const cbIo, void * const cbData)
755 {
756  if (!theFile || theFile->error()) {
757  debugs(47,4, theFile);
758  return nullptr;
759  }
760 
761  if (!e.hasDisk()) {
762  debugs(47,4, e);
763  return nullptr;
764  }
765 
766  // Do not start I/O transaction if there are less than 10% free pages left.
767  // TODO: reserve page instead
768  if (needsDiskStrand() &&
770  debugs(47, 5, "too few shared pages for IPC I/O left");
771  return nullptr;
772  }
773 
774  // The are two ways an entry can get swap_filen: our get() locked it for
775  // reading or our storeSwapOutStart() locked it for writing. Peeking at our
776  // locked entry is safe, but no support for reading the entry we swap out.
777  const Ipc::StoreMapAnchor *slot = map->peekAtReader(e.swap_filen);
778  if (!slot)
779  return nullptr; // we were writing after all
780 
781  Rock::SwapDir::Pointer self(this);
782  IoState *sio = new IoState(self, &e, cbIo, cbData);
783 
784  sio->swap_dirn = index;
785  sio->swap_filen = e.swap_filen;
786  sio->readableAnchor_ = slot;
787  sio->file(theFile);
788 
789  debugs(47,5, "dir " << index << " has old filen: " <<
790  asHex(sio->swap_filen).upperCase().minDigits(8));
791 
792  // When StoreEntry::swap_filen for e was set by our anchorEntry(), e had a
793  // public key, but it could have gone private since then (while keeping the
794  // anchor lock). The stale anchor key is not (and cannot be) erased (until
795  // the marked-for-deletion/release anchor/entry is unlocked is recycled).
796  const auto ourAnchor = [&]() {
797  if (const auto publicKey = e.publicKey())
798  return slot->sameKey(publicKey);
799  return true; // cannot check
800  };
801  assert(ourAnchor());
802 
803  // For collapsed disk hits: e.swap_file_sz and slot->basics.swap_file_sz
804  // may still be zero and basics.swap_file_sz may grow.
806 
807  return sio;
808 }
809 
810 void
812 {
813  if (!theFile)
814  fatalf("Rock cache_dir failed to initialize db file: %s", filePath);
815 
816  if (theFile->error()) {
817  int xerrno = errno; // XXX: where does errno come from
818  fatalf("Rock cache_dir at %s failed to open db file: %s", filePath,
819  xstrerr(xerrno));
820  }
821 
822  debugs(47, 2, "Rock cache_dir[" << index << "] limits: " <<
823  std::setw(12) << maxSize() << " disk bytes, " <<
824  std::setw(7) << map->entryLimit() << " entries, and " <<
825  std::setw(7) << map->sliceLimit() << " slots");
826 
827  if (!Rebuild::Start(*this))
828  storeRebuildComplete(nullptr);
829 }
830 
831 void
833 {
834  theFile = nullptr;
835 }
836 
837 void
838 Rock::SwapDir::readCompleted(const char *, int rlen, int errflag, RefCount< ::ReadRequest> r)
839 {
840  ReadRequest *request = dynamic_cast<Rock::ReadRequest*>(r.getRaw());
841  assert(request);
842  IoState::Pointer sio = request->sio;
843  sio->handleReadCompletion(*request, rlen, errflag);
844 }
845 
846 void
848 {
849  // TODO: Move details into IoState::handleWriteCompletion() after figuring
850  // out how to deal with map access. See readCompleted().
851 
852  Rock::WriteRequest *request = dynamic_cast<Rock::WriteRequest*>(r.getRaw());
853  assert(request);
854  assert(request->sio != nullptr);
855  IoState &sio = *request->sio;
856 
857  // quit if somebody called IoState::close() while we were waiting
858  if (!sio.stillWaiting()) {
859  debugs(79, 3, "ignoring closed entry " << sio.swap_filen);
860  noteFreeMapSlice(request->sidCurrent);
861  return;
862  }
863 
864  debugs(79, 7, "errflag=" << errflag << " rlen=" << request->len << " eof=" << request->eof);
865 
866  if (errflag != DISK_OK)
867  handleWriteCompletionProblem(errflag, *request);
868  else if (!sio.expectedReply(request->id))
869  handleWriteCompletionProblem(DISK_ERROR, *request);
870  else
871  handleWriteCompletionSuccess(*request);
872 
873  if (sio.touchingStoreEntry())
875 }
876 
878 void
880 {
881  auto &sio = *(request.sio);
882  sio.splicingPoint = request.sidCurrent;
883  // do not increment sio.offset_ because we do it in sio->write()
884 
885  assert(sio.writeableAnchor_);
886  if (sio.writeableAnchor_->start < 0) { // wrote the first slot
887  Must(request.sidPrevious < 0);
888  sio.writeableAnchor_->start = request.sidCurrent;
889  } else {
890  Must(request.sidPrevious >= 0);
891  map->writeableSlice(sio.swap_filen, request.sidPrevious).next = request.sidCurrent;
892  }
893 
894  // finalize the shared slice info after writing slice contents to disk;
895  // the chain gets possession of the slice we were writing
896  Ipc::StoreMap::Slice &slice =
897  map->writeableSlice(sio.swap_filen, request.sidCurrent);
898  slice.size = request.len - sizeof(DbCellHeader);
899  Must(slice.next < 0);
900 
901  if (request.eof) {
902  assert(sio.e);
903  if (sio.touchingStoreEntry()) {
904  sio.e->swap_file_sz = sio.writeableAnchor_->basics.swap_file_sz =
905  sio.offset_;
906 
907  map->switchWritingToReading(sio.swap_filen);
908  // sio.e keeps the (now read) lock on the anchor
909  // storeSwapOutFileClosed() sets swap_status and calls storeWriterDone()
910  }
911  sio.writeableAnchor_ = nullptr;
912  sio.finishedWriting(DISK_OK);
913  }
914 }
915 
917 void
919 {
920  auto &sio = *request.sio;
921 
922  noteFreeMapSlice(request.sidCurrent);
923 
924  writeError(sio);
925  sio.finishedWriting(errflag);
926  // and hope that Core will call disconnect() to close the map entry
927 }
928 
929 void
931 {
932  // Do not abortWriting here. The entry should keep the write lock
933  // instead of losing association with the store and confusing core.
934  map->freeEntry(sio.swap_filen); // will mark as unusable, just in case
935 
936  if (sio.touchingStoreEntry())
938  // else noop: a fresh entry update error does not affect stale entry readers
939 
940  // All callers must also call IoState callback, to propagate the error.
941 }
942 
943 void
945 {
946  if (!map)
947  return;
948 
949  Ipc::StoreMapUpdate update(updatedE);
950  if (!map->openForUpdating(update, updatedE->swap_filen))
951  return;
952 
953  try {
954  AsyncJob::Start(new HeaderUpdater(this, update));
955  } catch (const std::exception &ex) {
956  debugs(20, 2, "error starting to update entry " << *updatedE << ": " << ex.what());
957  map->abortUpdating(update);
958  }
959 }
960 
961 bool
963 {
964  return freeSlots != nullptr && !freeSlots->size();
965 }
966 
967 // storeSwapOutFileClosed calls this method on DISK_NO_SPACE_LEFT,
968 // but it should not happen for us
969 void
971 {
972  debugs(20, DBG_IMPORTANT, "ERROR: Squid BUG: No space left with rock cache_dir: " <<
973  filePath);
974 }
975 
977 void
979 {
980  // The Store calls this to free some db space, but there is nothing wrong
981  // with a full() db, except when db has to shrink after reconfigure, and
982  // we do not support shrinking yet (it would have to purge specific slots).
983  // TODO: Disable maintain() requests when they are pointless.
984 }
985 
986 void
988 {
989  debugs(47, 5, &e << ' ' << e.swap_dirn << ' ' << e.swap_filen);
990  if (repl && repl->Referenced)
991  repl->Referenced(repl, &e, &e.repl);
992 }
993 
994 bool
996 {
997  debugs(47, 5, &e << ' ' << e.swap_dirn << ' ' << e.swap_filen);
998  if (repl && repl->Dereferenced)
999  repl->Dereferenced(repl, &e, &e.repl);
1000 
1001  // no need to keep e in the global store_table for us; we have our own map
1002  return false;
1003 }
1004 
1005 bool
1007 {
1008  // no entry-specific files to unlink
1009  return false;
1010 }
1011 
1012 void
1014 {
1015  if (map)
1016  map->freeEntryByKey(key); // may not be there
1017 }
1018 
1019 void
1021 {
1022  debugs(47, 5, e);
1023  if (e.hasDisk(index)) {
1024  if (map->freeEntry(e.swap_filen))
1026  if (!e.locked())
1027  disconnect(e);
1028  } else if (const auto key = e.publicKey()) {
1029  evictIfFound(key);
1030  }
1031 }
1032 
1033 void
1035 {
1036  debugs(47, 5, e);
1037  if (repl)
1038  repl->Add(repl, &e, &e.repl);
1039 }
1040 
1041 void
1043 {
1044  debugs(47, 5, e);
1045  if (repl)
1046  repl->Remove(repl, &e, &e.repl);
1047 }
1048 
1049 void
1051 {
1052  storeAppendPrintf(&e, "\n");
1053  storeAppendPrintf(&e, "Maximum Size: %" PRIu64 " KB\n", maxSize() >> 10);
1054  storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
1055  currentSize() / 1024.0,
1056  Math::doublePercent(currentSize(), maxSize()));
1057 
1058  const int entryLimit = entryLimitActual();
1059  const int slotLimit = slotLimitActual();
1060  storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
1061  if (map && entryLimit > 0) {
1062  const int entryCount = map->entryCount();
1063  storeAppendPrintf(&e, "Current entries: %9d %.2f%%\n",
1064  entryCount, (100.0 * entryCount / entryLimit));
1065  }
1066 
1067  storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
1068  if (map && slotLimit > 0) {
1069  const unsigned int slotsFree = !freeSlots ? 0 : freeSlots->size();
1070  if (slotsFree <= static_cast<unsigned int>(slotLimit)) {
1071  const int usedSlots = slotLimit - static_cast<int>(slotsFree);
1072  storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
1073  usedSlots, (100.0 * usedSlots / slotLimit));
1074  }
1075  if (slotLimit < 100) { // XXX: otherwise too expensive to count
1077  map->updateStats(stats);
1078  stats.dump(e);
1079  }
1080  }
1081 
1082  storeAppendPrintf(&e, "Pending operations: %d out of %d\n",
1084 
1085  storeAppendPrintf(&e, "Flags:");
1086 
1087  if (flags.selected)
1088  storeAppendPrintf(&e, " SELECTED");
1089 
1090  if (flags.read_only)
1091  storeAppendPrintf(&e, " READ-ONLY");
1092 
1093  storeAppendPrintf(&e, "\n");
1094 
1095 }
1096 
1097 SBuf
1099 {
1100  return Ipc::Mem::Segment::Name(SBuf(path), "map");
1101 }
1102 
1103 const char *
1105 {
1106  static String spacesPath;
1107  spacesPath = path;
1108  spacesPath.append("_spaces");
1109  return spacesPath.termedBuf();
1110 }
1111 
1112 bool
1114 {
1115  return map->hasReadableEntry(reinterpret_cast<const cache_key*>(e.key));
1116 }
1117 
1119 
1121 {
1122  Must(mapOwners.empty() && freeSlotsOwners.empty());
1123  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
1124  if (const Rock::SwapDir *const sd = dynamic_cast<Rock::SwapDir *>(INDEXSD(i))) {
1125  rebuildStatsOwners.push_back(Rebuild::Stats::Init(*sd));
1126 
1127  const int64_t capacity = sd->slotLimitActual();
1128 
1129  SwapDir::DirMap::Owner *const mapOwner =
1130  SwapDir::DirMap::Init(sd->inodeMapPath(), capacity);
1131  mapOwners.push_back(mapOwner);
1132 
1133  // TODO: somehow remove pool id and counters from PageStack?
1136  config.pageSize = 0; // this is an index of slots on _disk_
1137  config.capacity = capacity;
1138  config.createFull = false; // Rebuild finds and pushes free slots
1139  Ipc::Mem::Owner<Ipc::Mem::PageStack> *const freeSlotsOwner =
1140  shm_new(Ipc::Mem::PageStack)(sd->freeSlotsPath(), config);
1141  freeSlotsOwners.push_back(freeSlotsOwner);
1142  }
1143  }
1144 }
1145 
1147 {
1148  for (size_t i = 0; i < mapOwners.size(); ++i) {
1149  delete rebuildStatsOwners[i];
1150  delete mapOwners[i];
1151  delete freeSlotsOwners[i];
1152  }
1153 }
1154 
void fatal(const char *message)
Definition: fatal.cc:28
const char * xstrerr(int error)
Definition: xstrerror.cc:83
int64_t diskOffset(Ipc::Mem::PageId &pageId) const
Definition: RockSwapDir.cc:685
StoreIOState::Pointer openStoreIO(StoreEntry &, StoreIOState::STIOCB *, void *) override
Definition: RockSwapDir.cc:754
#define INDEXSD(i)
Definition: SquidConfig.h:74
approximate stats of a set of ReadWriteLocks
Definition: ReadWriteLock.h:70
void evictCached(StoreEntry &) override
static char vector[AUTH_VECTOR_LEN]
uint32_t poolId
pool ID
Definition: PageStack.h:125
#define DBG_CRITICAL
Definition: Stream.h:37
const char * freeSlotsPath() const
void updateHeaders(StoreEntry *e) override
make stored metadata and HTTP headers the same as in the given entry
Definition: RockSwapDir.cc:944
const cache_key * publicKey() const
Definition: Store.h:112
Definition: forward.h:27
void ioCompletedNotification() override
Definition: RockSwapDir.cc:811
unsigned char cache_key
Store key.
Definition: forward.h:29
#define EBIT_SET(flag, bit)
Definition: defines.h:65
virtual bool allowOptionReconfigure(const char *const) const
Definition: Disk.h:85
MemObject * mem_obj
Definition: Store.h:220
bool hasReadableEntry(const StoreEntry &) const override
whether this cache dir has an entry with e.key
void createMemObject()
Definition: store.cc:1575
IoXactionId id
identifies this write transaction for the requesting IoState
PoolId pool
Definition: Page.h:39
uint64_t currentCount() const override
the total number of objects stored right now
Definition: RockSwapDir.cc:157
Shared memory page identifier, address, or handler.
Definition: Page.h:23
void storeAppendPrintf(StoreEntry *e, const char *fmt,...)
Definition: store.cc:855
bool sameKey(const cache_key *const aKey) const
Definition: StoreMap.cc:952
SlotId sidCurrent
slot being written using this write request
Definition: SBuf.h:93
void anchorEntry(StoreEntry &e, const sfileno filen, const Ipc::StoreMapAnchor &anchor)
Definition: RockSwapDir.cc:107
void detachFromDisk()
Definition: store.cc:1953
StoreIOState::Pointer sio
Definition: MemObject.h:162
void create() override
called when the runner should create a new memory segment
SBuf inodeMapPath() const
void set(const StoreEntry &anEntry, const cache_key *aKey=nullptr)
store StoreEntry key and basics for an inode slot
Definition: StoreMap.cc:959
Edition fresh
new anchor and the updated chain prefix
Definition: StoreMap.h:209
#define xstrdup
bool touchingStoreEntry() const
Definition: StoreIOState.cc:52
const A & max(A const &lhs, A const &rhs)
int xwrite(int fd, const void *buf, size_t bufSize)
POSIX write(2) equivalent.
Definition: unistd.h:67
#define PRIu64
Definition: types.h:114
@ SWAPOUT_WRITING
Definition: enums.h:56
C * getRaw() const
Definition: RefCount.h:89
uint16_t flags
Definition: Store.h:231
void parseSize(const bool reconfiguring)
parses anonymous cache_dir size option
Definition: RockSwapDir.cc:353
Store::DiskConfig cacheSwap
Definition: SquidConfig.h:423
#define shm_new(Class)
Definition: Pointer.h:200
@ SWAPOUT_DONE
Definition: enums.h:59
int store_open_disk_fd
bool unlinkdUseful() const override
whether SwapDir may benefit from unlinkd
void create() override
create system resources needed for this store to operate in the future
Definition: RockSwapDir.cc:212
void self_destruct(void)
Definition: cache_cf.cc:275
void maintain() override
purge while full(); it should be sufficient to purge just one
Definition: RockSwapDir.cc:978
void handleWriteCompletionProblem(const int errflag, const WriteRequest &request)
code shared by writeCompleted() error handling cases
Definition: RockSwapDir.cc:918
void dumpTimeOption(StoreEntry *e) const
reports time-specific options; mimics SwapDir::optionObjectSizeDump()
Definition: RockSwapDir.cc:436
#define DISK_OK
Definition: defines.h:27
bool stillWaiting() const
whether we are still waiting for the I/O results (i.e., not closed)
Definition: RockIoState.h:43
void storeRebuildComplete(StoreRebuildData *dc)
#define TexcHere(msg)
legacy convenience macro; it is not difficult to type Here() now
Definition: TextException.h:63
bool parseSizeOption(char const *option, const char *value, int reconfiguring)
parses size-specific options; mimics SwapDir::optionObjectSizeParse()
Definition: RockSwapDir.cc:495
bool needsDiskStrand() const override
needs a dedicated kid process
Definition: RockSwapDir.cc:312
void validateOptions()
warns of configuration problems; may quit
Definition: RockSwapDir.cc:542
double doublePercent(const double, const double)
Definition: SquidMath.cc:25
IoState::Pointer sio
void reconfigure() override
Definition: RockSwapDir.cc:343
std::atomic< StoreMapSliceId > next
ID of the next entry slice.
Definition: StoreMap.h:49
#define O_BINARY
Definition: defines.h:134
struct Ipc::StoreMapAnchor::Basics basics
void reference(StoreEntry &e) override
somebody needs this entry (many cache replacement policies need to know)
Definition: RockSwapDir.cc:987
IoState::Pointer sio
void noteFreeMapSlice(const Ipc::StoreMapSliceId fileno) override
adjust slice-linked state before a locked Readable slice is erased
Definition: RockSwapDir.cc:739
ConfigOption * getOptionTree() const override
Definition: RockSwapDir.cc:370
bool IamDiskProcess() STUB_RETVAL_NOP(false) bool InDaemonMode() STUB_RETVAL_NOP(false) bool UsingSmp() STUB_RETVAL_NOP(false) bool IamCoordinatorProcess() STUB_RETVAL(false) bool IamPrimaryProcess() STUB_RETVAL(false) int NumberOfKids() STUB_RETVAL(0) void setMaxFD(void) STUB void setSystemLimits(void) STUB void squid_signal(int
whether the current process is dedicated to managing a cache_dir
int32_t StoreMapSliceId
Definition: StoreMap.h:24
void append(char const *buf, int len)
Definition: String.cc:131
int max_open_disk_fds
Definition: SquidConfig.h:456
bool anchorToCache(StoreEntry &) override
Definition: RockSwapDir.cc:76
int64_t entryLimitActual() const
max number of possible entries in db
Definition: RockSwapDir.cc:205
bool hasDisk(const sdirno dirn=-1, const sfileno filen=-1) const
Definition: store.cc:1929
SlotId sidPrevious
slot that will point to sidCurrent in the cache_dir map
sdirno swap_dirn
Definition: Store.h:237
bool set() const
true if and only if both critical components have been initialized
Definition: Page.h:29
ping_status_t ping_status
Definition: Store.h:241
AsHex< Integer > asHex(const Integer n)
a helper to ease AsHex object creation
Definition: IoManip.h:169
int64_t diskOffsetLimit() const
Definition: RockSwapDir.cc:692
StoreEntry * entry
the store entry being updated
Definition: StoreMap.h:207
#define shm_old(Class)
Definition: Pointer.h:201
#define safe_free(x)
Definition: xalloc.h:73
store_status_t store_status
Definition: Store.h:243
~SwapDirRr() override
#define assert(EX)
Definition: assert.h:17
void createError(const char *const msg)
Definition: RockSwapDir.cc:272
void handleWriteCompletionSuccess(const WriteRequest &request)
code shared by writeCompleted() success handling cases
Definition: RockSwapDir.cc:879
std::atomic< uint64_t > swap_file_sz
Definition: StoreMap.h:105
size_t PageLevel()
approximate total number of shared memory pages used now
Definition: Pages.cc:80
void fatalf(const char *fmt,...)
Definition: fatal.cc:68
StoreIOState::Pointer createUpdateIO(const Ipc::StoreMapUpdate &, StoreIOState::STIOCB *, void *)
Definition: RockSwapDir.cc:652
bool expectedReply(const IoXactionId receivedId)
Definition: RockIoState.cc:333
Aggregates information required for updating entry metadata and headers.
Definition: StoreMap.h:181
void ignoreReferences(StoreEntry &e)
delete from repl policy scope
@ PING_NONE
Has not considered whether to send ICP queries to peers yet.
Definition: enums.h:36
static Ipc::Mem::Owner< Stats > * Init(const SwapDir &)
Definition: RockRebuild.cc:259
bool canStore(const StoreEntry &e, int64_t diskSpaceNeeded, int &load) const override
check whether we can store the entry; if we can, report current load
Definition: RockSwapDir.cc:584
void diskFull() override
Definition: RockSwapDir.cc:970
#define Assure(condition)
Definition: Assure.h:35
SlotId reserveSlotForWriting()
finds and returns a free db slot to fill or throws
Definition: RockSwapDir.cc:699
bool eof
whether this is the last request for the entry
void disconnect(StoreEntry &e) override
called when the entry is about to forget its association with cache_dir
Definition: RockSwapDir.cc:121
StoreMapAnchor * anchor
StoreMap::anchors[fileNo], for convenience/speed.
Definition: StoreMap.h:193
void dump(StoreEntry &e) const
int64_t strtoll(const char *nptr, char **endptr, int base)
Definition: strtoll.c:61
bool complete() const
Definition: StoreMap.h:77
virtual ConfigOption * getOptionTree() const
Definition: Disk.cc:258
void finalizeSwapoutFailure(StoreEntry &) override
abort the failed swapout that has been already noticed by Store
Definition: RockSwapDir.cc:180
uint64_t currentSize() const override
current size
Definition: RockSwapDir.cc:148
bool updateAnchored(StoreEntry &) override
Definition: RockSwapDir.cc:94
@ STORE_OK
Definition: enums.h:45
signed_int32_t sfileno
Definition: forward.h:22
void handleReadCompletion(Rock::ReadRequest &request, const int rlen, const int errFlag)
forwards read data (or an error) to the reader that initiated this I/O
Definition: RockIoState.cc:150
static SBuf Name(const SBuf &prefix, const char *suffix)
concatenates parts of a name to form a complete name (or its prefix)
Definition: Segment.cc:51
size_t pageSize
page size, used to calculate shared memory size
Definition: PageStack.h:126
Ipc::StoreMapAnchor * writeableAnchor_
starting point for writing
Definition: RockIoState.h:57
void closeCompleted() override
Definition: RockSwapDir.cc:832
void exportInto(StoreEntry &) const
load StoreEntry basics that were previously stored with set()
Definition: StoreMap.cc:979
void writeError(StoreIOState &sio)
Definition: RockSwapDir.cc:930
initializes shared memory segments used by Rock::SwapDir
Definition: RockSwapDir.h:155
void trackReferences(StoreEntry &e)
add to replacement policy scope
void STIOCB(void *their_data, int errflag, StoreIOState::Pointer self)
Definition: StoreIOState.h:39
const char * termedBuf() const
Definition: SquidString.h:93
bool validSlotId(const SlotId slotId) const
whether the given slot ID may point to a slot in this db
Definition: RockSwapDir.cc:733
uint32_t number
page number within the segment
Definition: Page.h:42
~SwapDir() override
Definition: RockSwapDir.cc:48
sfileno fileNo
StoreMap::fileNos[name], for convenience/speed.
Definition: StoreMap.h:194
size_t PageLimit()
the total number of shared memory pages that can be in use at any time
Definition: Pages.cc:55
sfileno SlotId
db cell number, starting with cell 0 (always occupied by the db header)
Definition: forward.h:30
PageStack construction and SharedMemorySize calculation parameters.
Definition: PageStack.h:123
int64_t slotLimitAbsolute() const
Rock store implementation limit.
Definition: RockSwapDir.cc:187
static bool Start(SwapDir &dir)
Definition: RockRebuild.cc:281
void evictIfFound(const cache_key *) override
#define Must(condition)
Definition: TextException.h:75
int64_t slotLimitActual() const
total number of slots in this db
Definition: RockSwapDir.cc:196
uint64_t swap_file_sz
Definition: Store.h:229
StoreEntry * get(const cache_key *key) override
Definition: RockSwapDir.cc:57
bool parseRateOption(char const *option, const char *value, int reconfiguring)
parses rate-specific options; mimics SwapDir::optionObjectSizeParse()
Definition: RockSwapDir.cc:445
#define DBG_IMPORTANT
Definition: Stream.h:38
StoreEntry * e
Definition: StoreIOState.h:73
bool createFull
whether a newly created PageStack should be prefilled with PageIds
Definition: PageStack.h:130
#define MYNAME
Definition: Stream.h:219
int xopen(const char *filename, int oflag, int pmode=0)
POSIX open(2) equivalent.
Definition: unistd.h:55
static DiskIOModule * Find(char const *type)
#define PRId64
Definition: types.h:104
DefineRunnerRegistratorIn(Rock, SwapDirRr)
void init() override
Definition: RockSwapDir.cc:281
#define DISK_ERROR
Definition: defines.h:28
int xclose(int fd)
POSIX close(2) equivalent.
Definition: unistd.h:43
PageCount capacity
the maximum number of pages
Definition: PageStack.h:127
void finalizeSwapoutSuccess(const StoreEntry &) override
finalize the successful swapout that has been already noticed by Store
Definition: RockSwapDir.cc:171
void parse(int index, char *path) override
Definition: RockSwapDir.cc:321
void attachToDisk(const sdirno, const sfileno, const swap_status_t)
Definition: store.cc:1940
void writeCompleted(int errflag, size_t len, RefCount< ::WriteRequest >) override
Definition: RockSwapDir.cc:847
static void Broadcast(const StoreEntry &e, const bool includingThisWorker=false)
notify other workers about changes in entry state (e.g., new data)
std::atomic< Size > size
slice contents size
Definition: StoreMap.h:48
int locked() const
Definition: Store.h:145
bool full() const
no more entries can be stored without purging
Definition: RockSwapDir.cc:962
bool UsingSmp()
Whether there should be more than one worker process running.
Definition: tools.cc:697
void dumpRateOption(StoreEntry *e) const
reports rate-specific options; mimics SwapDir::optionObjectSizeDump()
Definition: RockSwapDir.cc:487
void storeWriterDone()
called when a store writer ends its work (successfully or not)
Definition: store.cc:1808
StoreIOState::Pointer createStoreIO(StoreEntry &, StoreIOState::STIOCB *, void *) override
Definition: RockSwapDir.cc:613
bool doReportStat() const override
Definition: RockSwapDir.cc:165
@ ENTRY_VALIDATED
Definition: enums.h:108
sfileno swap_filen
Definition: StoreIOState.h:72
void readCompleted(const char *buf, int len, int errflag, RefCount< ::ReadRequest >) override
Definition: RockSwapDir.cc:838
uint64_t time_msec_t
Definition: gadgets.h:16
#define debugs(SECTION, LEVEL, CONTENT)
Definition: Stream.h:192
const A & min(A const &lhs, A const &rhs)
void Init(void)
prepares to parse ACLs configuration
Definition: AclRegs.cc:186
void dumpSizeOption(StoreEntry *e) const
reports size-specific options; mimics SwapDir::optionObjectSizeDump()
Definition: RockSwapDir.cc:535
bool parseTimeOption(char const *option, const char *value, int reconfiguring)
parses time-specific options; mimics SwapDir::optionObjectSizeParse()
Definition: RockSwapDir.cc:397
static PoolId IdForSwapDirSpace(const int dirIdx)
stack of free rock cache_dir slot numbers
Definition: PageStack.h:171
RemovalPolicyNode repl
Definition: Store.h:221
void statfs(StoreEntry &e) const override
class SquidConfig Config
Definition: SquidConfig.cc:12
bool allowOptionReconfigure(const char *const option) const override
Definition: RockSwapDir.cc:389
static void Start(const Pointer &job)
Definition: AsyncJob.cc:37
@ STORE_PENDING
Definition: enums.h:46
SwapOut swapout
Definition: MemObject.h:169
sfileno swap_filen
unique ID inside a cache_dir for swapped out entries; -1 for others
Definition: Store.h:235
bool dereference(StoreEntry &e) override
Definition: RockSwapDir.cc:995
int GetInteger(void)
Definition: Parsing.cc:148
bool InDaemonMode()
Whether we are running in daemon mode.
Definition: tools.cc:691

 

Introduction

Documentation

Support

Miscellaneous