CacheDir.cc

00001 /** @file
00002 
00003   A brief file description
00004 
00005   @section license License
00006 
00007   Licensed to the Apache Software Foundation (ASF) under one
00008   or more contributor license agreements.  See the NOTICE file
00009   distributed with this work for additional information
00010   regarding copyright ownership.  The ASF licenses this file
00011   to you under the Apache License, Version 2.0 (the
00012   "License"); you may not use this file except in compliance
00013   with the License.  You may obtain a copy of the License at
00014 
00015       http://www.apache.org/licenses/LICENSE-2.0
00016 
00017   Unless required by applicable law or agreed to in writing, software
00018   distributed under the License is distributed on an "AS IS" BASIS,
00019   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00020   See the License for the specific language governing permissions and
00021   limitations under the License.
00022  */
00023 
00024 
00025 #include "P_Cache.h"
00026 
00027 // #define LOOP_CHECK_MODE 1
00028 #ifdef LOOP_CHECK_MODE
00029 #define DIR_LOOP_THRESHOLD            1000
00030 #endif
00031 #include "ink_stack_trace.h"
00032 
00033 #define CACHE_INC_DIR_USED(_m) do { \
00034 ProxyMutex *mutex = _m; \
00035 CACHE_INCREMENT_DYN_STAT(cache_direntries_used_stat); \
00036 } while (0)
00037 
00038 #define CACHE_DEC_DIR_USED(_m) do { \
00039 ProxyMutex *mutex = _m; \
00040 CACHE_DECREMENT_DYN_STAT(cache_direntries_used_stat); \
00041 } while (0)
00042 
00043 #define CACHE_INC_DIR_COLLISIONS(_m) do {\
00044 ProxyMutex *mutex = _m; \
00045 CACHE_INCREMENT_DYN_STAT(cache_directory_collision_count_stat); \
00046 } while (0)
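// Illustrative note: the do { ... } while (0) wrapper used by the three stat macros
// above turns a multi-statement body into a single statement, keeps the local
// "ProxyMutex *mutex" that the nested stat macro references scoped to the expansion,
// and lets the macro sit safely in an unbraced if/else. A minimal sketch of the idiom
// (EXAMPLE_TWO_STEPS is hypothetical and not part of this file):
//
//   #define EXAMPLE_TWO_STEPS(_x) do { int _tmp = (_x); (void) _tmp; } while (0)
//
//   if (cond)
//     EXAMPLE_TWO_STEPS(v);    // expands to exactly one statement, so 'else' still binds
//   else
//     something_else();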
00047 
00048 
00049 // Globals
00050 
00051 ClassAllocator<OpenDirEntry> openDirEntryAllocator("openDirEntry");
00052 Dir empty_dir;
00053 
00054 // OpenDir
00055 
00056 OpenDir::OpenDir()
00057 {
00058   SET_HANDLER(&OpenDir::signal_readers);
00059 }
00060 
00061 /*
00062    If allow_if_writers is false, open_write fails if there are other writers.
00063    max_writers sets the maximum number of concurrent writers that are
00064   allowed. Only the first writer can set the max_writers. It is ignored
00065    for later writers.
00066    Returns 1 on success and 0 on failure.
00067    */
00068 int
00069 OpenDir::open_write(CacheVC *cont, int allow_if_writers, int max_writers)
00070 {
00071   ink_assert(cont->vol->mutex->thread_holding == this_ethread());
00072   unsigned int h = cont->first_key.slice32(0);
00073   int b = h % OPEN_DIR_BUCKETS;
00074   for (OpenDirEntry *d = bucket[b].head; d; d = d->link.next) {
00075     if (!(d->writers.head->first_key == cont->first_key))
00076       continue;
00077     if (allow_if_writers && d->num_writers < d->max_writers) {
00078       d->writers.push(cont);
00079       d->num_writers++;
00080       cont->od = d;
00081       cont->write_vector = &d->vector;
00082       return 1;
00083     }
00084     return 0;
00085   }
00086   OpenDirEntry *od = THREAD_ALLOC(openDirEntryAllocator,
00087                                   cont->mutex->thread_holding);
00088   od->readers.head = NULL;
00089   od->writers.push(cont);
00090   od->num_writers = 1;
00091   od->max_writers = max_writers;
00092   od->vector.data.data = &od->vector.data.fast_data[0];
00093   od->dont_update_directory = 0;
00094   od->move_resident_alt = 0;
00095   od->reading_vec = 0;
00096   od->writing_vec = 0;
00097   dir_clear(&od->first_dir);
00098   cont->od = od;
00099   cont->write_vector = &od->vector;
00100   bucket[b].push(od);
00101   return 1;
00102 }
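// Illustrative sketch of how a writer CacheVC might call open_write, assuming the
// volume mutex is already held on this thread and that the Vol keeps its OpenDir in a
// member named open_dir (the names here are assumptions, for illustration only):
//
//   // the first writer decides the policy: allow other writers, capped at max_writers
//   if (!vol->open_dir.open_write(this, 1 /* allow_if_writers */, max_writers))
//     return EVENT_DONE;       // an entry already has writers and no more are allowed
//   // on success, this->od and this->write_vector now refer to the shared entry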
00103 
00104 int
00105 OpenDir::signal_readers(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
00106 {
00107   Queue<CacheVC, Link_CacheVC_opendir_link> newly_delayed_readers;
00108   EThread *t = mutex->thread_holding;
00109   CacheVC *c = NULL;
00110   while ((c = delayed_readers.dequeue())) {
00111     CACHE_TRY_LOCK(lock, c->mutex, t);
00112     if (lock) {
00113       c->f.open_read_timeout = 0;
00114       c->handleEvent(EVENT_IMMEDIATE, 0);
00115       continue;
00116     }
00117     newly_delayed_readers.push(c);
00118   }
00119   if (newly_delayed_readers.head) {
00120     delayed_readers = newly_delayed_readers;
00121     EThread *t1 = newly_delayed_readers.head->mutex->thread_holding;
00122     if (!t1)
00123       t1 = mutex->thread_holding;
00124     t1->schedule_in(this, HRTIME_MSECONDS(cache_config_mutex_retry_delay));
00125   }
00126   return 0;
00127 }
00128 
00129 int
00130 OpenDir::close_write(CacheVC *cont)
00131 {
00132   ink_assert(cont->vol->mutex->thread_holding == this_ethread());
00133   cont->od->writers.remove(cont);
00134   cont->od->num_writers--;
00135   if (!cont->od->writers.head) {
00136     unsigned int h = cont->first_key.slice32(0);
00137     int b = h % OPEN_DIR_BUCKETS;
00138     bucket[b].remove(cont->od);
00139     delayed_readers.append(cont->od->readers);
00140     signal_readers(0, 0);
00141     cont->od->vector.clear();
00142     THREAD_FREE(cont->od, openDirEntryAllocator, cont->mutex->thread_holding);
00143   }
00144   cont->od = NULL;
00145   return 0;
00146 }
00147 
00148 OpenDirEntry *
00149 OpenDir::open_read(INK_MD5 *key)
00150 {
00151   unsigned int h = key->slice32(0);
00152   int b = h % OPEN_DIR_BUCKETS;
00153   for (OpenDirEntry *d = bucket[b].head; d; d = d->link.next)
00154     if (d->writers.head->first_key == *key)
00155       return d;
00156   return NULL;
00157 }
00158 
00159 int
00160 OpenDirEntry::wait(CacheVC *cont, int msec)
00161 {
00162   ink_assert(cont->vol->mutex->thread_holding == this_ethread());
00163   cont->f.open_read_timeout = 1;
00164   ink_assert(!cont->trigger);
00165   cont->trigger = cont->vol->mutex->thread_holding->schedule_in_local(cont, HRTIME_MSECONDS(msec));
00166   readers.push(cont);
00167   return EVENT_CONT;
00168 }
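// Illustrative note on the reader/writer handshake above: a reader that must wait for
// an active writer parks itself with OpenDirEntry::wait(), which sets
// f.open_read_timeout, schedules a timeout on the local thread, and queues the CacheVC
// on od->readers. When the last writer leaves in OpenDir::close_write(), the readers
// are appended to delayed_readers and signal_readers() retries each one under a
// trylock, re-scheduling itself for any whose lock it cannot take. A sketch of the
// reader side (hypothetical handler code, for illustration only):
//
//   if (writer_still_busy)
//     return od->wait(this, cache_config_mutex_retry_delay);  // parked until signaled
//                                                             // or the timeout fires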
00169 
00170 //
00171 // Cache Directory
00172 //
00173 
00174 // Returns 1 if the bucket chain contains no loop,
00175 // 0 if a loop is detected.
00176 int
00177 dir_bucket_loop_check(Dir *start_dir, Dir *seg)
00178 {
00179   if (start_dir == NULL)
00180     return 1;
00181 
00182   Dir *p1 = start_dir;
00183   Dir *p2 = start_dir;
00184 
00185   while (p2) {
00186     // p1 moves by one entry per iteration
00187     p1 = next_dir(p1, seg);
00188     // p2 moves by two entries per iteration
00189     p2 = next_dir(p2, seg);
00190     if (p2)
00191       p2 = next_dir(p2, seg);
00192     else
00193       return 1;
00194 
00195     if (p2 == p1)
00196       return 0;                 // we have a loop
00197   }
00198   return 1;
00199 }
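// Illustrative sketch: dir_bucket_loop_check above is Floyd's two-pointer cycle
// detection, walking the bucket chain with a one-step and a two-step cursor. The same
// idea on a plain singly linked list; ExampleNode and example_has_cycle are
// hypothetical and exist only for this sketch.

struct ExampleNode { ExampleNode *next; };

static bool
example_has_cycle(ExampleNode *head)
{
  ExampleNode *slow = head;
  ExampleNode *fast = head;
  while (fast && fast->next) {
    slow = slow->next;          // advances one node per iteration
    fast = fast->next->next;    // advances two nodes per iteration
    if (slow == fast)
      return true;              // the cursors can only meet inside a cycle
  }
  return false;                 // fast ran off the end, so the list is acyclic
}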
00200 
00201 // adds all the directory entries
00202 // in a segment to the segment freelist
00203 void
00204 dir_init_segment(int s, Vol *d)
00205 {
00206   d->header->freelist[s] = 0;
00207   Dir *seg = dir_segment(s, d);
00208   int l, b;
00209   memset(seg, 0, SIZEOF_DIR * DIR_DEPTH * d->buckets);
00210   for (l = 1; l < DIR_DEPTH; l++) {
00211     for (b = 0; b < d->buckets; b++) {
00212       Dir *bucket = dir_bucket(b, seg);
00213       dir_free_entry(dir_bucket_row(bucket, l), s, d);
00214     }
00215   }
00216 }
00217 
00218 
00219 // break the infinite loop in directory entries
00220 // Note : abuse of the token bit in dir entries
00221 int
00222 dir_bucket_loop_fix(Dir *start_dir, int s, Vol *d)
00223 {
00224   if (!dir_bucket_loop_check(start_dir, dir_segment(s, d))) {
00225     Warning("Dir loop exists, clearing segment %d", s);
00226     dir_init_segment(s, d);
00227     return 1;
00228   }
00229   return 0;
00230 }
00231 
00232 int
00233 dir_freelist_length(Vol *d, int s)
00234 {
00235   int free = 0;
00236   Dir *seg = dir_segment(s, d);
00237   Dir *e = dir_from_offset(d->header->freelist[s], seg);
00238   if (dir_bucket_loop_fix(e, s, d))
00239     return (DIR_DEPTH - 1) * d->buckets;
00240   while (e) {
00241     free++;
00242     e = next_dir(e, seg);
00243   }
00244   return free;
00245 }
00246 
00247 int
00248 dir_bucket_length(Dir *b, int s, Vol *d)
00249 {
00250   Dir *e = b;
00251   int i = 0;
00252   Dir *seg = dir_segment(s, d);
00253 #ifdef LOOP_CHECK_MODE
00254   if (dir_bucket_loop_fix(b, s, d))
00255     return 1;
00256 #endif
00257   while (e) {
00258     i++;
00259     if (i > 100)
00260       return -1;
00261     e = next_dir(e, seg);
00262   }
00263   return i;
00264 }
00265 
00266 int
00267 check_dir(Vol *d)
00268 {
00269   int i, s;
00270   Debug("cache_check_dir", "inside check dir");
00271   for (s = 0; s < d->segments; s++) {
00272     Dir *seg = dir_segment(s, d);
00273     for (i = 0; i < d->buckets; i++) {
00274       Dir *b = dir_bucket(i, seg);
00275       if (!(dir_bucket_length(b, s, d) >= 0)) return 0;
00276       if (!(!dir_next(b) || dir_offset(b))) return 0;
00277       if (!(dir_bucket_loop_check(b, seg))) return 0;
00278     }
00279   }
00280   return 1;
00281 }
00282 
00283 inline void
00284 unlink_from_freelist(Dir *e, int s, Vol *d)
00285 {
00286   Dir *seg = dir_segment(s, d);
00287   Dir *p = dir_from_offset(dir_prev(e), seg);
00288   if (p)
00289     dir_set_next(p, dir_next(e));
00290   else
00291     d->header->freelist[s] = dir_next(e);
00292   Dir *n = dir_from_offset(dir_next(e), seg);
00293   if (n)
00294     dir_set_prev(n, dir_prev(e));
00295 }
00296 
00297 inline Dir *
00298 dir_delete_entry(Dir *e, Dir *p, int s, Vol *d)
00299 {
00300   Dir *seg = dir_segment(s, d);
00301   int no = dir_next(e);
00302   d->header->dirty = 1;
00303   if (p) {
00304     unsigned int fo = d->header->freelist[s];
00305     unsigned int eo = dir_to_offset(e, seg);
00306     dir_clear(e);
00307     dir_set_next(p, no);
00308     dir_set_next(e, fo);
00309     if (fo)
00310       dir_set_prev(dir_from_offset(fo, seg), eo);
00311     d->header->freelist[s] = eo;
00312   } else {
00313     Dir *n = next_dir(e, seg);
00314     if (n) {
00315       dir_assign(e, n);
00316       dir_delete_entry(n, e, s, d);
00317       return e;
00318     } else {
00319       dir_clear(e);
00320       return NULL;
00321     }
00322   }
00323   return dir_from_offset(no, seg);
00324 }
00325 
00326 inline void
00327 dir_clean_bucket(Dir *b, int s, Vol *vol)
00328 {
00329   Dir *e = b, *p = NULL;
00330   Dir *seg = dir_segment(s, vol);
00331 #ifdef LOOP_CHECK_MODE
00332   int loop_count = 0;
00333 #endif
00334   do {
00335 #ifdef LOOP_CHECK_MODE
00336     loop_count++;
00337     if (loop_count > DIR_LOOP_THRESHOLD) {
00338       if (dir_bucket_loop_fix(b, s, vol))
00339         return;
00340     }
00341 #endif
00342     if (!dir_valid(vol, e) || !dir_offset(e)) {
00343       if (is_debug_tag_set("dir_clean"))
00344         Debug("dir_clean", "cleaning %p tag %X boffset %" PRId64 " b %p p %p l %d",
00345               e, dir_tag(e), dir_offset(e), b, p, dir_bucket_length(b, s, vol));
00346       if (dir_offset(e))
00347         CACHE_DEC_DIR_USED(vol->mutex);
00348       e = dir_delete_entry(e, p, s, vol);
00349       continue;
00350     }
00351     p = e;
00352     e = next_dir(e, seg);
00353   } while (e);
00354 }
00355 
00356 void
00357 dir_clean_segment(int s, Vol *d)
00358 {
00359   Dir *seg = dir_segment(s, d);
00360   for (int64_t i = 0; i < d->buckets; i++) {
00361     dir_clean_bucket(dir_bucket(i, seg), s, d);
00362     ink_assert(!dir_next(dir_bucket(i, seg)) || dir_offset(dir_bucket(i, seg)));
00363   }
00364 }
00365 
00366 void
00367 dir_clean_vol(Vol *d)
00368 {
00369   for (int64_t i = 0; i < d->segments; i++)
00370     dir_clean_segment(i, d);
00371   CHECK_DIR(d);
00372 }
00373 
00374 #if TS_USE_INTERIM_CACHE == 1
00375 static inline void
00376 interim_dir_clean_bucket(Dir *b, int s, Vol *vol, int offset)
00377 {
00378   Dir *e = b, *p = NULL;
00379   Dir *seg = dir_segment(s, vol);
00380   do {
00381     if (dir_ininterim(e) && dir_get_index(e) == offset) {
00382       e = dir_delete_entry(e, p, s, vol);
00383       continue;
00384     }
00385     p = e;
00386     e = next_dir(e, seg);
00387   } while(e);
00388 }
00389 
00390 void
00391 clear_interimvol_dir(Vol *v, int offset)
00392 {
00393   for (int i = 0; i < v->segments; i++) {
00394     Dir *seg = dir_segment(i, v);
00395     for (int j = 0; j < v->buckets; j++) {
00396       interim_dir_clean_bucket(dir_bucket(j, seg), i, v, offset);
00397     }
00398   }
00399 }
00400 void
00401 dir_clean_bucket(Dir *b, int s, InterimCacheVol *d)
00402 {
00403   Dir *e = b, *p = NULL;
00404   Vol *vol = d->vol;
00405   Dir *seg = dir_segment(s, vol);
00406 #ifdef LOOP_CHECK_MODE
00407   int loop_count = 0;
00408 #endif
00409   do {
00410 #ifdef LOOP_CHECK_MODE
00411     loop_count++;
00412     if (loop_count > DIR_LOOP_THRESHOLD) {
00413       if (dir_bucket_loop_fix(b, s, vol))
00414         return;
00415     }
00416 #endif
00417     if (!dir_valid(d, e) || !dir_offset(e)) {
00418       if (is_debug_tag_set("dir_clean"))
00419         Debug("dir_clean", "cleaning %p tag %X boffset %" PRId64 " b %p p %p l %d",
00420               e, dir_tag(e), dir_offset(e), b, p, dir_bucket_length(b, s, vol));
00421       if (dir_offset(e))
00422         CACHE_DEC_DIR_USED(vol->mutex);
00423       e = dir_delete_entry(e, p, s, vol);
00424       continue;
00425     }
00426     p = e;
00427     e = next_dir(e, seg);
00428   } while (e);
00429 }
00430 void
00431 dir_clean_segment(int s, InterimCacheVol *d)
00432 {
00433   Dir *seg = dir_segment(s, d->vol);
00434   for (int i = 0; i < d->vol->buckets; i++) {
00435     dir_clean_bucket(dir_bucket(i, seg), s, d);
00436     ink_assert(!dir_next(dir_bucket(i, seg)) || dir_offset(dir_bucket(i, seg)));
00437   }
00438 }
00439 void
00440 dir_clean_interimvol(InterimCacheVol *d)
00441 {
00442   for (int i = 0; i < d->vol->segments; i++)
00443     dir_clean_segment(i, d);
00444   CHECK_DIR(d);
00445 }
00446 
00447 void
00448 dir_clean_range_interimvol(off_t start, off_t end, InterimCacheVol *svol)
00449 {
00450   Vol *vol = svol->vol;
00451   int offset = svol - vol->interim_vols;
00452 
00453   for (int i = 0; i < vol->buckets * DIR_DEPTH * vol->segments; i++) {
00454     Dir *e = dir_index(vol, i);
00455     if (dir_ininterim(e) && dir_get_index(e) == offset && !dir_token(e) &&
00456             dir_offset(e) >= (int64_t)start && dir_offset(e) < (int64_t)end) {
00457       CACHE_DEC_DIR_USED(vol->mutex);
00458       dir_set_offset(e, 0);     // delete
00459     }
00460   }
00461 
00462   dir_clean_interimvol(svol);
00463 }
00464 #endif
00465 
00466 void
00467 dir_clear_range(off_t start, off_t end, Vol *vol)
00468 {
00469   for (off_t i = 0; i < vol->buckets * DIR_DEPTH * vol->segments; i++) {
00470     Dir *e = dir_index(vol, i);
00471     if (!dir_token(e) && dir_offset(e) >= (int64_t)start && dir_offset(e) < (int64_t)end) {
00472       CACHE_DEC_DIR_USED(vol->mutex);
00473       dir_set_offset(e, 0);     // delete
00474     }
00475   }
00476   dir_clean_vol(vol);
00477 }
00478 
00479 void
00480 check_bucket_not_contains(Dir *b, Dir *e, Dir *seg)
00481 {
00482   Dir *x = b;
00483   do {
00484     if (x == e)
00485       break;
00486     x = next_dir(x, seg);
00487   } while (x);
00488   ink_assert(!x);
00489 }
00490 
00491 void
00492 freelist_clean(int s, Vol *vol)
00493 {
00494   dir_clean_segment(s, vol);
00495   if (vol->header->freelist[s])
00496     return;
00497   Warning("cache directory overflow on '%s' segment %d, purging...", vol->path, s);
00498   int n = 0;
00499   Dir *seg = dir_segment(s, vol);
00500   for (int bi = 0; bi < vol->buckets; bi++) {
00501     Dir *b = dir_bucket(bi, seg);
00502     for (int l = 0; l < DIR_DEPTH; l++) {
00503       Dir *e = dir_bucket_row(b, l);
00504       if (dir_head(e) && !(n++ % 10)) {
00505         CACHE_DEC_DIR_USED(vol->mutex);
00506         dir_set_offset(e, 0);   // delete
00507       }
00508     }
00509   }
00510   dir_clean_segment(s, vol);
00511 }
00512 
00513 inline Dir *
00514 freelist_pop(int s, Vol *d)
00515 {
00516   Dir *seg = dir_segment(s, d);
00517   Dir *e = dir_from_offset(d->header->freelist[s], seg);
00518   if (!e) {
00519     freelist_clean(s, d);
00520     return NULL;
00521   }
00522   d->header->freelist[s] = dir_next(e);
00523   // if the freelist is bad, punt.
00524   if (dir_offset(e)) {
00525     dir_init_segment(s, d);
00526     return NULL;
00527   }
00528   Dir *h = dir_from_offset(d->header->freelist[s], seg);
00529   if (h)
00530     dir_set_prev(h, 0);
00531   return e;
00532 }
00533 
00534 int
00535 dir_segment_accounted(int s, Vol *d, int offby, int *f, int *u, int *et, int *v, int *av, int *as)
00536 {
00537   int free = dir_freelist_length(d, s);
00538   int used = 0, empty = 0;
00539   int valid = 0, agg_valid = 0;
00540   int64_t agg_size = 0;
00541   Dir *seg = dir_segment(s, d);
00542   for (int bi = 0; bi < d->buckets; bi++) {
00543     Dir *b = dir_bucket(bi, seg);
00544     Dir *e = b;
00545     while (e) {
00546       if (!dir_offset(e)) {
00547         ink_assert(e == b);
00548         empty++;
00549       } else {
00550         used++;
00551         if (dir_valid(d, e))
00552           valid++;
00553         if (dir_agg_valid(d, e))
00554           agg_valid++;
00555         agg_size += dir_approx_size(e);
00556       }
00557       e = next_dir(e, seg);
00558       if (!e)
00559         break;
00560     }
00561   }
00562   if (f)
00563     *f = free;
00564   if (u)
00565     *u = used;
00566   if (et)
00567     *et = empty;
00568   if (v)
00569     *v = valid;
00570   if (av)
00571     *av = agg_valid;
00572   if (as)
00573     *as = used ? (int) (agg_size / used) : 0;
00574   ink_assert(d->buckets * DIR_DEPTH - (free + used + empty) <= offby);
00575   return d->buckets * DIR_DEPTH - (free + used + empty) <= offby;
00576 }
00577 
00578 void
00579 dir_free_entry(Dir *e, int s, Vol *d)
00580 {
00581   Dir *seg = dir_segment(s, d);
00582   unsigned int fo = d->header->freelist[s];
00583   unsigned int eo = dir_to_offset(e, seg);
00584   dir_set_next(e, fo);
00585   if (fo)
00586     dir_set_prev(dir_from_offset(fo, seg), eo);
00587   d->header->freelist[s] = eo;
00588 }
00589 
00590 int
00591 dir_probe(CacheKey *key, Vol *d, Dir *result, Dir ** last_collision)
00592 {
00593   ink_assert(d->mutex->thread_holding == this_ethread());
00594   int s = key->slice32(0) % d->segments;
00595   int b = key->slice32(1) % d->buckets;
00596   Dir *seg = dir_segment(s, d);
00597   Dir *e = NULL, *p = NULL, *collision = *last_collision;
00598   Vol *vol = d;
00599   CHECK_DIR(d);
00600 #ifdef LOOP_CHECK_MODE
00601   if (dir_bucket_loop_fix(dir_bucket(b, seg), s, d))
00602     return 0;
00603 #endif
00604 Lagain:
00605   e = dir_bucket(b, seg);
00606   if (dir_offset(e))
00607     do {
00608       if (dir_compare_tag(e, key)) {
00609         ink_assert(dir_offset(e));
00610         // Bug: 51680. Need to check collision before checking
00611         // dir_valid(). In case of a collision, if !dir_valid(), we
00612         // don't want to call dir_delete_entry.
00613         if (collision) {
00614           if (collision == e) {
00615             collision = NULL;
00616             // increment collision stat
00617             // Note: dir_probe could be called multiple times
00618             // for the same document and so the collision stat
00619             // may not accurately reflect the number of documents
00620             // having the same first_key
00621             DDebug("cache_stats", "Incrementing dir collisions");
00622             CACHE_INC_DIR_COLLISIONS(d->mutex);
00623           }
00624           goto Lcont;
00625         }
00626         if (dir_valid(d, e)) {
00627           DDebug("dir_probe_hit", "found %X %X vol %d bucket %d boffset %" PRId64 "", key->slice32(0), key->slice32(1), d->fd, b, dir_offset(e));
00628           dir_assign(result, e);
00629           *last_collision = e;
00630 #if !TS_USE_INTERIM_CACHE
00631           ink_assert(dir_offset(e) * CACHE_BLOCK_SIZE < d->len);
00632 #endif
00633           return 1;
00634         } else {                // delete the invalid entry
00635           CACHE_DEC_DIR_USED(d->mutex);
00636           e = dir_delete_entry(e, p, s, d);
00637           continue;
00638         }
00639       } else
00640         DDebug("dir_probe_tag", "tag mismatch %p %X vs expected %X", e, dir_tag(e), key->slice32(3));
00641     Lcont:
00642       p = e;
00643       e = next_dir(e, seg);
00644     } while (e);
00645   if (collision) {              // last collision no longer in the list, retry
00646     DDebug("cache_stats", "Incrementing dir collisions");
00647     CACHE_INC_DIR_COLLISIONS(d->mutex);
00648     collision = NULL;
00649     goto Lagain;
00650   }
00651   DDebug("dir_probe_miss", "missed %X %X on vol %d bucket %d at %p", key->slice32(0), key->slice32(1), d->fd, b, seg);
00652   CHECK_DIR(d);
00653   return 0;
00654 }
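// Illustrative note: dir_probe above addresses the directory with two independent
// 32-bit slices of the cache key -- slice32(0) selects the segment, slice32(1) selects
// the bucket within that segment -- and the tag stored in each Dir entry
// (dir_compare_tag) disambiguates keys that land in the same bucket. In outline:
//
//   int s = key->slice32(0) % d->segments;   // which segment
//   int b = key->slice32(1) % d->buckets;    // which bucket inside that segment
//   // then walk dir_bucket(b, dir_segment(s, d)), matching on tag, then on validity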
00655 
00656 int
00657 dir_insert(CacheKey *key, Vol *d, Dir *to_part)
00658 {
00659   ink_assert(d->mutex->thread_holding == this_ethread());
00660   int s = key->slice32(0) % d->segments, l;
00661   int bi = key->slice32(1) % d->buckets;
00662   ink_assert(dir_approx_size(to_part) <= MAX_FRAG_SIZE + sizeofDoc);
00663   Dir *seg = dir_segment(s, d);
00664   Dir *e = NULL;
00665   Dir *b = dir_bucket(bi, seg);
00666   Vol *vol = d;
00667 #if defined(DEBUG) && defined(DO_CHECK_DIR_FAST)
00668   unsigned int t = DIR_MASK_TAG(key->slice32(2));
00669   Dir *col = b;
00670   while (col) {
00671     ink_assert((dir_tag(col) != t) || (dir_offset(col) != dir_offset(to_part)));
00672     col = next_dir(col, seg);
00673   }
00674 #endif
00675   CHECK_DIR(d);
00676 
00677 Lagain:
00678   // get from this row first
00679   e = b;
00680   if (dir_is_empty(e))
00681     goto Lfill;
00682   for (l = 1; l < DIR_DEPTH; l++) {
00683     e = dir_bucket_row(b, l);
00684     if (dir_is_empty(e)) {
00685       unlink_from_freelist(e, s, d);
00686       goto Llink;
00687     }
00688   }
00689   // get one from the freelist
00690   e = freelist_pop(s, d);
00691   if (!e)
00692     goto Lagain;
00693 Llink:
00694 #if TS_USE_INTERIM_CACHE == 1
00695   dir_assign(e, b);
00696 #else
00697   dir_set_next(e, dir_next(b));
00698 #endif
00699   dir_set_next(b, dir_to_offset(e, seg));
00700 Lfill:
00701 #if TS_USE_INTERIM_CACHE == 1
00702   dir_assign_data(b, to_part);
00703   dir_set_tag(b, key->slice32(2));
00704 #else
00705   dir_assign_data(e, to_part);
00706   dir_set_tag(e, key->slice32(2));
00707   ink_assert(vol_offset(d, e) < (d->skip + d->len));
00708 #endif
00709   DDebug("dir_insert",
00710         "insert %p %X into vol %d bucket %d at %p tag %X %X boffset %" PRId64 "",
00711          e, key->slice32(0), d->fd, bi, e, key->slice32(1), dir_tag(e), dir_offset(e));
00712   CHECK_DIR(d);
00713   d->header->dirty = 1;
00714   CACHE_INC_DIR_USED(d->mutex);
00715   return 1;
00716 }
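// Illustrative note on entry allocation in dir_insert above: a slot is taken, in order
// of preference, from (1) the bucket's head row if it is empty, (2) another empty row
// of the same bucket (first unlinked from the freelist), or (3) the segment freelist
// via freelist_pop(). If the freelist is exhausted, freelist_clean() purges roughly
// every tenth head entry in the segment and the insert retries from Lagain.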
00717 
00718 int
00719 dir_overwrite(CacheKey *key, Vol *d, Dir *dir, Dir *overwrite, bool must_overwrite)
00720 {
00721   ink_assert(d->mutex->thread_holding == this_ethread());
00722   int s = key->slice32(0) % d->segments, l;
00723   int bi = key->slice32(1) % d->buckets;
00724   Dir *seg = dir_segment(s, d);
00725   Dir *e = NULL;
00726   Dir *b = dir_bucket(bi, seg);
00727   unsigned int t = DIR_MASK_TAG(key->slice32(2));
00728   int res = 1;
00729 #ifdef LOOP_CHECK_MODE
00730   int loop_count = 0;
00731   bool loop_possible = true;
00732 #endif
00733   Vol *vol = d;
00734   CHECK_DIR(d);
00735 
00736   ink_assert((unsigned int) dir_approx_size(dir) <= (unsigned int) (MAX_FRAG_SIZE + sizeofDoc));        // XXX - size should be unsigned
00737 Lagain:
00738   // find entry to overwrite
00739   e = b;
00740   if (dir_offset(e))
00741     do {
00742 #ifdef LOOP_CHECK_MODE
00743       loop_count++;
00744       if (loop_count > DIR_LOOP_THRESHOLD && loop_possible) {
00745         if (dir_bucket_loop_fix(b, s, d)) {
00746           loop_possible = false;
00747           goto Lagain;
00748         }
00749       }
00750 #endif
00751 #if TS_USE_INTERIM_CACHE == 1
00752       if (dir_tag(e) == t && dir_get_offset(e) == dir_get_offset(overwrite))
00753 #else
00754       if (dir_tag(e) == t && dir_offset(e) == dir_offset(overwrite))
00755 #endif
00756         goto Lfill;
00757       e = next_dir(e, seg);
00758     } while (e);
00759   if (must_overwrite)
00760     return 0;
00761   res = 0;
00762   // get from this row first
00763   e = b;
00764   if (dir_is_empty(e)) {
00765     CACHE_INC_DIR_USED(d->mutex);
00766     goto Lfill;
00767   }
00768   for (l = 1; l < DIR_DEPTH; l++) {
00769     e = dir_bucket_row(b, l);
00770     if (dir_is_empty(e)) {
00771       unlink_from_freelist(e, s, d);
00772       goto Llink;
00773     }
00774   }
00775   // get one from the freelist
00776   e = freelist_pop(s, d);
00777   if (!e)
00778     goto Lagain;
00779 Llink:
00780   CACHE_INC_DIR_USED(d->mutex);
00781   dir_set_next(e, dir_next(b));
00782   dir_set_next(b, dir_to_offset(e, seg));
00783 Lfill:
00784   dir_assign_data(e, dir);
00785   dir_set_tag(e, t);
00786   ink_assert(vol_offset(d, e) < d->skip + d->len);
00787   DDebug("dir_overwrite",
00788         "overwrite %p %X into vol %d bucket %d at %p tag %X %X boffset %" PRId64 "",
00789          e, key->slice32(0), d->fd, bi, e, t, dir_tag(e), dir_offset(e));
00790   CHECK_DIR(d);
00791   d->header->dirty = 1;
00792   return res;
00793 }
00794 
00795 int
00796 dir_delete(CacheKey *key, Vol *d, Dir *del)
00797 {
00798   ink_assert(d->mutex->thread_holding == this_ethread());
00799   int s = key->slice32(0) % d->segments;
00800   int b = key->slice32(1) % d->buckets;
00801   Dir *seg = dir_segment(s, d);
00802   Dir *e = NULL, *p = NULL;
00803 #ifdef LOOP_CHECK_MODE
00804   int loop_count = 0;
00805 #endif
00806   Vol *vol = d;
00807   CHECK_DIR(d);
00808 
00809   e = dir_bucket(b, seg);
00810   if (dir_offset(e))
00811     do {
00812 #ifdef LOOP_CHECK_MODE
00813       loop_count++;
00814       if (loop_count > DIR_LOOP_THRESHOLD) {
00815         if (dir_bucket_loop_fix(dir_bucket(b, seg), s, d))
00816           return 0;
00817       }
00818 #endif
00819 #if TS_USE_INTERIM_CACHE == 1
00820       if (dir_compare_tag(e, key) && dir_get_offset(e) == dir_get_offset(del)) {
00821 #else
00822       if (dir_compare_tag(e, key) && dir_offset(e) == dir_offset(del)) {
00823 #endif
00824         CACHE_DEC_DIR_USED(d->mutex);
00825         dir_delete_entry(e, p, s, d);
00826         CHECK_DIR(d);
00827         return 1;
00828       }
00829       p = e;
00830       e = next_dir(e, seg);
00831     } while (e);
00832   CHECK_DIR(d);
00833   return 0;
00834 }
00835 
00836 // Lookaside Cache
00837 
00838 int
00839 dir_lookaside_probe(CacheKey *key, Vol *d, Dir *result, EvacuationBlock ** eblock)
00840 {
00841   ink_assert(d->mutex->thread_holding == this_ethread());
00842   int i = key->slice32(3) % LOOKASIDE_SIZE;
00843   EvacuationBlock *b = d->lookaside[i].head;
00844   while (b) {
00845     if (b->evac_frags.key == *key) {
00846       if (dir_valid(d, &b->new_dir)) {
00847         *result = b->new_dir;
00848         DDebug("dir_lookaside", "probe %X success", key->slice32(0));
00849         if (eblock)
00850           *eblock = b;
00851         return 1;
00852       }
00853     }
00854     b = b->link.next;
00855   }
00856   DDebug("dir_lookaside", "probe %X failed", key->slice32(0));
00857   return 0;
00858 }
00859 
00860 int
00861 dir_lookaside_insert(EvacuationBlock *eblock, Vol *d, Dir *to)
00862 {
00863   CacheKey *key = &eblock->evac_frags.earliest_key;
00864   DDebug("dir_lookaside", "insert %X %X, offset %d phase %d", key->slice32(0), key->slice32(1), (int) dir_offset(to), (int) dir_phase(to));
00865   ink_assert(d->mutex->thread_holding == this_ethread());
00866   int i = key->slice32(3) % LOOKASIDE_SIZE;
00867   EvacuationBlock *b = new_EvacuationBlock(d->mutex->thread_holding);
00868   b->evac_frags.key = *key;
00869   b->evac_frags.earliest_key = *key;
00870   b->earliest_evacuator = eblock->earliest_evacuator;
00871   ink_assert(b->earliest_evacuator);
00872   b->dir = eblock->dir;
00873   b->new_dir = *to;
00874   d->lookaside[i].push(b);
00875   return 1;
00876 }
00877 
00878 int
00879 dir_lookaside_fixup(CacheKey *key, Vol *d)
00880 {
00881   ink_assert(d->mutex->thread_holding == this_ethread());
00882   int i = key->slice32(3) % LOOKASIDE_SIZE;
00883   EvacuationBlock *b = d->lookaside[i].head;
00884   while (b) {
00885     if (b->evac_frags.key == *key) {
00886       int res = dir_overwrite(key, d, &b->new_dir, &b->dir, false);
00887       DDebug("dir_lookaside", "fixup %X %X offset %" PRId64" phase %d %d",
00888             key->slice32(0), key->slice32(1), dir_offset(&b->new_dir), dir_phase(&b->new_dir), res);
00889 #if TS_USE_INTERIM_CACHE == 1
00890       int64_t o = dir_get_offset(&b->dir), n = dir_get_offset(&b->new_dir);
00891 #else
00892       int64_t o = dir_offset(&b->dir), n = dir_offset(&b->new_dir);
00893 #endif
00894       d->ram_cache->fixup(key, (uint32_t)(o >> 32), (uint32_t)o, (uint32_t)(n >> 32), (uint32_t)n);
00895       d->lookaside[i].remove(b);
00896       free_EvacuationBlock(b, d->mutex->thread_holding);
00897       return res;
00898     }
00899     b = b->link.next;
00900   }
00901   DDebug("dir_lookaside", "fixup %X %X failed", key->slice32(0), key->slice32(1));
00902   return 0;
00903 }
00904 
00905 void
00906 dir_lookaside_cleanup(Vol *d)
00907 {
00908   ink_assert(d->mutex->thread_holding == this_ethread());
00909   for (int i = 0; i < LOOKASIDE_SIZE; i++) {
00910     EvacuationBlock *b = d->lookaside[i].head;
00911     while (b) {
00912       if (!dir_valid(d, &b->new_dir)) {
00913         EvacuationBlock *nb = b->link.next;
00914         DDebug("dir_lookaside", "cleanup %X %X cleaned up",
00915               b->evac_frags.earliest_key.slice32(0), b->evac_frags.earliest_key.slice32(1));
00916         d->lookaside[i].remove(b);
00917         free_CacheVC(b->earliest_evacuator);
00918         free_EvacuationBlock(b, d->mutex->thread_holding);
00919         b = nb;
00920         goto Lagain;
00921       }
00922       b = b->link.next;
00923     Lagain:;
00924     }
00925   }
00926 }
00927 
00928 void
00929 dir_lookaside_remove(CacheKey *key, Vol *d)
00930 {
00931   ink_assert(d->mutex->thread_holding == this_ethread());
00932   int i = key->slice32(3) % LOOKASIDE_SIZE;
00933   EvacuationBlock *b = d->lookaside[i].head;
00934   while (b) {
00935     if (b->evac_frags.key == *key) {
00936       DDebug("dir_lookaside", "remove %X %X offset %" PRId64" phase %d",
00937             key->slice32(0), key->slice32(1), dir_offset(&b->new_dir), dir_phase(&b->new_dir));
00938       d->lookaside[i].remove(b);
00939       free_EvacuationBlock(b, d->mutex->thread_holding);
00940       return;
00941     }
00942     b = b->link.next;
00943   }
00944   DDebug("dir_lookaside", "remove %X %X failed", key->slice32(0), key->slice32(1));
00945   return;
00946 }
00947 
00948 // Cache Sync
00949 //
00950 
00951 void
00952 dir_sync_init()
00953 {
00954   cacheDirSync = new CacheSync;
00955   cacheDirSync->trigger = eventProcessor.schedule_in(cacheDirSync, HRTIME_SECONDS(cache_config_dir_sync_frequency));
00956 }
00957 
00958 void
00959 CacheSync::aio_write(int fd, char *b, int n, off_t o)
00960 {
00961   io.aiocb.aio_fildes = fd;
00962   io.aiocb.aio_offset = o;
00963   io.aiocb.aio_nbytes = n;
00964   io.aiocb.aio_buf = b;
00965   io.action = this;
00966   io.thread = AIO_CALLBACK_THREAD_ANY;
00967   ink_assert(ink_aio_write(&io) >= 0);
00968 }
00969 
00970 uint64_t
00971 dir_entries_used(Vol *d)
00972 {
00973   uint64_t full = 0;
00974   uint64_t sfull = 0;
00975   for (int s = 0; s < d->segments; full += sfull, s++) {
00976     Dir *seg = dir_segment(s, d);
00977     sfull = 0;
00978     for (int b = 0; b < d->buckets; b++) {
00979       Dir *e = dir_bucket(b, seg);
00980       if (dir_bucket_loop_fix(e, s, d)) {
00981         sfull = 0;
00982         break;
00983       }
00984       while (e) {
00985         if (dir_offset(e))
00986           sfull++;
00987         e = next_dir(e, seg);
00988         if (!e)
00989           break;
00990       }
00991     }
00992   }
00993   return full;
00994 }
00995 
00996 /*
00997  * This function flushes the cache metadata to disk when
00998  * the cache is shut down. It must *NOT* be used during regular
00999  * operation.
01000  */
01001 
01002 void
01003 sync_cache_dir_on_shutdown(void)
01004 {
01005   Debug("cache_dir_sync", "sync started");
01006   char *buf = NULL;
01007   size_t buflen = 0;
01008 
01009   EThread *t = (EThread *) 0xdeadbeef;
01010   for (int i = 0; i < gnvol; i++) {
01011     // the process is going down, do a blocking call
01012     // don't release the volume's lock, there could
01013     // be another aggWrite in progress
01014     MUTEX_TAKE_LOCK(gvol[i]->mutex, t);
01015     Vol *d = gvol[i];
01016 
01017     if (DISK_BAD(d->disk)) {
01018       Debug("cache_dir_sync", "Dir %s: ignoring -- bad disk", d->hash_text.get());
01019       continue;
01020     }
01021     size_t dirlen = vol_dirlen(d);
01022     ink_assert(dirlen > 0); // make clang happy - if not > 0 the vol is seriously messed up
01023     if (!d->header->dirty && !d->dir_sync_in_progress) {
01024       Debug("cache_dir_sync", "Dir %s: ignoring -- not dirty", d->hash_text.get());
01025       continue;
01026     }
01027     // recompute hit_evacuate_window
01028     d->hit_evacuate_window = (d->data_blocks * cache_config_hit_evacuate_percent) / 100;
01029 
01030 
01031     // check if we have data in the agg buffer
01032     // don't worry about the CacheVCs in the agg queue
01033     // directories have not been inserted for these writes
01034     if (d->agg_buf_pos) {
01035       Debug("cache_dir_sync", "Dir %s: flushing agg buffer first", d->hash_text.get());
01036 
01037       // set write limit
01038       d->header->agg_pos = d->header->write_pos + d->agg_buf_pos;
01039 
01040       int r = pwrite(d->fd, d->agg_buffer, d->agg_buf_pos,
01041                      d->header->write_pos);
01042       if (r != d->agg_buf_pos) {
01043         ink_assert(!"flushing agg buffer failed");
01044         continue;
01045       }
01046       d->header->last_write_pos = d->header->write_pos;
01047       d->header->write_pos += d->agg_buf_pos;
01048       ink_assert(d->header->write_pos == d->header->agg_pos);
01049       d->agg_buf_pos = 0;
01050       d->header->write_serial++;
01051     }
01052 
01053 #if TS_USE_INTERIM_CACHE == 1
01054     for (int i = 0; i < d->num_interim_vols; i++) {
01055       InterimCacheVol *sv = &(d->interim_vols[i]);
01056       if (sv->agg_buf_pos) {
01057         Debug("cache_dir_sync", "Dir %s: flushing agg buffer first to interim", d->hash_text.get());
01058         sv->header->agg_pos = sv->header->write_pos + sv->agg_buf_pos;
01059 
01060         int r = pwrite(sv->fd, sv->agg_buffer, sv->agg_buf_pos, sv->header->write_pos);
01061         if (r != sv->agg_buf_pos) {
01062           ink_assert(!"flushing agg buffer failed to interim");
01063           continue;
01064         }
01065         sv->header->last_write_pos = sv->header->write_pos;
01066         sv->header->write_pos += sv->agg_buf_pos;
01067         ink_assert(sv->header->write_pos == sv->header->agg_pos);
01068         sv->agg_buf_pos = 0;
01069         sv->header->write_serial++;
01070       }
01071     }
01072 #endif
01073 
01074     if (buflen < dirlen) {
01075       if (buf)
01076         ats_memalign_free(buf);
01077       buf = (char *)ats_memalign(ats_pagesize(), dirlen);
01078       buflen = dirlen;
01079     }
01080 
01081     if (!d->dir_sync_in_progress) {
01082       d->header->sync_serial++;
01083     } else {
01084       Debug("cache_dir_sync", "Periodic dir sync in progress -- overwriting");
01085     }
01086     d->footer->sync_serial = d->header->sync_serial;
01087 
01088 #if TS_USE_INTERIM_CACHE == 1
01089     for (int j = 0; j < d->num_interim_vols; j++) {
01090       d->interim_vols[j].header->sync_serial = d->header->sync_serial;
01091     }
01092 #endif
01093     CHECK_DIR(d);
01094     memcpy(buf, d->raw_dir, dirlen);
01095     size_t B = d->header->sync_serial & 1;
01096     off_t start = d->skip + (B ? dirlen : 0);
01097     B = pwrite(d->fd, buf, dirlen, start);
01098     ink_assert(B == dirlen);
01099     Debug("cache_dir_sync", "done syncing dir for vol %s", d->hash_text.get());
01100   }
01101   Debug("cache_dir_sync", "sync done");
01102   if (buf)
01103     ats_memalign_free(buf);
01104 }
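// Illustrative note: both sync paths (sync_cache_dir_on_shutdown above and
// CacheSync::mainEvent below) keep two on-disk copies of the directory and alternate
// between them on the low bit of sync_serial, so a crash during one write still leaves
// the other copy intact. The target offset is computed as:
//
//   size_t B     = d->header->sync_serial & 1;          // 0 = first copy, 1 = second copy
//   off_t  start = d->skip + (B ? vol_dirlen(d) : 0);   // byte offset of that copy
//   // the vol_dirlen(d)-byte snapshot in 'buf' is then written starting at 'start'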
01105 
01106 
01107 
01108 int
01109 CacheSync::mainEvent(int event, Event *e)
01110 {
01111   if (trigger) {
01112     trigger->cancel_action();
01113     trigger = NULL;
01114   }
01115 
01116 Lrestart:
01117   if (vol >= gnvol) {
01118     vol = 0;
01119     if (buf) {
01120       ats_memalign_free(buf);
01121       buf = 0;
01122       buflen = 0;
01123     }
01124     Debug("cache_dir_sync", "sync done");
01125     if (event == EVENT_INTERVAL)
01126       trigger = e->ethread->schedule_in(this, HRTIME_SECONDS(cache_config_dir_sync_frequency));
01127     else
01128       trigger = eventProcessor.schedule_in(this, HRTIME_SECONDS(cache_config_dir_sync_frequency));
01129     return EVENT_CONT;
01130   }
01131   if (event == AIO_EVENT_DONE) {
01132     // AIO Thread
01133     if (io.aio_result != (int64_t)io.aiocb.aio_nbytes) {
01134       Warning("vol write error during directory sync '%s'", gvol[vol]->hash_text.get());
01135       event = EVENT_NONE;
01136       goto Ldone;
01137     }
01138     trigger = eventProcessor.schedule_in(this, SYNC_DELAY);
01139     return EVENT_CONT;
01140   }
01141   {
01142     CACHE_TRY_LOCK(lock, gvol[vol]->mutex, mutex->thread_holding);
01143     if (!lock) {
01144       trigger = eventProcessor.schedule_in(this, HRTIME_MSECONDS(cache_config_mutex_retry_delay));
01145       return EVENT_CONT;
01146     }
01147     Vol *d = gvol[vol];
01148 
01149     // recompute hit_evacuate_window
01150     d->hit_evacuate_window = (d->data_blocks * cache_config_hit_evacuate_percent) / 100;
01151 
01152     if (DISK_BAD(d->disk))
01153       goto Ldone;
01154 
01155     int headerlen = ROUND_TO_STORE_BLOCK(sizeof(VolHeaderFooter));
01156     size_t dirlen = vol_dirlen(d);
01157     if (!writepos) {
01158       // start
01159       Debug("cache_dir_sync", "sync started");
01160       /* Don't sync the directory to disk if it's not dirty. Syncing the
01161          clean directory to disk is also the cause of INKqa07151. Increasing
01162          the sync serial causes the cache to recover more data
01163          than necessary.
01164          The dirty bit is set in dir_insert, dir_overwrite and dir_delete_entry.
01165        */
01166       if (!d->header->dirty) {
01167         Debug("cache_dir_sync", "Dir %s not dirty", d->hash_text.get());
01168         goto Ldone;
01169       }
01170       if (d->is_io_in_progress() || d->agg_buf_pos) {
01171         Debug("cache_dir_sync", "Dir %s: waiting for agg buffer", d->hash_text.get());
01172         d->dir_sync_waiting = 1;
01173         if (!d->is_io_in_progress())
01174           d->aggWrite(EVENT_IMMEDIATE, 0);
01175 #if TS_USE_INTERIM_CACHE == 1
01176         for (int i = 0; i < d->num_interim_vols; i++) {
01177           if (!d->interim_vols[i].is_io_in_progress()) {
01178             d->interim_vols[i].sync = true;
01179             d->interim_vols[i].aggWrite(EVENT_IMMEDIATE, 0);
01180           }
01181         }
01182 #endif
01183         return EVENT_CONT;
01184       }
01185       Debug("cache_dir_sync", "pos: %" PRIu64 " Dir %s dirty...syncing to disk", d->header->write_pos, d->hash_text.get());
01186       d->header->dirty = 0;
01187       if (buflen < dirlen) {
01188         if (buf)
01189           ats_memalign_free(buf);
01190         buf = (char *)ats_memalign(ats_pagesize(), dirlen);
01191         buflen = dirlen;
01192       }
01193       d->header->sync_serial++;
01194       d->footer->sync_serial = d->header->sync_serial;
01195 #if TS_USE_INTERIM_CACHE == 1
01196       for (int j = 0; j < d->num_interim_vols; j++) {
01197           d->interim_vols[j].header->sync_serial = d->header->sync_serial;
01198       }
01199 #endif
01200       CHECK_DIR(d);
01201       memcpy(buf, d->raw_dir, dirlen);
01202       d->dir_sync_in_progress = 1;
01203     }
01204     size_t B = d->header->sync_serial & 1;
01205     off_t start = d->skip + (B ? dirlen : 0);
01206 
01207     if (!writepos) {
01208       // write header
01209       aio_write(d->fd, buf + writepos, headerlen, start + writepos);
01210       writepos += headerlen;
01211     } else if (writepos < (off_t)dirlen - headerlen) {
01212       // write part of body
01213       int l = SYNC_MAX_WRITE;
01214       if (writepos + l > (off_t)dirlen - headerlen)
01215         l = dirlen - headerlen - writepos;
01216       aio_write(d->fd, buf + writepos, l, start + writepos);
01217       writepos += l;
01218     } else if (writepos < (off_t)dirlen) {
01219       ink_assert(writepos == (off_t)dirlen - headerlen);
01220       // write footer
01221       aio_write(d->fd, buf + writepos, headerlen, start + writepos);
01222       writepos += headerlen;
01223     } else {
01224       d->dir_sync_in_progress = 0;
01225       goto Ldone;
01226     }
01227     return EVENT_CONT;
01228   }
01229 Ldone:
01230   // done
01231   writepos = 0;
01232   vol++;
01233   goto Lrestart;
01234 }
01235 
01236 //
01237 // Check
01238 //
01239 
01240 #define HIST_DEPTH 8
01241 int
01242 Vol::dir_check(bool /* fix ATS_UNUSED */) // TODO: we should eliminate this parameter ?
01243 {
01244   int hist[HIST_DEPTH + 1] = { 0 };
01245   int *shist = (int*)ats_malloc(segments * sizeof(int));
01246   memset(shist, 0, segments * sizeof(int));
01247   int j;
01248   int stale = 0, full = 0, empty = 0;
01249   int last = 0, free = 0;
01250   for (int s = 0; s < segments; s++) {
01251     Dir *seg = dir_segment(s, this);
01252     for (int b = 0; b < buckets; b++) {
01253       int h = 0;
01254       Dir *e = dir_bucket(b, seg);
01255       while (e) {
01256         if (!dir_offset(e))
01257           empty++;
01258         else {
01259           h++;
01260           if (!dir_valid(this, e))
01261             stale++;
01262           else
01263             full++;
01264         }
01265         e = next_dir(e, seg);
01266         if (!e)
01267           break;
01268       }
01269       if (h > HIST_DEPTH)
01270         h = HIST_DEPTH;
01271       hist[h]++;
01272     }
01273     int t = stale + full;
01274     shist[s] = t - last;
01275     last = t;
01276     free += dir_freelist_length(this, s);
01277   }
01278   int total = buckets * segments * DIR_DEPTH;
01279   printf("    Directory for [%s]\n", hash_text.get());
01280   printf("        Bytes:     %d\n", total * SIZEOF_DIR);
01281   printf("        Segments:  %" PRIu64 "\n", (uint64_t)segments);
01282   printf("        Buckets:   %" PRIu64 "\n", (uint64_t)buckets);
01283   printf("        Entries:   %d\n", total);
01284   printf("        Full:      %d\n", full);
01285   printf("        Empty:     %d\n", empty);
01286   printf("        Stale:     %d\n", stale);
01287   printf("        Free:      %d\n", free);
01288   printf("        Bucket Fullness:   ");
01289   for (j = 0; j < HIST_DEPTH; j++) {
01290     printf("%8d ", hist[j]);
01291     if ((j % 4 == 3))
01292       printf("\n" "                           ");
01293   }
01294   printf("\n");
01295   printf("        Segment Fullness:  ");
01296   for (j = 0; j < segments; j++) {
01297     printf("%5d ", shist[j]);
01298     if ((j % 5 == 4))
01299       printf("\n" "                           ");
01300   }
01301   printf("\n");
01302   printf("        Freelist Fullness: ");
01303   for (j = 0; j < segments; j++) {
01304     printf("%5d ", dir_freelist_length(this, j));
01305     if ((j % 5 == 4))
01306       printf("\n" "                           ");
01307   }
01308   printf("\n");
01309   ats_free(shist);
01310   return 0;
01311 }
01312 
01313 //
01314 // Static Tables
01315 //
01316 
01317 // permutation table
01318 uint8_t CacheKey_next_table[256] = {
01319   21, 53, 167, 51, 255, 126, 241, 151,
01320   115, 66, 155, 174, 226, 215, 80, 188,
01321   12, 95, 8, 24, 162, 201, 46, 104,
01322   79, 172, 39, 68, 56, 144, 142, 217,
01323   101, 62, 14, 108, 120, 90, 61, 47,
01324   132, 199, 110, 166, 83, 125, 57, 65,
01325   19, 130, 148, 116, 228, 189, 170, 1,
01326   71, 0, 252, 184, 168, 177, 88, 229,
01327   242, 237, 183, 55, 13, 212, 240, 81,
01328   211, 74, 195, 205, 147, 93, 30, 87,
01329   86, 63, 135, 102, 233, 106, 118, 163,
01330   107, 10, 243, 136, 160, 119, 43, 161,
01331   206, 141, 203, 78, 175, 36, 37, 140,
01332   224, 197, 185, 196, 248, 84, 122, 73,
01333   152, 157, 18, 225, 219, 145, 45, 2,
01334   171, 249, 173, 32, 143, 137, 69, 41,
01335   35, 89, 33, 98, 179, 214, 114, 231,
01336   251, 123, 180, 194, 29, 3, 178, 31,
01337   192, 164, 15, 234, 26, 230, 91, 156,
01338   5, 16, 23, 244, 58, 50, 4, 67,
01339   134, 165, 60, 235, 250, 7, 138, 216,
01340   49, 139, 191, 154, 11, 52, 239, 59,
01341   111, 245, 9, 64, 25, 129, 247, 232,
01342   190, 246, 109, 22, 112, 210, 221, 181,
01343   92, 169, 48, 100, 193, 77, 103, 133,
01344   70, 220, 207, 223, 176, 204, 76, 186,
01345   200, 208, 158, 182, 227, 222, 131, 38,
01346   187, 238, 6, 34, 253, 128, 146, 44,
01347   94, 127, 105, 153, 113, 20, 27, 124,
01348   159, 17, 72, 218, 96, 149, 213, 42,
01349   28, 254, 202, 40, 117, 82, 97, 209,
01350   54, 236, 121, 75, 85, 150, 99, 198,
01351 };
01352 
01353 // permutation table
01354 uint8_t CacheKey_prev_table[256] = {
01355   57, 55, 119, 141, 158, 152, 218, 165,
01356   18, 178, 89, 172, 16, 68, 34, 146,
01357   153, 233, 114, 48, 229, 0, 187, 154,
01358   19, 180, 148, 230, 240, 140, 78, 143,
01359   123, 130, 219, 128, 101, 102, 215, 26,
01360   243, 127, 239, 94, 223, 118, 22, 39,
01361   194, 168, 157, 3, 173, 1, 248, 67,
01362   28, 46, 156, 175, 162, 38, 33, 81,
01363   179, 47, 9, 159, 27, 126, 200, 56,
01364   234, 111, 73, 251, 206, 197, 99, 24,
01365   14, 71, 245, 44, 109, 252, 80, 79,
01366   62, 129, 37, 150, 192, 77, 224, 17,
01367   236, 246, 131, 254, 195, 32, 83, 198,
01368   23, 226, 85, 88, 35, 186, 42, 176,
01369   188, 228, 134, 8, 51, 244, 86, 93,
01370   36, 250, 110, 137, 231, 45, 5, 225,
01371   221, 181, 49, 214, 40, 199, 160, 82,
01372   91, 125, 166, 169, 103, 97, 30, 124,
01373   29, 117, 222, 76, 50, 237, 253, 7,
01374   112, 227, 171, 10, 151, 113, 210, 232,
01375   92, 95, 20, 87, 145, 161, 43, 2,
01376   60, 193, 54, 120, 25, 122, 11, 100,
01377   204, 61, 142, 132, 138, 191, 211, 66,
01378   59, 106, 207, 216, 15, 53, 184, 170,
01379   144, 196, 139, 74, 107, 105, 255, 41,
01380   208, 21, 242, 98, 205, 75, 96, 202,
01381   209, 247, 189, 72, 69, 238, 133, 13,
01382   167, 31, 235, 116, 201, 190, 213, 203,
01383   104, 115, 12, 212, 52, 63, 149, 135,
01384   183, 84, 147, 163, 249, 65, 217, 174,
01385   70, 6, 64, 90, 155, 177, 185, 182,
01386   108, 121, 164, 136, 58, 220, 241, 4,
01387 };
01388 
01389 //
01390 // Regression
01391 //
01392 unsigned int regress_rand_seed = 0;
01393 void
01394 regress_rand_init(unsigned int i)
01395 {
01396   regress_rand_seed = i;
01397 }
01398 
01399 void
01400 regress_rand_CacheKey(CacheKey *key)
01401 {
01402   unsigned int *x = (unsigned int *) key;
01403   for (int i = 0; i < 4; i++)
01404     x[i] = next_rand(&regress_rand_seed);
01405 }
01406 
01407 void
01408 dir_corrupt_bucket(Dir *b, int s, Vol *d)
01409 {
01410   // coverity[secure_coding]
01411   int l = ((int) (dir_bucket_length(b, s, d) * drand48()));
01412   Dir *e = b;
01413   Dir *seg = dir_segment(s, d);
01414   for (int i = 0; i < l; i++) {
01415     ink_release_assert(e);
01416     e = next_dir(e, seg);
01417   }
01418   dir_set_next(e, dir_to_offset(e, seg));
01419 }
01420 
01421 EXCLUSIVE_REGRESSION_TEST(Cache_dir) (RegressionTest *t, int /* atype ATS_UNUSED */, int *status) {
01422   ink_hrtime ttime;
01423   int ret = REGRESSION_TEST_PASSED;
01424 
01425   if ((CacheProcessor::IsCacheEnabled() != CACHE_INITIALIZED) || gnvol < 1) {
01426     rprintf(t, "cache not ready/configured");
01427     *status = REGRESSION_TEST_FAILED;
01428     return;
01429   }
01430   Vol *d = gvol[0];
01431   EThread *thread = this_ethread();
01432   MUTEX_TRY_LOCK(lock, d->mutex, thread);
01433   ink_release_assert(lock);
01434   rprintf(t, "clearing vol 0\n");
01435   vol_dir_clear(d);
01436 
01437   // coverity[var_decl]
01438   Dir dir;
01439   dir_clear(&dir);
01440   dir_set_phase(&dir, 0);
01441   dir_set_head(&dir, true);
01442   dir_set_offset(&dir, 1);
01443 
01444   d->header->agg_pos = d->header->write_pos += 1024;
01445 
01446   CacheKey key;
01447   rand_CacheKey(&key, thread->mutex);
01448 
01449   int s = key.slice32(0) % d->segments, i, j;
01450   Dir *seg = dir_segment(s, d);
01451 
01452   // test insert
01453   rprintf(t, "insert test\n");
01454   int inserted = 0;
01455   int free = dir_freelist_length(d, s);
01456   int n = free;
01457   rprintf(t, "free: %d\n", free);
01458   while (n--) {
01459     if (!dir_insert(&key, d, &dir))
01460       break;
01461     inserted++;
01462   }
01463   rprintf(t, "inserted: %d\n", inserted);
01464   if ((unsigned int) (inserted - free) > 1)
01465     ret = REGRESSION_TEST_FAILED;
01466 
01467   // test delete
01468   rprintf(t, "delete test\n");
01469   for (i = 0; i < d->buckets; i++)
01470     for (j = 0; j < DIR_DEPTH; j++)
01471       dir_set_offset(dir_bucket_row(dir_bucket(i, seg), j), 0); // delete
01472   dir_clean_segment(s, d);
01473   int newfree = dir_freelist_length(d, s);
01474   rprintf(t, "newfree: %d\n", newfree);
01475   if ((unsigned int) (newfree - free) > 1)
01476     ret = REGRESSION_TEST_FAILED;
01477 
01478   // test insert-delete
01479   rprintf(t, "insert-delete test\n");
01480   regress_rand_init(13);
01481   ttime = ink_get_hrtime_internal();
01482   for (i = 0; i < newfree; i++) {
01483     regress_rand_CacheKey(&key);
01484     dir_insert(&key, d, &dir);
01485   }
01486   uint64_t us = (ink_get_hrtime_internal() - ttime) / HRTIME_USECOND;
01487   // On Windows, us is sometimes 0. I don't know why.
01488   // Print out the insert rate only if it's not 0.
01489   if (us)
01490     rprintf(t, "insert rate = %d / second\n", (int) ((newfree * (uint64_t) 1000000) / us));
01491   regress_rand_init(13);
01492   ttime = ink_get_hrtime_internal();
01493   for (i = 0; i < newfree; i++) {
01494     Dir *last_collision = 0;
01495     regress_rand_CacheKey(&key);
01496     if (!dir_probe(&key, d, &dir, &last_collision))
01497       ret = REGRESSION_TEST_FAILED;
01498   }
01499   us = (ink_get_hrtime_internal() - ttime) / HRTIME_USECOND;
01500   // On Windows, us is sometimes 0. I don't know why.
01501   // Print out the probe rate only if it's not 0.
01502   if (us)
01503     rprintf(t, "probe rate = %d / second\n", (int) ((newfree * (uint64_t) 1000000) / us));
01504 
01505 
01506   for (int c = 0; c < vol_direntries(d) * 0.75; c++) {
01507     regress_rand_CacheKey(&key);
01508     dir_insert(&key, d, &dir);
01509   }
01510 
01511 
01512   Dir dir1;
01513   memset(&dir1, 0, sizeof(dir1));
01514   int s1, b1;
01515 
01516   rprintf(t, "corrupt_bucket test\n");
01517   for (int ntimes = 0; ntimes < 10; ntimes++) {
01518 #ifdef LOOP_CHECK_MODE
01519     // dir_probe in bucket with loop
01520     rand_CacheKey(&key, thread->mutex);
01521     s1 = key.slice32(0) % d->segments;
01522     b1 = key.slice32(1) % d->buckets;
01523     dir_corrupt_bucket(dir_bucket(b1, dir_segment(s1, d)), s1, d);
01524     dir_insert(&key, d, &dir);
01525     Dir *last_collision = 0;
01526     dir_probe(&key, d, &dir, &last_collision);
01527 
01528 
01529     rand_CacheKey(&key, thread->mutex);
01530     s1 = key.slice32(0) % d->segments;
01531     b1 = key.slice32(1) % d->buckets;
01532     dir_corrupt_bucket(dir_bucket(b1, dir_segment(s1, d)), s1, d);
01533 
01534     last_collision = 0;
01535     dir_probe(&key, d, &dir, &last_collision);
01536 
01537     // dir_overwrite in bucket with loop
01538     rand_CacheKey(&key, thread->mutex);
01539     s1 = key.slice32(0) % d->segments;
01540     b1 = key.slice32(1) % d->buckets;
01541     CacheKey key1;
01542     key1.b[1] = 127;
01543     dir1 = dir;
01544     dir_set_offset(&dir1, 23);
01545     dir_insert(&key1, d, &dir1);
01546     dir_insert(&key, d, &dir);
01547     key1.b[1] = 80;
01548     dir_insert(&key1, d, &dir1);
01549     dir_corrupt_bucket(dir_bucket(b1, dir_segment(s1, d)), s1, d);
01550     dir_overwrite(&key, d, &dir, &dir, 1);
01551 
01552     rand_CacheKey(&key, thread->mutex);
01553     s1 = key.slice32(0) % d->segments;
01554     b1 = key.slice32(1) % d->buckets;
01555     key.b[1] = 23;
01556     dir_insert(&key, d, &dir1);
01557     dir_corrupt_bucket(dir_bucket(b1, dir_segment(s1, d)), s1, d);
01558     dir_overwrite(&key, d, &dir, &dir, 0);
01559 
01560     rand_CacheKey(&key, thread->mutex);
01561     s1 = key.slice32(0) % d->segments;
01562     Dir *seg1 = dir_segment(s1, d);
01563     // dir_freelist_length in freelist with loop
01564     dir_corrupt_bucket(dir_from_offset(d->header->freelist[s], seg1), s1, d);
01565     dir_freelist_length(d, s1);
01566 
01567     rand_CacheKey(&key, thread->mutex);
01568     s1 = key.slice32(0) % d->segments;
01569     b1 = key.slice32(1) % d->buckets;
01570     // dir_bucket_length in bucket with loop
01571     dir_corrupt_bucket(dir_bucket(b1, dir_segment(s1, d)), s1, d);
01572     dir_bucket_length(dir_bucket(b1, dir_segment(s1, d)), s1, d);
01573     if (!check_dir(d))
01574       ret = REGRESSION_TEST_FAILED;
01575 #else
01576     // test corruption detection
01577     rand_CacheKey(&key, thread->mutex);
01578     s1 = key.slice32(0) % d->segments;
01579     b1 = key.slice32(1) % d->buckets;
01580 
01581     dir_insert(&key, d, &dir1);
01582     dir_insert(&key, d, &dir1);
01583     dir_insert(&key, d, &dir1);
01584     dir_insert(&key, d, &dir1);
01585     dir_insert(&key, d, &dir1);
01586     dir_corrupt_bucket(dir_bucket(b1, dir_segment(s1, d)), s1, d);
01587     if (check_dir(d))
01588       ret = REGRESSION_TEST_FAILED;
01589 #endif
01590   }
01591   vol_dir_clear(d);
01592   *status = ret;
01593 }
