  1. /*
  2. * Copyright 2019 Dgraph Labs, Inc. and Contributors
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. // Ristretto is a fast, fixed size, in-memory cache with a dual focus on
  17. // throughput and hit ratio performance. You can easily add Ristretto to an
  18. // existing system and keep the most valuable data where you need it.
  19. package ristretto
  20. import (
  21. "bytes"
  22. "errors"
  23. "fmt"
  24. "sync"
  25. "sync/atomic"
  26. "time"
  27. "unsafe"
  28. "github.com/dgraph-io/ristretto/z"
  29. )
var (
	// setBufSize is the capacity of the buffered setBuf channel, which lets
	// Sets be batched (and dropped under contention) before processItems
	// applies them.
	// TODO: find the optimal value for this or make it configurable
	setBufSize = 32 * 1024
)
// itemCallback is the signature shared by the eviction and rejection hooks.
type itemCallback func(*Item)

// itemSize is the in-memory overhead of one storeItem; it is added to an
// item's cost unless Config.IgnoreInternalCost is set.
const itemSize = int64(unsafe.Sizeof(storeItem{}))
// Cache is a thread-safe implementation of a hashmap with a TinyLFU admission
// policy and a Sampled LFU eviction policy. You can use the same Cache instance
// from as many goroutines as you want.
type Cache struct {
	// store is the central concurrent hashmap where key-value items are stored.
	store store
	// policy determines what gets let in to the cache and what gets kicked out.
	policy policy
	// getBuf is a custom ring buffer implementation that gets pushed to when
	// keys are read.
	getBuf *ringBuffer
	// setBuf is a buffer allowing us to batch/drop Sets during times of high
	// contention. It is consumed exclusively by the processItems goroutine.
	setBuf chan *Item
	// onEvict is called for item evictions.
	onEvict itemCallback
	// onReject is called when an item is rejected via admission policy.
	onReject itemCallback
	// onExit is called whenever a value goes out of scope from the cache.
	onExit (func(interface{}))
	// KeyToHash function is used to customize the key hashing algorithm.
	// Each key will be hashed using the provided function. If keyToHash value
	// is not set, the default keyToHash function is used.
	keyToHash func(interface{}) (uint64, uint64)
	// stop is used to stop the processItems goroutine.
	stop chan struct{}
	// isClosed indicates whether cache is closed.
	// NOTE(review): read/written without synchronization by Get/Set/Del/Close —
	// appears to rely on callers not racing Close with other operations; confirm.
	isClosed bool
	// cost calculates cost from a value when Set is called with a cost of 0.
	cost func(value interface{}) int64
	// ignoreInternalCost dictates whether to ignore the cost of internally storing
	// the item in the cost calculation.
	ignoreInternalCost bool
	// cleanupTicker is used to periodically check for entries whose TTL has passed.
	cleanupTicker *time.Ticker
	// Metrics contains a running log of important statistics like hits, misses,
	// and dropped items.
	Metrics *Metrics
}
// Config is passed to NewCache for creating new Cache instances.
type Config struct {
	// NumCounters determines the number of counters (keys) to keep that hold
	// access frequency information. It's generally a good idea to have more
	// counters than the max cache capacity, as this will improve eviction
	// accuracy and subsequent hit ratios.
	//
	// For example, if you expect your cache to hold 1,000,000 items when full,
	// NumCounters should be 10,000,000 (10x). Each counter takes up roughly
	// 3 bytes (4 bits for each counter * 4 copies plus about a byte per
	// counter for the bloom filter). Note that the number of counters is
	// internally rounded up to the nearest power of 2, so the space usage
	// may be a little larger than 3 bytes * NumCounters.
	NumCounters int64
	// MaxCost can be considered as the cache capacity, in whatever units you
	// choose to use.
	//
	// For example, if you want the cache to have a max capacity of 100MB, you
	// would set MaxCost to 100,000,000 and pass an item's number of bytes as
	// the `cost` parameter for calls to Set. If new items are accepted, the
	// eviction process will take care of making room for the new item and not
	// overflowing the MaxCost value.
	MaxCost int64
	// BufferItems determines the size of Get buffers.
	//
	// Unless you have a rare use case, using `64` as the BufferItems value
	// results in good performance.
	BufferItems int64
	// Metrics determines whether cache statistics are kept during the cache's
	// lifetime. There *is* some overhead to keeping statistics, so you should
	// only set this flag to true when testing or throughput performance isn't a
	// major factor.
	Metrics bool
	// OnEvict is called for every eviction and passes the hashed key, value,
	// and cost to the function.
	OnEvict func(item *Item)
	// OnReject is called for every rejection done via the policy.
	OnReject func(item *Item)
	// OnExit is called whenever a value is removed from cache. This can be
	// used to do manual memory deallocation. Would also be called on eviction
	// and rejection of the value.
	OnExit func(val interface{})
	// KeyToHash function is used to customize the key hashing algorithm.
	// Each key will be hashed using the provided function. If keyToHash value
	// is not set, the default keyToHash function is used.
	KeyToHash func(key interface{}) (uint64, uint64)
	// Cost evaluates a value and outputs a corresponding cost. This function
	// is ran after Set is called for a new item or an item update with a cost
	// param of 0.
	Cost func(value interface{}) int64
	// IgnoreInternalCost set to true indicates to the cache that the cost of
	// internally storing the value should be ignored. This is useful when the
	// cost passed to set is not using bytes as units. Keep in mind that setting
	// this to true will increase the memory usage.
	IgnoreInternalCost bool
}
// itemFlag describes what processItems should do with a buffered Item.
type itemFlag byte

const (
	// itemNew marks a brand-new key that must pass the admission policy.
	itemNew itemFlag = iota
	// itemDelete marks a key to remove from both policy and store.
	itemDelete
	// itemUpdate marks a key whose value was already updated in the store.
	itemUpdate
)
// Item is passed to setBuf so items can eventually be added to the cache.
type Item struct {
	// flag tells processItems whether this is an add, update, or delete.
	flag itemFlag
	// Key is the hashed key.
	Key uint64
	// Conflict is a second hash used to detect collisions on Key.
	Conflict uint64
	// Value is the user-supplied value.
	Value interface{}
	// Cost is the cost of storing this item (itemSize may be added on top;
	// see processItems).
	Cost int64
	// Expiration is when this item's TTL lapses; the zero time means no TTL.
	Expiration time.Time
	// wg, when non-nil, marks a Wait() sentinel: processItems calls Done on it
	// and skips all other handling.
	wg *sync.WaitGroup
}
  147. // NewCache returns a new Cache instance and any configuration errors, if any.
  148. func NewCache(config *Config) (*Cache, error) {
  149. switch {
  150. case config.NumCounters == 0:
  151. return nil, errors.New("NumCounters can't be zero")
  152. case config.MaxCost == 0:
  153. return nil, errors.New("MaxCost can't be zero")
  154. case config.BufferItems == 0:
  155. return nil, errors.New("BufferItems can't be zero")
  156. }
  157. policy := newPolicy(config.NumCounters, config.MaxCost)
  158. cache := &Cache{
  159. store: newStore(),
  160. policy: policy,
  161. getBuf: newRingBuffer(policy, config.BufferItems),
  162. setBuf: make(chan *Item, setBufSize),
  163. keyToHash: config.KeyToHash,
  164. stop: make(chan struct{}),
  165. cost: config.Cost,
  166. ignoreInternalCost: config.IgnoreInternalCost,
  167. cleanupTicker: time.NewTicker(time.Duration(bucketDurationSecs) * time.Second / 2),
  168. }
  169. cache.onExit = func(val interface{}) {
  170. if config.OnExit != nil && val != nil {
  171. config.OnExit(val)
  172. }
  173. }
  174. cache.onEvict = func(item *Item) {
  175. if config.OnEvict != nil {
  176. config.OnEvict(item)
  177. }
  178. cache.onExit(item.Value)
  179. }
  180. cache.onReject = func(item *Item) {
  181. if config.OnReject != nil {
  182. config.OnReject(item)
  183. }
  184. cache.onExit(item.Value)
  185. }
  186. if cache.keyToHash == nil {
  187. cache.keyToHash = z.KeyToHash
  188. }
  189. if config.Metrics {
  190. cache.collectMetrics()
  191. }
  192. // NOTE: benchmarks seem to show that performance decreases the more
  193. // goroutines we have running cache.processItems(), so 1 should
  194. // usually be sufficient
  195. go cache.processItems()
  196. return cache, nil
  197. }
  198. func (c *Cache) Wait() {
  199. if c == nil || c.isClosed {
  200. return
  201. }
  202. wg := &sync.WaitGroup{}
  203. wg.Add(1)
  204. c.setBuf <- &Item{wg: wg}
  205. wg.Wait()
  206. }
  207. // Get returns the value (if any) and a boolean representing whether the
  208. // value was found or not. The value can be nil and the boolean can be true at
  209. // the same time.
  210. func (c *Cache) Get(key interface{}) (interface{}, bool) {
  211. if c == nil || c.isClosed || key == nil {
  212. return nil, false
  213. }
  214. keyHash, conflictHash := c.keyToHash(key)
  215. c.getBuf.Push(keyHash)
  216. value, ok := c.store.Get(keyHash, conflictHash)
  217. if ok {
  218. c.Metrics.add(hit, keyHash, 1)
  219. } else {
  220. c.Metrics.add(miss, keyHash, 1)
  221. }
  222. return value, ok
  223. }
  224. // Set attempts to add the key-value item to the cache. If it returns false,
  225. // then the Set was dropped and the key-value item isn't added to the cache. If
  226. // it returns true, there's still a chance it could be dropped by the policy if
  227. // its determined that the key-value item isn't worth keeping, but otherwise the
  228. // item will be added and other items will be evicted in order to make room.
  229. //
  230. // To dynamically evaluate the items cost using the Config.Coster function, set
  231. // the cost parameter to 0 and Coster will be ran when needed in order to find
  232. // the items true cost.
  233. func (c *Cache) Set(key, value interface{}, cost int64) bool {
  234. return c.SetWithTTL(key, value, cost, 0*time.Second)
  235. }
  236. // SetWithTTL works like Set but adds a key-value pair to the cache that will expire
  237. // after the specified TTL (time to live) has passed. A zero value means the value never
  238. // expires, which is identical to calling Set. A negative value is a no-op and the value
  239. // is discarded.
  240. func (c *Cache) SetWithTTL(key, value interface{}, cost int64, ttl time.Duration) bool {
  241. if c == nil || c.isClosed || key == nil {
  242. return false
  243. }
  244. var expiration time.Time
  245. switch {
  246. case ttl == 0:
  247. // No expiration.
  248. break
  249. case ttl < 0:
  250. // Treat this a a no-op.
  251. return false
  252. default:
  253. expiration = time.Now().Add(ttl)
  254. }
  255. keyHash, conflictHash := c.keyToHash(key)
  256. i := &Item{
  257. flag: itemNew,
  258. Key: keyHash,
  259. Conflict: conflictHash,
  260. Value: value,
  261. Cost: cost,
  262. Expiration: expiration,
  263. }
  264. // cost is eventually updated. The expiration must also be immediately updated
  265. // to prevent items from being prematurely removed from the map.
  266. if prev, ok := c.store.Update(i); ok {
  267. c.onExit(prev)
  268. i.flag = itemUpdate
  269. }
  270. // Attempt to send item to policy.
  271. select {
  272. case c.setBuf <- i:
  273. return true
  274. default:
  275. if i.flag == itemUpdate {
  276. // Return true if this was an update operation since we've already
  277. // updated the store. For all the other operations (set/delete), we
  278. // return false which means the item was not inserted.
  279. return true
  280. }
  281. c.Metrics.add(dropSets, keyHash, 1)
  282. return false
  283. }
  284. }
  285. // Del deletes the key-value item from the cache if it exists.
  286. func (c *Cache) Del(key interface{}) {
  287. if c == nil || c.isClosed || key == nil {
  288. return
  289. }
  290. keyHash, conflictHash := c.keyToHash(key)
  291. // Delete immediately.
  292. _, prev := c.store.Del(keyHash, conflictHash)
  293. c.onExit(prev)
  294. // If we've set an item, it would be applied slightly later.
  295. // So we must push the same item to `setBuf` with the deletion flag.
  296. // This ensures that if a set is followed by a delete, it will be
  297. // applied in the correct order.
  298. c.setBuf <- &Item{
  299. flag: itemDelete,
  300. Key: keyHash,
  301. Conflict: conflictHash,
  302. }
  303. }
  304. // GetTTL returns the TTL for the specified key and a bool that is true if the
  305. // item was found and is not expired.
  306. func (c *Cache) GetTTL(key interface{}) (time.Duration, bool) {
  307. if c == nil || key == nil {
  308. return 0, false
  309. }
  310. keyHash, conflictHash := c.keyToHash(key)
  311. if _, ok := c.store.Get(keyHash, conflictHash); !ok {
  312. // not found
  313. return 0, false
  314. }
  315. expiration := c.store.Expiration(keyHash)
  316. if expiration.IsZero() {
  317. // found but no expiration
  318. return 0, true
  319. }
  320. if time.Now().After(expiration) {
  321. // found but expired
  322. return 0, false
  323. }
  324. return time.Until(expiration), true
  325. }
  326. // Close stops all goroutines and closes all channels.
  327. func (c *Cache) Close() {
  328. if c == nil || c.isClosed {
  329. return
  330. }
  331. c.Clear()
  332. // Block until processItems goroutine is returned.
  333. c.stop <- struct{}{}
  334. close(c.stop)
  335. close(c.setBuf)
  336. c.policy.Close()
  337. c.isClosed = true
  338. }
// Clear empties the hashmap and zeroes all policy counters. Note that this is
// not an atomic operation (but that shouldn't be a problem as it's assumed that
// Set/Get calls won't be occurring until after this).
func (c *Cache) Clear() {
	if c == nil || c.isClosed {
		return
	}
	// Block until processItems goroutine is returned. It must not be running
	// while we drain setBuf below, since it is the channel's only consumer.
	c.stop <- struct{}{}
	// Clear out the setBuf channel.
loop:
	for {
		select {
		case i := <-c.setBuf:
			if i.wg != nil {
				// A Wait() sentinel; release the waiter instead of dropping it.
				i.wg.Done()
				continue
			}
			if i.flag != itemUpdate {
				// In itemUpdate, the value is already set in the store. So, no need to call
				// onEvict here.
				c.onEvict(i)
			}
		default:
			// Channel drained.
			break loop
		}
	}
	// Clear value hashmap and policy data.
	c.policy.Clear()
	c.store.Clear(c.onEvict)
	// Only reset metrics if they're enabled.
	if c.Metrics != nil {
		c.Metrics.Clear()
	}
	// Restart processItems goroutine.
	go c.processItems()
}
// MaxCost returns the max cost of the cache, as configured or later adjusted
// via UpdateMaxCost. Returns 0 for a nil receiver.
func (c *Cache) MaxCost() int64 {
	if c == nil {
		return 0
	}
	return c.policy.MaxCost()
}
// UpdateMaxCost updates the maxCost of an existing cache. A no-op on a nil
// receiver.
func (c *Cache) UpdateMaxCost(maxCost int64) {
	if c == nil {
		return
	}
	c.policy.UpdateMaxCost(maxCost)
}
// processItems is ran by goroutines processing the Set buffer.
//
// It is the sole consumer of setBuf: policy admission, store writes for new
// keys, deletions, and periodic TTL cleanup are all serialized through this
// loop. It exits when a value is received on c.stop.
func (c *Cache) processItems() {
	// startTs records the admission time per key so evictions can report how
	// long the key lived. Only this goroutine touches it, so no lock is needed.
	startTs := make(map[uint64]time.Time)
	numToKeep := 100000 // TODO: Make this configurable via options.
	trackAdmission := func(key uint64) {
		if c.Metrics == nil {
			return
		}
		startTs[key] = time.Now()
		if len(startTs) > numToKeep {
			// Over budget: drop arbitrary entries (map iteration order)
			// until we are back at the cap.
			for k := range startTs {
				if len(startTs) <= numToKeep {
					break
				}
				delete(startTs, k)
			}
		}
	}
	onEvict := func(i *Item) {
		// Record the key's lifetime in whole seconds if we saw its admission,
		// then forward to the user-facing eviction callback.
		if ts, has := startTs[i.Key]; has {
			c.Metrics.trackEviction(int64(time.Since(ts) / time.Second))
			delete(startTs, i.Key)
		}
		if c.onEvict != nil {
			c.onEvict(i)
		}
	}
	for {
		select {
		case i := <-c.setBuf:
			// A Wait() sentinel: acknowledge it and move on.
			if i.wg != nil {
				i.wg.Done()
				continue
			}
			// Calculate item cost value if new or update.
			if i.Cost == 0 && c.cost != nil && i.flag != itemDelete {
				i.Cost = c.cost(i.Value)
			}
			if !c.ignoreInternalCost {
				// Add the cost of internally storing the object.
				i.Cost += itemSize
			}
			switch i.flag {
			case itemNew:
				victims, added := c.policy.Add(i.Key, i.Cost)
				if added {
					c.store.Set(i)
					c.Metrics.add(keyAdd, i.Key, 1)
					trackAdmission(i.Key)
				} else {
					c.onReject(i)
				}
				for _, victim := range victims {
					// NOTE(review): conflict hash 0 appears to mean "delete
					// regardless of conflict" in store.Del — confirm.
					victim.Conflict, victim.Value = c.store.Del(victim.Key, 0)
					onEvict(victim)
				}
			case itemUpdate:
				// The store was already updated in SetWithTTL; only the
				// policy's cost bookkeeping needs adjusting here.
				c.policy.Update(i.Key, i.Cost)
			case itemDelete:
				c.policy.Del(i.Key) // Deals with metrics updates.
				_, val := c.store.Del(i.Key, i.Conflict)
				c.onExit(val)
			}
		case <-c.cleanupTicker.C:
			// Periodically purge entries whose TTL has passed.
			c.store.Cleanup(c.policy, onEvict)
		case <-c.stop:
			return
		}
	}
}
// collectMetrics just creates a new *Metrics instance and adds the pointers
// to the cache and policy instances. Called from NewCache when Config.Metrics
// is true.
func (c *Cache) collectMetrics() {
	c.Metrics = newMetrics()
	c.policy.CollectMetrics(c.Metrics)
}
// metricType identifies one of the counters tracked by Metrics.
type metricType int

const (
	// The following 2 keep track of hits and misses.
	hit = iota
	miss
	// The following 3 keep track of number of keys added, updated and evicted.
	keyAdd
	keyUpdate
	keyEvict
	// The following 2 keep track of cost of keys added and evicted.
	costAdd
	costEvict
	// The following keep track of how many sets were dropped or rejected later.
	dropSets
	rejectSets
	// The following 2 keep track of how many gets were kept and dropped on the
	// floor.
	dropGets
	keepGets
	// This should be the final enum. Other enums should be set before this.
	// It doubles as the number of metric types (sizes Metrics.all).
	doNotUse
)
  488. func stringFor(t metricType) string {
  489. switch t {
  490. case hit:
  491. return "hit"
  492. case miss:
  493. return "miss"
  494. case keyAdd:
  495. return "keys-added"
  496. case keyUpdate:
  497. return "keys-updated"
  498. case keyEvict:
  499. return "keys-evicted"
  500. case costAdd:
  501. return "cost-added"
  502. case costEvict:
  503. return "cost-evicted"
  504. case dropSets:
  505. return "sets-dropped"
  506. case rejectSets:
  507. return "sets-rejected" // by policy.
  508. case dropGets:
  509. return "gets-dropped"
  510. case keepGets:
  511. return "gets-kept"
  512. default:
  513. return "unidentified"
  514. }
  515. }
// Metrics is a snapshot of performance statistics for the lifetime of a cache instance.
type Metrics struct {
	// all holds, per metric type, a slice of 256 striped atomic counters;
	// see add for the striping scheme and get for the summation.
	all [doNotUse][]*uint64

	// mu guards life.
	mu sync.RWMutex
	// life tracks the life expectancy of a key, in seconds.
	life *z.HistogramData
}
  522. func newMetrics() *Metrics {
  523. s := &Metrics{
  524. life: z.NewHistogramData(z.HistogramBounds(1, 16)),
  525. }
  526. for i := 0; i < doNotUse; i++ {
  527. s.all[i] = make([]*uint64, 256)
  528. slice := s.all[i]
  529. for j := range slice {
  530. slice[j] = new(uint64)
  531. }
  532. }
  533. return s
  534. }
// add atomically increments the counter for metric t by delta. The hash picks
// one of several striped counters so concurrent writers rarely contend on the
// same cache line. A no-op on a nil receiver.
func (p *Metrics) add(t metricType, hash, delta uint64) {
	if p == nil {
		return
	}
	valp := p.all[t]
	// Avoid false sharing by padding at least 64 bytes of space between two
	// atomic counters which would be incremented.
	// (hash%25 selects one of 25 stripes; *10 spaces stripes 10 uint64
	// slots = 80 bytes apart, within the 256-slot bank.)
	idx := (hash % 25) * 10
	atomic.AddUint64(valp[idx], delta)
}
  545. func (p *Metrics) get(t metricType) uint64 {
  546. if p == nil {
  547. return 0
  548. }
  549. valp := p.all[t]
  550. var total uint64
  551. for i := range valp {
  552. total += atomic.LoadUint64(valp[i])
  553. }
  554. return total
  555. }
// Hits is the number of Get calls where a value was found for the corresponding key.
func (p *Metrics) Hits() uint64 {
	return p.get(hit)
}

// Misses is the number of Get calls where a value was not found for the corresponding key.
func (p *Metrics) Misses() uint64 {
	return p.get(miss)
}

// KeysAdded is the total number of Set calls where a new key-value item was added.
func (p *Metrics) KeysAdded() uint64 {
	return p.get(keyAdd)
}

// KeysUpdated is the total number of Set calls where the value was updated.
func (p *Metrics) KeysUpdated() uint64 {
	return p.get(keyUpdate)
}

// KeysEvicted is the total number of keys evicted.
func (p *Metrics) KeysEvicted() uint64 {
	return p.get(keyEvict)
}

// CostAdded is the sum of costs that have been added (successful Set calls).
func (p *Metrics) CostAdded() uint64 {
	return p.get(costAdd)
}

// CostEvicted is the sum of all costs that have been evicted.
func (p *Metrics) CostEvicted() uint64 {
	return p.get(costEvict)
}

// SetsDropped is the number of Set calls that don't make it into internal
// buffers (due to contention or some other reason).
func (p *Metrics) SetsDropped() uint64 {
	return p.get(dropSets)
}

// SetsRejected is the number of Set calls rejected by the policy (TinyLFU).
func (p *Metrics) SetsRejected() uint64 {
	return p.get(rejectSets)
}

// GetsDropped is the number of Get counter increments that are dropped
// internally.
func (p *Metrics) GetsDropped() uint64 {
	return p.get(dropGets)
}

// GetsKept is the number of Get counter increments that are kept.
func (p *Metrics) GetsKept() uint64 {
	return p.get(keepGets)
}
  602. // Ratio is the number of Hits over all accesses (Hits + Misses). This is the
  603. // percentage of successful Get calls.
  604. func (p *Metrics) Ratio() float64 {
  605. if p == nil {
  606. return 0.0
  607. }
  608. hits, misses := p.get(hit), p.get(miss)
  609. if hits == 0 && misses == 0 {
  610. return 0.0
  611. }
  612. return float64(hits) / float64(hits+misses)
  613. }
// trackEviction records an evicted key's lifetime (in whole seconds) in the
// life expectancy histogram. A no-op on a nil receiver.
func (p *Metrics) trackEviction(numSeconds int64) {
	if p == nil {
		return
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	p.life.Update(numSeconds)
}
// LifeExpectancySeconds returns a copy of the histogram tracking how long keys
// lived, in seconds, before being evicted. Returns nil on a nil receiver.
func (p *Metrics) LifeExpectancySeconds() *z.HistogramData {
	if p == nil {
		return nil
	}
	p.mu.RLock()
	defer p.mu.RUnlock()
	return p.life.Copy()
}
  630. // Clear resets all the metrics.
  631. func (p *Metrics) Clear() {
  632. if p == nil {
  633. return
  634. }
  635. for i := 0; i < doNotUse; i++ {
  636. for j := range p.all[i] {
  637. atomic.StoreUint64(p.all[i][j], 0)
  638. }
  639. }
  640. p.mu.Lock()
  641. p.life = z.NewHistogramData(z.HistogramBounds(1, 16))
  642. p.mu.Unlock()
  643. }
  644. // String returns a string representation of the metrics.
  645. func (p *Metrics) String() string {
  646. if p == nil {
  647. return ""
  648. }
  649. var buf bytes.Buffer
  650. for i := 0; i < doNotUse; i++ {
  651. t := metricType(i)
  652. fmt.Fprintf(&buf, "%s: %d ", stringFor(t), p.get(t))
  653. }
  654. fmt.Fprintf(&buf, "gets-total: %d ", p.get(hit)+p.get(miss))
  655. fmt.Fprintf(&buf, "hit-ratio: %.2f", p.Ratio())
  656. return buf.String()
  657. }