Fix #72: Connected placeholder shouldn't resolve to true when host is unreachable

This commit is contained in:
TwinProduction 2021-01-12 21:08:18 -05:00
parent a515335c15
commit aec867ae69
9 changed files with 279 additions and 121 deletions

View File

@ -70,6 +70,10 @@ func Ping(address string) (bool, time.Duration) {
return false, 0 return false, 0
} }
if pinger.Statistics() != nil { if pinger.Statistics() != nil {
// If the packet loss is 100, it means that the packet didn't reach the host
if pinger.Statistics().PacketLoss == 100 {
return false, pinger.Timeout
}
return true, pinger.Statistics().MaxRtt return true, pinger.Statistics().MaxRtt
} }
return true, 0 return true, 0

2
go.mod
View File

@ -4,7 +4,7 @@ go 1.15
require ( require (
cloud.google.com/go v0.74.0 // indirect cloud.google.com/go v0.74.0 // indirect
github.com/TwinProduction/gocache v0.3.0 github.com/TwinProduction/gocache v1.1.0
github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663 github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663
github.com/google/gofuzz v1.2.0 // indirect github.com/google/gofuzz v1.2.0 // indirect
github.com/gorilla/mux v1.8.0 github.com/gorilla/mux v1.8.0

4
go.sum
View File

@ -50,8 +50,8 @@ github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/TwinProduction/gocache v0.3.0 h1:nC02PSOyGLiXGrOJ6eskGc5chBq5GW6m3pA2g341VEM= github.com/TwinProduction/gocache v1.1.0 h1:mibBUyccd8kGHlm5dXhTMDOvWBK4mjNqGyOOkG8mib8=
github.com/TwinProduction/gocache v0.3.0/go.mod h1:+qH57V/K4oAcX9C7CvgJTwUX4lzfIUXQC/6XaRSOS1Y= github.com/TwinProduction/gocache v1.1.0/go.mod h1:+qH57V/K4oAcX9C7CvgJTwUX4lzfIUXQC/6XaRSOS1Y=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=

View File

@ -4,8 +4,7 @@
[![Go Report Card](https://goreportcard.com/badge/github.com/TwinProduction/gocache)](https://goreportcard.com/report/github.com/TwinProduction/gocache) [![Go Report Card](https://goreportcard.com/badge/github.com/TwinProduction/gocache)](https://goreportcard.com/report/github.com/TwinProduction/gocache)
[![codecov](https://codecov.io/gh/TwinProduction/gocache/branch/master/graph/badge.svg)](https://codecov.io/gh/TwinProduction/gocache) [![codecov](https://codecov.io/gh/TwinProduction/gocache/branch/master/graph/badge.svg)](https://codecov.io/gh/TwinProduction/gocache)
[![Go version](https://img.shields.io/github/go-mod/go-version/TwinProduction/gocache.svg)](https://github.com/TwinProduction/gocache) [![Go version](https://img.shields.io/github/go-mod/go-version/TwinProduction/gocache.svg)](https://github.com/TwinProduction/gocache)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/TwinProduction/gocache) [![Go Reference](https://pkg.go.dev/badge/github.com/TwinProduction/gocache.svg)](https://pkg.go.dev/github.com/TwinProduction/gocache)
[![Docker pulls](https://img.shields.io/docker/pulls/twinproduction/gocache-server.svg)](https://cloud.docker.com/repository/docker/twinproduction/gocache-server)
gocache is an easy-to-use, high-performance, lightweight and thread-safe (goroutine-safe) in-memory key-value cache gocache is an easy-to-use, high-performance, lightweight and thread-safe (goroutine-safe) in-memory key-value cache
with support for LRU and FIFO eviction policies as well as expiration, bulk operations and even persistence to file. with support for LRU and FIFO eviction policies as well as expiration, bulk operations and even persistence to file.
@ -27,6 +26,7 @@ with support for LRU and FIFO eviction policies as well as expiration, bulk oper
- [Eviction](#eviction) - [Eviction](#eviction)
- [MaxSize](#maxsize) - [MaxSize](#maxsize)
- [MaxMemoryUsage](#maxmemoryusage) - [MaxMemoryUsage](#maxmemoryusage)
- [Expiration](#expiration)
- [Server](#server) - [Server](#server)
- [Running the server with Docker](#running-the-server-with-docker) - [Running the server with Docker](#running-the-server-with-docker)
- [Performance](#performance) - [Performance](#performance)
@ -71,19 +71,20 @@ cache.StartJanitor()
``` ```
### Functions ### Functions
| Function | Description | | Function | Description |
| ------------------ | ----------- | | --------------------------------- | ----------- |
| WithMaxSize | Sets the max size of the cache. `gocache.NoMaxSize` means there is no limit. If not set, the default max size is `gocache.DefaultMaxSize`. | WithMaxSize | Sets the max size of the cache. `gocache.NoMaxSize` means there is no limit. If not set, the default max size is `gocache.DefaultMaxSize`.
| WithMaxMemoryUsage | Sets the max memory usage of the cache. `gocache.NoMaxMemoryUsage` means there is no limit. The default behavior is to not evict based on memory usage. | WithMaxMemoryUsage | Sets the max memory usage of the cache. `gocache.NoMaxMemoryUsage` means there is no limit. The default behavior is to not evict based on memory usage.
| WithEvictionPolicy | Sets the eviction algorithm to be used when the cache reaches the max size. If not set, the default eviction policy is `gocache.FirstInFirstOut` (FIFO). | WithEvictionPolicy | Sets the eviction algorithm to be used when the cache reaches the max size. If not set, the default eviction policy is `gocache.FirstInFirstOut` (FIFO).
| WithForceNilInterfaceOnNilPointer | Configures whether values with a nil pointer passed to write functions should be forcefully set to nil. Defaults to true.
| StartJanitor | Starts the janitor, which is in charge of deleting expired cache entries in the background. | StartJanitor | Starts the janitor, which is in charge of deleting expired cache entries in the background.
| StopJanitor | Stops the janitor. | StopJanitor | Stops the janitor.
| Set | Same as `SetWithTTL`, but with no expiration (`gocache.NoExpiration`) | Set | Same as `SetWithTTL`, but with no expiration (`gocache.NoExpiration`)
| SetAll | Same as `Set`, but in bulk | SetAll | Same as `Set`, but in bulk
| SetWithTTL | Creates or updates a cache entry with the given key, value and expiration time. If the max size after the aforementioned operation is above the configured max size, the tail will be evicted. Depending on the eviction policy, the tail is defined as the oldest | SetWithTTL | Creates or updates a cache entry with the given key, value and expiration time. If the max size after the aforementioned operation is above the configured max size, the tail will be evicted. Depending on the eviction policy, the tail is defined as the oldest
| Get | Gets a cache entry by its key. | Get | Gets a cache entry by its key.
| GetAll | Gets a map of entries by their keys. The resulting map will contain all keys, even if some of the keys in the slice passed as parameter were not present in the cache. | GetByKeys | Gets a map of entries by their keys. The resulting map will contain all keys, even if some of the keys in the slice passed as parameter were not present in the cache.
| GetAll | Gets all cache entries.
| GetKeysByPattern | Retrieves a slice of keys that matches a given pattern. | GetKeysByPattern | Retrieves a slice of keys that matches a given pattern.
| Delete | Removes a key from the cache. | Delete | Removes a key from the cache.
| DeleteAll | Removes multiple keys from the cache. | DeleteAll | Removes multiple keys from the cache.
@ -94,6 +95,8 @@ cache.StartJanitor()
| SaveToFile | Stores the content of the cache to a file so that it can be read using `ReadFromFile`. See [persistence](#persistence). | SaveToFile | Stores the content of the cache to a file so that it can be read using `ReadFromFile`. See [persistence](#persistence).
| ReadFromFile | Populates the cache using a file created using `SaveToFile`. See [persistence](#persistence). | ReadFromFile | Populates the cache using a file created using `SaveToFile`. See [persistence](#persistence).
For further documentation, please refer to [Go Reference](https://pkg.go.dev/github.com/TwinProduction/gocache)
### Examples ### Examples
@ -102,13 +105,14 @@ cache.StartJanitor()
cache.Set("key", "value") cache.Set("key", "value")
cache.Set("key", 1) cache.Set("key", 1)
cache.Set("key", struct{ Text string }{Text: "value"}) cache.Set("key", struct{ Text string }{Text: "value"})
cache.SetWithTTL("key", []byte("value"), 24*time.Hour)
``` ```
#### Getting an entry #### Getting an entry
```go ```go
value, ok := cache.Get("key") value, exists := cache.Get("key")
``` ```
You can also get multiple entries by using `cache.GetAll([]string{"key1", "key2"})` You can also get multiple entries by using `cache.GetByKeys([]string{"key1", "key2"})`
#### Deleting an entry #### Deleting an entry
```go ```go
@ -122,8 +126,9 @@ package main
import ( import (
"fmt" "fmt"
"github.com/TwinProduction/gocache"
"time" "time"
"github.com/TwinProduction/gocache"
) )
func main() { func main() {
@ -136,8 +141,8 @@ func main() {
value, exists := cache.Get("key") value, exists := cache.Get("key")
fmt.Printf("[Get] key=key; value=%s; exists=%v\n", value, exists) fmt.Printf("[Get] key=key; value=%s; exists=%v\n", value, exists)
for key, value := range cache.GetAll([]string{"k1", "k2", "k3"}) { for key, value := range cache.GetByKeys([]string{"k1", "k2", "k3"}) {
fmt.Printf("[GetAll] key=%s; value=%s\n", key, value) fmt.Printf("[GetByKeys] key=%s; value=%s\n", key, value)
} }
for _, key := range cache.GetKeysByPattern("key*", 0) { for _, key := range cache.GetKeysByPattern("key*", 0) {
fmt.Printf("[GetKeysByPattern] key=%s\n", key) fmt.Printf("[GetKeysByPattern] key=%s\n", key)
@ -174,9 +179,9 @@ func main() {
``` ```
[Get] key=key; value=value; exists=true [Get] key=key; value=value; exists=true
[GetAll] key=k2; value=v2 [GetByKeys] key=k2; value=v2
[GetAll] key=k3; value=v3 [GetByKeys] key=k3; value=v3
[GetAll] key=k1; value=v1 [GetByKeys] key=k1; value=v1
[GetKeysByPattern] key=key [GetKeysByPattern] key=key
[GetKeysByPattern] key=key-with-ttl [GetKeysByPattern] key=key-with-ttl
Cache size before persisting cache to file: 5 Cache size before persisting cache to file: 5
@ -248,6 +253,7 @@ you'll be fine.
## Eviction ## Eviction
### MaxSize ### MaxSize
Eviction by MaxSize is the default behavior, and is also the most efficient. Eviction by MaxSize is the default behavior, and is also the most efficient.
@ -258,7 +264,7 @@ cache := gocache.NewCache().WithMaxSize(1000)
This means that whenever an operation causes the total size of the cache to go above 1000, the tail will be evicted. This means that whenever an operation causes the total size of the cache to go above 1000, the tail will be evicted.
### MaxMemoryUsage ### MaxMemoryUsage
Eviction by MaxMemoryUsage is **disabled by default**, and is still a work in progress. Eviction by MaxMemoryUsage is **disabled by default**, and is in alpha.
The code below will create a cache that has a maximum memory usage of 50MB: The code below will create a cache that has a maximum memory usage of 50MB:
```go ```go
@ -268,7 +274,7 @@ This means that whenever an operation causes the total memory usage of the cache
will be evicted. will be evicted.
Unlike evictions caused by reaching the MaxSize, evictions triggered by MaxMemoryUsage may lead to multiple entries Unlike evictions caused by reaching the MaxSize, evictions triggered by MaxMemoryUsage may lead to multiple entries
being evicted in a row. The reason for this is that if, for instance, you had 500 entries of 0.1MB each and you suddenly added being evicted in a row. The reason for this is that if, for instance, you had 100 entries of 0.1MB each and you suddenly added
a single entry of 10MB, 100 entries would need to be evicted to make enough space for that new big entry. a single entry of 10MB, 100 entries would need to be evicted to make enough space for that new big entry.
It's very important to keep in mind that eviction by MaxMemoryUsage is approximate. It's very important to keep in mind that eviction by MaxMemoryUsage is approximate.
@ -284,6 +290,18 @@ As previously mentioned, this is a work in progress, and here's a list of the th
- Adding an entry bigger than the configured MaxMemoryUsage will work, but it will evict all other entries. - Adding an entry bigger than the configured MaxMemoryUsage will work, but it will evict all other entries.
## Expiration
There are two ways that the deletion of expired keys can take place:
- Active
- Passive
**Active deletion of expired keys** happens when an attempt is made to access the value of a cache entry that expired.
`Get`, `GetByKeys` and `GetAll` are the only functions that can trigger active deletion of expired keys.
**Passive deletion of expired keys** runs in the background and is managed by the janitor.
If you do not start the janitor, there will be no passive deletion of expired keys.
## Server ## Server
For the sake of convenience, a ready-to-go cache server is available For the sake of convenience, a ready-to-go cache server is available
through the `gocacheserver` package. through the `gocacheserver` package.
@ -330,12 +348,14 @@ Any Redis client should be able to interact with the server, though only the fol
## Running the server with Docker ## Running the server with Docker
[![Docker pulls](https://img.shields.io/docker/pulls/twinproduction/gocache-server.svg)](https://cloud.docker.com/repository/docker/twinproduction/gocache-server)
To build it locally, refer to the Makefile's `docker-build` and `docker-run` steps. To build it locally, refer to the Makefile's `docker-build` and `docker-run` steps.
Note that the server version of gocache is still under development. Note that the server version of gocache is still under development.
``` ```
docker run --name gocache-server -p 6379:6379 twinproduction/gocache-server:v0.1.0 docker run --name gocache-server -p 6379:6379 twinproduction/gocache-server
``` ```
@ -362,45 +382,52 @@ but if you're looking into using a library like gocache, odds are, you want more
| mem | 32G DDR4 | | mem | 32G DDR4 |
``` ```
BenchmarkMap_Get-8 47943618 26.6 ns/op BenchmarkMap_Get-8 95936680 26.3 ns/op
BenchmarkMap_SetSmallValue-8 3800810 394 ns/op BenchmarkMap_SetSmallValue-8 7738132 424 ns/op
BenchmarkMap_SetMediumValue-8 3904794 400 ns/op BenchmarkMap_SetMediumValue-8 7766346 424 ns/op
BenchmarkMap_SetLargeValue-8 3934033 383 ns/op BenchmarkMap_SetLargeValue-8 7947063 435 ns/op
BenchmarkCache_Get-8 27254640 45.0 ns/op BenchmarkCache_Get-8 54549049 45.7 ns/op
BenchmarkCache_SetSmallValue-8 2991620 401 ns/op BenchmarkCache_SetSmallValue-8 35225013 69.2 ns/op
BenchmarkCache_SetMediumValue-8 3051128 381 ns/op BenchmarkCache_SetMediumValue-8 5952064 412 ns/op
BenchmarkCache_SetLargeValue-8 2995904 382 ns/op BenchmarkCache_SetLargeValue-8 5969121 411 ns/op
BenchmarkCache_SetSmallValueWhenUsingMaxMemoryUsage-8 2752288 428 ns/op BenchmarkCache_GetUsingLRU-8 54545949 45.6 ns/op
BenchmarkCache_SetMediumValueWhenUsingMaxMemoryUsage-8 2744899 436 ns/op BenchmarkCache_SetSmallValueUsingLRU-8 5909504 419 ns/op
BenchmarkCache_SetLargeValueWhenUsingMaxMemoryUsage-8 2756816 430 ns/op BenchmarkCache_SetMediumValueUsingLRU-8 5910885 418 ns/op
BenchmarkCache_SetSmallValueWithMaxSize10-8 5308886 226 ns/op BenchmarkCache_SetLargeValueUsingLRU-8 5867544 419 ns/op
BenchmarkCache_SetMediumValueWithMaxSize10-8 5304098 226 ns/op BenchmarkCache_SetSmallValueWhenUsingMaxMemoryUsage-8 5477178 462 ns/op
BenchmarkCache_SetLargeValueWithMaxSize10-8 5277986 227 ns/op BenchmarkCache_SetMediumValueWhenUsingMaxMemoryUsage-8 5417595 475 ns/op
BenchmarkCache_SetSmallValueWithMaxSize1000-8 5130580 236 ns/op BenchmarkCache_SetLargeValueWhenUsingMaxMemoryUsage-8 5215263 479 ns/op
BenchmarkCache_SetMediumValueWithMaxSize1000-8 5102404 237 ns/op BenchmarkCache_SetSmallValueWithMaxSize10-8 10115574 236 ns/op
BenchmarkCache_SetLargeValueWithMaxSize1000-8 5084695 237 ns/op BenchmarkCache_SetMediumValueWithMaxSize10-8 10242792 241 ns/op
BenchmarkCache_SetSmallValueWithMaxSize100000-8 3858066 315 ns/op BenchmarkCache_SetLargeValueWithMaxSize10-8 10201894 241 ns/op
BenchmarkCache_SetMediumValueWithMaxSize100000-8 3909277 315 ns/op BenchmarkCache_SetSmallValueWithMaxSize1000-8 9637113 253 ns/op
BenchmarkCache_SetLargeValueWithMaxSize100000-8 3870913 315 ns/op BenchmarkCache_SetMediumValueWithMaxSize1000-8 9635175 253 ns/op
BenchmarkCache_SetSmallValueWithMaxSize100000AndLRU-8 3856012 316 ns/op BenchmarkCache_SetLargeValueWithMaxSize1000-8 9598982 260 ns/op
BenchmarkCache_SetMediumValueWithMaxSize100000AndLRU-8 3809518 316 ns/op BenchmarkCache_SetSmallValueWithMaxSize100000-8 7642584 337 ns/op
BenchmarkCache_SetLargeValueWithMaxSize100000AndLRU-8 3834754 318 ns/op BenchmarkCache_SetMediumValueWithMaxSize100000-8 7407571 344 ns/op
BenchmarkCache_GetAndSetConcurrently-8 1779258 672 ns/op BenchmarkCache_SetLargeValueWithMaxSize100000-8 7071360 345 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndLRU-8 2569590 487 ns/op BenchmarkCache_SetSmallValueWithMaxSize100000AndLRU-8 7544194 332 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndFIFO-8 2608369 474 ns/op BenchmarkCache_SetMediumValueWithMaxSize100000AndLRU-8 7667004 344 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndNoEvictionAndLRU-8 2185795 582 ns/op BenchmarkCache_SetLargeValueWithMaxSize100000AndLRU-8 7357642 338 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndNoEvictionAndFIFO-8 2238811 568 ns/op BenchmarkCache_GetAndSetMultipleConcurrently-8 1442306 1684 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithFrequentEvictionsAndLRU-8 3726714 320 ns/op BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndLRU-8 5117271 477 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithFrequentEvictionsAndFIFO-8 3682808 325 ns/op BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndFIFO-8 5228412 475 ns/op
BenchmarkCache_GetConcurrentlyWithLRU-8 1536589 739 ns/op BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndNoEvictionAndLRU-8 5139195 529 ns/op
BenchmarkCache_GetConcurrentlyWithFIFO-8 1558513 737 ns/op BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndNoEvictionAndFIFO-8 5251639 511 ns/op
BenchmarkCache_GetKeysThatDoNotExistConcurrently-8 10173138 119 ns/op BenchmarkCache_GetAndSetConcurrentlyWithFrequentEvictionsAndLRU-8 7384626 334 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithFrequentEvictionsAndFIFO-8 7361985 332 ns/op
BenchmarkCache_GetConcurrentlyWithLRU-8 3370784 726 ns/op
BenchmarkCache_GetConcurrentlyWithFIFO-8 3749994 681 ns/op
BenchmarkCache_GetKeysThatDoNotExistConcurrently-8 17647344 143 ns/op
``` ```
## FAQ ## FAQ
### Why does the memory usage not go down? ### Why does the memory usage not go down?
> **NOTE**: As of Go 1.16, this will no longer apply. See [golang/go#42330](https://github.com/golang/go/issues/42330)
By default, Go uses `MADV_FREE` if the kernel supports it to release memory, which is significantly more efficient By default, Go uses `MADV_FREE` if the kernel supports it to release memory, which is significantly more efficient
than using `MADV_DONTNEED`. Unfortunately, this means that RSS doesn't go down unless the OS actually needs the than using `MADV_DONTNEED`. Unfortunately, this means that RSS doesn't go down unless the OS actually needs the
memory. memory.

View File

@ -6,8 +6,12 @@ import (
"unsafe" "unsafe"
) )
// Entry is a cache entry
type Entry struct { type Entry struct {
// Key is the name of the cache entry
Key string Key string
// Value is the value of the cache entry
Value interface{} Value interface{}
// RelevantTimestamp is the variable used to store either: // RelevantTimestamp is the variable used to store either:
@ -24,10 +28,12 @@ type Entry struct {
previous *Entry previous *Entry
} }
// Accessed updates the Entry's RelevantTimestamp to now
func (entry *Entry) Accessed() { func (entry *Entry) Accessed() {
entry.RelevantTimestamp = time.Now() entry.RelevantTimestamp = time.Now()
} }
// Expired returns whether the Entry has expired
func (entry Entry) Expired() bool { func (entry Entry) Expired() bool {
if entry.Expiration > 0 { if entry.Expiration > 0 {
if time.Now().UnixNano() > entry.Expiration { if time.Now().UnixNano() > entry.Expiration {
@ -37,6 +43,7 @@ func (entry Entry) Expired() bool {
return false return false
} }
// SizeInBytes returns the size of an entry in bytes, approximately.
func (entry *Entry) SizeInBytes() int { func (entry *Entry) SizeInBytes() int {
return toBytes(entry.Key) + toBytes(entry.Value) + 32 return toBytes(entry.Key) + toBytes(entry.Value) + 32
} }

View File

@ -2,13 +2,16 @@ package gocache
import ( import (
"errors" "errors"
"reflect"
"sync" "sync"
"time" "time"
) )
const ( var (
Debug = false Debug = false
)
const (
// NoMaxSize means that the cache has no maximum number of entries in the cache // NoMaxSize means that the cache has no maximum number of entries in the cache
// Setting Cache.maxSize to this value also means there will be no eviction // Setting Cache.maxSize to this value also means there will be no eviction
NoMaxSize = 0 NoMaxSize = 0
@ -23,15 +26,14 @@ const (
NoExpiration = -1 NoExpiration = -1
Kilobyte = 1024 Kilobyte = 1024
Megabyte = 1024 * 1024 Megabyte = 1024 * Kilobyte
Gigabyte = 1024 * 1024 * 1024 Gigabyte = 1024 * Megabyte
) )
var ( var (
ErrKeyDoesNotExist = errors.New("key does not exist") ErrKeyDoesNotExist = errors.New("key does not exist")
ErrKeyHasNoExpiration = errors.New("key has no expiration") ErrKeyHasNoExpiration = errors.New("key has no expiration")
ErrJanitorAlreadyRunning = errors.New("janitor is already running") ErrJanitorAlreadyRunning = errors.New("janitor is already running")
ErrAutoSaveAlreadyRunning = errors.New("autosave is already running")
) )
// Cache is the core struct of gocache which contains the data as well as all relevant configuration fields // Cache is the core struct of gocache which contains the data as well as all relevant configuration fields
@ -68,6 +70,15 @@ type Cache struct {
// memoryUsage is the approximate memory usage of the cache (dataset only) in bytes // memoryUsage is the approximate memory usage of the cache (dataset only) in bytes
memoryUsage int memoryUsage int
// forceNilInterfaceOnNilPointer determines whether all Set-like functions should set a value as nil if the
// interface passed has a nil value but not a nil type.
//
// By default, interfaces are only nil when both their type and value is nil.
// This means that when you pass a pointer to a nil value, the type of the interface
// will still show as nil, which means that if you don't cast the interface after
// retrieving it, a nil check will return that the value is not nil.
forceNilInterfaceOnNilPointer bool
} }
// MaxSize returns the maximum amount of keys that can be present in the cache before // MaxSize returns the maximum amount of keys that can be present in the cache before
@ -87,8 +98,16 @@ func (cache *Cache) EvictionPolicy() EvictionPolicy {
} }
// Stats returns statistics from the cache // Stats returns statistics from the cache
func (cache *Cache) Stats() *Statistics { func (cache *Cache) Stats() Statistics {
return cache.stats cache.mutex.RLock()
stats := Statistics{
EvictedKeys: cache.stats.EvictedKeys,
ExpiredKeys: cache.stats.ExpiredKeys,
Hits: cache.stats.Hits,
Misses: cache.stats.Misses,
}
cache.mutex.RUnlock()
return stats
} }
// MemoryUsage returns the current memory usage of the cache's dataset in bytes // MemoryUsage returns the current memory usage of the cache's dataset in bytes
@ -103,6 +122,9 @@ func (cache *Cache) WithMaxSize(maxSize int) *Cache {
if maxSize < 0 { if maxSize < 0 {
maxSize = NoMaxSize maxSize = NoMaxSize
} }
if maxSize != NoMaxSize && cache.Count() == 0 {
cache.entries = make(map[string]*Entry, maxSize)
}
cache.maxSize = maxSize cache.maxSize = maxSize
return cache return cache
} }
@ -127,10 +149,51 @@ func (cache *Cache) WithEvictionPolicy(policy EvictionPolicy) *Cache {
return cache return cache
} }
// WithForceNilInterfaceOnNilPointer sets whether all Set-like functions should set a value as nil if the
// interface passed has a nil value but not a nil type.
//
// In Go, an interface is only nil if both its type and value are nil, which means that a nil pointer
// (e.g. (*Struct)(nil)) will retain its attribution to the type, and the unmodified value returned from
// Cache.Get, for instance, would return false when compared with nil if this option is set to false.
//
// We can bypass this by detecting if the interface's value is nil and setting it to nil rather than
// a nil pointer, which will make the value returned from Cache.Get return true when compared with nil.
// This is exactly what passing true to WithForceNilInterfaceOnNilPointer does, and it's also the default behavior.
//
// Alternatively, you may pass false to WithForceNilInterfaceOnNilPointer, which will mean that you'll have
// to cast the value returned from Cache.Get to its original type to check for whether the pointer returned
// is nil or not.
//
// If set to true:
// cache := gocache.NewCache().WithForceNilInterfaceOnNilPointer(true)
// cache.Set("key", (*Struct)(nil))
// value, _ := cache.Get("key")
// // the following returns true, because the interface{} was forcefully set to nil
// if value == nil {}
// // the following will panic, because the value has been casted to its type
// if value.(*Struct) == nil {}
//
// If set to false:
// cache := gocache.NewCache().WithForceNilInterfaceOnNilPointer(false)
// cache.Set("key", (*Struct)(nil))
// value, _ := cache.Get("key")
// // the following returns false, because the interface{} returned has a non-nil type (*Struct)
// if value == nil {}
// // the following returns true, because the value has been casted to its type
// if value.(*Struct) == nil {}
//
// In other words, if set to true, you do not need to cast the value returned from the cache
// to check if the value is nil.
//
// Defaults to true
func (cache *Cache) WithForceNilInterfaceOnNilPointer(forceNilInterfaceOnNilPointer bool) *Cache {
cache.forceNilInterfaceOnNilPointer = forceNilInterfaceOnNilPointer
return cache
}
// NewCache creates a new Cache // NewCache creates a new Cache
// //
// Should be used in conjunction with Cache.WithMaxSize, Cache.WithMaxMemoryUsage and/or Cache.WithEvictionPolicy // Should be used in conjunction with Cache.WithMaxSize, Cache.WithMaxMemoryUsage and/or Cache.WithEvictionPolicy
//
// gocache.NewCache().WithMaxSize(10000).WithEvictionPolicy(gocache.LeastRecentlyUsed) // gocache.NewCache().WithMaxSize(10000).WithEvictionPolicy(gocache.LeastRecentlyUsed)
// //
func NewCache() *Cache { func NewCache() *Cache {
@ -141,6 +204,7 @@ func NewCache() *Cache {
entries: make(map[string]*Entry), entries: make(map[string]*Entry),
mutex: sync.RWMutex{}, mutex: sync.RWMutex{},
stopJanitor: nil, stopJanitor: nil,
forceNilInterfaceOnNilPointer: true,
} }
} }
@ -150,13 +214,22 @@ func (cache *Cache) Set(key string, value interface{}) {
} }
// SetWithTTL creates or updates a key with a given value and sets an expiration time (-1 is NoExpiration) // SetWithTTL creates or updates a key with a given value and sets an expiration time (-1 is NoExpiration)
//
// The TTL provided must be greater than 0, or NoExpiration (-1). If a negative value that isn't -1 (NoExpiration) is
// provided, the entry will not be created if the key doesn't exist
func (cache *Cache) SetWithTTL(key string, value interface{}, ttl time.Duration) { func (cache *Cache) SetWithTTL(key string, value interface{}, ttl time.Duration) {
// An interface is only nil if both its value and its type are nil, however, passing a pointer
if cache.forceNilInterfaceOnNilPointer {
if value != nil && (reflect.ValueOf(value).Kind() == reflect.Ptr && reflect.ValueOf(value).IsNil()) {
value = nil
}
}
cache.mutex.Lock() cache.mutex.Lock()
entry, ok := cache.get(key) entry, ok := cache.get(key)
if !ok { if !ok {
// A negative TTL that isn't -1 (NoExpiration) is an entry that will expire instantly, // A negative TTL that isn't -1 (NoExpiration) or 0 is an entry that will expire instantly,
// so might as well just not create it in the first place // so might as well just not create it in the first place
if ttl != NoExpiration && ttl < 0 { if ttl != NoExpiration && ttl < 1 {
cache.mutex.Unlock() cache.mutex.Unlock()
return return
} }
@ -178,6 +251,13 @@ func (cache *Cache) SetWithTTL(key string, value interface{}, ttl time.Duration)
cache.memoryUsage += entry.SizeInBytes() cache.memoryUsage += entry.SizeInBytes()
} }
} else { } else {
// A negative TTL that isn't -1 (NoExpiration) or 0 is an entry that will expire instantly,
// so might as well just delete it immediately instead of updating it
if ttl != NoExpiration && ttl < 1 {
cache.delete(key)
cache.mutex.Unlock()
return
}
if cache.maxMemoryUsage != NoMaxMemoryUsage { if cache.maxMemoryUsage != NoMaxMemoryUsage {
// Subtract the old entry from the cache's memoryUsage // Subtract the old entry from the cache's memoryUsage
cache.memoryUsage -= entry.SizeInBytes() cache.memoryUsage -= entry.SizeInBytes()
@ -234,12 +314,13 @@ func (cache *Cache) Get(key string) (interface{}, bool) {
cache.stats.Misses++ cache.stats.Misses++
return nil, false return nil, false
} }
cache.stats.Hits++
if entry.Expired() { if entry.Expired() {
cache.stats.ExpiredKeys++
cache.delete(key) cache.delete(key)
cache.mutex.Unlock() cache.mutex.Unlock()
return nil, false return nil, false
} }
cache.stats.Hits++
if cache.evictionPolicy == LeastRecentlyUsed { if cache.evictionPolicy == LeastRecentlyUsed {
entry.Accessed() entry.Accessed()
if cache.head == entry { if cache.head == entry {
@ -253,12 +334,11 @@ func (cache *Cache) Get(key string) (interface{}, bool) {
return entry.Value, true return entry.Value, true
} }
// GetAll retrieves multiple entries using the keys passed as parameter // GetByKeys retrieves multiple entries using the keys passed as parameter
// All keys are returned in the map, regardless of whether they exist or not, // All keys are returned in the map, regardless of whether they exist or not, however, entries that do not exist in the
// however, entries that do not exist in the cache will return nil, meaning that // cache will return nil, meaning that there is no way of determining whether a key genuinely has the value nil, or
// there is no way of determining whether a key genuinely has the value nil, or // whether it doesn't exist in the cache using only this function.
// whether it doesn't exist in the cache using only this function func (cache *Cache) GetByKeys(keys []string) map[string]interface{} {
func (cache *Cache) GetAll(keys []string) map[string]interface{} {
entries := make(map[string]interface{}) entries := make(map[string]interface{})
for _, key := range keys { for _, key := range keys {
entries[key], _ = cache.Get(key) entries[key], _ = cache.Get(key)
@ -266,18 +346,51 @@ func (cache *Cache) GetAll(keys []string) map[string]interface{} {
return entries return entries
} }
// GetAll retrieves all cache entries
//
// If the eviction policy is LeastRecentlyUsed, note that unlike Get and GetByKeys, this does not update the last access
// timestamp. The reason for this is that since all cache entries will be accessed, updating the last access timestamp
// would provide very little benefit while harming the ability to accurately determine the next key that will be evicted
//
// You should probably avoid using this if you have a lot of entries.
//
// GetKeysByPattern is a good alternative if you want to retrieve entries that you do not have the key for, as it only
// retrieves the keys and does not trigger active eviction and has a parameter for setting a limit to the number of keys
// you wish to retrieve.
func (cache *Cache) GetAll() map[string]interface{} {
entries := make(map[string]interface{})
cache.mutex.Lock()
for key, entry := range cache.entries {
if entry.Expired() {
cache.delete(key)
continue
}
entries[key] = entry.Value
}
cache.stats.Hits += uint64(len(entries))
cache.mutex.Unlock()
return entries
}
// GetKeysByPattern retrieves a slice of keys that match a given pattern // GetKeysByPattern retrieves a slice of keys that match a given pattern
// If the limit is set to 0, the entire cache will be searched for matching keys. // If the limit is set to 0, the entire cache will be searched for matching keys.
// If the limit is above 0, the search will stop once the specified number of matching keys have been found. // If the limit is above 0, the search will stop once the specified number of matching keys have been found.
// //
// e.g. cache.GetKeysByPattern("*some*", 0) will return all keys containing "some" in them // e.g.
// e.g. cache.GetKeysByPattern("*some*", 5) will return 5 keys (or less) containing "some" in them // cache.GetKeysByPattern("*some*", 0) will return all keys containing "some" in them
// cache.GetKeysByPattern("*some*", 5) will return 5 keys (or less) containing "some" in them
// //
// Note that GetKeysByPattern does not trigger evictions, nor does it count as accessing the entry. // Note that GetKeysByPattern does not trigger active evictions, nor does it count as accessing the entry, the latter
// only applying if the cache uses the LeastRecentlyUsed eviction policy.
// The reason for that behavior is that these two (active eviction and access) only applies when you access the value
// of the cache entry, and this function only returns the keys.
func (cache *Cache) GetKeysByPattern(pattern string, limit int) []string { func (cache *Cache) GetKeysByPattern(pattern string, limit int) []string {
var matchingKeys []string var matchingKeys []string
cache.mutex.RLock() cache.mutex.Lock()
for key := range cache.entries { for key, value := range cache.entries {
if value.Expired() {
continue
}
if MatchPattern(pattern, key) { if MatchPattern(pattern, key) {
matchingKeys = append(matchingKeys, key) matchingKeys = append(matchingKeys, key)
if limit > 0 && len(matchingKeys) >= limit { if limit > 0 && len(matchingKeys) >= limit {
@ -285,7 +398,7 @@ func (cache *Cache) GetKeysByPattern(pattern string, limit int) []string {
} }
} }
} }
cache.mutex.RUnlock() cache.mutex.Unlock()
return matchingKeys return matchingKeys
} }

View File

@ -2,7 +2,6 @@ package gocache
import ( import (
"log" "log"
"runtime"
"time" "time"
) )
@ -24,9 +23,10 @@ const (
) )
// StartJanitor starts the janitor on a different goroutine // StartJanitor starts the janitor on a different goroutine
// The janitor's job is to delete expired keys in the background. // The janitor's job is to delete expired keys in the background, in other words, it takes care of passive eviction.
// It can be stopped by calling Cache.StopJanitor. // It can be stopped by calling Cache.StopJanitor.
// If you do not start the janitor, expired keys will only be deleted when they are accessed through Get // If you do not start the janitor, expired keys will only be deleted when they are accessed through Get, GetByKeys, or
// GetAll.
func (cache *Cache) StartJanitor() error { func (cache *Cache) StartJanitor() error {
if cache.stopJanitor != nil { if cache.stopJanitor != nil {
return ErrJanitorAlreadyRunning return ErrJanitorAlreadyRunning
@ -109,26 +109,32 @@ func (cache *Cache) StartJanitor() error {
} }
cache.mutex.Unlock() cache.mutex.Unlock()
case <-cache.stopJanitor: case <-cache.stopJanitor:
cache.stopJanitor = nil cache.stopJanitor <- true
return return
} }
} }
}() }()
if Debug { //if Debug {
go func() { // go func() {
var m runtime.MemStats // var m runtime.MemStats
for { // for {
runtime.ReadMemStats(&m) // runtime.ReadMemStats(&m)
log.Printf("Alloc=%vMB; HeapReleased=%vMB; Sys=%vMB; HeapInUse=%vMB; HeapObjects=%v; HeapObjectsFreed=%v; GC=%v; cache.memoryUsage=%vMB; cacheSize=%d\n", m.Alloc/1024/1024, m.HeapReleased/1024/1024, m.Sys/1024/1024, m.HeapInuse/1024/1024, m.HeapObjects, m.Frees, m.NumGC, cache.memoryUsage/1024/1024, cache.Count()) // log.Printf("Alloc=%vMB; HeapReleased=%vMB; Sys=%vMB; HeapInUse=%vMB; HeapObjects=%v; HeapObjectsFreed=%v; GC=%v; cache.memoryUsage=%vMB; cacheSize=%d\n", m.Alloc/1024/1024, m.HeapReleased/1024/1024, m.Sys/1024/1024, m.HeapInuse/1024/1024, m.HeapObjects, m.Frees, m.NumGC, cache.memoryUsage/1024/1024, cache.Count())
time.Sleep(3 * time.Second) // time.Sleep(3 * time.Second)
} // }
}() // }()
} //}
return nil return nil
} }
// StopJanitor stops the janitor // StopJanitor stops the janitor
func (cache *Cache) StopJanitor() { func (cache *Cache) StopJanitor() {
if cache.stopJanitor != nil {
// Tell the janitor to stop, and then wait for the janitor to reply on the same channel that it's stopping
// This may seem a bit odd, but this allows us to avoid a data race condition in which setting cache.stopJanitor
// to nil
cache.stopJanitor <- true cache.stopJanitor <- true
time.Sleep(100 * time.Millisecond) <-cache.stopJanitor
cache.stopJanitor = nil
}
} }

View File

@ -3,11 +3,12 @@ package gocache
import ( import (
"bytes" "bytes"
"encoding/gob" "encoding/gob"
"github.com/boltdb/bolt"
"log" "log"
"os" "os"
"sort" "sort"
"time" "time"
"github.com/boltdb/bolt"
) )
// SaveToFile stores the content of the cache to a file so that it can be read using // SaveToFile stores the content of the cache to a file so that it can be read using

2
vendor/modules.txt vendored
View File

@ -1,7 +1,7 @@
# cloud.google.com/go v0.74.0 # cloud.google.com/go v0.74.0
## explicit ## explicit
cloud.google.com/go/compute/metadata cloud.google.com/go/compute/metadata
# github.com/TwinProduction/gocache v0.3.0 # github.com/TwinProduction/gocache v1.1.0
## explicit ## explicit
github.com/TwinProduction/gocache github.com/TwinProduction/gocache
# github.com/beorn7/perks v1.0.1 # github.com/beorn7/perks v1.0.1