Update TwinProduction/gocache to v1.2.2

TwinProduction 2021-06-06 14:54:58 -04:00
parent e214d56af1
commit ca87547430
13 changed files with 180 additions and 73 deletions

go.mod
View File

@ -4,7 +4,7 @@ go 1.16
require (
cloud.google.com/go v0.74.0 // indirect
github.com/TwinProduction/gocache v1.2.1
github.com/TwinProduction/gocache v1.2.2
github.com/TwinProduction/health v1.0.0
github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663
github.com/google/gofuzz v1.2.0 // indirect

go.sum
View File

@ -49,8 +49,8 @@ github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/TwinProduction/gocache v1.2.1 h1:NAdMwO9SQEZFmX69YWx6fzhwb6fHakkLri0451c+V1w=
github.com/TwinProduction/gocache v1.2.1/go.mod h1:6zkBoLjrFLkIISwkZTgLy67qliCGSon1xpORM4Ri5HM=
github.com/TwinProduction/gocache v1.2.2 h1:GpIq4HW+oLFlxO8mXapWKx54qT8p7SMWh2tf91jkLDU=
github.com/TwinProduction/gocache v1.2.2/go.mod h1:Yj2daITit8TTBgiOpc26XCDSbg9xcFskUilHj9u3Mh8=
github.com/TwinProduction/health v1.0.0 h1:TVyYTAORQQZ8LaptX8jCHZRCGCAO6e+oJx19BUIzQYY=
github.com/TwinProduction/health v1.0.0/go.mod h1:ys4mYKUeEfYrWmkm60xLtPjTuLIEDQNBZaTZvenLG1c=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=

View File

@ -2,7 +2,7 @@
FROM golang:alpine as builder
WORKDIR /app
ADD . ./
RUN CGO_ENABLED=0 GOOS=linux go build -mod vendor -a -installsuffix cgo -o bin/gocache-server ./gocacheserver/main
RUN CGO_ENABLED=0 GOOS=linux go build -mod vendor -a -installsuffix cgo -o bin/gocache-server cmd/server/main.go
RUN apk --update add --no-cache ca-certificates
FROM scratch

View File

@ -1,6 +1,6 @@
MIT License
Copyright (c) 2020 TwinProduction
Copyright (c) 2021 TwinProduction
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

View File

@ -12,7 +12,7 @@ docker-run-max-memory-usage:
docker run -p 6666:6379 -e AUTOSAVE=true -e MAX_CACHE_SIZE=0 -e MAX_MEMORY_USAGE=524288000 --name gocache-server -d gocache-server
run:
PORT=6666 go run gocacheserver/main/server.go
PORT=6666 go run cmd/server/main.go
start-redis:
docker run -p 6379:6379 --name redis -d redis

View File

@ -5,6 +5,7 @@
[![codecov](https://codecov.io/gh/TwinProduction/gocache/branch/master/graph/badge.svg)](https://codecov.io/gh/TwinProduction/gocache)
[![Go version](https://img.shields.io/github/go-mod/go-version/TwinProduction/gocache.svg)](https://github.com/TwinProduction/gocache)
[![Go Reference](https://pkg.go.dev/badge/github.com/TwinProduction/gocache.svg)](https://pkg.go.dev/github.com/TwinProduction/gocache)
[![Join Discord server](https://img.shields.io/discord/442432928614449155.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/44p4TRep)
gocache is an easy-to-use, high-performance, lightweight and thread-safe (goroutine-safe) in-memory key-value cache
with support for LRU and FIFO eviction policies as well as expiration, bulk operations and even persistence to file.
@ -33,6 +34,8 @@ with support for LRU and FIFO eviction policies as well as expiration, bulk oper
- [Summary](#summary)
- [Results](#results)
- [FAQ](#faq)
- [How can I persist the data on application termination?](#how-can-i-persist-the-data-on-application-termination)
- [How can I automatically save the cache to a file every 5 minutes?](#how-can-i-automatically-save-the-cache-to-a-file-every-5-minutes)
- [Why does the memory usage not go down?](#why-does-the-memory-usage-not-go-down)
@ -58,6 +61,9 @@ It may also serve as a good reference to use in order to implement gocache in yo
go get -u github.com/TwinProduction/gocache
```
If you're interested in using gocache as a server rather than an embedded library, see [Server](#server)
### Initializing the cache
```go
cache := gocache.NewCache().WithMaxSize(1000).WithEvictionPolicy(gocache.LeastRecentlyUsed)
@ -125,52 +131,52 @@ You can also delete multiple entries by using `cache.DeleteAll([]string{"key1",
package main
import (
	"fmt"
	"time"

	"github.com/TwinProduction/gocache"
)

func main() {
	cache := gocache.NewCache().WithEvictionPolicy(gocache.LeastRecentlyUsed).WithMaxSize(10000)
	cache.StartJanitor() // Passively manages expired entries

	cache.Set("key", "value")
	cache.SetWithTTL("key-with-ttl", "value", 60*time.Minute)
	cache.SetAll(map[string]interface{}{"k1": "v1", "k2": "v2", "k3": "v3"})

	value, exists := cache.Get("key")
	fmt.Printf("[Get] key=key; value=%s; exists=%v\n", value, exists)
	for key, value := range cache.GetByKeys([]string{"k1", "k2", "k3"}) {
		fmt.Printf("[GetByKeys] key=%s; value=%s\n", key, value)
	}
	for _, key := range cache.GetKeysByPattern("key*", 0) {
		fmt.Printf("[GetKeysByPattern] key=%s\n", key)
	}

	fmt.Println("Cache size before persisting cache to file:", cache.Count())
	err := cache.SaveToFile("cache.bak")
	if err != nil {
		panic(fmt.Sprintf("failed to persist cache to file: %s", err.Error()))
	}

	cache.Expire("key", time.Hour)
	time.Sleep(500 * time.Millisecond)
	timeUntilExpiration, _ := cache.TTL("key")
	fmt.Println("Number of seconds before 'key' expires:", int(timeUntilExpiration.Seconds()))

	cache.Delete("key")
	cache.DeleteAll([]string{"k1", "k2", "k3"})

	fmt.Println("Cache size before restoring cache from file:", cache.Count())
	_, err = cache.ReadFromFile("cache.bak")
	if err != nil {
		panic(fmt.Sprintf("failed to restore cache from file: %s", err.Error()))
	}
	fmt.Println("Cache size after restoring cache from file:", cache.Count())

	cache.Clear()
	fmt.Println("Cache size after clearing the cache:", cache.Count())
}
```
@ -215,8 +221,8 @@ While you can cache structs in memory out of the box, persisting structs to a fi
```go
type YourCustomStruct struct {
	A string
	B int
}
// ...
@ -251,6 +257,13 @@ every key that cannot be parsed are not populated into the cache by `ReadFromFil
In other words, if you're falling back to a database or something similar when the cache doesn't have the key requested,
you'll be fine.
Note that if you need to modify the type of a variable in a struct, you should also change the name of that variable.
For instance, if a struct has a `CreatedAt` variable of type `time.Time` and that type is later changed to `uint64`,
decoding the struct would fail. However, if you rename the variable to `CreatedAtUnixTimeInMs`, there won't be any
decoding issues other than the loss of data for that field. You could also handle the migration gracefully by keeping
both variables, populating the new `CreatedAtUnixTimeInMs` variable with the `CreatedAt` value, and then removing the
`CreatedAt` field.
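For illustration, here is a minimal sketch of that approach. The struct, field names and file name are assumptions for the example, not part of gocache:
```go
package main

import (
	"encoding/gob"

	"github.com/TwinProduction/gocache"
)

// Hypothetical struct: the old `CreatedAt time.Time` field was replaced by a field
// with a new name and a new type, so entries persisted before the change can still
// be decoded; they simply lose the value of the old field.
type User struct {
	Username string
	// CreatedAt time.Time       // old field, removed
	CreatedAtUnixTimeInMs uint64 // new name and new type
}

func main() {
	// Custom structs must be registered for SaveToFile/ReadFromFile to encode them
	gob.Register(User{})

	cache := gocache.NewCache()
	cache.Set("user:john", User{Username: "john", CreatedAtUnixTimeInMs: 1622995200000})
	if err := cache.SaveToFile("cache.bak"); err != nil {
		panic(err)
	}
}
```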
## Eviction
@ -303,31 +316,37 @@ If you do not start the janitor, there will be no passive deletion of expired ke
## Server
For the sake of convenience, a ready-to-go cache server is available through the `server` package.
#### As an application
```go
package main
import (
	"github.com/TwinProduction/gocache"
	gocacheserver "github.com/TwinProduction/gocache/server"
)

func main() {
	cache := gocache.NewCache().WithEvictionPolicy(gocache.LeastRecentlyUsed).WithMaxSize(100000)
	server := gocacheserver.NewServer(cache).WithPort(6379)
	// This is a blocking function, therefore, you are expected to run this on a goroutine
	server.Start()
}
```
The reason why the server is in a different package is that `gocache` limits its external dependencies to the strict
minimum (e.g. boltdb for persistence). Rather than re-inventing the wheel, however, the server implementation uses
redcon, a very good Redis server framework for Go.
That way, those who desire to use gocache without the server will not add any extra dependencies
as long as they don't import the `server` package.
If you'd like to run it through the CLI:
```
go run cmd/server/main.go
```
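As noted below, any Redis client should be able to talk to the server. Purely as an illustration, here is a minimal client sketch using go-redis v6; the client library, address and key are assumptions for the example:
```go
package main

import (
	"fmt"

	"github.com/go-redis/redis" // go-redis v6; any Redis client should work
)

func main() {
	// Assumes the gocache server from the example above is listening on localhost:6379
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	if err := client.Set("key", "value", 0).Err(); err != nil {
		panic(err)
	}
	value, err := client.Get("key").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("key =", value)
}
```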
Any Redis client should be able to interact with the server, though only the following instructions are supported:
- [X] GET
- [X] SET
@ -350,14 +369,12 @@ Any Redis client should be able to interact with the server, though only the fol
## Running the server with Docker
[![Docker pulls](https://img.shields.io/docker/pulls/twinproduction/gocache-server.svg)](https://cloud.docker.com/repository/docker/twinproduction/gocache-server)
Note that the server version of gocache is still under development.
```
docker run --name gocache-server -p 6379:6379 twinproduction/gocache-server
```
To build it locally, refer to the Makefile's `docker-build` and `docker-run` steps.
## Performance
@ -448,9 +465,96 @@ WithForceNilInterfaceOnNilPointerWithConcurrency/false-8
## FAQ
### How can I persist the data on application termination?
Because this library doesn't persist immediately after every write operation, persistence is instead expected to be
done on a schedule, for instance, every 10 minutes.
While this prevents you from losing all of your data, you may still lose some data if the application stops 9 minutes
after the previous "auto save".
To increase your odds of not losing any data, you can use Go's `signal` package, more specifically its `Notify` function,
which allows listening for termination signals like SIGTERM and SIGINT. Once a termination signal is caught, you can
add the necessary logic for a graceful shutdown.

In the following example, the code that would usually be present in the `main` function is moved to a different function
named `Start`, which is launched on a separate goroutine so that listening for termination signals is what blocks the
main goroutine instead:
```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"

	"github.com/TwinProduction/gocache"
)

const CacheFile = "gocache.data"

var cache = gocache.NewCache()

func main() {
	// Load persisted data from file
	cache.ReadFromFile(CacheFile)
	// Start everything else on another goroutine to prevent blocking the main goroutine
	go Start()
	// Wait for termination signal
	sig := make(chan os.Signal, 1)
	done := make(chan bool, 1)
	signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-sig
		log.Println("Received termination signal, attempting to gracefully shut down")
		err := cache.SaveToFile(CacheFile)
		if err != nil {
			log.Println("Failed to save storage provider:", err.Error())
		}
		done <- true
	}()
	<-done
	log.Println("Shutting down")
}

// Start holds the code that would usually be in main (e.g. starting a web server)
func Start() {
	// ...
}
```
Note that this won't protect you from a SIGKILL, as this signal cannot be caught.
### How can I automatically save the cache to a file every 5 minutes?
Besides using the suggestion above, automatically persisting the cache on an interval will also protect your application
from sudden terminations triggered by signals that cannot be caught, such as the force-kill signal received by an
application being OOMKilled.

The simplest implementation could be something like this:
```go
package main

import (
	"log"
	"time"

	"github.com/TwinProduction/gocache"
)

const CacheFile = "gocache.data"

var cache = gocache.NewCache()

func main() {
	cache.ReadFromFile(CacheFile)
	go autoSave(5 * time.Minute)
	// ...
}

func autoSave(interval time.Duration) {
	for {
		err := cache.SaveToFile(CacheFile)
		if err != nil {
			log.Println("Failed to persist cache to file:", err.Error())
		}
		time.Sleep(interval)
	}
}
```
### Why does the memory usage not go down?
> **NOTE**: As of Go 1.16, this no longer applies. See [golang/go#42330](https://github.com/golang/go/issues/42330)
By default, Go uses `MADV_FREE` if the kernel supports it to release memory, which is significantly more efficient
than using `MADV_DONTNEED`. Unfortunately, this means that RSS doesn't go down unless the OS actually needs the
@ -463,7 +567,7 @@ notice the memory usage lowering.
[reference](https://github.com/golang/go/issues/33376#issuecomment-666455792)
You can reproduce this by following the steps below:
- Start the server
- Note the memory usage
- Create 500k keys
- Note the memory usage
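To confirm that the Go runtime has in fact released the memory even though RSS stays high, here is a small illustrative sketch (not part of gocache) that prints the runtime's own view of its heap:
```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	// HeapReleased counts bytes already returned to the OS; with MADV_FREE the OS
	// may still report those pages as part of the process' RSS until it needs them.
	fmt.Printf("HeapAlloc=%d HeapIdle=%d HeapReleased=%d\n", m.HeapAlloc, m.HeapIdle, m.HeapReleased)
}
```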

View File

@ -1,6 +1,6 @@
module github.com/TwinProduction/gocache
go 1.15
go 1.16
require (
github.com/go-redis/redis v6.15.9+incompatible

View File

@ -9,11 +9,9 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
@ -55,9 +53,7 @@ google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=

View File

@ -260,7 +260,7 @@ func (cache *Cache) SetWithTTL(key string, value interface{}, ttl time.Duration)
return
}
if cache.maxMemoryUsage != NoMaxMemoryUsage {
// Subtract the old entry from the cache's memoryUsage
cache.memoryUsage -= entry.SizeInBytes()
}
// Update existing entry's value
@ -278,8 +278,8 @@ func (cache *Cache) SetWithTTL(key string, value interface{}, ttl time.Duration)
} else {
entry.Expiration = NoExpiration
}
// If the cache doesn't have a maxSize/maxMemoryUsage, then there's no point
// checking if we need to evict an entry, so we'll just return now
if cache.maxSize == NoMaxSize && cache.maxMemoryUsage == NoMaxMemoryUsage {
cache.mutex.Unlock()
return

View File

@ -131,8 +131,8 @@ func (cache *Cache) StartJanitor() error {
func (cache *Cache) StopJanitor() {
if cache.stopJanitor != nil {
// Tell the janitor to stop, and then wait for the janitor to reply on the same channel that it's stopping
// This may seem a bit odd, but this allows us to avoid a data race condition when trying to set
// cache.stopJanitor to nil
cache.stopJanitor <- true
<-cache.stopJanitor
cache.stopJanitor = nil

View File

@ -85,6 +85,12 @@ func (cache *Cache) ReadFromFile(path string) (int, error) {
if err != nil {
// Failed to decode the value, so we'll skip it.
// This is likely due to the fact that the custom struct wasn't registered using gob.Register(...)
//
// Could also be due to a breaking change in a struct's variable. For instance, if the struct has
// a variable with a type map[string]string and that variable is modified to map[string]int,
// decoding the struct would fail. This can be avoided by using a different variable name every
// time you must change the type of a variable within a struct.
//
// See [Persistence - Limitations](https://github.com/TwinProduction/gocache#limitations)
return err
}

View File

@ -1,5 +1,6 @@
package gocache
// EvictionPolicy is what dictates how evictions are handled
type EvictionPolicy string
var (

vendor/modules.txt
View File

@ -1,7 +1,7 @@
# cloud.google.com/go v0.74.0
## explicit
cloud.google.com/go/compute/metadata
# github.com/TwinProduction/gocache v1.2.1
# github.com/TwinProduction/gocache v1.2.2
## explicit
github.com/TwinProduction/gocache
# github.com/TwinProduction/health v1.0.0