
Commit 2f725cb

Add LRU mem cache implementation (#16226)
The current default memory cache implementation is unbounded in both size and number of objects cached, which is hardly ideal. This PR proposes a TwoQueue LRU cache as the underlying cache for Gitea. For simplicity the cache is limited by the number of objects stored (rather than by their size). The default is 50000 objects, which is perhaps too small, as most of the objects we cache are well under 1kB. It may be worth considering a different LRU implementation that actively limits size or avoids GC; however, this is just a beginning implementation.

Signed-off-by: Andrew Thornton <[email protected]>
1 parent 0728479 commit 2f725cb
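As background for the approach described above, here is a minimal sketch (not part of this commit) of the hashicorp/golang-lru TwoQueue cache that the new adapter wraps. It shows the point of the change: the cache is bounded by object count, so old entries are evicted instead of accumulating without limit. The size of 128 and the keys are made up for illustration; the adapter defaults to 50000.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// Same constructor the new adapter uses, with a small size for illustration.
	c, err := lru.New2Q(128)
	if err != nil {
		panic(err)
	}
	for i := 0; i < 1000; i++ {
		c.Add(fmt.Sprintf("key-%d", i), i)
	}
	// The cache never holds more than its configured number of objects.
	fmt.Println(c.Len()) // 128
	_, ok := c.Get("key-0")
	fmt.Println(ok) // false: the oldest entries were evicted
}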

5 files changed: +219 -6 lines

custom/conf/app.example.ini

Lines changed: 2 additions & 1 deletion
@@ -1471,7 +1471,7 @@ PATH =
 ;; if the cache enabled
 ;ENABLED = true
 ;;
-;; Either "memory", "redis", or "memcache", default is "memory"
+;; Either "memory", "redis", "memcache", or "twoqueue". default is "memory"
 ;ADAPTER = memory
 ;;
 ;; For "memory" only, GC interval in seconds, default is 60
@@ -1480,6 +1480,7 @@ PATH =
 ;; For "redis" and "memcache", connection host address
 ;; redis: network=tcp,addr=:6379,password=macaron,db=0,pool_size=100,idle_timeout=180
 ;; memcache: `127.0.0.1:11211`
+;; twoqueue: `{"size":50000,"recent_ratio":0.25,"ghost_ratio":0.5}` or `50000`
 ;HOST =
 ;;
 ;; Time to keep items in cache if not used, default is 16 hours.
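By way of example (not part of the commit), an app.ini fragment that enables the new adapter using the JSON form of HOST documented above; the values are purely illustrative:

[cache]
ENABLED = true
ADAPTER = twoqueue
; bound the cache to 50000 objects, using the library's default 2Q ratios
HOST = {"size":50000,"recent_ratio":0.25,"ghost_ratio":0.5}
ITEM_TTL = 16h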

docs/content/doc/advanced/config-cheat-sheet.en-us.md

Lines changed: 4 additions & 3 deletions
@@ -590,11 +590,12 @@ Define allowed algorithms and their minimum key length (use -1 to disable a type
 ## Cache (`cache`)
 
 - `ENABLED`: **true**: Enable the cache.
-- `ADAPTER`: **memory**: Cache engine adapter, either `memory`, `redis`, or `memcache`.
-- `INTERVAL`: **60**: Garbage Collection interval (sec), for memory cache only.
-- `HOST`: **\<empty\>**: Connection string for `redis` and `memcache`.
+- `ADAPTER`: **memory**: Cache engine adapter, either `memory`, `redis`, `twoqueue` or `memcache`. (`twoqueue` represents a size limited LRU cache.)
+- `INTERVAL`: **60**: Garbage Collection interval (sec), for memory and twoqueue cache only.
+- `HOST`: **\<empty\>**: Connection string for `redis` and `memcache`. For `twoqueue` sets configuration for the queue.
   - Redis: `redis://:[email protected]:6379/0?pool_size=100&idle_timeout=180s`
   - Memcache: `127.0.0.1:9090;127.0.0.1:9091`
+  - TwoQueue LRU cache: `{"size":50000,"recent_ratio":0.25,"ghost_ratio":0.5}` or `50000` representing the maximum number of objects stored in the cache.
 - `ITEM_TTL`: **16h**: Time to keep items in cache if not used, Setting it to 0 disables caching.
 
 ## Cache - LastCommitCache settings (`cache.last_commit`)
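For `twoqueue`, the bare-number form of `HOST` is shorthand: judging from StartAndGC in the new adapter below and the library defaults Default2QRecentRatio (0.25) and Default2QGhostEntries (0.50), these two settings behave the same:

HOST = 50000
HOST = {"size":50000,"recent_ratio":0.25,"ghost_ratio":0.5}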

modules/cache/cache_twoqueue.go

Lines changed: 204 additions & 0 deletions
@@ -0,0 +1,204 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package cache

import (
	"strconv"
	"sync"
	"time"

	mc "gitea.com/go-chi/cache"
	lru "github.com/hashicorp/golang-lru"
	jsoniter "github.com/json-iterator/go"
)

// TwoQueueCache represents a LRU 2Q cache adapter implementation
type TwoQueueCache struct {
	lock     sync.Mutex
	cache    *lru.TwoQueueCache
	interval int
}

// TwoQueueCacheConfig describes the configuration for TwoQueueCache
type TwoQueueCacheConfig struct {
	Size        int     `ini:"SIZE" json:"size"`
	RecentRatio float64 `ini:"RECENT_RATIO" json:"recent_ratio"`
	GhostRatio  float64 `ini:"GHOST_RATIO" json:"ghost_ratio"`
}

// MemoryItem represents a memory cache item.
type MemoryItem struct {
	Val     interface{}
	Created int64
	Timeout int64
}

func (item *MemoryItem) hasExpired() bool {
	return item.Timeout > 0 &&
		(time.Now().Unix()-item.Created) >= item.Timeout
}

var _ mc.Cache = &TwoQueueCache{}

// Put puts value into cache with key and expire time.
func (c *TwoQueueCache) Put(key string, val interface{}, timeout int64) error {
	item := &MemoryItem{
		Val:     val,
		Created: time.Now().Unix(),
		Timeout: timeout,
	}
	c.lock.Lock()
	defer c.lock.Unlock()
	c.cache.Add(key, item)
	return nil
}

// Get gets cached value by given key.
func (c *TwoQueueCache) Get(key string) interface{} {
	c.lock.Lock()
	defer c.lock.Unlock()
	cached, ok := c.cache.Get(key)
	if !ok {
		return nil
	}
	item, ok := cached.(*MemoryItem)

	if !ok || item.hasExpired() {
		c.cache.Remove(key)
		return nil
	}

	return item.Val
}

// Delete deletes cached value by given key.
func (c *TwoQueueCache) Delete(key string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.cache.Remove(key)
	return nil
}

// Incr increases cached int-type value by given key as a counter.
func (c *TwoQueueCache) Incr(key string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	cached, ok := c.cache.Get(key)
	if !ok {
		return nil
	}
	item, ok := cached.(*MemoryItem)

	if !ok || item.hasExpired() {
		c.cache.Remove(key)
		return nil
	}

	var err error
	item.Val, err = mc.Incr(item.Val)
	return err
}

// Decr decreases cached int-type value by given key as a counter.
func (c *TwoQueueCache) Decr(key string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	cached, ok := c.cache.Get(key)
	if !ok {
		return nil
	}
	item, ok := cached.(*MemoryItem)

	if !ok || item.hasExpired() {
		c.cache.Remove(key)
		return nil
	}

	var err error
	item.Val, err = mc.Decr(item.Val)
	return err
}

// IsExist returns true if cached value exists.
func (c *TwoQueueCache) IsExist(key string) bool {
	c.lock.Lock()
	defer c.lock.Unlock()
	cached, ok := c.cache.Peek(key)
	if !ok {
		return false
	}
	item, ok := cached.(*MemoryItem)
	if !ok || item.hasExpired() {
		c.cache.Remove(key)
		return false
	}

	return true
}

// Flush deletes all cached data.
func (c *TwoQueueCache) Flush() error {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.cache.Purge()
	return nil
}

func (c *TwoQueueCache) checkAndInvalidate(key interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()
	cached, ok := c.cache.Peek(key)
	if !ok {
		return
	}
	item, ok := cached.(*MemoryItem)
	if !ok || item.hasExpired() {
		c.cache.Remove(key)
	}
}

func (c *TwoQueueCache) startGC() {
	if c.interval < 0 {
		return
	}
	for _, key := range c.cache.Keys() {
		c.checkAndInvalidate(key)
	}
	time.AfterFunc(time.Duration(c.interval)*time.Second, c.startGC)
}

// StartAndGC starts GC routine based on config string settings.
func (c *TwoQueueCache) StartAndGC(opts mc.Options) error {
	var err error
	size := 50000
	if opts.AdapterConfig != "" {
		size, err = strconv.Atoi(opts.AdapterConfig)
	}
	if err != nil {
		json := jsoniter.ConfigCompatibleWithStandardLibrary
		if !json.Valid([]byte(opts.AdapterConfig)) {
			return err
		}

		cfg := &TwoQueueCacheConfig{
			Size:        50000,
			RecentRatio: lru.Default2QRecentRatio,
			GhostRatio:  lru.Default2QGhostEntries,
		}
		_ = json.Unmarshal([]byte(opts.AdapterConfig), cfg)
		c.cache, err = lru.New2QParams(cfg.Size, cfg.RecentRatio, cfg.GhostRatio)
	} else {
		c.cache, err = lru.New2Q(size)
	}
	c.interval = opts.Interval
	if c.interval > 0 {
		go c.startGC()
	}
	return err
}

func init() {
	mc.Register("twoqueue", &TwoQueueCache{})
}
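A minimal usage sketch (not part of the commit) exercising the adapter through the mc.Cache methods defined above. In Gitea the adapter is selected with ADAPTER = twoqueue and constructed via cache.NewContext(); the direct construction, the exampleTwoQueue helper, and the key/TTL values here are hypothetical and for illustration only.

package cache

import (
	"fmt"
	"log"

	mc "gitea.com/go-chi/cache"
)

// exampleTwoQueue is a hypothetical helper, not part of the commit.
func exampleTwoQueue() {
	c := &TwoQueueCache{}
	// "1000" takes the plain-number path in StartAndGC (lru.New2Q(1000)),
	// with an expiry sweep every 60 seconds.
	if err := c.StartAndGC(mc.Options{AdapterConfig: "1000", Interval: 60}); err != nil {
		log.Fatal(err)
	}
	// Cache a value for 300 seconds; expired items are dropped on access
	// or by the background GC loop.
	if err := c.Put("user:42:name", "alice", 300); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Get("user:42:name")) // alice
	fmt.Println(c.IsExist("missing"))  // false
}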

modules/setting/cache.go

Lines changed: 6 additions & 1 deletion
@@ -58,11 +58,16 @@ func newCacheService() {
 		log.Fatal("Failed to map Cache settings: %v", err)
 	}
 
-	CacheService.Adapter = sec.Key("ADAPTER").In("memory", []string{"memory", "redis", "memcache"})
+	CacheService.Adapter = sec.Key("ADAPTER").In("memory", []string{"memory", "redis", "memcache", "twoqueue"})
 	switch CacheService.Adapter {
 	case "memory":
 	case "redis", "memcache":
 		CacheService.Conn = strings.Trim(sec.Key("HOST").String(), "\" ")
+	case "twoqueue":
+		CacheService.Conn = strings.TrimSpace(sec.Key("HOST").String())
+		if CacheService.Conn == "" {
+			CacheService.Conn = "50000"
+		}
 	case "": // disable cache
 		CacheService.Enabled = false
 	default:
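To tie this back to StartAndGC in the new adapter (an illustrative trace, not part of the commit; the 10000 values are made up):

HOST =                  ->  CacheService.Conn = "50000"            ->  lru.New2Q(50000)
HOST = 10000            ->  CacheService.Conn = "10000"            ->  lru.New2Q(10000)
HOST = {"size":10000}   ->  valid JSON, other fields defaulted     ->  lru.New2QParams(10000, 0.25, 0.50)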

routers/init.go

Lines changed: 3 additions & 1 deletion
@@ -52,7 +52,9 @@ func NewServices() {
 		log.Fatal("repository init failed: %v", err)
 	}
 	mailer.NewContext()
-	_ = cache.NewContext()
+	if err := cache.NewContext(); err != nil {
+		log.Fatal("Unable to start cache service: %v", err)
+	}
 	notification.NewContext()
 	if err := archiver.Init(); err != nil {
 		log.Fatal("archiver init failed: %v", err)
