// A Getter loads data for a key. type Getter interface { // Get returns the value identified by key, populating dest. // // The returned data must be unversioned. That is, key must // uniquely describe the loaded data, without an implicit // current time, and without relying on cache expiration // mechanisms. Get(ctx context.Context, key string, dest Sink) error }
Sink 是一个接口,用于接收从缓存中 Get 到的数据。
GetterFunc
GetterFunc 是接口型函数,这是为了向接口参数里传函数,就让函数继承了接口。
1 2 3 4 5 6
// A GetterFunc implements Getter with a function. type GetterFunc func(ctx context.Context, key string, dest Sink)error
// PeerPicker is the interface that must be implemented to locate // the peer that owns a specific key. type PeerPicker interface { // PickPeer returns the peer that owns the specific key // and true to indicate that a remote peer was nominated. // It returns nil, false if the key owner is the current peer. PickPeer(key string) (peer ProtoGetter, ok bool) }
// A Group is a cache namespace and associated data loaded spread over // a group of 1 or more machines. type Group struct { name string getter Getter peersOnce sync.Once peers PeerPicker cacheBytes int64// limit for sum of mainCache and hotCache size
// mainCache is a cache of the keys for which this process // (amongst its peers) is authoritative. That is, this cache // contains keys which consistent hash on to this process's // peer number. mainCache cache
// hotCache contains keys/values for which this peer is not // authoritative (otherwise they would be in mainCache), but // are popular enough to warrant mirroring in this process to // avoid going over the network to fetch from a peer. Having // a hotCache avoids network hotspotting, where a peer's // network card could become the bottleneck on a popular key. // This cache is used sparingly to maximize the total number // of key/value pairs that can be stored globally. hotCache cache
// loadGroup ensures that each key is only fetched once // (either locally or remotely), regardless of the number of // concurrent callers. loadGroup flightGroup
_ int32// force Stats to be 8-byte aligned on 32-bit platforms
// Stats are statistics on the group. Stats Stats }
Get 查询缓存
Group 的核心方法就是 Get 方法,Get 方法根据 key 获取相应的缓存数据,并且将数据传给 dest。
1
func (g *Group) Get(ctx context.Context, key string, dest Sink) error
其流程为:
首先保证 peers 被初始化,且仅被初始化一次。
1 2
g.peersOnce.Do(g.initPeers)
g.Stats.Gets.Add(1)
从本地的主缓存和热点数据缓存中查询 key,若存在数据则缓存命中数加 1,并返回。
1 2 3 4 5 6
value, cacheHit := g.lookupCache(key)
if cacheHit {
	g.Stats.CacheHits.Add(1)
	return setSinkView(dest, value)
}
g.Stats.Loads.Add(1)
viewi, err := g.loadGroup.Do(key, func() (interface{}, error) {
	// Check the cache again because singleflight can only dedup calls
	// that overlap concurrently. It's possible for 2 concurrent
	// requests to miss the cache, resulting in 2 load() calls. An
	// unfortunate goroutine scheduling would result in this callback
	// being run twice, serially. If we don't check the cache again,
	// cache.nbytes would be incremented below even though there will
	// be only one entry for this key.
	//
	// Consider the following serialized event ordering for two
	// goroutines in which this callback gets called twice for the
	// same key:
	// 1: Get("key")
	// 2: Get("key")
	// 1: lookupCache("key")
	// 2: lookupCache("key")
	// 1: load("key")
	// 2: load("key")
	// 1: loadGroup.Do("key", fn)
	// 1: fn()
	// 2: loadGroup.Do("key", fn)
	// 2: fn()
	if value, cacheHit := g.lookupCache(key); cacheHit {
		g.Stats.CacheHits.Add(1)
		return value, nil
	}
	g.Stats.LoadsDeduped.Add(1)
	var value ByteView
	var err error
	if peer, ok := g.peers.PickPeer(key); ok {
		value, err = g.getFromPeer(ctx, peer, key)
		if err == nil {
			g.Stats.PeerLoads.Add(1)
			return value, nil
		}
		g.Stats.PeerErrors.Add(1)
		// TODO(bradfitz): log the peer's error? keep
		// log of the past few for /groupcachez? It's
		// probably boring (normal task movement), so not
		// worth logging I imagine.
	}
	value, err = g.getLocally(ctx, key, dest)
	if err != nil {
		g.Stats.LocalLoadErrs.Add(1)
		return nil, err
	}
	g.Stats.LocalLoads.Add(1)
	destPopulated = true // only one caller of load gets this return value
	g.populateCache(key, value, &g.mainCache)
	return value, nil
})