4 changes: 2 additions & 2 deletions .github/workflows/golangci-lint.yml
@@ -16,9 +16,9 @@ jobs:
       uses: actions/checkout@v3
 
     - name: Set up Go
-      uses: actions/setup-go@v2
+      uses: actions/setup-go@v3
       with:
-        go-version: 1.18
+        go-version-file: 'go.mod'
 
     - name: golangci-lint
       uses: golangci/golangci-lint-action@v3
4 changes: 2 additions & 2 deletions .github/workflows/goreleaser.yml
@@ -19,9 +19,9 @@ jobs:
         echo "VERSION=$(git describe --tags $(git rev-list --tags --max-count=1))" >> $GITHUB_ENV
 
     - name: Set up Go
-      uses: actions/setup-go@v2
+      uses: actions/setup-go@v3
      with:
-        go-version: 1.18
+        go-version-file: 'go.mod'
 
     - name: Run GoReleaser
       uses: goreleaser/goreleaser-action@v2
5 changes: 3 additions & 2 deletions .github/workflows/test.yml
@@ -11,9 +11,10 @@ on:
 jobs:
   unittests:
     runs-on: ubuntu-latest
-    container: projecteru2/footstone:latest
-
     steps:
       - uses: actions/checkout@v3
+      - uses: actions/setup-go@v3
+        with:
+          go-version-file: 'go.mod'
       - name: unit tests
         run: make test
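Note: all three workflows now take the Go version from go.mod via setup-go@v3's go-version-file option, so future toolchain bumps only need to touch go.mod. test.yml also drops the projecteru2/footstone build container, presumably because a stock ubuntu-latest runner plus setup-go suffices once the toolchain is pinned by the module file.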
2 changes: 1 addition & 1 deletion Makefile
@@ -21,7 +21,7 @@ deps:
 	go mod vendor
 
 binary:
-	CGO_ENABLED=0 go build -ldflags "$(GO_LDFLAGS)" -gcflags=all=-G=3 -o eru-core
+	CGO_ENABLED=0 go build -ldflags "$(GO_LDFLAGS)" -o eru-core
 
 build: deps binary
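Note: -gcflags=all=-G=3 was the opt-in switch for the generics-aware type checker in pre-1.18 toolchains; it became the default in Go 1.18, so the flag is presumably dropped here as redundant once the module targets a newer Go.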
42 changes: 13 additions & 29 deletions client/utils/servicepusher.go
@@ -18,8 +18,8 @@ import (
 type EndpointPusher struct {
     sync.Mutex
     chans               []chan []string
-    pendingEndpoints    hashmap.HashMap
-    availableEndpoints  hashmap.HashMap
+    pendingEndpoints    *hashmap.Map[string, context.CancelFunc]
+    availableEndpoints  *hashmap.Map[string, struct{}]
 }
 
 // NewEndpointPusher .
@@ -42,42 +42,30 @@ func (p *EndpointPusher) delOutdated(endpoints []string) {
     p.Lock()
     defer p.Unlock()
 
-    for kv := range p.pendingEndpoints.Iter() {
-        endpoint, ok := kv.Key.(string)
-        if !ok {
-            log.Error("[EruResolver] failed to cast key while ranging pendingEndpoints")
-            continue
-        }
-        cancel, ok := kv.Value.(context.CancelFunc)
-        if !ok {
-            log.Error("[EruResolver] failed to cast value while ranging pendingEndpoints")
-        }
+    p.pendingEndpoints.Range(func(endpoint string, cancel context.CancelFunc) bool {
         if !slices.Contains(endpoints, endpoint) {
             cancel()
             p.pendingEndpoints.Del(endpoint)
             log.Debugf(nil, "[EruResolver] pending endpoint deleted: %s", endpoint) //nolint
         }
-    }
+        return true
+    })
 
-    for kv := range p.availableEndpoints.Iter() {
-        endpoint, ok := kv.Key.(string)
-        if !ok {
-            log.Error("[EruResolver] failed to cast key while ranging availableEndpoints")
-            continue
-        }
+    p.availableEndpoints.Range(func(endpoint string, _ struct{}) bool {
         if !slices.Contains(endpoints, endpoint) {
             p.availableEndpoints.Del(endpoint)
             log.Debugf(nil, "[EruResolver] available endpoint deleted: %s", endpoint) //nolint
         }
-    }
+        return true
+    })
 }
 
 func (p *EndpointPusher) addCheck(endpoints []string) {
     for _, endpoint := range endpoints {
-        if _, ok := p.pendingEndpoints.GetStringKey(endpoint); ok {
+        if _, ok := p.pendingEndpoints.Get(endpoint); ok {
             continue
         }
-        if _, ok := p.availableEndpoints.GetStringKey(endpoint); ok {
+        if _, ok := p.availableEndpoints.Get(endpoint); ok {
             continue
         }
 
@@ -139,14 +127,10 @@ func (p *EndpointPusher) checkReachability(host string) (err error) {
 
 func (p *EndpointPusher) pushEndpoints() {
     endpoints := []string{}
-    for kv := range p.availableEndpoints.Iter() {
-        endpoint, ok := kv.Key.(string)
-        if !ok {
-            log.Error("[EruResolver] failed to cast key while ranging availableEndpoints")
-            continue
-        }
+    p.availableEndpoints.Range(func(endpoint string, _ struct{}) bool {
         endpoints = append(endpoints, endpoint)
-    }
+        return true
+    })
     for _, ch := range p.chans {
         ch <- endpoints
     }
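This file shows the pattern repeated throughout the PR: cornelk/hashmap v1.0.8 is generic, so v1.0.1's channel-based Iter() loops, GetStringKey/GetUintKey accessors, and interface{} type assertions collapse into typed Get/Del calls and Range callbacks. A minimal self-contained sketch of the v1.0.8 API as used here (Get/Del/Range appear in the diff; Set is assumed from the same release):

```go
package main

import (
	"fmt"

	"github.com/cornelk/hashmap"
)

func main() {
	// v1.0.8: keys and values are typed, so no interface{} casts are needed.
	m := hashmap.New[string, int]()
	m.Set("a", 1) // Set is assumed from the v1.0.8 API; the diff only shows Get/Del/Range.
	m.Set("b", 2)

	// Get returns the value with its concrete type.
	if v, ok := m.Get("a"); ok {
		fmt.Println("a =", v)
	}

	// Range replaces the old Iter() channel; returning false stops iteration.
	m.Range(func(k string, v int) bool {
		fmt.Println(k, "=", v)
		return true
	})

	m.Del("b")
}
```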
9 changes: 4 additions & 5 deletions cluster/calcium/create.go
@@ -175,7 +175,7 @@ func (c *Calcium) doDeployWorkloads(ctx context.Context,
 
     wg := sync.WaitGroup{}
     wg.Add(len(deployMap))
-    syncRollbackMap := hashmap.HashMap{}
+    syncRollbackMap := hashmap.New[string, []int]()
 
     seq := 0
     rollbackMap := make(map[string][]int)
@@ -198,11 +198,10 @@
     }
 
     wg.Wait()
-    for kv := range syncRollbackMap.Iter() {
-        nodename := kv.Key.(string)
-        indices := kv.Value.([]int)
+    syncRollbackMap.Range(func(nodename string, indices []int) bool {
         rollbackMap[nodename] = indices
-    }
+        return true
+    })
     log.Debugf(ctx, "[Calcium.doDeployWorkloads] rollbackMap: %+v", rollbackMap)
     if len(rollbackMap) != 0 {
         err = types.ErrRollbackMapIsNotEmpty
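For illustration, a sketch of the concurrency pattern this hunk keeps (names simplified from the PR): deploy goroutines record per-node rollback indices in the lock-free map while the WaitGroup is live, and after wg.Wait() the results are snapshotted into a plain map:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/cornelk/hashmap"
)

func main() {
	syncRollbackMap := hashmap.New[string, []int]()
	var wg sync.WaitGroup

	// Concurrent writers, one key per node; the map needs no external lock.
	for _, node := range []string{"node-1", "node-2"} {
		wg.Add(1)
		go func(n string) {
			defer wg.Done()
			syncRollbackMap.Set(n, []int{0, 1})
		}(node)
	}
	wg.Wait()

	// All writers are done, so Range sees a stable snapshot.
	rollbackMap := make(map[string][]int)
	syncRollbackMap.Range(func(nodename string, indices []int) bool {
		rollbackMap[nodename] = indices
		return true
	})
	fmt.Printf("%+v\n", rollbackMap)
}
```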
56 changes: 28 additions & 28 deletions discovery/helium/helium.go
@@ -18,9 +18,10 @@ const interval = 15 * time.Second
 // Helium .
 type Helium struct {
     sync.Once
-    store    store.Store
-    subs     hashmap.HashMap
-    interval time.Duration
+    store     store.Store
+    subs      *hashmap.Map[uint32, entry]
+    interval  time.Duration
+    unsubChan chan uint32
 }
 
 type entry struct {
@@ -31,7 +32,12 @@ type entry struct {
 
 // New .
 func New(config types.GRPCConfig, store store.Store) *Helium {
-    h := &Helium{interval: config.ServiceDiscoveryPushInterval, store: store, subs: hashmap.HashMap{}}
+    h := &Helium{
+        interval:  config.ServiceDiscoveryPushInterval,
+        store:     store,
+        subs:      hashmap.New[uint32, entry](),
+        unsubChan: make(chan uint32),
+    }
     if h.interval < time.Second {
         h.interval = interval
     }
@@ -57,18 +63,7 @@ func (h *Helium) Subscribe(ctx context.Context) (uuid.UUID, <-chan types.Service
 
 // Unsubscribe .
 func (h *Helium) Unsubscribe(id uuid.UUID) {
-    v, ok := h.subs.GetUintKey(uintptr(id.ID()))
-    if !ok {
-        return
-    }
-
-    entry, ok := v.(entry)
-    if !ok {
-        return
-    }
-    entry.cancel()
-    h.subs.Del(id.ID())
-    close(entry.ch)
+    h.unsubChan <- id.ID()
 }
 
 func (h *Helium) start(ctx context.Context) {
@@ -96,33 +91,38 @@ func (h *Helium) start(ctx context.Context) {
                     Addresses: addresses,
                     Interval:  h.interval * 2,
                 }
+
+            case id := <-h.unsubChan:
+                if entry, ok := h.subs.Get(id); ok {
+                    entry.cancel()
+                    h.subs.Del(id)
+                    close(entry.ch)
+                }
+
             case <-ticker.C:
             }
 
             h.dispatch(latestStatus)
         }
     }()
 }
 
 func (h *Helium) dispatch(status types.ServiceStatus) {
-    f := func(kv hashmap.KeyValue) {
+    f := func(key uint32, val entry) {
         defer func() {
             if err := recover(); err != nil {
-                log.Errorf(context.TODO(), "[dispatch] dispatch %v failed, err: %v", kv.Key, err)
+                log.Errorf(context.TODO(), "[dispatch] dispatch %v failed, err: %v", key, err)
             }
         }()
-        e, ok := kv.Value.(entry)
-        if !ok {
-            log.Error("[WatchServiceStatus] failed to cast entry from map")
-            return
-        }
         select {
-        case e.ch <- status:
+        case val.ch <- status:
             return
-        case <-e.ctx.Done():
+        case <-val.ctx.Done():
             return
         }
     }
-    for kv := range h.subs.Iter() {
-        f(kv)
-    }
+    h.subs.Range(func(k uint32, v entry) bool {
+        f(k, v)
        return true
+    })
 }
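The unsubChan change is more than a type migration: previously Unsubscribe closed the subscriber channel from the caller's goroutine, which could race with a concurrent dispatch sending on that same channel (the recover in dispatch hints at exactly that panic). Routing the ID through unsubChan moves removal and close into the same select loop that dispatches, so the two can no longer interleave. A stripped-down sketch of the pattern (types simplified, not the Helium implementation itself):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type broker struct {
	subs      map[uint32]chan string // the event loop is the only writer
	unsubChan chan uint32
}

// Unsubscribe only hands the ID to the loop; it never touches the map.
func (b *broker) Unsubscribe(id uint32) { b.unsubChan <- id }

func (b *broker) start(ctx context.Context) {
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case id := <-b.unsubChan:
			// Removal and close happen here, in the same goroutine that
			// dispatches, so they can never race with a send below.
			if ch, ok := b.subs[id]; ok {
				delete(b.subs, id)
				close(ch)
			}
		case <-ticker.C:
			for _, ch := range b.subs {
				select {
				case ch <- "status":
				default: // skip slow subscribers
				}
			}
		}
	}
}

func main() {
	b := &broker{
		subs:      map[uint32]chan string{1: make(chan string, 1)},
		unsubChan: make(chan uint32),
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go b.start(ctx)

	b.Unsubscribe(1)
	time.Sleep(100 * time.Millisecond)
	fmt.Println("unsubscribed without racing dispatch")
}
```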
11 changes: 6 additions & 5 deletions engine/factory/factory.go
@@ -54,7 +54,7 @@ func (ep engineParams) getCacheKey() string {
 // EngineCache .
 type EngineCache struct {
     cache       *utils.EngineCache
-    keysToCheck hashmap.HashMap
+    keysToCheck *hashmap.Map[uintptr, engineParams]
     pool        *ants.PoolWithFunc
     config      types.Config
 }
@@ -64,7 +64,7 @@ func NewEngineCache(config types.Config) *EngineCache {
     pool, _ := utils.NewPool(config.MaxConcurrency)
     return &EngineCache{
         cache:       utils.NewEngineCache(12*time.Hour, 10*time.Minute),
-        keysToCheck: hashmap.HashMap{},
+        keysToCheck: hashmap.New[uintptr, engineParams](),
         pool:        pool,
         config:      config,
     }
@@ -106,9 +106,10 @@ func (e *EngineCache) CheckAlive(ctx context.Context) {
 
     paramsChan := make(chan engineParams)
     go func() {
-        for kv := range e.keysToCheck.Iter() {
-            paramsChan <- kv.Value.(engineParams)
-        }
+        e.keysToCheck.Range(func(k uintptr, v engineParams) bool {
+            paramsChan <- v
+            return true
+        })
         close(paramsChan)
     }()
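The consumer side of CheckAlive is untouched: Range feeds the snapshot into paramsChan from a producer goroutine, and close(paramsChan) after Range returns gives consumers the same termination signal the old Iter loop provided.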
4 changes: 2 additions & 2 deletions go.mod
@@ -1,12 +1,12 @@
 module github.com/projecteru2/core
 
-go 1.18
+go 1.19
 
 require (
     github.com/CMGS/statsd v0.0.0-20160223095033-48c421b3c1ab
     github.com/alicebob/miniredis/v2 v2.14.3
     github.com/cenkalti/backoff/v4 v4.1.1
-    github.com/cornelk/hashmap v1.0.1
+    github.com/cornelk/hashmap v1.0.8
     github.com/docker/distribution v2.8.0+incompatible
     github.com/docker/docker v20.10.0+incompatible
     github.com/docker/go-connections v0.4.0
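Note: hashmap v1.0.8 is the generics-based rewrite of the library, which is why every call site in this PR gains type parameters; the go 1.19 bump presumably tracks the new dependency's own minimum Go version.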
2 changes: 2 additions & 0 deletions go.sum
@@ -117,6 +117,8 @@ github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cornelk/hashmap v1.0.1 h1:RXGcy29hEdLLV8T6aK4s+BAd4tq4+3Hq50N2GoG0uIg=
 github.com/cornelk/hashmap v1.0.1/go.mod h1:8wbysTUDnwJGrPZ1Iwsou3m+An6sldFrJItjRhfegCw=
+github.com/cornelk/hashmap v1.0.8 h1:nv0AWgw02n+iDcawr5It4CjQIAcdMMKRrs10HOJYlrc=
+github.com/cornelk/hashmap v1.0.8/go.mod h1:RfZb7JO3RviW/rT6emczVuC/oxpdz4UsSB2LJSclR1k=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
2 changes: 1 addition & 1 deletion resources/volume/models/capacity.go
@@ -40,7 +40,7 @@ func (v *Volume) GetNodesDeployCapacity(ctx context.Context, nodes []string, opt
     return capacityInfoMap, total, nil
 }
 
-func (v *Volume) doGetNodeCapacityInfo(ctx context.Context, node string, resourceInfo *types.NodeResourceInfo, opts *types.WorkloadResourceOpts) *types.NodeCapacityInfo {
+func (v *Volume) doGetNodeCapacityInfo(_ context.Context, node string, resourceInfo *types.NodeResourceInfo, opts *types.WorkloadResourceOpts) *types.NodeCapacityInfo {
     capacityInfo := &types.NodeCapacityInfo{
         Node:   node,
         Weight: 1,
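Note: renaming the unused ctx parameter to _ is a lint-level fix, likely to satisfy golangci-lint's unused-parameter checks; the signature is otherwise unchanged.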
4 changes: 2 additions & 2 deletions utils/http.go
@@ -27,7 +27,7 @@ var defaultUnixSockClient = &http.Client{
     Transport: getDefaultUnixSockTransport(),
 }
 
-var httpsClientCache = hashmap.New(32)
+var httpsClientCache = hashmap.New[string, *http.Client]()
 
 // GetHTTPClient returns a HTTP client
 func GetHTTPClient() *http.Client {
@@ -48,7 +48,7 @@ func GetHTTPSClient(ctx context.Context, certPath, name, ca, cert, key string) (
 
     cacheKey := name + SHA256(fmt.Sprintf("%s-%s-%s-%s-%s", certPath, name, ca, cert, key))[:8]
     if httpsClient, ok := httpsClientCache.Get(cacheKey); ok {
-        return httpsClient.(*http.Client), nil
+        return httpsClient, nil
     }
 
     caFile, err := ioutil.TempFile(certPath, fmt.Sprintf("ca-%s", name))
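A minimal sketch of the typed-cache effect of this hunk (helper names hypothetical, not the PR's GetHTTPSClient): with hashmap.New[string, *http.Client](), Get returns a *http.Client directly, so the old httpsClient.(*http.Client) assertion disappears. Note v1.0.8's New takes no size hint, unlike v1.0.1's New(32); a pre-sized map would presumably use the library's sized constructor instead.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/cornelk/hashmap"
)

// Typed cache: lookups come back as *http.Client, no assertion needed.
var clientCache = hashmap.New[string, *http.Client]()

// getClient is a hypothetical helper illustrating the cache-or-create flow.
func getClient(key string) *http.Client {
	if c, ok := clientCache.Get(key); ok {
		return c
	}
	c := &http.Client{}
	clientCache.Set(key, c)
	return c
}

func main() {
	fmt.Println(getClient("k") == getClient("k")) // true: second call is a cache hit
}
```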
20 changes: 14 additions & 6 deletions wal/hydro.go
@@ -17,7 +17,7 @@ const (
 
 // Hydro is the simplest wal implementation.
 type Hydro struct {
-    hashmap.HashMap
+    *hashmap.Map[string, EventHandler]
     store kv.KV
 }
 
@@ -27,7 +27,10 @@ func NewHydro(path string, timeout time.Duration) (*Hydro, error) {
     if err := store.Open(path, fileMode, timeout); err != nil {
         return nil, err
     }
-    return &Hydro{HashMap: hashmap.HashMap{}, store: store}, nil
+    return &Hydro{
+        Map:   hashmap.New[string, EventHandler](),
+        store: store,
+    }, nil
 }
 
 // Close disconnects the kvdb.
@@ -43,9 +46,15 @@ func (h *Hydro) Register(handler EventHandler) {
 // Recover starts a disaster recovery, which will replay all the events.
 func (h *Hydro) Recover(ctx context.Context) {
     ch, _ := h.store.Scan([]byte(eventPrefix))
-
     events := []HydroEvent{}
-    for scanEntry := range ch {
+
+    for {
+        scanEntry, ok := <-ch
+        if !ok {
+            log.Errorf(nil, "[Recover] closed ch") // nolint
+            break
+        }
+
         event, err := h.decodeEvent(scanEntry)
         if err != nil {
             log.Errorf(nil, "[Recover] decode event error: %v", err) // nolint
@@ -123,11 +132,10 @@ func (h *Hydro) recover(ctx context.Context, handler EventHandler, event HydroEv
 }
 
 func (h *Hydro) getEventHandler(eventyp string) (EventHandler, bool) {
-    v, ok := h.GetStringKey(eventyp)
+    handler, ok := h.Map.Get(eventyp)
     if !ok {
         return nil, ok
     }
-    handler, ok := v.(EventHandler)
     return handler, ok
 }
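The Recover loop swaps `for scanEntry := range ch` for an explicit two-value receive so the channel closing can be logged before breaking out; a plain range loop would exit silently. A tiny sketch of the difference, independent of the WAL types:

```go
package main

import "fmt"

func drain(ch <-chan string) {
	for {
		entry, ok := <-ch
		if !ok {
			// A `for entry := range ch` loop would end here silently;
			// the explicit receive lets us log the closure first.
			fmt.Println("scan channel closed")
			return
		}
		fmt.Println("replaying", entry)
	}
}

func main() {
	ch := make(chan string, 2)
	ch <- "event-1"
	ch <- "event-2"
	close(ch)
	drain(ch)
}
```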