
chore: add support for blockEndpoints to configMaps #11512


Merged: 1 commit on Jan 11, 2024
22 changes: 18 additions & 4 deletions tailnet/configmaps.go
@@ -207,7 +207,11 @@ func (c *configMaps) netMapLocked() *netmap.NetworkMap {
 func (c *configMaps) peerConfigLocked() []*tailcfg.Node {
 	out := make([]*tailcfg.Node, 0, len(c.peers))
 	for _, p := range c.peers {
-		out = append(out, p.node.Clone())
+		n := p.node.Clone()
+		if c.blockEndpoints {
+			n.Endpoints = nil
+		}
+		out = append(out, n)
 	}
 	return out
 }
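
Clearing Endpoints on the clone, rather than on the stored peer, means the original endpoint list survives in c.peers and flows into the netmap again if blocking is later turned off. A minimal self-contained sketch of that clone-then-clear pattern (the Node type below is a stand-in for illustration, not tailcfg.Node):

package main

import "fmt"

// Node is a stand-in for tailcfg.Node, reduced to the field that matters here.
type Node struct {
	Name      string
	Endpoints []string
}

// Clone returns a deep copy, so callers can mutate the result freely.
func (n *Node) Clone() *Node {
	c := *n
	c.Endpoints = append([]string(nil), n.Endpoints...)
	return &c
}

func main() {
	stored := &Node{Name: "p1", Endpoints: []string{"192.0.2.1:41641"}}

	// Blocking endpoints: clear them on the clone only.
	out := stored.Clone()
	out.Endpoints = nil

	// The stored node keeps its endpoints, so turning blocking off
	// later restores direct connectivity without re-learning them.
	fmt.Println(len(out.Endpoints), len(stored.Endpoints)) // prints: 0 1
}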
@@ -228,6 +232,19 @@ func (c *configMaps) setAddresses(ips []netip.Prefix) {
 	c.Broadcast()
 }
 
+// setBlockEndpoints sets whether we should block configuring endpoints we learn
+// from peers. It triggers a reconfiguration of the engine if the value changes.
+// nolint: revive
+func (c *configMaps) setBlockEndpoints(blockEndpoints bool) {
+	c.L.Lock()
+	defer c.L.Unlock()
+	if c.blockEndpoints != blockEndpoints {
+		c.netmapDirty = true
+	}
+	c.blockEndpoints = blockEndpoints
+	c.Broadcast()
+}
+
 // derpMapLocked returns the current DERPMap. c.L must be held.
 func (c *configMaps) derpMapLocked() *tailcfg.DERPMap {
 	m := DERPMapFromProto(c.derpMap)
@@ -342,9 +359,6 @@ func (c *configMaps) updatePeerLocked(update *proto.CoordinateResponse_PeerUpdat
 		// to avoid random hangs while we set up the connection again after
 		// inactivity.
 		node.KeepAlive = ok && peerStatus.Active
-		if c.blockEndpoints {
-			node.Endpoints = nil
-		}
 	}
 	switch {
 	case !ok && update.Kind == proto.CoordinateResponse_PeerUpdate_NODE:
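For context on how the setter interacts with the rest of configMaps: the diff shows configMaps using a sync.Cond (c.L, c.Broadcast), with configuration state pushed to the engine whenever a dirty flag is set. The sketch below is a pared-down, hypothetical model of that pattern — the names mirror the diff, but the constructor, configLoop, and closed field are stand-ins, not Coder's actual code. It illustrates why setting the same value twice reconfigures the engine only once:

package main

import (
	"fmt"
	"sync"
)

// configMaps is a stand-in reduced to the dirty-flag + Broadcast pattern.
type configMaps struct {
	sync.Cond
	netmapDirty    bool
	blockEndpoints bool
	closed         bool
}

func newConfigMaps() *configMaps {
	c := &configMaps{}
	c.L = &sync.Mutex{}
	return c
}

// setBlockEndpoints mirrors the PR: it marks the netmap dirty only when
// the value actually changes, then wakes the configuration loop.
func (c *configMaps) setBlockEndpoints(block bool) {
	c.L.Lock()
	defer c.L.Unlock()
	if c.blockEndpoints != block {
		c.netmapDirty = true
	}
	c.blockEndpoints = block
	c.Broadcast()
}

// configLoop models the goroutine that would re-push state to the engine
// whenever the dirty flag is set, draining it before exiting.
func (c *configMaps) configLoop() {
	c.L.Lock()
	defer c.L.Unlock()
	for {
		if c.netmapDirty {
			c.netmapDirty = false
			fmt.Println("reconfigure engine, blockEndpoints =", c.blockEndpoints)
		}
		if c.closed {
			return
		}
		c.Wait()
	}
}

func main() {
	c := newConfigMaps()
	done := make(chan struct{})
	go func() {
		defer close(done)
		c.configLoop()
	}()

	c.setBlockEndpoints(true) // value changes: marks dirty, reconfigures once
	c.setBlockEndpoints(true) // same value: Broadcast fires, but nothing is dirty

	c.L.Lock()
	c.closed = true
	c.Broadcast()
	c.L.Unlock()
	<-done
}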
87 changes: 87 additions & 0 deletions tailnet/configmaps_internal_test.go
@@ -484,6 +484,93 @@ func TestConfigMaps_updatePeers_lost_and_found(t *testing.T) {
 	_ = testutil.RequireRecvCtx(ctx, t, done)
 }
 
+func TestConfigMaps_setBlockEndpoints_different(t *testing.T) {
+	t.Parallel()
+	ctx := testutil.Context(t, testutil.WaitShort)
+	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
+	fEng := newFakeEngineConfigurable()
+	nodePrivateKey := key.NewNode()
+	nodeID := tailcfg.NodeID(5)
+	discoKey := key.NewDisco()
+	uut := newConfigMaps(logger, fEng, nodeID, nodePrivateKey, discoKey.Public(), nil)
+	defer uut.close()
+
+	p1ID := uuid.MustParse("10000000-0000-0000-0000-000000000000")
+	p1Node := newTestNode(1)
+	p1n, err := NodeToProto(p1Node)
+	require.NoError(t, err)
+	p1tcn, err := uut.protoNodeToTailcfg(p1n)
+	require.NoError(t, err)
+	p1tcn.KeepAlive = true
+
+	// Given: peer already exists
+	uut.L.Lock()
+	uut.peers[p1ID] = &peerLifecycle{
+		peerID:        p1ID,
+		node:          p1tcn,
+		lastHandshake: time.Date(2024, 1, 7, 12, 0, 10, 0, time.UTC),
+	}
+	uut.L.Unlock()
+
+	uut.setBlockEndpoints(true)
+
+	nm := testutil.RequireRecvCtx(ctx, t, fEng.setNetworkMap)
+	r := testutil.RequireRecvCtx(ctx, t, fEng.reconfig)
+	require.Len(t, nm.Peers, 1)
+	require.Len(t, nm.Peers[0].Endpoints, 0)
+	require.Len(t, r.wg.Peers, 1)
+
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		uut.close()
+	}()
+	_ = testutil.RequireRecvCtx(ctx, t, done)
+}
+
+func TestConfigMaps_setBlockEndpoints_same(t *testing.T) {
+	t.Parallel()
+	ctx := testutil.Context(t, testutil.WaitShort)
+	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
+	fEng := newFakeEngineConfigurable()
+	nodePrivateKey := key.NewNode()
+	nodeID := tailcfg.NodeID(5)
+	discoKey := key.NewDisco()
+	uut := newConfigMaps(logger, fEng, nodeID, nodePrivateKey, discoKey.Public(), nil)
+	defer uut.close()
+
+	p1ID := uuid.MustParse("10000000-0000-0000-0000-000000000000")
+	p1Node := newTestNode(1)
+	p1n, err := NodeToProto(p1Node)
+	require.NoError(t, err)
+	p1tcn, err := uut.protoNodeToTailcfg(p1n)
+	require.NoError(t, err)
+	p1tcn.KeepAlive = true
+
+	// Given: peer already exists && blockEndpoints set to true
+	uut.L.Lock()
+	uut.peers[p1ID] = &peerLifecycle{
+		peerID:        p1ID,
+		node:          p1tcn,
+		lastHandshake: time.Date(2024, 1, 7, 12, 0, 10, 0, time.UTC),
+	}
+	uut.blockEndpoints = true
+	uut.L.Unlock()
+
+	// Then: we don't reconfigure (asserted for the remainder of the test)
+	requireNeverConfigures(ctx, t, uut)
+
+	// When: we set blockEndpoints to true again
+	uut.setBlockEndpoints(true)
+
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		uut.close()
+	}()
+	_ = testutil.RequireRecvCtx(ctx, t, done)
+}
+
 func expectStatusWithHandshake(
 	ctx context.Context, t testing.TB, fEng *fakeEngineConfigurable, k key.NodePublic, lastHandshake time.Time,
 ) <-chan struct{} {
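
To exercise just the new tests locally, standard Go tooling suffices (run from the repository root; -run, -race, and -count are stock go test flags):

go test ./tailnet/ -run 'TestConfigMaps_setBlockEndpoints' -race -count=1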