refactor: Improve handshake resiliency of peer #95

Merged · 23 commits · Jan 31, 2022
Changes from 1 commit
Properly close ICE gatherer
kylecarbs committed Jan 30, 2022
commit 09d84424558b3aa5ccf66d39d4dbab8a0f34a05f
2 changes: 1 addition & 1 deletion .github/workflows/coder.yaml
@@ -158,7 +158,7 @@ jobs:
run:
gotestsum --jsonfile="gotests.json" --packages="./..." --
-covermode=atomic -coverprofile="gotests.coverage" -timeout=3m
- -count=3 -race -parallel=2
+ -count=3 -race -short -parallel=2

- name: Test with PostgreSQL Database
if: runner.os == 'Linux'
5 changes: 5 additions & 0 deletions database/migrate_test.go
@@ -20,6 +20,11 @@ func TestMain(m *testing.M) {
func TestMigrate(t *testing.T) {
t.Parallel()

+ if testing.Short() {
+ 	t.Skip()
+ 	return
+ }

t.Run("Once", func(t *testing.T) {
t.Parallel()
connection, closeFn, err := postgres.Open()
5 changes: 5 additions & 0 deletions database/postgres/postgres_test.go
@@ -21,6 +21,11 @@ func TestMain(m *testing.M) {
func TestPostgres(t *testing.T) {
t.Parallel()

+ if testing.Short() {
+ 	t.Skip()
+ 	return
+ }

connect, close, err := postgres.Open()
require.NoError(t, err)
defer close()
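For context: the `-short` flag added to the gotestsum invocation above is forwarded to `go test` (gotestsum passes everything after `--` through to the test binary), and it is what the new `testing.Short()` guards react to, so the PostgreSQL-backed tests are skipped in the unit-test job while the separate "Test with PostgreSQL Database" step presumably still exercises them. A minimal sketch of that interaction; the package and test name here are illustrative, not part of this change:

	package database_test

	import "testing"

	// Run as: go test -short ./...
	// The -short flag makes testing.Short() return true inside the test binary;
	// gotestsum forwards the flags after "--" in the workflow to go test.
	func TestNeedsPostgres(t *testing.T) {
		t.Parallel()

		if testing.Short() {
			// Keep database-backed work off the fast CI path.
			t.Skip("requires PostgreSQL")
			return // defensive: t.Skip already stops the test via runtime.Goexit
		}

		// ... open a real PostgreSQL instance and exercise it ...
	}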
4 changes: 2 additions & 2 deletions peer/channel.go
@@ -141,9 +141,9 @@ func (c *Channel) init() {
// A DataChannel can disconnect multiple times, so this needs to loop.
for {
select {
- case <-c.closed:
+ case <-c.conn.closedRTC:
// If this channel was closed, there's no need to close again.
- return
+ err = c.conn.closeError
case <-c.conn.Closed():
// If the RTC connection closed with an error, this channel
// should end with the same one.
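The conn.go changes below repeat one small pattern in several Pion callbacks: a signal channel (closedRTC, closedICE) must be closed exactly once even though Pion may invoke a state-change handler multiple times, so each close site takes a dedicated mutex and checks the channel with a select-with-default before calling close. A standalone sketch of that pattern, using hypothetical names (closeSignal, fire, wait) that are not part of the diff:

	package peer

	import "sync"

	// closeSignal mirrors the closedRTC/closedICE handling: callbacks may fire
	// any number of times, but the channel is closed exactly once, and waiters
	// simply receive from it.
	type closeSignal struct {
		mu sync.Mutex
		ch chan struct{}
	}

	func newCloseSignal() *closeSignal {
		return &closeSignal{ch: make(chan struct{})}
	}

	// fire closes the channel if it is still open. The select-with-default
	// detects an already-closed channel; the mutex keeps two concurrent
	// callbacks from both reaching close(), which would panic.
	func (s *closeSignal) fire() {
		s.mu.Lock()
		defer s.mu.Unlock()
		select {
		case <-s.ch:
			// Already closed: nothing to do.
		default:
			close(s.ch)
		}
	}

	// wait blocks until fire has been called at least once.
	func (s *closeSignal) wait() { <-s.ch }

Close() can then wait on these signals to know the Pion handlers have finished, which is what the reordered CloseWithError at the end of this diff relies on.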
138 changes: 88 additions & 50 deletions peer/conn.go
@@ -108,11 +108,13 @@ type Conn struct {
// Determines whether this connection will send the offer or the answer.
offerrer bool

closed chan struct{}
closedRTC chan struct{}
closedICE chan struct{}
closeMutex sync.Mutex
closeError error
closed chan struct{}
closedRTC chan struct{}
closedRTCMutex sync.Mutex
closedICE chan struct{}
closedICEMutex sync.Mutex
closeMutex sync.Mutex
closeError error

dcOpenChannel chan *webrtc.DataChannel
dcDisconnectChannel chan struct{}
@@ -128,7 +130,6 @@ type Conn struct {

pendingCandidatesToSend []webrtc.ICECandidateInit
pendingCandidatesToSendMutex sync.Mutex
pendingCandidatesFlushed bool

pingChannelID uint16
pingEchoChannelID uint16
@@ -155,6 +156,8 @@ func (c *Conn) init() error {
slog.F("state", iceConnectionState))

if iceConnectionState == webrtc.ICEConnectionStateClosed {
c.closedICEMutex.Lock()
defer c.closedICEMutex.Unlock()
select {
case <-c.closedICE:
default:
@@ -169,24 +172,36 @@ func (c *Conn) init() error {
c.rtc.OnICEGatheringStateChange(func(iceGatherState webrtc.ICEGathererState) {
c.opts.Logger.Debug(context.Background(), "ice gathering state updated",
slog.F("state", iceGatherState))

if iceGatherState == webrtc.ICEGathererStateClosed {
c.closedICEMutex.Lock()
defer c.closedICEMutex.Unlock()
select {
case <-c.closedICE:
default:
close(c.closedICE)
}
}
})
c.rtc.OnICECandidate(func(iceCandidate *webrtc.ICECandidate) {
if iceCandidate == nil {
return
}
c.pendingCandidatesToSendMutex.Lock()
defer c.pendingCandidatesToSendMutex.Unlock()
if !c.pendingCandidatesFlushed {
c.pendingCandidatesToSend = append(c.pendingCandidatesToSend, iceCandidate.ToJSON())
c.opts.Logger.Debug(context.Background(), "buffering local candidate")
return
}
c.opts.Logger.Debug(context.Background(), "sending local candidate")
select {
case <-c.closed:
break
case c.localCandidateChannel <- iceCandidate.ToJSON():
}
go func() {
c.pendingCandidatesToSendMutex.Lock()
defer c.pendingCandidatesToSendMutex.Unlock()
if c.rtc.RemoteDescription() == nil {
c.pendingCandidatesToSend = append(c.pendingCandidatesToSend, iceCandidate.ToJSON())
c.opts.Logger.Debug(context.Background(), "buffering local candidate")
return
}
c.opts.Logger.Debug(context.Background(), "sending local candidate")
select {
case <-c.closed:
break
case c.localCandidateChannel <- iceCandidate.ToJSON():
}
}()
})
c.rtc.OnDataChannel(func(dc *webrtc.DataChannel) {
select {
@@ -197,6 +212,11 @@ func (c *Conn) init() error {
}
})
c.rtc.OnConnectionStateChange(func(peerConnectionState webrtc.PeerConnectionState) {
if c.isClosed() {
return
}
// Pion executes this handler multiple times in a rare condition.
// This prevents logging from happening after close.
c.opts.Logger.Debug(context.Background(), "rtc connection updated",
slog.F("state", peerConnectionState))

@@ -215,18 +235,20 @@ func (c *Conn) init() error {
default:
}
}
}

if peerConnectionState == webrtc.PeerConnectionStateClosed {
case webrtc.PeerConnectionStateClosed:
// Pion executes event handlers after close is called
// on the RTC connection. This ensures our Close()
// handler properly cleans up before returning.
//
// Pion can execute this multiple times, so we check
// if it's open before closing.
c.closedRTCMutex.Lock()
defer c.closedRTCMutex.Unlock()
select {
case <-c.closedRTC:
c.opts.Logger.Debug(context.Background(), "closedRTC channel already closed")
default:
c.opts.Logger.Debug(context.Background(), "closedRTC channel closing...")
close(c.closedRTC)
}
}
@@ -248,14 +270,18 @@ func (c *Conn) init() error {
// uses trickle ICE by default. See: https://webrtchacks.com/trickle-ice/
func (c *Conn) negotiate() {
c.opts.Logger.Debug(context.Background(), "negotiating")
c.remoteSessionDescriptionMutex.Lock()
defer c.remoteSessionDescriptionMutex.Unlock()

if c.offerrer {
offer, err := c.rtc.CreateOffer(&webrtc.OfferOptions{})
if err != nil {
_ = c.CloseWithError(xerrors.Errorf("create offer: %w", err))
return
}
c.closeMutex.Lock()
err = c.rtc.SetLocalDescription(offer)
c.closeMutex.Unlock()
if err != nil {
_ = c.CloseWithError(xerrors.Errorf("set local description: %w", err))
return
@@ -266,25 +292,23 @@ func (c *Conn) negotiate() {
return
case c.localSessionDescriptionChannel <- offer:
}
c.opts.Logger.Debug(context.Background(), "sent offer")
}

var sessionDescription webrtc.SessionDescription
c.opts.Logger.Debug(context.Background(), "awaiting remote description...")
select {
case <-c.closed:
return
case sessionDescription = <-c.remoteSessionDescriptionChannel:
}

// This prevents candidates from being added while
// the remote description is being set.
c.remoteSessionDescriptionMutex.Lock()
c.opts.Logger.Debug(context.Background(), "setting remote description")
err := c.rtc.SetRemoteDescription(sessionDescription)
if err != nil {
_ = c.CloseWithError(xerrors.Errorf("set remote description (closed %v): %w", c.isClosed(), err))
return
}
c.remoteSessionDescriptionMutex.Unlock()

if !c.offerrer {
answer, err := c.rtc.CreateAnswer(&webrtc.AnswerOptions{})
@@ -306,31 +330,44 @@ func (c *Conn) negotiate() {
return
case c.localSessionDescriptionChannel <- answer:
}
c.opts.Logger.Debug(context.Background(), "sent answer")
}

c.pendingCandidatesToSendMutex.Lock()
defer c.pendingCandidatesToSendMutex.Unlock()
for _, pendingCandidate := range c.pendingCandidatesToSend {
select {
case <-c.closed:
return
case c.localCandidateChannel <- pendingCandidate:
go func() {
c.pendingCandidatesToSendMutex.Lock()
defer c.pendingCandidatesToSendMutex.Unlock()
for _, pendingCandidate := range c.pendingCandidatesToSend {
select {
case <-c.closed:
return
case c.localCandidateChannel <- pendingCandidate:
}
c.opts.Logger.Debug(context.Background(), "flushed buffered local candidate")
}
c.opts.Logger.Debug(context.Background(), "flushed buffered local candidate")
}
c.opts.Logger.Debug(context.Background(), "flushed buffered local candidates",
slog.F("count", len(c.pendingCandidatesToSend)),
)
c.pendingCandidatesToSend = make([]webrtc.ICECandidateInit, 0)
c.pendingCandidatesFlushed = true
c.opts.Logger.Debug(context.Background(), "flushed buffered local candidates",
slog.F("count", len(c.pendingCandidatesToSend)),
)
c.pendingCandidatesToSend = make([]webrtc.ICECandidateInit, 0)
}()
}

// AddRemoteCandidate adds a remote candidate to the RTC connection.
func (c *Conn) AddRemoteCandidate(i webrtc.ICECandidateInit) error {
c.remoteSessionDescriptionMutex.Lock()
defer c.remoteSessionDescriptionMutex.Unlock()
c.opts.Logger.Debug(context.Background(), "accepting candidate", slog.F("length", len(i.Candidate)))
return c.rtc.AddICECandidate(i)
func (c *Conn) AddRemoteCandidate(i webrtc.ICECandidateInit) {
if c.isClosed() {
return
}
go func() {
c.remoteSessionDescriptionMutex.Lock()
defer c.remoteSessionDescriptionMutex.Unlock()
if c.isClosed() {
return
}
c.opts.Logger.Debug(context.Background(), "accepting candidate", slog.F("length", len(i.Candidate)))
err := c.rtc.AddICECandidate(i)
if err != nil {
_ = c.CloseWithError(xerrors.Errorf("accept candidate: %w", err))
}
}()
}

// SetRemoteSessionDescription sets the remote description for the WebRTC connection.
@@ -528,7 +565,6 @@ func (c *Conn) CloseWithError(err error) error {
} else {
c.closeError = err
}
close(c.closed)

if ch, _ := c.pingChannel(); ch != nil {
_ = ch.closeWithError(c.closeError)
@@ -538,10 +574,6 @@ func (c *Conn) CloseWithError(err error) error {
// closing an already closed connection isn't an issue for us.
_ = c.rtc.Close()

// Waits for all DataChannels to exit before officially labeling as closed.
// All logging, goroutines, and async functionality is cleaned up after this.
c.dcClosedWaitGroup.Wait()

if c.rtc.ConnectionState() != webrtc.PeerConnectionStateNew {
c.opts.Logger.Debug(context.Background(), "waiting for rtc connection close...")
<-c.closedRTC
@@ -552,5 +584,11 @@ func (c *Conn) CloseWithError(err error) error {
<-c.closedICE
}

// Waits for all DataChannels to exit before officially labeling as closed.
// All logging, goroutines, and async functionality is cleaned up after this.
c.dcClosedWaitGroup.Wait()

close(c.closed)
c.opts.Logger.Debug(context.Background(), "closed")
return err
}
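Taken together, the CloseWithError hunks move close(c.closed) from the start of teardown to the very end: the RTC connection is closed first, the handler-driven closedRTC and closedICE signals are awaited, the data-channel waitgroup drains, and only then is the closed channel closed, so handlers and goroutines still observe a live connection while Pion finishes its callbacks. A condensed sketch of that ordering follows; locking, logging, the ping-channel cleanup, and the exact guard around the ICE wait (collapsed in this view) are simplified assumptions, not the verbatim method:

	// Sketch only: condensed from the diff above, not the actual method body.
	func (c *Conn) closeWithErrorSketch(err error) error {
		if c.isClosed() {
			return c.closeError
		}
		c.closeError = err

		// Closing the RTC connection triggers Pion's state-change handlers,
		// which in turn close closedRTC and closedICE exactly once.
		_ = c.rtc.Close()

		if c.rtc.ConnectionState() != webrtc.PeerConnectionStateNew {
			<-c.closedRTC // wait for PeerConnectionStateClosed
		}
		// Assumed guard: only wait for the gatherer if it ever started.
		if c.rtc.ICEGatheringState() != webrtc.ICEGatheringStateNew {
			<-c.closedICE // wait for ICEGathererStateClosed
		}

		// All DataChannels must exit before the connection is labeled closed.
		c.dcClosedWaitGroup.Wait()

		// Only now do goroutines selecting on c.closed unblock.
		close(c.closed)
		return err
	}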