forked from urnetwork/connect
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtransport_p2p.go
More file actions
398 lines (332 loc) · 9.21 KB
/
transport_p2p.go
File metadata and controls
398 lines (332 loc) · 9.21 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
package connect
import (
"context"
"io"
"net"
"slices"
"time"
)
// Assumptions about our peer-to-peer connections:
// - a limited transmit buffer that uses semi-reliable delivery as flow control.
// While the transfer client is the ultimate source of reliable delivery,
// we require the p2p connection use semi-reliable delivery to back pressure the transfer rate,
// which propagates through the entire multi-hop stream.
// Without flow control we would have more mismatches in transfer rate
// and retransmits from the transfer clients.
// - disconnect detection. Both peers should be aware when either side disconnects.
// This is typically manifested in clean disconnect messages and heartbeat timeouts.
// - directed initialization. One side of the connection will offer to connect
// and the other side will respond. We assume this in our architecture. However,
// directed is usually a superset of undirected, so this does not prevent an undirected
// initialization either.
// ReadyHeader is the sentinel written once over a new p2p connection to tell
// the peer that the local side of the stream is ready to carry traffic.
// important - changing this will break compatibility with older clients
const ReadyHeader = "rdy"
// DefaultP2pTransportSettings returns the standard p2p transport settings:
// a 1 MiB maximum message size and a 5 second reconnect timeout.
func DefaultP2pTransportSettings() *P2pTransportSettings {
	settings := &P2pTransportSettings{
		MaxMessageSize:   ByteCount(1024 * 1024),
		ReconnectTimeout: 5 * time.Second,
	}
	return settings
}
// PeerType identifies which role a peer plays in a transfer.
type PeerType = string

const (
	// the peer who initiates the transfer
	PeerTypeSource PeerType = "source"
	// the peer who is the destination of the transfer
	PeerTypeDestination PeerType = "destination"
)
// P2pTransportSettings configures p2p transport behavior.
type P2pTransportSettings struct {
	// MaxMessageSize is the largest message, in bytes, that will be written
	// to the connection; larger messages are dropped.
	MaxMessageSize ByteCount
	// ReconnectTimeout is the delay between connection attempts.
	ReconnectTimeout time.Duration
}
// P2pTransport manages the lifecycle of one p2p stream to a peer:
// it sets up the connection, performs the ready-header handshake, and
// attaches send/receive transports to the route managers.
type P2pTransport struct {
	ctx    context.Context
	cancel context.CancelFunc

	client        *Client
	webRtcManager *WebRtcManager

	sendRouteManager    *RouteManager
	receiveRouteManager *RouteManager

	peerId   Id
	streamId Id
	// peerType is the role of `peerId`; the local client plays the complement.
	peerType PeerType

	// sendReady is closed by `run` once the peer's ready header has been read.
	sendReady chan struct{}
	// receiveReady is received from by `run` before it writes the ready header.
	receiveReady chan struct{}

	p2pTransportSettings *P2pTransportSettings
}
// NewP2pTransport creates a transport for the stream `streamId` to `peerId`
// under a child context of `ctx`. It does not start the connection loop;
// `run` is launched separately.
func NewP2pTransport(
	ctx context.Context,
	client *Client,
	webRtcManager *WebRtcManager,
	sendRouteManager *RouteManager,
	receiveRouteManager *RouteManager,
	peerId Id,
	streamId Id,
	// this is the peer type of `peerId`. The current client is the complement.
	peerType PeerType,
	sendReady chan struct{},
	receiveReady chan struct{},
	p2pTransportSettings *P2pTransportSettings,
) *P2pTransport {
	cancelCtx, cancel := context.WithCancel(ctx)
	return &P2pTransport{
		ctx:                  cancelCtx,
		cancel:               cancel,
		client:               client,
		webRtcManager:        webRtcManager,
		sendRouteManager:     sendRouteManager,
		receiveRouteManager:  receiveRouteManager,
		peerId:               peerId,
		streamId:             streamId,
		peerType:             peerType,
		sendReady:            sendReady,
		receiveReady:         receiveReady,
		p2pTransportSettings: p2pTransportSettings,
	}
}
// run drives the p2p connection lifecycle: establish a connection (retrying
// dial failures with a reconnect timeout), perform the ready-header handshake,
// and attach send/receive transports to the route managers.
// Note: the connection handler goroutines each `defer self.cancel()`, so once
// a connection has been established, any teardown cancels the whole transport
// context and `run` exits rather than reconnecting — only dial failures retry.
func (self *P2pTransport) run() {
	defer self.cancel()
	for {
		// TODO using net.Conn as a stand in for the actual interface
		reconnect := NewReconnect(self.p2pTransportSettings.ReconnectTimeout)
		var conn net.Conn
		var err error
		// note, one side of the P2P connection will be driving the setup process (active).
		// We arbitrarily choose the sender (peer is destination) as active.
		switch self.peerType {
		case PeerTypeDestination:
			conn, err = self.webRtcManager.NewP2pConnActive(self.ctx, self.peerId, self.streamId)
		case PeerTypeSource:
			conn, err = self.webRtcManager.NewP2pConnPassive(self.ctx, self.peerId, self.streamId)
		default:
			// unknown peer type
			return
		}
		if err != nil {
			// dial failed: wait out the reconnect timeout, then retry
			select {
			case <-self.ctx.Done():
				return
			case <-reconnect.After():
			}
			continue
		}
		// at this point, the connection should be able to ping the other side
		// now we wait for the entire stream to be ready by propagating the `ReadyHeader`
		c := func() {
			defer conn.Close()
			handleCtx, handleCancel := context.WithCancel(self.ctx)
			defer handleCancel()
			// receive side: wait until the local receive path is ready, tell
			// the peer by writing the ready header, then attach the receive
			// transport until this connection is torn down.
			go func() {
				defer self.cancel()
				select {
				case <-handleCtx.Done():
					return
				case <-self.receiveReady:
				}
				_, err := conn.Write([]byte(ReadyHeader))
				if err != nil {
					return
				}
				t, route := NewP2pReceiveTransport(handleCtx, handleCancel, conn, self.streamId, self.p2pTransportSettings)
				self.receiveRouteManager.UpdateTransport(t, []Route{route})
				defer self.receiveRouteManager.RemoveTransport(t)
				// block until the connection is torn down
				select {
				case <-handleCtx.Done():
					return
				}
			}()
			// send side: read and verify the peer's ready header, signal
			// `sendReady`, then attach the send transport until teardown.
			go func() {
				defer self.cancel()
				// non-blocking check that the connection is still live
				select {
				case <-handleCtx.Done():
					return
				default:
				}
				header := make([]byte, len(ReadyHeader))
				_, err := io.ReadFull(conn, header)
				if err != nil {
					return
				}
				if !slices.Equal(header, []byte(ReadyHeader)) {
					return
				}
				// NOTE(review): this close would panic if it ever ran twice;
				// it is reached at most once per transport because any
				// connection teardown cancels self.ctx (see defer above) and
				// ends the outer loop — confirm this invariant holds for all
				// callers sharing `sendReady`.
				close(self.sendReady)
				t, route := NewP2pSendTransport(handleCtx, handleCancel, conn, self.streamId, self.p2pTransportSettings)
				self.sendRouteManager.UpdateTransport(t, []Route{route})
				defer self.sendRouteManager.RemoveTransport(t)
				// block until the connection is torn down
				select {
				case <-handleCtx.Done():
					return
				}
			}()
			// block until either side cancels the handle context
			select {
			case <-handleCtx.Done():
				return
			}
		}
		// restart the reconnect timer just before handling the connection, so
		// the wait below overlaps the connection's lifetime
		reconnect = NewReconnect(self.p2pTransportSettings.ReconnectTimeout)
		c()
		select {
		case <-self.ctx.Done():
			return
		case <-reconnect.After():
		}
	}
}
// P2pSendTransport pumps transfer frames from a route channel onto a p2p
// connection for a single stream.
type P2pSendTransport struct {
	// transportId uniquely identifies this transport instance.
	transportId Id
	ctx         context.Context
	cancel      context.CancelFunc
	conn        net.Conn
	streamId    Id
	// send is the route channel; frames received here are written to conn.
	send chan []byte

	p2pTransportSettings *P2pTransportSettings
}
// NewP2pSendTransport creates a send transport over `conn` for `streamId`,
// starts its run loop, and returns the transport together with the route
// channel that callers write transfer frames into.
func NewP2pSendTransport(
	ctx context.Context,
	cancel context.CancelFunc,
	conn net.Conn,
	streamId Id,
	p2pTransportSettings *P2pTransportSettings,
) (Transport, Route) {
	route := make(chan []byte)
	transport := &P2pSendTransport{
		transportId:          NewId(),
		ctx:                  ctx,
		cancel:               cancel,
		conn:                 conn,
		streamId:             streamId,
		send:                 route,
		p2pTransportSettings: p2pTransportSettings,
	}
	go transport.run()
	return transport, route
}
// run pumps frames from the send channel to the connection until the context
// is canceled, the channel is closed, or a write fails. Frames larger than
// MaxMessageSize are dropped.
func (self *P2pSendTransport) run() {
	defer self.cancel()
	for {
		var frame []byte
		var ok bool
		select {
		case <-self.ctx.Done():
			return
		case frame, ok = <-self.send:
		}
		if !ok {
			// send channel closed
			return
		}
		if self.p2pTransportSettings.MaxMessageSize < ByteCount(len(frame)) {
			// drop it
			// FIXME log
			continue
		}
		if _, err := self.conn.Write(frame); err != nil {
			return
		}
	}
}
// TransportId returns the unique id of this transport instance.
func (self *P2pSendTransport) TransportId() Id {
	return self.transportId
}

// lower priority takes precedence
func (self *P2pSendTransport) Priority() int {
	return 0
}

// Weight returns the relative share of traffic this transport should carry.
func (self *P2pSendTransport) Weight() float32 {
	return 1.0
}

// CanEvalRouteWeight reports whether RouteWeight can be evaluated.
// Always true for this transport.
func (self *P2pSendTransport) CanEvalRouteWeight(stats *RouteStats, remainingStats map[Transport]*RouteStats) bool {
	return true
}

// RouteWeight returns a constant weight; the stats are not consulted.
func (self *P2pSendTransport) RouteWeight(stats *RouteStats, remainingStats map[Transport]*RouteStats) float32 {
	return 1.0
}

// MatchesSend reports whether this transport can send to `destination`:
// only paths on this transport's stream.
func (self *P2pSendTransport) MatchesSend(destination TransferPath) bool {
	return destination.StreamId == self.streamId
}

// MatchesReceive always returns false: this transport is send-only.
func (self *P2pSendTransport) MatchesReceive(destination TransferPath) bool {
	return false
}

// Downgrade cancels this transport when the downgraded source is on its stream.
func (self *P2pSendTransport) Downgrade(source TransferPath) {
	if source.StreamId == self.streamId {
		self.cancel()
	}
}
// P2pReceiveTransport reads transfer frames from a p2p connection and
// forwards them onto a route channel for a single stream.
type P2pReceiveTransport struct {
	// transportId uniquely identifies this transport instance.
	transportId Id
	ctx         context.Context
	cancel      context.CancelFunc
	conn        net.Conn
	streamId    Id
	// receive is the route channel; frames read from conn are sent here.
	receive chan []byte

	p2pTransportSettings *P2pTransportSettings
}
// NewP2pReceiveTransport creates a receive transport over `conn` for
// `streamId`, starts its run loop, and returns the transport together with
// the route channel that incoming transfer frames are delivered on.
func NewP2pReceiveTransport(
	ctx context.Context,
	cancel context.CancelFunc,
	conn net.Conn,
	streamId Id,
	p2pTransportSettings *P2pTransportSettings,
) (Transport, Route) {
	route := make(chan []byte)
	transport := &P2pReceiveTransport{
		transportId:          NewId(),
		ctx:                  ctx,
		cancel:               cancel,
		conn:                 conn,
		streamId:             streamId,
		receive:              route,
		p2pTransportSettings: p2pTransportSettings,
	}
	go transport.run()
	return transport, route
}
// run reads from the connection into a reusable buffer of MaxMessageSize and
// forwards a copy of each non-empty read to the receive channel. Returns on
// context cancellation or read error.
func (self *P2pReceiveTransport) run() {
	defer self.cancel()
	buffer := make([]byte, self.p2pTransportSettings.MaxMessageSize)
	for {
		// non-blocking liveness check before each read
		select {
		case <-self.ctx.Done():
			return
		default:
		}
		n, err := self.conn.Read(buffer)
		if err != nil {
			return
		}
		if n <= 0 {
			continue
		}
		// copy out: the buffer is reused by the next read
		frame := append([]byte{}, buffer[:n]...)
		select {
		case <-self.ctx.Done():
			return
		case self.receive <- frame:
		}
	}
}
// TransportId returns the unique id of this transport instance.
func (self *P2pReceiveTransport) TransportId() Id {
	return self.transportId
}

// lower priority takes precedence
func (self *P2pReceiveTransport) Priority() int {
	return 0
}

// Weight returns the relative share of traffic this transport should carry.
func (self *P2pReceiveTransport) Weight() float32 {
	return 1.0
}

// CanEvalRouteWeight reports whether RouteWeight can be evaluated.
// Always true for this transport.
func (self *P2pReceiveTransport) CanEvalRouteWeight(stats *RouteStats, remainingStats map[Transport]*RouteStats) bool {
	return true
}

// RouteWeight returns a constant weight; the stats are not consulted.
func (self *P2pReceiveTransport) RouteWeight(stats *RouteStats, remainingStats map[Transport]*RouteStats) float32 {
	return 1.0
}

// MatchesSend always returns false: this transport is receive-only.
func (self *P2pReceiveTransport) MatchesSend(destination TransferPath) bool {
	return false
}

// MatchesReceive reports whether this transport can receive for `destination`.
// NOTE(review): unlike the send side, this does not check destination.StreamId
// against self.streamId — confirm whether unconditional matching is intended.
func (self *P2pReceiveTransport) MatchesReceive(destination TransferPath) bool {
	return true
}

// Downgrade cancels this transport when the downgraded source is on its stream.
func (self *P2pReceiveTransport) Downgrade(source TransferPath) {
	if source.StreamId == self.streamId {
		self.cancel()
	}
}