-
Notifications
You must be signed in to change notification settings - Fork 5.4k
Expand file tree
/
Copy pathconnection_impl.cc
More file actions
1231 lines (1065 loc) · 50 KB
/
connection_impl.cc
File metadata and controls
1231 lines (1065 loc) · 50 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#include "source/common/network/connection_impl.h"
#include <atomic>
#include <cstdint>
#include <memory>
#include "envoy/common/exception.h"
#include "envoy/common/platform.h"
#include "envoy/config/core/v3/base.pb.h"
#include "envoy/event/scaled_range_timer_manager.h"
#include "envoy/event/timer.h"
#include "envoy/network/filter.h"
#include "envoy/network/socket.h"
#include "source/common/common/assert.h"
#include "source/common/common/dump_state_utils.h"
#include "source/common/common/empty_string.h"
#include "source/common/common/enum_to_int.h"
#include "source/common/common/scope_tracker.h"
#include "source/common/network/address_impl.h"
#include "source/common/network/connection_socket_impl.h"
#include "source/common/network/raw_buffer_socket.h"
#include "source/common/network/socket_option_factory.h"
#include "source/common/network/socket_option_impl.h"
#include "source/common/network/utility.h"
#include "source/common/runtime/runtime_features.h"
namespace Envoy {
namespace Network {
namespace {
constexpr absl::string_view kTransportSocketConnectTimeoutTerminationDetails =
"transport socket timeout was reached";
std::ostream& operator<<(std::ostream& os, Connection::State connection_state) {
switch (connection_state) {
case Connection::State::Open:
return os << "Open";
case Connection::State::Closing:
return os << "Closing";
case Connection::State::Closed:
return os << "Closed";
}
return os;
}
} // namespace
// Accounts for buffer activity: `delta` bytes moved this event are added to the
// running counter, and the gauge tracking currently-buffered bytes is adjusted
// to match `new_total`, with `previous_total` updated to remember it.
void ConnectionImplUtility::updateBufferStats(uint64_t delta, uint64_t new_total,
                                              uint64_t& previous_total, Stats::Counter& stat_total,
                                              Stats::Gauge& stat_current) {
  if (delta != 0) {
    stat_total.add(delta);
  }
  if (new_total == previous_total) {
    return;
  }
  // Move the gauge up or down by the difference between old and new totals.
  if (new_total > previous_total) {
    stat_current.add(new_total - previous_total);
  } else {
    stat_current.sub(previous_total - new_total);
  }
  previous_total = new_total;
}
std::atomic<uint64_t> ConnectionImpl::next_global_id_;
// Constructs a connection over an already-created socket. `connected` indicates whether the
// socket is already established; when false the connection enters the connecting state and
// waits for the write-ready event to complete the connect. Registers for Read|Write file
// events immediately and wires the transport socket callbacks back to this object.
ConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPtr&& socket,
                               TransportSocketPtr&& transport_socket,
                               StreamInfo::StreamInfo& stream_info, bool connected)
    : ConnectionImplBase(dispatcher, next_global_id_++),
      transport_socket_(std::move(transport_socket)), socket_(std::move(socket)),
      stream_info_(stream_info), filter_manager_(*this, *socket_),
      // Both buffers are watermark buffers so that back-pressure can be applied in
      // each direction via the low/high watermark callbacks below.
      write_buffer_(dispatcher.getWatermarkFactory().createBuffer(
          [this]() -> void { this->onWriteBufferLowWatermark(); },
          [this]() -> void { this->onWriteBufferHighWatermark(); },
          []() -> void { /* TODO(adisuissa): Handle overflow watermark */ })),
      read_buffer_(dispatcher.getWatermarkFactory().createBuffer(
          [this]() -> void { this->onReadBufferLowWatermark(); },
          [this]() -> void { this->onReadBufferHighWatermark(); },
          []() -> void { /* TODO(adisuissa): Handle overflow watermark */ })),
      detect_early_close_(true), enable_half_close_(false), read_end_stream_raised_(false),
      read_end_stream_(false), write_end_stream_(false), current_write_end_stream_(false),
      dispatch_buffered_data_(false), transport_wants_read_(false),
      enable_close_through_filter_manager_(Runtime::runtimeFeatureEnabled(
          "envoy.reloadable_features.connection_close_through_filter_manager")) {
  // A closed socket at construction time is a programming error upstream; bail out
  // without registering events so no callbacks ever fire on the dead socket.
  if (!socket_->isOpen()) {
    IS_ENVOY_BUG("Client socket failure");
    return;
  }
  if (!connected) {
    connecting_ = true;
  }
  Event::FileTriggerType trigger = Event::PlatformDefaultTriggerType;
  // We never ask for both early close and read at the same time. If we are reading, we want to
  // consume all available data.
  socket_->ioHandle().initializeFileEvent(
      dispatcher_,
      [this](uint32_t events) {
        onFileEvent(events);
        return absl::OkStatus();
      },
      trigger, Event::FileReadyType::Read | Event::FileReadyType::Write);
  transport_socket_->setTransportSocketCallbacks(*this);
  // TODO(soulxu): generate the connection id inside the addressProvider directly,
  // then we don't need a setter or any of the optional stuff.
  socket_->connectionInfoProvider().setConnectionID(id());
  socket_->connectionInfoProvider().setSslConnection(transport_socket_->ssl());
}
// Destructor. Owners are expected to have already closed the connection; the assert
// enforces that, while the defensive close() and access-log flush make failure modes
// louder rather than silent.
ConnectionImpl::~ConnectionImpl() {
  ASSERT(!socket_->isOpen() && delayed_close_timer_ == nullptr,
         "ConnectionImpl destroyed with open socket and/or active timer");
  // In general we assume that owning code has called close() previously to the destructor being
  // run. This generally must be done so that callbacks run in the correct context (vs. deferred
  // deletion). Hence the assert above. However, call close() here just to be completely sure that
  // the fd is closed and make it more likely that we crash from a bad close callback.
  close(ConnectionCloseType::NoFlush);
  // Ensure that the access log is written.
  ensureAccessLogWritten();
}
// Appends a write filter to this connection's filter chain.
void ConnectionImpl::addWriteFilter(WriteFilterSharedPtr filter) {
  filter_manager_.addWriteFilter(filter);
}
void ConnectionImpl::addFilter(FilterSharedPtr filter) { filter_manager_.addFilter(filter); }
// Appends a read filter to this connection's filter chain.
void ConnectionImpl::addReadFilter(ReadFilterSharedPtr filter) {
  filter_manager_.addReadFilter(filter);
}
// Removes a previously added read filter from the filter chain.
void ConnectionImpl::removeReadFilter(ReadFilterSharedPtr filter) {
  filter_manager_.removeReadFilter(filter);
}
bool ConnectionImpl::initializeReadFilters() { return filter_manager_.initializeReadFilters(); }
// Registers an access log handler that will be invoked when this connection is logged.
void ConnectionImpl::addAccessLogHandler(AccessLog::InstanceSharedPtr handler) {
  filter_manager_.addAccessLogHandler(handler);
}
// Emits the TcpConnectionEnd access log exactly once per connection; subsequent
// calls are no-ops.
void ConnectionImpl::ensureAccessLogWritten() {
  if (access_log_written_) {
    return;
  }
  access_log_written_ = true;
  filter_manager_.log(AccessLog::AccessLogType::TcpConnectionEnd);
}
// Public close entry point. Dispatches on the close type: AbortReset closes
// immediately with an RST, Abort/NoFlush close via closeInternal() (dropping any
// buffered data), and the flushing variants are routed through the filter manager
// so filters can delay the close until writes drain.
void ConnectionImpl::close(ConnectionCloseType type) {
  if (!socket_->isOpen()) {
    ENVOY_CONN_LOG_EVENT(debug, "connection_closing", "Not closing conn, socket is not open",
                         *this);
    return;
  }
  // The connection is closed by Envoy by sending RST, and the connection is closed immediately.
  if (type == ConnectionCloseType::AbortReset) {
    ENVOY_CONN_LOG(
        trace, "connection closing type=AbortReset, setting LocalReset to the detected close type.",
        *this);
    setDetectedCloseType(StreamInfo::DetectedCloseType::LocalReset);
    closeSocket(ConnectionEvent::LocalClose);
    return;
  }
  if (type == ConnectionCloseType::Abort || type == ConnectionCloseType::NoFlush) {
    closeInternal(type);
    return;
  }
  // Only FlushWrite and FlushWriteAndDelay are managed by the filter manager, since the above
  // status will abort data naturally.
  ASSERT(type == ConnectionCloseType::FlushWrite ||
         type == ConnectionCloseType::FlushWriteAndDelay);
  closeThroughFilterManager(ConnectionCloseAction{ConnectionEvent::LocalClose, false, type});
}
// Core close state machine. Decides between closing the socket immediately,
// arming the delayed-close timer, or entering a flush-then-close state in which
// onWriteReady() finishes the close once the write buffer drains.
void ConnectionImpl::closeInternal(ConnectionCloseType type) {
  if (!socket_->isOpen()) {
    return;
  }
  uint64_t data_to_write = write_buffer_->length();
  ENVOY_CONN_LOG_EVENT(debug, "connection_closing", "closing data_to_write={} type={}", *this,
                       data_to_write, enumToInt(type));
  const bool delayed_close_timeout_set = delayed_close_timeout_.count() > 0;
  // Fast path: nothing to flush, the caller asked not to flush, or the transport
  // cannot flush on close (e.g. TLS handshake not complete).
  if (data_to_write == 0 || type == ConnectionCloseType::NoFlush ||
      type == ConnectionCloseType::Abort || !transport_socket_->canFlushClose()) {
    if (data_to_write > 0 && type != ConnectionCloseType::Abort) {
      // We aren't going to wait to flush, but try to write as much as we can if there is pending
      // data.
      transport_socket_->doWrite(*write_buffer_, true);
    }
    if (type != ConnectionCloseType::FlushWriteAndDelay || !delayed_close_timeout_set) {
      closeConnectionImmediately();
      return;
    }
    // The socket is being closed and either there is no more data to write or the data can not be
    // flushed (!transport_socket_->canFlushClose()). Since a delayed close has been requested,
    // start the delayed close timer if it hasn't been done already by a previous close().
    // NOTE: Even though the delayed_close_state_ is being set to CloseAfterFlushAndWait, since
    // a write event is not being registered for the socket, this logic is simply setting the
    // timer and waiting for it to trigger to close the socket.
    if (!inDelayedClose()) {
      initializeDelayedCloseTimer();
      delayed_close_state_ = DelayedCloseState::CloseAfterFlushAndWait;
      // Monitor for the peer closing the connection.
      ioHandle().enableFileEvents(enable_half_close_ ? 0 : Event::FileReadyType::Closed);
    }
    return;
  }
  ASSERT(type == ConnectionCloseType::FlushWrite ||
         type == ConnectionCloseType::FlushWriteAndDelay);
  // If there is a pending delayed close, simply update the delayed close state.
  //
  // An example of this condition manifests when a downstream connection is closed early by Envoy,
  // such as when a route can't be matched:
  //   In ConnectionManagerImpl::onData()
  //     1) Via codec_->dispatch(), a local reply with a 404 is sent to the client
  //       a) ConnectionManagerImpl::doEndStream() issues the first connection close() via
  //          ConnectionManagerImpl::checkForDeferredClose()
  //     2) A second close is issued by a subsequent call to
  //        ConnectionManagerImpl::checkForDeferredClose() prior to returning from onData()
  if (inDelayedClose()) {
    // Validate that a delayed close timer is already enabled unless it was disabled via
    // configuration.
    ASSERT(!delayed_close_timeout_set || delayed_close_timer_ != nullptr);
    if (type == ConnectionCloseType::FlushWrite || !delayed_close_timeout_set) {
      delayed_close_state_ = DelayedCloseState::CloseAfterFlush;
    } else {
      delayed_close_state_ = DelayedCloseState::CloseAfterFlushAndWait;
    }
    return;
  }
  // NOTE: At this point, it's already been validated that the connection is not already in
  // delayed close processing and therefore the timer has not yet been created.
  if (delayed_close_timeout_set) {
    initializeDelayedCloseTimer();
    delayed_close_state_ = (type == ConnectionCloseType::FlushWrite)
                               ? DelayedCloseState::CloseAfterFlush
                               : DelayedCloseState::CloseAfterFlushAndWait;
  } else {
    delayed_close_state_ = DelayedCloseState::CloseAfterFlush;
  }
  // Register for write events to drive the flush; also watch for early close from
  // the peer unless half-close semantics are enabled.
  ioHandle().enableFileEvents(Event::FileReadyType::Write |
                              (enable_half_close_ ? 0 : Event::FileReadyType::Closed));
}
// Fired when a buffer has stayed above its high watermark for the configured
// timeout: the connection is torn down immediately with a descriptive reason.
void ConnectionImpl::onBufferHighWatermarkTimeout() {
  ENVOY_CONN_LOG(debug, "buffer high watermark timeout reached", *this);
  if (socket_->isOpen()) {
    closeConnectionImmediatelyWithDetails(
        StreamInfo::LocalCloseReasons::get().BufferHighWatermarkTimeout);
  }
}
// Arms the buffer high watermark timer if the feature is enabled and the timer
// is not already running. The timer object is created lazily on first use.
void ConnectionImpl::scheduleBufferHighWatermarkTimeout() {
  // A zero timeout means the feature is disabled.
  if (buffer_high_watermark_timeout_.count() == 0) {
    return;
  }
  if (buffer_high_watermark_timer_ == nullptr) {
    buffer_high_watermark_timer_ =
        dispatcher_.createTimer([this]() -> void { onBufferHighWatermarkTimeout(); });
  }
  if (buffer_high_watermark_timer_->enabled()) {
    return;
  }
  ENVOY_CONN_LOG(debug, "scheduling buffer high watermark timeout", *this);
  buffer_high_watermark_timer_->enableTimer(buffer_high_watermark_timeout_);
}
// Disables the high watermark timer once both buffers have dropped back below
// their high watermarks. No-op if the timer was never created or is not armed.
void ConnectionImpl::maybeCancelBufferHighWatermarkTimeout() {
  if (buffer_high_watermark_timer_ == nullptr || !buffer_high_watermark_timer_->enabled()) {
    return;
  }
  if (write_buffer_->highWatermarkTriggered() || read_buffer_->highWatermarkTriggered()) {
    // At least one buffer is still over its watermark; keep the timer running.
    return;
  }
  ENVOY_CONN_LOG(debug, "cancelling buffer high watermark timeout", *this);
  buffer_high_watermark_timer_->disableTimer();
}
// Reports the connection lifecycle state: Closed once the socket is gone,
// Closing while a delayed close is pending, Open otherwise.
Connection::State ConnectionImpl::state() const {
  if (!socket_->isOpen()) {
    return State::Closed;
  }
  return inDelayedClose() ? State::Closing : State::Open;
}
void ConnectionImpl::closeConnectionImmediately() { closeSocket(ConnectionEvent::LocalClose); }
// Called by the transport socket to request read resumption (e.g. buffered
// ciphertext remains after a partial read). Latches the request and activates a
// read event only when reads are currently enabled.
void ConnectionImpl::setTransportSocketIsReadable() {
  ASSERT(dispatcher_.isThreadSafe());
  // Remember that the transport requested read resumption, in case the resumption event is not
  // scheduled immediately or is "lost" because read was disabled.
  transport_wants_read_ = true;
  // Only schedule a read activation if the connection is not read disabled to avoid spurious
  // wakeups. When read disabled, the connection will not read from the transport, and limit
  // dispatch to the current contents of the read buffer if its high-watermark is triggered and
  // dispatch_buffered_data_ is set.
  if (read_disable_count_ == 0) {
    ioHandle().activateFileEvents(Event::FileReadyType::Read);
  }
}
// True when the filter chain should receive data: reads are fully enabled, or a
// single disable is outstanding while the read buffer's high watermark has
// triggered (so already-buffered data may still be dispatched).
bool ConnectionImpl::filterChainWantsData() {
  if (read_disable_count_ == 0) {
    return true;
  }
  return read_disable_count_ == 1 && read_buffer_->highWatermarkTriggered();
}
// Records how the close was detected (e.g. LocalReset/RemoteReset); consulted by
// closeSocket() to decide whether to send an RST.
void ConnectionImpl::setDetectedCloseType(StreamInfo::DetectedCloseType close_type) {
  detected_close_type_ = close_type;
}
// Routes a close request through the filter manager when the runtime feature is
// enabled (letting filters defer it); otherwise closes the connection directly.
void ConnectionImpl::closeThroughFilterManager(ConnectionCloseAction close_action) {
  if (!socket_->isOpen()) {
    return;
  }
  if (enable_close_through_filter_manager_) {
    ENVOY_CONN_LOG(trace, "connection is closing through the filter manager", *this);
    filter_manager_.onConnectionClose(close_action);
    return;
  }
  ENVOY_CONN_LOG(trace, "connection is closing not through the filter manager", *this);
  closeConnection(close_action);
}
// Final teardown: cancels the delayed-close timer, closes the transport, drains
// both buffers, optionally arranges an RST (SO_LINGER=0) for reset close types,
// closes the fd, records any transport failure reason, and raises the close event.
void ConnectionImpl::closeSocket(ConnectionEvent close_type) {
  if (!socket_->isOpen()) {
    ENVOY_CONN_LOG(trace, "closeSocket: socket is not open, returning", *this);
    return;
  }
  // No need for a delayed close (if pending) now that the socket is being closed.
  if (delayed_close_timer_) {
    delayed_close_timer_->disableTimer();
    delayed_close_timer_ = nullptr;
  }
  ENVOY_CONN_LOG(debug, "closing socket: {}", *this, static_cast<uint32_t>(close_type));
  transport_socket_->closeSocket(close_type);
  // Drain input and output buffers.
  updateReadBufferStats(0, 0);
  updateWriteBufferStats(0, 0);
  // As the socket closes, drain any remaining data.
  // The data won't be written out at this point, and where there are reference
  // counted buffer fragments, it helps avoid lifetime issues with the
  // connection outlasting the subscriber.
  write_buffer_->drain(write_buffer_->length());
  connection_stats_.reset();
  // For reset-style closes, set SO_LINGER with zero timeout so the kernel sends an
  // RST instead of a FIN when the fd is closed (platform permitting).
  if (detected_close_type_ == StreamInfo::DetectedCloseType::RemoteReset ||
      detected_close_type_ == StreamInfo::DetectedCloseType::LocalReset) {
#if ENVOY_PLATFORM_ENABLE_SEND_RST
    const bool ok = Network::Socket::applyOptions(
        Network::SocketOptionFactory::buildZeroSoLingerOptions(), *socket_,
        envoy::config::core::v3::SocketOption::STATE_LISTENING);
    if (!ok) {
      ENVOY_LOG_EVERY_POW_2(error, "rst setting so_linger=0 failed on connection {}", id());
    }
#endif
  }
  // It is safe to call close() since there is an IO handle check.
  socket_->close();
  // Propagate transport failure reason to StreamInfo before raising close events,
  // ensuring it's available to all filters and access loggers.
  // Only set if we have a valid failure reason to avoid accessing potentially invalid state.
  absl::string_view failure_reason = transportFailureReason();
  if (!failure_reason.empty()) {
    stream_info_.setDownstreamTransportFailureReason(failure_reason);
  }
  // Call the base class directly as close() is called in the destructor.
  ConnectionImpl::raiseEvent(close_type);
}
// Notifies the transport socket that the TCP-level connect has completed; must
// only be called after the connecting state has been cleared.
void ConnectionImpl::onConnected() {
  ASSERT(!connecting_);
  transport_socket_->onConnected();
}
// Enables/disables TCP_NODELAY (Nagle) on the socket. Silently ignores closed
// sockets and non-IP address types; tolerates platform-specific transient errors
// on not-yet-fully-formed connections (see ifdefs below).
void ConnectionImpl::noDelay(bool enable) {
  // There are cases where a connection to localhost can immediately fail (e.g., if the other end
  // does not have enough fds, reaches a backlog limit, etc.). Because we run with deferred error
  // events, the calling code may not yet know that the connection has failed. This is one call
  // where we go outside of libevent and hit the fd directly and this case can fail if the fd is
  // invalid. For this call instead of plumbing through logic that will immediately indicate that a
  // connect failed, we will just ignore the noDelay() call if the socket is invalid since error is
  // going to be raised shortly anyway and it makes the calling code simpler.
  if (!socket_->isOpen()) {
    return;
  }
  // Don't set NODELAY for unix domain sockets or internal socket.
  if (socket_->addressType() != Address::Type::Ip) {
    return;
  }
  // Set NODELAY
  int new_value = enable;
  Api::SysCallIntResult result =
      socket_->setSocketOption(IPPROTO_TCP, TCP_NODELAY, &new_value, sizeof(new_value));
#if defined(__APPLE__)
  if (SOCKET_FAILURE(result.return_value_) && result.errno_ == SOCKET_ERROR_INVAL) {
    // Sometimes occurs when the connection is not yet fully formed. Empirically, TCP_NODELAY is
    // enabled despite this result.
    return;
  }
#elif defined(WIN32)
  if (SOCKET_FAILURE(result.return_value_) &&
      (result.errno_ == SOCKET_ERROR_AGAIN || result.errno_ == SOCKET_ERROR_INVAL)) {
    // Sometimes occurs when the connection is not yet fully formed. Empirically, TCP_NODELAY is
    // enabled despite this result.
    return;
  }
#endif
  // Any other failure is treated as fatal.
  RELEASE_ASSERT(result.return_value_ == 0,
                 fmt::format("Failed to set TCP_NODELAY with error {}, {}", result.errno_,
                             errorDetails(result.errno_)));
}
// Dispatches buffered read data to the filter chain. Suppresses dispatch while in
// delayed close, while the filter chain is not accepting data, or while a close is
// pending in the filter manager; also de-duplicates repeated end-of-stream signals.
void ConnectionImpl::onRead(uint64_t read_buffer_size) {
  ASSERT(dispatcher_.isThreadSafe());
  // Do not dispatch data from the socket if the connection is in delayed close,
  // the high watermark has been hit, or it is closing through the filter manager.
  if (inDelayedClose() || !filterChainWantsData() ||
      (enable_close_through_filter_manager_ && filter_manager_.pendingClose())) {
    return;
  }
  ASSERT(socket_->isOpen());
  // Nothing to deliver: no bytes and no end-of-stream.
  if (read_buffer_size == 0 && !read_end_stream_) {
    return;
  }
  if (read_end_stream_) {
    // read() on a raw socket will repeatedly return 0 (EOF) once EOF has
    // occurred, so filter out the repeats so that filters don't have
    // to handle repeats.
    //
    // I don't know of any cases where this actually happens (we should stop
    // reading the socket after EOF), but this check guards against any bugs
    // in ConnectionImpl or strangeness in the OS events (epoll, kqueue, etc)
    // and maintains the guarantee for filters.
    if (read_end_stream_raised_) {
      // No further data can be delivered after end_stream
      ASSERT(read_buffer_size == 0);
      return;
    }
    read_end_stream_raised_ = true;
  }
  filter_manager_.onRead();
}
// Enables/disables half-close semantics (remote FIN does not imply full close).
// Must not be enabled while reads are disabled (see assert rationale below).
void ConnectionImpl::enableHalfClose(bool enabled) {
  // This code doesn't correctly ensure that EV_CLOSE isn't set if reading is disabled
  // when enabling half-close. This could be fixed, but isn't needed right now, so just
  // ASSERT that it doesn't happen.
  ASSERT(!enabled || read_disable_count_ == 0);
  enable_half_close_ = enabled;
}
// Increments/decrements the read-disable count and adjusts the registered file
// events accordingly. Returns whether this call transitioned the read state.
// On re-enable it may schedule a synthetic read event so data already buffered in
// the read buffer or transport socket gets processed without waiting on the kernel.
Connection::ReadDisableStatus ConnectionImpl::readDisable(bool disable) {
  // Calls to readEnabled on a closed socket are considered to be an error.
  ASSERT(state() == State::Open);
  ENVOY_CONN_LOG(trace, "readDisable: disable={} disable_count={} state={} buffer_length={}", *this,
                 disable, read_disable_count_, static_cast<int>(state()), read_buffer_->length());
  // When we disable reads, we still allow for early close notifications (the equivalent of
  // `EPOLLRDHUP` for an epoll backend). For backends that support it, this allows us to apply
  // back pressure at the kernel layer, but still get timely notification of a FIN. Note that
  // we are not guaranteed to get notified, so even if the remote has closed, we may not know
  // until we try to write. Further note that currently we optionally don't correctly handle half
  // closed TCP connections in the sense that we assume that a remote FIN means the remote intends a
  // full close.
  if (disable) {
    ++read_disable_count_;
    if (state() != State::Open) {
      // If readDisable is called on a closed connection, do not crash.
      return ReadDisableStatus::NoTransition;
    }
    if (read_disable_count_ > 1) {
      // The socket has already been read disabled.
      return ReadDisableStatus::StillReadDisabled;
    }
    // If half-close semantics are enabled, we never want early close notifications; we
    // always want to read all available data, even if the other side has closed.
    if (detect_early_close_ && !enable_half_close_) {
      ioHandle().enableFileEvents(Event::FileReadyType::Write | Event::FileReadyType::Closed);
    } else {
      ioHandle().enableFileEvents(Event::FileReadyType::Write);
    }
    return ReadDisableStatus::TransitionedToReadDisabled;
  } else {
    ASSERT(read_disable_count_ != 0);
    --read_disable_count_;
    if (state() != State::Open) {
      // If readDisable is called on a closed connection, do not crash.
      return ReadDisableStatus::NoTransition;
    }
    auto read_disable_status = ReadDisableStatus::StillReadDisabled;
    if (read_disable_count_ == 0) {
      // We never ask for both early close and read at the same time. If we are reading, we want to
      // consume all available data.
      ioHandle().enableFileEvents(Event::FileReadyType::Read | Event::FileReadyType::Write);
      read_disable_status = ReadDisableStatus::TransitionedToReadEnabled;
    }
    if (filterChainWantsData() && (read_buffer_->length() > 0 || transport_wants_read_)) {
      // Sanity check: resumption with read_disable_count_ > 0 should only happen if the read
      // buffer's high watermark has triggered.
      ASSERT(read_buffer_->length() > 0 || read_disable_count_ == 0);
      // If the read_buffer_ is not empty or transport_wants_read_ is true, the connection may be
      // able to process additional bytes even if there is no data in the kernel to kick off the
      // filter chain. Alternately the connection may need read resumption while read disabled and
      // not registered for read events because the read buffer's high-watermark has triggered. To
      // handle these cases, directly schedule a fake read event to make sure the buffered data in
      // the read buffer or in transport socket internal buffers gets processed regardless and
      // ensure that we dispatch it via onRead.
      dispatch_buffered_data_ = true;
      ioHandle().activateFileEvents(Event::FileReadyType::Read);
    }
    return read_disable_status;
  }
}
// Raises a connection event to registered callbacks; on Connected, additionally
// attempts to flush any data buffered during the transport handshake.
void ConnectionImpl::raiseEvent(ConnectionEvent event) {
  ENVOY_CONN_LOG(trace, "raising connection event {}", *this, static_cast<int>(event));
  ConnectionImplBase::raiseConnectionEvent(event);
  // We may have pending data in the write buffer on transport handshake
  // completion, which may also have completed in the context of onReadReady(),
  // where no check of the write buffer is made. Provide an opportunity to flush
  // here. If connection write is not ready, this is harmless. We should only do
  // this if we're still open (the above callbacks may have closed).
  if (event == ConnectionEvent::Connected) {
    flushWriteBuffer();
  }
}
// Returns true when no read-disable is outstanding. Only valid on an open
// connection and on the dispatcher thread.
bool ConnectionImpl::readEnabled() const {
  // Calls to readEnabled on a closed socket are considered to be an error.
  ASSERT(state() == State::Open);
  ASSERT(dispatcher_.isThreadSafe());
  return read_disable_count_ == 0;
}
// Registers a callback to be invoked as bytes are written to the socket.
void ConnectionImpl::addBytesSentCallback(BytesSentCb cb) {
  bytes_sent_callbacks_.emplace_back(cb);
}
// Writes bytes bypassing the write filter chain.
void ConnectionImpl::rawWrite(Buffer::Instance& data, bool end_stream) {
  write(data, end_stream, false);
}
// Writes bytes through the write filter chain.
void ConnectionImpl::write(Buffer::Instance& data, bool end_stream) {
  write(data, end_stream, true);
}
// Internal write path. Optionally runs the write filter chain (which may stop
// iteration), then moves the data into the write buffer and schedules a write
// event unless the socket is still connecting. end_stream requires half-close.
void ConnectionImpl::write(Buffer::Instance& data, bool end_stream, bool through_filter_chain) {
  ASSERT(!end_stream || enable_half_close_);
  ASSERT(dispatcher_.isThreadSafe());
  if (write_end_stream_) {
    // It is an API violation to write more data after writing end_stream, but a duplicate
    // end_stream with no data is harmless. This catches misuse of the API that could result in data
    // being lost.
    ASSERT(data.length() == 0 && end_stream);
    return;
  }
  if (through_filter_chain) {
    // NOTE: This is kind of a hack, but currently we don't support restart/continue on the write
    //       path, so we just pass around the buffer passed to us in this function. If we ever
    //       support buffer/restart/continue on the write path this needs to get more complicated.
    current_write_buffer_ = &data;
    current_write_end_stream_ = end_stream;
    FilterStatus status = filter_manager_.onWrite();
    current_write_buffer_ = nullptr;
    // A filter halted the write; the data stays in the caller's buffer.
    if (FilterStatus::StopIteration == status) {
      return;
    }
  }
  write_end_stream_ = end_stream;
  if (data.length() > 0 || end_stream) {
    ENVOY_CONN_LOG(trace, "writing {} bytes, end_stream {}", *this, data.length(), end_stream);
    // TODO(mattklein123): All data currently gets moved from the source buffer to the write buffer.
    // This can lead to inefficient behavior if writing a bunch of small chunks. In this case, it
    // would likely be more efficient to copy data below a certain size. VERY IMPORTANT: If this is
    // ever changed, read the comment in SslSocket::doWrite() VERY carefully. That code assumes that
    // we never change existing write_buffer_ chain elements between calls to SSL_write(). That code
    // might need to change if we ever copy here.
    write_buffer_->move(data);
    // Activating a write event before the socket is connected has the side-effect of tricking
    // doWriteReady into thinking the socket is connected. On macOS, the underlying write may fail
    // with a connection error if a call to write(2) occurs before the connection is completed.
    if (!connecting_) {
      ioHandle().activateFileEvents(Event::FileReadyType::Write);
    }
  }
}
// Sets the soft buffer limit used for watermark-based flow control on both the
// read and write buffers. A limit of 0 leaves watermarks unset (unlimited).
void ConnectionImpl::setBufferLimits(uint32_t limit) {
  read_buffer_limit_ = limit;
  // Due to the fact that writes to the connection and flushing data from the connection are done
  // asynchronously, we have the option of either setting the watermarks aggressively, and regularly
  // enabling/disabling reads from the socket, or allowing more data, but then not triggering
  // based on watermarks until 2x the data is buffered in the common case. Given these are all soft
  // limits we err on the side of buffering more triggering watermark callbacks less often.
  //
  // Given the current implementation for straight up TCP proxying, the common case is reading
  // |limit| bytes through the socket, passing |limit| bytes to the connection and the immediately
  // draining |limit| bytes to the socket. Triggering the high watermarks and then immediately
  // triggering the low watermarks would be expensive, but we narrowly avoid triggering high
  // watermark when moving |limit| bytes through the connection because the high watermark
  // computation checks if the size of the buffer exceeds the high watermark value.
  if (limit > 0) {
    write_buffer_->setWatermarks(limit);
    read_buffer_->setWatermarks(limit);
  }
}
// Updates the buffer high watermark timeout. Any timer armed with the previous
// value is stopped; if a watermark is currently exceeded on an open connection,
// a timer is re-armed with the new value.
void ConnectionImpl::setBufferHighWatermarkTimeout(std::chrono::milliseconds timeout) {
  if (timeout == buffer_high_watermark_timeout_) {
    return; // No change.
  }
  buffer_high_watermark_timeout_ = timeout;
  if (buffer_high_watermark_timer_ != nullptr && buffer_high_watermark_timer_->enabled()) {
    buffer_high_watermark_timer_->disableTimer();
  }
  const bool over_watermark =
      write_buffer_->highWatermarkTriggered() || read_buffer_->highWatermarkTriggered();
  if (state() == State::Open && over_watermark) {
    scheduleBufferHighWatermarkTimeout();
  }
}
// Read buffer dropped below its low watermark: re-enable reads and possibly
// cancel the high watermark timeout.
void ConnectionImpl::onReadBufferLowWatermark() {
  ENVOY_CONN_LOG(debug, "onBelowReadBufferLowWatermark", *this);
  if (state() != State::Open) {
    return;
  }
  readDisable(false);
  maybeCancelBufferHighWatermarkTimeout();
}
// Read buffer exceeded its high watermark: disable reads for back pressure and
// arm the high watermark timeout.
void ConnectionImpl::onReadBufferHighWatermark() {
  ENVOY_CONN_LOG(debug, "onAboveReadBufferHighWatermark", *this);
  if (state() != State::Open) {
    return;
  }
  readDisable(true);
  scheduleBufferHighWatermarkTimeout();
}
// Write buffer dropped below its low watermark: possibly cancel the high
// watermark timeout, then notify filters regardless of connection state.
void ConnectionImpl::onWriteBufferLowWatermark() {
  ENVOY_CONN_LOG(debug, "onBelowWriteBufferLowWatermark", *this);
  if (state() == State::Open) {
    maybeCancelBufferHighWatermarkTimeout();
  }
  onFilterBelowLowWatermark();
}
// Write buffer exceeded its high watermark: arm the high watermark timeout,
// then notify filters regardless of connection state.
void ConnectionImpl::onWriteBufferHighWatermark() {
  ENVOY_CONN_LOG(debug, "onAboveWriteBufferHighWatermark", *this);
  if (state() == State::Open) {
    scheduleBufferHighWatermarkTimeout();
  }
  onFilterAboveHighWatermark();
}
// Records a failure reason, appending the transport socket's own failure detail
// (separated by ". ") when it has one.
void ConnectionImpl::setFailureReason(absl::string_view failure_reason) {
  const auto transport_reason = transport_socket_->failureReason();
  failure_reason_ = transport_reason.empty()
                        ? std::string(failure_reason)
                        : absl::StrCat(failure_reason, ". ", transport_reason);
}
// Central file event handler: deals with deferred immediate errors (e.g. bind
// failures), remote early close notifications, then write readiness, then read
// readiness — re-checking that a write callback did not close the socket first.
void ConnectionImpl::onFileEvent(uint32_t events) {
  ScopeTrackerScopeState scope(this, this->dispatcher_);
  ENVOY_CONN_LOG(trace, "socket event: {}", *this, events);
  if (immediate_error_event_ == ConnectionEvent::LocalClose ||
      immediate_error_event_ == ConnectionEvent::RemoteClose) {
    if (bind_error_) {
      ENVOY_CONN_LOG(debug, "raising bind error", *this);
      // Update stats here, rather than on bind failure, to give the caller a chance to
      // setConnectionStats.
      if (connection_stats_ && connection_stats_->bind_errors_) {
        connection_stats_->bind_errors_->inc();
      }
    } else {
      ENVOY_CONN_LOG(debug, "raising immediate error", *this);
    }
    closeSocket(immediate_error_event_);
    return;
  }
  if (events & Event::FileReadyType::Closed) {
    // We never ask for both early close and read at the same time. If we are reading, we want to
    // consume all available data.
    ASSERT(!(events & Event::FileReadyType::Read));
    ENVOY_CONN_LOG(debug, "remote early close", *this);
    // If half-close is enabled, this is never activated.
    // If half-close is disabled, there are two scenarios where this applies:
    // 1. During the closeInternal(_) call.
    // 2. When an early close is detected while the connection is read-disabled.
    // Both situations allow the connection to bypass the filter manager's status since there will
    // be data loss even in normal cases.
    closeSocket(ConnectionEvent::RemoteClose);
    return;
  }
  if (events & Event::FileReadyType::Write) {
    onWriteReady();
  }
  // It's possible for a write event callback to close the socket (which will cause fd_ to be -1).
  // In this case ignore read event processing.
  if (socket_->isOpen() && (events & Event::FileReadyType::Read)) {
    onReadReady();
  }
}
// Handles read readiness: reads from the transport socket (unless read is
// disabled), pushes newly read data through the filter chain via onRead(),
// and translates RST / end_stream / close results into connection close
// actions.
void ConnectionImpl::onReadReady() {
  ENVOY_CONN_LOG(trace, "read ready. dispatch_buffered_data={}", *this,
                 static_cast<int>(dispatch_buffered_data_));
  // Latch-and-clear so re-entrant invocations don't redispatch the same data.
  const bool latched_dispatch_buffered_data = dispatch_buffered_data_;
  dispatch_buffered_data_ = false;
  ASSERT(!connecting_);
  // If it is closing through the filter manager, we either need to close the socket or go
  // through the close(), so we prevent further reading from the socket when we are waiting
  // for the connection close.
  if (enable_close_through_filter_manager_ && filter_manager_.pendingClose()) {
    return;
  }
  // We get here while read disabled in two ways.
  // 1) There was a call to setTransportSocketIsReadable(), for example if a raw buffer socket ceded
  //    due to shouldDrainReadBuffer(). In this case we defer the event until the socket is read
  //    enabled.
  // 2) The consumer of connection data called readDisable(true), and instead of reading from the
  //    socket we simply need to dispatch already read data.
  if (read_disable_count_ != 0) {
    // Do not clear transport_wants_read_ when returning early; the early return skips the transport
    // socket doRead call.
    if (latched_dispatch_buffered_data && filterChainWantsData()) {
      onRead(read_buffer_->length());
    }
    return;
  }
  // Clear transport_wants_read_ just before the call to doRead. This is the only way to ensure that
  // the transport socket read resumption happens as requested; onReadReady() returns early without
  // reading from the transport if the read buffer is above high watermark at the start of the
  // method.
  transport_wants_read_ = false;
  IoResult result = transport_socket_->doRead(*read_buffer_);
  uint64_t new_buffer_size = read_buffer_->length();
  updateReadBufferStats(result.bytes_processed_, new_buffer_size);
  // The socket is closed immediately when receiving RST.
  if (result.err_code_.has_value() &&
      result.err_code_ == Api::IoError::IoErrorCode::ConnectionReset) {
    ENVOY_CONN_LOG(trace, "read: rst close from peer", *this);
    setDetectedCloseType(StreamInfo::DetectedCloseType::RemoteReset);
    if (result.bytes_processed_ != 0) {
      onRead(new_buffer_size);
      // In some cases, the transport socket could read data along with an RST (Reset) flag.
      // We need to ensure this data is properly propagated to the terminal filter for proper
      // handling. For more details, see #29616 and #28817.
      closeThroughFilterManager(ConnectionCloseAction{ConnectionEvent::RemoteClose, true});
    } else {
      // Otherwise no data was read, and close the socket directly.
      closeSocket(Network::ConnectionEvent::RemoteClose);
    }
    return;
  }
  // If this connection doesn't have half-close semantics, translate end_stream into
  // a connection close.
  if ((!enable_half_close_ && result.end_stream_read_)) {
    result.end_stream_read_ = false;
    result.action_ = PostIoAction::Close;
  }
  // Latch end_stream across reads; a later read can't un-see it.
  read_end_stream_ |= result.end_stream_read_;
  if (result.bytes_processed_ != 0 || result.end_stream_read_ ||
      (latched_dispatch_buffered_data && read_buffer_->length() > 0)) {
    // Skip onRead if no bytes were processed unless we explicitly want to force onRead for
    // buffered data. For instance, skip onRead if the connection was closed without producing
    // more data.
    onRead(new_buffer_size);
  }
  // The read callback may have already closed the connection.
  if (result.action_ == PostIoAction::Close || bothSidesHalfClosed()) {
    ENVOY_CONN_LOG(debug, "remote close", *this);
    // This is the typical case where a socket read triggers a connection close.
    // When half-close is disabled, the action_ will be set to close.
    // When half-close is enabled, once both directions of the connection are closed,
    // we need to ensure that the read data is properly propagated to the terminal filter.
    closeThroughFilterManager(ConnectionCloseAction{ConnectionEvent::RemoteClose, true});
  }
}
// Returns the peer process credentials (pid/uid/gid) for a Unix domain socket,
// or nullopt when SO_PEERCRED is unavailable or the query fails.
absl::optional<Connection::UnixDomainSocketPeerCredentials>
ConnectionImpl::unixSocketPeerCredentials() const {
  // TODO(snowp): Support non-linux platforms.
#ifndef SO_PEERCRED
  return absl::nullopt;
#else
  // Ask the kernel for the credentials of the process on the other end.
  struct ucred peer_cred;
  socklen_t cred_len = sizeof(peer_cred);
  const int rc =
      socket_->getSocketOption(SOL_SOCKET, SO_PEERCRED, &peer_cred, &cred_len).return_value_;
  if (SOCKET_FAILURE(rc)) {
    return absl::nullopt;
  }
  return {{peer_cred.pid, peer_cred.uid, peer_cred.gid}};
#endif
}
// Handles write readiness: completes a pending non-blocking connect (if any),
// flushes the write buffer through the transport socket, and manages RST
// handling, delayed-close timer rearming, and bytes-sent callbacks.
void ConnectionImpl::onWriteReady() {
  ENVOY_CONN_LOG(trace, "write ready", *this);
  if (connecting_) {
    // The first writable event after a non-blocking connect() signals completion;
    // SO_ERROR holds the deferred connect result (0 on success).
    int error;
    socklen_t error_size = sizeof(error);
    RELEASE_ASSERT(
        socket_->getSocketOption(SOL_SOCKET, SO_ERROR, &error, &error_size).return_value_ == 0, "");
    if (error == 0) {
      ENVOY_CONN_LOG_EVENT(debug, "connection_connected", "connected", *this);
      connecting_ = false;
      onConnected();
      // It's possible that we closed during the connect callback.
      if (state() != State::Open) {
        ENVOY_CONN_LOG_EVENT(debug, "connection_closed_callback", "close during connected callback",
                             *this);
        return;
      }
    } else {
      setFailureReason(absl::StrCat("delayed connect error: ", errorDetails(error)));
      ENVOY_CONN_LOG_EVENT(debug, "connection_error", "{}", *this, transportFailureReason());
      closeSocket(ConnectionEvent::RemoteClose);
      return;
    }
  }
  IoResult result = transport_socket_->doWrite(*write_buffer_, write_end_stream_);
  ASSERT(!result.end_stream_read_); // The interface guarantees that only read operations set this.
  uint64_t new_buffer_size = write_buffer_->length();
  updateWriteBufferStats(result.bytes_processed_, new_buffer_size);
  // The socket is closed immediately when receiving RST.
  if (result.err_code_.has_value() &&
      result.err_code_ == Api::IoError::IoErrorCode::ConnectionReset) {
    // Discard anything in the buffer.
    ENVOY_CONN_LOG(debug, "write: rst close from peer.", *this);
    setDetectedCloseType(StreamInfo::DetectedCloseType::RemoteReset);
    closeSocket(ConnectionEvent::RemoteClose);
    return;
  }
  // NOTE: If the delayed_close_timer_ is set, it must only trigger after a delayed_close_timeout_
  // period of inactivity from the last write event. Therefore, the timer must be reset to its
  // original timeout value unless the socket is going to be closed as a result of the doWrite().
  if (result.action_ == PostIoAction::Close) {
    // It is possible (though unlikely) for the connection to have already been closed during the
    // write callback. This can happen if we manage to complete the SSL handshake in the write
    // callback, raise a connected event, and close the connection.
    closeSocket(ConnectionEvent::RemoteClose);
  } else if ((inDelayedClose() && new_buffer_size == 0) || bothSidesHalfClosed()) {
    ENVOY_CONN_LOG(debug, "write flush complete", *this);
    if (delayed_close_state_ == DelayedCloseState::CloseAfterFlushAndWait) {
      ASSERT(delayed_close_timer_ != nullptr && delayed_close_timer_->enabled());
      // Restart the inactivity window since this event wrote data.
      if (result.bytes_processed_ > 0) {
        delayed_close_timer_->enableTimer(delayed_close_timeout_);
      }
    } else {
      ASSERT(bothSidesHalfClosed() || delayed_close_state_ == DelayedCloseState::CloseAfterFlush);
      ENVOY_CONN_LOG(trace, "both sides are half closed or it is final close after flush state",
                     *this);
      if (delayed_close_state_ == DelayedCloseState::CloseAfterFlush) {
        // close() is already managed by the filter manager and delayed.
        // This is the final close.
        closeConnectionImmediately();
      } else if (bothSidesHalfClosed()) {
        // If half_close is enabled, the close should still go through the filter manager, since
        // the end_stream from read side is possible pending in the filter chain.
        closeThroughFilterManager(ConnectionCloseAction{ConnectionEvent::LocalClose, true});
      }
    }
  } else {
    ASSERT(result.action_ == PostIoAction::KeepOpen);
    ASSERT(!delayed_close_timer_ || delayed_close_timer_->enabled());
    // Rearm the delayed-close inactivity timer on write activity.
    if (delayed_close_timer_ != nullptr && result.bytes_processed_ > 0) {
      delayed_close_timer_->enableTimer(delayed_close_timeout_);
    }
    if (result.bytes_processed_ > 0) {
      auto it = bytes_sent_callbacks_.begin();
      while (it != bytes_sent_callbacks_.end()) {
        if ((*it)(result.bytes_processed_)) {
          // move to the next callback.
          it++;
        } else {
          // remove the current callback.
          it = bytes_sent_callbacks_.erase(it);
        }
        // If a callback closes the socket, stop iterating.
        if (!socket_->isOpen()) {
          return;
        }
      }
    }
  }
}
// Records read-buffer activity into the connection's stats, if any were set.
void ConnectionImpl::updateReadBufferStats(uint64_t num_read, uint64_t new_size) {
  // Stats are optional; do nothing until setConnectionStats() has been called.
  if (connection_stats_) {
    ConnectionImplUtility::updateBufferStats(num_read, new_size, last_read_buffer_size_,
                                             connection_stats_->read_total_,
                                             connection_stats_->read_current_);
  }
}
// Records write-buffer activity into the connection's stats, if any were set.
void ConnectionImpl::updateWriteBufferStats(uint64_t num_written, uint64_t new_size) {
  // Stats are optional; do nothing until setConnectionStats() has been called.
  if (connection_stats_) {
    ConnectionImplUtility::updateBufferStats(num_written, new_size, last_write_buffer_size_,
                                             connection_stats_->write_total_,
                                             connection_stats_->write_current_);
  }
}
// True when both directions have signaled end_stream and our end_stream has
// actually reached the transport (i.e. the write buffer is fully drained).
bool ConnectionImpl::bothSidesHalfClosed() {
  // A non-empty write_buffer_ means our end_stream has not been sent to the
  // transport yet.
  const bool write_side_done = write_end_stream_ && write_buffer_->length() == 0;
  return read_end_stream_ && write_side_done;
}
bool ConnectionImpl::setSocketOption(Network::SocketOptionName name, absl::Span<uint8_t> value) {
Api::SysCallIntResult result =
SocketOptionImpl::setSocketOption(*socket_, name, value.data(), value.size());
if (result.return_value_ != 0) {
return false;
}
// Only add a sockopt if it's added successfully.
auto sockopt = std::make_shared<SocketOptionImpl>(