connection.hpp
/*
 * Copyright (c) 2015, Peter Thorson. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the WebSocket++ Project nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL PETER THORSON BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef WEBSOCKETPP_TRANSPORT_ASIO_CON_HPP
#define WEBSOCKETPP_TRANSPORT_ASIO_CON_HPP

#include <websocketpp/transport/asio/base.hpp>
#include <websocketpp/transport/base/connection.hpp>

#include <websocketpp/logger/levels.hpp>
#include <websocketpp/http/constants.hpp>
#include <websocketpp/base64/base64.hpp>

#include <websocketpp/error.hpp>
#include <websocketpp/uri.hpp>

#include <websocketpp/common/asio.hpp>
#include <websocketpp/common/connection_hdl.hpp>
#include <websocketpp/common/functional.hpp>
#include <websocketpp/common/memory.hpp>

#include <istream>
#include <sstream>
#include <string>
#include <vector>

namespace websocketpp {
namespace transport {
namespace asio {

typedef lib::function<void(connection_hdl)> tcp_init_handler;

/// Asio based connection transport component
template <typename config>
class connection : public config::socket_type::socket_con_type {
public:
    /// Type of this connection transport component
    typedef connection<config> type;
    /// Type of a shared pointer to this connection transport component
    typedef lib::shared_ptr<type> ptr;

    /// Type of the socket connection component
    typedef typename config::socket_type::socket_con_type socket_con_type;
    /// Type of a shared pointer to the socket connection component
    typedef typename socket_con_type::ptr socket_con_ptr;
    /// Type of this transport's access logging policy
    typedef typename config::alog_type alog_type;
    /// Type of this transport's error logging policy
    typedef typename config::elog_type elog_type;

    typedef typename config::request_type request_type;
    typedef typename request_type::ptr request_ptr;
    typedef typename config::response_type response_type;
    typedef typename response_type::ptr response_ptr;

    /// Type of a pointer to the Asio io_service being used
    typedef lib::asio::io_service * io_service_ptr;
    /// Type of a pointer to the Asio io_service::strand being used
    typedef lib::shared_ptr<lib::asio::io_service::strand> strand_ptr;
    /// Type of a pointer to the Asio timer class
    typedef lib::shared_ptr<lib::asio::steady_timer> timer_ptr;

    // connection is friends with its associated endpoint to allow the endpoint
    // to call private/protected utility methods that we don't want to expose
    // to the public api.
    friend class endpoint<config>;
    template<typename T> friend class local_endpoint;

    // generate and manage our own io_service
    explicit connection(bool is_server, alog_type & alog, elog_type & elog)
      : m_is_server(is_server)
      , m_alog(alog)
      , m_elog(elog)
    {
        m_alog.write(log::alevel::devel,"asio con transport constructor");
    }

    /// Get a shared pointer to this component
    ptr get_shared() {
        return lib::static_pointer_cast<type>(socket_con_type::get_shared());
    }

    bool is_secure() const {
        return socket_con_type::is_secure();
    }

    /// Set uri hook
    void set_uri(uri_ptr u) {
        socket_con_type::set_uri(u);
    }

    /// Sets the tcp pre init handler
    void set_tcp_pre_init_handler(tcp_init_handler h) {
        m_tcp_pre_init_handler = h;
    }

    /// Sets the tcp pre init handler (deprecated)
    void set_tcp_init_handler(tcp_init_handler h) {
        set_tcp_pre_init_handler(h);
    }

    /// Sets the tcp post init handler
    void set_tcp_post_init_handler(tcp_init_handler h) {
        m_tcp_post_init_handler = h;
    }

    /// Set the proxy to connect through (exception free)
    void set_proxy(std::string const & uri, lib::error_code & ec) {
        // TODO: return errors for illegal URIs here?
        // TODO: should https urls be illegal for the moment?
        m_proxy = uri;
        m_proxy_data = lib::make_shared<proxy_data>();
        ec = lib::error_code();
    }

    /// Set the proxy to connect through (exception)
    void set_proxy(std::string const & uri) {
        lib::error_code ec;
        set_proxy(uri,ec);
        if (ec) { throw exception(ec); }
    }

    /// Set the basic auth credentials to use (exception free)
    void set_proxy_basic_auth(std::string const & username, std::string const &
        password, lib::error_code & ec)
    {
        if (!m_proxy_data) {
            ec = make_error_code(websocketpp::error::invalid_state);
            return;
        }

        // TODO: username can't contain ':'
        std::string val = "Basic "+base64_encode(username + ":" + password);
        m_proxy_data->req.replace_header("Proxy-Authorization",val);
        ec = lib::error_code();
    }

    /// Set the basic auth credentials to use (exception)
    void set_proxy_basic_auth(std::string const & username, std::string const &
        password)
    {
        lib::error_code ec;
        set_proxy_basic_auth(username,password,ec);
        if (ec) { throw exception(ec); }
    }

    /// Set the proxy timeout duration (exception free)
    void set_proxy_timeout(long duration, lib::error_code & ec) {
        if (!m_proxy_data) {
            ec = make_error_code(websocketpp::error::invalid_state);
            return;
        }

        m_proxy_data->timeout_proxy = duration;
        ec = lib::error_code();
    }

    /// Set the proxy timeout duration (exception)
    void set_proxy_timeout(long duration) {
        lib::error_code ec;
        set_proxy_timeout(duration,ec);
        if (ec) { throw exception(ec); }
    }

    std::string const & get_proxy() const {
        return m_proxy;
    }

    /// Get the remote endpoint address
    std::string get_remote_endpoint() const {
        lib::error_code ec;

        std::string ret = socket_con_type::get_remote_endpoint(ec);

        if (ec) {
            m_elog.write(log::elevel::info,ret);
            return "Unknown";
        } else {
            return ret;
        }
    }

    /// Get the connection handle
    connection_hdl get_handle() const {
        return m_connection_hdl;
    }

    /// Call back a function after a period of time
    timer_ptr set_timer(long duration, timer_handler callback) {
        timer_ptr new_timer = lib::make_shared<lib::asio::steady_timer>(
            *m_io_service,
            lib::asio::milliseconds(duration)
        );

        if (config::enable_multithreading) {
            new_timer->async_wait(m_strand->wrap(lib::bind(
                &type::handle_timer, get_shared(),
                new_timer,
                callback,
                lib::placeholders::_1
            )));
        } else {
            new_timer->async_wait(lib::bind(
                &type::handle_timer, get_shared(),
                new_timer,
                callback,
                lib::placeholders::_1
            ));
        }

        return new_timer;
    }

    /// Timer callback
    /**
     * The timer pointer is bound into the handler so the timer object stays
     * alive until the callback has run.
     */
    void handle_timer(timer_ptr, timer_handler callback,
        lib::asio::error_code const & ec)
    {
        if (ec) {
            if (ec == lib::asio::error::operation_aborted) {
                callback(make_error_code(transport::error::operation_aborted));
            } else {
                log_err(log::elevel::info,"asio handle_timer",ec);
                callback(ec);
            }
        } else {
            callback(lib::error_code());
        }
    }

    /// Get a pointer to this connection's strand
    strand_ptr get_strand() {
        return m_strand;
    }

    /// Get the internal transport error code for a closed/failed connection
    lib::asio::error_code get_transport_ec() const {
        return m_tec;
    }

protected:
    /// Initialize transport for reading
    void init(init_handler callback) {
        if (m_alog.static_test(log::alevel::devel)) {
            m_alog.write(log::alevel::devel,"asio connection init");
        }

        // TODO: pre-init timeout. Right now no implemented socket policies
        // actually have an asynchronous pre-init
        socket_con_type::pre_init(
            lib::bind(
                &type::handle_pre_init,
                get_shared(),
                callback,
                lib::placeholders::_1
            )
        );
    }

    /// initialize the proxy buffers and http parsers
    lib::error_code proxy_init(std::string const & authority) {
        if (!m_proxy_data) {
            return make_error_code(websocketpp::error::invalid_state);
        }
        m_proxy_data->req.set_version("HTTP/1.1");
        m_proxy_data->req.set_method("CONNECT");

        m_proxy_data->req.set_uri(authority);
        m_proxy_data->req.replace_header("Host",authority);

        return lib::error_code();
    }

    lib::error_code init_asio (io_service_ptr io_service) {
        m_io_service = io_service;

        if (config::enable_multithreading) {
            m_strand = lib::make_shared<lib::asio::io_service::strand>(*io_service);
        }

        lib::error_code ec = socket_con_type::init_asio(io_service, m_strand,
            m_is_server);

        return ec;
    }

    void handle_pre_init(init_handler callback, lib::error_code const & ec) {
        if (m_alog.static_test(log::alevel::devel)) {
            m_alog.write(log::alevel::devel,"asio connection handle pre_init");
        }

        if (m_tcp_pre_init_handler) {
            m_tcp_pre_init_handler(m_connection_hdl);
        }

        if (ec) {
            callback(ec);
        }

        // If we have a proxy set issue a proxy connect, otherwise skip to
        // post_init
        if (!m_proxy.empty()) {
            proxy_write(callback);
        } else {
            post_init(callback);
        }
    }

    void post_init(init_handler callback) {
        if (m_alog.static_test(log::alevel::devel)) {
            m_alog.write(log::alevel::devel,"asio connection post_init");
        }

        timer_ptr post_timer;

        if (config::timeout_socket_post_init > 0) {
            post_timer = set_timer(
                config::timeout_socket_post_init,
                lib::bind(
                    &type::handle_post_init_timeout,
                    get_shared(),
                    post_timer,
                    callback,
                    lib::placeholders::_1
                )
            );
        }

        socket_con_type::post_init(
            lib::bind(
                &type::handle_post_init,
                get_shared(),
                post_timer,
                callback,
                lib::placeholders::_1
            )
        );
    }

    /// Post init timeout callback
    void handle_post_init_timeout(timer_ptr, init_handler callback,
        lib::error_code const & ec)
    {
        lib::error_code ret_ec;

        if (ec) {
            if (ec == transport::error::operation_aborted) {
                m_alog.write(log::alevel::devel,
                    "asio post init timer cancelled");
                return;
            }

            log_err(log::elevel::devel,"asio handle_post_init_timeout",ec);
            ret_ec = ec;
        } else {
            if (socket_con_type::get_ec()) {
                ret_ec = socket_con_type::get_ec();
            } else {
                ret_ec = make_error_code(transport::error::timeout);
            }
        }

        m_alog.write(log::alevel::devel, "Asio transport post-init timed out");
        cancel_socket_checked();
        callback(ret_ec);
    }

    /// Post init callback
    void handle_post_init(timer_ptr post_timer, init_handler callback,
        lib::error_code const & ec)
    {
        if (ec == transport::error::operation_aborted ||
            (post_timer && lib::asio::is_neg(post_timer->expires_from_now())))
        {
            m_alog.write(log::alevel::devel,"post_init cancelled");
            return;
        }

        if (post_timer) {
            post_timer->cancel();
        }

        if (m_alog.static_test(log::alevel::devel)) {
            m_alog.write(log::alevel::devel,"asio connection handle_post_init");
        }

        if (m_tcp_post_init_handler) {
            m_tcp_post_init_handler(m_connection_hdl);
        }

        callback(ec);
    }

    void proxy_write(init_handler callback) {
        if (m_alog.static_test(log::alevel::devel)) {
            m_alog.write(log::alevel::devel,"asio connection proxy_write");
        }

        if (!m_proxy_data) {
            m_elog.write(log::elevel::library,
                "assertion failed: !m_proxy_data in asio::connection::proxy_write");
            callback(make_error_code(error::general));
            return;
        }

        m_proxy_data->write_buf = m_proxy_data->req.raw();

        m_bufs.push_back(lib::asio::buffer(m_proxy_data->write_buf.data(),
            m_proxy_data->write_buf.size()));

        m_alog.write(log::alevel::devel,m_proxy_data->write_buf);

        // Set a timer so we don't wait forever for the proxy to respond
        m_proxy_data->timer = this->set_timer(
            m_proxy_data->timeout_proxy,
            lib::bind(
                &type::handle_proxy_timeout,
                get_shared(),
                callback,
                lib::placeholders::_1
            )
        );

        // Send proxy request
        if (config::enable_multithreading) {
            lib::asio::async_write(
                socket_con_type::get_next_layer(),
                m_bufs,
                m_strand->wrap(lib::bind(
                    &type::handle_proxy_write, get_shared(),
                    callback,
                    lib::placeholders::_1
                ))
            );
        } else {
            lib::asio::async_write(
                socket_con_type::get_next_layer(),
                m_bufs,
                lib::bind(
                    &type::handle_proxy_write, get_shared(),
                    callback,
                    lib::placeholders::_1
                )
            );
        }
    }

    void handle_proxy_timeout(init_handler callback, lib::error_code const & ec)
    {
        if (ec == transport::error::operation_aborted) {
            m_alog.write(log::alevel::devel,
                "asio handle_proxy_write timer cancelled");
            return;
        } else if (ec) {
            log_err(log::elevel::devel,"asio handle_proxy_write",ec);
            callback(ec);
        } else {
            m_alog.write(log::alevel::devel,
                "asio handle_proxy_write timer expired");
            cancel_socket_checked();
            callback(make_error_code(transport::error::timeout));
        }
    }

    void handle_proxy_write(init_handler callback,
        lib::asio::error_code const & ec)
    {
        if (m_alog.static_test(log::alevel::devel)) {
            m_alog.write(log::alevel::devel,
                "asio connection handle_proxy_write");
        }

        m_bufs.clear();

        // Timer expired or the operation was aborted for some reason.
        // Whatever aborted it will be issuing the callback so we are safe to
        // return
        if (ec == lib::asio::error::operation_aborted ||
            lib::asio::is_neg(m_proxy_data->timer->expires_from_now()))
        {
            m_elog.write(log::elevel::devel,"write operation aborted");
            return;
        }

        if (ec) {
            log_err(log::elevel::info,"asio handle_proxy_write",ec);
            m_proxy_data->timer->cancel();
            callback(ec);
            return;
        }

        proxy_read(callback);
    }

    void proxy_read(init_handler callback) {
        if (m_alog.static_test(log::alevel::devel)) {
            m_alog.write(log::alevel::devel,"asio connection proxy_read");
        }

        if (!m_proxy_data) {
            m_elog.write(log::elevel::library,
                "assertion failed: !m_proxy_data in asio::connection::proxy_read");
            m_proxy_data->timer->cancel();
            callback(make_error_code(error::general));
            return;
        }

        if (config::enable_multithreading) {
            lib::asio::async_read_until(
                socket_con_type::get_next_layer(),
                m_proxy_data->read_buf,
                "\r\n\r\n",
                m_strand->wrap(lib::bind(
                    &type::handle_proxy_read, get_shared(),
                    callback,
                    lib::placeholders::_1, lib::placeholders::_2
                ))
            );
        } else {
            lib::asio::async_read_until(
                socket_con_type::get_next_layer(),
                m_proxy_data->read_buf,
                "\r\n\r\n",
                lib::bind(
                    &type::handle_proxy_read, get_shared(),
                    callback,
                    lib::placeholders::_1, lib::placeholders::_2
                )
            );
        }
    }

    /// Proxy read callback
    void handle_proxy_read(init_handler callback,
        lib::asio::error_code const & ec, size_t)
    {
        if (m_alog.static_test(log::alevel::devel)) {
            m_alog.write(log::alevel::devel,
                "asio connection handle_proxy_read");
        }

        // Timer expired or the operation was aborted for some reason.
        // Whatever aborted it will be issuing the callback so we are safe to
        // return
        if (ec == lib::asio::error::operation_aborted ||
            lib::asio::is_neg(m_proxy_data->timer->expires_from_now()))
        {
            m_elog.write(log::elevel::devel,"read operation aborted");
            return;
        }

        // At this point there is no need to wait for the timer anymore
        m_proxy_data->timer->cancel();

        if (ec) {
            m_elog.write(log::elevel::info,
                "asio handle_proxy_read error: "+ec.message());
            callback(ec);
        } else {
            if (!m_proxy_data) {
                m_elog.write(log::elevel::library,
                    "assertion failed: !m_proxy_data in asio::connection::handle_proxy_read");
                callback(make_error_code(error::general));
                return;
            }

            std::istream input(&m_proxy_data->read_buf);

            m_proxy_data->res.consume(input);

            if (!m_proxy_data->res.headers_ready()) {
                // we read until the headers were done in theory but apparently
                // they aren't. Internal endpoint error.
                callback(make_error_code(error::general));
                return;
            }

            m_alog.write(log::alevel::devel,m_proxy_data->res.raw());

            if (m_proxy_data->res.get_status_code() != http::status_code::ok) {
                // got an error response back
                // TODO: expose this error in a programmatically accessible way?
                // if so, see below for an option on how to do this.
                std::stringstream s;
                s << "Proxy connection error: "
                  << m_proxy_data->res.get_status_code()
                  << " ("
                  << m_proxy_data->res.get_status_msg()
                  << ")";
                m_elog.write(log::elevel::info,s.str());
                callback(make_error_code(error::proxy_failed));
                return;
            }

            // we have successfully established a connection to the proxy, now
            // we can continue and the proxy will transparently forward the
            // WebSocket connection.

            // TODO: decide if we want an on_proxy callback that would allow
            // access to the proxy response.

            // free the proxy buffers and req/res objects as they aren't needed
            // anymore
            m_proxy_data.reset();

            // Continue with post proxy initialization
            post_init(callback);
        }
    }

    /// read at least num_bytes bytes into buf and then call handler
    void async_read_at_least(size_t num_bytes, char *buf, size_t len,
        read_handler handler)
    {
        if (m_alog.static_test(log::alevel::devel)) {
            std::stringstream s;
            s << "asio async_read_at_least: " << num_bytes;
            m_alog.write(log::alevel::devel,s.str());
        }

        // TODO: safety vs speed ?
        // maybe move into an if devel block
        /*if (num_bytes > len) {
            m_elog.write(log::elevel::devel,
                "asio async_read_at_least error::invalid_num_bytes");
            handler(make_error_code(transport::error::invalid_num_bytes),
                size_t(0));
            return;
        }*/

        if (config::enable_multithreading) {
            lib::asio::async_read(
                socket_con_type::get_socket(),
                lib::asio::buffer(buf,len),
                lib::asio::transfer_at_least(num_bytes),
                m_strand->wrap(make_custom_alloc_handler(
                    m_read_handler_allocator,
                    lib::bind(
                        &type::handle_async_read, get_shared(),
                        handler,
                        lib::placeholders::_1, lib::placeholders::_2
                    )
                ))
            );
        } else {
            lib::asio::async_read(
                socket_con_type::get_socket(),
                lib::asio::buffer(buf,len),
                lib::asio::transfer_at_least(num_bytes),
                make_custom_alloc_handler(
                    m_read_handler_allocator,
                    lib::bind(
                        &type::handle_async_read, get_shared(),
                        handler,
                        lib::placeholders::_1, lib::placeholders::_2
                    )
                )
            );
        }
    }

    void handle_async_read(read_handler handler, lib::asio::error_code const & ec,
        size_t bytes_transferred)
    {
        m_alog.write(log::alevel::devel, "asio con handle_async_read");

        // translate asio error codes into more lib::error_codes
        lib::error_code tec;
        if (ec == lib::asio::error::eof) {
            tec = make_error_code(transport::error::eof);
        } else if (ec) {
            // We don't know much more about the error at this point. Ask our
            // socket/security policy if it knows more:
            tec = socket_con_type::translate_ec(ec);
            m_tec = ec;

            if (tec == transport::error::tls_error ||
                tec == transport::error::pass_through)
            {
                // These are aggregate/catch all errors. Log some human readable
                // information to the info channel to give library users some
                // more details about why the upstream method may have failed.
                log_err(log::elevel::info,"asio async_read_at_least",ec);
            }
        }
        if (handler) {
            handler(tec,bytes_transferred);
        } else {
            // This can happen in cases where the connection is terminated while
            // the transport is waiting on a read.
            m_alog.write(log::alevel::devel,
                "handle_async_read called with null read handler");
        }
    }

    /// Initiate a potentially asynchronous write of the given buffer
    void async_write(const char* buf, size_t len, write_handler handler) {
        m_bufs.push_back(lib::asio::buffer(buf,len));

        if (config::enable_multithreading) {
            lib::asio::async_write(
                socket_con_type::get_socket(),
                m_bufs,
                m_strand->wrap(make_custom_alloc_handler(
                    m_write_handler_allocator,
                    lib::bind(
                        &type::handle_async_write, get_shared(),
                        handler,
                        lib::placeholders::_1, lib::placeholders::_2
                    )
                ))
            );
        } else {
            lib::asio::async_write(
                socket_con_type::get_socket(),
                m_bufs,
                make_custom_alloc_handler(
                    m_write_handler_allocator,
                    lib::bind(
                        &type::handle_async_write, get_shared(),
                        handler,
                        lib::placeholders::_1, lib::placeholders::_2
                    )
                )
            );
        }
    }

    /// Initiate a potentially asynchronous write of the given buffers
    void async_write(std::vector<buffer> const & bufs, write_handler handler) {
        std::vector<buffer>::const_iterator it;

        for (it = bufs.begin(); it != bufs.end(); ++it) {
            m_bufs.push_back(lib::asio::buffer((*it).buf,(*it).len));
        }

        if (config::enable_multithreading) {
            lib::asio::async_write(
                socket_con_type::get_socket(),
                m_bufs,
                m_strand->wrap(make_custom_alloc_handler(
                    m_write_handler_allocator,
                    lib::bind(
                        &type::handle_async_write, get_shared(),
                        handler,
                        lib::placeholders::_1, lib::placeholders::_2
                    )
                ))
            );
        } else {
            lib::asio::async_write(
                socket_con_type::get_socket(),
                m_bufs,
                make_custom_alloc_handler(
                    m_write_handler_allocator,
                    lib::bind(
                        &type::handle_async_write, get_shared(),
                        handler,
                        lib::placeholders::_1, lib::placeholders::_2
                    )
                )
            );
        }
    }

    /// Async write callback
    void handle_async_write(write_handler handler, lib::asio::error_code const & ec, size_t) {
        m_bufs.clear();
        lib::error_code tec;
        if (ec) {
            log_err(log::elevel::info,"asio async_write",ec);
            tec = ec;
        }
        if (handler) {
            handler(tec);
        } else {
            // This can happen in cases where the connection is terminated while
            // the transport is waiting on a write.
            m_alog.write(log::alevel::devel,
                "handle_async_write called with null write handler");
        }
    }

    /// Set Connection Handle
    void set_handle(connection_hdl hdl) {
        m_connection_hdl = hdl;
        socket_con_type::set_handle(hdl);
    }

    lib::error_code interrupt(interrupt_handler handler) {
        if (config::enable_multithreading) {
            m_io_service->post(m_strand->wrap(handler));
        } else {
            m_io_service->post(handler);
        }
        return lib::error_code();
    }

    lib::error_code dispatch(dispatch_handler handler) {
        if (config::enable_multithreading) {
            m_io_service->post(m_strand->wrap(handler));
        } else {
            m_io_service->post(handler);
        }
        return lib::error_code();
    }

    /*void handle_interrupt(interrupt_handler handler) {
        handler();
    }*/

1034
1037 if (m_alog.static_test(log::alevel::devel)) {
1038 m_alog.write(log::alevel::devel,"asio connection async_shutdown");
1039 }
1040
1041 timer_ptr shutdown_timer;
1042 shutdown_timer = set_timer(
1043 config::timeout_socket_shutdown,
1044 lib::bind(
1046 get_shared(),
1047 shutdown_timer,
1048 callback,
1049 lib::placeholders::_1
1050 )
1051 );
1052
1053 socket_con_type::async_shutdown(
1054 lib::bind(
1056 get_shared(),
1057 shutdown_timer,
1058 callback,
1059 lib::placeholders::_1
1060 )
1061 );
1062 }
1063
1065
    /// Async shutdown timeout handler
    void handle_async_shutdown_timeout(timer_ptr, init_handler callback,
        lib::error_code const & ec)
    {
        lib::error_code ret_ec;

        if (ec) {
            if (ec == transport::error::operation_aborted) {
                m_alog.write(log::alevel::devel,
                    "asio socket shutdown timer cancelled");
                return;
            }

            log_err(log::elevel::devel,"asio handle_async_shutdown_timeout",ec);
            ret_ec = ec;
        } else {
            ret_ec = make_error_code(transport::error::timeout);
        }

        m_alog.write(log::alevel::devel,
            "Asio transport socket shutdown timed out");
        cancel_socket_checked();
        callback(ret_ec);
    }

    void handle_async_shutdown(timer_ptr shutdown_timer, shutdown_handler
        callback, lib::asio::error_code const & ec)
    {
        if (ec == lib::asio::error::operation_aborted ||
            lib::asio::is_neg(shutdown_timer->expires_from_now()))
        {
            m_alog.write(log::alevel::devel,"async_shutdown cancelled");
            return;
        }

        shutdown_timer->cancel();

        lib::error_code tec;
        if (ec) {
            if (ec == lib::asio::error::not_connected) {
                // The socket was already closed when we tried to close it. This
                // happens periodically (usually if a read or write fails
                // earlier); if it is a real error it will be caught at another
                // level of the stack.
            } else {
                // We don't know anything more about this error, give our
                // socket/security policy a crack at it.
                tec = socket_con_type::translate_ec(ec);
                m_tec = ec;

                if (tec == transport::error::tls_short_read) {
                    // TLS short read at this point is somewhat expected if both
                    // sides try and end the connection at the same time or if
                    // SSLv2 is being used. In general there is nothing that can
                    // be done here other than a low level development log.
                } else {
                    // all other errors are effectively pass through errors of
                    // some sort so print some detail on the info channel for
                    // library users to look up if needed.
                    log_err(log::elevel::info,"asio async_shutdown",ec);
                }
            }
        } else {
            if (m_alog.static_test(log::alevel::devel)) {
                m_alog.write(log::alevel::devel,
                    "asio con handle_async_shutdown");
            }
        }
        callback(tec);
    }

    /// Cancel the underlying socket and log any errors
    void cancel_socket_checked() {
        lib::asio::error_code cec = socket_con_type::cancel_socket();
        if (cec) {
            if (cec == lib::asio::error::operation_not_supported) {
                // cancel not supported on this OS, ignore and log at dev level
                m_alog.write(log::alevel::devel, "socket cancel not supported");
            } else {
                log_err(log::elevel::warn, "socket cancel failed", cec);
            }
        }
    }

private:
    /// Convenience method that logs an error code and its message
    template <typename error_type>
    void log_err(log::level l, const char * msg, const error_type & ec) {
        std::stringstream s;
        s << msg << " error: " << ec << " (" << ec.message() << ")";
        m_elog.write(l,s.str());
    }

    // static settings
    const bool m_is_server;
    alog_type& m_alog;
    elog_type& m_elog;

    struct proxy_data {
        proxy_data() : timeout_proxy(config::timeout_proxy) {}

        request_type req;
        response_type res;
        std::string write_buf;
        lib::asio::streambuf read_buf;
        long timeout_proxy;
        timer_ptr timer;
    };

    std::string m_proxy;
    lib::shared_ptr<proxy_data> m_proxy_data;

    // transport resources
    io_service_ptr m_io_service;
    strand_ptr m_strand;
    connection_hdl m_connection_hdl;

    std::vector<lib::asio::const_buffer> m_bufs;

    /// The most recent error code reported by the underlying socket, if any
    lib::asio::error_code m_tec;

    // Handlers
    tcp_init_handler m_tcp_pre_init_handler;
    tcp_init_handler m_tcp_post_init_handler;

    handler_allocator m_read_handler_allocator;
    handler_allocator m_write_handler_allocator;
};


} // namespace asio
} // namespace transport
} // namespace websocketpp

#endif // WEBSOCKETPP_TRANSPORT_ASIO_CON_HPP
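
Applications do not normally call the transport methods above directly; they surface on the endpoint and connection objects of a WebSocket++ client or server built with the asio transport. The following is a minimal usage sketch only, assuming a stock websocketpp::config::asio_client configuration, C++11, and a reachable HTTP proxy at 127.0.0.1:3128; the handler, URIs, and credentials are placeholders, not part of this header.

#include <websocketpp/config/asio_no_tls_client.hpp>
#include <websocketpp/client.hpp>

typedef websocketpp::client<websocketpp::config::asio_client> client;

// Handler matching the tcp_init_handler typedef at the top of this file; it is
// invoked once the raw TCP connection is established, before the WebSocket
// handshake.
void on_tcp_pre_init(websocketpp::connection_hdl) {
    // e.g. inspect or tune the socket here
}

int main() {
    client c;
    c.init_asio();

    // Registered on the endpoint; forwarded to set_tcp_pre_init_handler above.
    c.set_tcp_pre_init_handler(&on_tcp_pre_init);

    websocketpp::lib::error_code ec;
    client::connection_ptr con = c.get_connection("ws://example.com:9002", ec);
    if (ec) { return 1; }

    // These calls land in the set_proxy / set_proxy_basic_auth /
    // set_proxy_timeout methods shown above. set_proxy must come first, since
    // the other two require the proxy_data structure it allocates.
    con->set_proxy("http://127.0.0.1:3128", ec);
    con->set_proxy_basic_auth("user", "pass", ec);
    con->set_proxy_timeout(5000, ec);   // milliseconds, see set_timer above

    c.connect(con);
    c.run();
    return 0;
}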