// websocket.h
#ifndef __WEBSOCKET_H
#define __WEBSOCKET_H

#include <libwebsockets.h>
#include <string.h>
#include <stdio.h>
#include <pthread.h>
#include "message.h"
#include "chatclient.h"
#include "biglist.h"
#include "datablock.h"
#include "user.h"
#include "chatroom.h"
#include "task.h"
#include "tasks.h"
#include "concurrent_queue.h"
#include "simplechatgame.h"

#define EXAMPLE_RX_BUFFER_BYTES (30)
#define EXAMPLE_RX_CHATROOM_BYTES 1024

struct payload
{
    unsigned char data[LWS_SEND_BUFFER_PRE_PADDING + EXAMPLE_RX_BUFFER_BYTES + LWS_SEND_BUFFER_POST_PADDING];
    size_t len;
};
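
/*
 * The pre/post padding in payload::data exists because lws_write() expects
 * LWS_SEND_BUFFER_PRE_PADDING writable bytes in front of the bytes actually being sent.
 * A minimal sketch of filling and sending one payload from inside a WRITEABLE callback
 * (the message text is arbitrary):
 *
 *     struct payload p;
 *     p.len = sprintf((char *)&p.data[LWS_SEND_BUFFER_PRE_PADDING], "hello");
 *     lws_write(wsi, &p.data[LWS_SEND_BUFFER_PRE_PADDING], p.len, LWS_WRITE_TEXT);
 */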
class websocket
{
public:
    websocket();
    ~websocket();

    biglist<user *> users;
    biglist<chatroom *> chatrooms;
    biglist<chatclient *> chatclients;
    biglist<simplechatgame *> simplechatgames;
    int64_t next_chatclientid;
    int64_t next_chatroomid;
    volatile bool shutdown;
    datablock *server_password;
    tasks *chatroom_tasks;
    pthread_t task_thread;
    bool run_async;

    biglist_item<simplechatgame *> *find_simple_game(datastring gameid);
    biglist_item<simplechatgame *> *add_simple_game(simplechatgame *game);
    void remove_client_from_simple_games(chatclient *client, bool senduserlistmessage);

    static void *task_thread_routine(void *arg); // Routine that is called from a background thread if run_async is true.

    // The following three functions are libwebsockets callback functions.
    static int callback_http(struct lws *wsi, enum lws_callback_reasons reason, void *user, void *in, size_t len);
    static int callback_chatroom(struct lws *wsi, enum lws_callback_reasons reason, void *user, void *in, size_t len);
    static int callback_example(struct lws *wsi, enum lws_callback_reasons reason, void *user, void *in, size_t len);
};
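
/*
 * task_thread_routine() has the standard pthread entry-point signature, so a websocket
 * instance that wants asynchronous task processing would typically be wired up roughly
 * like this (a sketch only; the actual startup code lives in the .cpp, which may differ):
 *
 *     websocket *ws = new websocket();
 *     ws->run_async = true;
 *     pthread_create(&ws->task_thread, NULL, websocket::task_thread_routine, ws);
 *     ...
 *     ws->shutdown = true;              // ask the background thread to stop
 *     pthread_join(ws->task_thread, NULL);
 */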
extern websocket *the_websocket; // One global variable. Shhhh. Don't tell anyone.

enum protocols
{
    PROTOCOL_HTTP = 0,
    PROTOCOL_EXAMPLE,
    PROTOCOL_COUNT
};
static struct lws_protocols protocols[] =
{
    /* The first protocol must always be the HTTP handler. */
    {
        "http-only",              /* name */
        websocket::callback_http, /* callback */
        0,                        /* no per-session data */
        0,                        /* max frame size / rx buffer */
    },
    {
        "example-protocol",
        websocket::callback_example,
        0,                        /* size of the per-session client block */
        EXAMPLE_RX_BUFFER_BYTES,
    },
    {
        "chatroom-protocol",
        websocket::callback_chatroom,
        sizeof(chatclient),
        EXAMPLE_RX_CHATROOM_BYTES,
    },
    { NULL, NULL, 0, 0 } /* terminator */
};

#endif
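
/*
 * A rough sketch (not part of this header) of how a context built on the protocols[] table
 * above is typically created and serviced with the stock libwebsockets API; the port number
 * here is an arbitrary example:
 *
 *     struct lws_context_creation_info info;
 *     memset(&info, 0, sizeof(info));
 *     info.port = 8000;            // example port, pick your own
 *     info.protocols = protocols;  // the table declared above
 *     info.gid = -1;
 *     info.uid = -1;
 *
 *     struct lws_context *context = lws_create_context(&info);
 *     while (!the_websocket->shutdown)
 *         lws_service(context, 50); // run the single-threaded service loop
 *     lws_context_destroy(context);
 */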
/*
Regarding lws master: basically yes, but a situation where more than one thread is trying to
set a writable callback on the same wsi at the same time will blow up, because there is no
locking inside lws to protect against that.

Master includes a lot of docs about this now. From READMEs/README.coding.md:

Libwebsockets works in a serialized event loop, in a single thread. It supports not only the
default poll() backend, but libuv, libev, and libevent event loop libraries that also take this
locking-free, nonblocking event loop approach that is not threadsafe. There are several advantages
to this technique, but one disadvantage: it doesn't integrate easily if there are multiple threads
that want to use libwebsockets.

However, integration with multithreaded apps is possible if you follow some guidelines.

Aside from two APIs, directly calling lws APIs from other threads is not allowed.

If you want to keep a list of live wsi, you need to use lifecycle callbacks on the protocol in the
service thread to manage the list, with your own locking. Typically you use an ESTABLISHED callback
to add ws wsi to your list and a CLOSED callback to remove them.
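
A sketch of that pattern, as it might appear inside a protocol callback's switch on reason
(the list and lock names are illustrative, not part of this project; assumes <vector>,
<algorithm>, and <pthread.h>):

    static pthread_mutex_t live_lock = PTHREAD_MUTEX_INITIALIZER; // illustrative name
    static std::vector<struct lws *> live_wsi;                    // guarded by live_lock

    case LWS_CALLBACK_ESTABLISHED:
        pthread_mutex_lock(&live_lock);
        live_wsi.push_back(wsi);                                  // track the new connection
        pthread_mutex_unlock(&live_lock);
        break;

    case LWS_CALLBACK_CLOSED:
        pthread_mutex_lock(&live_lock);
        live_wsi.erase(std::remove(live_wsi.begin(), live_wsi.end(), wsi), live_wsi.end());
        pthread_mutex_unlock(&live_lock);
        break;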

LWS regulates your write activity by being able to let you know when you may write more on a connection.
That reflects the reality that you cannot succeed in sending data to a peer that has no room for it, so
you should not generate or buffer write data until you know the peer connection can take more.
Other libraries pretend that the guy doing the writing is the boss who decides what happens, and absorb
as much as you want to write into local buffering. That does not scale to a lot of connections, because
it will exhaust your memory and waste time copying data around in memory needlessly.
The truth is the receiver, along with the network between you, is the boss who decides what will happen.
If he stops accepting data, no data will move. LWS is designed to reflect that.

If you have something to send, you call lws_callback_on_writable() on the connection, and when it is
writeable, you will get a LWS_CALLBACK_SERVER_WRITEABLE callback, where you should generate the data
to send and send it with lws_write().
You cannot send data using lws_write() outside of the WRITEABLE callback.
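
In code, the flow is roughly the following sketch (the buffer size and message are arbitrary;
the buffer layout mirrors struct payload above):

    // On the service thread, when you have something to send:
    lws_callback_on_writable(wsi);

    // Later, inside the protocol callback:
    case LWS_CALLBACK_SERVER_WRITEABLE: {
        unsigned char buf[LWS_SEND_BUFFER_PRE_PADDING + 128 + LWS_SEND_BUFFER_POST_PADDING];
        int n = snprintf((char *)&buf[LWS_SEND_BUFFER_PRE_PADDING], 128, "hello");
        lws_write(wsi, &buf[LWS_SEND_BUFFER_PRE_PADDING], (size_t)n, LWS_WRITE_TEXT);
        break;
    }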

For multithreaded apps, this corresponds to a need to be able to provoke the lws_callback_on_writable()
action and to wake the service thread from its event loop wait (sleeping in poll() or epoll() or whatever).
The rules above mean directly sending data on the connection from another thread is out of the question.
Therefore the two APIs mentioned above that may be used from another thread are:

  - for LWS using the default poll() event loop, lws_callback_on_writable()
  - for LWS using a libuv/libev/libevent event loop, lws_cancel_service()

If you are using the default poll() event loop, one "foreign thread" at a time may call lws_callback_on_writable()
directly for a wsi. You need to use your own locking around that to serialize multiple thread access to it.
If you implement LWS_CALLBACK_GET_THREAD_ID in protocols[0], then LWS will detect when it has been called
from a foreign thread and automatically use lws_cancel_service() to additionally wake the service loop from its wait.
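
For the poll() case, the foreign-thread side might look like this sketch (reusing the
illustrative live_lock from the list sketch above; the wsi must still be on your live list):

    // In a worker thread, default poll() event loop only:
    pthread_mutex_lock(&live_lock);        // your own serialization of foreign-thread access
    lws_callback_on_writable(some_wsi);    // one of the two calls permitted cross-thread
    pthread_mutex_unlock(&live_lock);

    // In protocols[0]'s callback, so lws can detect cross-thread callers:
    case LWS_CALLBACK_GET_THREAD_ID:
        return (int)(uint64_t)pthread_self();   // assumes pthread_t converts to an integer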

The libuv/libev/libevent event loops cannot handle being called from other threads, so there is a slightly
different scheme: you may call lws_cancel_service() to force the event loop to end immediately. This then
broadcasts a callback (in the service thread context), LWS_CALLBACK_EVENT_WAIT_CANCELLED, to all protocols
on all vhosts, where you can perform your own locking and walk a list of wsi that need lws_callback_on_writable()
called on them.
lws_cancel_service() is very cheap to call.
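
That scheme might look like this sketch (pending_lock and pending_writes are illustrative
names; assumes <vector>):

    static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;  // illustrative
    static std::vector<struct lws *> pending_writes;                  // wsi awaiting a write

    // In any worker thread:
    pthread_mutex_lock(&pending_lock);
    pending_writes.push_back(some_wsi);      // remember who needs a WRITEABLE callback
    pthread_mutex_unlock(&pending_lock);
    lws_cancel_service(context);             // cheap; wakes the service thread

    // In the protocol callback, running on the service thread:
    case LWS_CALLBACK_EVENT_WAIT_CANCELLED:
        pthread_mutex_lock(&pending_lock);
        for (struct lws *w : pending_writes)
            lws_callback_on_writable(w);     // now safe: we are on the service thread
        pending_writes.clear();
        pthread_mutex_unlock(&pending_lock);
        break;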

The obverse of this truism about the receiver being the boss is the case where we are receiving. If we get into a
situation where we actually can't usefully receive any more, perhaps because we are passing the data on and the guy we
want to send to can't receive any more, then we should "turn off RX" by using the RX flow control
API, lws_rx_flow_control(wsi, 0). When something happens that lets us accept more RX (e.g., we learn our onward
connection is writeable), we can call it again to re-enable RX on the incoming wsi.

LWS stops calling back about RX as soon as you use flow control to disable RX, buffering the data internally
if necessary, so you will only see RX when you can handle it. While RX is disabled this way, LWS stops taking
new data in... this makes the situation known to the sender by TCP "backpressure": the tx window fills and the
sender finds he cannot write any more to the connection.
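
A sketch of that RX throttling (forward_to_peer() and onward_has_room() are illustrative
placeholders for your own plumbing, not lws APIs):

    case LWS_CALLBACK_RECEIVE:
        forward_to_peer(in, len);            // illustrative: pass the data onward
        if (!onward_has_room())              // e.g. the onward connection is choked
            lws_rx_flow_control(wsi, 0);     // stop RX callbacks; TCP backpressure builds
        break;

    // Later, on the service thread (e.g. when the onward connection signals WRITEABLE):
    lws_rx_flow_control(wsi, 1);             // resume delivering RX to this callback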

See the mirror protocol implementations for example code.

If you need to service other socket or file descriptors as well as the websocket ones, you can combine them
with the websocket ones in one poll loop (see "External Polling Loop support" in the lws documentation) and still do
it all in one thread / process context. If the need is less architectural, you can also create RAW mode client
and serving sockets; this is how the lws plugin for the ssh server works.
*/