/* sock.c — Wine server socket management (extracted listing; line-number gutter removed) */
  1. /*
  2. * Server-side socket management
  3. *
  4. * Copyright (C) 1999 Marcus Meissner, Ove Kåven
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
  19. *
  20. * FIXME: we use read|write access in all cases. Shouldn't we depend that
  21. * on the access of the current handle?
  22. */
  23. #include "config.h"
  24. #include <assert.h>
  25. #include <fcntl.h>
  26. #include <stdarg.h>
  27. #include <stdio.h>
  28. #include <string.h>
  29. #include <stdlib.h>
  30. #include <errno.h>
  31. #ifdef HAVE_POLL_H
  32. # include <poll.h>
  33. #endif
  34. #include <sys/time.h>
  35. #include <sys/types.h>
  36. #ifdef HAVE_SYS_SOCKET_H
  37. # include <sys/socket.h>
  38. #endif
  39. #ifdef HAVE_SYS_IOCTL_H
  40. #include <sys/ioctl.h>
  41. #endif
  42. #ifdef HAVE_SYS_FILIO_H
  43. # include <sys/filio.h>
  44. #endif
  45. #include <time.h>
  46. #include <unistd.h>
  47. #include <limits.h>
  48. #ifdef HAVE_LINUX_RTNETLINK_H
  49. # include <linux/rtnetlink.h>
  50. #endif
  51. #include "ntstatus.h"
  52. #define WIN32_NO_STATUS
  53. #include "windef.h"
  54. #include "winternl.h"
  55. #include "winerror.h"
  56. #define USE_WS_PREFIX
  57. #include "winsock2.h"
  58. #include "process.h"
  59. #include "file.h"
  60. #include "handle.h"
  61. #include "thread.h"
  62. #include "request.h"
  63. #include "user.h"
/* From winsock.h */
/* Bit numbers of the FD_* network events; used to index the per-socket
 * errors[] array and the event_bitorder[] table below. */
#define FD_MAX_EVENTS 10
#define FD_READ_BIT 0
#define FD_WRITE_BIT 1
#define FD_OOB_BIT 2
#define FD_ACCEPT_BIT 3
#define FD_CONNECT_BIT 4
#define FD_CLOSE_BIT 5

/*
 * Define flags to be used with the WSAAsyncSelect() call.
 */
#define FD_READ 0x00000001
#define FD_WRITE 0x00000002
#define FD_OOB 0x00000004
#define FD_ACCEPT 0x00000008
#define FD_CONNECT 0x00000010
#define FD_CLOSE 0x00000020

/* internal per-socket flags; kept in the same bit-field as the FD_* events,
 * in high bits that winsock itself never uses */
#define FD_WINE_REUSE 0x08000000
#define FD_WINE_LISTENING 0x10000000
#define FD_WINE_NONBLOCKING 0x20000000
#define FD_WINE_CONNECTED 0x40000000
#define FD_WINE_RAW 0x80000000
#define FD_WINE_INTERNAL 0xFFFF0000
/* server-side state for one winsock socket; state/mask/hmask/pmask together
 * implement the WSAAsyncSelect/WSAEventSelect notification machinery */
struct sock
{
    struct object obj; /* object header */
    struct fd *fd; /* socket file descriptor */
    unsigned int state; /* status bits */
    unsigned int mask; /* event mask */
    unsigned int hmask; /* held (blocked) events */
    unsigned int pmask; /* pending events */
    unsigned int flags; /* socket flags */
    int polling; /* is socket being polled? */
    unsigned short proto; /* socket protocol */
    unsigned short type; /* socket type */
    unsigned short family; /* socket family */
    struct event *event; /* event object */
    user_handle_t window; /* window to send the message to */
    unsigned int message; /* message to send */
    obj_handle_t wparam; /* message wparam (socket handle) */
    unsigned int errors[FD_MAX_EVENTS]; /* event errors */
    timeout_t connect_time;/* time the socket was connected */
    struct sock *deferred; /* socket that waits for a deferred accept */
    struct async_queue read_q; /* queue for asynchronous reads */
    struct async_queue write_q; /* queue for asynchronous writes */
    struct async_queue ifchange_q; /* queue for interface change notifications */
    struct object *ifchange_obj; /* the interface change notification object */
    struct list ifchange_entry; /* entry in ifchange notification list */
};
/* forward declarations of the object and fd callbacks implemented below */
static void sock_dump( struct object *obj, int verbose );
static int sock_signaled( struct object *obj, struct wait_queue_entry *entry );
static struct fd *sock_get_fd( struct object *obj );
static void sock_destroy( struct object *obj );
static struct object *sock_get_ifchange( struct sock *sock );
static void sock_release_ifchange( struct sock *sock );
static int sock_get_poll_events( struct fd *fd );
static void sock_poll_event( struct fd *fd, int event );
static enum server_fd_type sock_get_fd_type( struct fd *fd );
static int sock_ioctl( struct fd *fd, ioctl_code_t code, client_ptr_t in_buf, client_ptr_t out_buf, struct async *async );
static void sock_queue_async( struct fd *fd, struct async *async, int type, int count );
static void sock_reselect_async( struct fd *fd, struct async_queue *queue );
static int sock_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static int sock_get_ntstatus( int err );
static unsigned int sock_get_error( int err );
/* object operation table for socket objects */
static const struct object_ops sock_ops =
{
    sizeof(struct sock),          /* size */
    sock_dump,                    /* dump */
    no_get_type,                  /* get_type */
    add_queue,                    /* add_queue */
    remove_queue,                 /* remove_queue */
    sock_signaled,                /* signaled */
    NULL,                         /* get_esync_fd */
    no_satisfied,                 /* satisfied */
    no_signal,                    /* signal */
    sock_get_fd,                  /* get_fd */
    default_fd_map_access,        /* map_access */
    default_get_sd,               /* get_sd */
    default_set_sd,               /* set_sd */
    no_lookup_name,               /* lookup_name */
    no_link_name,                 /* link_name */
    NULL,                         /* unlink_name */
    no_open_file,                 /* open_file */
    no_kernel_obj_list,           /* get_kernel_obj_list */
    no_alloc_handle,              /* alloc_handle */
    sock_close_handle,            /* close_handle */
    sock_destroy                  /* destroy */
};
/* fd operation table: sockets do not implement read/write/flush directly,
 * everything goes through ioctl and async queues */
static const struct fd_ops sock_fd_ops =
{
    sock_get_poll_events,         /* get_poll_events */
    sock_poll_event,              /* poll_event */
    sock_get_fd_type,             /* get_fd_type */
    no_fd_read,                   /* read */
    no_fd_write,                  /* write */
    no_fd_flush,                  /* flush */
    default_fd_get_file_info,     /* get_file_info */
    no_fd_get_volume_info,        /* get_volume_info */
    sock_ioctl,                   /* ioctl */
    sock_queue_async,             /* queue_async */
    sock_reselect_async           /* reselect_async */
};
/* Permutation of 0..FD_MAX_EVENTS - 1 representing the order in which
 * we post messages if there are multiple events.  Used to send
 * messages.  The problem is if there is both a FD_CONNECT event and,
 * say, an FD_READ event available on the same socket, we want to
 * notify the app of the connect event first.  Otherwise it may
 * discard the read event because it thinks it hasn't connected yet.
 */
static const int event_bitorder[FD_MAX_EVENTS] =
{
    FD_CONNECT_BIT,
    FD_ACCEPT_BIT,
    FD_OOB_BIT,
    FD_WRITE_BIT,
    FD_READ_BIT,
    FD_CLOSE_BIT,
    6, 7, 8, 9  /* leftovers: bits with no defined event, kept for completeness */
};

/* Flags that make sense only for SOCK_STREAM sockets */
#define STREAM_FLAG_MASK ((unsigned int) (FD_CONNECT | FD_ACCEPT | FD_WINE_LISTENING | FD_WINE_CONNECTED))
  186. typedef enum {
  187. SOCK_SHUTDOWN_ERROR = -1,
  188. SOCK_SHUTDOWN_EOF = 0,
  189. SOCK_SHUTDOWN_POLLHUP = 1
  190. } sock_shutdown_t;
  191. static sock_shutdown_t sock_shutdown_type = SOCK_SHUTDOWN_ERROR;
  192. static sock_shutdown_t sock_check_pollhup(void)
  193. {
  194. sock_shutdown_t ret = SOCK_SHUTDOWN_ERROR;
  195. int fd[2], n;
  196. struct pollfd pfd;
  197. char dummy;
  198. if ( socketpair( AF_UNIX, SOCK_STREAM, 0, fd ) ) return ret;
  199. if ( shutdown( fd[0], 1 ) ) goto out;
  200. pfd.fd = fd[1];
  201. pfd.events = POLLIN;
  202. pfd.revents = 0;
  203. /* Solaris' poll() sometimes returns nothing if given a 0ms timeout here */
  204. n = poll( &pfd, 1, 1 );
  205. if ( n != 1 ) goto out; /* error or timeout */
  206. if ( pfd.revents & POLLHUP )
  207. ret = SOCK_SHUTDOWN_POLLHUP;
  208. else if ( pfd.revents & POLLIN &&
  209. read( fd[1], &dummy, 1 ) == 0 )
  210. ret = SOCK_SHUTDOWN_EOF;
  211. out:
  212. close( fd[0] );
  213. close( fd[1] );
  214. return ret;
  215. }
  216. void sock_init(void)
  217. {
  218. sock_shutdown_type = sock_check_pollhup();
  219. switch ( sock_shutdown_type )
  220. {
  221. case SOCK_SHUTDOWN_EOF:
  222. if (debug_level) fprintf( stderr, "sock_init: shutdown() causes EOF\n" );
  223. break;
  224. case SOCK_SHUTDOWN_POLLHUP:
  225. if (debug_level) fprintf( stderr, "sock_init: shutdown() causes POLLHUP\n" );
  226. break;
  227. default:
  228. fprintf( stderr, "sock_init: ERROR in sock_check_pollhup()\n" );
  229. sock_shutdown_type = SOCK_SHUTDOWN_EOF;
  230. }
  231. }
  232. static int sock_reselect( struct sock *sock )
  233. {
  234. int ev = sock_get_poll_events( sock->fd );
  235. if (debug_level)
  236. fprintf(stderr,"sock_reselect(%p): new mask %x\n", sock, ev);
  237. if (!sock->polling) /* FIXME: should find a better way to do this */
  238. {
  239. /* previously unconnected socket, is this reselect supposed to connect it? */
  240. if (!(sock->state & ~FD_WINE_NONBLOCKING)) return 0;
  241. /* ok, it is, attach it to the wineserver's main poll loop */
  242. sock->polling = 1;
  243. allow_fd_caching( sock->fd );
  244. }
  245. /* update condition mask */
  246. set_fd_events( sock->fd, ev );
  247. return ev;
  248. }
  249. /* wake anybody waiting on the socket event or send the associated message */
  250. static void sock_wake_up( struct sock *sock )
  251. {
  252. unsigned int events = sock->pmask & sock->mask;
  253. int i;
  254. if ( !events ) return;
  255. if (sock->event)
  256. {
  257. if (debug_level) fprintf(stderr, "signalling events %x ptr %p\n", events, sock->event );
  258. set_event( sock->event );
  259. }
  260. if (sock->window)
  261. {
  262. if (debug_level) fprintf(stderr, "signalling events %x win %08x\n", events, sock->window );
  263. for (i = 0; i < FD_MAX_EVENTS; i++)
  264. {
  265. int event = event_bitorder[i];
  266. if (sock->pmask & (1 << event))
  267. {
  268. lparam_t lparam = (1 << event) | (sock->errors[event] << 16);
  269. post_message( sock->window, sock->message, sock->wparam, lparam );
  270. }
  271. }
  272. sock->pmask = 0;
  273. sock_reselect( sock );
  274. }
  275. }
  276. static inline int sock_error( struct fd *fd )
  277. {
  278. unsigned int optval = 0;
  279. socklen_t optlen = sizeof(optval);
  280. getsockopt( get_unix_fd(fd), SOL_SOCKET, SO_ERROR, (void *) &optval, &optlen);
  281. return optval;
  282. }
  283. static int sock_dispatch_asyncs( struct sock *sock, int event, int error )
  284. {
  285. if ( sock->flags & WSA_FLAG_OVERLAPPED )
  286. {
  287. if (event & (POLLIN|POLLPRI) && async_waiting( &sock->read_q ))
  288. {
  289. if (debug_level) fprintf( stderr, "activating read queue for socket %p\n", sock );
  290. async_wake_up( &sock->read_q, STATUS_ALERTED );
  291. event &= ~(POLLIN|POLLPRI);
  292. }
  293. if (event & POLLOUT && async_waiting( &sock->write_q ))
  294. {
  295. if (debug_level) fprintf( stderr, "activating write queue for socket %p\n", sock );
  296. async_wake_up( &sock->write_q, STATUS_ALERTED );
  297. event &= ~POLLOUT;
  298. }
  299. if ( event & (POLLERR|POLLHUP) )
  300. {
  301. int status = sock_get_ntstatus( error );
  302. if ( !(sock->state & FD_READ) )
  303. async_wake_up( &sock->read_q, status );
  304. if ( !(sock->state & FD_WRITE) )
  305. async_wake_up( &sock->write_q, status );
  306. }
  307. }
  308. return event;
  309. }
  310. static void sock_dispatch_events( struct sock *sock, int prevstate, int event, int error )
  311. {
  312. if (prevstate & FD_CONNECT)
  313. {
  314. sock->pmask |= FD_CONNECT;
  315. sock->hmask |= FD_CONNECT;
  316. sock->errors[FD_CONNECT_BIT] = sock_get_error( error );
  317. goto end;
  318. }
  319. if (prevstate & FD_WINE_LISTENING)
  320. {
  321. sock->pmask |= FD_ACCEPT;
  322. sock->hmask |= FD_ACCEPT;
  323. sock->errors[FD_ACCEPT_BIT] = sock_get_error( error );
  324. goto end;
  325. }
  326. if (event & POLLIN)
  327. {
  328. sock->pmask |= FD_READ;
  329. sock->hmask |= FD_READ;
  330. sock->errors[FD_READ_BIT] = 0;
  331. }
  332. if (event & POLLOUT)
  333. {
  334. sock->pmask |= FD_WRITE;
  335. sock->hmask |= FD_WRITE;
  336. sock->errors[FD_WRITE_BIT] = 0;
  337. }
  338. if (event & POLLPRI)
  339. {
  340. sock->pmask |= FD_OOB;
  341. sock->hmask |= FD_OOB;
  342. sock->errors[FD_OOB_BIT] = 0;
  343. }
  344. if (event & (POLLERR|POLLHUP))
  345. {
  346. sock->pmask |= FD_CLOSE;
  347. sock->hmask |= FD_CLOSE;
  348. sock->errors[FD_CLOSE_BIT] = sock_get_error( error );
  349. }
  350. end:
  351. sock_wake_up( sock );
  352. }
/* fd callback: invoked from the main poll loop when the socket's unix fd
 * reports activity.  Updates the connection state, wakes pending asyncs,
 * and turns the remaining poll bits into winsock notifications. */
static void sock_poll_event( struct fd *fd, int event )
{
    struct sock *sock = get_fd_user( fd );
    int hangup_seen = 0;
    int prevstate = sock->state;  /* state before this event drives the dispatch below */
    int error = 0;

    assert( sock->obj.ops == &sock_ops );
    if (debug_level)
        fprintf(stderr, "socket %p select event: %x\n", sock, event);

    /* we may change event later, remove from loop here */
    if (event & (POLLERR|POLLHUP)) set_fd_events( sock->fd, -1 );

    if (sock->state & FD_CONNECT)
    {
        if (event & (POLLERR|POLLHUP))
        {
            /* we didn't get connected? */
            sock->state &= ~FD_CONNECT;
            event &= ~POLLOUT;
            error = sock_error( fd );
        }
        else if (event & POLLOUT)
        {
            /* we got connected */
            sock->state |= FD_WINE_CONNECTED|FD_READ|FD_WRITE;
            sock->state &= ~FD_CONNECT;
            sock->connect_time = current_time;
        }
    }
    else if (sock->state & FD_WINE_LISTENING)
    {
        /* listening */
        if (event & (POLLERR|POLLHUP))
            error = sock_error( fd );
    }
    else
    {
        /* normal data flow */
        if ( sock->type == SOCK_STREAM && ( event & POLLIN ) )
        {
            char dummy;
            int nr;

            /* Linux 2.4 doesn't report POLLHUP if only one side of the socket
             * has been closed, so we need to check for it explicitly here */
            nr = recv( get_unix_fd( fd ), &dummy, 1, MSG_PEEK );
            if ( nr == 0 )
            {
                /* 0-byte read means the peer closed: treat as hangup below */
                hangup_seen = 1;
                event &= ~POLLIN;
            }
            else if ( nr < 0 )
            {
                event &= ~POLLIN;
                /* EAGAIN can happen if an async recv() falls between the server's poll()
                   call and the invocation of this routine */
                if ( errno != EAGAIN )
                {
                    error = errno;
                    event |= POLLERR;
                    if ( debug_level )
                        fprintf( stderr, "recv error on socket %p: %d\n", sock, errno );
                }
            }
        }

        if ( (hangup_seen || event & (POLLHUP|POLLERR)) && (sock->state & (FD_READ|FD_WRITE)) )
        {
            error = error ? error : sock_error( fd );
            /* a real error (or a POLLHUP on platforms where shutdown() shows up
             * only as EOF) kills the write direction as well */
            if ( (event & POLLERR) || ( sock_shutdown_type == SOCK_SHUTDOWN_EOF && (event & POLLHUP) ))
                sock->state &= ~FD_WRITE;
            sock->state &= ~FD_READ;

            if (debug_level)
                fprintf(stderr, "socket %p aborted by error %d, event: %x\n", sock, error, event);
        }

        if (hangup_seen)
            event |= POLLHUP;
    }

    event = sock_dispatch_asyncs( sock, event, error );
    sock_dispatch_events( sock, prevstate, event, error );

    /* if anyone is stupid enough to wait on the socket object itself,
     * maybe we should wake them up too, just in case? */
    wake_up( &sock->obj, 0 );

    sock_reselect( sock );
}
  435. static void sock_dump( struct object *obj, int verbose )
  436. {
  437. struct sock *sock = (struct sock *)obj;
  438. assert( obj->ops == &sock_ops );
  439. fprintf( stderr, "Socket fd=%p, state=%x, mask=%x, pending=%x, held=%x\n",
  440. sock->fd, sock->state,
  441. sock->mask, sock->pmask, sock->hmask );
  442. }
  443. static int sock_signaled( struct object *obj, struct wait_queue_entry *entry )
  444. {
  445. struct sock *sock = (struct sock *)obj;
  446. assert( obj->ops == &sock_ops );
  447. return check_fd_events( sock->fd, sock_get_poll_events( sock->fd ) ) != 0;
  448. }
  449. static int sock_get_poll_events( struct fd *fd )
  450. {
  451. struct sock *sock = get_fd_user( fd );
  452. unsigned int mask = sock->mask & ~sock->hmask;
  453. unsigned int smask = sock->state & mask;
  454. int ev = 0;
  455. assert( sock->obj.ops == &sock_ops );
  456. if (sock->state & FD_CONNECT)
  457. /* connecting, wait for writable */
  458. return POLLOUT;
  459. if (async_queued( &sock->read_q ))
  460. {
  461. if (async_waiting( &sock->read_q )) ev |= POLLIN | POLLPRI;
  462. }
  463. else if (smask & FD_READ || (sock->state & FD_WINE_LISTENING && mask & FD_ACCEPT))
  464. ev |= POLLIN | POLLPRI;
  465. /* We use POLLIN with 0 bytes recv() as FD_CLOSE indication for stream sockets. */
  466. else if ( sock->type == SOCK_STREAM && sock->state & FD_READ && mask & FD_CLOSE &&
  467. !(sock->hmask & FD_READ) )
  468. ev |= POLLIN;
  469. if (async_queued( &sock->write_q ))
  470. {
  471. if (async_waiting( &sock->write_q )) ev |= POLLOUT;
  472. }
  473. else if (smask & FD_WRITE)
  474. ev |= POLLOUT;
  475. return ev;
  476. }
  477. static enum server_fd_type sock_get_fd_type( struct fd *fd )
  478. {
  479. return FD_TYPE_SOCKET;
  480. }
  481. static int sock_ioctl( struct fd *fd, ioctl_code_t code, client_ptr_t in_buf, client_ptr_t out_buf, struct async *async )
  482. {
  483. struct sock *sock = get_fd_user( fd );
  484. assert( sock->obj.ops == &sock_ops );
  485. switch(code)
  486. {
  487. case WS_SIO_ADDRESS_LIST_CHANGE:
  488. if ((sock->state & FD_WINE_NONBLOCKING) && async_is_blocking( async ))
  489. {
  490. set_win32_error( WSAEWOULDBLOCK );
  491. return 0;
  492. }
  493. if (!sock_get_ifchange( sock )) return 0;
  494. queue_async( &sock->ifchange_q, async );
  495. set_error( STATUS_PENDING );
  496. return 1;
  497. default:
  498. set_error( STATUS_NOT_SUPPORTED );
  499. return 0;
  500. }
  501. }
  502. static void sock_queue_async( struct fd *fd, struct async *async, int type, int count )
  503. {
  504. struct sock *sock = get_fd_user( fd );
  505. struct async_queue *queue;
  506. assert( sock->obj.ops == &sock_ops );
  507. switch (type)
  508. {
  509. case ASYNC_TYPE_READ:
  510. queue = &sock->read_q;
  511. break;
  512. case ASYNC_TYPE_WRITE:
  513. queue = &sock->write_q;
  514. break;
  515. default:
  516. set_error( STATUS_INVALID_PARAMETER );
  517. return;
  518. }
  519. if ( ( !( sock->state & (FD_READ|FD_CONNECT|FD_WINE_LISTENING) ) && type == ASYNC_TYPE_READ ) ||
  520. ( !( sock->state & (FD_WRITE|FD_CONNECT) ) && type == ASYNC_TYPE_WRITE ) )
  521. {
  522. set_error( STATUS_PIPE_DISCONNECTED );
  523. return;
  524. }
  525. queue_async( queue, async );
  526. sock_reselect( sock );
  527. set_error( STATUS_PENDING );
  528. }
  529. static void sock_reselect_async( struct fd *fd, struct async_queue *queue )
  530. {
  531. struct sock *sock = get_fd_user( fd );
  532. /* ignore reselect on ifchange queue */
  533. if (&sock->ifchange_q != queue)
  534. sock_reselect( sock );
  535. }
  536. static struct fd *sock_get_fd( struct object *obj )
  537. {
  538. struct sock *sock = (struct sock *)obj;
  539. return (struct fd *)grab_object( sock->fd );
  540. }
  541. static int init_sockfd( int family, int type, int protocol )
  542. {
  543. int sockfd;
  544. sockfd = socket( family, type, protocol );
  545. if (sockfd == -1)
  546. {
  547. if (errno == EINVAL) set_win32_error( WSAESOCKTNOSUPPORT );
  548. else set_win32_error( sock_get_error( errno ));
  549. return sockfd;
  550. }
  551. fcntl(sockfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
  552. return sockfd;
  553. }
/* close_handle callback: when the last handle goes away, a socket marked
 * FD_WINE_REUSE is re-armed with a brand-new unix socket instead of being
 * destroyed.  Returns FALSE to keep the object alive, TRUE to allow it
 * to be destroyed. */
static int sock_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct sock *sock = (struct sock *)obj;

    assert( obj->ops == &sock_ops );
    if (!fd_close_handle( obj, process, handle ))
        return FALSE;  /* other handles remain; nothing more to do */

    if (sock->state & FD_WINE_REUSE)
    {
        struct fd *fd;
        int sockfd;

        /* recreate a unix socket with the same family/type/protocol */
        if ((sockfd = init_sockfd( sock->family, sock->type, sock->proto )) == -1)
            return TRUE;
        /* NOTE(review): assumes create_anonymous_fd() disposes of sockfd on
         * failure — confirm against fd.c before relying on this */
        if (!(fd = create_anonymous_fd( &sock_fd_ops, sockfd, &sock->obj, get_fd_options(sock->fd) )))
            return TRUE;
        /* shut down the old socket to unblock client-side poll() calls on it */
        shutdown( get_unix_fd(sock->fd), SHUT_RDWR );
        release_object( sock->fd );
        sock->fd = fd;
        return FALSE;  /* object is being reused, keep it alive */
    }
    return TRUE;
}
/* destroy callback: release everything still attached to the socket.
 * Pending ifchange asyncs are cancelled before their queue is freed. */
static void sock_destroy( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;

    assert( obj->ops == &sock_ops );

    /* FIXME: special socket shutdown stuff? */

    if ( sock->deferred )
        release_object( sock->deferred );

    /* cancel pending interface-change asyncs before freeing their queue */
    async_wake_up( &sock->ifchange_q, STATUS_CANCELLED );
    sock_release_ifchange( sock );
    free_async_queue( &sock->read_q );
    free_async_queue( &sock->write_q );
    free_async_queue( &sock->ifchange_q );
    if (sock->event) release_object( sock->event );
    if (sock->fd)
    {
        /* shut the socket down to force pending poll() calls in the client to return */
        shutdown( get_unix_fd(sock->fd), SHUT_RDWR );
        release_object( sock->fd );
    }
}
  595. static void init_sock(struct sock *sock)
  596. {
  597. sock->state = 0;
  598. sock->mask = 0;
  599. sock->hmask = 0;
  600. sock->pmask = 0;
  601. sock->polling = 0;
  602. sock->flags = 0;
  603. sock->type = 0;
  604. sock->family = 0;
  605. sock->event = NULL;
  606. sock->window = 0;
  607. sock->message = 0;
  608. sock->wparam = 0;
  609. sock->connect_time = 0;
  610. sock->deferred = NULL;
  611. sock->ifchange_obj = NULL;
  612. init_async_queue( &sock->read_q );
  613. init_async_queue( &sock->write_q );
  614. init_async_queue( &sock->ifchange_q );
  615. memset( sock->errors, 0, sizeof(sock->errors) );
  616. }
  617. /* create a new and unconnected socket */
  618. static struct object *create_socket( int family, int type, int protocol, unsigned int flags )
  619. {
  620. struct sock *sock;
  621. int sockfd;
  622. if ((sockfd = init_sockfd( family, type, protocol )) == -1)
  623. return NULL;
  624. if (!(sock = alloc_object( &sock_ops )))
  625. {
  626. close( sockfd );
  627. return NULL;
  628. }
  629. init_sock( sock );
  630. sock->state = (type != SOCK_STREAM) ? (FD_READ|FD_WRITE) : 0;
  631. sock->flags = flags;
  632. sock->proto = protocol;
  633. sock->type = type;
  634. sock->family = family;
  635. if (!(sock->fd = create_anonymous_fd( &sock_fd_ops, sockfd, &sock->obj,
  636. (flags & WSA_FLAG_OVERLAPPED) ? 0 : FILE_SYNCHRONOUS_IO_NONALERT )))
  637. {
  638. release_object( sock );
  639. return NULL;
  640. }
  641. sock_reselect( sock );
  642. clear_error();
  643. return &sock->obj;
  644. }
  645. /* accepts a socket and inits it */
  646. static int accept_new_fd( struct sock *sock )
  647. {
  648. /* Try to accept(2). We can't be safe that this an already connected socket
  649. * or that accept() is allowed on it. In those cases we will get -1/errno
  650. * return.
  651. */
  652. struct sockaddr saddr;
  653. socklen_t slen = sizeof(saddr);
  654. int acceptfd = accept( get_unix_fd(sock->fd), &saddr, &slen );
  655. if (acceptfd != -1)
  656. fcntl( acceptfd, F_SETFL, O_NONBLOCK );
  657. else
  658. set_win32_error( sock_get_error( errno ));
  659. return acceptfd;
  660. }
/* accept a socket (creates a new fd) */
static struct sock *accept_socket( obj_handle_t handle )
{
    struct sock *acceptsock;
    struct sock *sock;
    int acceptfd;

    sock = (struct sock *)get_handle_obj( current->process, handle, FILE_READ_DATA, &sock_ops );
    if (!sock)
        return NULL;

    if ( sock->deferred )
    {
        /* a previously deferred accept supplies the connection object */
        acceptsock = sock->deferred;
        sock->deferred = NULL;
    }
    else
    {
        if ((acceptfd = accept_new_fd( sock )) == -1)
        {
            release_object( sock );
            return NULL;
        }
        if (!(acceptsock = alloc_object( &sock_ops )))
        {
            close( acceptfd );
            release_object( sock );
            return NULL;
        }
        init_sock( acceptsock );
        /* newly created socket gets the same properties of the listening socket */
        acceptsock->state = FD_WINE_CONNECTED|FD_READ|FD_WRITE;
        if (sock->state & FD_WINE_NONBLOCKING)
            acceptsock->state |= FD_WINE_NONBLOCKING;
        acceptsock->mask = sock->mask;
        acceptsock->proto = sock->proto;
        acceptsock->type = sock->type;
        acceptsock->family = sock->family;
        acceptsock->window = sock->window;
        acceptsock->message = sock->message;
        acceptsock->connect_time = current_time;
        if (sock->event) acceptsock->event = (struct event *)grab_object( sock->event );
        acceptsock->flags = sock->flags;
        if (!(acceptsock->fd = create_anonymous_fd( &sock_fd_ops, acceptfd, &acceptsock->obj,
                                                    get_fd_options( sock->fd ) )))
        {
            release_object( acceptsock );
            release_object( sock );
            return NULL;
        }
    }
    clear_error();
    /* re-arm the listening socket's FD_ACCEPT notification */
    sock->pmask &= ~FD_ACCEPT;
    sock->hmask &= ~FD_ACCEPT;
    sock_reselect( sock );
    release_object( sock );
    return acceptsock;
}
/* accept a connection on listening socket 'sock' directly into the existing
 * socket object 'acceptsock' (AcceptEx-style).  Replaces acceptsock's fd and
 * resets its notification state.  Returns TRUE on success. */
static int accept_into_socket( struct sock *sock, struct sock *acceptsock )
{
    int acceptfd;
    struct fd *newfd;

    if ( sock->deferred )
    {
        /* reuse the connection from a previously deferred accept */
        newfd = dup_fd_object( sock->deferred->fd, 0, 0,
                               get_fd_options( acceptsock->fd ) );
        if ( !newfd )
            return FALSE;

        set_fd_user( newfd, &sock_fd_ops, &acceptsock->obj );

        release_object( sock->deferred );
        sock->deferred = NULL;
    }
    else
    {
        if ((acceptfd = accept_new_fd( sock )) == -1)
            return FALSE;

        /* NOTE(review): assumes create_anonymous_fd() disposes of acceptfd on
         * failure — confirm against fd.c before relying on this */
        if (!(newfd = create_anonymous_fd( &sock_fd_ops, acceptfd, &acceptsock->obj,
                                           get_fd_options( acceptsock->fd ) )))
            return FALSE;
    }

    /* the target socket becomes a fresh connected socket */
    acceptsock->state |= FD_WINE_CONNECTED|FD_READ|FD_WRITE;
    acceptsock->hmask = 0;
    acceptsock->pmask = 0;
    acceptsock->polling = 0;
    acceptsock->proto = sock->proto;
    acceptsock->type = sock->type;
    acceptsock->family = sock->family;
    acceptsock->wparam = 0;
    acceptsock->deferred = NULL;
    acceptsock->connect_time = current_time;
    fd_copy_completion( acceptsock->fd, newfd );
    release_object( acceptsock->fd );
    acceptsock->fd = newfd;

    clear_error();
    /* re-arm the listening socket's FD_ACCEPT notification */
    sock->pmask &= ~FD_ACCEPT;
    sock->hmask &= ~FD_ACCEPT;
    sock_reselect( sock );
    return TRUE;
}
  758. /* return an errno value mapped to a WSA error */
  759. static unsigned int sock_get_error( int err )
  760. {
  761. switch (err)
  762. {
  763. case EINTR: return WSAEINTR;
  764. case EBADF: return WSAEBADF;
  765. case EPERM:
  766. case EACCES: return WSAEACCES;
  767. case EFAULT: return WSAEFAULT;
  768. case EINVAL: return WSAEINVAL;
  769. case EMFILE: return WSAEMFILE;
  770. case EWOULDBLOCK: return WSAEWOULDBLOCK;
  771. case EINPROGRESS: return WSAEINPROGRESS;
  772. case EALREADY: return WSAEALREADY;
  773. case ENOTSOCK: return WSAENOTSOCK;
  774. case EDESTADDRREQ: return WSAEDESTADDRREQ;
  775. case EMSGSIZE: return WSAEMSGSIZE;
  776. case EPROTOTYPE: return WSAEPROTOTYPE;
  777. case ENOPROTOOPT: return WSAENOPROTOOPT;
  778. case EPROTONOSUPPORT: return WSAEPROTONOSUPPORT;
  779. case ESOCKTNOSUPPORT: return WSAESOCKTNOSUPPORT;
  780. case EOPNOTSUPP: return WSAEOPNOTSUPP;
  781. case EPFNOSUPPORT: return WSAEPFNOSUPPORT;
  782. case EAFNOSUPPORT: return WSAEAFNOSUPPORT;
  783. case EADDRINUSE: return WSAEADDRINUSE;
  784. case EADDRNOTAVAIL: return WSAEADDRNOTAVAIL;
  785. case ENETDOWN: return WSAENETDOWN;
  786. case ENETUNREACH: return WSAENETUNREACH;
  787. case ENETRESET: return WSAENETRESET;
  788. case ECONNABORTED: return WSAECONNABORTED;
  789. case EPIPE:
  790. case ECONNRESET: return WSAECONNRESET;
  791. case ENOBUFS: return WSAENOBUFS;
  792. case EISCONN: return WSAEISCONN;
  793. case ENOTCONN: return WSAENOTCONN;
  794. case ESHUTDOWN: return WSAESHUTDOWN;
  795. case ETOOMANYREFS: return WSAETOOMANYREFS;
  796. case ETIMEDOUT: return WSAETIMEDOUT;
  797. case ECONNREFUSED: return WSAECONNREFUSED;
  798. case ELOOP: return WSAELOOP;
  799. case ENAMETOOLONG: return WSAENAMETOOLONG;
  800. case EHOSTDOWN: return WSAEHOSTDOWN;
  801. case EHOSTUNREACH: return WSAEHOSTUNREACH;
  802. case ENOTEMPTY: return WSAENOTEMPTY;
  803. #ifdef EPROCLIM
  804. case EPROCLIM: return WSAEPROCLIM;
  805. #endif
  806. #ifdef EUSERS
  807. case EUSERS: return WSAEUSERS;
  808. #endif
  809. #ifdef EDQUOT
  810. case EDQUOT: return WSAEDQUOT;
  811. #endif
  812. #ifdef ESTALE
  813. case ESTALE: return WSAESTALE;
  814. #endif
  815. #ifdef EREMOTE
  816. case EREMOTE: return WSAEREMOTE;
  817. #endif
  818. case 0: return 0;
  819. default:
  820. errno = err;
  821. perror("wineserver: sock_get_error() can't map error");
  822. return WSAEFAULT;
  823. }
  824. }
  825. static int sock_get_ntstatus( int err )
  826. {
  827. switch ( err )
  828. {
  829. case EBADF: return STATUS_INVALID_HANDLE;
  830. case EBUSY: return STATUS_DEVICE_BUSY;
  831. case EPERM:
  832. case EACCES: return STATUS_ACCESS_DENIED;
  833. case EFAULT: return STATUS_NO_MEMORY;
  834. case EINVAL: return STATUS_INVALID_PARAMETER;
  835. case ENFILE:
  836. case EMFILE: return STATUS_TOO_MANY_OPENED_FILES;
  837. case EWOULDBLOCK: return STATUS_CANT_WAIT;
  838. case EINPROGRESS: return STATUS_PENDING;
  839. case EALREADY: return STATUS_NETWORK_BUSY;
  840. case ENOTSOCK: return STATUS_OBJECT_TYPE_MISMATCH;
  841. case EDESTADDRREQ: return STATUS_INVALID_PARAMETER;
  842. case EMSGSIZE: return STATUS_BUFFER_OVERFLOW;
  843. case EPROTONOSUPPORT:
  844. case ESOCKTNOSUPPORT:
  845. case EPFNOSUPPORT:
  846. case EAFNOSUPPORT:
  847. case EPROTOTYPE: return STATUS_NOT_SUPPORTED;
  848. case ENOPROTOOPT: return STATUS_INVALID_PARAMETER;
  849. case EOPNOTSUPP: return STATUS_NOT_SUPPORTED;
  850. case EADDRINUSE: return STATUS_ADDRESS_ALREADY_ASSOCIATED;
  851. case EADDRNOTAVAIL: return STATUS_INVALID_PARAMETER;
  852. case ECONNREFUSED: return STATUS_CONNECTION_REFUSED;
  853. case ESHUTDOWN: return STATUS_PIPE_DISCONNECTED;
  854. case ENOTCONN: return STATUS_CONNECTION_DISCONNECTED;
  855. case ETIMEDOUT: return STATUS_IO_TIMEOUT;
  856. case ENETUNREACH: return STATUS_NETWORK_UNREACHABLE;
  857. case EHOSTUNREACH: return STATUS_HOST_UNREACHABLE;
  858. case ENETDOWN: return STATUS_NETWORK_BUSY;
  859. case EPIPE:
  860. case ECONNRESET: return STATUS_CONNECTION_RESET;
  861. case ECONNABORTED: return STATUS_CONNECTION_ABORTED;
  862. case 0: return STATUS_SUCCESS;
  863. default:
  864. errno = err;
  865. perror("wineserver: sock_get_ntstatus() can't map error");
  866. return STATUS_UNSUCCESSFUL;
  867. }
  868. }
#ifdef HAVE_LINUX_RTNETLINK_H

/* only keep one ifchange object around, all sockets waiting for wakeups will look to it */
static struct object *ifchange_object;

/* forward declarations for the ifchange object and fd callbacks */
static void ifchange_dump( struct object *obj, int verbose );
static struct fd *ifchange_get_fd( struct object *obj );
static void ifchange_destroy( struct object *obj );

static int ifchange_get_poll_events( struct fd *fd );
static void ifchange_poll_event( struct fd *fd, int event );

/* wraps the netlink socket used to receive interface change notifications */
struct ifchange
{
    struct object  obj;     /* object header */
    struct fd     *fd;      /* interface change file descriptor */
    struct list    sockets; /* list of sockets to send interface change notifications */
};
/* object callbacks for the ifchange object (positional: order must match struct object_ops) */
static const struct object_ops ifchange_ops =
{
    sizeof(struct ifchange), /* size */
    ifchange_dump,           /* dump */
    no_get_type,             /* get_type */
    add_queue,               /* add_queue */
    NULL,                    /* remove_queue */
    NULL,                    /* signaled */
    NULL,                    /* get_esync_fd */
    no_satisfied,            /* satisfied */
    no_signal,               /* signal */
    ifchange_get_fd,         /* get_fd */
    default_fd_map_access,   /* map_access */
    default_get_sd,          /* get_sd */
    default_set_sd,          /* set_sd */
    no_lookup_name,          /* lookup_name */
    no_link_name,            /* link_name */
    NULL,                    /* unlink_name */
    no_open_file,            /* open_file */
    no_kernel_obj_list,      /* get_kernel_obj_list */
    no_alloc_handle,         /* alloc_handle */
    no_close_handle,         /* close_handle */
    ifchange_destroy         /* destroy */
};
/* fd callbacks for the ifchange netlink fd: poll-driven only, no I/O entry points */
static const struct fd_ops ifchange_fd_ops =
{
    ifchange_get_poll_events, /* get_poll_events */
    ifchange_poll_event,      /* poll_event */
    NULL,                     /* get_fd_type */
    no_fd_read,               /* read */
    no_fd_write,              /* write */
    no_fd_flush,              /* flush */
    no_fd_get_file_info,      /* get_file_info */
    no_fd_get_volume_info,    /* get_volume_info */
    no_fd_ioctl,              /* ioctl */
    NULL,                     /* queue_async */
    NULL                      /* reselect_async */
};
  921. static void ifchange_dump( struct object *obj, int verbose )
  922. {
  923. assert( obj->ops == &ifchange_ops );
  924. fprintf( stderr, "Interface change\n" );
  925. }
  926. static struct fd *ifchange_get_fd( struct object *obj )
  927. {
  928. struct ifchange *ifchange = (struct ifchange *)obj;
  929. return (struct fd *)grab_object( ifchange->fd );
  930. }
  931. static void ifchange_destroy( struct object *obj )
  932. {
  933. struct ifchange *ifchange = (struct ifchange *)obj;
  934. assert( obj->ops == &ifchange_ops );
  935. release_object( ifchange->fd );
  936. /* reset the global ifchange object so that it will be recreated if it is needed again */
  937. assert( obj == ifchange_object );
  938. ifchange_object = NULL;
  939. }
/* poll events callback: the netlink fd is only ever read, so always wait for input */
static int ifchange_get_poll_events( struct fd *fd )
{
    return POLLIN;
}
/* wake up all the sockets waiting for a change notification event */
static void ifchange_wake_up( struct object *obj, unsigned int status )
{
    struct ifchange *ifchange = (struct ifchange *)obj;
    struct list *ptr, *next;

    assert( obj->ops == &ifchange_ops );
    assert( obj == ifchange_object );

    /* _SAFE variant is required: sock_release_ifchange() unlinks the current entry */
    LIST_FOR_EACH_SAFE( ptr, next, &ifchange->sockets )
    {
        struct sock *sock = LIST_ENTRY( ptr, struct sock, ifchange_entry );

        assert( sock->ifchange_obj );
        async_wake_up( &sock->ifchange_q, status ); /* issue ifchange notification for the socket */
        sock_release_ifchange( sock ); /* remove socket from list and decrement ifchange refcount */
    }
}
/* poll callback: read netlink messages and wake waiters when an address changed */
static void ifchange_poll_event( struct fd *fd, int event )
{
    struct object *ifchange = get_fd_user( fd );
    unsigned int status = STATUS_PENDING;
    char buffer[PIPE_BUF];
    int r;

    /* drain one batch of messages from the non-blocking netlink socket */
    r = recv( get_unix_fd(fd), buffer, sizeof(buffer), MSG_DONTWAIT );
    if (r < 0)
    {
        /* EWOULDBLOCK and EAGAIN may be distinct values on some systems */
        if (errno == EWOULDBLOCK || (EWOULDBLOCK != EAGAIN && errno == EAGAIN))
            return; /* retry when poll() says the socket is ready */
        status = sock_get_ntstatus( errno );
    }
    else if (r > 0)
    {
        /* walk the netlink message stream; an address add/delete means a change happened */
        struct nlmsghdr *nlh;

        for (nlh = (struct nlmsghdr *)buffer; NLMSG_OK(nlh, r); nlh = NLMSG_NEXT(nlh, r))
        {
            if (nlh->nlmsg_type == NLMSG_DONE)
                break;
            if (nlh->nlmsg_type == RTM_NEWADDR || nlh->nlmsg_type == RTM_DELADDR)
                status = STATUS_SUCCESS;
        }
    }
    else status = STATUS_CANCELLED; /* EOF on the netlink socket */

    /* notify every socket registered for interface change events */
    if (status != STATUS_PENDING) ifchange_wake_up( ifchange, status );
}
  986. #endif
/* we only need one of these interface notification objects, all of the sockets dependent upon
 * it will wake up when a notification event occurs */
static struct object *get_ifchange( void )
{
#ifdef HAVE_LINUX_RTNETLINK_H
    struct ifchange *ifchange;
    struct sockaddr_nl addr;
    int unix_fd;

    if (ifchange_object)
    {
        /* increment the refcount for each socket that uses the ifchange object */
        return grab_object( ifchange_object );
    }

    /* create the socket we need for processing interface change notifications */
    unix_fd = socket( PF_NETLINK, SOCK_RAW, NETLINK_ROUTE );
    if (unix_fd == -1)
    {
        set_win32_error( sock_get_error( errno ));
        return NULL;
    }
    /* NOTE(review): fcntl return value is unchecked — failure would leave the
     * socket blocking; presumably acceptable since poll() gates all reads */
    fcntl( unix_fd, F_SETFL, O_NONBLOCK ); /* make socket nonblocking */

    memset( &addr, 0, sizeof(addr) );
    addr.nl_family = AF_NETLINK;
    /* subscribe to IPv4 address changes only; IPv6 addresses are not monitored here */
    addr.nl_groups = RTMGRP_IPV4_IFADDR;

    /* bind the socket to the special netlink kernel interface */
    if (bind( unix_fd, (struct sockaddr *)&addr, sizeof(addr) ) == -1)
    {
        close( unix_fd );
        set_win32_error( sock_get_error( errno ));
        return NULL;
    }

    if (!(ifchange = alloc_object( &ifchange_ops )))
    {
        close( unix_fd );
        set_error( STATUS_NO_MEMORY );
        return NULL;
    }
    list_init( &ifchange->sockets );

    if (!(ifchange->fd = create_anonymous_fd( &ifchange_fd_ops, unix_fd, &ifchange->obj, 0 )))
    {
        /* destroys the partially constructed object (fd is NULL here) */
        release_object( ifchange );
        set_error( STATUS_NO_MEMORY );
        return NULL;
    }
    set_fd_events( ifchange->fd, POLLIN ); /* enable read wakeup on the file descriptor */

    /* the ifchange object is now successfully configured */
    ifchange_object = &ifchange->obj;
    return &ifchange->obj;
#else
    set_error( STATUS_NOT_SUPPORTED );
    return NULL;
#endif
}
/* add the socket to the interface change notification list */
static void ifchange_add_sock( struct object *obj, struct sock *sock )
{
#ifdef HAVE_LINUX_RTNETLINK_H
    struct ifchange *ifchange = (struct ifchange *)obj;

    /* the entry is unlinked again by sock_release_ifchange() */
    list_add_tail( &ifchange->sockets, &sock->ifchange_entry );
#endif
}
  1048. /* create a new ifchange queue for a specific socket or, if one already exists, reuse the existing one */
  1049. static struct object *sock_get_ifchange( struct sock *sock )
  1050. {
  1051. struct object *ifchange;
  1052. if (sock->ifchange_obj) /* reuse existing ifchange_obj for this socket */
  1053. return sock->ifchange_obj;
  1054. if (!(ifchange = get_ifchange()))
  1055. return NULL;
  1056. /* add the socket to the ifchange notification list */
  1057. ifchange_add_sock( ifchange, sock );
  1058. sock->ifchange_obj = ifchange;
  1059. return ifchange;
  1060. }
  1061. /* destroy an existing ifchange queue for a specific socket */
  1062. static void sock_release_ifchange( struct sock *sock )
  1063. {
  1064. if (sock->ifchange_obj)
  1065. {
  1066. list_remove( &sock->ifchange_entry );
  1067. release_object( sock->ifchange_obj );
  1068. sock->ifchange_obj = NULL;
  1069. }
  1070. }
  1071. /* create a socket */
  1072. DECL_HANDLER(create_socket)
  1073. {
  1074. struct object *obj;
  1075. reply->handle = 0;
  1076. if ((obj = create_socket( req->family, req->type, req->protocol, req->flags )) != NULL)
  1077. {
  1078. reply->handle = alloc_handle( current->process, obj, req->access, req->attributes );
  1079. release_object( obj );
  1080. }
  1081. }
  1082. /* accept a socket */
  1083. DECL_HANDLER(accept_socket)
  1084. {
  1085. struct sock *sock;
  1086. reply->handle = 0;
  1087. if ((sock = accept_socket( req->lhandle )) != NULL)
  1088. {
  1089. reply->handle = alloc_handle( current->process, &sock->obj, req->access, req->attributes );
  1090. sock->wparam = reply->handle; /* wparam for message is the socket handle */
  1091. sock_reselect( sock );
  1092. release_object( &sock->obj );
  1093. }
  1094. }
/* accept a socket into an initialized socket */
DECL_HANDLER(accept_into_socket)
{
    struct sock *sock, *acceptsock;
    const int all_attributes = FILE_READ_ATTRIBUTES|FILE_WRITE_ATTRIBUTES|FILE_READ_DATA;

    /* the listening socket */
    if (!(sock = (struct sock *)get_handle_obj( current->process, req->lhandle,
                                                all_attributes, &sock_ops)))
        return;

    /* the pre-created socket that will receive the connection */
    if (!(acceptsock = (struct sock *)get_handle_obj( current->process, req->ahandle,
                                                      all_attributes, &sock_ops)))
    {
        release_object( sock );
        return;
    }

    if (accept_into_socket( sock, acceptsock ))
    {
        acceptsock->wparam = req->ahandle;  /* wparam for message is the socket handle */
        sock_reselect( acceptsock );
    }
    release_object( acceptsock );
    release_object( sock );
}
  1117. /* mark a socket to be recreated on close */
  1118. DECL_HANDLER(reuse_socket)
  1119. {
  1120. struct sock *sock;
  1121. if (!(sock = (struct sock *)get_handle_obj( current->process, req->handle,
  1122. FILE_WRITE_ATTRIBUTES, &sock_ops))) return;
  1123. sock->state |= FD_WINE_REUSE;
  1124. release_object( &sock->obj );
  1125. }
/* set socket event parameters */
DECL_HANDLER(set_socket_event)
{
    struct sock *sock;
    struct event *old_event;

    if (!(sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                                FILE_WRITE_ATTRIBUTES, &sock_ops))) return;

    /* keep the old event alive until the new one is installed */
    old_event = sock->event;
    sock->mask    = req->mask;
    sock->hmask   &= ~req->mask; /* re-enable held events */
    sock->event   = NULL;
    sock->window  = req->window;
    sock->message = req->msg;
    sock->wparam  = req->handle; /* wparam is the socket handle */
    if (req->event) sock->event = get_event_obj( current->process, req->event, EVENT_MODIFY_STATE );

    if (debug_level && sock->event) fprintf(stderr, "event ptr: %p\n", sock->event);

    sock_reselect( sock );

    /* event selection puts the socket into non-blocking mode */
    sock->state |= FD_WINE_NONBLOCKING;

    /* if a network event is pending, signal the event object
       it is possible that FD_CONNECT or FD_ACCEPT network events has happened
       before a WSAEventSelect() was done on it.
       (when dealing with Asynchronous socket) */
    sock_wake_up( sock );

    if (old_event) release_object( old_event ); /* we're through with it */
    release_object( &sock->obj );
}
/* get socket event parameters */
DECL_HANDLER(get_socket_event)
{
    struct sock *sock;

    if (!(sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                                FILE_READ_ATTRIBUTES, &sock_ops ))) return;

    reply->mask  = sock->mask;
    reply->pmask = sock->pmask;
    reply->state = sock->state;
    /* return the per-event error codes, truncated to the reply buffer size */
    set_reply_data( sock->errors, min( get_reply_max_size(), sizeof(sock->errors) ));

    if (req->service)
    {
        if (req->c_event)
        {
            struct event *cevent = get_event_obj( current->process, req->c_event,
                                                  EVENT_MODIFY_STATE );
            if (cevent)
            {
                reset_event( cevent );
                release_object( cevent );
            }
        }
        /* servicing consumes the pending event mask */
        sock->pmask = 0;
        sock_reselect( sock );
    }
    release_object( &sock->obj );
}
/* re-enable pending socket events */
DECL_HANDLER(enable_socket_event)
{
    struct sock *sock;

    if (!(sock = (struct sock*)get_handle_obj( current->process, req->handle,
                                               FILE_WRITE_ATTRIBUTES, &sock_ops)))
        return;

    /* for event-based notification, windows erases stale events */
    sock->pmask &= ~req->mask;
    sock->hmask &= ~req->mask;
    sock->state |= req->sstate;
    sock->state &= ~req->cstate;
    /* stream-only state bits are meaningless on non-stream sockets */
    if ( sock->type != SOCK_STREAM ) sock->state &= ~STREAM_FLAG_MASK;

    sock_reselect( sock );

    release_object( &sock->obj );
}
/* stash an accepted connection on the listening socket for a later accept */
DECL_HANDLER(set_socket_deferred)
{
    struct sock *sock, *acceptsock;

    sock=(struct sock *)get_handle_obj( current->process, req->handle, FILE_WRITE_ATTRIBUTES, &sock_ops );
    if ( !sock )
        return;

    acceptsock = (struct sock *)get_handle_obj( current->process, req->deferred, 0, &sock_ops );
    if ( !acceptsock )
    {
        release_object( sock );
        return;
    }
    /* the reference from get_handle_obj is intentionally kept; it is dropped in
     * accept_into_socket() when the deferred connection is consumed.
     * NOTE(review): a previously set sock->deferred is overwritten without being
     * released here — verify callers never set it twice */
    sock->deferred = acceptsock;
    release_object( sock );
}
/* return basic information about a socket */
DECL_HANDLER(get_socket_info)
{
    struct sock *sock;

    sock = (struct sock *)get_handle_obj( current->process, req->handle, FILE_READ_ATTRIBUTES, &sock_ops );
    if (!sock) return;

    reply->family   = sock->family;
    reply->type     = sock->type;
    reply->protocol = sock->proto;
    /* negated elapsed time since the connection was established;
     * presumably interpreted by the caller as a relative timestamp — confirm */
    reply->connect_time = -(current_time - sock->connect_time);

    release_object( &sock->obj );
}
  1221. DECL_HANDLER(socket_cleanup)
  1222. {
  1223. unsigned int index = 0;
  1224. obj_handle_t sock;
  1225. while ((sock = enumerate_handles(current->process, &sock_ops, &index, NULL)))
  1226. close_handle(current->process, sock);
  1227. }