sock.c 77 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512
  1. /*
  2. * Server-side socket management
  3. *
  4. * Copyright (C) 1999 Marcus Meissner, Ove Kåven
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
  19. *
  20. * FIXME: we use read|write access in all cases. Shouldn't we depend that
  21. * on the access of the current handle?
  22. */
  23. #include "config.h"
  24. #include <assert.h>
  25. #include <fcntl.h>
  26. #include <stdarg.h>
  27. #include <stdio.h>
  28. #include <string.h>
  29. #include <stdlib.h>
  30. #include <errno.h>
  31. #ifdef HAVE_NETINET_IN_H
  32. # include <netinet/in.h>
  33. #endif
  34. #ifdef HAVE_POLL_H
  35. # include <poll.h>
  36. #endif
  37. #include <sys/time.h>
  38. #include <sys/types.h>
  39. #ifdef HAVE_SYS_SOCKET_H
  40. # include <sys/socket.h>
  41. #endif
  42. #ifdef HAVE_SYS_IOCTL_H
  43. #include <sys/ioctl.h>
  44. #endif
  45. #ifdef HAVE_SYS_FILIO_H
  46. # include <sys/filio.h>
  47. #endif
  48. #include <time.h>
  49. #include <unistd.h>
  50. #include <limits.h>
  51. #ifdef HAVE_LINUX_RTNETLINK_H
  52. # include <linux/rtnetlink.h>
  53. #endif
  54. #ifdef HAVE_NETIPX_IPX_H
  55. # include <netipx/ipx.h>
  56. #elif defined(HAVE_LINUX_IPX_H)
  57. # ifdef HAVE_ASM_TYPES_H
  58. # include <asm/types.h>
  59. # endif
  60. # ifdef HAVE_LINUX_TYPES_H
  61. # include <linux/types.h>
  62. # endif
  63. # include <linux/ipx.h>
  64. #endif
  65. #if defined(SOL_IPX) || defined(SO_DEFAULT_HEADERS)
  66. # define HAS_IPX
  67. #endif
  68. #ifdef HAVE_LINUX_IRDA_H
  69. # ifdef HAVE_LINUX_TYPES_H
  70. # include <linux/types.h>
  71. # endif
  72. # include <linux/irda.h>
  73. # define HAS_IRDA
  74. #endif
  75. #include "ntstatus.h"
  76. #define WIN32_NO_STATUS
  77. #include "windef.h"
  78. #include "winternl.h"
  79. #include "winerror.h"
  80. #define USE_WS_PREFIX
  81. #include "winsock2.h"
  82. #include "ws2tcpip.h"
  83. #include "wsipx.h"
  84. #include "af_irda.h"
  85. #include "wine/afd.h"
  86. #include "process.h"
  87. #include "file.h"
  88. #include "handle.h"
  89. #include "thread.h"
  90. #include "request.h"
  91. #include "user.h"
  92. /* From winsock.h */
  93. #define FD_MAX_EVENTS 10
  94. #define FD_READ_BIT 0
  95. #define FD_WRITE_BIT 1
  96. #define FD_OOB_BIT 2
  97. #define FD_ACCEPT_BIT 3
  98. #define FD_CONNECT_BIT 4
  99. #define FD_CLOSE_BIT 5
  100. /*
  101. * Define flags to be used with the WSAAsyncSelect() call.
  102. */
  103. #define FD_READ 0x00000001
  104. #define FD_WRITE 0x00000002
  105. #define FD_OOB 0x00000004
  106. #define FD_ACCEPT 0x00000008
  107. #define FD_CONNECT 0x00000010
  108. #define FD_CLOSE 0x00000020
  109. /* internal per-socket flags */
  110. #define FD_WINE_LISTENING 0x10000000
  111. #define FD_WINE_NONBLOCKING 0x20000000
  112. #define FD_WINE_CONNECTED 0x40000000
  113. #define FD_WINE_RAW 0x80000000
  114. #define FD_WINE_INTERNAL 0xFFFF0000
/* list of all pending AFD poll requests, in no particular order */
static struct list poll_list = LIST_INIT( poll_list );

/* a pending asynchronous poll request covering one or more sockets */
struct poll_req
{
    struct list entry;                  /* entry in the global poll_list */
    struct async *async;                /* async object completing this poll */
    struct iosb *iosb;                  /* I/O status block for the async */
    struct timeout_user *timeout;       /* optional expiry timeout (NULL if none) */
    unsigned int count;                 /* number of entries in sockets[] */
    struct poll_socket_output *output;  /* per-socket results, handed to iosb->out_data on completion */
    struct
    {
        struct sock *sock;              /* referenced socket being polled */
        int flags;                      /* AFD_POLL_* flags of interest for this socket */
    } sockets[1];                       /* variable-length; allocated with count entries */
};
/* a pending asynchronous accept (or accept-into) request */
struct accept_req
{
    struct list entry;                  /* entry in the listening socket's accept_list */
    struct async *async;                /* async object completing this accept */
    struct iosb *iosb;                  /* I/O status block for the async */
    struct sock *sock, *acceptsock;     /* listening socket / socket to accept into (may be NULL) */
    int accepted;                       /* has the connection already been accepted? */
    unsigned int recv_len, local_len;   /* sizes of the initial-recv and local-address
                                         * portions of the output buffer */
};
/* a pending asynchronous connect request (embedded in struct sock; at most one per socket) */
struct connect_req
{
    struct async *async;                        /* async object completing this connect */
    struct iosb *iosb;                          /* I/O status block for the async */
    struct sock *sock;                          /* referenced socket being connected */
    unsigned int addr_len, send_len, send_cursor; /* address size in the input buffer; size of the
                                                   * optional initial-send data; bytes of it sent so far */
};
/* server-side representation of a Windows socket */
struct sock
{
    struct object       obj;         /* object header */
    struct fd          *fd;          /* socket file descriptor */
    unsigned int        state;       /* status bits (FD_* and FD_WINE_* flags) */
    unsigned int        mask;        /* event mask selected via WSAEventSelect/WSAAsyncSelect */
    /* pending FD_* events which have not yet been reported to the application */
    unsigned int        pending_events;
    /* FD_* events which have already been reported and should not be selected for again until reset by a relevant call.
     * e.g. if FD_READ is set here and not in pending_events, it has already been reported and consumed, and we should
     * not report it again, even if POLLIN is signaled, until it is reset by e.g recv() */
    unsigned int        reported_events;
    unsigned int        flags;       /* socket flags (WSA_FLAG_*) */
    int                 polling;     /* is socket being polled in the main loop? */
    int                 wr_shutdown_pending; /* is a write shutdown pending? */
    unsigned short      proto;       /* socket protocol */
    unsigned short      type;        /* socket type */
    unsigned short      family;      /* socket family */
    struct event       *event;       /* event object (WSAEventSelect) */
    user_handle_t       window;      /* window to send the message to (WSAAsyncSelect) */
    unsigned int        message;     /* message to send */
    obj_handle_t        wparam;      /* message wparam (socket handle) */
    unsigned int        errors[FD_MAX_EVENTS]; /* event errors, indexed by FD_*_BIT */
    timeout_t           connect_time;/* time the socket was connected */
    struct sock        *deferred;    /* socket that waits for a deferred accept */
    struct async_queue  read_q;      /* queue for asynchronous reads */
    struct async_queue  write_q;     /* queue for asynchronous writes */
    struct async_queue  ifchange_q;  /* queue for interface change notifications */
    struct async_queue  accept_q;    /* queue for asynchronous accepts */
    struct async_queue  connect_q;   /* queue for asynchronous connects */
    struct async_queue  poll_q;      /* queue for asynchronous polls */
    struct object      *ifchange_obj; /* the interface change notification object */
    struct list         ifchange_entry; /* entry in ifchange notification list */
    struct list         accept_list; /* list of pending accept requests */
    struct accept_req  *accept_recv_req; /* pending accept-into request which will recv on this socket */
    struct connect_req  connect_req; /* pending connection request (there can be only one) */
};
/* forward declarations for the object and fd operation callbacks below */
static void sock_dump( struct object *obj, int verbose );
static struct fd *sock_get_fd( struct object *obj );
static int sock_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void sock_destroy( struct object *obj );
static struct object *sock_get_ifchange( struct sock *sock );
static void sock_release_ifchange( struct sock *sock );

static int sock_get_poll_events( struct fd *fd );
static void sock_poll_event( struct fd *fd, int event );
static enum server_fd_type sock_get_fd_type( struct fd *fd );
static int sock_ioctl( struct fd *fd, ioctl_code_t code, struct async *async );
static void sock_queue_async( struct fd *fd, struct async *async, int type, int count );
static void sock_reselect_async( struct fd *fd, struct async_queue *queue );

static int accept_into_socket( struct sock *sock, struct sock *acceptsock );
static struct sock *accept_socket( struct sock *sock );
static int sock_get_ntstatus( int err );
static unsigned int sock_get_error( int err );
/* object operations vtable for sockets */
static const struct object_ops sock_ops =
{
    sizeof(struct sock),          /* size */
    &file_type,                   /* type */
    sock_dump,                    /* dump */
    add_queue,                    /* add_queue */
    remove_queue,                 /* remove_queue */
    default_fd_signaled,          /* signaled */
    no_satisfied,                 /* satisfied */
    no_signal,                    /* signal */
    sock_get_fd,                  /* get_fd */
    default_map_access,           /* map_access */
    default_get_sd,               /* get_sd */
    default_set_sd,               /* set_sd */
    no_get_full_name,             /* get_full_name */
    no_lookup_name,               /* lookup_name */
    no_link_name,                 /* link_name */
    NULL,                         /* unlink_name */
    no_open_file,                 /* open_file */
    no_kernel_obj_list,           /* get_kernel_obj_list */
    sock_close_handle,            /* close_handle */
    sock_destroy                  /* destroy */
};
/* fd operations vtable for sockets; reads/writes/flushes go through ioctls instead */
static const struct fd_ops sock_fd_ops =
{
    sock_get_poll_events,         /* get_poll_events */
    sock_poll_event,              /* poll_event */
    sock_get_fd_type,             /* get_fd_type */
    no_fd_read,                   /* read */
    no_fd_write,                  /* write */
    no_fd_flush,                  /* flush */
    default_fd_get_file_info,     /* get_file_info */
    no_fd_get_volume_info,        /* get_volume_info */
    sock_ioctl,                   /* ioctl */
    sock_queue_async,             /* queue_async */
    sock_reselect_async           /* reselect_async */
};
/* union large enough to hold any Unix socket address this file handles */
union unix_sockaddr
{
    struct sockaddr addr;
    struct sockaddr_in in;
    struct sockaddr_in6 in6;
#ifdef HAS_IPX
    struct sockaddr_ipx ipx;
#endif
#ifdef HAS_IRDA
    struct sockaddr_irda irda;
#endif
};
/* Convert a Unix socket address into a Windows (WS_) sockaddr.
 *
 * uaddr:     source Unix address
 * wsaddr:    destination buffer, zeroed first up to wsaddrlen bytes
 * wsaddrlen: size of the destination buffer in bytes
 *
 * Returns the number of bytes written into wsaddr, 0 for AF_UNSPEC,
 * or -1 if the buffer is too small or the family is unsupported.
 * Note: -1 does not set errno; callers using errno afterwards see
 * whatever the previous libc call left there. */
static int sockaddr_from_unix( const union unix_sockaddr *uaddr, struct WS_sockaddr *wsaddr, socklen_t wsaddrlen )
{
    memset( wsaddr, 0, wsaddrlen );

    switch (uaddr->addr.sa_family)
    {
    case AF_INET:
    {
        struct WS_sockaddr_in win = {0};

        if (wsaddrlen < sizeof(win)) return -1;
        win.sin_family = WS_AF_INET;
        win.sin_port = uaddr->in.sin_port;
        memcpy( &win.sin_addr, &uaddr->in.sin_addr, sizeof(win.sin_addr) );
        memcpy( wsaddr, &win, sizeof(win) );
        return sizeof(win);
    }

    case AF_INET6:
    {
        struct WS_sockaddr_in6 win = {0};

        /* a buffer that only fits the old (shorter) sockaddr_in6 is still accepted */
        if (wsaddrlen < sizeof(struct WS_sockaddr_in6_old)) return -1;
        win.sin6_family = WS_AF_INET6;
        win.sin6_port = uaddr->in6.sin6_port;
        win.sin6_flowinfo = uaddr->in6.sin6_flowinfo;
        memcpy( &win.sin6_addr, &uaddr->in6.sin6_addr, sizeof(win.sin6_addr) );
#ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_SCOPE_ID
        win.sin6_scope_id = uaddr->in6.sin6_scope_id;
#endif
        if (wsaddrlen >= sizeof(struct WS_sockaddr_in6))
        {
            memcpy( wsaddr, &win, sizeof(struct WS_sockaddr_in6) );
            return sizeof(struct WS_sockaddr_in6);
        }
        memcpy( wsaddr, &win, sizeof(struct WS_sockaddr_in6_old) );
        return sizeof(struct WS_sockaddr_in6_old);
    }

#ifdef HAS_IPX
    case AF_IPX:
    {
        struct WS_sockaddr_ipx win = {0};

        if (wsaddrlen < sizeof(win)) return -1;
        win.sa_family = WS_AF_IPX;
        memcpy( win.sa_netnum, &uaddr->ipx.sipx_network, sizeof(win.sa_netnum) );
        memcpy( win.sa_nodenum, &uaddr->ipx.sipx_node, sizeof(win.sa_nodenum) );
        win.sa_socket = uaddr->ipx.sipx_port;
        memcpy( wsaddr, &win, sizeof(win) );
        return sizeof(win);
    }
#endif

#ifdef HAS_IRDA
    case AF_IRDA:
    {
        SOCKADDR_IRDA win;

        if (wsaddrlen < sizeof(win)) return -1;
        win.irdaAddressFamily = WS_AF_IRDA;
        memcpy( win.irdaDeviceID, &uaddr->irda.sir_addr, sizeof(win.irdaDeviceID) );
        /* an explicit LSAP selector is encoded as a service name of the form "LSAP-SELn" */
        if (uaddr->irda.sir_lsap_sel != LSAP_ANY)
            snprintf( win.irdaServiceName, sizeof(win.irdaServiceName), "LSAP-SEL%u", uaddr->irda.sir_lsap_sel );
        else
            memcpy( win.irdaServiceName, uaddr->irda.sir_name, sizeof(win.irdaServiceName) );
        memcpy( wsaddr, &win, sizeof(win) );
        return sizeof(win);
    }
#endif

    case AF_UNSPEC:
        return 0;

    default:
        return -1;
    }
}
/* Permutation of 0..FD_MAX_EVENTS - 1 representing the order in which
 * we post messages if there are multiple events.  Used to send
 * messages.  The problem is if there is both a FD_CONNECT event and,
 * say, an FD_READ event available on the same socket, we want to
 * notify the app of the connect event first.  Otherwise it may
 * discard the read event because it thinks it hasn't connected yet.
 */
static const int event_bitorder[FD_MAX_EVENTS] =
{
    FD_CONNECT_BIT,
    FD_ACCEPT_BIT,
    FD_OOB_BIT,
    FD_WRITE_BIT,
    FD_READ_BIT,
    FD_CLOSE_BIT,
    6, 7, 8, 9  /* leftovers */
};
/* Flags that make sense only for SOCK_STREAM sockets */
#define STREAM_FLAG_MASK ((unsigned int) (FD_CONNECT | FD_ACCEPT | FD_WINE_LISTENING | FD_WINE_CONNECTED))

/* how this platform signals a peer's write shutdown, probed once at startup */
typedef enum {
    SOCK_SHUTDOWN_ERROR = -1,   /* probe failed */
    SOCK_SHUTDOWN_EOF = 0,      /* shutdown is seen as a 0-byte read */
    SOCK_SHUTDOWN_POLLHUP = 1   /* shutdown is seen as POLLHUP */
} sock_shutdown_t;

static sock_shutdown_t sock_shutdown_type = SOCK_SHUTDOWN_ERROR;
  341. static sock_shutdown_t sock_check_pollhup(void)
  342. {
  343. sock_shutdown_t ret = SOCK_SHUTDOWN_ERROR;
  344. int fd[2], n;
  345. struct pollfd pfd;
  346. char dummy;
  347. if ( socketpair( AF_UNIX, SOCK_STREAM, 0, fd ) ) return ret;
  348. if ( shutdown( fd[0], 1 ) ) goto out;
  349. pfd.fd = fd[1];
  350. pfd.events = POLLIN;
  351. pfd.revents = 0;
  352. /* Solaris' poll() sometimes returns nothing if given a 0ms timeout here */
  353. n = poll( &pfd, 1, 1 );
  354. if ( n != 1 ) goto out; /* error or timeout */
  355. if ( pfd.revents & POLLHUP )
  356. ret = SOCK_SHUTDOWN_POLLHUP;
  357. else if ( pfd.revents & POLLIN &&
  358. read( fd[1], &dummy, 1 ) == 0 )
  359. ret = SOCK_SHUTDOWN_EOF;
  360. out:
  361. close( fd[0] );
  362. close( fd[1] );
  363. return ret;
  364. }
  365. void sock_init(void)
  366. {
  367. sock_shutdown_type = sock_check_pollhup();
  368. switch ( sock_shutdown_type )
  369. {
  370. case SOCK_SHUTDOWN_EOF:
  371. if (debug_level) fprintf( stderr, "sock_init: shutdown() causes EOF\n" );
  372. break;
  373. case SOCK_SHUTDOWN_POLLHUP:
  374. if (debug_level) fprintf( stderr, "sock_init: shutdown() causes POLLHUP\n" );
  375. break;
  376. default:
  377. fprintf( stderr, "sock_init: ERROR in sock_check_pollhup()\n" );
  378. sock_shutdown_type = SOCK_SHUTDOWN_EOF;
  379. }
  380. }
/* Recompute the poll event mask for a socket and update the main loop.
 * Attaches the socket to the server's poll loop on its first meaningful
 * reselect.  Returns the new event mask. */
static int sock_reselect( struct sock *sock )
{
    int ev = sock_get_poll_events( sock->fd );

    if (debug_level)
        fprintf(stderr,"sock_reselect(%p): new mask %x\n", sock, ev);

    if (!sock->polling)  /* FIXME: should find a better way to do this */
    {
        /* previously unconnected socket, is this reselect supposed to connect it? */
        if (!(sock->state & ~FD_WINE_NONBLOCKING)) return 0;
        /* ok, it is, attach it to the wineserver's main poll loop */
        sock->polling = 1;
        allow_fd_caching( sock->fd );
    }
    /* update condition mask */
    set_fd_events( sock->fd, ev );
    return ev;
}
/* wake anybody waiting on the socket event or send the associated message */
static void sock_wake_up( struct sock *sock )
{
    unsigned int events = sock->pending_events & sock->mask;
    int i;

    if ( !events ) return;

    if (sock->event)
    {
        if (debug_level) fprintf(stderr, "signalling events %x ptr %p\n", events, sock->event );
        set_event( sock->event );
    }
    if (sock->window)
    {
        if (debug_level) fprintf(stderr, "signalling events %x win %08x\n", events, sock->window );
        /* post one message per pending event, in event_bitorder priority
         * (e.g. FD_CONNECT before FD_READ); the low word of lparam is the
         * event bit, the high word the associated error code */
        for (i = 0; i < FD_MAX_EVENTS; i++)
        {
            int event = event_bitorder[i];
            if (sock->pending_events & (1 << event))
            {
                lparam_t lparam = (1 << event) | (sock->errors[event] << 16);
                post_message( sock->window, sock->message, sock->wparam, lparam );
            }
        }
        /* NOTE(review): pending_events is cleared only on the window-message
         * path, not on the event path above — looks intentional (WSAEventSelect
         * semantics reset via enum_network_events), but confirm */
        sock->pending_events = 0;
        sock_reselect( sock );
    }
}
  425. static inline int sock_error( struct fd *fd )
  426. {
  427. unsigned int optval = 0;
  428. socklen_t optlen = sizeof(optval);
  429. getsockopt( get_unix_fd(fd), SOL_SOCKET, SO_ERROR, (void *) &optval, &optlen);
  430. return optval;
  431. }
  432. static void free_accept_req( void *private )
  433. {
  434. struct accept_req *req = private;
  435. list_remove( &req->entry );
  436. if (req->acceptsock)
  437. {
  438. req->acceptsock->accept_recv_req = NULL;
  439. release_object( req->acceptsock );
  440. }
  441. release_object( req->async );
  442. release_object( req->iosb );
  443. release_object( req->sock );
  444. free( req );
  445. }
/* Fill the output buffer of an accept(-into) request: optional initial recv
 * data, then the local address (if requested), then the remote address,
 * each address prefixed by its length as an int (AcceptEx buffer layout).
 * On success sets STATUS_ALERTED so the async completes; a would-block recv
 * on a not-yet-accepted socket re-arms the request with STATUS_PENDING. */
static void fill_accept_output( struct accept_req *req )
{
    struct iosb *iosb = req->iosb;
    union unix_sockaddr unix_addr;
    struct WS_sockaddr *win_addr;
    unsigned int remote_len;
    socklen_t unix_len;
    int fd, size = 0;
    char *out_data;
    int win_len;

    if (!(out_data = mem_alloc( iosb->out_size ))) return;

    fd = get_unix_fd( req->acceptsock->fd );

    /* optional initial receive into the head of the output buffer */
    if (req->recv_len && (size = recv( fd, out_data, req->recv_len, 0 )) < 0)
    {
        if (!req->accepted && errno == EWOULDBLOCK)
        {
            /* no data yet: mark accepted and wait for the next POLLIN */
            req->accepted = 1;
            sock_reselect( req->acceptsock );
            set_error( STATUS_PENDING );
            return;
        }

        set_error( sock_get_ntstatus( errno ) );
        free( out_data );
        return;
    }

    if (req->local_len)
    {
        if (req->local_len < sizeof(int))
        {
            set_error( STATUS_BUFFER_TOO_SMALL );
            free( out_data );
            return;
        }

        unix_len = sizeof(unix_addr);
        win_addr = (struct WS_sockaddr *)(out_data + req->recv_len + sizeof(int));
        if (getsockname( fd, &unix_addr.addr, &unix_len ) < 0 ||
            (win_len = sockaddr_from_unix( &unix_addr, win_addr, req->local_len - sizeof(int) )) < 0)
        {
            /* NOTE(review): if sockaddr_from_unix() is what failed, errno is
             * stale here (it does not set errno) — confirm intended */
            set_error( sock_get_ntstatus( errno ) );
            free( out_data );
            return;
        }
        memcpy( out_data + req->recv_len, &win_len, sizeof(int) );
    }

    unix_len = sizeof(unix_addr);
    win_addr = (struct WS_sockaddr *)(out_data + req->recv_len + req->local_len + sizeof(int));
    remote_len = iosb->out_size - req->recv_len - req->local_len;
    if (getpeername( fd, &unix_addr.addr, &unix_len ) < 0 ||
        (win_len = sockaddr_from_unix( &unix_addr, win_addr, remote_len - sizeof(int) )) < 0)
    {
        set_error( sock_get_ntstatus( errno ) );
        free( out_data );
        return;
    }
    memcpy( out_data + req->recv_len + req->local_len, &win_len, sizeof(int) );

    iosb->status = STATUS_SUCCESS;
    iosb->result = size;          /* number of bytes received, 0 if no recv requested */
    iosb->out_data = out_data;    /* ownership transfers to the iosb */
    set_error( STATUS_ALERTED );
}
/* Complete a pending accept request on a listening socket.  With an
 * accept-into target, accepts into it and fills the AcceptEx output;
 * otherwise accepts a fresh socket and returns its handle in the iosb. */
static void complete_async_accept( struct sock *sock, struct accept_req *req )
{
    struct sock *acceptsock = req->acceptsock;
    struct async *async = req->async;

    if (debug_level) fprintf( stderr, "completing accept request for socket %p\n", sock );

    if (acceptsock)
    {
        if (!accept_into_socket( sock, acceptsock )) return;
        fill_accept_output( req );
    }
    else
    {
        struct iosb *iosb = req->iosb;
        obj_handle_t handle;

        if (!(acceptsock = accept_socket( sock ))) return;
        /* allocate the handle in the requesting process; no access check since
         * the server itself is creating it on the app's behalf */
        handle = alloc_handle_no_access_check( async_get_thread( async )->process, &acceptsock->obj,
                                               GENERIC_READ | GENERIC_WRITE | SYNCHRONIZE, OBJ_INHERIT );
        acceptsock->wparam = handle;
        release_object( acceptsock );
        if (!handle) return;

        /* NOTE(review): plain malloc() here vs mem_alloc() elsewhere; on
         * failure we return without setting an error — confirm intended */
        if (!(iosb->out_data = malloc( sizeof(handle) ))) return;
        iosb->status = STATUS_SUCCESS;
        iosb->out_size = sizeof(handle);
        memcpy( iosb->out_data, &handle, sizeof(handle) );
        set_error( STATUS_ALERTED );
    }
}
  533. static void complete_async_accept_recv( struct accept_req *req )
  534. {
  535. if (debug_level) fprintf( stderr, "completing accept recv request for socket %p\n", req->acceptsock );
  536. assert( req->recv_len );
  537. fill_accept_output( req );
  538. }
/* Async cleanup callback: clear the socket's embedded connect request and
 * drop the references it held. */
static void free_connect_req( void *private )
{
    struct connect_req *req = private;
    struct sock *sock = req->sock;
    struct async *async = req->async;
    struct iosb *iosb = req->iosb;

    /* req aliases &sock->connect_req (it is embedded in struct sock), so the
     * fields are saved to locals above before the memset wipes them */
    memset( &req->sock->connect_req, 0, sizeof(*req) );
    release_object( async );
    release_object( iosb );
    release_object( sock );
}
  550. static void complete_async_connect( struct sock *sock )
  551. {
  552. const char *in_buffer;
  553. struct iosb *iosb;
  554. size_t len;
  555. int ret;
  556. if (debug_level) fprintf( stderr, "completing connect request for socket %p\n", sock );
  557. sock->pending_events &= ~(FD_CONNECT | FD_READ | FD_WRITE);
  558. sock->reported_events &= ~(FD_CONNECT | FD_READ | FD_WRITE);
  559. sock->state |= FD_WINE_CONNECTED;
  560. sock->state &= ~(FD_CONNECT | FD_WINE_LISTENING);
  561. if (!sock->connect_req.send_len)
  562. {
  563. set_error( STATUS_SUCCESS );
  564. return;
  565. }
  566. iosb = sock->connect_req.iosb;
  567. in_buffer = (const char *)iosb->in_data + sizeof(struct afd_connect_params) + sock->connect_req.addr_len;
  568. len = sock->connect_req.send_len - sock->connect_req.send_cursor;
  569. ret = send( get_unix_fd( sock->fd ), in_buffer + sock->connect_req.send_cursor, len, 0 );
  570. if (ret < 0 && errno != EWOULDBLOCK)
  571. set_error( sock_get_ntstatus( errno ) );
  572. else if (ret == len)
  573. {
  574. iosb->result = sock->connect_req.send_len;
  575. iosb->status = STATUS_SUCCESS;
  576. set_error( STATUS_ALERTED );
  577. }
  578. else
  579. {
  580. sock->connect_req.send_cursor += ret;
  581. set_error( STATUS_PENDING );
  582. }
  583. }
/* Async cleanup callback: cancel the poll timeout, drop the reference held
 * on each polled socket, and unlink the request from poll_list. */
static void free_poll_req( void *private )
{
    struct poll_req *req = private;
    unsigned int i;

    if (req->timeout) remove_timeout_user( req->timeout );

    for (i = 0; i < req->count; ++i)
        release_object( req->sockets[i].sock );
    release_object( req->async );
    release_object( req->iosb );
    list_remove( &req->entry );
    free( req );
}
  596. static int get_poll_flags( struct sock *sock, int event )
  597. {
  598. int flags = 0;
  599. /* A connection-mode socket which has never been connected does not return
  600. * write or hangup events, but Linux reports POLLOUT | POLLHUP. */
  601. if (sock->type == WS_SOCK_STREAM && !(sock->state & (FD_CONNECT | FD_WINE_CONNECTED | FD_WINE_LISTENING)))
  602. event &= ~(POLLOUT | POLLHUP);
  603. if (event & POLLIN)
  604. {
  605. if (sock->state & FD_WINE_LISTENING)
  606. flags |= AFD_POLL_ACCEPT;
  607. else
  608. flags |= AFD_POLL_READ;
  609. }
  610. if (event & POLLPRI)
  611. flags |= AFD_POLL_OOB;
  612. if (event & POLLOUT)
  613. flags |= AFD_POLL_WRITE;
  614. if (sock->state & FD_WINE_CONNECTED)
  615. flags |= AFD_POLL_CONNECT;
  616. if (event & POLLHUP)
  617. flags |= AFD_POLL_HUP;
  618. if (event & POLLERR)
  619. flags |= AFD_POLL_CONNECT_ERR;
  620. return flags;
  621. }
/* Complete any pending AFD poll requests that are waiting on one of the
 * conditions now signalled on this socket. */
static void complete_async_polls( struct sock *sock, int event, int error )
{
    int flags = get_poll_flags( sock, event );
    struct poll_req *req, *next;

    /* _SAFE iteration: async_terminate() may cause the request to be freed
     * and unlinked from poll_list via its completion callback */
    LIST_FOR_EACH_ENTRY_SAFE( req, next, &poll_list, struct poll_req, entry )
    {
        struct iosb *iosb = req->iosb;
        unsigned int i;

        /* already completed (or timed out); nothing more to report */
        if (iosb->status != STATUS_PENDING) continue;

        for (i = 0; i < req->count; ++i)
        {
            if (req->sockets[i].sock != sock) continue;
            if (!(req->sockets[i].flags & flags)) continue;

            if (debug_level)
                fprintf( stderr, "completing poll for socket %p, wanted %#x got %#x\n",
                         sock, req->sockets[i].flags, flags );

            /* report only the subset of events the caller asked for */
            req->output[i].flags = req->sockets[i].flags & flags;
            req->output[i].status = sock_get_ntstatus( error );

            iosb->status = STATUS_SUCCESS;
        }

        /* at least one socket in the request matched: publish the output
         * buffer and wake the async */
        if (iosb->status != STATUS_PENDING)
        {
            iosb->out_data = req->output;
            iosb->out_size = req->count * sizeof(*req->output);
            async_terminate( req->async, STATUS_ALERTED );
        }
    }
}
  650. static void async_poll_timeout( void *private )
  651. {
  652. struct poll_req *req = private;
  653. struct iosb *iosb = req->iosb;
  654. req->timeout = NULL;
  655. if (iosb->status != STATUS_PENDING) return;
  656. iosb->status = STATUS_TIMEOUT;
  657. iosb->out_data = req->output;
  658. iosb->out_size = req->count * sizeof(*req->output);
  659. async_terminate( req->async, STATUS_ALERTED );
  660. }
/* Feed the poll() events reported on a socket to its pending asyncs.
 * Returns the events that were not consumed by an async; those are then
 * reported to the application by sock_dispatch_events(). */
static int sock_dispatch_asyncs( struct sock *sock, int event, int error )
{
    if (event & (POLLIN | POLLPRI))
    {
        struct accept_req *req;

        /* serve at most one queued accept per wakeup: one readable event
         * corresponds to one incoming connection */
        LIST_FOR_EACH_ENTRY( req, &sock->accept_list, struct accept_req, entry )
        {
            if (req->iosb->status == STATUS_PENDING && !req->accepted)
            {
                complete_async_accept( sock, req );
                if (get_error() != STATUS_PENDING)
                    async_terminate( req->async, get_error() );
                break;
            }
        }

        /* incoming data on a socket accepted with a pending AcceptEx()-style recv */
        if (sock->accept_recv_req && sock->accept_recv_req->iosb->status == STATUS_PENDING)
        {
            complete_async_accept_recv( sock->accept_recv_req );
            if (get_error() != STATUS_PENDING)
                async_terminate( sock->accept_recv_req->async, get_error() );
        }
    }

    /* writability completes an in-progress nonblocking connect */
    if ((event & POLLOUT) && sock->connect_req.async && sock->connect_req.iosb->status == STATUS_PENDING)
    {
        complete_async_connect( sock );
        if (get_error() != STATUS_PENDING)
            async_terminate( sock->connect_req.async, get_error() );
    }

    if (event & (POLLIN | POLLPRI) && async_waiting( &sock->read_q ))
    {
        if (debug_level) fprintf( stderr, "activating read queue for socket %p\n", sock );
        async_wake_up( &sock->read_q, STATUS_ALERTED );
        /* consumed: the woken async will perform the actual read */
        event &= ~(POLLIN | POLLPRI);
    }

    if (is_fd_overlapped( sock->fd ))
    {
        if (event & POLLOUT && async_waiting( &sock->write_q ))
        {
            if (debug_level) fprintf( stderr, "activating write queue for socket %p\n", sock );
            async_wake_up( &sock->write_q, STATUS_ALERTED );
            event &= ~POLLOUT;
        }
    }

    if (event & (POLLERR | POLLHUP))
    {
        int status = sock_get_ntstatus( error );
        struct accept_req *req, *next;

        /* fail waiting reads/writes only for directions already shut down;
         * an open direction may still drain buffered data */
        if (!(sock->state & FD_READ))
            async_wake_up( &sock->read_q, status );
        if (!(sock->state & FD_WRITE))
            async_wake_up( &sock->write_q, status );

        /* _SAFE: terminating an accept async frees its request and unlinks it */
        LIST_FOR_EACH_ENTRY_SAFE( req, next, &sock->accept_list, struct accept_req, entry )
        {
            if (req->iosb->status == STATUS_PENDING)
                async_terminate( req->async, status );
        }

        if (sock->accept_recv_req && sock->accept_recv_req->iosb->status == STATUS_PENDING)
            async_terminate( sock->accept_recv_req->async, status );

        if (sock->connect_req.async)
            async_terminate( sock->connect_req.async, status );
    }

    return event;
}
  724. static void post_socket_event( struct sock *sock, unsigned int event_bit, unsigned int error )
  725. {
  726. unsigned int event = (1 << event_bit);
  727. sock->pending_events |= event;
  728. sock->reported_events |= event;
  729. sock->errors[event_bit] = error;
  730. }
  731. static void sock_dispatch_events( struct sock *sock, int prevstate, int event, int error )
  732. {
  733. if (prevstate & FD_CONNECT)
  734. {
  735. post_socket_event( sock, FD_CONNECT_BIT, sock_get_error( error ) );
  736. goto end;
  737. }
  738. if (prevstate & FD_WINE_LISTENING)
  739. {
  740. post_socket_event( sock, FD_ACCEPT_BIT, sock_get_error( error ) );
  741. goto end;
  742. }
  743. if (event & POLLIN)
  744. post_socket_event( sock, FD_READ_BIT, 0 );
  745. if (event & POLLOUT)
  746. post_socket_event( sock, FD_WRITE_BIT, 0 );
  747. if (event & POLLPRI)
  748. post_socket_event( sock, FD_OOB_BIT, 0 );
  749. if (event & (POLLERR|POLLHUP))
  750. post_socket_event( sock, FD_CLOSE_BIT, sock_get_error( error ) );
  751. end:
  752. sock_wake_up( sock );
  753. }
/* fd callback: process the poll() events reported for the socket's unix fd.
 * Updates the socket state machine, then fans the (possibly filtered) events
 * out to AFD polls, pending asyncs, and application-level FD_* events. */
static void sock_poll_event( struct fd *fd, int event )
{
    struct sock *sock = get_fd_user( fd );
    int hangup_seen = 0;
    /* the state before this wakeup decides how events are interpreted below */
    int prevstate = sock->state;
    int error = 0;

    assert( sock->obj.ops == &sock_ops );
    if (debug_level)
        fprintf(stderr, "socket %p select event: %x\n", sock, event);

    /* we may change event later, remove from loop here */
    if (event & (POLLERR|POLLHUP)) set_fd_events( sock->fd, -1 );

    if (sock->state & FD_CONNECT)
    {
        if (event & (POLLERR|POLLHUP))
        {
            /* we didn't get connected? */
            sock->state &= ~FD_CONNECT;
            event &= ~POLLOUT;
            error = sock_error( fd );
        }
        else if (event & POLLOUT)
        {
            /* we got connected */
            sock->state |= FD_WINE_CONNECTED|FD_READ|FD_WRITE;
            sock->state &= ~FD_CONNECT;
            sock->connect_time = current_time;
        }
    }
    else if (sock->state & FD_WINE_LISTENING)
    {
        /* listening */
        if (event & (POLLERR|POLLHUP))
            error = sock_error( fd );
    }
    else
    {
        /* normal data flow */
        if (sock->type == WS_SOCK_STREAM && (event & POLLIN))
        {
            char dummy;
            int nr;

            /* Linux 2.4 doesn't report POLLHUP if only one side of the socket
             * has been closed, so we need to check for it explicitly here */
            nr = recv( get_unix_fd( fd ), &dummy, 1, MSG_PEEK );
            if ( nr == 0 )
            {
                /* zero-byte read: the peer performed an orderly shutdown */
                hangup_seen = 1;
                event &= ~POLLIN;
            }
            else if ( nr < 0 )
            {
                event &= ~POLLIN;
                /* EAGAIN can happen if an async recv() falls between the server's poll()
                   call and the invocation of this routine */
                if ( errno != EAGAIN )
                {
                    error = errno;
                    event |= POLLERR;
                    if ( debug_level )
                        fprintf( stderr, "recv error on socket %p: %d\n", sock, errno );
                }
            }
        }

        if ( (hangup_seen || event & (POLLHUP|POLLERR)) && (sock->state & (FD_READ|FD_WRITE)) )
        {
            error = error ? error : sock_error( fd );
            /* a hard error (or EOF-style shutdown) kills the write side too */
            if ( (event & POLLERR) || ( sock_shutdown_type == SOCK_SHUTDOWN_EOF && (event & POLLHUP) ))
                sock->state &= ~FD_WRITE;
            sock->state &= ~FD_READ;
            if (debug_level)
                fprintf(stderr, "socket %p aborted by error %d, event: %x\n", sock, error, event);
        }

        /* re-add the hangup so the dispatchers below can report FD_CLOSE */
        if (hangup_seen)
            event |= POLLHUP;
    }

    complete_async_polls( sock, event, error );
    /* asyncs consume events; only the remainder reaches the application */
    event = sock_dispatch_asyncs( sock, event, error );
    sock_dispatch_events( sock, prevstate, event, error );

    sock_reselect( sock );
}
  834. static void sock_dump( struct object *obj, int verbose )
  835. {
  836. struct sock *sock = (struct sock *)obj;
  837. assert( obj->ops == &sock_ops );
  838. fprintf( stderr, "Socket fd=%p, state=%x, mask=%x, pending=%x, reported=%x\n",
  839. sock->fd, sock->state,
  840. sock->mask, sock->pending_events, sock->reported_events );
  841. }
  842. static int poll_flags_from_afd( struct sock *sock, int flags )
  843. {
  844. int ev = 0;
  845. /* A connection-mode socket which has never been connected does
  846. * not return write or hangup events, but Linux returns
  847. * POLLOUT | POLLHUP. */
  848. if (sock->type == WS_SOCK_STREAM && !(sock->state & (FD_CONNECT | FD_WINE_CONNECTED | FD_WINE_LISTENING)))
  849. return -1;
  850. if (flags & (AFD_POLL_READ | AFD_POLL_ACCEPT))
  851. ev |= POLLIN;
  852. if ((flags & AFD_POLL_HUP) && sock->type == WS_SOCK_STREAM)
  853. ev |= POLLIN;
  854. if (flags & AFD_POLL_OOB)
  855. ev |= POLLPRI;
  856. if (flags & AFD_POLL_WRITE)
  857. ev |= POLLOUT;
  858. return ev;
  859. }
  860. static int sock_get_poll_events( struct fd *fd )
  861. {
  862. struct sock *sock = get_fd_user( fd );
  863. unsigned int mask = sock->mask & ~sock->reported_events;
  864. unsigned int smask = sock->state & mask;
  865. struct poll_req *req;
  866. int ev = 0;
  867. assert( sock->obj.ops == &sock_ops );
  868. if (sock->state & FD_CONNECT)
  869. /* connecting, wait for writable */
  870. return POLLOUT;
  871. if (!list_empty( &sock->accept_list ) || sock->accept_recv_req )
  872. {
  873. ev |= POLLIN | POLLPRI;
  874. }
  875. else if (async_queued( &sock->read_q ))
  876. {
  877. if (async_waiting( &sock->read_q )) ev |= POLLIN | POLLPRI;
  878. }
  879. else if (smask & FD_READ || (sock->state & FD_WINE_LISTENING && mask & FD_ACCEPT))
  880. ev |= POLLIN | POLLPRI;
  881. /* We use POLLIN with 0 bytes recv() as FD_CLOSE indication for stream sockets. */
  882. else if (sock->type == WS_SOCK_STREAM && (sock->state & FD_READ) && (mask & FD_CLOSE) &&
  883. !(sock->reported_events & FD_READ))
  884. ev |= POLLIN;
  885. if (async_queued( &sock->write_q ))
  886. {
  887. if (async_waiting( &sock->write_q )) ev |= POLLOUT;
  888. }
  889. else if (smask & FD_WRITE)
  890. ev |= POLLOUT;
  891. LIST_FOR_EACH_ENTRY( req, &poll_list, struct poll_req, entry )
  892. {
  893. unsigned int i;
  894. for (i = 0; i < req->count; ++i)
  895. {
  896. if (req->sockets[i].sock != sock) continue;
  897. ev |= poll_flags_from_afd( sock, req->sockets[i].flags );
  898. }
  899. }
  900. return ev;
  901. }
  902. static enum server_fd_type sock_get_fd_type( struct fd *fd )
  903. {
  904. return FD_TYPE_SOCKET;
  905. }
  906. static void sock_queue_async( struct fd *fd, struct async *async, int type, int count )
  907. {
  908. struct sock *sock = get_fd_user( fd );
  909. struct async_queue *queue;
  910. assert( sock->obj.ops == &sock_ops );
  911. switch (type)
  912. {
  913. case ASYNC_TYPE_READ:
  914. queue = &sock->read_q;
  915. break;
  916. case ASYNC_TYPE_WRITE:
  917. queue = &sock->write_q;
  918. break;
  919. default:
  920. set_error( STATUS_INVALID_PARAMETER );
  921. return;
  922. }
  923. if ( ( !( sock->state & (FD_READ|FD_CONNECT|FD_WINE_LISTENING) ) && type == ASYNC_TYPE_READ ) ||
  924. ( !( sock->state & (FD_WRITE|FD_CONNECT) ) && type == ASYNC_TYPE_WRITE ) )
  925. {
  926. set_error( STATUS_PIPE_DISCONNECTED );
  927. return;
  928. }
  929. queue_async( queue, async );
  930. sock_reselect( sock );
  931. set_error( STATUS_PENDING );
  932. }
  933. static void sock_reselect_async( struct fd *fd, struct async_queue *queue )
  934. {
  935. struct sock *sock = get_fd_user( fd );
  936. if (sock->wr_shutdown_pending && list_empty( &sock->write_q.queue ))
  937. shutdown( get_unix_fd( sock->fd ), SHUT_WR );
  938. /* ignore reselect on ifchange queue */
  939. if (&sock->ifchange_q != queue)
  940. sock_reselect( sock );
  941. }
  942. static struct fd *sock_get_fd( struct object *obj )
  943. {
  944. struct sock *sock = (struct sock *)obj;
  945. return (struct fd *)grab_object( sock->fd );
  946. }
/* object callback: invoked when a handle to the socket is closed.
 * On the last handle, cancel every async attached to the socket and
 * complete outstanding AFD poll requests with AFD_POLL_CLOSE.
 * Always returns 1 to let the close proceed. */
static int sock_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct sock *sock = (struct sock *)obj;

    if (sock->obj.handle_count == 1) /* last handle */
    {
        struct accept_req *accept_req, *accept_next;
        struct poll_req *poll_req, *poll_next;

        if (sock->accept_recv_req)
            async_terminate( sock->accept_recv_req->async, STATUS_CANCELLED );

        /* _SAFE: terminating an accept async frees and unlinks its request */
        LIST_FOR_EACH_ENTRY_SAFE( accept_req, accept_next, &sock->accept_list, struct accept_req, entry )
            async_terminate( accept_req->async, STATUS_CANCELLED );

        if (sock->connect_req.async)
            async_terminate( sock->connect_req.async, STATUS_CANCELLED );

        LIST_FOR_EACH_ENTRY_SAFE( poll_req, poll_next, &poll_list, struct poll_req, entry )
        {
            struct iosb *iosb = poll_req->iosb;
            unsigned int i;

            if (iosb->status != STATUS_PENDING) continue;

            /* report closure on every slot of the request that refers to us */
            for (i = 0; i < poll_req->count; ++i)
            {
                if (poll_req->sockets[i].sock == sock)
                {
                    iosb->status = STATUS_SUCCESS;
                    poll_req->output[i].flags = AFD_POLL_CLOSE;
                    poll_req->output[i].status = 0;
                }
            }

            if (iosb->status != STATUS_PENDING)
            {
                iosb->out_data = poll_req->output;
                iosb->out_size = poll_req->count * sizeof(*poll_req->output);
                async_terminate( poll_req->async, STATUS_ALERTED );
            }
        }
    }
    return 1;
}
/* object callback: final destruction of the socket object.
 * Cancels interface-change waiters, frees every async queue, and shuts the
 * unix socket down before releasing the fd. */
static void sock_destroy( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;

    assert( obj->ops == &sock_ops );

    /* FIXME: special socket shutdown stuff? */

    if ( sock->deferred )
        release_object( sock->deferred );

    /* wake ifchange waiters before releasing the ifchange object */
    async_wake_up( &sock->ifchange_q, STATUS_CANCELLED );
    sock_release_ifchange( sock );
    free_async_queue( &sock->read_q );
    free_async_queue( &sock->write_q );
    free_async_queue( &sock->ifchange_q );
    free_async_queue( &sock->accept_q );
    free_async_queue( &sock->connect_q );
    free_async_queue( &sock->poll_q );
    if (sock->event) release_object( sock->event );
    if (sock->fd)
    {
        /* shut the socket down to force pending poll() calls in the client to return */
        shutdown( get_unix_fd(sock->fd), SHUT_RDWR );
        release_object( sock->fd );
    }
}
  1007. static struct sock *create_socket(void)
  1008. {
  1009. struct sock *sock;
  1010. if (!(sock = alloc_object( &sock_ops ))) return NULL;
  1011. sock->fd = NULL;
  1012. sock->state = 0;
  1013. sock->mask = 0;
  1014. sock->pending_events = 0;
  1015. sock->reported_events = 0;
  1016. sock->polling = 0;
  1017. sock->wr_shutdown_pending = 0;
  1018. sock->flags = 0;
  1019. sock->proto = 0;
  1020. sock->type = 0;
  1021. sock->family = 0;
  1022. sock->event = NULL;
  1023. sock->window = 0;
  1024. sock->message = 0;
  1025. sock->wparam = 0;
  1026. sock->connect_time = 0;
  1027. sock->deferred = NULL;
  1028. sock->ifchange_obj = NULL;
  1029. sock->accept_recv_req = NULL;
  1030. sock->connect_req.async = NULL;
  1031. init_async_queue( &sock->read_q );
  1032. init_async_queue( &sock->write_q );
  1033. init_async_queue( &sock->ifchange_q );
  1034. init_async_queue( &sock->accept_q );
  1035. init_async_queue( &sock->connect_q );
  1036. init_async_queue( &sock->poll_q );
  1037. memset( sock->errors, 0, sizeof(sock->errors) );
  1038. list_init( &sock->accept_list );
  1039. return sock;
  1040. }
  1041. static int get_unix_family( int family )
  1042. {
  1043. switch (family)
  1044. {
  1045. case WS_AF_INET: return AF_INET;
  1046. case WS_AF_INET6: return AF_INET6;
  1047. #ifdef HAS_IPX
  1048. case WS_AF_IPX: return AF_IPX;
  1049. #endif
  1050. #ifdef AF_IRDA
  1051. case WS_AF_IRDA: return AF_IRDA;
  1052. #endif
  1053. case WS_AF_UNSPEC: return AF_UNSPEC;
  1054. default: return -1;
  1055. }
  1056. }
  1057. static int get_unix_type( int type )
  1058. {
  1059. switch (type)
  1060. {
  1061. case WS_SOCK_DGRAM: return SOCK_DGRAM;
  1062. case WS_SOCK_RAW: return SOCK_RAW;
  1063. case WS_SOCK_STREAM: return SOCK_STREAM;
  1064. default: return -1;
  1065. }
  1066. }
  1067. static int get_unix_protocol( int protocol )
  1068. {
  1069. if (protocol >= WS_NSPROTO_IPX && protocol <= WS_NSPROTO_IPX + 255)
  1070. return protocol;
  1071. switch (protocol)
  1072. {
  1073. case WS_IPPROTO_ICMP: return IPPROTO_ICMP;
  1074. case WS_IPPROTO_IGMP: return IPPROTO_IGMP;
  1075. case WS_IPPROTO_IP: return IPPROTO_IP;
  1076. case WS_IPPROTO_IPIP: return IPPROTO_IPIP;
  1077. case WS_IPPROTO_IPV6: return IPPROTO_IPV6;
  1078. case WS_IPPROTO_RAW: return IPPROTO_RAW;
  1079. case WS_IPPROTO_TCP: return IPPROTO_TCP;
  1080. case WS_IPPROTO_UDP: return IPPROTO_UDP;
  1081. default: return -1;
  1082. }
  1083. }
/* Enable or disable the "don't fragment" behaviour on a socket, using
 * whichever option the platform provides. Silently does nothing when no
 * suitable option exists. `level` is IPPROTO_IP or IPPROTO_IPV6. */
static void set_dont_fragment( int fd, int level, int value )
{
    int optname;

    if (level == IPPROTO_IP)
    {
#ifdef IP_DONTFRAG
        /* BSD-style direct flag */
        optname = IP_DONTFRAG;
#elif defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DO) && defined(IP_PMTUDISC_DONT)
        /* Linux: express DF through path-MTU discovery mode */
        optname = IP_MTU_DISCOVER;
        value = value ? IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
#else
        return;
#endif
    }
    else
    {
#ifdef IPV6_DONTFRAG
        optname = IPV6_DONTFRAG;
#elif defined(IPV6_MTU_DISCOVER) && defined(IPV6_PMTUDISC_DO) && defined(IPV6_PMTUDISC_DONT)
        optname = IPV6_MTU_DISCOVER;
        value = value ? IPV6_PMTUDISC_DO : IPV6_PMTUDISC_DONT;
#else
        return;
#endif
    }

    /* best-effort: failure to set the option is deliberately ignored */
    setsockopt( fd, level, optname, &value, sizeof(value) );
}
  1111. static int init_socket( struct sock *sock, int family, int type, int protocol, unsigned int flags )
  1112. {
  1113. unsigned int options = 0;
  1114. int sockfd, unix_type, unix_family, unix_protocol;
  1115. unix_family = get_unix_family( family );
  1116. unix_type = get_unix_type( type );
  1117. unix_protocol = get_unix_protocol( protocol );
  1118. if (unix_protocol < 0)
  1119. {
  1120. if (type && unix_type < 0)
  1121. set_win32_error( WSAESOCKTNOSUPPORT );
  1122. else
  1123. set_win32_error( WSAEPROTONOSUPPORT );
  1124. return -1;
  1125. }
  1126. if (unix_family < 0)
  1127. {
  1128. if (family >= 0 && unix_type < 0)
  1129. set_win32_error( WSAESOCKTNOSUPPORT );
  1130. else
  1131. set_win32_error( WSAEAFNOSUPPORT );
  1132. return -1;
  1133. }
  1134. sockfd = socket( unix_family, unix_type, unix_protocol );
  1135. if (sockfd == -1)
  1136. {
  1137. if (errno == EINVAL) set_win32_error( WSAESOCKTNOSUPPORT );
  1138. else set_win32_error( sock_get_error( errno ));
  1139. return -1;
  1140. }
  1141. fcntl(sockfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
  1142. if (family == WS_AF_IPX && protocol >= WS_NSPROTO_IPX && protocol <= WS_NSPROTO_IPX + 255)
  1143. {
  1144. #ifdef HAS_IPX
  1145. int ipx_type = protocol - WS_NSPROTO_IPX;
  1146. #ifdef SOL_IPX
  1147. setsockopt( sockfd, SOL_IPX, IPX_TYPE, &ipx_type, sizeof(ipx_type) );
  1148. #else
  1149. struct ipx val;
  1150. /* Should we retrieve val using a getsockopt call and then
  1151. * set the modified one? */
  1152. val.ipx_pt = ipx_type;
  1153. setsockopt( sockfd, 0, SO_DEFAULT_HEADERS, &val, sizeof(val) );
  1154. #endif
  1155. #endif
  1156. }
  1157. if (unix_family == AF_INET || unix_family == AF_INET6)
  1158. {
  1159. /* ensure IP_DONTFRAGMENT is disabled for SOCK_DGRAM and SOCK_RAW, enabled for SOCK_STREAM */
  1160. if (unix_type == SOCK_DGRAM || unix_type == SOCK_RAW) /* in Linux the global default can be enabled */
  1161. set_dont_fragment( sockfd, unix_family == AF_INET6 ? IPPROTO_IPV6 : IPPROTO_IP, FALSE );
  1162. else if (unix_type == SOCK_STREAM)
  1163. set_dont_fragment( sockfd, unix_family == AF_INET6 ? IPPROTO_IPV6 : IPPROTO_IP, TRUE );
  1164. }
  1165. #ifdef IPV6_V6ONLY
  1166. if (unix_family == AF_INET6)
  1167. {
  1168. static const int enable = 1;
  1169. setsockopt( sockfd, IPPROTO_IPV6, IPV6_V6ONLY, &enable, sizeof(enable) );
  1170. }
  1171. #endif
  1172. sock->state = (type != SOCK_STREAM) ? (FD_READ|FD_WRITE) : 0;
  1173. sock->flags = flags;
  1174. sock->proto = protocol;
  1175. sock->type = type;
  1176. sock->family = family;
  1177. if (sock->fd)
  1178. {
  1179. options = get_fd_options( sock->fd );
  1180. release_object( sock->fd );
  1181. }
  1182. if (!(sock->fd = create_anonymous_fd( &sock_fd_ops, sockfd, &sock->obj, options )))
  1183. {
  1184. return -1;
  1185. }
  1186. sock_reselect( sock );
  1187. clear_error();
  1188. return 0;
  1189. }
  1190. /* accepts a socket and inits it */
  1191. static int accept_new_fd( struct sock *sock )
  1192. {
  1193. /* Try to accept(2). We can't be safe that this an already connected socket
  1194. * or that accept() is allowed on it. In those cases we will get -1/errno
  1195. * return.
  1196. */
  1197. struct sockaddr saddr;
  1198. socklen_t slen = sizeof(saddr);
  1199. int acceptfd = accept( get_unix_fd(sock->fd), &saddr, &slen );
  1200. if (acceptfd != -1)
  1201. fcntl( acceptfd, F_SETFL, O_NONBLOCK );
  1202. else
  1203. set_error( sock_get_ntstatus( errno ));
  1204. return acceptfd;
  1205. }
/* accept a socket (creates a new fd) */
/* Returns a new socket object for the accepted connection, or the socket
 * previously deferred with SO_UPDATE_ACCEPT_CONTEXT-style deferral. Returns
 * NULL with the error set on failure. The caller owns the returned ref. */
static struct sock *accept_socket( struct sock *sock )
{
    struct sock *acceptsock;
    int acceptfd;

    if (get_unix_fd( sock->fd ) == -1) return NULL;

    if ( sock->deferred )
    {
        /* hand back the deferred socket instead of accepting a new one */
        acceptsock = sock->deferred;
        sock->deferred = NULL;
    }
    else
    {
        if ((acceptfd = accept_new_fd( sock )) == -1) return NULL;
        if (!(acceptsock = create_socket()))
        {
            close( acceptfd );
            return NULL;
        }

        /* newly created socket gets the same properties of the listening socket */
        acceptsock->state = FD_WINE_CONNECTED|FD_READ|FD_WRITE;
        if (sock->state & FD_WINE_NONBLOCKING)
            acceptsock->state |= FD_WINE_NONBLOCKING;
        acceptsock->mask    = sock->mask;
        acceptsock->proto   = sock->proto;
        acceptsock->type    = sock->type;
        acceptsock->family  = sock->family;
        acceptsock->window  = sock->window;
        acceptsock->message = sock->message;
        acceptsock->connect_time = current_time;
        if (sock->event) acceptsock->event = (struct event *)grab_object( sock->event );
        acceptsock->flags = sock->flags;
        if (!(acceptsock->fd = create_anonymous_fd( &sock_fd_ops, acceptfd, &acceptsock->obj,
                                                    get_fd_options( sock->fd ) )))
        {
            /* create_anonymous_fd() set the error; drop the half-built socket */
            release_object( acceptsock );
            return NULL;
        }
    }
    clear_error();
    /* a fresh FD_ACCEPT may be reported for the next pending connection */
    sock->pending_events &= ~FD_ACCEPT;
    sock->reported_events &= ~FD_ACCEPT;
    sock_reselect( sock );
    return acceptsock;
}
/* Accept a connection from `sock` into the caller-supplied `acceptsock`
 * (AcceptEx-style), replacing acceptsock's fd with the accepted one.
 * Returns TRUE on success, FALSE with the error set on failure. */
static int accept_into_socket( struct sock *sock, struct sock *acceptsock )
{
    int acceptfd;
    struct fd *newfd;

    if (get_unix_fd( sock->fd ) == -1) return FALSE;

    if ( sock->deferred )
    {
        /* reuse the deferred socket's fd rather than accepting a new one */
        newfd = dup_fd_object( sock->deferred->fd, 0, 0,
                               get_fd_options( acceptsock->fd ) );
        if ( !newfd )
            return FALSE;

        set_fd_user( newfd, &sock_fd_ops, &acceptsock->obj );

        release_object( sock->deferred );
        sock->deferred = NULL;
    }
    else
    {
        if ((acceptfd = accept_new_fd( sock )) == -1)
            return FALSE;

        if (!(newfd = create_anonymous_fd( &sock_fd_ops, acceptfd, &acceptsock->obj,
                                           get_fd_options( acceptsock->fd ) )))
            return FALSE;
    }

    /* reset acceptsock's event bookkeeping; it now represents the new connection */
    acceptsock->state |= FD_WINE_CONNECTED|FD_READ|FD_WRITE;
    acceptsock->pending_events = 0;
    acceptsock->reported_events = 0;
    acceptsock->polling = 0;
    acceptsock->proto  = sock->proto;
    acceptsock->type   = sock->type;
    acceptsock->family = sock->family;
    acceptsock->wparam = 0;
    acceptsock->deferred = NULL;
    acceptsock->connect_time = current_time;

    /* keep the completion port association of the old fd, then swap fds */
    fd_copy_completion( acceptsock->fd, newfd );
    release_object( acceptsock->fd );
    acceptsock->fd = newfd;

    clear_error();
    sock->pending_events &= ~FD_ACCEPT;
    sock->reported_events &= ~FD_ACCEPT;
    sock_reselect( sock );

    return TRUE;
}
/* return an errno value mapped to a WSA error */
/* Several unix errors fold into one WSA code (EPERM->WSAEACCES,
 * EINPROGRESS->WSAEWOULDBLOCK, EPIPE->WSAECONNRESET); values with no
 * mapping are reported on stderr and fall back to WSAEFAULT. */
static unsigned int sock_get_error( int err )
{
    switch (err)
    {
    case EINTR: return WSAEINTR;
    case EBADF: return WSAEBADF;
    case EPERM:
    case EACCES: return WSAEACCES;
    case EFAULT: return WSAEFAULT;
    case EINVAL: return WSAEINVAL;
    case EMFILE: return WSAEMFILE;
    case EINPROGRESS:
    case EWOULDBLOCK: return WSAEWOULDBLOCK;
    case EALREADY: return WSAEALREADY;
    case ENOTSOCK: return WSAENOTSOCK;
    case EDESTADDRREQ: return WSAEDESTADDRREQ;
    case EMSGSIZE: return WSAEMSGSIZE;
    case EPROTOTYPE: return WSAEPROTOTYPE;
    case ENOPROTOOPT: return WSAENOPROTOOPT;
    case EPROTONOSUPPORT: return WSAEPROTONOSUPPORT;
    case ESOCKTNOSUPPORT: return WSAESOCKTNOSUPPORT;
    case EOPNOTSUPP: return WSAEOPNOTSUPP;
    case EPFNOSUPPORT: return WSAEPFNOSUPPORT;
    case EAFNOSUPPORT: return WSAEAFNOSUPPORT;
    case EADDRINUSE: return WSAEADDRINUSE;
    case EADDRNOTAVAIL: return WSAEADDRNOTAVAIL;
    case ENETDOWN: return WSAENETDOWN;
    case ENETUNREACH: return WSAENETUNREACH;
    case ENETRESET: return WSAENETRESET;
    case ECONNABORTED: return WSAECONNABORTED;
    case EPIPE:
    case ECONNRESET: return WSAECONNRESET;
    case ENOBUFS: return WSAENOBUFS;
    case EISCONN: return WSAEISCONN;
    case ENOTCONN: return WSAENOTCONN;
    case ESHUTDOWN: return WSAESHUTDOWN;
    case ETOOMANYREFS: return WSAETOOMANYREFS;
    case ETIMEDOUT: return WSAETIMEDOUT;
    case ECONNREFUSED: return WSAECONNREFUSED;
    case ELOOP: return WSAELOOP;
    case ENAMETOOLONG: return WSAENAMETOOLONG;
    case EHOSTDOWN: return WSAEHOSTDOWN;
    case EHOSTUNREACH: return WSAEHOSTUNREACH;
    case ENOTEMPTY: return WSAENOTEMPTY;
#ifdef EPROCLIM
    case EPROCLIM: return WSAEPROCLIM;
#endif
#ifdef EUSERS
    case EUSERS: return WSAEUSERS;
#endif
#ifdef EDQUOT
    case EDQUOT: return WSAEDQUOT;
#endif
#ifdef ESTALE
    case ESTALE: return WSAESTALE;
#endif
#ifdef EREMOTE
    case EREMOTE: return WSAEREMOTE;
#endif

    case 0: return 0;
    default:
        errno = err;
        perror("wineserver: sock_get_error() can't map error");
        return WSAEFAULT;
    }
}
/* return an errno value mapped to an NTSTATUS code; unmapped values are
 * reported on stderr and fall back to STATUS_UNSUCCESSFUL */
static int sock_get_ntstatus( int err )
{
    switch ( err )
    {
    case EBADF: return STATUS_INVALID_HANDLE;
    case EBUSY: return STATUS_DEVICE_BUSY;
    case EPERM:
    case EACCES: return STATUS_ACCESS_DENIED;
    case EFAULT: return STATUS_ACCESS_VIOLATION;
    case EINVAL: return STATUS_INVALID_PARAMETER;
    case ENFILE:
    case EMFILE: return STATUS_TOO_MANY_OPENED_FILES;
    case EINPROGRESS:
    case EWOULDBLOCK: return STATUS_DEVICE_NOT_READY;
    case EALREADY: return STATUS_NETWORK_BUSY;
    case ENOTSOCK: return STATUS_OBJECT_TYPE_MISMATCH;
    case EDESTADDRREQ: return STATUS_INVALID_PARAMETER;
    case EMSGSIZE: return STATUS_BUFFER_OVERFLOW;
    case EPROTONOSUPPORT:
    case ESOCKTNOSUPPORT:
    case EPFNOSUPPORT:
    case EAFNOSUPPORT:
    case EPROTOTYPE: return STATUS_NOT_SUPPORTED;
    case ENOPROTOOPT: return STATUS_INVALID_PARAMETER;
    case EOPNOTSUPP: return STATUS_NOT_SUPPORTED;
    case EADDRINUSE: return STATUS_SHARING_VIOLATION;
    case EADDRNOTAVAIL: return STATUS_INVALID_PARAMETER;
    case ECONNREFUSED: return STATUS_CONNECTION_REFUSED;
    case ESHUTDOWN: return STATUS_PIPE_DISCONNECTED;
    case ENOTCONN: return STATUS_INVALID_CONNECTION;
    case ETIMEDOUT: return STATUS_IO_TIMEOUT;
    case ENETUNREACH: return STATUS_NETWORK_UNREACHABLE;
    case EHOSTUNREACH: return STATUS_HOST_UNREACHABLE;
    case ENETDOWN: return STATUS_NETWORK_BUSY;
    case EPIPE:
    case ECONNRESET: return STATUS_CONNECTION_RESET;
    case ECONNABORTED: return STATUS_CONNECTION_ABORTED;
    case EISCONN: return STATUS_CONNECTION_ACTIVE;

    case 0: return STATUS_SUCCESS;
    default:
        errno = err;
        perror("wineserver: sock_get_ntstatus() can't map error");
        return STATUS_UNSUCCESSFUL;
    }
}
  1405. static struct accept_req *alloc_accept_req( struct sock *sock, struct sock *acceptsock, struct async *async,
  1406. const struct afd_accept_into_params *params )
  1407. {
  1408. struct accept_req *req = mem_alloc( sizeof(*req) );
  1409. if (req)
  1410. {
  1411. req->async = (struct async *)grab_object( async );
  1412. req->iosb = async_get_iosb( async );
  1413. req->sock = (struct sock *)grab_object( sock );
  1414. req->acceptsock = acceptsock;
  1415. if (acceptsock) grab_object( acceptsock );
  1416. req->accepted = 0;
  1417. req->recv_len = 0;
  1418. req->local_len = 0;
  1419. if (params)
  1420. {
  1421. req->recv_len = params->recv_len;
  1422. req->local_len = params->local_len;
  1423. }
  1424. }
  1425. return req;
  1426. }
  1427. static int sock_ioctl( struct fd *fd, ioctl_code_t code, struct async *async )
  1428. {
  1429. struct sock *sock = get_fd_user( fd );
  1430. int unix_fd;
  1431. assert( sock->obj.ops == &sock_ops );
  1432. if ((unix_fd = get_unix_fd( fd )) == -1 && code != IOCTL_AFD_WINE_CREATE) return 0;
  1433. switch(code)
  1434. {
  1435. case IOCTL_AFD_WINE_CREATE:
  1436. {
  1437. const struct afd_create_params *params = get_req_data();
  1438. if (get_req_data_size() != sizeof(*params))
  1439. {
  1440. set_error( STATUS_INVALID_PARAMETER );
  1441. return 0;
  1442. }
  1443. init_socket( sock, params->family, params->type, params->protocol, params->flags );
  1444. return 0;
  1445. }
  1446. case IOCTL_AFD_WINE_ACCEPT:
  1447. {
  1448. struct sock *acceptsock;
  1449. obj_handle_t handle;
  1450. if (get_reply_max_size() != sizeof(handle))
  1451. {
  1452. set_error( STATUS_BUFFER_TOO_SMALL );
  1453. return 0;
  1454. }
  1455. if (!(acceptsock = accept_socket( sock )))
  1456. {
  1457. struct accept_req *req;
  1458. if (sock->state & FD_WINE_NONBLOCKING) return 0;
  1459. if (get_error() != STATUS_DEVICE_NOT_READY) return 0;
  1460. if (!(req = alloc_accept_req( sock, NULL, async, NULL ))) return 0;
  1461. list_add_tail( &sock->accept_list, &req->entry );
  1462. async_set_completion_callback( async, free_accept_req, req );
  1463. queue_async( &sock->accept_q, async );
  1464. sock_reselect( sock );
  1465. set_error( STATUS_PENDING );
  1466. return 1;
  1467. }
  1468. handle = alloc_handle( current->process, &acceptsock->obj,
  1469. GENERIC_READ | GENERIC_WRITE | SYNCHRONIZE, OBJ_INHERIT );
  1470. acceptsock->wparam = handle;
  1471. release_object( acceptsock );
  1472. set_reply_data( &handle, sizeof(handle) );
  1473. return 0;
  1474. }
  1475. case IOCTL_AFD_WINE_ACCEPT_INTO:
  1476. {
  1477. static const int access = FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES | FILE_READ_DATA;
  1478. const struct afd_accept_into_params *params = get_req_data();
  1479. struct sock *acceptsock;
  1480. unsigned int remote_len;
  1481. struct accept_req *req;
  1482. if (get_req_data_size() != sizeof(*params) ||
  1483. get_reply_max_size() < params->recv_len ||
  1484. get_reply_max_size() - params->recv_len < params->local_len)
  1485. {
  1486. set_error( STATUS_BUFFER_TOO_SMALL );
  1487. return 0;
  1488. }
  1489. remote_len = get_reply_max_size() - params->recv_len - params->local_len;
  1490. if (remote_len < sizeof(int))
  1491. {
  1492. set_error( STATUS_INVALID_PARAMETER );
  1493. return 0;
  1494. }
  1495. if (!(acceptsock = (struct sock *)get_handle_obj( current->process, params->accept_handle, access, &sock_ops )))
  1496. return 0;
  1497. if (acceptsock->accept_recv_req)
  1498. {
  1499. release_object( acceptsock );
  1500. set_error( STATUS_INVALID_PARAMETER );
  1501. return 0;
  1502. }
  1503. if (!(req = alloc_accept_req( sock, acceptsock, async, params )))
  1504. {
  1505. release_object( acceptsock );
  1506. return 0;
  1507. }
  1508. list_add_tail( &sock->accept_list, &req->entry );
  1509. acceptsock->accept_recv_req = req;
  1510. release_object( acceptsock );
  1511. acceptsock->wparam = params->accept_handle;
  1512. async_set_completion_callback( async, free_accept_req, req );
  1513. queue_async( &sock->accept_q, async );
  1514. sock_reselect( sock );
  1515. set_error( STATUS_PENDING );
  1516. return 1;
  1517. }
  1518. case IOCTL_AFD_LISTEN:
  1519. {
  1520. const struct afd_listen_params *params = get_req_data();
  1521. if (get_req_data_size() < sizeof(*params))
  1522. {
  1523. set_error( STATUS_INVALID_PARAMETER );
  1524. return 0;
  1525. }
  1526. if (listen( unix_fd, params->backlog ) < 0)
  1527. {
  1528. set_error( sock_get_ntstatus( errno ) );
  1529. return 0;
  1530. }
  1531. sock->pending_events &= ~FD_ACCEPT;
  1532. sock->reported_events &= ~FD_ACCEPT;
  1533. sock->state |= FD_WINE_LISTENING;
  1534. sock->state &= ~(FD_CONNECT | FD_WINE_CONNECTED);
  1535. /* we may already be selecting for FD_ACCEPT */
  1536. sock_reselect( sock );
  1537. return 0;
  1538. }
  1539. case IOCTL_AFD_WINE_CONNECT:
  1540. {
  1541. const struct afd_connect_params *params = get_req_data();
  1542. const struct sockaddr *addr;
  1543. int send_len, ret;
  1544. if (get_req_data_size() < sizeof(*params) ||
  1545. get_req_data_size() - sizeof(*params) < params->addr_len)
  1546. {
  1547. set_error( STATUS_BUFFER_TOO_SMALL );
  1548. return 0;
  1549. }
  1550. send_len = get_req_data_size() - sizeof(*params) - params->addr_len;
  1551. addr = (const struct sockaddr *)(params + 1);
  1552. if (sock->accept_recv_req)
  1553. {
  1554. set_error( STATUS_INVALID_PARAMETER );
  1555. return 0;
  1556. }
  1557. if (sock->connect_req.async)
  1558. {
  1559. set_error( params->synchronous ? STATUS_INVALID_PARAMETER : STATUS_CONNECTION_ACTIVE );
  1560. return 0;
  1561. }
  1562. ret = connect( unix_fd, addr, params->addr_len );
  1563. if (ret < 0 && errno != EINPROGRESS)
  1564. {
  1565. set_error( sock_get_ntstatus( errno ) );
  1566. return 0;
  1567. }
  1568. sock->pending_events &= ~(FD_CONNECT | FD_READ | FD_WRITE);
  1569. sock->reported_events &= ~(FD_CONNECT | FD_READ | FD_WRITE);
  1570. if (!ret)
  1571. {
  1572. sock->state |= FD_WINE_CONNECTED | FD_READ | FD_WRITE;
  1573. sock->state &= ~FD_CONNECT;
  1574. if (!send_len) return 1;
  1575. }
  1576. sock->state |= FD_CONNECT;
  1577. if (params->synchronous && (sock->state & FD_WINE_NONBLOCKING))
  1578. {
  1579. sock_reselect( sock );
  1580. set_error( STATUS_DEVICE_NOT_READY );
  1581. return 0;
  1582. }
  1583. sock->connect_req.async = (struct async *)grab_object( async );
  1584. sock->connect_req.iosb = async_get_iosb( async );
  1585. sock->connect_req.sock = (struct sock *)grab_object( sock );
  1586. sock->connect_req.addr_len = params->addr_len;
  1587. sock->connect_req.send_len = send_len;
  1588. sock->connect_req.send_cursor = 0;
  1589. async_set_completion_callback( async, free_connect_req, &sock->connect_req );
  1590. queue_async( &sock->connect_q, async );
  1591. sock_reselect( sock );
  1592. set_error( STATUS_PENDING );
  1593. return 1;
  1594. }
  1595. case IOCTL_AFD_WINE_SHUTDOWN:
  1596. {
  1597. unsigned int how;
  1598. if (get_req_data_size() < sizeof(int))
  1599. {
  1600. set_error( STATUS_BUFFER_TOO_SMALL );
  1601. return 0;
  1602. }
  1603. how = *(int *)get_req_data();
  1604. if (how > SD_BOTH)
  1605. {
  1606. set_error( STATUS_INVALID_PARAMETER );
  1607. return 0;
  1608. }
  1609. if (sock->type == WS_SOCK_STREAM && !(sock->state & FD_WINE_CONNECTED))
  1610. {
  1611. set_error( STATUS_INVALID_CONNECTION );
  1612. return 0;
  1613. }
  1614. if (how != SD_SEND)
  1615. {
  1616. sock->state &= ~FD_READ;
  1617. }
  1618. if (how != SD_RECEIVE)
  1619. {
  1620. sock->state &= ~FD_WRITE;
  1621. if (list_empty( &sock->write_q.queue ))
  1622. shutdown( unix_fd, SHUT_WR );
  1623. else
  1624. sock->wr_shutdown_pending = 1;
  1625. }
  1626. if (how == SD_BOTH)
  1627. {
  1628. if (sock->event) release_object( sock->event );
  1629. sock->event = NULL;
  1630. sock->window = 0;
  1631. sock->mask = 0;
  1632. sock->state |= FD_WINE_NONBLOCKING;
  1633. }
  1634. sock_reselect( sock );
  1635. return 1;
  1636. }
  1637. case IOCTL_AFD_WINE_ADDRESS_LIST_CHANGE:
  1638. if ((sock->state & FD_WINE_NONBLOCKING) && async_is_blocking( async ))
  1639. {
  1640. set_error( STATUS_DEVICE_NOT_READY );
  1641. return 0;
  1642. }
  1643. if (!sock_get_ifchange( sock )) return 0;
  1644. queue_async( &sock->ifchange_q, async );
  1645. set_error( STATUS_PENDING );
  1646. return 1;
  1647. default:
  1648. set_error( STATUS_NOT_SUPPORTED );
  1649. return 0;
  1650. }
  1651. }
  1652. static int poll_socket( struct sock *poll_sock, struct async *async, timeout_t timeout,
  1653. unsigned int count, const struct poll_socket_input *input )
  1654. {
  1655. struct poll_socket_output *output;
  1656. struct poll_req *req;
  1657. unsigned int i, j;
  1658. if (!(output = mem_alloc( count * sizeof(*output) )))
  1659. return 0;
  1660. memset( output, 0, count * sizeof(*output) );
  1661. if (!(req = mem_alloc( offsetof( struct poll_req, sockets[count] ) )))
  1662. {
  1663. free( output );
  1664. return 0;
  1665. }
  1666. req->timeout = NULL;
  1667. if (timeout && timeout != TIMEOUT_INFINITE &&
  1668. !(req->timeout = add_timeout_user( timeout, async_poll_timeout, req )))
  1669. {
  1670. free( req );
  1671. free( output );
  1672. return 0;
  1673. }
  1674. for (i = 0; i < count; ++i)
  1675. {
  1676. req->sockets[i].sock = (struct sock *)get_handle_obj( current->process, input[i].socket, 0, &sock_ops );
  1677. if (!req->sockets[i].sock)
  1678. {
  1679. for (j = 0; j < i; ++j) release_object( req->sockets[i].sock );
  1680. if (req->timeout) remove_timeout_user( req->timeout );
  1681. free( req );
  1682. free( output );
  1683. return 0;
  1684. }
  1685. req->sockets[i].flags = input[i].flags;
  1686. }
  1687. req->count = count;
  1688. req->async = (struct async *)grab_object( async );
  1689. req->iosb = async_get_iosb( async );
  1690. req->output = output;
  1691. list_add_tail( &poll_list, &req->entry );
  1692. async_set_completion_callback( async, free_poll_req, req );
  1693. queue_async( &poll_sock->poll_q, async );
  1694. if (!timeout) req->iosb->status = STATUS_SUCCESS;
  1695. for (i = 0; i < count; ++i)
  1696. {
  1697. struct sock *sock = req->sockets[i].sock;
  1698. struct pollfd pollfd;
  1699. int flags;
  1700. pollfd.fd = get_unix_fd( sock->fd );
  1701. pollfd.events = poll_flags_from_afd( sock, req->sockets[i].flags );
  1702. if (pollfd.events < 0 || poll( &pollfd, 1, 0 ) < 0) continue;
  1703. if ((req->sockets[i].flags & AFD_POLL_HUP) && (pollfd.revents & POLLIN) &&
  1704. sock->type == WS_SOCK_STREAM)
  1705. {
  1706. char dummy;
  1707. if (!recv( get_unix_fd( sock->fd ), &dummy, 1, MSG_PEEK ))
  1708. {
  1709. pollfd.revents &= ~POLLIN;
  1710. pollfd.revents |= POLLHUP;
  1711. }
  1712. }
  1713. flags = get_poll_flags( sock, pollfd.revents ) & req->sockets[i].flags;
  1714. if (flags)
  1715. {
  1716. req->iosb->status = STATUS_SUCCESS;
  1717. output[i].flags = flags;
  1718. output[i].status = sock_get_ntstatus( sock_error( sock->fd ) );
  1719. }
  1720. }
  1721. if (req->iosb->status != STATUS_PENDING)
  1722. {
  1723. req->iosb->out_data = output;
  1724. req->iosb->out_size = count * sizeof(*output);
  1725. async_terminate( req->async, STATUS_ALERTED );
  1726. }
  1727. for (i = 0; i < req->count; ++i)
  1728. sock_reselect( req->sockets[i].sock );
  1729. set_error( STATUS_PENDING );
  1730. return 1;
  1731. }
  1732. #ifdef HAVE_LINUX_RTNETLINK_H
  1733. /* only keep one ifchange object around, all sockets waiting for wakeups will look to it */
  1734. static struct object *ifchange_object;
  1735. static void ifchange_dump( struct object *obj, int verbose );
  1736. static struct fd *ifchange_get_fd( struct object *obj );
  1737. static void ifchange_destroy( struct object *obj );
  1738. static int ifchange_get_poll_events( struct fd *fd );
  1739. static void ifchange_poll_event( struct fd *fd, int event );
/* singleton object wrapping the netlink socket used to watch for interface
 * address changes; sockets interested in notifications register on "sockets" */
struct ifchange
{
    struct object obj;     /* object header */
    struct fd    *fd;      /* interface change file descriptor */
    struct list   sockets; /* list of sockets to send interface change notifications */
};
/* object operations for the interface-change notification object; it is never
 * waited on directly, so queue/signal entries use the no-op defaults */
static const struct object_ops ifchange_ops =
{
    sizeof(struct ifchange),  /* size */
    &no_type,                 /* type */
    ifchange_dump,            /* dump */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* signaled */
    no_satisfied,             /* satisfied */
    no_signal,                /* signal */
    ifchange_get_fd,          /* get_fd */
    default_map_access,       /* map_access */
    default_get_sd,           /* get_sd */
    default_set_sd,           /* set_sd */
    no_get_full_name,         /* get_full_name */
    no_lookup_name,           /* lookup_name */
    no_link_name,             /* link_name */
    NULL,                     /* unlink_name */
    no_open_file,             /* open_file */
    no_kernel_obj_list,       /* get_kernel_obj_list */
    no_close_handle,          /* close_handle */
    ifchange_destroy          /* destroy */
};
/* fd operations for the ifchange netlink socket: it is only ever polled for
 * input (see ifchange_poll_event); all I/O entry points are no-ops */
static const struct fd_ops ifchange_fd_ops =
{
    ifchange_get_poll_events, /* get_poll_events */
    ifchange_poll_event,      /* poll_event */
    NULL,                     /* get_fd_type */
    no_fd_read,               /* read */
    no_fd_write,              /* write */
    no_fd_flush,              /* flush */
    no_fd_get_file_info,      /* get_file_info */
    no_fd_get_volume_info,    /* get_volume_info */
    no_fd_ioctl,              /* ioctl */
    NULL,                     /* queue_async */
    NULL                      /* reselect_async */
};
  1783. static void ifchange_dump( struct object *obj, int verbose )
  1784. {
  1785. assert( obj->ops == &ifchange_ops );
  1786. fprintf( stderr, "Interface change\n" );
  1787. }
  1788. static struct fd *ifchange_get_fd( struct object *obj )
  1789. {
  1790. struct ifchange *ifchange = (struct ifchange *)obj;
  1791. return (struct fd *)grab_object( ifchange->fd );
  1792. }
  1793. static void ifchange_destroy( struct object *obj )
  1794. {
  1795. struct ifchange *ifchange = (struct ifchange *)obj;
  1796. assert( obj->ops == &ifchange_ops );
  1797. release_object( ifchange->fd );
  1798. /* reset the global ifchange object so that it will be recreated if it is needed again */
  1799. assert( obj == ifchange_object );
  1800. ifchange_object = NULL;
  1801. }
/* the netlink socket is only ever read from, so always poll for input */
static int ifchange_get_poll_events( struct fd *fd )
{
    return POLLIN;
}
/* wake up all the sockets waiting for a change notification event */
static void ifchange_wake_up( struct object *obj, unsigned int status )
{
    struct ifchange *ifchange = (struct ifchange *)obj;
    struct list *ptr, *next;
    assert( obj->ops == &ifchange_ops );
    assert( obj == ifchange_object );
    /* must use the _SAFE iteration variant: sock_release_ifchange() unlinks
     * the current entry from the list while we walk it */
    LIST_FOR_EACH_SAFE( ptr, next, &ifchange->sockets )
    {
        struct sock *sock = LIST_ENTRY( ptr, struct sock, ifchange_entry );
        assert( sock->ifchange_obj );
        async_wake_up( &sock->ifchange_q, status ); /* issue ifchange notification for the socket */
        sock_release_ifchange( sock ); /* remove socket from list and decrement ifchange refcount */
    }
}
/* drain the netlink socket and wake waiters if an address change was seen */
static void ifchange_poll_event( struct fd *fd, int event )
{
    struct object *ifchange = get_fd_user( fd );
    unsigned int status = STATUS_PENDING;
    char buffer[PIPE_BUF];
    int r;
    r = recv( get_unix_fd(fd), buffer, sizeof(buffer), MSG_DONTWAIT );
    if (r < 0)
    {
        /* EWOULDBLOCK/EAGAIN: spurious wakeup, nothing to read yet */
        if (errno == EWOULDBLOCK || (EWOULDBLOCK != EAGAIN && errno == EAGAIN))
            return; /* retry when poll() says the socket is ready */
        status = sock_get_ntstatus( errno );
    }
    else if (r > 0)
    {
        /* walk the netlink messages in the datagram; an address was added or
         * removed if we see RTM_NEWADDR or RTM_DELADDR before NLMSG_DONE */
        struct nlmsghdr *nlh;
        for (nlh = (struct nlmsghdr *)buffer; NLMSG_OK(nlh, r); nlh = NLMSG_NEXT(nlh, r))
        {
            if (nlh->nlmsg_type == NLMSG_DONE)
                break;
            if (nlh->nlmsg_type == RTM_NEWADDR || nlh->nlmsg_type == RTM_DELADDR)
                status = STATUS_SUCCESS;
        }
    }
    else status = STATUS_CANCELLED; /* zero-byte read: the socket was closed */
    /* STATUS_PENDING means nothing interesting happened; don't wake anyone */
    if (status != STATUS_PENDING) ifchange_wake_up( ifchange, status );
}
  1848. #endif
/* we only need one of these interface notification objects, all of the sockets dependent upon
 * it will wake up when a notification event occurs */
static struct object *get_ifchange( void )
{
#ifdef HAVE_LINUX_RTNETLINK_H
    struct ifchange *ifchange;
    struct sockaddr_nl addr;
    int unix_fd;
    if (ifchange_object)
    {
        /* increment the refcount for each socket that uses the ifchange object */
        return grab_object( ifchange_object );
    }
    /* create the socket we need for processing interface change notifications */
    unix_fd = socket( PF_NETLINK, SOCK_RAW, NETLINK_ROUTE );
    if (unix_fd == -1)
    {
        set_error( sock_get_ntstatus( errno ));
        return NULL;
    }
    fcntl( unix_fd, F_SETFL, O_NONBLOCK ); /* make socket nonblocking */
    memset( &addr, 0, sizeof(addr) );
    addr.nl_family = AF_NETLINK;
    /* subscribe to IPv4 address change notifications */
    addr.nl_groups = RTMGRP_IPV4_IFADDR;
    /* bind the socket to the special netlink kernel interface */
    if (bind( unix_fd, (struct sockaddr *)&addr, sizeof(addr) ) == -1)
    {
        close( unix_fd );
        set_error( sock_get_ntstatus( errno ));
        return NULL;
    }
    if (!(ifchange = alloc_object( &ifchange_ops )))
    {
        close( unix_fd );
        set_error( STATUS_NO_MEMORY );
        return NULL;
    }
    list_init( &ifchange->sockets );
    /* create_anonymous_fd takes ownership of unix_fd on success */
    if (!(ifchange->fd = create_anonymous_fd( &ifchange_fd_ops, unix_fd, &ifchange->obj, 0 )))
    {
        release_object( ifchange );
        set_error( STATUS_NO_MEMORY );
        return NULL;
    }
    set_fd_events( ifchange->fd, POLLIN ); /* enable read wakeup on the file descriptor */
    /* the ifchange object is now successfully configured */
    ifchange_object = &ifchange->obj;
    return &ifchange->obj;
#else
    set_error( STATUS_NOT_SUPPORTED );
    return NULL;
#endif
}
/* add the socket to the interface change notification list */
static void ifchange_add_sock( struct object *obj, struct sock *sock )
{
#ifdef HAVE_LINUX_RTNETLINK_H
    struct ifchange *ifchange = (struct ifchange *)obj;
    /* the entry is removed again by sock_release_ifchange() */
    list_add_tail( &ifchange->sockets, &sock->ifchange_entry );
#endif
}
  1910. /* create a new ifchange queue for a specific socket or, if one already exists, reuse the existing one */
  1911. static struct object *sock_get_ifchange( struct sock *sock )
  1912. {
  1913. struct object *ifchange;
  1914. if (sock->ifchange_obj) /* reuse existing ifchange_obj for this socket */
  1915. return sock->ifchange_obj;
  1916. if (!(ifchange = get_ifchange()))
  1917. return NULL;
  1918. /* add the socket to the ifchange notification list */
  1919. ifchange_add_sock( ifchange, sock );
  1920. sock->ifchange_obj = ifchange;
  1921. return ifchange;
  1922. }
  1923. /* destroy an existing ifchange queue for a specific socket */
  1924. static void sock_release_ifchange( struct sock *sock )
  1925. {
  1926. if (sock->ifchange_obj)
  1927. {
  1928. list_remove( &sock->ifchange_entry );
  1929. release_object( sock->ifchange_obj );
  1930. sock->ifchange_obj = NULL;
  1931. }
  1932. }
  1933. static void socket_device_dump( struct object *obj, int verbose );
  1934. static struct object *socket_device_lookup_name( struct object *obj, struct unicode_str *name,
  1935. unsigned int attr, struct object *root );
  1936. static struct object *socket_device_open_file( struct object *obj, unsigned int access,
  1937. unsigned int sharing, unsigned int options );
/* object operations for the \Device\Afd socket device object */
static const struct object_ops socket_device_ops =
{
    sizeof(struct object),      /* size */
    &device_type,               /* type */
    socket_device_dump,         /* dump */
    no_add_queue,               /* add_queue */
    NULL,                       /* remove_queue */
    NULL,                       /* signaled */
    no_satisfied,               /* satisfied */
    no_signal,                  /* signal */
    no_get_fd,                  /* get_fd */
    default_map_access,         /* map_access */
    default_get_sd,             /* get_sd */
    default_set_sd,             /* set_sd */
    default_get_full_name,      /* get_full_name */
    socket_device_lookup_name,  /* lookup_name */
    directory_link_name,        /* link_name */
    default_unlink_name,        /* unlink_name */
    socket_device_open_file,    /* open_file */
    no_kernel_obj_list,         /* get_kernel_obj_list */
    no_close_handle,            /* close_handle */
    no_destroy                  /* destroy */
};
  1961. static void socket_device_dump( struct object *obj, int verbose )
  1962. {
  1963. fputs( "Socket device\n", stderr );
  1964. }
  1965. static struct object *socket_device_lookup_name( struct object *obj, struct unicode_str *name,
  1966. unsigned int attr, struct object *root )
  1967. {
  1968. return NULL;
  1969. }
  1970. static struct object *socket_device_open_file( struct object *obj, unsigned int access,
  1971. unsigned int sharing, unsigned int options )
  1972. {
  1973. struct sock *sock;
  1974. if (!(sock = create_socket())) return NULL;
  1975. if (!(sock->fd = alloc_pseudo_fd( &sock_fd_ops, &sock->obj, options )))
  1976. {
  1977. release_object( sock );
  1978. return NULL;
  1979. }
  1980. return &sock->obj;
  1981. }
  1982. struct object *create_socket_device( struct object *root, const struct unicode_str *name,
  1983. unsigned int attr, const struct security_descriptor *sd )
  1984. {
  1985. return create_named_object( root, &socket_device_ops, name, attr, sd );
  1986. }
  1987. /* set socket event parameters */
  1988. DECL_HANDLER(set_socket_event)
  1989. {
  1990. struct sock *sock;
  1991. struct event *old_event;
  1992. if (!(sock = (struct sock *)get_handle_obj( current->process, req->handle,
  1993. FILE_WRITE_ATTRIBUTES, &sock_ops))) return;
  1994. if (get_unix_fd( sock->fd ) == -1) return;
  1995. old_event = sock->event;
  1996. sock->mask = req->mask;
  1997. if (req->window)
  1998. {
  1999. sock->pending_events &= ~FD_CONNECT;
  2000. sock->reported_events &= ~req->mask; /* re-enable held events */
  2001. }
  2002. sock->event = NULL;
  2003. sock->window = req->window;
  2004. sock->message = req->msg;
  2005. sock->wparam = req->handle; /* wparam is the socket handle */
  2006. if (req->event) sock->event = get_event_obj( current->process, req->event, EVENT_MODIFY_STATE );
  2007. if (debug_level && sock->event) fprintf(stderr, "event ptr: %p\n", sock->event);
  2008. sock_reselect( sock );
  2009. sock->state |= FD_WINE_NONBLOCKING;
  2010. /* if a network event is pending, signal the event object
  2011. it is possible that FD_CONNECT or FD_ACCEPT network events has happened
  2012. before a WSAEventSelect() was done on it.
  2013. (when dealing with Asynchronous socket) */
  2014. sock_wake_up( sock );
  2015. if (old_event) release_object( old_event ); /* we're through with it */
  2016. release_object( &sock->obj );
  2017. }
  2018. /* get socket event parameters */
  2019. DECL_HANDLER(get_socket_event)
  2020. {
  2021. struct sock *sock;
  2022. if (!(sock = (struct sock *)get_handle_obj( current->process, req->handle,
  2023. FILE_READ_ATTRIBUTES, &sock_ops ))) return;
  2024. if (get_unix_fd( sock->fd ) == -1) return;
  2025. reply->mask = sock->mask;
  2026. reply->pmask = sock->pending_events;
  2027. reply->state = sock->state;
  2028. set_reply_data( sock->errors, min( get_reply_max_size(), sizeof(sock->errors) ));
  2029. if (req->service)
  2030. {
  2031. if (req->c_event)
  2032. {
  2033. struct event *cevent = get_event_obj( current->process, req->c_event,
  2034. EVENT_MODIFY_STATE );
  2035. if (cevent)
  2036. {
  2037. reset_event( cevent );
  2038. release_object( cevent );
  2039. }
  2040. }
  2041. sock->pending_events = 0;
  2042. sock_reselect( sock );
  2043. }
  2044. release_object( &sock->obj );
  2045. }
  2046. /* re-enable pending socket events */
  2047. DECL_HANDLER(enable_socket_event)
  2048. {
  2049. struct sock *sock;
  2050. if (!(sock = (struct sock*)get_handle_obj( current->process, req->handle,
  2051. FILE_WRITE_ATTRIBUTES, &sock_ops)))
  2052. return;
  2053. if (get_unix_fd( sock->fd ) == -1) return;
  2054. /* for event-based notification, windows erases stale events */
  2055. sock->pending_events &= ~req->mask;
  2056. sock->reported_events &= ~req->mask;
  2057. sock->state |= req->sstate;
  2058. sock->state &= ~req->cstate;
  2059. if (sock->type != WS_SOCK_STREAM) sock->state &= ~STREAM_FLAG_MASK;
  2060. sock_reselect( sock );
  2061. release_object( &sock->obj );
  2062. }
  2063. DECL_HANDLER(set_socket_deferred)
  2064. {
  2065. struct sock *sock, *acceptsock;
  2066. sock=(struct sock *)get_handle_obj( current->process, req->handle, FILE_WRITE_ATTRIBUTES, &sock_ops );
  2067. if ( !sock )
  2068. return;
  2069. acceptsock = (struct sock *)get_handle_obj( current->process, req->deferred, 0, &sock_ops );
  2070. if ( !acceptsock )
  2071. {
  2072. release_object( sock );
  2073. return;
  2074. }
  2075. sock->deferred = acceptsock;
  2076. release_object( sock );
  2077. }
  2078. DECL_HANDLER(get_socket_info)
  2079. {
  2080. struct sock *sock;
  2081. sock = (struct sock *)get_handle_obj( current->process, req->handle, FILE_READ_ATTRIBUTES, &sock_ops );
  2082. if (!sock) return;
  2083. if (get_unix_fd( sock->fd ) == -1) return;
  2084. reply->family = sock->family;
  2085. reply->type = sock->type;
  2086. reply->protocol = sock->proto;
  2087. release_object( &sock->obj );
  2088. }
/* perform the server-side part of a socket recv: translate the client's
 * status, optionally attach a receive timeout, and queue/hand off the async */
DECL_HANDLER(recv_socket)
{
    struct sock *sock = (struct sock *)get_handle_obj( current->process, req->async.handle, 0, &sock_ops );
    unsigned int status = req->status;
    timeout_t timeout = 0;
    struct async *async;
    struct fd *fd;

    if (!sock) return;
    fd = sock->fd;

    /* recv() returned EWOULDBLOCK, i.e. no data available yet */
    if (status == STATUS_DEVICE_NOT_READY && !(sock->state & FD_WINE_NONBLOCKING))
    {
#ifdef SO_RCVTIMEO
        struct timeval tv;
        socklen_t len = sizeof(tv);

        /* Set a timeout on the async if necessary.
         *
         * We want to do this *only* if the client gave us STATUS_DEVICE_NOT_READY.
         * If the client gave us STATUS_PENDING, it expects the async to always
         * block (it was triggered by WSARecv*() with a valid OVERLAPPED
         * structure) and for the timeout not to be respected. */
        if (is_fd_overlapped( fd ) && !getsockopt( get_unix_fd( fd ), SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, &len ))
            /* negative value = relative timeout, in 100-nanosecond units */
            timeout = tv.tv_sec * -10000000 + tv.tv_usec * -10;
#endif

        status = STATUS_PENDING;
    }

    /* are we shut down? */
    if (status == STATUS_PENDING && !(sock->state & FD_READ)) status = STATUS_PIPE_DISCONNECTED;

    sock->pending_events &= ~FD_READ;
    sock->reported_events &= ~FD_READ;

    if ((async = create_request_async( fd, get_fd_comp_flags( fd ), &req->async )))
    {
        int success = 0;

        if (status == STATUS_SUCCESS)
        {
            /* data was already read by the client; record the byte count */
            struct iosb *iosb = async_get_iosb( async );
            iosb->result = req->total;
            release_object( iosb );
            success = 1;
        }
        else if (status == STATUS_PENDING)
        {
            success = 1;
        }
        set_error( status );

        if (timeout)
            async_set_timeout( async, timeout, STATUS_IO_TIMEOUT );

        if (status == STATUS_PENDING)
            queue_async( &sock->read_q, async );

        /* always reselect; we changed reported_events above */
        sock_reselect( sock );

        reply->wait = async_handoff( async, success, NULL, 0 );
        reply->options = get_fd_options( fd );
        release_object( async );
    }
    release_object( sock );
}
  2146. DECL_HANDLER(poll_socket)
  2147. {
  2148. struct sock *sock = (struct sock *)get_handle_obj( current->process, req->async.handle, 0, &sock_ops );
  2149. const struct poll_socket_input *input = get_req_data();
  2150. struct async *async;
  2151. unsigned int count;
  2152. if (!sock) return;
  2153. count = get_req_data_size() / sizeof(*input);
  2154. if ((async = create_request_async( sock->fd, get_fd_comp_flags( sock->fd ), &req->async )))
  2155. {
  2156. reply->wait = async_handoff( async, poll_socket( sock, async, req->timeout, count, input ), NULL, 0 );
  2157. reply->options = get_fd_options( sock->fd );
  2158. release_object( async );
  2159. }
  2160. release_object( sock );
  2161. }