vmci_transport_notify.c

/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2009-2013 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/types.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <net/sock.h>

#include "vmci_transport_notify.h"
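
/* PKT_FIELD() is shorthand for the packet-based notification state kept in
 * the transport-private area of a vsock socket
 * (vmci_trans(vsk)->notify.pkt.<field_name>).
 */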
#define PKT_FIELD(vsk, field_name) (vmci_trans(vsk)->notify.pkt.field_name)

static bool vmci_transport_notify_waiting_write(struct vsock_sock *vsk)
{
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
	bool retval;
	u64 notify_limit;

	if (!PKT_FIELD(vsk, peer_waiting_write))
		return false;

#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
	/* When the sender blocks, we take that as a sign that the sender is
	 * faster than the receiver. To reduce the transmit rate of the sender,
	 * we delay the sending of the read notification by decreasing the
	 * write_notify_window. The notification is delayed until the number of
	 * bytes used in the queue drops below the write_notify_window.
	 */

	if (!PKT_FIELD(vsk, peer_waiting_write_detected)) {
		PKT_FIELD(vsk, peer_waiting_write_detected) = true;
		if (PKT_FIELD(vsk, write_notify_window) < PAGE_SIZE) {
			PKT_FIELD(vsk, write_notify_window) =
			    PKT_FIELD(vsk, write_notify_min_window);
		} else {
			PKT_FIELD(vsk, write_notify_window) -= PAGE_SIZE;
			if (PKT_FIELD(vsk, write_notify_window) <
			    PKT_FIELD(vsk, write_notify_min_window))
				PKT_FIELD(vsk, write_notify_window) =
				    PKT_FIELD(vsk, write_notify_min_window);
		}
	}
	notify_limit = vmci_trans(vsk)->consume_size -
		PKT_FIELD(vsk, write_notify_window);
#else
	notify_limit = 0;
#endif

	/* For now we ignore the wait information and just see if the free
	 * space exceeds the notify limit. Note that improving this function
	 * to be more intelligent will not require a protocol change and will
	 * retain compatibility between endpoints with mixed versions of this
	 * function.
	 *
	 * The notify_limit is used to delay notifications in the case where
	 * flow control is enabled. Below the test is expressed in terms of
	 * free space in the queue: if free_space > ConsumeSize -
	 * write_notify_window then notify. An alternate way of expressing this
	 * is to rewrite the expression to use the data ready in the receive
	 * queue: if write_notify_window > bufferReady then notify, as
	 * free_space == ConsumeSize - bufferReady.
	 */
	retval = vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair) >
		notify_limit;
#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
	if (retval) {
		/*
		 * Once we notify the peer, we reset the detected flag so the
		 * next wait will again cause a decrease in the window size.
		 */
		PKT_FIELD(vsk, peer_waiting_write_detected) = false;
	}
#endif
	return retval;
#else
	return true;
#endif
}

static bool vmci_transport_notify_waiting_read(struct vsock_sock *vsk)
{
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
	if (!PKT_FIELD(vsk, peer_waiting_read))
		return false;

	/* For now we ignore the wait information and just see if there is any
	 * data for our peer to read. Note that improving this function to be
	 * more intelligent will not require a protocol change and will retain
	 * compatibility between endpoints with mixed versions of this
	 * function.
	 */
	return vmci_qpair_produce_buf_ready(vmci_trans(vsk)->qpair) > 0;
#else
	return true;
#endif
}

static void
vmci_transport_handle_waiting_read(struct sock *sk,
				   struct vmci_transport_packet *pkt,
				   bool bottom_half,
				   struct sockaddr_vm *dst,
				   struct sockaddr_vm *src)
{
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
	struct vsock_sock *vsk;

	vsk = vsock_sk(sk);

	PKT_FIELD(vsk, peer_waiting_read) = true;
	memcpy(&PKT_FIELD(vsk, peer_waiting_read_info), &pkt->u.wait,
	       sizeof(PKT_FIELD(vsk, peer_waiting_read_info)));

	if (vmci_transport_notify_waiting_read(vsk)) {
		bool sent;

		if (bottom_half)
			sent = vmci_transport_send_wrote_bh(dst, src) > 0;
		else
			sent = vmci_transport_send_wrote(sk) > 0;

		if (sent)
			PKT_FIELD(vsk, peer_waiting_read) = false;
	}
#endif
}

static void
vmci_transport_handle_waiting_write(struct sock *sk,
				    struct vmci_transport_packet *pkt,
				    bool bottom_half,
				    struct sockaddr_vm *dst,
				    struct sockaddr_vm *src)
{
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
	struct vsock_sock *vsk;

	vsk = vsock_sk(sk);

	PKT_FIELD(vsk, peer_waiting_write) = true;
	memcpy(&PKT_FIELD(vsk, peer_waiting_write_info), &pkt->u.wait,
	       sizeof(PKT_FIELD(vsk, peer_waiting_write_info)));

	if (vmci_transport_notify_waiting_write(vsk)) {
		bool sent;

		if (bottom_half)
			sent = vmci_transport_send_read_bh(dst, src) > 0;
		else
			sent = vmci_transport_send_read(sk) > 0;

		if (sent)
			PKT_FIELD(vsk, peer_waiting_write) = false;
	}
#endif
}

static void
vmci_transport_handle_read(struct sock *sk,
			   struct vmci_transport_packet *pkt,
			   bool bottom_half,
			   struct sockaddr_vm *dst, struct sockaddr_vm *src)
{
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
	struct vsock_sock *vsk;

	vsk = vsock_sk(sk);
	PKT_FIELD(vsk, sent_waiting_write) = false;
#endif

	sk->sk_write_space(sk);
}
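
/* Tell the peer, via a WAITING_READ control packet, the offset and consume
 * queue generation at which we will be waiting for data, unless such a
 * notification is already outstanding. Returns false only if the send fails.
 */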
static bool send_waiting_read(struct sock *sk, u64 room_needed)
{
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
	struct vsock_sock *vsk;
	struct vmci_transport_waiting_info waiting_info;
	u64 tail;
	u64 head;
	u64 room_left;
	bool ret;

	vsk = vsock_sk(sk);

	if (PKT_FIELD(vsk, sent_waiting_read))
		return true;

	if (PKT_FIELD(vsk, write_notify_window) <
	    vmci_trans(vsk)->consume_size)
		PKT_FIELD(vsk, write_notify_window) =
		    min(PKT_FIELD(vsk, write_notify_window) + PAGE_SIZE,
			vmci_trans(vsk)->consume_size);

	vmci_qpair_get_consume_indexes(vmci_trans(vsk)->qpair, &tail, &head);
	room_left = vmci_trans(vsk)->consume_size - head;
	if (room_needed >= room_left) {
		waiting_info.offset = room_needed - room_left;
		waiting_info.generation =
		    PKT_FIELD(vsk, consume_q_generation) + 1;
	} else {
		waiting_info.offset = head + room_needed;
		waiting_info.generation = PKT_FIELD(vsk, consume_q_generation);
	}

	ret = vmci_transport_send_waiting_read(sk, &waiting_info) > 0;
	if (ret)
		PKT_FIELD(vsk, sent_waiting_read) = true;

	return ret;
#else
	return true;
#endif
}
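
/* Likewise, tell the peer via a WAITING_WRITE control packet the offset and
 * produce queue generation at which we will be waiting for free space, unless
 * such a notification is already outstanding.
 */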
static bool send_waiting_write(struct sock *sk, u64 room_needed)
{
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
	struct vsock_sock *vsk;
	struct vmci_transport_waiting_info waiting_info;
	u64 tail;
	u64 head;
	u64 room_left;
	bool ret;

	vsk = vsock_sk(sk);

	if (PKT_FIELD(vsk, sent_waiting_write))
		return true;

	vmci_qpair_get_produce_indexes(vmci_trans(vsk)->qpair, &tail, &head);
	room_left = vmci_trans(vsk)->produce_size - tail;
	if (room_needed + 1 >= room_left) {
		/* Wraps around to current generation. */
		waiting_info.offset = room_needed + 1 - room_left;
		waiting_info.generation = PKT_FIELD(vsk, produce_q_generation);
	} else {
		waiting_info.offset = tail + room_needed + 1;
		waiting_info.generation =
		    PKT_FIELD(vsk, produce_q_generation) - 1;
	}

	ret = vmci_transport_send_waiting_write(sk, &waiting_info) > 0;
	if (ret)
		PKT_FIELD(vsk, sent_waiting_write) = true;

	return ret;
#else
	return true;
#endif
}
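
/* Send a READ notification to a peer that is known to be waiting for queue
 * space, retrying a bounded number of times if the datagram send fails.
 */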
static int vmci_transport_send_read_notification(struct sock *sk)
{
	struct vsock_sock *vsk;
	bool sent_read;
	unsigned int retries;
	int err;

	vsk = vsock_sk(sk);
	sent_read = false;
	retries = 0;
	err = 0;

	if (vmci_transport_notify_waiting_write(vsk)) {
		/* Notify the peer that we have read, retrying the send on
		 * failure up to our maximum value. XXX For now we just log
		 * the failure, but later we should schedule a work item to
		 * handle the resend until it succeeds. That would require
		 * keeping track of work items in the vsk and cleaning them up
		 * upon socket close.
		 */
		while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
		       !sent_read &&
		       retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
			err = vmci_transport_send_read(sk);
			if (err >= 0)
				sent_read = true;

			retries++;
		}

		if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS)
			pr_err("%p unable to send read notify to peer\n", sk);
		else
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
			PKT_FIELD(vsk, peer_waiting_write) = false;
#endif
	}
	return err;
}

static void
vmci_transport_handle_wrote(struct sock *sk,
			    struct vmci_transport_packet *pkt,
			    bool bottom_half,
			    struct sockaddr_vm *dst, struct sockaddr_vm *src)
{
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
	struct vsock_sock *vsk = vsock_sk(sk);

	PKT_FIELD(vsk, sent_waiting_read) = false;
#endif

	sk->sk_data_ready(sk);
}
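
/* The callbacks below implement the vmci_transport_notify_ops interface for
 * the control-packet based notification protocol; they are wired up in the
 * ops table at the end of this file.
 */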
static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
	PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
	PKT_FIELD(vsk, peer_waiting_read) = false;
	PKT_FIELD(vsk, peer_waiting_write) = false;
	PKT_FIELD(vsk, peer_waiting_write_detected) = false;
	PKT_FIELD(vsk, sent_waiting_read) = false;
	PKT_FIELD(vsk, sent_waiting_write) = false;
	PKT_FIELD(vsk, produce_q_generation) = 0;
	PKT_FIELD(vsk, consume_q_generation) = 0;

	memset(&PKT_FIELD(vsk, peer_waiting_read_info), 0,
	       sizeof(PKT_FIELD(vsk, peer_waiting_read_info)));
	memset(&PKT_FIELD(vsk, peer_waiting_write_info), 0,
	       sizeof(PKT_FIELD(vsk, peer_waiting_write_info)));
}

static void vmci_transport_notify_pkt_socket_destruct(struct vsock_sock *vsk)
{
}

static int
vmci_transport_notify_pkt_poll_in(struct sock *sk,
				  size_t target, bool *data_ready_now)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (vsock_stream_has_data(vsk)) {
		*data_ready_now = true;
	} else {
		/* We can't read right now because there is nothing in the
		 * queue. Ask for notifications when there is something to
		 * read.
		 */
		if (sk->sk_state == SS_CONNECTED) {
			if (!send_waiting_read(sk, 1))
				return -1;
		}
		*data_ready_now = false;
	}

	return 0;
}

static int
vmci_transport_notify_pkt_poll_out(struct sock *sk,
				   size_t target, bool *space_avail_now)
{
	s64 produce_q_free_space;
	struct vsock_sock *vsk = vsock_sk(sk);

	produce_q_free_space = vsock_stream_has_space(vsk);
	if (produce_q_free_space > 0) {
		*space_avail_now = true;
		return 0;
	} else if (produce_q_free_space == 0) {
		/* This is a connected socket but we can't currently send data.
		 * Notify the peer that we are waiting if the queue is full. We
		 * only send a waiting write if the queue is full because
		 * otherwise we end up in an infinite WAITING_WRITE, READ,
		 * WAITING_WRITE, READ, etc. loop. Treat failing to send the
		 * notification as a socket error, passing that back through
		 * the mask.
		 */
		if (!send_waiting_write(sk, 1))
			return -1;

		*space_avail_now = false;
	}

	return 0;
}

static int
vmci_transport_notify_pkt_recv_init(
			struct sock *sk,
			size_t target,
			struct vmci_transport_recv_notify_data *data)
{
	struct vsock_sock *vsk = vsock_sk(sk);

#ifdef VSOCK_OPTIMIZATION_WAITING_NOTIFY
	data->consume_head = 0;
	data->produce_tail = 0;
#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
	data->notify_on_block = false;

	if (PKT_FIELD(vsk, write_notify_min_window) < target + 1) {
		PKT_FIELD(vsk, write_notify_min_window) = target + 1;
		if (PKT_FIELD(vsk, write_notify_window) <
		    PKT_FIELD(vsk, write_notify_min_window)) {
			/* If the current window is smaller than the new
			 * minimal window size, we need to reevaluate whether
			 * we need to notify the sender. If the number of ready
			 * bytes is smaller than the new window, we need to
			 * send a notification to the sender before we block.
			 */
			PKT_FIELD(vsk, write_notify_window) =
			    PKT_FIELD(vsk, write_notify_min_window);
			data->notify_on_block = true;
		}
	}
#endif
#endif

	return 0;
}

static int
vmci_transport_notify_pkt_recv_pre_block(
			struct sock *sk,
			size_t target,
			struct vmci_transport_recv_notify_data *data)
{
	int err = 0;

	/* Notify our peer that we are waiting for data to read. */
	if (!send_waiting_read(sk, target)) {
		err = -EHOSTUNREACH;
		return err;
	}
#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
	if (data->notify_on_block) {
		err = vmci_transport_send_read_notification(sk);
		if (err < 0)
			return err;

		data->notify_on_block = false;
	}
#endif

	return err;
}

static int
vmci_transport_notify_pkt_recv_pre_dequeue(
			struct sock *sk,
			size_t target,
			struct vmci_transport_recv_notify_data *data)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* Now consume up to len bytes from the queue. Note that since we have
	 * the socket locked we should copy at least ready bytes.
	 */
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
	vmci_qpair_get_consume_indexes(vmci_trans(vsk)->qpair,
				       &data->produce_tail,
				       &data->consume_head);
#endif

	return 0;
}

static int
vmci_transport_notify_pkt_recv_post_dequeue(
			struct sock *sk,
			size_t target,
			ssize_t copied,
			bool data_read,
			struct vmci_transport_recv_notify_data *data)
{
	struct vsock_sock *vsk;
	int err;

	vsk = vsock_sk(sk);
	err = 0;

	if (data_read) {
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
		/* Detect a wrap-around to maintain queue generation. Note
		 * that this is safe since we hold the socket lock across the
		 * two queue pair operations.
		 */
		if (copied >=
		    vmci_trans(vsk)->consume_size - data->consume_head)
			PKT_FIELD(vsk, consume_q_generation)++;
#endif

		err = vmci_transport_send_read_notification(sk);
		if (err < 0)
			return err;
	}
	return err;
}

static int
vmci_transport_notify_pkt_send_init(
			struct sock *sk,
			struct vmci_transport_send_notify_data *data)
{
#ifdef VSOCK_OPTIMIZATION_WAITING_NOTIFY
	data->consume_head = 0;
	data->produce_tail = 0;
#endif

	return 0;
}

static int
vmci_transport_notify_pkt_send_pre_block(
			struct sock *sk,
			struct vmci_transport_send_notify_data *data)
{
	/* Notify our peer that we are waiting for room to write. */
	if (!send_waiting_write(sk, 1))
		return -EHOSTUNREACH;

	return 0;
}

static int
vmci_transport_notify_pkt_send_pre_enqueue(
			struct sock *sk,
			struct vmci_transport_send_notify_data *data)
{
	struct vsock_sock *vsk = vsock_sk(sk);

#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
	vmci_qpair_get_produce_indexes(vmci_trans(vsk)->qpair,
				       &data->produce_tail,
				       &data->consume_head);
#endif

	return 0;
}

static int
vmci_transport_notify_pkt_send_post_enqueue(
			struct sock *sk,
			ssize_t written,
			struct vmci_transport_send_notify_data *data)
{
	int err = 0;
	struct vsock_sock *vsk;
	bool sent_wrote = false;
	int retries = 0;

	vsk = vsock_sk(sk);

#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
	/* Detect a wrap-around to maintain queue generation. Note that this
	 * is safe since we hold the socket lock across the two queue pair
	 * operations.
	 */
	if (written >= vmci_trans(vsk)->produce_size - data->produce_tail)
		PKT_FIELD(vsk, produce_q_generation)++;
#endif

	if (vmci_transport_notify_waiting_read(vsk)) {
		/* Notify the peer that we have written, retrying the send on
		 * failure up to our maximum value. See the XXX comment for the
		 * corresponding piece of code in StreamRecvmsg() for potential
		 * improvements.
		 */
		while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
		       !sent_wrote &&
		       retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
			err = vmci_transport_send_wrote(sk);
			if (err >= 0)
				sent_wrote = true;

			retries++;
		}

		if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
			pr_err("%p unable to send wrote notify to peer\n", sk);
			return err;
		} else {
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
			PKT_FIELD(vsk, peer_waiting_read) = false;
#endif
		}
	}
	return err;
}
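
/* Dispatch an incoming notification control packet to the handler that
 * matches its type.
 */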
static void
vmci_transport_notify_pkt_handle_pkt(
			struct sock *sk,
			struct vmci_transport_packet *pkt,
			bool bottom_half,
			struct sockaddr_vm *dst,
			struct sockaddr_vm *src, bool *pkt_processed)
{
	bool processed = false;

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
		vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src);
		processed = true;
		break;
	case VMCI_TRANSPORT_PACKET_TYPE_READ:
		vmci_transport_handle_read(sk, pkt, bottom_half, dst, src);
		processed = true;
		break;
	case VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE:
		vmci_transport_handle_waiting_write(sk, pkt, bottom_half,
						    dst, src);
		processed = true;
		break;
	case VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ:
		vmci_transport_handle_waiting_read(sk, pkt, bottom_half,
						   dst, src);
		processed = true;
		break;
	}

	if (pkt_processed)
		*pkt_processed = processed;
}
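
/* Once the queue pair size is known, open the write notify window up to the
 * full consume queue and clamp the minimum window so it does not exceed the
 * queue size.
 */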
static void vmci_transport_notify_pkt_process_request(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
	if (vmci_trans(vsk)->consume_size <
		PKT_FIELD(vsk, write_notify_min_window))
		PKT_FIELD(vsk, write_notify_min_window) =
			vmci_trans(vsk)->consume_size;
}

static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
	if (vmci_trans(vsk)->consume_size <
		PKT_FIELD(vsk, write_notify_min_window))
		PKT_FIELD(vsk, write_notify_min_window) =
			vmci_trans(vsk)->consume_size;
}

/* Socket control packet based operations. */
const struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops = {
	vmci_transport_notify_pkt_socket_init,
	vmci_transport_notify_pkt_socket_destruct,
	vmci_transport_notify_pkt_poll_in,
	vmci_transport_notify_pkt_poll_out,
	vmci_transport_notify_pkt_handle_pkt,
	vmci_transport_notify_pkt_recv_init,
	vmci_transport_notify_pkt_recv_pre_block,
	vmci_transport_notify_pkt_recv_pre_dequeue,
	vmci_transport_notify_pkt_recv_post_dequeue,
	vmci_transport_notify_pkt_send_init,
	vmci_transport_notify_pkt_send_pre_block,
	vmci_transport_notify_pkt_send_pre_enqueue,
	vmci_transport_notify_pkt_send_post_enqueue,
	vmci_transport_notify_pkt_process_request,
	vmci_transport_notify_pkt_process_negotiate,
};