/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>

#define	SMBIOD_SLEEP_TIMO	2
#define	SMBIOD_PING_TIMO	60	/* seconds */

#define	SMB_IOD_EVLOCKPTR(iod)	(&((iod)->iod_evlock))
#define	SMB_IOD_EVLOCK(iod)	smb_sl_lock(&((iod)->iod_evlock))
#define	SMB_IOD_EVUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_evlock))

#define	SMB_IOD_RQLOCKPTR(iod)	(&((iod)->iod_rqlock))
#define	SMB_IOD_RQLOCK(iod)	smb_sl_lock(&((iod)->iod_rqlock))
#define	SMB_IOD_RQUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_rqlock))

#define	smb_iod_wakeup(iod)	wakeup(&(iod)->iod_flags)

static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static int  smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);
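
/*
 * Mark a request as processed: record the error, bump the reply
 * generation count, and wake up anyone sleeping on the request state
 * in smb_iod_waitrq().
 */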
static __inline void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{

	SMBRQ_SLOCK(rqp);
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	wakeup(&rqp->sr_state);
	SMBRQ_SUNLOCK(rqp);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection.
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, ENOTCONN);
	}
	SMB_IOD_RQUNLOCK(iod);
}
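
/*
 * Shut down the transport: disconnect and release the transport-layer
 * state hanging off the virtual circuit, if any.
 */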
static void
smb_iod_closetran(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;

	if (vcp->vc_tdata == NULL)
		return;
	SMB_TRAN_DISCONNECT(vcp, td);
	SMB_TRAN_DONE(vcp, td);
	vcp->vc_tdata = NULL;
}
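
/*
 * Declare the connection dead: close the transport and fail every
 * outstanding request with ENOTCONN.
 */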
static void
smb_iod_dead(struct smbiod *iod)
{

	iod->iod_state = SMBIOD_ST_DEAD;
	smb_iod_closetran(iod);
	smb_iod_invrq(iod);
}
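
/*
 * Bring a connection up from scratch: create the transport, bind to a
 * local address if one was given, connect to the server, then run the
 * SMB negotiate and session setup exchanges.  Any failure sends us to
 * smb_iod_dead().
 */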
static int
smb_iod_connect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	int error;

	SMBIODEBUG("%d\n", iod->iod_state);
	switch (iod->iod_state) {
	case SMBIOD_ST_VCACTIVE:
		SMBERROR("called for already opened connection\n");
		return EISCONN;
	case SMBIOD_ST_DEAD:
		return ENOTCONN;	/* XXX: last error code ? */
	default:
		break;
	}
	vcp->vc_genid++;
	error = 0;

	error = (int)SMB_TRAN_CREATE(vcp, td);
	if (error)
		goto fail;
	SMBIODEBUG("tcreate\n");
	if (vcp->vc_laddr) {
		error = (int)SMB_TRAN_BIND(vcp, vcp->vc_laddr, td);
		if (error)
			goto fail;
	}
	SMBIODEBUG("tbind\n");
	error = (int)SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, td);
	if (error)
		goto fail;
	SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
	iod->iod_state = SMBIOD_ST_TRANACTIVE;
	SMBIODEBUG("tconnect\n");
/*	vcp->vc_mid = 0;*/
	error = (int)smb_smb_negotiate(vcp, &iod->iod_scred);
	if (error)
		goto fail;
	SMBIODEBUG("snegotiate\n");
	error = (int)smb_smb_ssnsetup(vcp, &iod->iod_scred);
	if (error)
		goto fail;
	iod->iod_state = SMBIOD_ST_VCACTIVE;
	SMBIODEBUG("completed\n");
	smb_iod_invrq(iod);
	return (0);

 fail:
	smb_iod_dead(iod);
	return (error);
}
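
/*
 * Tear a connection down: close the session if it is still active,
 * forget the SMB UID, and drop back to SMBIOD_ST_NOTCONN.
 */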
static int
smb_iod_disconnect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;

	SMBIODEBUG("\n");
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &iod->iod_scred);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
	smb_iod_closetran(iod);
	iod->iod_state = SMBIOD_ST_NOTCONN;
	return 0;
}
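
/*
 * (Re)connect a share.  If the VC itself is down, attempt a full
 * reconnect first, then redo the tree connect.  SMBS_RECONNECTING is
 * set around the exchange so other threads can tell a reconnect is in
 * progress, and waiters on ss_vcgenid are woken when it finishes.
 */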
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
	int error;

	if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
		if (iod->iod_state != SMBIOD_ST_DEAD)
			return ENOTCONN;
		iod->iod_state = SMBIOD_ST_RECONNECT;
		error = smb_iod_connect(iod);
		if (error)
			return error;
	}
	SMBIODEBUG("tree reconnect\n");
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags |= SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	error = smb_smb_treeconnect(ssp, &iod->iod_scred);
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags &= ~SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	wakeup(&ssp->ss_vcgenid);
	return error;
}
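
/*
 * Try to put one request on the wire.  On the first attempt the TID
 * and UID words in the header are filled in and the packet is signed
 * if the connection requires it; after more than five attempts the
 * request is failed back to its owner with SMBR_RESTART set.
 */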
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
	struct thread *td = iod->iod_td;
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_share *ssp = rqp->sr_share;
	struct mbuf *m;
	int error;

	SMBIODEBUG("iod_state = %d\n", iod->iod_state);
	switch (iod->iod_state) {
	case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN);
		return 0;
	case SMBIOD_ST_DEAD:
		iod->iod_state = SMBIOD_ST_RECONNECT;
		return 0;
	case SMBIOD_ST_RECONNECT:
		return 0;
	default:
		break;
	}
	if (rqp->sr_sendcnt == 0) {
#ifdef movedtoanotherplace
		if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
			return 0;
#endif
		le16enc(rqp->sr_rqtid, ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
		le16enc(rqp->sr_rquid, vcp ? vcp->vc_smbuid : 0);
		mb_fixhdr(&rqp->sr_rq);
		if (vcp->vc_hflags2 & SMB_FLAGS2_SECURITY_SIGNATURE)
			smb_rq_sign(rqp);
	}
	if (rqp->sr_sendcnt++ > 5) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, rqp->sr_lerror);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return ENOTCONN;
	}
	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
	m_dumpm(rqp->sr_rq.mb_top);
	m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_WAITOK);
	error = rqp->sr_lerror = SMB_TRAN_SEND(vcp, m, td);
	if (error == 0) {
		getnanotime(&rqp->sr_timesent);
		iod->iod_lastrqsent = rqp->sr_timesent;
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		return 0;
	}
	/*
	 * Check for fatal errors.
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made.
		 */
		return ENOTCONN;
	}
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR);
	return 0;
}

/*
 * Process incoming packets.
 */
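/*
 * Each received message is matched against the pending-request list by
 * its MID; unmatched responses are dropped.  EWOULDBLOCK from the
 * transport simply means there is nothing more to read right now.
 */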
static int
smb_iod_recvall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	struct smb_rq *rqp;
	struct mbuf *m;
	u_char *hp;
	u_short mid;
	int error;

	switch (iod->iod_state) {
	case SMBIOD_ST_NOTCONN:
	case SMBIOD_ST_DEAD:
	case SMBIOD_ST_RECONNECT:
		return 0;
	default:
		break;
	}
	for (;;) {
		m = NULL;
		error = SMB_TRAN_RECV(vcp, &m, td);
		if (error == EWOULDBLOCK)
			break;
		if (SMB_TRAN_FATAL(vcp, error)) {
			smb_iod_dead(iod);
			break;
		}
		if (error)
			break;
		if (m == NULL) {
			SMBERROR("tran returned NULL without error\n");
			error = EPIPE;
			continue;
		}
		m = m_pullup(m, SMB_HDRLEN);
		if (m == NULL)
			continue;	/* wait for a good packet */
		/*
		 * Now we have an entire, possibly invalid, SMB packet.
		 * Be careful while parsing it.
		 */
		m_dumpm(m);
		hp = mtod(m, u_char *);
		if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
			m_freem(m);
			continue;
		}
		mid = SMB_HDRMID(hp);
		SMBSDEBUG("mid %04x\n", (u_int)mid);
		SMB_IOD_RQLOCK(iod);
		TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
			if (rqp->sr_mid != mid)
				continue;
			SMBRQ_SLOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_SUNLOCK(rqp);
					SMBERROR("duplicate response %d (ignored)\n",
					    mid);
					break;
				}
			}
			SMBRQ_SUNLOCK(rqp);
			smb_iod_rqprocessed(rqp, 0);
			break;
		}
		SMB_IOD_RQUNLOCK(iod);
		if (rqp == NULL) {
			SMBERROR("drop resp with mid %d\n", (u_int)mid);
/*			smb_printrqlist(vcp);*/
			m_freem(m);
		}
	}
	/*
	 * Check for interrupts.
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (smb_td_intr(rqp->sr_cred->scr_td)) {
			smb_iod_rqprocessed(rqp, EINTR);
		}
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}
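
/*
 * Post an event to the iod's event queue and wake the daemon.  With
 * SMBIOD_EV_SYNC set the caller sleeps until the daemon has processed
 * the event and returns its result, as smb_iod_addrq() does when the
 * connection is dead:
 *
 *	error = smb_iod_request(iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC,
 *	    NULL);
 *
 * Without SMBIOD_EV_SYNC the event is fire-and-forget and the daemon
 * frees it after processing.
 */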
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
	struct smbiod_event *evp;
	int error;

	SMBIODEBUG("\n");
	evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
	evp->ev_type = event;
	evp->ev_ident = ident;
	SMB_IOD_EVLOCK(iod);
	STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
	if ((event & SMBIOD_EV_SYNC) == 0) {
		SMB_IOD_EVUNLOCK(iod);
		smb_iod_wakeup(iod);
		return 0;
	}
	smb_iod_wakeup(iod);
	msleep(evp, SMB_IOD_EVLOCKPTR(iod), PWAIT | PDROP, "90evw", 0);
	error = evp->ev_error;
	free(evp, M_SMBIOD);
	return error;
}

/*
 * Place a request in the queue.
 * Requests from smbiod itself have a high priority.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_cred->scr_td != NULL &&
	    rqp->sr_cred->scr_td->td_proc == iod->iod_p) {
		rqp->sr_flags |= SMBR_INTERNAL;
		SMB_IOD_RQLOCK(iod);
		TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		for (;;) {
			if (smb_iod_sendrq(iod, rqp) != 0) {
				smb_iod_dead(iod);
				break;
			}
			/*
			 * We don't need to lock the state field here.
			 */
			if (rqp->sr_state != SMBRQ_NOTSENT)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90sndw", hz);
		}
		if (rqp->sr_lerror)
			smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}
	switch (iod->iod_state) {
	case SMBIOD_ST_NOTCONN:
		return ENOTCONN;
	case SMBIOD_ST_DEAD:
		error = smb_iod_request(vcp->vc_iod,
		    SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
		if (error)
			return error;
		return EXDEV;
	default:
		break;
	}
	SMB_IOD_RQLOCK(iod);
	for (;;) {
		if (vcp->vc_maxmux == 0) {
			SMBERROR("maxmux == 0\n");
			break;
		}
		if (iod->iod_muxcnt < vcp->vc_maxmux)
			break;
		iod->iod_muxwant++;
		msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod),
		    PWAIT, "90mux", 0);
	}
	iod->iod_muxcnt++;
	TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
	SMB_IOD_RQUNLOCK(iod);
	smb_iod_wakeup(iod);
	return 0;
}
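
/*
 * Take a request off the queue.  Non-internal requests may be
 * transiently locked by smb_iod_sendall() (SMBR_XLOCK), so wait for
 * that to clear, then release a mux slot and wake any thread waiting
 * for one.
 */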
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		return 0;
	}
	SMB_IOD_RQLOCK(iod);
	while (rqp->sr_flags & SMBR_XLOCK) {
		rqp->sr_flags |= SMBR_XLOCKWANT;
		msleep(rqp, SMB_IOD_RQLOCKPTR(iod), PWAIT, "90xrm", 0);
	}
	TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
	iod->iod_muxcnt--;
	if (iod->iod_muxwant) {
		iod->iod_muxwant--;
		wakeup(&iod->iod_muxwant);
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}
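
/*
 * Wait for a reply to a request.  Internal requests (issued from the
 * iod thread itself) cannot sleep waiting on the daemon, so they drive
 * the send and receive paths directly until the reply generation count
 * changes.
 */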
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smbiod *iod = rqp->sr_vc->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		for (;;) {
			smb_iod_sendall(iod);
			smb_iod_recvall(iod);
			if (rqp->sr_rpgen != rqp->sr_rplast)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90irq", hz);
		}
		smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}
	SMBRQ_SLOCK(rqp);
	if (rqp->sr_rpgen == rqp->sr_rplast)
		msleep(&rqp->sr_state, SMBRQ_SLOCKPTR(rqp), PWAIT, "90wrq", 0);
	rqp->sr_rplast++;
	SMBRQ_SUNLOCK(rqp);
	error = rqp->sr_lerror;
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		/*
		 * If the request should stay in the list, reinsert it
		 * at the end of the queue so other waiters get a chance
		 * to proceed.
		 */
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
	} else
		smb_iod_removerq(rqp);
	return error;
}
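
/*
 * Walk the request list: push out anything not yet sent, and time out
 * requests that have been on the wire for more than twice the
 * transport's advertised timeout (tstimeout is doubled below).
 */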
static int
smb_iod_sendall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_rq *rqp;
	struct timespec ts, tstimeout;
	int herror;

	herror = 0;
	/*
	 * Loop through the list of requests and send them if possible.
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		switch (rqp->sr_state) {
		case SMBRQ_NOTSENT:
			rqp->sr_flags |= SMBR_XLOCK;
			SMB_IOD_RQUNLOCK(iod);
			herror = smb_iod_sendrq(iod, rqp);
			SMB_IOD_RQLOCK(iod);
			rqp->sr_flags &= ~SMBR_XLOCK;
			if (rqp->sr_flags & SMBR_XLOCKWANT) {
				rqp->sr_flags &= ~SMBR_XLOCKWANT;
				wakeup(rqp);
			}
			break;
		case SMBRQ_SENT:
			SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
			timespecadd(&tstimeout, &tstimeout, &tstimeout);
			getnanotime(&ts);
			timespecsub(&ts, &tstimeout, &ts);
			if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
				smb_iod_rqprocessed(rqp, ETIMEDOUT);
			}
			break;
		default:
			break;
		}
		if (herror)
			break;
	}
	SMB_IOD_RQUNLOCK(iod);
	if (herror == ENOTCONN)
		smb_iod_dead(iod);
	return 0;
}

/*
 * "main" function for the smbiod daemon.
 */
static __inline void
smb_iod_main(struct smbiod *iod)
{
/*	struct smb_vc *vcp = iod->iod_vc;*/
	struct smbiod_event *evp;
/*	struct timespec tsnow;*/
	int error;

	SMBIODEBUG("\n");
	error = 0;
	/*
	 * Check all interesting events.
	 */
	for (;;) {
		SMB_IOD_EVLOCK(iod);
		evp = STAILQ_FIRST(&iod->iod_evlist);
		if (evp == NULL) {
			SMB_IOD_EVUNLOCK(iod);
			break;
		}
		STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
		evp->ev_type |= SMBIOD_EV_PROCESSING;
		SMB_IOD_EVUNLOCK(iod);
		switch (evp->ev_type & SMBIOD_EV_MASK) {
		case SMBIOD_EV_CONNECT:
			iod->iod_state = SMBIOD_ST_RECONNECT;
			evp->ev_error = smb_iod_connect(iod);
			break;
		case SMBIOD_EV_DISCONNECT:
			evp->ev_error = smb_iod_disconnect(iod);
			break;
		case SMBIOD_EV_TREECONNECT:
			evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
			break;
		case SMBIOD_EV_SHUTDOWN:
			iod->iod_flags |= SMBIOD_SHUTDOWN;
			break;
		case SMBIOD_EV_NEWRQ:
			break;
		}
		if (evp->ev_type & SMBIOD_EV_SYNC) {
			SMB_IOD_EVLOCK(iod);
			wakeup(evp);
			SMB_IOD_EVUNLOCK(iod);
		} else
			free(evp, M_SMBIOD);
	}
#if 0
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		getnanotime(&tsnow);
		timespecsub(&tsnow, &iod->iod_pingtimo, &tsnow);
		if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
			smb_smb_echo(vcp, &iod->iod_scred);
		}
	}
#endif
	smb_iod_sendall(iod);
	smb_iod_recvall(iod);
	return;
}
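
/*
 * Body of the per-connection kernel process: run smb_iod_main() until
 * someone posts SMBIOD_EV_SHUTDOWN, then free the iod and exit.  The
 * whole loop still runs under Giant.
 */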
void
smb_iod_thread(void *arg)
{
	struct smbiod *iod = arg;

	mtx_lock(&Giant);

	/*
	 * Here we assume that the thread structure will be the same
	 * for the entire life of the kthread (kproc, to be more
	 * precise).
	 */
	iod->iod_td = curthread;
	smb_makescred(&iod->iod_scred, iod->iod_td, NULL);
	while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
		smb_iod_main(iod);
		SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
		if (iod->iod_flags & SMBIOD_SHUTDOWN)
			break;
		tsleep(&iod->iod_flags, PWAIT, "90idle", iod->iod_sleeptimo);
	}

	/* We can now safely destroy the mutexes and free the iod structure. */
	smb_sl_destroy(&iod->iod_rqlock);
	smb_sl_destroy(&iod->iod_evlock);
	free(iod, M_SMBIOD);
	mtx_unlock(&Giant);
	kproc_exit(0);
}
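
/*
 * Allocate and start the iod for a new virtual circuit, presumably
 * from the VC setup path.  If kproc_create() fails, everything is
 * unwound and the error is returned to the caller.
 */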
int
smb_iod_create(struct smb_vc *vcp)
{
	struct smbiod *iod;
	int error;

	iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
	iod->iod_id = smb_iod_next++;
	iod->iod_state = SMBIOD_ST_NOTCONN;
	iod->iod_vc = vcp;
	iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
	iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
	getnanotime(&iod->iod_lastrqsent);
	vcp->vc_iod = iod;
	smb_sl_init(&iod->iod_rqlock, "90rql");
	TAILQ_INIT(&iod->iod_rqlist);
	smb_sl_init(&iod->iod_evlock, "90evl");
	STAILQ_INIT(&iod->iod_evlist);
	error = kproc_create(smb_iod_thread, iod, &iod->iod_p,
	    RFNOWAIT, 0, "smbiod%d", iod->iod_id);
	if (error) {
		SMBERROR("can't start smbiod: %d", error);
		vcp->vc_iod = NULL;
		smb_sl_destroy(&iod->iod_rqlock);
		smb_sl_destroy(&iod->iod_evlock);
		free(iod, M_SMBIOD);
		return error;
	}
	return 0;
}

int
smb_iod_destroy(struct smbiod *iod)
{
	smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
	return 0;
}

int
smb_iod_init(void)
{
	return 0;
}

int
smb_iod_done(void)
{
	return 0;
}