smb_rq.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768
  1. /*-
  2. * SPDX-License-Identifier: BSD-2-Clause
  3. *
  4. * Copyright (c) 2000-2001 Boris Popov
  5. * All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without
  8. * modification, are permitted provided that the following conditions
  9. * are met:
  10. * 1. Redistributions of source code must retain the above copyright
  11. * notice, this list of conditions and the following disclaimer.
  12. * 2. Redistributions in binary form must reproduce the above copyright
  13. * notice, this list of conditions and the following disclaimer in the
  14. * documentation and/or other materials provided with the distribution.
  15. *
  16. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  17. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  18. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  19. * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  20. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  21. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  22. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  23. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  24. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  25. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  26. * SUCH DAMAGE.
  27. */
  28. #include <sys/param.h>
  29. #include <sys/systm.h>
  30. #include <sys/endian.h>
  31. #include <sys/kernel.h>
  32. #include <sys/malloc.h>
  33. #include <sys/module.h>
  34. #include <sys/proc.h>
  35. #include <sys/lock.h>
  36. #include <sys/sysctl.h>
  37. #include <sys/socket.h>
  38. #include <sys/socketvar.h>
  39. #include <sys/mbuf.h>
  40. #include <netsmb/smb.h>
  41. #include <netsmb/smb_conn.h>
  42. #include <netsmb/smb_rq.h>
  43. #include <netsmb/smb_subr.h>
  44. #include <netsmb/smb_tran.h>
/* Allocation tag for SMB request and TRANS2 request structures. */
static MALLOC_DEFINE(M_SMBRQ, "SMBRQ", "SMB request");

/* Requests are built/parsed with the libmchain mbuf-chain helpers. */
MODULE_DEPEND(netsmb, libmchain, 1, 1, 1);

/* Forward declarations for file-local helpers. */
static int smb_rq_reply(struct smb_rq *rqp);
static int smb_rq_enqueue(struct smb_rq *rqp);
static int smb_rq_getenv(struct smb_connobj *layer,
    struct smb_vc **vcpp, struct smb_share **sspp);
static int smb_rq_new(struct smb_rq *rqp, u_char cmd);
static int smb_t2_reply(struct smb_t2rq *t2p);
  53. int
  54. smb_rq_alloc(struct smb_connobj *layer, u_char cmd, struct smb_cred *scred,
  55. struct smb_rq **rqpp)
  56. {
  57. struct smb_rq *rqp;
  58. int error;
  59. rqp = malloc(sizeof(*rqp), M_SMBRQ, M_WAITOK);
  60. if (rqp == NULL)
  61. return ENOMEM;
  62. error = smb_rq_init(rqp, layer, cmd, scred);
  63. rqp->sr_flags |= SMBR_ALLOCED;
  64. if (error) {
  65. smb_rq_done(rqp);
  66. return error;
  67. }
  68. *rqpp = rqp;
  69. return 0;
  70. }
  71. static char tzero[12];
/*
 * Initialize a request: resolve the VC (and share, if any) from
 * 'layer', verify the credentials may execute requests on them,
 * record the credentials, assign a fresh multiplex ID and build the
 * SMB header for 'cmd'.  Returns 0 or an errno; on failure after
 * smb_sl_init() the caller is expected to run smb_rq_done() (as
 * smb_rq_alloc() does) to release the lock.
 */
int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, u_char cmd,
    struct smb_cred *scred)
{
    int error;

    bzero(rqp, sizeof(*rqp));
    smb_sl_init(&rqp->sr_slock, "srslock");
    /* Find the VC and (optional) share this request targets. */
    error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share);
    if (error)
        return error;
    error = smb_vc_access(rqp->sr_vc, scred, SMBM_EXEC);
    if (error)
        return error;
    if (rqp->sr_share) {
        error = smb_share_access(rqp->sr_share, scred, SMBM_EXEC);
        if (error)
            return error;
    }
    rqp->sr_cred = scred;
    /* MID is used to match the server's reply to this request. */
    rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc);
    return smb_rq_new(rqp, cmd);
}
/*
 * (Re)build the SMB header for this request in rqp->sr_rq: signature,
 * command, status, flags, flags2, the PidHigh/Signature/reserved area,
 * then TID, PID, UID and MID.  The TID and UID slots are reserved
 * (mb_reserve) so they can be patched in just before transmission.
 * Also discards any previous request/reply contents, so this doubles
 * as the reset used for TRANSACTION_SECONDARY packets.
 */
static int
smb_rq_new(struct smb_rq *rqp, u_char cmd)
{
    struct smb_vc *vcp = rqp->sr_vc;
    struct mbchain *mbp = &rqp->sr_rq;
    int error;
    u_int16_t flags2;

    rqp->sr_sendcnt = 0;
    /* Drop any previously built request and stale reply data. */
    mb_done(mbp);
    md_done(&rqp->sr_rp);
    error = mb_init(mbp);
    if (error)
        return error;
    mb_put_mem(mbp, SMB_SIGNATURE, SMB_SIGLEN, MB_MSYSTEM);
    mb_put_uint8(mbp, cmd);
    mb_put_uint32le(mbp, 0);        /* DosError */
    mb_put_uint8(mbp, vcp->vc_hflags);
    flags2 = vcp->vc_hflags2;
    /* TRANSACTION requests are built with ASCII (non-Unicode) names. */
    if (cmd == SMB_COM_TRANSACTION || cmd == SMB_COM_TRANSACTION_SECONDARY)
        flags2 &= ~SMB_FLAGS2_UNICODE;
    /* NEGOTIATE is sent without the signing flag. */
    if (cmd == SMB_COM_NEGOTIATE)
        flags2 &= ~SMB_FLAGS2_SECURITY_SIGNATURE;
    mb_put_uint16le(mbp, flags2);
    if ((flags2 & SMB_FLAGS2_SECURITY_SIGNATURE) == 0) {
        /* PidHigh(2) + Signature(8) + reserved(2): 12 zero bytes. */
        mb_put_mem(mbp, tzero, 12, MB_MSYSTEM);
        rqp->sr_rqsig = NULL;
    } else {
        mb_put_uint16le(mbp, 0 /*scred->sc_p->p_pid >> 16*/);
        /* Signature is filled in at send time; remember its location. */
        rqp->sr_rqsig = (u_int8_t *)mb_reserve(mbp, 8);
        mb_put_uint16le(mbp, 0);
    }
    rqp->sr_rqtid = mb_reserve(mbp, sizeof(u_int16_t));
    mb_put_uint16le(mbp, 1 /*scred->sc_p->p_pid & 0xffff*/);
    rqp->sr_rquid = mb_reserve(mbp, sizeof(u_int16_t));
    mb_put_uint16le(mbp, rqp->sr_mid);
    return 0;
}
  131. void
  132. smb_rq_done(struct smb_rq *rqp)
  133. {
  134. mb_done(&rqp->sr_rq);
  135. md_done(&rqp->sr_rp);
  136. smb_sl_destroy(&rqp->sr_slock);
  137. if (rqp->sr_flags & SMBR_ALLOCED)
  138. free(rqp, M_SMBRQ);
  139. }
  140. /*
  141. * Simple request-reply exchange
  142. */
  143. int
  144. smb_rq_simple(struct smb_rq *rqp)
  145. {
  146. struct smb_vc *vcp = rqp->sr_vc;
  147. int error = EINVAL, i;
  148. for (i = 0; i < SMB_MAXRCN; i++) {
  149. rqp->sr_flags &= ~SMBR_RESTART;
  150. rqp->sr_timo = vcp->vc_timo;
  151. rqp->sr_state = SMBRQ_NOTSENT;
  152. error = smb_rq_enqueue(rqp);
  153. if (error)
  154. return error;
  155. error = smb_rq_reply(rqp);
  156. if (error == 0)
  157. break;
  158. if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) != SMBR_RESTART)
  159. break;
  160. }
  161. return error;
  162. }
/*
 * Hand the request to the iod for transmission.  Requests with no
 * share, or issued under the iod's own credentials, bypass the share
 * state machine.  Otherwise: wait while a reconnect is in progress,
 * trigger a synchronous tree (re)connect when the share is connected
 * but no longer valid, and retry while smb_iod_addrq() returns EXDEV.
 */
static int
smb_rq_enqueue(struct smb_rq *rqp)
{
    struct smb_share *ssp = rqp->sr_share;
    int error;

    if (ssp == NULL || rqp->sr_cred == &rqp->sr_vc->vc_iod->iod_scred) {
        return smb_iod_addrq(rqp);
    }
    for (;;) {
        SMBS_ST_LOCK(ssp);
        if (ssp->ss_flags & SMBS_RECONNECTING) {
            /* PDROP: msleep releases the share lock for us. */
            msleep(&ssp->ss_vcgenid, SMBS_ST_LOCKPTR(ssp),
                PWAIT | PDROP, "90trcn", hz);
            if (smb_td_intr(rqp->sr_cred->scr_td))
                return EINTR;
            continue;	/* re-take the lock and re-check */
        }
        if (smb_share_valid(ssp) || (ssp->ss_flags & SMBS_CONNECTED) == 0) {
            SMBS_ST_UNLOCK(ssp);
        } else {
            SMBS_ST_UNLOCK(ssp);
            /* Connected but stale: reconnect the tree synchronously. */
            error = smb_iod_request(rqp->sr_vc->vc_iod,
                SMBIOD_EV_TREECONNECT | SMBIOD_EV_SYNC, ssp);
            if (error)
                return error;
        }
        error = smb_iod_addrq(rqp);
        if (error != EXDEV)
            break;
    }
    return error;
}
  195. void
  196. smb_rq_wstart(struct smb_rq *rqp)
  197. {
  198. rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof(u_int8_t));
  199. rqp->sr_rq.mb_count = 0;
  200. }
  201. void
  202. smb_rq_wend(struct smb_rq *rqp)
  203. {
  204. if (rqp->sr_wcount == NULL) {
  205. SMBERROR("no wcount\n"); /* actually panic */
  206. return;
  207. }
  208. if (rqp->sr_rq.mb_count & 1)
  209. SMBERROR("odd word count\n");
  210. *rqp->sr_wcount = rqp->sr_rq.mb_count / 2;
  211. }
  212. void
  213. smb_rq_bstart(struct smb_rq *rqp)
  214. {
  215. rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof(u_short));
  216. rqp->sr_rq.mb_count = 0;
  217. }
  218. void
  219. smb_rq_bend(struct smb_rq *rqp)
  220. {
  221. int bcnt;
  222. if (rqp->sr_bcount == NULL) {
  223. SMBERROR("no bcount\n"); /* actually panic */
  224. return;
  225. }
  226. bcnt = rqp->sr_rq.mb_count;
  227. if (bcnt > 0xffff)
  228. SMBERROR("byte count too large (%d)\n", bcnt);
  229. le16enc(rqp->sr_bcount, bcnt);
  230. }
  231. int
  232. smb_rq_intr(struct smb_rq *rqp)
  233. {
  234. if (rqp->sr_flags & SMBR_INTR)
  235. return EINTR;
  236. return smb_td_intr(rqp->sr_cred->scr_td);
  237. }
  238. int
  239. smb_rq_getrequest(struct smb_rq *rqp, struct mbchain **mbpp)
  240. {
  241. *mbpp = &rqp->sr_rq;
  242. return 0;
  243. }
  244. int
  245. smb_rq_getreply(struct smb_rq *rqp, struct mdchain **mbpp)
  246. {
  247. *mbpp = &rqp->sr_rp;
  248. return 0;
  249. }
/*
 * Resolve the VC and (optionally) share that connection object
 * 'layer' belongs to.  A VC layer yields itself and no share; a share
 * layer yields itself plus its parent VC (found by recursing on
 * co_parent).  vcpp/sspp may be NULL when the caller does not need
 * that result.  Returns 0 or EINVAL for detached ("zombie") objects
 * and unknown layer levels.
 */
static int
smb_rq_getenv(struct smb_connobj *layer,
    struct smb_vc **vcpp, struct smb_share **sspp)
{
    struct smb_vc *vcp = NULL;
    struct smb_share *ssp = NULL;
    struct smb_connobj *cp;
    int error = 0;

    switch (layer->co_level) {
    case SMBL_VC:
        vcp = CPTOVC(layer);
        if (layer->co_parent == NULL) {
            /* VC already detached from the connection tree. */
            SMBERROR("zombie VC %s\n", vcp->vc_srvname);
            error = EINVAL;
            break;
        }
        break;
    case SMBL_SHARE:
        ssp = CPTOSS(layer);
        cp = layer->co_parent;
        if (cp == NULL) {
            /* Share already detached from its VC. */
            SMBERROR("zombie share %s\n", ssp->ss_name);
            error = EINVAL;
            break;
        }
        /* Recurse to pick up the parent VC. */
        error = smb_rq_getenv(cp, &vcp, NULL);
        if (error)
            break;
        break;
    default:
        SMBERROR("invalid layer %d passed\n", layer->co_level);
        error = EINVAL;
    }
    /* Results (possibly NULL on error) are stored unconditionally. */
    if (vcpp)
        *vcpp = vcp;
    if (sspp)
        *sspp = ssp;
    return error;
}
/*
 * Wait for reply on the request
 *
 * Blocks in smb_iod_waitrq() until the iod has queued a complete
 * reply, then parses the fixed SMB header out of rqp->sr_rp: the
 * status (32-bit NT status or DOS class/code, depending on
 * SMB_FLAGS2_ERR_STATUS), the reply flags, and the TID/PID/UID/MID
 * echo fields.  NOTE: the intermediate md_get_* return values are
 * deliberately overwritten rather than checked one by one — only the
 * final 'error' (last read, or signature verification) is acted on.
 * Returns that transport error, or the mapped server error 'rperror'.
 */
static int
smb_rq_reply(struct smb_rq *rqp)
{
    struct mdchain *mdp = &rqp->sr_rp;
    u_int32_t tdw;
    u_int8_t tb;
    int error, rperror = 0;

    error = smb_iod_waitrq(rqp);
    if (error)
        return error;
    error = md_get_uint32(mdp, &tdw);	/* protocol signature */
    if (error)
        return error;
    error = md_get_uint8(mdp, &tb);	/* command */
    if (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_ERR_STATUS) {
        /* 32-bit NT status word. */
        error = md_get_uint32le(mdp, &rqp->sr_error);
    } else {
        /* DOS-style error: class, reserved byte, 16-bit code. */
        error = md_get_uint8(mdp, &rqp->sr_errclass);
        error = md_get_uint8(mdp, &tb);
        error = md_get_uint16le(mdp, &rqp->sr_serror);
        if (!error)
            rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
    }
    error = md_get_uint8(mdp, &rqp->sr_rpflags);
    error = md_get_uint16le(mdp, &rqp->sr_rpflags2);
    /* Skip 12 bytes: PidHigh(2) + Signature(8) + reserved(2). */
    error = md_get_uint32(mdp, &tdw);
    error = md_get_uint32(mdp, &tdw);
    error = md_get_uint32(mdp, &tdw);
    error = md_get_uint16le(mdp, &rqp->sr_rptid);
    error = md_get_uint16le(mdp, &rqp->sr_rppid);
    error = md_get_uint16le(mdp, &rqp->sr_rpuid);
    error = md_get_uint16le(mdp, &rqp->sr_rpmid);
    if (error == 0 &&
        (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_SECURITY_SIGNATURE))
        error = smb_rq_verify(rqp);
    SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
        rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid,
        rqp->sr_errclass, rqp->sr_serror);
    return error ? error : rperror;
}
  332. #define ALIGN4(a) (((a) + 3) & ~3)
  333. /*
  334. * TRANS2 request implementation
  335. */
  336. int
  337. smb_t2_alloc(struct smb_connobj *layer, u_short setup, struct smb_cred *scred,
  338. struct smb_t2rq **t2pp)
  339. {
  340. struct smb_t2rq *t2p;
  341. int error;
  342. t2p = malloc(sizeof(*t2p), M_SMBRQ, M_WAITOK);
  343. if (t2p == NULL)
  344. return ENOMEM;
  345. error = smb_t2_init(t2p, layer, setup, scred);
  346. t2p->t2_flags |= SMBT2_ALLOCED;
  347. if (error) {
  348. smb_t2_done(t2p);
  349. return error;
  350. }
  351. *t2pp = t2p;
  352. return 0;
  353. }
  354. int
  355. smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, u_short setup,
  356. struct smb_cred *scred)
  357. {
  358. int error;
  359. bzero(t2p, sizeof(*t2p));
  360. t2p->t2_source = source;
  361. t2p->t2_setupcount = 1;
  362. t2p->t2_setupdata = t2p->t2_setup;
  363. t2p->t2_setup[0] = setup;
  364. t2p->t2_fid = 0xffff;
  365. t2p->t2_cred = scred;
  366. error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
  367. if (error)
  368. return error;
  369. return 0;
  370. }
  371. void
  372. smb_t2_done(struct smb_t2rq *t2p)
  373. {
  374. mb_done(&t2p->t2_tparam);
  375. mb_done(&t2p->t2_tdata);
  376. md_done(&t2p->t2_rparam);
  377. md_done(&t2p->t2_rdata);
  378. if (t2p->t2_flags & SMBT2_ALLOCED)
  379. free(t2p, M_SMBRQ);
  380. }
/*
 * Carve 'count' bytes starting at 'offset' out of reply packet 'mtop'
 * and append them to the accumulator chain 'mdp' (the t2 parameter or
 * data area).  Returns EPROTO when the advertised region does not fit
 * inside the packet, 0 otherwise.
 */
static int
smb_t2_placedata(struct mbuf *mtop, u_int16_t offset, u_int16_t count,
    struct mdchain *mdp)
{
    struct mbuf *m0;
    int len;

    len = m_length(mtop, NULL);
    if (offset + count > len)
        return (EPROTO);
    /* m0 gets everything from 'offset' to the end of the packet... */
    m0 = m_split(mtop, offset, M_WAITOK);
    if (len != offset + count) {
        /* ...then trim its tail so exactly 'count' bytes remain. */
        len -= offset + count;
        m_adj(m0, -len);
    }
    if (mdp->md_top == NULL) {
        md_initm(mdp, m0);
    } else
        m_cat(mdp->md_top, m0);
    return 0;
}
/*
 * Collect the reply (possibly spread over several response packets)
 * for a TRANS/TRANS2 exchange.  Interim responses received before all
 * request fragments were sent (SMBT2_ALLSENT clear) are skipped.
 * Each response advertises its total parameter/data sizes plus the
 * count/offset/displacement of the fragment it carries; fragments are
 * spliced into t2_rparam/t2_rdata until the advertised totals have
 * been received (SMBT2_ALLRECV).  Out-of-order fragments (displacement
 * not matching what we already have) are rejected with EINVAL.
 */
static int
smb_t2_reply(struct smb_t2rq *t2p)
{
    struct mdchain *mdp;
    struct smb_rq *rqp = t2p->t2_rq;
    int error, totpgot, totdgot;
    u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
    u_int16_t tmp, bc, dcount;
    u_int8_t wc;

    error = smb_rq_reply(rqp);
    if (error)
        return error;
    if ((t2p->t2_flags & SMBT2_ALLSENT) == 0) {
        /*
         * this is an interim response, ignore it.
         */
        SMBRQ_SLOCK(rqp);
        md_next_record(&rqp->sr_rp);
        SMBRQ_SUNLOCK(rqp);
        return 0;
    }
    /*
     * Now we have to get all subsequent responses. The CIFS specification
     * says that they can be disordered which is weird.
     * TODO: timo
     */
    totpgot = totdgot = 0;
    /* Totals start "unknown"; clamped down from each response. */
    totpcount = totdcount = 0xffff;
    mdp = &rqp->sr_rp;
    for (;;) {
        m_dumpm(mdp->md_top);
        if ((error = md_get_uint8(mdp, &wc)) != 0)	/* word count */
            break;
        if (wc < 10) {
            /* A T2 response carries at least 10 parameter words. */
            error = ENOENT;
            break;
        }
        if ((error = md_get_uint16le(mdp, &tmp)) != 0)	/* total params */
            break;
        if (totpcount > tmp)
            totpcount = tmp;
        md_get_uint16le(mdp, &tmp);			/* total data */
        if (totdcount > tmp)
            totdcount = tmp;
        if ((error = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
            (error = md_get_uint16le(mdp, &pcount)) != 0 ||
            (error = md_get_uint16le(mdp, &poff)) != 0 ||
            (error = md_get_uint16le(mdp, &pdisp)) != 0)
            break;
        if (pcount != 0 && pdisp != totpgot) {
            SMBERROR("Can't handle disordered parameters %d:%d\n",
                pdisp, totpgot);
            error = EINVAL;
            break;
        }
        if ((error = md_get_uint16le(mdp, &dcount)) != 0 ||
            (error = md_get_uint16le(mdp, &doff)) != 0 ||
            (error = md_get_uint16le(mdp, &ddisp)) != 0)
            break;
        if (dcount != 0 && ddisp != totdgot) {
            SMBERROR("Can't handle disordered data\n");
            error = EINVAL;
            break;
        }
        /* Skip setup count, a reserved byte and the setup words. */
        md_get_uint8(mdp, &wc);
        md_get_uint8(mdp, NULL);
        tmp = wc;
        while (tmp--)
            md_get_uint16(mdp, NULL);
        if ((error = md_get_uint16le(mdp, &bc)) != 0)	/* byte count */
            break;
        /* tmp = SMB_HDRLEN + 1 + 10 * 2 + 2 * wc + 2;*/
        if (dcount) {
            error = smb_t2_placedata(mdp->md_top, doff, dcount,
                &t2p->t2_rdata);
            if (error)
                break;
        }
        if (pcount) {
            error = smb_t2_placedata(mdp->md_top, poff, pcount,
                &t2p->t2_rparam);
            if (error)
                break;
        }
        totpgot += pcount;
        totdgot += dcount;
        if (totpgot >= totpcount && totdgot >= totdcount) {
            /* Everything advertised has arrived. */
            error = 0;
            t2p->t2_flags |= SMBT2_ALLRECV;
            break;
        }
        /*
         * We're done with this reply, look for the next one.
         */
        SMBRQ_SLOCK(rqp);
        md_next_record(&rqp->sr_rp);
        SMBRQ_SUNLOCK(rqp);
        error = smb_rq_reply(rqp);
        if (error)
            break;
    }
    return error;
}
/*
 * Perform a full round of TRANS2 request
 *
 * Builds the primary TRANSACTION/TRANSACTION2 packet (command chosen
 * by whether t_name is set), fitting as much of the parameter and
 * data mbuf chains as vc_txmax allows; sends SECONDARY packets for
 * the remainder; then collects the (possibly multi-packet) reply via
 * smb_t2_reply() and normalizes the received parameter/data chains.
 * On error, a restart request from the lower layer is propagated as
 * SMBT2_RESTART and any partial reply data is dropped.
 */
static int
smb_t2_request_int(struct smb_t2rq *t2p)
{
    struct smb_vc *vcp = t2p->t2_vc;
    struct smb_cred *scred = t2p->t2_cred;
    struct mbchain *mbp;
    struct mdchain *mdp, mbparam, mbdata;
    struct mbuf *m;
    struct smb_rq *rqp;
    int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
    int error, doff, poff, txdcount, txpcount, nmlen;

    /* Measure the outgoing parameter and data chains (16-bit fields). */
    m = t2p->t2_tparam.mb_top;
    if (m) {
        md_initm(&mbparam, m);	/* do not free it! */
        totpcount = m_fixhdr(m);
        if (totpcount > 0xffff)	/* maxvalue for u_short */
            return EINVAL;
    } else
        totpcount = 0;
    m = t2p->t2_tdata.mb_top;
    if (m) {
        md_initm(&mbdata, m);	/* do not free it! */
        totdcount = m_fixhdr(m);
        if (totdcount > 0xffff)
            return EINVAL;
    } else
        totdcount = 0;
    leftdcount = totdcount;
    leftpcount = totpcount;
    txmax = vcp->vc_txmax;
    /* Named transactions use TRANSACTION, unnamed use TRANSACTION2. */
    error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
        SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
    if (error)
        return error;
    rqp->sr_flags |= SMBR_MULTIPACKET;
    t2p->t2_rq = rqp;
    rqp->sr_t2 = t2p;
    mbp = &rqp->sr_rq;
    smb_rq_wstart(rqp);
    mb_put_uint16le(mbp, totpcount);
    mb_put_uint16le(mbp, totdcount);
    mb_put_uint16le(mbp, t2p->t2_maxpcount);
    mb_put_uint16le(mbp, t2p->t2_maxdcount);
    mb_put_uint8(mbp, t2p->t2_maxscount);
    mb_put_uint8(mbp, 0);			/* reserved */
    mb_put_uint16le(mbp, 0);			/* flags */
    mb_put_uint32le(mbp, 0);			/* Timeout */
    mb_put_uint16le(mbp, 0);			/* reserved 2 */
    len = mb_fixhdr(mbp);
    /*
     * now we have known packet size as
     * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1),
     * and need to decide which parts should go into the first request
     */
    nmlen = t2p->t_name ? strlen(t2p->t_name) : 0;
    len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmlen + 1);
    if (len + leftpcount > txmax) {
        /* Parameters alone overflow: send no data in this packet. */
        txpcount = min(leftpcount, txmax - len);
        poff = len;
        txdcount = 0;
        doff = 0;
    } else {
        /* All parameters fit; fill the rest with data. */
        txpcount = leftpcount;
        poff = txpcount ? len : 0;
        len = ALIGN4(len + txpcount);
        txdcount = min(leftdcount, txmax - len);
        doff = txdcount ? len : 0;
    }
    leftpcount -= txpcount;
    leftdcount -= txdcount;
    mb_put_uint16le(mbp, txpcount);
    mb_put_uint16le(mbp, poff);
    mb_put_uint16le(mbp, txdcount);
    mb_put_uint16le(mbp, doff);
    mb_put_uint8(mbp, t2p->t2_setupcount);
    mb_put_uint8(mbp, 0);
    for (i = 0; i < t2p->t2_setupcount; i++)
        mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
    smb_rq_wend(rqp);
    smb_rq_bstart(rqp);
    /* TDUNICODE */
    if (t2p->t_name)
        mb_put_mem(mbp, t2p->t_name, nmlen, MB_MSYSTEM);
    mb_put_uint8(mbp, 0);	/* terminating zero */
    len = mb_fixhdr(mbp);
    if (txpcount) {
        /* Pad to 4-byte alignment, then splice the parameter bytes. */
        mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
        error = md_get_mbuf(&mbparam, txpcount, &m);
        SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
        if (error)
            goto freerq;
        mb_put_mbuf(mbp, m);
    }
    len = mb_fixhdr(mbp);
    if (txdcount) {
        mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
        error = md_get_mbuf(&mbdata, txdcount, &m);
        if (error)
            goto freerq;
        mb_put_mbuf(mbp, m);
    }
    smb_rq_bend(rqp);	/* incredible, but thats it... */
    error = smb_rq_enqueue(rqp);
    if (error)
        goto freerq;
    if (leftpcount == 0 && leftdcount == 0)
        t2p->t2_flags |= SMBT2_ALLSENT;
    error = smb_t2_reply(t2p);
    if (error)
        goto bad;
    /* Send SECONDARY packets carrying the remaining params/data. */
    while (leftpcount || leftdcount) {
        t2p->t2_flags |= SMBT2_SECONDARY;
        error = smb_rq_new(rqp, t2p->t_name ?
            SMB_COM_TRANSACTION_SECONDARY : SMB_COM_TRANSACTION2_SECONDARY);
        if (error)
            goto bad;
        mbp = &rqp->sr_rq;
        smb_rq_wstart(rqp);
        mb_put_uint16le(mbp, totpcount);
        mb_put_uint16le(mbp, totdcount);
        len = mb_fixhdr(mbp);
        /*
         * now we have known packet size as
         * ALIGN4(len + 7 * 2 + 2) for T2 request, and -2 for T one,
         * and need to decide which parts should go into request
         */
        len = ALIGN4(len + 6 * 2 + 2);
        if (t2p->t_name == NULL)
            len += 2;	/* room for the FID word below */
        if (len + leftpcount > txmax) {
            txpcount = min(leftpcount, txmax - len);
            poff = len;
            txdcount = 0;
            doff = 0;
        } else {
            txpcount = leftpcount;
            poff = txpcount ? len : 0;
            len = ALIGN4(len + txpcount);
            txdcount = min(leftdcount, txmax - len);
            doff = txdcount ? len : 0;
        }
        mb_put_uint16le(mbp, txpcount);
        mb_put_uint16le(mbp, poff);
        mb_put_uint16le(mbp, totpcount - leftpcount);	/* displacement */
        mb_put_uint16le(mbp, txdcount);
        mb_put_uint16le(mbp, doff);
        mb_put_uint16le(mbp, totdcount - leftdcount);	/* displacement */
        leftpcount -= txpcount;
        leftdcount -= txdcount;
        if (t2p->t_name == NULL)
            mb_put_uint16le(mbp, t2p->t2_fid);
        smb_rq_wend(rqp);
        smb_rq_bstart(rqp);
        mb_put_uint8(mbp, 0);	/* name */
        len = mb_fixhdr(mbp);
        if (txpcount) {
            mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
            error = md_get_mbuf(&mbparam, txpcount, &m);
            if (error)
                goto bad;
            mb_put_mbuf(mbp, m);
        }
        len = mb_fixhdr(mbp);
        if (txdcount) {
            mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
            error = md_get_mbuf(&mbdata, txdcount, &m);
            if (error)
                goto bad;
            mb_put_mbuf(mbp, m);
        }
        smb_rq_bend(rqp);
        rqp->sr_state = SMBRQ_NOTSENT;
        error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_NEWRQ, NULL);
        if (error)
            goto bad;
    }	/* while left params or data */
    t2p->t2_flags |= SMBT2_ALLSENT;
    /* Turn the accumulated reply fragments into parseable mdchains. */
    mdp = &t2p->t2_rdata;
    if (mdp->md_top) {
        m_fixhdr(mdp->md_top);
        md_initm(mdp, mdp->md_top);
    }
    mdp = &t2p->t2_rparam;
    if (mdp->md_top) {
        m_fixhdr(mdp->md_top);
        md_initm(mdp, mdp->md_top);
    }
bad:
    smb_iod_removerq(rqp);
freerq:
    if (error) {
        if (rqp->sr_flags & SMBR_RESTART)
            t2p->t2_flags |= SMBT2_RESTART;
        md_done(&t2p->t2_rparam);
        md_done(&t2p->t2_rdata);
    }
    smb_rq_done(rqp);
    return error;
}
  706. int
  707. smb_t2_request(struct smb_t2rq *t2p)
  708. {
  709. int error = EINVAL, i;
  710. for (i = 0; i < SMB_MAXRCN; i++) {
  711. t2p->t2_flags &= ~SMBR_RESTART;
  712. error = smb_t2_request_int(t2p);
  713. if (error == 0)
  714. break;
  715. if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) != SMBT2_RESTART)
  716. break;
  717. }
  718. return error;
  719. }