  1. /* $OpenBSD: sd.c,v 1.261 2015/06/07 19:13:27 krw Exp $ */
  2. /* $NetBSD: sd.c,v 1.111 1997/04/02 02:29:41 mycroft Exp $ */
  3. /*-
  4. * Copyright (c) 1998 The NetBSD Foundation, Inc.
  5. * All rights reserved.
  6. *
  7. * This code is derived from software contributed to The NetBSD Foundation
  8. * by Charles M. Hannum.
  9. *
  10. * Redistribution and use in source and binary forms, with or without
  11. * modification, are permitted provided that the following conditions
  12. * are met:
  13. * 1. Redistributions of source code must retain the above copyright
  14. * notice, this list of conditions and the following disclaimer.
  15. * 2. Redistributions in binary form must reproduce the above copyright
  16. * notice, this list of conditions and the following disclaimer in the
  17. * documentation and/or other materials provided with the distribution.
  18. *
  19. * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  20. * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  21. * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  22. * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  23. * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  24. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  25. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  26. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  27. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  28. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  29. * POSSIBILITY OF SUCH DAMAGE.
  30. */
  31. /*
  32. * Originally written by Julian Elischer (julian@dialix.oz.au)
  33. * for TRW Financial Systems for use under the MACH(2.5) operating system.
  34. *
  35. * TRW Financial Systems, in accordance with their agreement with Carnegie
  36. * Mellon University, makes this software available to CMU to distribute
  37. * or use in any manner that they see fit as long as this message is kept with
  38. * the software. For this reason TFS also grants any other persons or
  39. * organisations permission to use or modify this software.
  40. *
  41. * TFS supplies this software to be publicly redistributed
  42. * on the understanding that TFS is not responsible for the correct
  43. * functioning of this software in any circumstances.
  44. *
  45. * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
  46. */
  47. #include <sys/stdint.h>
  48. #include <sys/types.h>
  49. #include <sys/param.h>
  50. #include <sys/systm.h>
  51. #include <sys/timeout.h>
  52. #include <sys/file.h>
  53. #include <sys/stat.h>
  54. #include <sys/ioctl.h>
  55. #include <sys/mtio.h>
  56. #include <sys/mutex.h>
  57. #include <sys/buf.h>
  58. #include <sys/uio.h>
  59. #include <sys/malloc.h>
  60. #include <sys/pool.h>
  61. #include <sys/errno.h>
  62. #include <sys/device.h>
  63. #include <sys/disklabel.h>
  64. #include <sys/disk.h>
  65. #include <sys/conf.h>
  66. #include <sys/scsiio.h>
  67. #include <sys/dkio.h>
  68. #include <sys/reboot.h>
  69. #include <scsi/scsi_all.h>
  70. #include <scsi/scsi_disk.h>
  71. #include <scsi/scsiconf.h>
  72. #include <scsi/sdvar.h>
  73. #include <ufs/ffs/fs.h> /* for BBSIZE and SBSIZE */
  74. #include <sys/vnode.h>
/* Autoconf interface. */
int	sdmatch(struct device *, void *, void *);
void	sdattach(struct device *, struct device *, void *);
int	sdactivate(struct device *, int);
int	sddetach(struct device *, int);

/* Transfer sizing and disklabel helpers. */
void	sdminphys(struct buf *);
int	sdgetdisklabel(dev_t, struct sd_softc *, struct disklabel *, int);

/* I/O path. */
void	sdstart(struct scsi_xfer *);
int	sd_interpret_sense(struct scsi_xfer *);

/* Capacity/parameter discovery (READ CAPACITY variants, VPD pages, thin). */
int	sd_read_cap_10(struct sd_softc *, int);
int	sd_read_cap_16(struct sd_softc *, int);
int	sd_size(struct sd_softc *, int);
int	sd_thin_pages(struct sd_softc *, int);
int	sd_vpd_block_limits(struct sd_softc *, int);
int	sd_vpd_thin(struct sd_softc *, int);
int	sd_thin_params(struct sd_softc *, int);
int	sd_get_parms(struct sd_softc *, struct disk_parms *, int);

/* Cache flush and ioctl helpers. */
void	sd_flush(struct sd_softc *, int);
void	viscpy(u_char *, u_char *, int);
int	sd_ioctl_inquiry(struct sd_softc *, struct dk_inquiry *);
int	sd_ioctl_cache(struct sd_softc *, long, struct dk_cache *);

/* CDB builders for 6/10/12/16-byte READ/WRITE commands, plus completion. */
void	sd_cmd_rw6(struct scsi_xfer *, int, u_int64_t, u_int);
void	sd_cmd_rw10(struct scsi_xfer *, int, u_int64_t, u_int);
void	sd_cmd_rw12(struct scsi_xfer *, int, u_int64_t, u_int);
void	sd_cmd_rw16(struct scsi_xfer *, int, u_int64_t, u_int);
void	sd_buf_done(struct scsi_xfer *);
/* Autoconf attachment glue: softc size plus match/attach/detach/activate. */
struct cfattach sd_ca = {
	sizeof(struct sd_softc), sdmatch, sdattach,
	sddetach, sdactivate
};
/* Driver definition: device name "sd", classed as a disk. */
struct cfdriver sd_cd = {
	NULL, "sd", DV_DISK
};
/*
 * Inquiry patterns this driver claims: direct-access, reduced-block-command
 * and optical device types, both fixed and removable. The empty
 * vendor/product/revision strings match any device of that type.
 */
const struct scsi_inquiry_pattern sd_patterns[] = {
	{T_DIRECT, T_FIXED,
	 "", "", ""},
	{T_DIRECT, T_REMOV,
	 "", "", ""},
	{T_RDIRECT, T_FIXED,
	 "", "", ""},
	{T_RDIRECT, T_REMOV,
	 "", "", ""},
	{T_OPTICAL, T_FIXED,
	 "", "", ""},
	{T_OPTICAL, T_REMOV,
	 "", "", ""},
};
  121. #define sdlookup(unit) (struct sd_softc *)disk_lookup(&sd_cd, (unit))
  122. int
  123. sdmatch(struct device *parent, void *match, void *aux)
  124. {
  125. struct scsi_attach_args *sa = aux;
  126. int priority;
  127. (void)scsi_inqmatch(sa->sa_inqbuf,
  128. sd_patterns, nitems(sd_patterns),
  129. sizeof(sd_patterns[0]), &priority);
  130. return (priority);
  131. }
/*
 * The routine called by the low level scsi routine when it discovers
 * a device suitable for this driver.
 */
void
sdattach(struct device *parent, struct device *self, void *aux)
{
	struct sd_softc *sc = (struct sd_softc *)self;
	struct scsi_attach_args *sa = aux;
	struct disk_parms *dp = &sc->params;
	struct scsi_link *sc_link = sa->sa_sc_link;
	int sd_autoconf = scsi_autoconf | SCSI_SILENT |
	    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE;
	struct dk_cache dkc;
	int error, result, sortby = BUFQ_DEFAULT;

	SC_DEBUG(sc_link, SDEV_DB2, ("sdattach:\n"));

	/*
	 * Store information needed to contact our base driver
	 */
	sc->sc_link = sc_link;
	sc_link->interpret_sense = sd_interpret_sense;
	sc_link->device_softc = sc;

	/* NOTE(review): removable ATAPI devices get SYNC CACHE disabled. */
	if ((sc_link->flags & SDEV_ATAPI) && (sc_link->flags & SDEV_REMOVABLE))
		sc_link->quirks |= SDEV_NOSYNCCACHE;

	/*
	 * Without the RelAdr inquiry bit, restrict to 10/12/16-byte cdbs;
	 * sdstart() skips sd_cmd_rw6() when SDEV_ONLYBIG is set.
	 */
	if (!(sc_link->inqdata.flags & SID_RelAdr))
		sc_link->quirks |= SDEV_ONLYBIG;

	/*
	 * Note if this device is ancient. This is used in sdminphys().
	 */
	if (!(sc_link->flags & SDEV_ATAPI) &&
	    SCSISPC(sa->sa_inqbuf->version) == 0)
		sc->flags |= SDF_ANCIENT;

	/*
	 * Use the subdriver to request information regarding
	 * the drive. We cannot use interrupts yet, so the
	 * request must specify this.
	 */
	printf("\n");

	scsi_xsh_set(&sc->sc_xsh, sc_link, sdstart);
	timeout_set(&sc->sc_timeout, (void (*)(void *))scsi_xsh_add,
	    &sc->sc_xsh);

	/* Spin up non-UMASS devices ready or not. */
	if ((sc->sc_link->flags & SDEV_UMASS) == 0)
		scsi_start(sc_link, SSS_START, sd_autoconf);

	/*
	 * Some devices (e.g. BlackBerry Pearl) won't admit they have
	 * media loaded unless its been locked in.
	 */
	if ((sc_link->flags & SDEV_REMOVABLE) != 0)
		scsi_prevent(sc_link, PR_PREVENT, sd_autoconf);

	/* Check that it is still responding and ok. */
	error = scsi_test_unit_ready(sc->sc_link, TEST_READY_RETRIES * 3,
	    sd_autoconf);

	if (error)
		result = SDGP_RESULT_OFFLINE;
	else
		result = sd_get_parms(sc, &sc->params, sd_autoconf);

	/* Undo the PREVENT issued above; the disk is not open yet. */
	if ((sc_link->flags & SDEV_REMOVABLE) != 0)
		scsi_prevent(sc_link, PR_ALLOW, sd_autoconf);

	switch (result) {
	case SDGP_RESULT_OK:
		/*
		 * Size in MB == disksize / (1MB / secsize); assumes secsize
		 * divides 1048576 — TODO(review) confirm for odd sector sizes.
		 */
		printf("%s: %lluMB, %lu bytes/sector, %llu sectors",
		    sc->sc_dev.dv_xname,
		    dp->disksize / (1048576 / dp->secsize), dp->secsize,
		    dp->disksize);
		if (ISSET(sc->flags, SDF_THIN)) {
			/* Thin-provisioned: issue I/O strictly in FIFO order. */
			sortby = BUFQ_FIFO;
			printf(", thin");
		}
		if (ISSET(sc_link->flags, SDEV_READONLY)) {
			printf(", readonly");
		}
		printf("\n");
		break;

	case SDGP_RESULT_OFFLINE:
		break;

#ifdef DIAGNOSTIC
	default:
		panic("sdattach: unknown result (%#x) from get_parms", result);
		break;
#endif
	}

	/*
	 * Initialize disk structures.
	 */
	sc->sc_dk.dk_name = sc->sc_dev.dv_xname;
	bufq_init(&sc->sc_bufq, sortby);

	/*
	 * Enable write cache by default.
	 */
	memset(&dkc, 0, sizeof(dkc));
	if (sd_ioctl_cache(sc, DIOCGCACHE, &dkc) == 0 && dkc.wrcache == 0) {
		dkc.wrcache = 1;
		sd_ioctl_cache(sc, DIOCSCACHE, &dkc);
	}

	/* Attach disk. */
	disk_attach(&sc->sc_dev, &sc->sc_dk);
}
/*
 * Handle autoconf power/lifecycle transitions: flush on suspend, flush and
 * optionally stop on powerdown, restart on resume, and mark the softc dying
 * on deactivate so in-flight paths bail out.
 */
int
sdactivate(struct device *self, int act)
{
	struct sd_softc *sc = (struct sd_softc *)self;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		/*
		 * We flush the cache, since our next step before
		 * DVACT_POWERDOWN might be a hibernate operation.
		 */
		if ((sc->flags & SDF_DIRTY) != 0)
			sd_flush(sc, SCSI_AUTOCONF);
		break;
	case DVACT_POWERDOWN:
		/*
		 * Stop the disk. Stopping the disk should flush the
		 * cache, but we are paranoid so we flush the cache
		 * first. We're cold at this point, so we poll for
		 * completion.
		 */
		if ((sc->flags & SDF_DIRTY) != 0)
			sd_flush(sc, SCSI_AUTOCONF);
		if (boothowto & RB_POWERDOWN)
			scsi_start(sc->sc_link, SSS_STOP,
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_NOT_READY | SCSI_AUTOCONF);
		break;
	case DVACT_RESUME:
		scsi_start(sc->sc_link, SSS_START,
		    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_AUTOCONF);
		break;
	case DVACT_DEACTIVATE:
		/* Flag checked by sdopen/sdclose/sdstart/sdioctl/sdstrategy. */
		sc->flags |= SDF_DYING;
		scsi_xsh_del(&sc->sc_xsh);
		break;
	}
	return (rv);
}
/*
 * Detach the device: fail all queued bufs, hide the device from new opens,
 * then tear down the buf queue and disk framework state.
 */
int
sddetach(struct device *self, int flags)
{
	struct sd_softc *sc = (struct sd_softc *)self;

	/* Complete pending bufs with an error before the queue goes away. */
	bufq_drain(&sc->sc_bufq);

	disk_gone(sdopen, self->dv_unit);

	/* Detach disk. */
	bufq_destroy(&sc->sc_bufq);
	disk_detach(&sc->sc_dk);

	return (0);
}
/*
 * Open the device. Make sure the partition info is as up-to-date as can be.
 * On first open (dk_openmask == 0): spin the disk up, lock removable media
 * in, verify readiness, and (re)load device parameters and the disklabel.
 * Raw-partition character opens are allowed to proceed even when the device
 * is not ready, so tools can talk to broken disks.
 */
int
sdopen(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct scsi_link *sc_link;
	struct sd_softc *sc;
	int error = 0, part, rawopen, unit;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);

	rawopen = (part == RAW_PART) && (fmt == S_IFCHR);

	sc = sdlookup(unit);
	if (sc == NULL)
		return (ENXIO);
	sc_link = sc->sc_link;

	if (sc->flags & SDF_DYING) {
		device_unref(&sc->sc_dev);
		return (ENXIO);
	}
	/* Refuse write opens of read-only devices. */
	if (ISSET(flag, FWRITE) && ISSET(sc_link->flags, SDEV_READONLY)) {
		device_unref(&sc->sc_dev);
		return (EACCES);
	}

	SC_DEBUG(sc_link, SDEV_DB1,
	    ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
	    sd_cd.cd_ndevs, part));

	if ((error = disk_lock(&sc->sc_dk)) != 0) {
		device_unref(&sc->sc_dev);
		return (error);
	}

	if (sc->sc_dk.dk_openmask != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens of non-raw partition.
		 */
		if ((sc_link->flags & SDEV_MEDIA_LOADED) == 0) {
			if (rawopen)
				goto out;
			error = EIO;
			goto bad;
		}
	} else {
		/* Spin up non-UMASS devices ready or not. */
		if ((sc->sc_link->flags & SDEV_UMASS) == 0)
			scsi_start(sc_link, SSS_START, (rawopen ? SCSI_SILENT :
			    0) | SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_MEDIA_CHANGE);

		/* Use sd_interpret_sense() for sense errors.
		 *
		 * But only after spinning the disk up! Just in case a broken
		 * device returns "Initialization command required." and causes
		 * a loop of scsi_start() calls.
		 */
		sc_link->flags |= SDEV_OPEN;

		/*
		 * Try to prevent the unloading of a removable device while
		 * it's open. But allow the open to proceed if the device can't
		 * be locked in.
		 */
		if ((sc_link->flags & SDEV_REMOVABLE) != 0) {
			scsi_prevent(sc_link, PR_PREVENT, SCSI_SILENT |
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_MEDIA_CHANGE);
		}

		/* Check that it is still responding and ok. */
		error = scsi_test_unit_ready(sc_link,
		    TEST_READY_RETRIES, SCSI_SILENT |
		    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE);

		if (error) {
			if (rawopen) {
				error = 0;
				goto out;
			} else
				goto bad;
		}

		/* Load the physical device parameters. */
		sc_link->flags |= SDEV_MEDIA_LOADED;
		if (sd_get_parms(sc, &sc->params, (rawopen ? SCSI_SILENT : 0))
		    == SDGP_RESULT_OFFLINE) {
			sc_link->flags &= ~SDEV_MEDIA_LOADED;
			error = ENXIO;
			goto bad;
		}
		SC_DEBUG(sc_link, SDEV_DB3, ("Params loaded\n"));

		/* Load the partition info if not already loaded. */
		if (sdgetdisklabel(dev, sc, sc->sc_dk.dk_label, 0) == EIO) {
			error = EIO;
			goto bad;
		}
		SC_DEBUG(sc_link, SDEV_DB3, ("Disklabel loaded\n"));
	}

out:
	if ((error = disk_openpart(&sc->sc_dk, part, fmt, 1)) != 0)
		goto bad;

	SC_DEBUG(sc_link, SDEV_DB3, ("open complete\n"));

	/* It's OK to fall through because dk_openmask is now non-zero. */
bad:
	/* On failure with no other opens, undo the PREVENT and open state. */
	if (sc->sc_dk.dk_openmask == 0) {
		if ((sc->sc_link->flags & SDEV_REMOVABLE) != 0)
			scsi_prevent(sc_link, PR_ALLOW, SCSI_SILENT |
			    SCSI_IGNORE_ILLEGAL_REQUEST |
			    SCSI_IGNORE_MEDIA_CHANGE);
		sc_link->flags &= ~(SDEV_OPEN | SDEV_MEDIA_LOADED);
	}

	disk_unlock(&sc->sc_dk);
	device_unref(&sc->sc_dev);
	return (error);
}
  389. /*
  390. * Close the device. Only called if we are the last occurrence of an open
  391. * device. Convenient now but usually a pain.
  392. */
  393. int
  394. sdclose(dev_t dev, int flag, int fmt, struct proc *p)
  395. {
  396. struct sd_softc *sc;
  397. int part = DISKPART(dev);
  398. sc = sdlookup(DISKUNIT(dev));
  399. if (sc == NULL)
  400. return (ENXIO);
  401. if (sc->flags & SDF_DYING) {
  402. device_unref(&sc->sc_dev);
  403. return (ENXIO);
  404. }
  405. disk_lock_nointr(&sc->sc_dk);
  406. disk_closepart(&sc->sc_dk, part, fmt);
  407. if (sc->sc_dk.dk_openmask == 0) {
  408. if ((sc->flags & SDF_DIRTY) != 0)
  409. sd_flush(sc, 0);
  410. if ((sc->sc_link->flags & SDEV_REMOVABLE) != 0)
  411. scsi_prevent(sc->sc_link, PR_ALLOW,
  412. SCSI_IGNORE_ILLEGAL_REQUEST |
  413. SCSI_IGNORE_NOT_READY | SCSI_SILENT);
  414. sc->sc_link->flags &= ~(SDEV_OPEN | SDEV_MEDIA_LOADED);
  415. if (sc->sc_link->flags & SDEV_EJECTING) {
  416. scsi_start(sc->sc_link, SSS_STOP|SSS_LOEJ, 0);
  417. sc->sc_link->flags &= ~SDEV_EJECTING;
  418. }
  419. timeout_del(&sc->sc_timeout);
  420. scsi_xsh_del(&sc->sc_xsh);
  421. }
  422. disk_unlock(&sc->sc_dk);
  423. device_unref(&sc->sc_dev);
  424. return 0;
  425. }
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand. The transfer is described by a buf and will include
 * only one physical transfer.
 */
void
sdstrategy(struct buf *bp)
{
	struct sd_softc *sc;
	int s;

	sc = sdlookup(DISKUNIT(bp->b_dev));
	if (sc == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}
	if (sc->flags & SDF_DYING) {
		bp->b_error = ENXIO;
		goto bad;
	}

	SC_DEBUG(sc->sc_link, SDEV_DB2, ("sdstrategy: %ld bytes @ blk %lld\n",
	    bp->b_bcount, (long long)bp->b_blkno));
	/*
	 * If the device has been made invalid, error out:
	 * EIO while still open, ENODEV once fully closed.
	 */
	if ((sc->sc_link->flags & SDEV_MEDIA_LOADED) == 0) {
		if (sc->sc_link->flags & SDEV_OPEN)
			bp->b_error = EIO;
		else
			bp->b_error = ENODEV;
		goto bad;
	}

	/* Validate the request. -1 means "done already" (error or zero-length). */
	if (bounds_check_with_label(bp, sc->sc_dk.dk_label) == -1)
		goto done;

	/* Place it in the queue of disk activities for this disk. */
	bufq_queue(&sc->sc_bufq, bp);

	/*
	 * Tell the device to get going on the transfer if it's
	 * not doing anything, otherwise just wait for completion
	 */
	scsi_xsh_add(&sc->sc_xsh);

	device_unref(&sc->sc_dev);
	return;

bad:
	bp->b_flags |= B_ERROR;
	bp->b_resid = bp->b_bcount;
done:
	s = splbio();
	biodone(bp);
	splx(s);
	/* sc may be NULL when the initial lookup failed. */
	if (sc != NULL)
		device_unref(&sc->sc_dev);
}
  479. void
  480. sd_cmd_rw6(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
  481. {
  482. struct scsi_rw *cmd = (struct scsi_rw *)xs->cmd;
  483. cmd->opcode = read ? READ_COMMAND : WRITE_COMMAND;
  484. _lto3b(secno, cmd->addr);
  485. cmd->length = nsecs;
  486. xs->cmdlen = sizeof(*cmd);
  487. }
  488. void
  489. sd_cmd_rw10(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
  490. {
  491. struct scsi_rw_big *cmd = (struct scsi_rw_big *)xs->cmd;
  492. cmd->opcode = read ? READ_BIG : WRITE_BIG;
  493. _lto4b(secno, cmd->addr);
  494. _lto2b(nsecs, cmd->length);
  495. xs->cmdlen = sizeof(*cmd);
  496. }
  497. void
  498. sd_cmd_rw12(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
  499. {
  500. struct scsi_rw_12 *cmd = (struct scsi_rw_12 *)xs->cmd;
  501. cmd->opcode = read ? READ_12 : WRITE_12;
  502. _lto4b(secno, cmd->addr);
  503. _lto4b(nsecs, cmd->length);
  504. xs->cmdlen = sizeof(*cmd);
  505. }
  506. void
  507. sd_cmd_rw16(struct scsi_xfer *xs, int read, u_int64_t secno, u_int nsecs)
  508. {
  509. struct scsi_rw_16 *cmd = (struct scsi_rw_16 *)xs->cmd;
  510. cmd->opcode = read ? READ_16 : WRITE_16;
  511. _lto8b(secno, cmd->addr);
  512. _lto4b(nsecs, cmd->length);
  513. xs->cmdlen = sizeof(*cmd);
  514. }
/*
 * sdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy. If both are true,
 * It dequeues the buf and creates a scsi command to perform the
 * transfer in the buf. The transfer request will call scsi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (sdstrategy)
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 */
void
sdstart(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct sd_softc *sc = link->device_softc;
	struct buf *bp;
	u_int64_t secno;
	int nsecs;
	int read;
	struct partition *p;

	/* Device going away: return the xs unused. */
	if (sc->flags & SDF_DYING) {
		scsi_xs_put(xs);
		return;
	}
	/* Media gone: fail everything still queued. */
	if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
		bufq_drain(&sc->sc_bufq);
		scsi_xs_put(xs);
		return;
	}

	bp = bufq_dequeue(&sc->sc_bufq);
	if (bp == NULL) {
		scsi_xs_put(xs);
		return;
	}

	/* Convert the buf's block address to an absolute device sector. */
	secno = DL_BLKTOSEC(sc->sc_dk.dk_label, bp->b_blkno);
	p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
	secno += DL_GETPOFFSET(p);
	nsecs = howmany(bp->b_bcount, sc->sc_dk.dk_label->d_secsize);
	read = bp->b_flags & B_READ;

	/*
	 * Fill out the scsi command. If the transfer will
	 * fit in a "small" cdb, use it.
	 */
	if (!(link->flags & SDEV_ATAPI) &&
	    !(link->quirks & SDEV_ONLYBIG) &&
	    ((secno & 0x1fffff) == secno) &&
	    ((nsecs & 0xff) == nsecs))
		sd_cmd_rw6(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffff) == nsecs))
		sd_cmd_rw10(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffffffff) == nsecs))
		sd_cmd_rw12(xs, read, secno, nsecs);
	else
		sd_cmd_rw16(xs, read, secno, nsecs);

	xs->flags |= (read ? SCSI_DATA_IN : SCSI_DATA_OUT);
	xs->timeout = 60000;
	xs->data = bp->b_data;
	xs->datalen = bp->b_bcount;
	xs->done = sd_buf_done;
	xs->cookie = bp;
	xs->bp = bp;

	/* Instrumentation. */
	disk_busy(&sc->sc_dk);

	/* Mark disk as dirty. */
	if (!read)
		sc->flags |= SDF_DIRTY;

	scsi_xs_exec(xs);

	/* move onto the next io */
	if (ISSET(sc->flags, SDF_WAITING))
		CLR(sc->flags, SDF_WAITING);
	else if (bufq_peek(&sc->sc_bufq))
		scsi_xsh_add(&sc->sc_xsh);
}
/*
 * Completion callback for buf-backed transfers issued by sdstart().
 * Depending on xs->error: finish the buf, requeue it when the adapter
 * had no resources, retry sense/busy/timeout cases while retries remain,
 * or fail the buf with EIO.
 */
void
sd_buf_done(struct scsi_xfer *xs)
{
	struct sd_softc *sc = xs->sc_link->device_softc;
	struct buf *bp = xs->cookie;
	int error, s;

	switch (xs->error) {
	case XS_NOERROR:
		bp->b_error = 0;
		bp->b_resid = xs->resid;
		break;

	case XS_NO_CCB:
		/* The adapter is busy, requeue the buf and try it later. */
		disk_unbusy(&sc->sc_dk, bp->b_bcount - xs->resid,
		    bp->b_flags & B_READ);
		bufq_requeue(&sc->sc_bufq, bp);
		scsi_xs_put(xs);
		/* SDF_WAITING stops sdstart() from rescheduling itself. */
		SET(sc->flags, SDF_WAITING);
		timeout_add(&sc->sc_timeout, 1);
		return;

	case XS_SENSE:
	case XS_SHORTSENSE:
#ifdef SCSIDEBUG
		scsi_sense_print_debug(xs);
#endif
		error = sd_interpret_sense(xs);
		if (error == 0) {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
			break;
		}
		/* Non-ERESTART errors are final: record and stop retrying. */
		if (error != ERESTART) {
			bp->b_error = error;
			xs->retries = 0;
		}
		goto retry;

	case XS_BUSY:
		if (xs->retries) {
			if (scsi_delay(xs, 1) != ERESTART)
				xs->retries = 0;
		}
		goto retry;

	case XS_TIMEOUT:
retry:
		if (xs->retries--) {
			scsi_xs_exec(xs);
			return;
		}
		/* FALLTHROUGH */

	default:
		/* Retries exhausted or unknown error: fail the buf. */
		if (bp->b_error == 0)
			bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		break;
	}

	disk_unbusy(&sc->sc_dk, bp->b_bcount - xs->resid,
	    bp->b_flags & B_READ);

	s = splbio();
	biodone(bp);
	splx(s);
	scsi_xs_put(xs);
}
/*
 * Clamp a transfer to what the device and adapter can handle, then let
 * the adapter's own minphys routine clamp further.
 */
void
sdminphys(struct buf *bp)
{
	struct sd_softc *sc;
	long max;

	sc = sdlookup(DISKUNIT(bp->b_dev));
	if (sc == NULL)
		return; /* XXX - right way to fail this? */

	/*
	 * If the device is ancient, we want to make sure that
	 * the transfer fits into a 6-byte cdb.
	 *
	 * XXX Note that the SCSI-I spec says that 256-block transfers
	 * are allowed in a 6-byte read/write, and are specified
	 * by setting the "length" to 0. However, we're conservative
	 * here, allowing only 255-block transfers in case an
	 * ancient device gets confused by length == 0. A length of 0
	 * in a 10-byte read/write actually means 0 blocks.
	 */
	if (sc->flags & SDF_ANCIENT) {
		max = sc->sc_dk.dk_label->d_secsize * 0xff;

		if (bp->b_bcount > max)
			bp->b_bcount = max;
	}

	(*sc->sc_link->adapter->scsi_minphys)(bp, sc->sc_link);

	device_unref(&sc->sc_dev);
}
/*
 * Raw character-device read: hand the request to physio(), which builds
 * bufs and pushes them through sdstrategy(), clamped by sdminphys().
 */
int
sdread(dev_t dev, struct uio *uio, int ioflag)
{
	return (physio(sdstrategy, dev, B_READ, sdminphys, uio));
}
/*
 * Raw character-device write: same path as sdread() but in the
 * B_WRITE direction.
 */
int
sdwrite(dev_t dev, struct uio *uio, int ioflag)
{
	return (physio(sdstrategy, dev, B_WRITE, sdminphys, uio));
}
/*
 * Perform special action on behalf of the user
 * Knows about the internals of this device
 *
 * Dispatches disklabel, lock/eject, inquiry and cache ioctls; anything
 * unrecognized on the raw partition is forwarded to the SCSI midlayer.
 */
int
sdioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	struct sd_softc *sc;
	struct disklabel *lp;
	int error = 0;
	int part = DISKPART(dev);

	sc = sdlookup(DISKUNIT(dev));
	if (sc == NULL)
		return (ENXIO);
	if (sc->flags & SDF_DYING) {
		device_unref(&sc->sc_dev);
		return (ENXIO);
	}

	SC_DEBUG(sc->sc_link, SDEV_DB2, ("sdioctl 0x%lx\n", cmd));

	/*
	 * If the device is not valid.. abandon ship
	 * (a few control ioctls are still allowed on the raw partition).
	 */
	if ((sc->sc_link->flags & SDEV_MEDIA_LOADED) == 0) {
		switch (cmd) {
		case DIOCLOCK:
		case DIOCEJECT:
		case SCIOCIDENTIFY:
		case SCIOCCOMMAND:
		case SCIOCDEBUG:
			if (part == RAW_PART)
				break;
			/* FALLTHROUGH */
		default:
			if ((sc->sc_link->flags & SDEV_OPEN) == 0) {
				error = ENODEV;
				goto exit;
			} else {
				error = EIO;
				goto exit;
			}
		}
	}

	switch (cmd) {
	case DIOCRLDINFO:
		/* Re-read the label into a scratch buffer, then install it. */
		lp = malloc(sizeof(*lp), M_TEMP, M_WAITOK);
		sdgetdisklabel(dev, sc, lp, 0);
		memcpy(sc->sc_dk.dk_label, lp, sizeof(*lp));
		free(lp, M_TEMP, sizeof(*lp));
		goto exit;

	case DIOCGPDINFO:
		sdgetdisklabel(dev, sc, (struct disklabel *)addr, 1);
		goto exit;

	case DIOCGDINFO:
		*(struct disklabel *)addr = *(sc->sc_dk.dk_label);
		goto exit;

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sc->sc_dk.dk_label->d_partitions[DISKPART(dev)];
		goto exit;

	case DIOCWDINFO:
	case DIOCSDINFO:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			goto exit;
		}

		if ((error = disk_lock(&sc->sc_dk)) != 0)
			goto exit;

		error = setdisklabel(sc->sc_dk.dk_label,
		    (struct disklabel *)addr, sc->sc_dk.dk_openmask);
		if (error == 0) {
			/* DIOCWDINFO additionally writes the label to disk. */
			if (cmd == DIOCWDINFO)
				error = writedisklabel(DISKLABELDEV(dev),
				    sdstrategy, sc->sc_dk.dk_label);
		}

		disk_unlock(&sc->sc_dk);
		goto exit;

	case DIOCLOCK:
		error = scsi_prevent(sc->sc_link,
		    (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0);
		goto exit;

	case MTIOCTOP:
		if (((struct mtop *)addr)->mt_op != MTOFFL) {
			error = EIO;
			goto exit;
		}
		/* FALLTHROUGH */
	case DIOCEJECT:
		if ((sc->sc_link->flags & SDEV_REMOVABLE) == 0) {
			error = ENOTTY;
			goto exit;
		}
		/* Eject is deferred until last close; see sdclose(). */
		sc->sc_link->flags |= SDEV_EJECTING;
		goto exit;

	case DIOCINQ:
		error = scsi_do_ioctl(sc->sc_link, cmd, addr, flag);
		if (error == ENOTTY)
			error = sd_ioctl_inquiry(sc,
			    (struct dk_inquiry *)addr);
		goto exit;

	case DIOCSCACHE:
		if (!ISSET(flag, FWRITE)) {
			error = EBADF;
			goto exit;
		}
		/* FALLTHROUGH */
	case DIOCGCACHE:
		error = sd_ioctl_cache(sc, cmd, (struct dk_cache *)addr);
		goto exit;

	default:
		if (part != RAW_PART) {
			error = ENOTTY;
			goto exit;
		}
		error = scsi_do_ioctl(sc->sc_link, cmd, addr, flag);
	}

exit:
	device_unref(&sc->sc_dev);
	return (error);
}
  813. int
  814. sd_ioctl_inquiry(struct sd_softc *sc, struct dk_inquiry *di)
  815. {
  816. struct scsi_vpd_serial *vpd;
  817. vpd = dma_alloc(sizeof(*vpd), PR_WAITOK | PR_ZERO);
  818. bzero(di, sizeof(struct dk_inquiry));
  819. scsi_strvis(di->vendor, sc->sc_link->inqdata.vendor,
  820. sizeof(sc->sc_link->inqdata.vendor));
  821. scsi_strvis(di->product, sc->sc_link->inqdata.product,
  822. sizeof(sc->sc_link->inqdata.product));
  823. scsi_strvis(di->revision, sc->sc_link->inqdata.revision,
  824. sizeof(sc->sc_link->inqdata.revision));
  825. /* the serial vpd page is optional */
  826. if (scsi_inquire_vpd(sc->sc_link, vpd, sizeof(*vpd),
  827. SI_PG_SERIAL, 0) == 0)
  828. scsi_strvis(di->serial, vpd->serial, sizeof(vpd->serial));
  829. else
  830. strlcpy(di->serial, "(unknown)", sizeof(vpd->serial));
  831. dma_free(vpd, sizeof(*vpd));
  832. return (0);
  833. }
/*
 * DIOCGCACHE/DIOCSCACHE backend: read or modify the disk's write-cache
 * and read-cache settings via the CACHING mode page.
 */
int
sd_ioctl_cache(struct sd_softc *sc, long cmd, struct dk_cache *dkc)
{
	union scsi_mode_sense_buf *buf;
	struct page_caching_mode *mode = NULL;
	u_int wrcache, rdcache;
	int big;
	int rv;

	/* UMASS devices are excluded from mode page manipulation here. */
	if (ISSET(sc->sc_link->flags, SDEV_UMASS))
		return (EOPNOTSUPP);

	/* see if the adapter has special handling */
	rv = scsi_do_ioctl(sc->sc_link, cmd, (caddr_t)dkc, 0);
	if (rv != ENOTTY)
		return (rv);

	buf = dma_alloc(sizeof(*buf), PR_WAITOK);
	if (buf == NULL)
		return (ENOMEM);

	/*
	 * Fetch the caching mode page. 'big' records which mode sense
	 * variant was used so the matching header can be passed to the
	 * mode select call below.
	 */
	rv = scsi_do_mode_sense(sc->sc_link, PAGE_CACHING_MODE,
	    buf, (void **)&mode, NULL, NULL, NULL,
	    sizeof(*mode) - 4, scsi_autoconf | SCSI_SILENT, &big);
	if (rv != 0)
		goto done;

	if ((mode == NULL) || (!DISK_PGCODE(mode, PAGE_CACHING_MODE))) {
		rv = EIO;
		goto done;
	}

	/* WCE set => write cache on; RCD set => read cache DISABLED. */
	wrcache = (ISSET(mode->flags, PG_CACHE_FL_WCE) ? 1 : 0);
	rdcache = (ISSET(mode->flags, PG_CACHE_FL_RCD) ? 0 : 1);

	switch (cmd) {
	case DIOCGCACHE:
		dkc->wrcache = wrcache;
		dkc->rdcache = rdcache;
		break;

	case DIOCSCACHE:
		/* Nothing to do if the requested state already holds. */
		if (dkc->wrcache == wrcache && dkc->rdcache == rdcache)
			break;

		if (dkc->wrcache)
			SET(mode->flags, PG_CACHE_FL_WCE);
		else
			CLR(mode->flags, PG_CACHE_FL_WCE);

		if (dkc->rdcache)
			CLR(mode->flags, PG_CACHE_FL_RCD);
		else
			SET(mode->flags, PG_CACHE_FL_RCD);

		/* Write the page back using the same variant we sensed. */
		if (big) {
			rv = scsi_mode_select_big(sc->sc_link, SMS_PF,
			    &buf->hdr_big, scsi_autoconf | SCSI_SILENT, 20000);
		} else {
			rv = scsi_mode_select(sc->sc_link, SMS_PF,
			    &buf->hdr, scsi_autoconf | SCSI_SILENT, 20000);
		}
		break;
	}

done:
	dma_free(buf, sizeof(*buf));
	return (rv);
}
  891. /*
  892. * Load the label information on the named device
  893. */
  894. int
  895. sdgetdisklabel(dev_t dev, struct sd_softc *sc, struct disklabel *lp,
  896. int spoofonly)
  897. {
  898. size_t len;
  899. char packname[sizeof(lp->d_packname) + 1];
  900. char product[17], vendor[9];
  901. bzero(lp, sizeof(struct disklabel));
  902. lp->d_secsize = sc->params.secsize;
  903. lp->d_ntracks = sc->params.heads;
  904. lp->d_nsectors = sc->params.sectors;
  905. lp->d_ncylinders = sc->params.cyls;
  906. lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
  907. if (lp->d_secpercyl == 0) {
  908. lp->d_secpercyl = 100;
  909. /* as long as it's not 0 - readdisklabel divides by it */
  910. }
  911. lp->d_type = DTYPE_SCSI;
  912. if ((sc->sc_link->inqdata.device & SID_TYPE) == T_OPTICAL)
  913. strncpy(lp->d_typename, "SCSI optical",
  914. sizeof(lp->d_typename));
  915. else
  916. strncpy(lp->d_typename, "SCSI disk",
  917. sizeof(lp->d_typename));
  918. /*
  919. * Try to fit '<vendor> <product>' into d_packname. If that doesn't fit
  920. * then leave out '<vendor> ' and use only as much of '<product>' as
  921. * does fit.
  922. */
  923. viscpy(vendor, sc->sc_link->inqdata.vendor, 8);
  924. viscpy(product, sc->sc_link->inqdata.product, 16);
  925. len = snprintf(packname, sizeof(packname), "%s %s", vendor, product);
  926. if (len > sizeof(lp->d_packname)) {
  927. strlcpy(packname, product, sizeof(packname));
  928. len = strlen(packname);
  929. }
  930. /*
  931. * It is safe to use len as the count of characters to copy because
  932. * packname is sizeof(lp->d_packname)+1, the string in packname is
  933. * always null terminated and len does not count the terminating null.
  934. * d_packname is not a null terminated string.
  935. */
  936. memcpy(lp->d_packname, packname, len);
  937. DL_SETDSIZE(lp, sc->params.disksize);
  938. lp->d_version = 1;
  939. lp->d_flags = 0;
  940. /* XXX - these values for BBSIZE and SBSIZE assume ffs */
  941. lp->d_bbsize = BBSIZE;
  942. lp->d_sbsize = SBSIZE;
  943. lp->d_magic = DISKMAGIC;
  944. lp->d_magic2 = DISKMAGIC;
  945. lp->d_checksum = dkcksum(lp);
  946. /*
  947. * Call the generic disklabel extraction routine
  948. */
  949. return readdisklabel(DISKLABELDEV(dev), sdstrategy, lp, spoofonly);
  950. }
  951. /*
  952. * Check Errors
  953. */
  954. int
  955. sd_interpret_sense(struct scsi_xfer *xs)
  956. {
  957. struct scsi_sense_data *sense = &xs->sense;
  958. struct scsi_link *sc_link = xs->sc_link;
  959. struct sd_softc *sc = sc_link->device_softc;
  960. u_int8_t serr = sense->error_code & SSD_ERRCODE;
  961. int retval;
  962. /*
  963. * Let the generic code handle everything except a few categories of
  964. * LUN not ready errors on open devices.
  965. */
  966. if (((sc_link->flags & SDEV_OPEN) == 0) ||
  967. (serr != SSD_ERRCODE_CURRENT && serr != SSD_ERRCODE_DEFERRED) ||
  968. ((sense->flags & SSD_KEY) != SKEY_NOT_READY) ||
  969. (sense->extra_len < 6))
  970. return (scsi_interpret_sense(xs));
  971. if ((xs->flags & SCSI_IGNORE_NOT_READY) != 0)
  972. return (0);
  973. switch (ASC_ASCQ(sense)) {
  974. case SENSE_NOT_READY_BECOMING_READY:
  975. SC_DEBUG(sc_link, SDEV_DB1, ("becoming ready.\n"));
  976. retval = scsi_delay(xs, 5);
  977. break;
  978. case SENSE_NOT_READY_INIT_REQUIRED:
  979. SC_DEBUG(sc_link, SDEV_DB1, ("spinning up\n"));
  980. retval = scsi_start(sc->sc_link, SSS_START,
  981. SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_NOSLEEP);
  982. if (retval == 0)
  983. retval = ERESTART;
  984. else if (retval == ENOMEM)
  985. /* Can't issue the command. Fall back on a delay. */
  986. retval = scsi_delay(xs, 5);
  987. else
  988. SC_DEBUG(sc_link, SDEV_DB1, ("spin up failed (%#x)\n",
  989. retval));
  990. break;
  991. default:
  992. retval = scsi_interpret_sense(xs);
  993. break;
  994. }
  995. return (retval);
  996. }
/*
 * Return the size, in DEV_BSIZE blocks, of the partition named by 'dev',
 * or -1 on any failure or if the partition is not of type FS_SWAP.
 * The partition is opened temporarily if it was not already open.
 */
daddr_t
sdsize(dev_t dev)
{
	struct disklabel *lp;
	struct sd_softc *sc;
	int part, omask;
	daddr_t size;

	sc = sdlookup(DISKUNIT(dev));
	if (sc == NULL)
		return -1;
	if (sc->flags & SDF_DYING) {
		size = -1;
		goto exit;
	}

	part = DISKPART(dev);
	omask = sc->sc_dk.dk_openmask & (1 << part);

	/* Open the partition ourselves if nobody has it open. */
	if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0) {
		size = -1;
		goto exit;
	}

	lp = sc->sc_dk.dk_label;
	if ((sc->sc_link->flags & SDEV_MEDIA_LOADED) == 0)
		size = -1;
	else if (lp->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = DL_SECTOBLK(lp, DL_GETPSIZE(&lp->d_partitions[part]));

	/* Close again only if we were the ones who opened it. */
	if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
		size = -1;

exit:
	device_unref(&sc->sc_dev);
	return size;
}
  1030. /* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
  1031. static int sddoingadump;
  1032. /*
  1033. * dump all of physical memory into the partition specified, starting
  1034. * at offset 'dumplo' into the partition.
  1035. */
  1036. int
  1037. sddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
  1038. {
  1039. struct sd_softc *sc; /* disk unit to do the I/O */
  1040. struct disklabel *lp; /* disk's disklabel */
  1041. int unit, part;
  1042. u_int32_t sectorsize; /* size of a disk sector */
  1043. u_int64_t nsects; /* number of sectors in partition */
  1044. u_int64_t sectoff; /* sector offset of partition */
  1045. u_int64_t totwrt; /* total number of sectors left to write */
  1046. u_int32_t nwrt; /* current number of sectors to write */
  1047. struct scsi_xfer *xs; /* ... convenience */
  1048. int rv;
  1049. /* Check if recursive dump; if so, punt. */
  1050. if (sddoingadump)
  1051. return EFAULT;
  1052. if (blkno < 0)
  1053. return EINVAL;
  1054. /* Mark as active early. */
  1055. sddoingadump = 1;
  1056. unit = DISKUNIT(dev); /* Decompose unit & partition. */
  1057. part = DISKPART(dev);
  1058. /* Check for acceptable drive number. */
  1059. if (unit >= sd_cd.cd_ndevs || (sc = sd_cd.cd_devs[unit]) == NULL)
  1060. return ENXIO;
  1061. /*
  1062. * XXX Can't do this check, since the media might have been
  1063. * XXX marked `invalid' by successful unmounting of all
  1064. * XXX filesystems.
  1065. */
  1066. #if 0
  1067. /* Make sure it was initialized. */
  1068. if ((sc->sc_link->flags & SDEV_MEDIA_LOADED) != SDEV_MEDIA_LOADED)
  1069. return ENXIO;
  1070. #endif
  1071. /* Convert to disk sectors. Request must be a multiple of size. */
  1072. lp = sc->sc_dk.dk_label;
  1073. sectorsize = lp->d_secsize;
  1074. if ((size % sectorsize) != 0)
  1075. return EFAULT;
  1076. if ((blkno % DL_BLKSPERSEC(lp)) != 0)
  1077. return EFAULT;
  1078. totwrt = size / sectorsize;
  1079. blkno = DL_BLKTOSEC(lp, blkno);
  1080. nsects = DL_GETPSIZE(&lp->d_partitions[part]);
  1081. sectoff = DL_GETPOFFSET(&lp->d_partitions[part]);
  1082. /* Check transfer bounds against partition size. */
  1083. if ((blkno + totwrt) > nsects)
  1084. return EINVAL;
  1085. /* Offset block number to start of partition. */
  1086. blkno += sectoff;
  1087. while (totwrt > 0) {
  1088. if (totwrt > UINT32_MAX)
  1089. nwrt = UINT32_MAX;
  1090. else
  1091. nwrt = totwrt;
  1092. #ifndef SD_DUMP_NOT_TRUSTED
  1093. xs = scsi_xs_get(sc->sc_link, SCSI_NOSLEEP);
  1094. if (xs == NULL)
  1095. return (ENOMEM);
  1096. xs->timeout = 10000;
  1097. xs->flags |= SCSI_DATA_OUT;
  1098. xs->data = va;
  1099. xs->datalen = nwrt * sectorsize;
  1100. sd_cmd_rw10(xs, 0, blkno, nwrt); /* XXX */
  1101. rv = scsi_xs_sync(xs);
  1102. scsi_xs_put(xs);
  1103. if (rv != 0)
  1104. return (ENXIO);
  1105. #else /* SD_DUMP_NOT_TRUSTED */
  1106. /* Let's just talk about this first... */
  1107. printf("sd%d: dump addr 0x%x, blk %lld\n", unit, va,
  1108. (long long)blkno);
  1109. delay(500 * 1000); /* half a second */
  1110. #endif /* SD_DUMP_NOT_TRUSTED */
  1111. /* update block count */
  1112. totwrt -= nwrt;
  1113. blkno += nwrt;
  1114. va += sectorsize * nwrt;
  1115. }
  1116. sddoingadump = 0;
  1117. return (0);
  1118. }
  1119. /*
  1120. * Copy up to len chars from src to dst, ignoring non-printables.
  1121. * Must be room for len+1 chars in dst so we can write the NUL.
  1122. * Does not assume src is NUL-terminated.
  1123. */
  1124. void
  1125. viscpy(u_char *dst, u_char *src, int len)
  1126. {
  1127. while (len > 0 && *src != '\0') {
  1128. if (*src < 0x20 || *src >= 0x80) {
  1129. src++;
  1130. continue;
  1131. }
  1132. *dst++ = *src++;
  1133. len--;
  1134. }
  1135. *dst = '\0';
  1136. }
/*
 * Issue a READ CAPACITY (10) command. On success, record the disk size
 * and sector size in sc->params and clear the thin provisioning flag
 * (this command carries no thin provisioning information).
 * Returns 0 or an errno.
 */
int
sd_read_cap_10(struct sd_softc *sc, int flags)
{
	struct scsi_read_capacity cdb;
	struct scsi_read_cap_data *rdcap;
	struct scsi_xfer *xs;
	int rv = ENOMEM;

	CLR(flags, SCSI_IGNORE_ILLEGAL_REQUEST);

	rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (rdcap == NULL)
		return (ENOMEM);

	xs = scsi_xs_get(sc->sc_link, flags | SCSI_DATA_IN | SCSI_SILENT);
	if (xs == NULL)
		goto done;	/* rv is still ENOMEM */

	bzero(&cdb, sizeof(cdb));
	cdb.opcode = READ_CAPACITY;

	memcpy(xs->cmd, &cdb, sizeof(cdb));
	xs->cmdlen = sizeof(cdb);
	xs->data = (void *)rdcap;
	xs->datalen = sizeof(*rdcap);
	xs->timeout = 20000;

	rv = scsi_xs_sync(xs);
	scsi_xs_put(xs);

	if (rv == 0) {
		/* The device reports the last LBA; size is last + 1. */
		sc->params.disksize = _4btol(rdcap->addr) + 1ll;
		sc->params.secsize = _4btol(rdcap->length);
		CLR(sc->flags, SDF_THIN);
	}

done:
	dma_free(rdcap, sizeof(*rdcap));
	return (rv);
}
/*
 * Issue a READ CAPACITY (16) command. On success, record the disk size,
 * sector size and thin provisioning (TPE) capability in sc->params and
 * sc->flags. Returns 0 or an errno.
 */
int
sd_read_cap_16(struct sd_softc *sc, int flags)
{
	struct scsi_read_capacity_16 cdb;
	struct scsi_read_cap_data_16 *rdcap;
	struct scsi_xfer *xs;
	int rv = ENOMEM;

	CLR(flags, SCSI_IGNORE_ILLEGAL_REQUEST);

	rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (rdcap == NULL)
		return (ENOMEM);

	xs = scsi_xs_get(sc->sc_link, flags | SCSI_DATA_IN | SCSI_SILENT);
	if (xs == NULL)
		goto done;	/* rv is still ENOMEM */

	bzero(&cdb, sizeof(cdb));
	cdb.opcode = READ_CAPACITY_16;
	cdb.byte2 = SRC16_SERVICE_ACTION;
	_lto4b(sizeof(*rdcap), cdb.length);

	memcpy(xs->cmd, &cdb, sizeof(cdb));
	xs->cmdlen = sizeof(cdb);
	xs->data = (void *)rdcap;
	xs->datalen = sizeof(*rdcap);
	xs->timeout = 20000;

	rv = scsi_xs_sync(xs);
	scsi_xs_put(xs);

	if (rv == 0) {
		/* A reported last-LBA of 0 is nonsensical; call it an error. */
		if (_8btol(rdcap->addr) == 0) {
			rv = EIO;
			goto done;
		}

		sc->params.disksize = _8btol(rdcap->addr) + 1;
		sc->params.secsize = _4btol(rdcap->length);
		/* TPE bit advertises thin provisioning support. */
		if (ISSET(_2btol(rdcap->lowest_aligned), READ_CAP_16_TPE))
			SET(sc->flags, SDF_THIN);
		else
			CLR(sc->flags, SDF_THIN);
	}

done:
	dma_free(rdcap, sizeof(*rdcap));
	return (rv);
}
  1212. int
  1213. sd_size(struct sd_softc *sc, int flags)
  1214. {
  1215. int rv;
  1216. if (SCSISPC(sc->sc_link->inqdata.version) >= 3) {
  1217. rv = sd_read_cap_16(sc, flags);
  1218. if (rv != 0)
  1219. rv = sd_read_cap_10(sc, flags);
  1220. } else {
  1221. rv = sd_read_cap_10(sc, flags);
  1222. if (rv == 0 && sc->params.disksize == 0x100000000ll)
  1223. rv = sd_read_cap_16(sc, flags);
  1224. }
  1225. return (rv);
  1226. }
/*
 * Scan the device's "supported VPD pages" list for the two pages
 * required for thin provisioning (block limits and thin provisioning).
 * Returns 0 if both are advertised, EOPNOTSUPP if not, or another
 * errno on failure.
 */
int
sd_thin_pages(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_hdr *pg;
	size_t len = 0;
	u_int8_t *pages;
	int i, score = 0;
	int rv;

	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	/* First fetch just the header to learn the page-list length. */
	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_SUPPORTED, flags);
	if (rv != 0)
		goto done;

	len = _2btol(pg->page_length);

	/* Reallocate with room for the list and fetch the whole page. */
	dma_free(pg, sizeof(*pg));
	pg = dma_alloc(sizeof(*pg) + len, (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg) + len,
	    SI_PG_SUPPORTED, flags);
	if (rv != 0)
		goto done;

	/* The page list follows the header; byte 0 echoes the page code. */
	pages = (u_int8_t *)(pg + 1);
	if (pages[0] != SI_PG_SUPPORTED) {
		rv = EIO;
		goto done;
	}

	/* Count how many of the two required pages are advertised. */
	for (i = 1; i < len; i++) {
		switch (pages[i]) {
		case SI_PG_DISK_LIMITS:
		case SI_PG_DISK_THIN:
			score++;
			break;
		}
	}

	if (score < 2)
		rv = EOPNOTSUPP;

done:
	dma_free(pg, sizeof(*pg) + len);
	return (rv);
}
  1272. int
  1273. sd_vpd_block_limits(struct sd_softc *sc, int flags)
  1274. {
  1275. struct scsi_vpd_disk_limits *pg;
  1276. int rv;
  1277. pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
  1278. PR_NOWAIT : PR_WAITOK) | PR_ZERO);
  1279. if (pg == NULL)
  1280. return (ENOMEM);
  1281. rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
  1282. SI_PG_DISK_LIMITS, flags);
  1283. if (rv != 0)
  1284. goto done;
  1285. if (_2btol(pg->hdr.page_length) == SI_PG_DISK_LIMITS_LEN_THIN) {
  1286. sc->params.unmap_sectors = _4btol(pg->max_unmap_lba_count);
  1287. sc->params.unmap_descs = _4btol(pg->max_unmap_desc_count);
  1288. } else
  1289. rv = EOPNOTSUPP;
  1290. done:
  1291. dma_free(pg, sizeof(*pg));
  1292. return (rv);
  1293. }
/*
 * Fetch the thin provisioning VPD page. The code that would select a
 * deletion strategy from its TPU/TPWS bits is compiled out (notyet),
 * so for now this only verifies that the page can be read.
 */
int
sd_vpd_thin(struct sd_softc *sc, int flags)
{
	struct scsi_vpd_disk_thin *pg;
	int rv;

	pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
	    PR_NOWAIT : PR_WAITOK) | PR_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
	    SI_PG_DISK_THIN, flags);
	if (rv != 0)
		goto done;

#ifdef notyet
	/* Prefer UNMAP; otherwise fall back to WRITE SAME (16). */
	if (ISSET(pg->flags, VPD_DISK_THIN_TPU))
		sc->sc_delete = sd_unmap;
	else if (ISSET(pg->flags, VPD_DISK_THIN_TPWS)) {
		sc->sc_delete = sd_write_same_16;
		sc->params.unmap_descs = 1; /* WRITE SAME 16 only does one */
	} else
		rv = EOPNOTSUPP;
#endif

done:
	dma_free(pg, sizeof(*pg));
	return (rv);
}
  1320. int
  1321. sd_thin_params(struct sd_softc *sc, int flags)
  1322. {
  1323. int rv;
  1324. rv = sd_thin_pages(sc, flags);
  1325. if (rv != 0)
  1326. return (rv);
  1327. rv = sd_vpd_block_limits(sc, flags);
  1328. if (rv != 0)
  1329. return (rv);
  1330. rv = sd_vpd_thin(sc, flags);
  1331. if (rv != 0)
  1332. return (rv);
  1333. return (0);
  1334. }
  1335. /*
  1336. * Fill out the disk parameter structure. Return SDGP_RESULT_OK if the
  1337. * structure is correctly filled in, SDGP_RESULT_OFFLINE otherwise. The caller
  1338. * is responsible for clearing the SDEV_MEDIA_LOADED flag if the structure
  1339. * cannot be completed.
  1340. */
  1341. int
  1342. sd_get_parms(struct sd_softc *sc, struct disk_parms *dp, int flags)
  1343. {
  1344. union scsi_mode_sense_buf *buf = NULL;
  1345. struct page_rigid_geometry *rigid = NULL;
  1346. struct page_flex_geometry *flex = NULL;
  1347. struct page_reduced_geometry *reduced = NULL;
  1348. u_char *page0 = NULL;
  1349. u_int32_t heads = 0, sectors = 0, cyls = 0, secsize = 0;
  1350. int err = 0, big;
  1351. if (sd_size(sc, flags) != 0)
  1352. return (SDGP_RESULT_OFFLINE);
  1353. if (ISSET(sc->flags, SDF_THIN) && sd_thin_params(sc, flags) != 0) {
  1354. /* we dont know the unmap limits, so we cant use thin shizz */
  1355. CLR(sc->flags, SDF_THIN);
  1356. }
  1357. buf = dma_alloc(sizeof(*buf), PR_NOWAIT);
  1358. if (buf == NULL)
  1359. goto validate;
  1360. /*
  1361. * Ask for page 0 (vendor specific) mode sense data to find
  1362. * READONLY info. The only thing USB devices will ask for.
  1363. */
  1364. err = scsi_do_mode_sense(sc->sc_link, 0, buf, (void **)&page0,
  1365. NULL, NULL, NULL, 1, flags | SCSI_SILENT, &big);
  1366. if (err == 0) {
  1367. if (big && buf->hdr_big.dev_spec & SMH_DSP_WRITE_PROT)
  1368. SET(sc->sc_link->flags, SDEV_READONLY);
  1369. else if (!big && buf->hdr.dev_spec & SMH_DSP_WRITE_PROT)
  1370. SET(sc->sc_link->flags, SDEV_READONLY);
  1371. else
  1372. CLR(sc->sc_link->flags, SDEV_READONLY);
  1373. }
  1374. /*
  1375. * Many UMASS devices choke when asked about their geometry. Most
  1376. * don't have a meaningful geometry anyway, so just fake it if
  1377. * scsi_size() worked.
  1378. */
  1379. if ((sc->sc_link->flags & SDEV_UMASS) && (dp->disksize > 0))
  1380. goto validate;
  1381. switch (sc->sc_link->inqdata.device & SID_TYPE) {
  1382. case T_OPTICAL:
  1383. /* No more information needed or available. */
  1384. break;
  1385. case T_RDIRECT:
  1386. /* T_RDIRECT supports only PAGE_REDUCED_GEOMETRY (6). */
  1387. err = scsi_do_mode_sense(sc->sc_link, PAGE_REDUCED_GEOMETRY,
  1388. buf, (void **)&reduced, NULL, NULL, &secsize,
  1389. sizeof(*reduced), flags | SCSI_SILENT, NULL);
  1390. if (!err && reduced &&
  1391. DISK_PGCODE(reduced, PAGE_REDUCED_GEOMETRY)) {
  1392. if (dp->disksize == 0)
  1393. dp->disksize = _5btol(reduced->sectors);
  1394. if (secsize == 0)
  1395. secsize = _2btol(reduced->bytes_s);
  1396. }
  1397. break;
  1398. default:
  1399. /*
  1400. * NOTE: Some devices leave off the last four bytes of
  1401. * PAGE_RIGID_GEOMETRY and PAGE_FLEX_GEOMETRY mode sense pages.
  1402. * The only information in those four bytes is RPM information
  1403. * so accept the page. The extra bytes will be zero and RPM will
  1404. * end up with the default value of 3600.
  1405. */
  1406. if (((sc->sc_link->flags & SDEV_ATAPI) == 0) ||
  1407. ((sc->sc_link->flags & SDEV_REMOVABLE) == 0))
  1408. err = scsi_do_mode_sense(sc->sc_link,
  1409. PAGE_RIGID_GEOMETRY, buf, (void **)&rigid, NULL,
  1410. NULL, &secsize, sizeof(*rigid) - 4,
  1411. flags | SCSI_SILENT, NULL);
  1412. if (!err && rigid && DISK_PGCODE(rigid, PAGE_RIGID_GEOMETRY)) {
  1413. heads = rigid->nheads;
  1414. cyls = _3btol(rigid->ncyl);
  1415. if (heads * cyls > 0)
  1416. sectors = dp->disksize / (heads * cyls);
  1417. } else {
  1418. err = scsi_do_mode_sense(sc->sc_link,
  1419. PAGE_FLEX_GEOMETRY, buf, (void **)&flex, NULL, NULL,
  1420. &secsize, sizeof(*flex) - 4,
  1421. flags | SCSI_SILENT, NULL);
  1422. if (!err && flex &&
  1423. DISK_PGCODE(flex, PAGE_FLEX_GEOMETRY)) {
  1424. sectors = flex->ph_sec_tr;
  1425. heads = flex->nheads;
  1426. cyls = _2btol(flex->ncyl);
  1427. if (secsize == 0)
  1428. secsize = _2btol(flex->bytes_s);
  1429. if (dp->disksize == 0)
  1430. dp->disksize = heads * cyls * sectors;
  1431. }
  1432. }
  1433. break;
  1434. }
  1435. validate:
  1436. if (buf)
  1437. dma_free(buf, sizeof(*buf));
  1438. if (dp->disksize == 0)
  1439. return (SDGP_RESULT_OFFLINE);
  1440. if (dp->secsize == 0)
  1441. dp->secsize = (secsize == 0) ? 512 : secsize;
  1442. /*
  1443. * Restrict secsize values to powers of two between 512 and 64k.
  1444. */
  1445. switch (dp->secsize) {
  1446. case 0x200: /* == 512, == DEV_BSIZE on all architectures. */
  1447. case 0x400:
  1448. case 0x800:
  1449. case 0x1000:
  1450. case 0x2000:
  1451. case 0x4000:
  1452. case 0x8000:
  1453. case 0x10000:
  1454. break;
  1455. default:
  1456. SC_DEBUG(sc->sc_link, SDEV_DB1,
  1457. ("sd_get_parms: bad secsize: %#lx\n", dp->secsize));
  1458. return (SDGP_RESULT_OFFLINE);
  1459. }
  1460. /*
  1461. * XXX THINK ABOUT THIS!! Using values such that sectors * heads *
  1462. * cyls is <= disk_size can lead to wasted space. We need a more
  1463. * careful calculation/validation to make everything work out
  1464. * optimally.
  1465. */
  1466. if (dp->disksize > 0xffffffff && (dp->heads * dp->sectors) < 0xffff) {
  1467. dp->heads = 511;
  1468. dp->sectors = 255;
  1469. cyls = 0;
  1470. } else {
  1471. /*
  1472. * Use standard geometry values for anything we still don't
  1473. * know.
  1474. */
  1475. dp->heads = (heads == 0) ? 255 : heads;
  1476. dp->sectors = (sectors == 0) ? 63 : sectors;
  1477. }
  1478. dp->cyls = (cyls == 0) ? dp->disksize / (dp->heads * dp->sectors) :
  1479. cyls;
  1480. if (dp->cyls == 0) {
  1481. dp->heads = dp->cyls = 1;
  1482. dp->sectors = dp->disksize;
  1483. }
  1484. return (SDGP_RESULT_OK);
  1485. }
  1486. void
  1487. sd_flush(struct sd_softc *sc, int flags)
  1488. {
  1489. struct scsi_link *link = sc->sc_link;
  1490. struct scsi_xfer *xs;
  1491. struct scsi_synchronize_cache *cmd;
  1492. if (link->quirks & SDEV_NOSYNCCACHE)
  1493. return;
  1494. /*
  1495. * Issue a SYNCHRONIZE CACHE. Address 0, length 0 means "all remaining
  1496. * blocks starting at address 0". Ignore ILLEGAL REQUEST in the event
  1497. * that the command is not supported by the device.
  1498. */
  1499. xs = scsi_xs_get(link, flags);
  1500. if (xs == NULL) {
  1501. SC_DEBUG(link, SDEV_DB1, ("cache sync failed to get xs\n"));
  1502. return;
  1503. }
  1504. cmd = (struct scsi_synchronize_cache *)xs->cmd;
  1505. cmd->opcode = SYNCHRONIZE_CACHE;
  1506. xs->cmdlen = sizeof(*cmd);
  1507. xs->timeout = 100000;
  1508. xs->flags |= SCSI_IGNORE_ILLEGAL_REQUEST;
  1509. if (scsi_xs_sync(xs) == 0)
  1510. sc->flags &= ~SDF_DIRTY;
  1511. else
  1512. SC_DEBUG(link, SDEV_DB1, ("cache sync failed\n"));
  1513. scsi_xs_put(xs);
  1514. }