/* geom_subr.c */
  1. /*-
  2. * SPDX-License-Identifier: BSD-3-Clause
  3. *
  4. * Copyright (c) 2002 Poul-Henning Kamp
  5. * Copyright (c) 2002 Networks Associates Technology, Inc.
  6. * All rights reserved.
  7. *
  8. * This software was developed for the FreeBSD Project by Poul-Henning Kamp
  9. * and NAI Labs, the Security Research Division of Network Associates, Inc.
  10. * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
  11. * DARPA CHATS research program.
  12. *
  13. * Redistribution and use in source and binary forms, with or without
  14. * modification, are permitted provided that the following conditions
  15. * are met:
  16. * 1. Redistributions of source code must retain the above copyright
  17. * notice, this list of conditions and the following disclaimer.
  18. * 2. Redistributions in binary form must reproduce the above copyright
  19. * notice, this list of conditions and the following disclaimer in the
  20. * documentation and/or other materials provided with the distribution.
  21. * 3. The names of the authors may not be used to endorse or promote
  22. * products derived from this software without specific prior written
  23. * permission.
  24. *
  25. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  26. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  27. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  28. * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  29. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  30. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  31. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  32. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  33. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  34. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  35. * SUCH DAMAGE.
  36. */
  37. #include <sys/cdefs.h>
  38. #include "opt_ddb.h"
  39. #include <sys/param.h>
  40. #include <sys/systm.h>
  41. #include <sys/devicestat.h>
  42. #include <sys/kernel.h>
  43. #include <sys/malloc.h>
  44. #include <sys/bio.h>
  45. #include <sys/proc.h>
  46. #include <sys/kthread.h>
  47. #include <sys/lock.h>
  48. #include <sys/mutex.h>
  49. #include <sys/errno.h>
  50. #include <sys/sbuf.h>
  51. #include <sys/sdt.h>
  52. #include <geom/geom.h>
  53. #include <geom/geom_dbg.h>
  54. #include <geom/geom_int.h>
  55. #include <machine/stdarg.h>
  56. #ifdef DDB
  57. #include <ddb/ddb.h>
  58. #endif
  59. #ifdef KDB
  60. #include <sys/kdb.h>
  61. #endif
/* DTrace provider for GEOM probes. */
SDT_PROVIDER_DEFINE(geom);

/* List of all loaded GEOM classes. */
struct class_list_head g_classes = LIST_HEAD_INITIALIZER(g_classes);
/* All geoms of all classes, kept sorted by increasing rank (see redo_rank). */
static struct g_tailq_head geoms = TAILQ_HEAD_INITIALIZER(geoms);
/*
 * Distinct addresses used as sleep/wakeup channels for the GEOM worker
 * threads -- presumably the event, g_up and g_down threads; the waiters
 * live outside this file (NOTE(review): confirm against geom_kern.c).
 */
char *g_wait_event, *g_wait_up, *g_wait_down;
/*
 * Argument block handed to the class-load, retaste and provider-resize
 * events.  Only the fields relevant to the particular event are set.
 */
struct g_hh00 {
	struct g_class	*mp;	/* class to load/retaste */
	struct g_provider *pp;	/* provider to resize */
	off_t		size;	/* new media size for resize */
	int		error;	/* result reported back to a waiting caller */
	int		post;	/* posted async: the event handler frees hh */
};
/*
 * Emit one debug line of the form "classname[lvl] message bio-description\n"
 * as a single unit, by formatting it into a fixed-size on-stack sbuf that
 * drains through printf.  Building the whole line first keeps concurrent
 * debug output from interleaving mid-line.
 */
void
g_dbg_printf(const char *classname, int lvl, struct bio *bp,
    const char *format,
    ...)
{
#ifndef PRINTF_BUFR_SIZE
#define PRINTF_BUFR_SIZE	64
#endif
	char bufr[PRINTF_BUFR_SIZE];
	struct sbuf sb, *sbp __unused;
	va_list ap;

	sbp = sbuf_new(&sb, bufr, sizeof(bufr), SBUF_FIXEDLEN);
	KASSERT(sbp != NULL, ("sbuf_new misused?"));
	/* Drain through printf so lines longer than bufr still get out. */
	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
	sbuf_cat(&sb, classname);
	/* A negative level suppresses the "[lvl]" tag. */
	if (lvl >= 0)
		sbuf_printf(&sb, "[%d]", lvl);
	va_start(ap, format);
	sbuf_vprintf(&sb, format, ap);
	va_end(ap);
	/* Optionally append a formatted description of the bio. */
	if (bp != NULL) {
		sbuf_putc(&sb, ' ');
		g_format_bio(&sb, bp);
	}
	/* Terminate the debug line with a single '\n'. */
	sbuf_nl_terminate(&sb);
	/* Flush line to printf. */
	sbuf_finish(&sb);
	sbuf_delete(&sb);
}
/*
 * This event offers a new class a chance to taste all preexisting providers.
 */
static void
g_load_class(void *arg, int flag)
{
	struct g_hh00 *hh;
	struct g_class *mp2, *mp;
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_assert();
	if (flag == EV_CANCEL)	/* XXX: can't happen ? */
		return;
	if (g_shutdown)
		return;

	hh = arg;
	mp = hh->mp;
	hh->error = 0;
	/*
	 * If the event was posted asynchronously (boot-time load), nobody
	 * waits for hh, so free it now and stop reporting errors through it.
	 */
	if (hh->post) {
		g_free(hh);
		hh = NULL;
	}
	g_trace(G_T_TOPOLOGY, "g_load_class(%s)", mp->name);
	KASSERT(mp->name != NULL && *mp->name != '\0',
	    ("GEOM class has no name"));
	/* Refuse duplicates: same structure, or a different one by name. */
	LIST_FOREACH(mp2, &g_classes, class) {
		if (mp2 == mp) {
			printf("The GEOM class %s is already loaded.\n",
			    mp2->name);
			if (hh != NULL)
				hh->error = EEXIST;
			return;
		} else if (strcmp(mp2->name, mp->name) == 0) {
			printf("A GEOM class %s is already loaded.\n",
			    mp2->name);
			if (hh != NULL)
				hh->error = EEXIST;
			return;
		}
	}

	/* Register the class and run its init hook, if any. */
	LIST_INIT(&mp->geom);
	LIST_INSERT_HEAD(&g_classes, mp, class);
	if (mp->init != NULL)
		mp->init(mp);
	if (mp->taste == NULL)
		return;
	/* Let the new class taste every provider of every other class. */
	LIST_FOREACH(mp2, &g_classes, class) {
		if (mp == mp2)
			continue;
		LIST_FOREACH(gp, &mp2->geom, geom) {
			LIST_FOREACH(pp, &gp->provider, provider) {
				mp->taste(mp, pp, 0);
				/* Taste must not drop the topology lock. */
				g_topology_assert();
			}
		}
	}
}
/*
 * Unload a GEOM class: refuse while anything is open, tear down all of
 * its geoms via the class destroy_geom method, wait for withering geoms
 * to disappear, then run the class fini hook and unregister it.
 * Returns 0 on success or EBUSY/EOPNOTSUPP/destroy_geom's error.
 */
static int
g_unload_class(struct g_class *mp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;
	int error;

	g_topology_lock();
	g_trace(G_T_TOPOLOGY, "g_unload_class(%s)", mp->name);
retry:
	G_VALID_CLASS(mp);
	LIST_FOREACH(gp, &mp->geom, geom) {
		/* We refuse to unload if anything is open */
		LIST_FOREACH(pp, &gp->provider, provider)
			if (pp->acr || pp->acw || pp->ace) {
				g_topology_unlock();
				return (EBUSY);
			}
		LIST_FOREACH(cp, &gp->consumer, consumer)
			if (cp->acr || cp->acw || cp->ace) {
				g_topology_unlock();
				return (EBUSY);
			}
		/* If the geom is withering, wait for it to finish. */
		if (gp->flags & G_GEOM_WITHER) {
			/* Sleep drops the topology lock; rescan from scratch. */
			g_topology_sleep(mp, 1);
			goto retry;
		}
	}

	/*
	 * We allow unloading if we have no geoms, or a class
	 * method we can use to get rid of them.
	 */
	if (!LIST_EMPTY(&mp->geom) && mp->destroy_geom == NULL) {
		g_topology_unlock();
		return (EOPNOTSUPP);
	}

	/* Bar new entries */
	mp->taste = NULL;
	LIST_FOREACH(gp, &mp->geom, geom) {
		error = mp->destroy_geom(NULL, mp, gp);
		if (error != 0) {
			g_topology_unlock();
			return (error);
		}
	}
	/* Wait for withering to finish. */
	for (;;) {
		gp = LIST_FIRST(&mp->geom);
		if (gp == NULL)
			break;
		KASSERT(gp->flags & G_GEOM_WITHER,
		    ("Non-withering geom in class %s", mp->name));
		g_topology_sleep(mp, 1);
	}
	G_VALID_CLASS(mp);
	if (mp->fini != NULL)
		mp->fini(mp);
	LIST_REMOVE(mp, class);
	g_topology_unlock();
	return (0);
}
/*
 * Module event handler for GEOM classes: validate the class version,
 * fire up GEOM on first use, and dispatch MOD_LOAD/MOD_UNLOAD.
 * Other event types fall through and return EOPNOTSUPP.
 */
int
g_modevent(module_t mod, int type, void *data)
{
	struct g_hh00 *hh;
	int error;
	static int g_ignition;
	struct g_class *mp;

	mp = data;
	/* Reject classes built against a different GEOM ABI version. */
	if (mp->version != G_VERSION) {
		printf("GEOM class %s has Wrong version %x\n",
		    mp->name, mp->version);
		return (EINVAL);
	}
	/* One-time GEOM bring-up on the first class to load. */
	if (!g_ignition) {
		g_ignition++;
		g_init();
	}
	error = EOPNOTSUPP;
	switch (type) {
	case MOD_LOAD:
		g_trace(G_T_TOPOLOGY, "g_modevent(%s, LOAD)", mp->name);
		hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO);
		hh->mp = mp;
		/*
		 * Once the system is not cold, MOD_LOAD calls will be
		 * from the userland and the g_event thread will be able
		 * to acknowledge their completion.
		 */
		if (cold) {
			/* Async: g_load_class frees hh, error not reported. */
			hh->post = 1;
			error = g_post_event(g_load_class, hh, M_WAITOK, NULL);
		} else {
			error = g_waitfor_event(g_load_class, hh, M_WAITOK,
			    NULL);
			if (error == 0)
				error = hh->error;
			g_free(hh);
		}
		break;
	case MOD_UNLOAD:
		g_trace(G_T_TOPOLOGY, "g_modevent(%s, UNLOAD)", mp->name);
		error = g_unload_class(mp);
		if (error == 0) {
			KASSERT(LIST_EMPTY(&mp->geom),
			    ("Unloaded class (%s) still has geom", mp->name));
		}
		break;
	}

	return (error);
}
/*
 * Event handler for g_retaste(): let class mp re-taste every unopened
 * provider in the system.  Any existing geom of mp attached to such a
 * provider is first orphaned and withered so the taste starts clean.
 */
static void
g_retaste_event(void *arg, int flag)
{
	struct g_class *mp, *mp2;
	struct g_geom *gp;
	struct g_hh00 *hh;
	struct g_provider *pp;
	struct g_consumer *cp;

	g_topology_assert();
	if (flag == EV_CANCEL)	/* XXX: can't happen ? */
		return;
	if (g_shutdown || g_notaste)
		return;

	hh = arg;
	mp = hh->mp;
	hh->error = 0;
	/* Asynchronously posted: nobody waits, free hh now. */
	if (hh->post) {
		g_free(hh);
		hh = NULL;
	}
	g_trace(G_T_TOPOLOGY, "g_retaste(%s)", mp->name);
	LIST_FOREACH(mp2, &g_classes, class) {
		LIST_FOREACH(gp, &mp2->geom, geom) {
			LIST_FOREACH(pp, &gp->provider, provider) {
				/* Skip providers that are currently open. */
				if (pp->acr || pp->acw || pp->ace)
					continue;
				/* Is class mp already attached here? */
				LIST_FOREACH(cp, &pp->consumers, consumers) {
					if (cp->geom->class == mp &&
					    (cp->flags & G_CF_ORPHAN) == 0)
						break;
				}
				/* If so, wither that geom before retasting. */
				if (cp != NULL) {
					cp->flags |= G_CF_ORPHAN;
					g_wither_geom(cp->geom, ENXIO);
				}
				mp->taste(mp, pp, 0);
				g_topology_assert();
			}
		}
	}
}
  313. int
  314. g_retaste(struct g_class *mp)
  315. {
  316. struct g_hh00 *hh;
  317. int error;
  318. if (mp->taste == NULL)
  319. return (EINVAL);
  320. hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO);
  321. hh->mp = mp;
  322. if (cold) {
  323. hh->post = 1;
  324. error = g_post_event(g_retaste_event, hh, M_WAITOK, NULL);
  325. } else {
  326. error = g_waitfor_event(g_retaste_event, hh, M_WAITOK, NULL);
  327. if (error == 0)
  328. error = hh->error;
  329. g_free(hh);
  330. }
  331. return (error);
  332. }
/*
 * Allocate a new geom in class mp, with a printf-style formatted name,
 * link it into the class and the global rank-ordered geom list, and
 * inherit the class's default method pointers.  Never fails (M_WAITOK).
 */
struct g_geom *
g_new_geomf(struct g_class *mp, const char *fmt, ...)
{
	struct g_geom *gp;
	va_list ap;
	struct sbuf *sb;

	g_topology_assert();
	G_VALID_CLASS(mp);
	/* Format the name first so we know how much to allocate. */
	sb = sbuf_new_auto();
	va_start(ap, fmt);
	sbuf_vprintf(sb, fmt, ap);
	va_end(ap);
	sbuf_finish(sb);
	gp = g_malloc(sizeof *gp, M_WAITOK | M_ZERO);
	gp->name = g_malloc(sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
	gp->class = mp;
	/* New geoms start at rank 1; redo_rank() adjusts on attach. */
	gp->rank = 1;
	LIST_INIT(&gp->consumer);
	LIST_INIT(&gp->provider);
	LIST_INSERT_HEAD(&mp->geom, gp, geom);
	TAILQ_INSERT_HEAD(&geoms, gp, geoms);
	strcpy(gp->name, sbuf_data(sb));
	sbuf_delete(sb);
	/* Fill in defaults from class */
	gp->start = mp->start;
	gp->spoiled = mp->spoiled;
	gp->attrchanged = mp->attrchanged;
	gp->providergone = mp->providergone;
	gp->dumpconf = mp->dumpconf;
	gp->access = mp->access;
	gp->orphan = mp->orphan;
	gp->ioctl = mp->ioctl;
	gp->resize = mp->resize;
	return (gp);
}
/*
 * Free a geom that has no remaining consumers or providers: cancel any
 * pending events referencing it, unlink it from its class and the global
 * geom list, and release its memory.
 */
void
g_destroy_geom(struct g_geom *gp)
{

	g_topology_assert();
	G_VALID_GEOM(gp);
	g_trace(G_T_TOPOLOGY, "g_destroy_geom(%p(%s))", gp, gp->name);
	KASSERT(LIST_EMPTY(&gp->consumer),
	    ("g_destroy_geom(%s) with consumer(s) [%p]",
	    gp->name, LIST_FIRST(&gp->consumer)));
	KASSERT(LIST_EMPTY(&gp->provider),
	    ("g_destroy_geom(%s) with provider(s) [%p]",
	    gp->name, LIST_FIRST(&gp->provider)));
	/* Drop queued events that carry gp as a reference. */
	g_cancel_event(gp);
	LIST_REMOVE(gp, geom);
	TAILQ_REMOVE(&geoms, gp, geoms);
	g_free(gp->name);
	g_free(gp);
}
  386. /*
  387. * This function is called (repeatedly) until the geom has withered away.
  388. */
  389. void
  390. g_wither_geom(struct g_geom *gp, int error)
  391. {
  392. struct g_provider *pp;
  393. g_topology_assert();
  394. G_VALID_GEOM(gp);
  395. g_trace(G_T_TOPOLOGY, "g_wither_geom(%p(%s))", gp, gp->name);
  396. if (!(gp->flags & G_GEOM_WITHER)) {
  397. gp->flags |= G_GEOM_WITHER;
  398. LIST_FOREACH(pp, &gp->provider, provider)
  399. if (!(pp->flags & G_PF_ORPHAN))
  400. g_orphan_provider(pp, error);
  401. }
  402. g_do_wither();
  403. }
  404. /*
  405. * Convenience function to destroy a particular provider.
  406. */
  407. void
  408. g_wither_provider(struct g_provider *pp, int error)
  409. {
  410. pp->flags |= G_PF_WITHER;
  411. if (!(pp->flags & G_PF_ORPHAN))
  412. g_orphan_provider(pp, error);
  413. }
  414. /*
  415. * This function is called (repeatedly) until the has withered away.
  416. */
  417. void
  418. g_wither_geom_close(struct g_geom *gp, int error)
  419. {
  420. struct g_consumer *cp;
  421. g_topology_assert();
  422. G_VALID_GEOM(gp);
  423. g_trace(G_T_TOPOLOGY, "g_wither_geom_close(%p(%s))", gp, gp->name);
  424. LIST_FOREACH(cp, &gp->consumer, consumer)
  425. if (cp->acr || cp->acw || cp->ace)
  426. g_access(cp, -cp->acr, -cp->acw, -cp->ace);
  427. g_wither_geom(gp, error);
  428. }
/*
 * This function is called (repeatedly) until we can't wash away more
 * withered bits at present.
 */
void
g_wither_washer(void)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_provider *pp, *pp2;
	struct g_consumer *cp, *cp2;

	g_topology_assert();
	LIST_FOREACH(mp, &g_classes, class) {
		LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
			/* Withering providers with no consumers can go. */
			LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) {
				if (!(pp->flags & G_PF_WITHER))
					continue;
				if (LIST_EMPTY(&pp->consumers))
					g_destroy_provider(pp);
			}
			if (!(gp->flags & G_GEOM_WITHER))
				continue;
			/* On a withering geom, any unconsumed provider goes. */
			LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) {
				if (LIST_EMPTY(&pp->consumers))
					g_destroy_provider(pp);
			}
			/* ... as does every closed consumer. */
			LIST_FOREACH_SAFE(cp, &gp->consumer, consumer, cp2) {
				if (cp->acr || cp->acw || cp->ace)
					continue;
				if (cp->provider != NULL)
					g_detach(cp);
				g_destroy_consumer(cp);
			}
			/* The geom itself goes once it is completely bare. */
			if (LIST_EMPTY(&gp->provider) &&
			    LIST_EMPTY(&gp->consumer))
				g_destroy_geom(gp);
		}
	}
}
/*
 * Allocate a new, unattached consumer on geom gp and give it a devstat
 * entry.  The geom must not be withering and must provide an orphan
 * method.  Never fails (M_WAITOK).
 */
struct g_consumer *
g_new_consumer(struct g_geom *gp)
{
	struct g_consumer *cp;

	g_topology_assert();
	G_VALID_GEOM(gp);
	KASSERT(!(gp->flags & G_GEOM_WITHER),
	    ("g_new_consumer on WITHERing geom(%s) (class %s)",
	    gp->name, gp->class->name));
	KASSERT(gp->orphan != NULL,
	    ("g_new_consumer on geom(%s) (class %s) without orphan",
	    gp->name, gp->class->name));

	cp = g_malloc(sizeof *cp, M_WAITOK | M_ZERO);
	cp->geom = gp;
	cp->stat = devstat_new_entry(cp, -1, 0, DEVSTAT_ALL_SUPPORTED,
	    DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
	LIST_INSERT_HEAD(&gp->consumer, cp, consumer);
	return(cp);
}
/*
 * Free a consumer that is detached and fully closed, removing it from
 * its geom and devstat.  If the geom is withering, kick the washer so
 * the geom itself can be reaped now that one more consumer is gone.
 */
void
g_destroy_consumer(struct g_consumer *cp)
{
	struct g_geom *gp;

	g_topology_assert();
	G_VALID_CONSUMER(cp);
	g_trace(G_T_TOPOLOGY, "g_destroy_consumer(%p)", cp);
	KASSERT (cp->provider == NULL, ("g_destroy_consumer but attached"));
	KASSERT (cp->acr == 0, ("g_destroy_consumer with acr"));
	KASSERT (cp->acw == 0, ("g_destroy_consumer with acw"));
	KASSERT (cp->ace == 0, ("g_destroy_consumer with ace"));
	/* Drop queued events that carry cp as a reference. */
	g_cancel_event(cp);
	/* Remember the geom; cp is gone after g_free(). */
	gp = cp->geom;
	LIST_REMOVE(cp, consumer);
	devstat_remove_entry(cp->stat);
	g_free(cp);
	if (gp->flags & G_GEOM_WITHER)
		g_do_wither();
}
/*
 * Event handler for a newly created provider: notify attached geoms of
 * a media change and offer the provider to every class that does not
 * already have a (non-orphaned) geom attached to it.
 */
static void
g_new_provider_event(void *arg, int flag)
{
	struct g_class *mp;
	struct g_provider *pp;
	struct g_consumer *cp, *next_cp;

	g_topology_assert();
	if (flag == EV_CANCEL)
		return;
	if (g_shutdown)
		return;
	pp = arg;
	G_VALID_PROVIDER(pp);
	/* The provider may already be on its way out; nothing to do. */
	if ((pp->flags & G_PF_WITHER) != 0)
		return;
	/* Tell existing, live consumers the media (may have) changed. */
	LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, next_cp) {
		if ((cp->flags & G_CF_ORPHAN) == 0 &&
		    cp->geom->attrchanged != NULL)
			cp->geom->attrchanged(cp, "GEOM::media");
	}
	if (g_notaste)
		return;
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp->taste == NULL)
			continue;
		/* Skip classes already attached to this provider. */
		LIST_FOREACH(cp, &pp->consumers, consumers)
			if (cp->geom->class == mp &&
			    (cp->flags & G_CF_ORPHAN) == 0)
				break;
		if (cp != NULL)
			continue;
		mp->taste(mp, pp, 0);
		/* Taste must not drop the topology lock. */
		g_topology_assert();
	}
}
/*
 * Create a new provider on geom gp with a printf-style formatted name,
 * give it a devstat entry and schedule a g_new_provider_event so other
 * classes get to taste it.  The geom must have access and start methods
 * and must not be withering.  Never fails (M_WAITOK).
 */
struct g_provider *
g_new_providerf(struct g_geom *gp, const char *fmt, ...)
{
	struct g_provider *pp;
	struct sbuf *sb;
	va_list ap;

	g_topology_assert();
	G_VALID_GEOM(gp);
	KASSERT(gp->access != NULL,
	    ("new provider on geom(%s) without ->access (class %s)",
	    gp->name, gp->class->name));
	KASSERT(gp->start != NULL,
	    ("new provider on geom(%s) without ->start (class %s)",
	    gp->name, gp->class->name));
	KASSERT(!(gp->flags & G_GEOM_WITHER),
	    ("new provider on WITHERing geom(%s) (class %s)",
	    gp->name, gp->class->name));
	sb = sbuf_new_auto();
	va_start(ap, fmt);
	sbuf_vprintf(sb, fmt, ap);
	va_end(ap);
	sbuf_finish(sb);
	/* The name is stored inline, right behind the provider struct. */
	pp = g_malloc(sizeof *pp + sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
	pp->name = (char *)(pp + 1);
	strcpy(pp->name, sbuf_data(sb));
	sbuf_delete(sb);
	LIST_INIT(&pp->consumers);
	LIST_INIT(&pp->aliases);
	/* Providers report ENXIO until g_error_provider() clears it. */
	pp->error = ENXIO;
	pp->geom = gp;
	pp->stat = devstat_new_entry(pp, -1, 0, DEVSTAT_ALL_SUPPORTED,
	    DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
	LIST_INSERT_HEAD(&gp->provider, pp, provider);
	/*
	 * pp and gp are passed as extra event references -- presumably so
	 * the event is cancelled if either is destroyed first (see the
	 * g_cancel_event() calls in the destroy paths).
	 */
	g_post_event(g_new_provider_event, pp, M_WAITOK, pp, gp, NULL);
	return (pp);
}
/*
 * Add a printf-style formatted alias name to provider pp, unless an
 * identical alias is already present.  The alias string is stored
 * inline behind the g_geom_alias structure.
 */
void
g_provider_add_alias(struct g_provider *pp, const char *fmt, ...)
{
	struct sbuf *sb;
	struct g_geom_alias *gap;
	va_list ap;

	/*
	 * Generate the alias string and save it in the list.
	 */
	sb = sbuf_new_auto();
	va_start(ap, fmt);
	sbuf_vprintf(sb, fmt, ap);
	va_end(ap);
	sbuf_finish(sb);

	LIST_FOREACH(gap, &pp->aliases, ga_next) {
		if (strcmp(gap->ga_alias, sbuf_data(sb)) != 0)
			continue;
		/* Don't re-add the same alias. */
		sbuf_delete(sb);
		return;
	}

	/* M_ZERO guarantees the inline copy is NUL-terminated. */
	gap = g_malloc(sizeof(*gap) + sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
	memcpy((char *)(gap + 1), sbuf_data(sb), sbuf_len(sb));
	sbuf_delete(sb);
	gap->ga_alias = (const char *)(gap + 1);
	LIST_INSERT_HEAD(&pp->aliases, gap, ga_next);
}
/*
 * Set the error status reported by provider pp; 0 marks the provider
 * usable (new providers start out at ENXIO, see g_new_providerf()).
 */
void
g_error_provider(struct g_provider *pp, int error)
{

	/* G_VALID_PROVIDER(pp);  We may not have g_topology */
	pp->error = error;
}
/*
 * Event handler for g_resize_provider(): orphan consumers of geoms that
 * cannot handle shrinking, update the media size, run the resize method
 * of the remaining geoms, and finally retaste -- metadata that was
 * invalid at the old size may be valid at the new one.
 */
static void
g_resize_provider_event(void *arg, int flag)
{
	struct g_hh00 *hh;
	struct g_class *mp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp, *cp2;
	off_t size;

	g_topology_assert();
	if (g_shutdown)
		return;

	hh = arg;
	pp = hh->pp;
	size = hh->size;
	g_free(hh);

	G_VALID_PROVIDER(pp);
	KASSERT(!(pp->flags & G_PF_WITHER),
	    ("g_resize_provider_event but withered"));
	g_trace(G_T_TOPOLOGY, "g_resize_provider_event(%p)", pp);

	/* Geoms without a resize method cannot survive a shrink: orphan. */
	LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) {
		gp = cp->geom;
		if (gp->resize == NULL && size < pp->mediasize) {
			/*
			 * XXX: g_dev_orphan method does deferred destroying
			 * and it is possible, that other event could already
			 * call the orphan method. Check consumer's flags to
			 * do not schedule it twice.
			 */
			if (cp->flags & G_CF_ORPHAN)
				continue;
			cp->flags |= G_CF_ORPHAN;
			cp->geom->orphan(cp);
		}
	}

	pp->mediasize = size;

	/* Notify surviving geoms of the new size. */
	LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) {
		gp = cp->geom;
		if ((gp->flags & G_GEOM_WITHER) == 0 && gp->resize != NULL)
			gp->resize(cp);
	}

	/*
	 * After resizing, the previously invalid GEOM class metadata
	 * might become valid.  This means we should retaste.
	 */
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp->taste == NULL)
			continue;
		/* Skip classes already attached to this provider. */
		LIST_FOREACH(cp, &pp->consumers, consumers)
			if (cp->geom->class == mp &&
			    (cp->flags & G_CF_ORPHAN) == 0)
				break;
		if (cp != NULL)
			continue;
		mp->taste(mp, pp, 0);
		g_topology_assert();
	}
}
  668. void
  669. g_resize_provider(struct g_provider *pp, off_t size)
  670. {
  671. struct g_hh00 *hh;
  672. G_VALID_PROVIDER(pp);
  673. if (pp->flags & G_PF_WITHER)
  674. return;
  675. if (size == pp->mediasize)
  676. return;
  677. hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO);
  678. hh->pp = pp;
  679. hh->size = size;
  680. g_post_event(g_resize_provider_event, hh, M_WAITOK, NULL);
  681. }
  682. struct g_provider *
  683. g_provider_by_name(char const *arg)
  684. {
  685. struct g_class *cp;
  686. struct g_geom *gp;
  687. struct g_provider *pp, *wpp;
  688. if (strncmp(arg, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
  689. arg += sizeof(_PATH_DEV) - 1;
  690. wpp = NULL;
  691. LIST_FOREACH(cp, &g_classes, class) {
  692. LIST_FOREACH(gp, &cp->geom, geom) {
  693. LIST_FOREACH(pp, &gp->provider, provider) {
  694. if (strcmp(arg, pp->name) != 0)
  695. continue;
  696. if ((gp->flags & G_GEOM_WITHER) == 0 &&
  697. (pp->flags & G_PF_WITHER) == 0)
  698. return (pp);
  699. else
  700. wpp = pp;
  701. }
  702. }
  703. }
  704. return (wpp);
  705. }
/*
 * Free a provider that has no consumers and no access counts: cancel
 * pending events, unlink it, drop its devstat entry and aliases, and
 * notify the owning geom via its providergone method if it has one.
 */
void
g_destroy_provider(struct g_provider *pp)
{
	struct g_geom *gp;
	struct g_geom_alias *gap, *gaptmp;

	g_topology_assert();
	G_VALID_PROVIDER(pp);
	KASSERT(LIST_EMPTY(&pp->consumers),
	    ("g_destroy_provider but attached"));
	KASSERT (pp->acr == 0, ("g_destroy_provider with acr"));
	KASSERT (pp->acw == 0, ("g_destroy_provider with acw"));
	KASSERT (pp->ace == 0, ("g_destroy_provider with ace"));
	/* Drop queued events that carry pp as a reference. */
	g_cancel_event(pp);
	LIST_REMOVE(pp, provider);
	gp = pp->geom;
	devstat_remove_entry(pp->stat);
	/*
	 * If a callback was provided, send notification that the provider
	 * is now gone.
	 */
	if (gp->providergone != NULL)
		gp->providergone(pp);
	LIST_FOREACH_SAFE(gap, &pp->aliases, ga_next, gaptmp)
		g_free(gap);
	g_free(pp);
	/* Losing a provider may let a withering geom finish dying. */
	if ((gp->flags & G_GEOM_WITHER))
		g_do_wither();
}
/*
 * We keep the "geoms" list sorted by topological order (== increasing
 * numerical rank) at all times.
 * When an attach is done, the attaching geom's rank is invalidated
 * and it is moved to the tail of the list.
 * All geoms later in the sequence have their ranks reevaluated in
 * sequence.  If we cannot assign rank to a geom because its
 * prerequisites do not have rank, we move that element to the tail
 * of the sequence with invalid rank as well.
 * At some point we encounter our original geom and if we still fail
 * to assign it a rank, there must be a loop and we fail back to
 * g_attach() which detaches again and calls redo_rank again
 * to fix up the damage.
 * It would be much simpler code wise to do it recursively, but we
 * can't risk that on the kernel stack.
 */
static int
redo_rank(struct g_geom *gp)
{
	struct g_consumer *cp;
	struct g_geom *gp1, *gp2;
	int n, m;

	g_topology_assert();
	G_VALID_GEOM(gp);

	/* Invalidate this geoms rank and move it to the tail */
	gp1 = TAILQ_NEXT(gp, geoms);
	if (gp1 != NULL) {
		gp->rank = 0;
		TAILQ_REMOVE(&geoms, gp, geoms);
		TAILQ_INSERT_TAIL(&geoms, gp, geoms);
	} else {
		/* Already at the tail; start re-ranking from gp itself. */
		gp1 = gp;
	}

	/* re-rank the rest of the sequence */
	for (; gp1 != NULL; gp1 = gp2) {
		gp1->rank = 0;
		m = 1;
		/* Rank = 1 + max rank of all providers we consume from. */
		LIST_FOREACH(cp, &gp1->consumer, consumer) {
			if (cp->provider == NULL)
				continue;
			n = cp->provider->geom->rank;
			if (n == 0) {
				/* Unranked prerequisite; give up for now. */
				m = 0;
				break;
			} else if (n >= m)
				m = n + 1;
		}
		gp1->rank = m;
		gp2 = TAILQ_NEXT(gp1, geoms);

		/* got a rank, moving on */
		if (m != 0)
			continue;

		/* no rank to original geom means loop */
		if (gp == gp1)
			return (ELOOP);

		/* no rank, put it at the end move on */
		TAILQ_REMOVE(&geoms, gp1, geoms);
		TAILQ_INSERT_TAIL(&geoms, gp1, geoms);
	}
	return (0);
}
/*
 * Attach consumer cp to provider pp and re-rank the topology.  Refused
 * with ENXIO for orphaned or withering providers; if re-ranking detects
 * a loop (ELOOP) the attach is rolled back before returning the error.
 */
int
g_attach(struct g_consumer *cp, struct g_provider *pp)
{
	int error;

	g_topology_assert();
	G_VALID_CONSUMER(cp);
	G_VALID_PROVIDER(pp);
	g_trace(G_T_TOPOLOGY, "g_attach(%p, %p)", cp, pp);
	KASSERT(cp->provider == NULL, ("attach but attached"));
	if ((pp->flags & (G_PF_ORPHAN | G_PF_WITHER)) != 0)
		return (ENXIO);
	cp->provider = pp;
	cp->flags &= ~G_CF_ORPHAN;
	LIST_INSERT_HEAD(&pp->consumers, cp, consumers);
	error = redo_rank(cp->geom);
	if (error) {
		/* Undo the attach and repair the ranks we disturbed. */
		LIST_REMOVE(cp, consumers);
		cp->provider = NULL;
		redo_rank(cp->geom);
	}
	return (error);
}
/*
 * Detach a fully closed, idle consumer from its provider, kicking the
 * wither-washer if either side is on its way out, then re-rank.
 */
void
g_detach(struct g_consumer *cp)
{
	struct g_provider *pp;

	g_topology_assert();
	G_VALID_CONSUMER(cp);
	g_trace(G_T_TOPOLOGY, "g_detach(%p)", cp);
	KASSERT(cp->provider != NULL, ("detach but not attached"));
	KASSERT(cp->acr == 0, ("detach but nonzero acr"));
	KASSERT(cp->acw == 0, ("detach but nonzero acw"));
	KASSERT(cp->ace == 0, ("detach but nonzero ace"));
	KASSERT(cp->nstart == cp->nend,
	    ("detach with active requests"));
	pp = cp->provider;
	LIST_REMOVE(cp, consumers);
	cp->provider = NULL;
	/* A withering geom/provider may now be able to finish dying. */
	if ((cp->geom->flags & G_GEOM_WITHER) ||
	    (pp->geom->flags & G_GEOM_WITHER) ||
	    (pp->flags & G_PF_WITHER))
		g_do_wither();
	redo_rank(cp->geom);
}
  839. /*
  840. * g_access()
  841. *
 * Access-check with delta values.  The question asked is "can consumer
 * "cp" change the access counters by the relative amounts dc[rwe] ?"
  844. */
int
g_access(struct g_consumer *cp, int dcr, int dcw, int dce)
{
	struct g_provider *pp;
	struct g_geom *gp;
	int pw, pe;
#ifdef INVARIANTS
	int sr, sw, se;
#endif
	int error;

	g_topology_assert();
	G_VALID_CONSUMER(cp);
	pp = cp->provider;
	KASSERT(pp != NULL, ("access but not attached"));
	G_VALID_PROVIDER(pp);
	gp = pp->geom;

	g_trace(G_T_ACCESS, "g_access(%p(%s), %d, %d, %d)",
	    cp, pp->name, dcr, dcw, dce);

	/* The deltas may not drive any counter negative and must not be a NOP. */
	KASSERT(cp->acr + dcr >= 0, ("access resulting in negative acr"));
	KASSERT(cp->acw + dcw >= 0, ("access resulting in negative acw"));
	KASSERT(cp->ace + dce >= 0, ("access resulting in negative ace"));
	KASSERT(dcr != 0 || dcw != 0 || dce != 0, ("NOP access request"));
	KASSERT(cp->acr + dcr != 0 || cp->acw + dcw != 0 ||
	    cp->ace + dce != 0 || cp->nstart == cp->nend,
	    ("Last close with active requests"));
	KASSERT(gp->access != NULL, ("NULL geom->access"));

	/*
	 * If our class cares about being spoiled, and we have been, we
	 * are probably just ahead of the event telling us that. Fail
	 * now rather than having to unravel this later.
	 */
	if (cp->geom->spoiled != NULL && (cp->flags & G_CF_SPOILED) &&
	    (dcr > 0 || dcw > 0 || dce > 0))
		return (ENXIO);

	/*
	 * A number of GEOM classes either need to perform an I/O on the first
	 * open or to acquire a different subsystem's lock. To do that they
	 * may have to drop the topology lock.
	 * Other GEOM classes perform special actions when opening a lower rank
	 * geom for the first time. As a result, more than one thread may
	 * end up performing the special actions.
	 * So, we prevent concurrent "first" opens by marking the consumer with
	 * special flag.
	 *
	 * Note that if the geom's access method never drops the topology lock,
	 * then we will never see G_GEOM_IN_ACCESS here.
	 */
	while ((gp->flags & G_GEOM_IN_ACCESS) != 0) {
		g_trace(G_T_ACCESS,
		    "%s: race on geom %s via provider %s and consumer of %s",
		    __func__, gp->name, pp->name, cp->geom->name);
		gp->flags |= G_GEOM_ACCESS_WAIT;
		g_topology_sleep(gp, 0);
	}

	/*
	 * Figure out what counts the provider would have had, if this
	 * consumer had (r0w0e0) at this time.
	 */
	pw = pp->acw - cp->acw;
	pe = pp->ace - cp->ace;

	g_trace(G_T_ACCESS,
	    "open delta:[r%dw%de%d] old:[r%dw%de%d] provider:[r%dw%de%d] %p(%s)",
	    dcr, dcw, dce,
	    cp->acr, cp->acw, cp->ace,
	    pp->acr, pp->acw, pp->ace,
	    pp, pp->name);

	/* If foot-shooting is enabled, any open on rank#1 is OK */
	if ((g_debugflags & G_F_FOOTSHOOTING) && gp->rank == 1)
		;
	/* If we try exclusive but already write: fail */
	else if (dce > 0 && pw > 0)
		return (EPERM);
	/* If we try write but already exclusive: fail */
	else if (dcw > 0 && pe > 0)
		return (EPERM);
	/* If we try to open more but provider is error'ed: fail */
	else if ((dcr > 0 || dcw > 0 || dce > 0) && pp->error != 0) {
		printf("%s(%d): provider %s has error %d set\n",
		    __func__, __LINE__, pp->name, pp->error);
		return (pp->error);
	}

	/* Ok then... */

#ifdef INVARIANTS
	/* Snapshot counters so we can verify the access method left them alone. */
	sr = cp->acr;
	sw = cp->acw;
	se = cp->ace;
#endif
	gp->flags |= G_GEOM_IN_ACCESS;
	error = gp->access(pp, dcr, dcw, dce);
	/* A pure close (no positive delta) must never fail. */
	KASSERT(dcr > 0 || dcw > 0 || dce > 0 || error == 0,
	    ("Geom provider %s::%s dcr=%d dcw=%d dce=%d error=%d failed "
	    "closing ->access()", gp->class->name, pp->name, dcr, dcw,
	    dce, error));
	g_topology_assert();
	gp->flags &= ~G_GEOM_IN_ACCESS;
	KASSERT(cp->acr == sr && cp->acw == sw && cp->ace == se,
	    ("Access counts changed during geom->access"));
	/* Wake up anyone blocked in the G_GEOM_IN_ACCESS loop above. */
	if ((gp->flags & G_GEOM_ACCESS_WAIT) != 0) {
		gp->flags &= ~G_GEOM_ACCESS_WAIT;
		wakeup(gp);
	}

	if (!error) {
		/*
		 * If we open first write, spoil any partner consumers.
		 * If we close last write and provider is not errored,
		 * trigger re-taste.
		 */
		if (pp->acw == 0 && dcw != 0)
			g_spoil(pp, cp);
		else if (pp->acw != 0 && pp->acw == -dcw && pp->error == 0 &&
		    !(gp->flags & G_GEOM_WITHER))
			g_post_event(g_new_provider_event, pp, M_WAITOK,
			    pp, NULL);

		pp->acr += dcr;
		pp->acw += dcw;
		pp->ace += dce;
		cp->acr += dcr;
		cp->acw += dcw;
		cp->ace += dce;
		/* An open provider must have a valid sectorsize. */
		if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)
			KASSERT(pp->sectorsize > 0,
			    ("Provider %s lacks sectorsize", pp->name));
		/* Last close on a withering geom's consumer: schedule cleanup. */
		if ((cp->geom->flags & G_GEOM_WITHER) &&
		    cp->acr == 0 && cp->acw == 0 && cp->ace == 0)
			g_do_wither();
	}
	return (error);
}
  973. int
  974. g_handleattr_int(struct bio *bp, const char *attribute, int val)
  975. {
  976. return (g_handleattr(bp, attribute, &val, sizeof val));
  977. }
  978. int
  979. g_handleattr_uint16_t(struct bio *bp, const char *attribute, uint16_t val)
  980. {
  981. return (g_handleattr(bp, attribute, &val, sizeof val));
  982. }
  983. int
  984. g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val)
  985. {
  986. return (g_handleattr(bp, attribute, &val, sizeof val));
  987. }
  988. int
  989. g_handleattr_str(struct bio *bp, const char *attribute, const char *str)
  990. {
  991. return (g_handleattr(bp, attribute, str, 0));
  992. }
/*
 * Service a BIO_GETATTR request if 'attribute' matches the bio's.
 *
 * Returns 0 (request not consumed) when the attribute names differ.
 * Otherwise copies 'val' into the bio's data buffer — 'len' bytes for a
 * fixed-size attribute, or as a NUL-terminated string when len == 0 —
 * completes the bio and returns 1.  A buffer-size mismatch or string
 * truncation completes the bio with EFAULT instead.
 */
int
g_handleattr(struct bio *bp, const char *attribute, const void *val, int len)
{
	int error = 0;

	if (strcmp(bp->bio_attribute, attribute))
		return (0);
	if (len == 0) {
		/* String attribute: clear the buffer, copy with bounds check. */
		bzero(bp->bio_data, bp->bio_length);
		if (strlcpy(bp->bio_data, val, bp->bio_length) >=
		    bp->bio_length) {
			printf("%s: %s %s bio_length %jd strlen %zu -> EFAULT\n",
			    __func__, bp->bio_to->name, attribute,
			    (intmax_t)bp->bio_length, strlen(val));
			error = EFAULT;
		}
	} else if (bp->bio_length == len) {
		bcopy(val, bp->bio_data, len);
	} else {
		printf("%s: %s %s bio_length %jd len %d -> EFAULT\n", __func__,
		    bp->bio_to->name, attribute, (intmax_t)bp->bio_length, len);
		error = EFAULT;
	}
	if (error == 0)
		bp->bio_completed = bp->bio_length;
	g_io_deliver(bp, error);
	return (1);
}
/*
 * Stock access method for geom classes that need no special open/close
 * handling: every access change is granted.
 */
int
g_std_access(struct g_provider *pp,
    int dr __unused, int dw __unused, int de __unused)
{

	g_topology_assert();
	G_VALID_PROVIDER(pp);
	return (0);
}
  1028. void
  1029. g_std_done(struct bio *bp)
  1030. {
  1031. struct bio *bp2;
  1032. bp2 = bp->bio_parent;
  1033. if (bp2->bio_error == 0)
  1034. bp2->bio_error = bp->bio_error;
  1035. bp2->bio_completed += bp->bio_completed;
  1036. g_destroy_bio(bp);
  1037. bp2->bio_inbed++;
  1038. if (bp2->bio_children == bp2->bio_inbed) {
  1039. if (bp2->bio_cmd == BIO_SPEEDUP)
  1040. bp2->bio_completed = bp2->bio_length;
  1041. g_io_deliver(bp2, bp2->bio_error);
  1042. }
  1043. }
  1044. /* XXX: maybe this is only g_slice_spoiled */
  1045. void
  1046. g_std_spoiled(struct g_consumer *cp)
  1047. {
  1048. struct g_geom *gp;
  1049. struct g_provider *pp;
  1050. g_topology_assert();
  1051. G_VALID_CONSUMER(cp);
  1052. g_trace(G_T_TOPOLOGY, "g_std_spoiled(%p)", cp);
  1053. cp->flags |= G_CF_ORPHAN;
  1054. g_detach(cp);
  1055. gp = cp->geom;
  1056. LIST_FOREACH(pp, &gp->provider, provider)
  1057. g_orphan_provider(pp, ENXIO);
  1058. g_destroy_consumer(cp);
  1059. if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer))
  1060. g_destroy_geom(gp);
  1061. else
  1062. gp->flags |= G_GEOM_WITHER;
  1063. }
  1064. /*
  1065. * Spoiling happens when a provider is opened for writing, but consumers
  1066. * which are configured by in-band data are attached (slicers for instance).
  1067. * Since the write might potentially change the in-band data, such consumers
  1068. * need to re-evaluate their existence after the writing session closes.
  1069. * We do this by (offering to) tear them down when the open for write happens
  1070. * in return for a re-taste when it closes again.
  1071. * Together with the fact that such consumers grab an 'e' bit whenever they
  1072. * are open, regardless of mode, this ends up DTRT.
  1073. */
  1074. static void
  1075. g_spoil_event(void *arg, int flag)
  1076. {
  1077. struct g_provider *pp;
  1078. struct g_consumer *cp, *cp2;
  1079. g_topology_assert();
  1080. if (flag == EV_CANCEL)
  1081. return;
  1082. pp = arg;
  1083. G_VALID_PROVIDER(pp);
  1084. g_trace(G_T_TOPOLOGY, "%s %p(%s:%s:%s)", __func__, pp,
  1085. pp->geom->class->name, pp->geom->name, pp->name);
  1086. for (cp = LIST_FIRST(&pp->consumers); cp != NULL; cp = cp2) {
  1087. cp2 = LIST_NEXT(cp, consumers);
  1088. if ((cp->flags & G_CF_SPOILED) == 0)
  1089. continue;
  1090. cp->flags &= ~G_CF_SPOILED;
  1091. if (cp->geom->spoiled == NULL)
  1092. continue;
  1093. cp->geom->spoiled(cp);
  1094. g_topology_assert();
  1095. }
  1096. }
  1097. void
  1098. g_spoil(struct g_provider *pp, struct g_consumer *cp)
  1099. {
  1100. struct g_consumer *cp2;
  1101. g_topology_assert();
  1102. G_VALID_PROVIDER(pp);
  1103. G_VALID_CONSUMER(cp);
  1104. LIST_FOREACH(cp2, &pp->consumers, consumers) {
  1105. if (cp2 == cp)
  1106. continue;
  1107. /*
  1108. KASSERT(cp2->acr == 0, ("spoiling cp->acr = %d", cp2->acr));
  1109. KASSERT(cp2->acw == 0, ("spoiling cp->acw = %d", cp2->acw));
  1110. */
  1111. KASSERT(cp2->ace == 0, ("spoiling cp->ace = %d", cp2->ace));
  1112. cp2->flags |= G_CF_SPOILED;
  1113. }
  1114. g_post_event(g_spoil_event, pp, M_WAITOK, pp, NULL);
  1115. }
  1116. static void
  1117. g_media_changed_event(void *arg, int flag)
  1118. {
  1119. struct g_provider *pp;
  1120. int retaste;
  1121. g_topology_assert();
  1122. if (flag == EV_CANCEL)
  1123. return;
  1124. pp = arg;
  1125. G_VALID_PROVIDER(pp);
  1126. /*
  1127. * If provider was not open for writing, queue retaste after spoiling.
  1128. * If it was, retaste will happen automatically on close.
  1129. */
  1130. retaste = (pp->acw == 0 && pp->error == 0 &&
  1131. !(pp->geom->flags & G_GEOM_WITHER));
  1132. g_spoil_event(arg, flag);
  1133. if (retaste)
  1134. g_post_event(g_new_provider_event, pp, M_WAITOK, pp, NULL);
  1135. }
/*
 * Note that the media backing provider 'pp' has changed: mark all of
 * its consumers spoiled and post g_media_changed_event(), which spoils
 * them and may queue a re-taste.  'flag' is handed to g_post_event()
 * as its wait flag (presumably M_WAITOK/M_NOWAIT — as g_post_event takes).
 */
int
g_media_changed(struct g_provider *pp, int flag)
{
	struct g_consumer *cp;

	LIST_FOREACH(cp, &pp->consumers, consumers)
		cp->flags |= G_CF_SPOILED;
	return (g_post_event(g_media_changed_event, pp, flag, pp, NULL));
}
/*
 * Note that the media backing provider 'pp' has gone away: mark all of
 * its consumers spoiled and post g_spoil_event() directly — unlike
 * g_media_changed(), no re-taste is queued.
 */
int
g_media_gone(struct g_provider *pp, int flag)
{
	struct g_consumer *cp;

	LIST_FOREACH(cp, &pp->consumers, consumers)
		cp->flags |= G_CF_SPOILED;
	return (g_post_event(g_spoil_event, pp, flag, pp, NULL));
}
  1152. int
  1153. g_getattr__(const char *attr, struct g_consumer *cp, void *var, int len)
  1154. {
  1155. int error, i;
  1156. i = len;
  1157. error = g_io_getattr(attr, cp, &i, var);
  1158. if (error)
  1159. return (error);
  1160. if (i != len)
  1161. return (EINVAL);
  1162. return (0);
  1163. }
  1164. static int
  1165. g_get_device_prefix_len(const char *name)
  1166. {
  1167. int len;
  1168. if (strncmp(name, "ada", 3) == 0)
  1169. len = 3;
  1170. else if (strncmp(name, "ad", 2) == 0)
  1171. len = 2;
  1172. else
  1173. return (0);
  1174. if (name[len] < '0' || name[len] > '9')
  1175. return (0);
  1176. do {
  1177. len++;
  1178. } while (name[len] >= '0' && name[len] <= '9');
  1179. return (len);
  1180. }
  1181. int
  1182. g_compare_names(const char *namea, const char *nameb)
  1183. {
  1184. int deva, devb;
  1185. if (strcmp(namea, nameb) == 0)
  1186. return (1);
  1187. deva = g_get_device_prefix_len(namea);
  1188. if (deva == 0)
  1189. return (0);
  1190. devb = g_get_device_prefix_len(nameb);
  1191. if (devb == 0)
  1192. return (0);
  1193. if (strcmp(namea + deva, nameb + devb) == 0)
  1194. return (1);
  1195. return (0);
  1196. }
  1197. #if defined(DIAGNOSTIC) || defined(DDB)
  1198. /*
  1199. * This function walks the mesh and returns a non-zero integer if it
  1200. * finds the argument pointer is an object. The return value indicates
  1201. * which type of object it is believed to be. If topology is not locked,
  1202. * this function is potentially dangerous, but we don't assert that the
  1203. * topology lock is held when called from debugger.
  1204. */
int
g_valid_obj(void const *ptr)
{
	struct g_class *mp;
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_provider *pp;

	/* Only demand the topology lock when not running under the debugger. */
#ifdef KDB
	if (kdb_active == 0)
#endif
		g_topology_assert();

	/* Walk the whole mesh; return a code identifying the object type. */
	LIST_FOREACH(mp, &g_classes, class) {
		if (ptr == mp)
			return (1);	/* class */
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (ptr == gp)
				return (2);	/* geom */
			LIST_FOREACH(cp, &gp->consumer, consumer)
				if (ptr == cp)
					return (3);	/* consumer */
			LIST_FOREACH(pp, &gp->provider, provider)
				if (ptr == pp)
					return (4);	/* provider */
		}
	}
	return(0);
}
  1232. #endif
  1233. #ifdef DDB
/* DDB helper: print at the caller's local 'indent' level. */
#define gprintf(...) do { \
	db_printf("%*s", indent, ""); \
	db_printf(__VA_ARGS__); \
} while (0)
/* As gprintf(), but append a newline. */
#define gprintln(...) do { \
	gprintf(__VA_ARGS__); \
	db_printf("\n"); \
} while (0)
/*
 * Append flag name 'sflag' to the string being built when 'flag' is set
 * in (obj)->flags.  Relies on the caller's local variables 'str',
 * 'size' and 'comma' by name.
 */
#define ADDFLAG(obj, flag, sflag) do { \
	if ((obj)->flags & (flag)) { \
		if (comma) \
			strlcat(str, ",", size); \
		strlcat(str, (sflag), size); \
		comma = 1; \
	} \
} while (0)
/*
 * Render pp->flags as a comma-separated list of flag names into 'str'
 * (of 'size' bytes).  Returns 'str'; "NONE" when no flags are set.
 */
static char *
provider_flags_to_string(struct g_provider *pp, char *str, size_t size)
{
	int comma = 0;	/* consumed by the ADDFLAG() macro */

	bzero(str, size);
	if (pp->flags == 0) {
		strlcpy(str, "NONE", size);
		return (str);
	}
	ADDFLAG(pp, G_PF_WITHER, "G_PF_WITHER");
	ADDFLAG(pp, G_PF_ORPHAN, "G_PF_ORPHAN");
	return (str);
}
/*
 * Render gp->flags as a comma-separated list of flag names into 'str'
 * (of 'size' bytes).  Returns 'str'; "NONE" when no flags are set.
 */
static char *
geom_flags_to_string(struct g_geom *gp, char *str, size_t size)
{
	int comma = 0;	/* consumed by the ADDFLAG() macro */

	bzero(str, size);
	if (gp->flags == 0) {
		strlcpy(str, "NONE", size);
		return (str);
	}
	ADDFLAG(gp, G_GEOM_WITHER, "G_GEOM_WITHER");
	return (str);
}
/*
 * DDB helper: print one consumer.  indent == 0 (object dumped directly)
 * selects the verbose multi-line form; otherwise a single summary line
 * is printed, indented via the gprintf()/gprintln() macros.
 */
static void
db_show_geom_consumer(int indent, struct g_consumer *cp)
{

	if (indent == 0) {
		gprintln("consumer: %p", cp);
		gprintln(" class: %s (%p)", cp->geom->class->name,
		    cp->geom->class);
		gprintln(" geom: %s (%p)", cp->geom->name, cp->geom);
		if (cp->provider == NULL)
			gprintln(" provider: none");
		else {
			gprintln(" provider: %s (%p)", cp->provider->name,
			    cp->provider);
		}
		gprintln(" access: r%dw%de%d", cp->acr, cp->acw, cp->ace);
		gprintln(" flags: 0x%04x", cp->flags);
#ifdef INVARIANTS
		gprintln(" nstart: %u", cp->nstart);
		gprintln(" nend: %u", cp->nend);
#endif
	} else {
		gprintf("consumer: %p (%s), access=r%dw%de%d", cp,
		    cp->provider != NULL ? cp->provider->name : "none",
		    cp->acr, cp->acw, cp->ace);
		if (cp->flags)
			db_printf(", flags=0x%04x", cp->flags);
		db_printf("\n");
	}
}
/*
 * DDB helper: print one provider (verbose when indent == 0, one summary
 * line otherwise) followed by all of its consumers.
 */
static void
db_show_geom_provider(int indent, struct g_provider *pp)
{
	struct g_consumer *cp;
	char flags[64];

	if (indent == 0) {
		gprintln("provider: %s (%p)", pp->name, pp);
		gprintln(" class: %s (%p)", pp->geom->class->name,
		    pp->geom->class);
		gprintln(" geom: %s (%p)", pp->geom->name, pp->geom);
		gprintln(" mediasize: %jd", (intmax_t)pp->mediasize);
		gprintln(" sectorsize: %u", pp->sectorsize);
		gprintln(" stripesize: %ju", (uintmax_t)pp->stripesize);
		gprintln(" stripeoffset: %ju", (uintmax_t)pp->stripeoffset);
		gprintln(" access: r%dw%de%d", pp->acr, pp->acw,
		    pp->ace);
		gprintln(" flags: %s (0x%04x)",
		    provider_flags_to_string(pp, flags, sizeof(flags)),
		    pp->flags);
		gprintln(" error: %d", pp->error);
		if (LIST_EMPTY(&pp->consumers))
			gprintln(" consumers: none");
	} else {
		gprintf("provider: %s (%p), access=r%dw%de%d",
		    pp->name, pp, pp->acr, pp->acw, pp->ace);
		if (pp->flags != 0) {
			db_printf(", flags=%s (0x%04x)",
			    provider_flags_to_string(pp, flags, sizeof(flags)),
			    pp->flags);
		}
		db_printf("\n");
	}
	if (!LIST_EMPTY(&pp->consumers)) {
		LIST_FOREACH(cp, &pp->consumers, consumers) {
			db_show_geom_consumer(indent + 2, cp);
			/* Stop early if the DDB pager was quit. */
			if (db_pager_quit)
				break;
		}
	}
}
/*
 * DDB helper: print one geom (verbose when indent == 0, one summary
 * line otherwise) followed by all of its providers and consumers.
 */
static void
db_show_geom_geom(int indent, struct g_geom *gp)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	char flags[64];

	if (indent == 0) {
		gprintln("geom: %s (%p)", gp->name, gp);
		gprintln(" class: %s (%p)", gp->class->name, gp->class);
		gprintln(" flags: %s (0x%04x)",
		    geom_flags_to_string(gp, flags, sizeof(flags)), gp->flags);
		gprintln(" rank: %d", gp->rank);
		if (LIST_EMPTY(&gp->provider))
			gprintln(" providers: none");
		if (LIST_EMPTY(&gp->consumer))
			gprintln(" consumers: none");
	} else {
		gprintf("geom: %s (%p), rank=%d", gp->name, gp, gp->rank);
		if (gp->flags != 0) {
			db_printf(", flags=%s (0x%04x)",
			    geom_flags_to_string(gp, flags, sizeof(flags)),
			    gp->flags);
		}
		db_printf("\n");
	}
	if (!LIST_EMPTY(&gp->provider)) {
		LIST_FOREACH(pp, &gp->provider, provider) {
			db_show_geom_provider(indent + 2, pp);
			/* Stop early if the DDB pager was quit. */
			if (db_pager_quit)
				break;
		}
	}
	if (!LIST_EMPTY(&gp->consumer)) {
		LIST_FOREACH(cp, &gp->consumer, consumer) {
			db_show_geom_consumer(indent + 2, cp);
			if (db_pager_quit)
				break;
		}
	}
}
/*
 * DDB helper: print one class header followed by all of its geoms.
 */
static void
db_show_geom_class(struct g_class *mp)
{
	struct g_geom *gp;

	db_printf("class: %s (%p)\n", mp->name, mp);
	LIST_FOREACH(gp, &mp->geom, geom) {
		db_show_geom_geom(2, gp);
		/* Stop early if the DDB pager was quit. */
		if (db_pager_quit)
			break;
	}
}
  1395. /*
  1396. * Print the GEOM topology or the given object.
  1397. */
DB_SHOW_COMMAND(geom, db_show_geom)
{
	struct g_class *mp;

	if (!have_addr) {
		/* No address given, print the entire topology. */
		LIST_FOREACH(mp, &g_classes, class) {
			db_show_geom_class(mp);
			db_printf("\n");
			if (db_pager_quit)
				break;
		}
	} else {
		/* Type codes match the return values of g_valid_obj(). */
		switch (g_valid_obj((void *)addr)) {
		case 1:
			db_show_geom_class((struct g_class *)addr);
			break;
		case 2:
			db_show_geom_geom(0, (struct g_geom *)addr);
			break;
		case 3:
			db_show_geom_consumer(0, (struct g_consumer *)addr);
			break;
		case 4:
			db_show_geom_provider(0, (struct g_provider *)addr);
			break;
		default:
			db_printf("Not a GEOM object.\n");
			break;
		}
	}
}
  1429. static void
  1430. db_print_bio_cmd(struct bio *bp)
  1431. {
  1432. db_printf(" cmd: ");
  1433. switch (bp->bio_cmd) {
  1434. case BIO_READ: db_printf("BIO_READ"); break;
  1435. case BIO_WRITE: db_printf("BIO_WRITE"); break;
  1436. case BIO_DELETE: db_printf("BIO_DELETE"); break;
  1437. case BIO_GETATTR: db_printf("BIO_GETATTR"); break;
  1438. case BIO_FLUSH: db_printf("BIO_FLUSH"); break;
  1439. case BIO_CMD0: db_printf("BIO_CMD0"); break;
  1440. case BIO_CMD1: db_printf("BIO_CMD1"); break;
  1441. case BIO_CMD2: db_printf("BIO_CMD2"); break;
  1442. case BIO_ZONE: db_printf("BIO_ZONE"); break;
  1443. default: db_printf("UNKNOWN"); break;
  1444. }
  1445. db_printf("\n");
  1446. }
  1447. static void
  1448. db_print_bio_flags(struct bio *bp)
  1449. {
  1450. int comma;
  1451. comma = 0;
  1452. db_printf(" flags: ");
  1453. if (bp->bio_flags & BIO_ERROR) {
  1454. db_printf("BIO_ERROR");
  1455. comma = 1;
  1456. }
  1457. if (bp->bio_flags & BIO_DONE) {
  1458. db_printf("%sBIO_DONE", (comma ? ", " : ""));
  1459. comma = 1;
  1460. }
  1461. if (bp->bio_flags & BIO_ONQUEUE)
  1462. db_printf("%sBIO_ONQUEUE", (comma ? ", " : ""));
  1463. db_printf("\n");
  1464. }
  1465. /*
  1466. * Print useful information in a BIO
  1467. */
DB_SHOW_COMMAND(bio, db_show_bio)
{
	struct bio *bp;

	/* A bio address is required; without one nothing is printed. */
	if (have_addr) {
		bp = (struct bio *)addr;
		db_printf("BIO %p\n", bp);
		db_print_bio_cmd(bp);
		db_print_bio_flags(bp);
		db_printf(" cflags: 0x%hx\n", bp->bio_cflags);
		db_printf(" pflags: 0x%hx\n", bp->bio_pflags);
		db_printf(" offset: %jd\n", (intmax_t)bp->bio_offset);
		db_printf(" length: %jd\n", (intmax_t)bp->bio_length);
		db_printf(" bcount: %ld\n", bp->bio_bcount);
		db_printf(" resid: %ld\n", bp->bio_resid);
		db_printf(" completed: %jd\n", (intmax_t)bp->bio_completed);
		db_printf(" children: %u\n", bp->bio_children);
		db_printf(" inbed: %u\n", bp->bio_inbed);
		db_printf(" error: %d\n", bp->bio_error);
		db_printf(" parent: %p\n", bp->bio_parent);
		db_printf(" driver1: %p\n", bp->bio_driver1);
		db_printf(" driver2: %p\n", bp->bio_driver2);
		db_printf(" caller1: %p\n", bp->bio_caller1);
		db_printf(" caller2: %p\n", bp->bio_caller2);
		db_printf(" bio_from: %p\n", bp->bio_from);
		db_printf(" bio_to: %p\n", bp->bio_to);
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
		db_printf(" bio_track_bp: %p\n", bp->bio_track_bp);
#endif
	}
}
  1498. #undef gprintf
  1499. #undef gprintln
  1500. #undef ADDFLAG
  1501. #endif /* DDB */