// SPDX-License-Identifier: GPL-2.0
/*
 * Functions for working with the Flattened Device Tree data format
 *
 * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
 * benh@kernel.crashing.org
 */

#define pr_fmt(fmt)	"OF: fdt: " fmt

#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/libfdt.h>
#include <linux/debugfs.h>
#include <linux/serial_core.h>
#include <linux/sysfs.h>

#include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
#include <asm/page.h>

#include "of_private.h"

/*
 * of_fdt_limit_memory - limit the number of regions in the /memory node
 * @limit: maximum entries
 *
 * Adjust the flattened device tree to have at most 'limit' number of
 * memory entries in the /memory node. This function may be called
 * any time after initial_boot_params is set.
 */
void of_fdt_limit_memory(int limit)
{
	int memory;
	int len;
	const void *val;
	int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
	int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
	const __be32 *addr_prop;
	const __be32 *size_prop;
	int root_offset;
	int cell_size;

	root_offset = fdt_path_offset(initial_boot_params, "/");
	if (root_offset < 0)
		return;

	addr_prop = fdt_getprop(initial_boot_params, root_offset,
				"#address-cells", NULL);
	if (addr_prop)
		nr_address_cells = fdt32_to_cpu(*addr_prop);

	size_prop = fdt_getprop(initial_boot_params, root_offset,
				"#size-cells", NULL);
	if (size_prop)
		nr_size_cells = fdt32_to_cpu(*size_prop);

	cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);

	memory = fdt_path_offset(initial_boot_params, "/memory");
	if (memory > 0) {
		val = fdt_getprop(initial_boot_params, memory, "reg", &len);
		if (len > limit*cell_size) {
			len = limit*cell_size;
			pr_debug("Limiting number of entries to %d\n", limit);
			fdt_setprop(initial_boot_params, memory, "reg", val,
				    len);
		}
	}
}
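
/*
 * Editorial sketch, not part of the upstream file: a caller that wants at
 * most four base/size pairs left in the /memory "reg" property could do the
 * following any time after initial_boot_params is set. The value 4 is
 * arbitrary and only for illustration.
 */
static inline void example_cap_memory_regions(void)
{
	of_fdt_limit_memory(4);
}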

/**
 * of_fdt_is_compatible - Return true if given node from the given blob has
 * compat in its compatible list
 * @blob: A device tree blob
 * @node: node to test
 * @compat: compatible string to compare with compatible list.
 *
 * On match, returns a non-zero value with smaller values returned for more
 * specific compatible values.
 */
static int of_fdt_is_compatible(const void *blob,
				unsigned long node, const char *compat)
{
	const char *cp;
	int cplen;
	unsigned long l, score = 0;

	cp = fdt_getprop(blob, node, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	while (cplen > 0) {
		score++;
		if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
			return score;
		l = strlen(cp) + 1;
		cp += l;
		cplen -= l;
	}

	return 0;
}
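
/*
 * Editorial illustration (not in the upstream file): for a node whose
 * compatible list is "vendor,board-v2", "vendor,board", "simple-bus"
 * (hypothetical strings), the returned score is the 1-based position of the
 * match, so more specific entries win with *lower* values:
 *
 *	of_fdt_is_compatible(blob, node, "vendor,board-v2") == 1
 *	of_fdt_is_compatible(blob, node, "simple-bus")      == 3
 *	of_fdt_is_compatible(blob, node, "other")           == 0
 */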

/**
 * of_fdt_is_big_endian - Return true if given node needs BE MMIO accesses
 * @blob: A device tree blob
 * @node: node to test
 *
 * Returns true if the node has a "big-endian" property, or if the kernel
 * was compiled for BE *and* the node has a "native-endian" property.
 * Returns false otherwise.
 */
bool of_fdt_is_big_endian(const void *blob, unsigned long node)
{
	if (fdt_getprop(blob, node, "big-endian", NULL))
		return true;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
	    fdt_getprop(blob, node, "native-endian", NULL))
		return true;
	return false;
}

static bool of_fdt_device_is_available(const void *blob, unsigned long node)
{
	const char *status = fdt_getprop(blob, node, "status", NULL);

	if (!status)
		return true;

	if (!strcmp(status, "ok") || !strcmp(status, "okay"))
		return true;

	return false;
}

/**
 * of_fdt_match - Return true if node matches a list of compatible values
 */
int of_fdt_match(const void *blob, unsigned long node,
		 const char *const *compat)
{
	unsigned int tmp, score = 0;

	if (!compat)
		return 0;

	while (*compat) {
		tmp = of_fdt_is_compatible(blob, node, *compat);
		if (tmp && (score == 0 || (tmp < score)))
			score = tmp;
		compat++;
	}

	return score;
}
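
/*
 * Editorial sketch (not in the upstream file): matching the root node of
 * initial_boot_params against a NULL-terminated compatible list. The
 * strings are hypothetical; the best (lowest) non-zero score wins.
 */
static inline int example_match_root(void)
{
	static const char *const board_compat[] = {
		"vendor,board-v2",	/* hypothetical, most specific */
		"vendor,board",		/* hypothetical fallback */
		NULL
	};

	return of_fdt_match(initial_boot_params, 0, board_compat);
}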

static void *unflatten_dt_alloc(void **mem, unsigned long size,
				unsigned long align)
{
	void *res;

	*mem = PTR_ALIGN(*mem, align);
	res = *mem;
	*mem += size;

	return res;
}

static void populate_properties(const void *blob,
				int offset,
				void **mem,
				struct device_node *np,
				const char *nodename,
				bool dryrun)
{
	struct property *pp, **pprev = NULL;
	int cur;
	bool has_name = false;

	pprev = &np->properties;
	for (cur = fdt_first_property_offset(blob, offset);
	     cur >= 0;
	     cur = fdt_next_property_offset(blob, cur)) {
		const __be32 *val;
		const char *pname;
		u32 sz;

		val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
		if (!val) {
			pr_warn("Cannot locate property at 0x%x\n", cur);
			continue;
		}

		if (!pname) {
			pr_warn("Cannot find property name at 0x%x\n", cur);
			continue;
		}

		if (!strcmp(pname, "name"))
			has_name = true;

		pp = unflatten_dt_alloc(mem, sizeof(struct property),
					__alignof__(struct property));
		if (dryrun)
			continue;

		/* We accept flattened tree phandles either in
		 * ePAPR-style "phandle" properties, or the
		 * legacy "linux,phandle" properties. If both
		 * appear and have different values, things
		 * will get weird. Don't do that.
		 */
		if (!strcmp(pname, "phandle") ||
		    !strcmp(pname, "linux,phandle")) {
			if (!np->phandle)
				np->phandle = be32_to_cpup(val);
		}

		/* And we process the "ibm,phandle" property
		 * used in pSeries dynamic device tree
		 * stuff
		 */
		if (!strcmp(pname, "ibm,phandle"))
			np->phandle = be32_to_cpup(val);

		pp->name = (char *)pname;
		pp->length = sz;
		pp->value = (__be32 *)val;
		*pprev = pp;
		pprev = &pp->next;
	}

	/* With version 0x10 we may not have the name property,
	 * recreate it here from the unit name if absent
	 */
	if (!has_name) {
		const char *p = nodename, *ps = p, *pa = NULL;
		int len;

		while (*p) {
			if ((*p) == '@')
				pa = p;
			else if ((*p) == '/')
				ps = p + 1;
			p++;
		}

		if (pa < ps)
			pa = p;
		len = (pa - ps) + 1;
		pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
					__alignof__(struct property));
		if (!dryrun) {
			pp->name = "name";
			pp->length = len;
			pp->value = pp + 1;
			*pprev = pp;
			pprev = &pp->next;
			memcpy(pp->value, ps, len - 1);
			((char *)pp->value)[len - 1] = 0;
			pr_debug("fixed up name for %s -> %s\n",
				 nodename, (char *)pp->value);
		}
	}

	if (!dryrun)
		*pprev = NULL;
}

static bool populate_node(const void *blob,
			  int offset,
			  void **mem,
			  struct device_node *dad,
			  struct device_node **pnp,
			  bool dryrun)
{
	struct device_node *np;
	const char *pathp;
	unsigned int l, allocl;

	pathp = fdt_get_name(blob, offset, &l);
	if (!pathp) {
		*pnp = NULL;
		return false;
	}

	allocl = ++l;

	np = unflatten_dt_alloc(mem, sizeof(struct device_node) + allocl,
				__alignof__(struct device_node));
	if (!dryrun) {
		char *fn;

		of_node_init(np);
		np->full_name = fn = ((char *)np) + sizeof(*np);

		memcpy(fn, pathp, l);

		if (dad != NULL) {
			np->parent = dad;
			np->sibling = dad->child;
			dad->child = np;
		}
	}

	populate_properties(blob, offset, mem, np, pathp, dryrun);
	if (!dryrun) {
		np->name = of_get_property(np, "name", NULL);
		np->type = of_get_property(np, "device_type", NULL);

		if (!np->name)
			np->name = "<NULL>";
		if (!np->type)
			np->type = "<NULL>";
	}

	*pnp = np;
	return true;
}

static void reverse_nodes(struct device_node *parent)
{
	struct device_node *child, *next;

	/* In-depth first */
	child = parent->child;
	while (child) {
		reverse_nodes(child);

		child = child->sibling;
	}

	/* Reverse the nodes in the child list */
	child = parent->child;
	parent->child = NULL;
	while (child) {
		next = child->sibling;

		child->sibling = parent->child;
		parent->child = child;
		child = next;
	}
}

/**
 * unflatten_dt_nodes - Alloc and populate a device_node from the flat tree
 * @blob: The parent device tree blob
 * @mem: Memory chunk to use for allocating device nodes and properties
 * @dad: Parent struct device_node
 * @nodepp: The device_node tree created by the call
 *
 * Returns the size of the unflattened device tree, or an error code.
 */
static int unflatten_dt_nodes(const void *blob,
			      void *mem,
			      struct device_node *dad,
			      struct device_node **nodepp)
{
	struct device_node *root;
	int offset = 0, depth = 0, initial_depth = 0;
#define FDT_MAX_DEPTH	64
	struct device_node *nps[FDT_MAX_DEPTH];
	void *base = mem;
	bool dryrun = !base;

	if (nodepp)
		*nodepp = NULL;

	/*
	 * We're unflattening a device sub-tree if @dad is valid. There are
	 * possibly multiple nodes in the first level of depth. We need to
	 * set @depth to 1 to make fdt_next_node() happy, as it bails
	 * immediately when a negative @depth is found. Otherwise, the device
	 * nodes except the first one won't be unflattened successfully.
	 */
	if (dad)
		depth = initial_depth = 1;

	root = dad;
	nps[depth] = dad;

	for (offset = 0;
	     offset >= 0 && depth >= initial_depth;
	     offset = fdt_next_node(blob, offset, &depth)) {
		if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
			continue;

		if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
		    !of_fdt_device_is_available(blob, offset))
			continue;

		if (!populate_node(blob, offset, &mem, nps[depth],
				   &nps[depth+1], dryrun))
			return mem - base;

		if (!dryrun && nodepp && !*nodepp)
			*nodepp = nps[depth+1];
		if (!dryrun && !root)
			root = nps[depth+1];
	}

	if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
		pr_err("Error %d processing FDT\n", offset);
		return -EINVAL;
	}

	/*
	 * Reverse the child list. Some drivers assume node order matches
	 * .dts node order.
	 */
	if (!dryrun)
		reverse_nodes(root);

	return mem - base;
}

/**
 * __unflatten_device_tree - create tree of device_nodes from flat blob
 *
 * unflattens a device-tree, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used.
 * @blob: The blob to expand
 * @dad: Parent device node
 * @mynodes: The device_node tree created by the call
 * @dt_alloc: An allocator that provides a virtual address to memory
 * for the resulting tree
 * @detached: if true set OF_DETACHED on @mynodes
 *
 * Returns NULL on failure or the memory chunk containing the unflattened
 * device tree on success.
 */
void *__unflatten_device_tree(const void *blob,
			      struct device_node *dad,
			      struct device_node **mynodes,
			      void *(*dt_alloc)(u64 size, u64 align),
			      bool detached)
{
	int size;
	void *mem;

	pr_debug(" -> unflatten_device_tree()\n");

	if (!blob) {
		pr_debug("No device tree pointer\n");
		return NULL;
	}

	pr_debug("Unflattening device tree:\n");
	pr_debug("magic: %08x\n", fdt_magic(blob));
	pr_debug("size: %08x\n", fdt_totalsize(blob));
	pr_debug("version: %08x\n", fdt_version(blob));

	if (fdt_check_header(blob)) {
		pr_err("Invalid device tree blob header\n");
		return NULL;
	}

	/* First pass, scan for size */
	size = unflatten_dt_nodes(blob, NULL, dad, NULL);
	if (size < 0)
		return NULL;

	size = ALIGN(size, 4);
	pr_debug(" size is %d, allocating...\n", size);

	/* Allocate memory for the expanded device tree */
	mem = dt_alloc(size + 4, __alignof__(struct device_node));
	if (!mem)
		return NULL;

	memset(mem, 0, size);

	*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);

	pr_debug(" unflattening %p...\n", mem);

	/* Second pass, do actual unflattening */
	unflatten_dt_nodes(blob, mem, dad, mynodes);
	if (be32_to_cpup(mem + size) != 0xdeadbeef)
		pr_warning("End of tree marker overwritten: %08x\n",
			   be32_to_cpup(mem + size));

	if (detached && mynodes) {
		of_node_set_flag(*mynodes, OF_DETACHED);
		pr_debug("unflattened tree is detached\n");
	}

	pr_debug(" <- unflatten_device_tree()\n");
	return mem;
}

static void *kernel_tree_alloc(u64 size, u64 align)
{
	return kzalloc(size, GFP_KERNEL);
}

static DEFINE_MUTEX(of_fdt_unflatten_mutex);

/**
 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
 * @blob: Flat device tree blob
 * @dad: Parent device node
 * @mynodes: The device tree created by the call
 *
 * unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used.
 *
 * Returns NULL on failure or the memory chunk containing the unflattened
 * device tree on success.
 */
void *of_fdt_unflatten_tree(const unsigned long *blob,
			    struct device_node *dad,
			    struct device_node **mynodes)
{
	void *mem;

	mutex_lock(&of_fdt_unflatten_mutex);
	mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc,
				      true);
	mutex_unlock(&of_fdt_unflatten_mutex);

	return mem;
}
EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
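
/*
 * Editorial sketch (not in the upstream file): unflattening an FDT blob that
 * has been loaded into a kernel buffer. "my_blob" is a hypothetical, properly
 * aligned copy of a flattened tree; the detached root node (if any) is
 * returned through the last argument.
 */
static inline struct device_node *example_unflatten(const void *my_blob)
{
	struct device_node *detached_root = NULL;

	of_fdt_unflatten_tree((const unsigned long *)my_blob, NULL,
			      &detached_root);
	return detached_root;	/* NULL if the blob could not be unflattened */
}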

/* Everything below here references initial_boot_params directly. */
int __initdata dt_root_addr_cells;
int __initdata dt_root_size_cells;

void *initial_boot_params;

#ifdef CONFIG_OF_EARLY_FLATTREE

static u32 of_fdt_crc32;

/**
 * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
 */
static int __init __reserved_mem_reserve_reg(unsigned long node,
					     const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t base, size;
	int len;
	const __be32 *prop;
	int nomap, first = 1;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop)
		return -ENOENT;

	if (len && len % t_len != 0) {
		pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
		       uname);
		return -EINVAL;
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	while (len >= t_len) {
		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size &&
		    early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
				 uname, &base, (unsigned long)size / SZ_1M);
		else
			pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %ld MiB\n",
				uname, &base, (unsigned long)size / SZ_1M);

		len -= t_len;
		if (first) {
			fdt_reserved_mem_save_node(node, uname, base, size);
			first = 0;
		}
	}
	return 0;
}

/**
 * __reserved_mem_check_root() - check that the #size-cells and #address-cells
 * provided in /reserved-memory match the values supported by the current
 * implementation, and that a ranges property is present
 */
static int __init __reserved_mem_check_root(unsigned long node)
{
	const __be32 *prop;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "ranges", NULL);
	if (!prop)
		return -EINVAL;
	return 0;
}

/**
 * __fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
 */
static int __init __fdt_scan_reserved_mem(unsigned long node, const char *uname,
					  int depth, void *data)
{
	static int found;
	int err;

	if (!found && depth == 1 && strcmp(uname, "reserved-memory") == 0) {
		if (__reserved_mem_check_root(node) != 0) {
			pr_err("Reserved memory: unsupported node format, ignoring\n");
			/* break scan */
			return 1;
		}
		found = 1;
		/* scan next node */
		return 0;
	} else if (!found) {
		/* scan next node */
		return 0;
	} else if (found && depth < 2) {
		/* scanning of /reserved-memory has been finished */
		return 1;
	}

	if (!of_fdt_device_is_available(initial_boot_params, node))
		return 0;

	err = __reserved_mem_reserve_reg(node, uname);
	if (err == -ENOENT && of_get_flat_dt_prop(node, "size", NULL))
		fdt_reserved_mem_save_node(node, uname, 0, 0);

	/* scan next node */
	return 0;
}

/**
 * early_init_fdt_scan_reserved_mem() - create reserved memory regions
 *
 * This function grabs memory from early allocator for device exclusive use
 * defined in device tree structures. It should be called by arch specific code
 * once the early allocator (i.e. memblock) has been fully activated.
 */
void __init early_init_fdt_scan_reserved_mem(void)
{
	int n;
	u64 base, size;

	if (!initial_boot_params)
		return;

	/* Process header /memreserve/ fields */
	for (n = 0; ; n++) {
		fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
		if (!size)
			break;
		early_init_dt_reserve_memory_arch(base, size, 0);
	}

	of_scan_flat_dt(__fdt_scan_reserved_mem, NULL);
	fdt_init_reserved_mem();
}

/**
 * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
 */
void __init early_init_fdt_reserve_self(void)
{
	if (!initial_boot_params)
		return;

	/* Reserve the dtb region */
	early_init_dt_reserve_memory_arch(__pa(initial_boot_params),
					  fdt_totalsize(initial_boot_params),
					  0);
}
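
/*
 * Editorial sketch (not in the upstream file): architecture setup code is
 * expected to call these helpers once memblock is usable, typically
 * reserving the blob itself before scanning /memreserve/ entries and the
 * /reserved-memory node.
 */
static void __init __maybe_unused example_arch_reserve_fdt(void)
{
	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();
}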

/**
 * of_scan_flat_dt - scan the flattened tree blob and call a callback on each node.
 * @it: callback function
 * @data: context data pointer
 *
 * This function is used to scan the flattened device-tree; it is used,
 * for example, to extract the memory information at boot before the
 * tree can be unflattened.
 */
int __init of_scan_flat_dt(int (*it)(unsigned long node,
				     const char *uname, int depth,
				     void *data),
			   void *data)
{
	const void *blob = initial_boot_params;
	const char *pathp;
	int offset, rc = 0, depth = -1;

	if (!blob)
		return 0;

	for (offset = fdt_next_node(blob, -1, &depth);
	     offset >= 0 && depth >= 0 && !rc;
	     offset = fdt_next_node(blob, offset, &depth)) {

		pathp = fdt_get_name(blob, offset, NULL);
		if (*pathp == '/')
			pathp = kbasename(pathp);
		rc = it(offset, pathp, depth, data);
	}
	return rc;
}
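
/*
 * Editorial sketch (not in the upstream file): a minimal of_scan_flat_dt()
 * callback that stops at the root node and records its "model" string.
 * A caller would invoke of_scan_flat_dt(example_find_model, &model).
 */
static int __init __maybe_unused example_find_model(unsigned long node,
						    const char *uname,
						    int depth, void *data)
{
	const char **model = data;

	if (depth != 0)
		return 0;	/* keep scanning */

	*model = of_get_flat_dt_prop(node, "model", NULL);
	return 1;		/* a non-zero return stops the scan */
}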

/**
 * of_scan_flat_dt_subnodes - scan sub-nodes of a node and call a callback on each.
 * @parent: parent node offset
 * @it: callback function
 * @data: context data pointer
 *
 * This function is used to scan sub-nodes of a node.
 */
int __init of_scan_flat_dt_subnodes(unsigned long parent,
				    int (*it)(unsigned long node,
					      const char *uname,
					      void *data),
				    void *data)
{
	const void *blob = initial_boot_params;
	int node;

	fdt_for_each_subnode(node, blob, parent) {
		const char *pathp;
		int rc;

		pathp = fdt_get_name(blob, node, NULL);
		if (*pathp == '/')
			pathp = kbasename(pathp);
		rc = it(node, pathp, data);
		if (rc)
			return rc;
	}
	return 0;
}

/**
 * of_get_flat_dt_subnode_by_name - get the subnode by given name
 *
 * @node: the parent node
 * @uname: the name of subnode
 * @return offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
 */
int of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
{
	return fdt_subnode_offset(initial_boot_params, node, uname);
}

/**
 * of_get_flat_dt_root - find the root node in the flat blob
 */
unsigned long __init of_get_flat_dt_root(void)
{
	return 0;
}

/**
 * of_get_flat_dt_size - Return the total size of the FDT
 */
int __init of_get_flat_dt_size(void)
{
	return fdt_totalsize(initial_boot_params);
}

/**
 * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
 *
 * This function can be used within an of_scan_flat_dt() callback to get
 * access to properties
 */
const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
				       int *size)
{
	return fdt_getprop(initial_boot_params, node, name, size);
}
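
/*
 * Editorial sketch (not in the upstream file): reading a 32-bit cell
 * property from the flat tree. FDT property values are big-endian, so the
 * raw cell must go through be32_to_cpup() before use.
 */
static u32 __init __maybe_unused example_root_addr_cells(void)
{
	const __be32 *prop;

	prop = of_get_flat_dt_prop(of_get_flat_dt_root(), "#address-cells",
				   NULL);
	return prop ? be32_to_cpup(prop) : OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
}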

/**
 * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
 * @node: node to test
 * @compat: compatible string to compare with compatible list.
 */
int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
{
	return of_fdt_is_compatible(initial_boot_params, node, compat);
}

/**
 * of_flat_dt_match - Return true if node matches a list of compatible values
 */
int __init of_flat_dt_match(unsigned long node, const char *const *compat)
{
	return of_fdt_match(initial_boot_params, node, compat);
}

/**
 * of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle
 */
uint32_t __init of_get_flat_dt_phandle(unsigned long node)
{
	return fdt_get_phandle(initial_boot_params, node);
}

struct fdt_scan_status {
	const char *name;
	int namelen;
	int depth;
	int found;
	int (*iterator)(unsigned long node, const char *uname, int depth, void *data);
	void *data;
};

const char * __init of_flat_dt_get_machine_name(void)
{
	const char *name;
	unsigned long dt_root = of_get_flat_dt_root();

	name = of_get_flat_dt_prop(dt_root, "model", NULL);
	if (!name)
		name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
	return name;
}

/**
 * of_flat_dt_match_machine - Iterate match tables to find matching machine.
 *
 * @default_match: A machine specific ptr to return in case of no match.
 * @get_next_compat: callback function to return next compatible match table.
 *
 * Iterate through machine match tables to find the best match for the machine
 * compatible string in the FDT.
 */
const void * __init of_flat_dt_match_machine(const void *default_match,
		const void * (*get_next_compat)(const char * const**))
{
	const void *data = NULL;
	const void *best_data = default_match;
	const char *const *compat;
	unsigned long dt_root;
	unsigned int best_score = ~1, score = 0;

	dt_root = of_get_flat_dt_root();
	while ((data = get_next_compat(&compat))) {
		score = of_flat_dt_match(dt_root, compat);
		if (score > 0 && score < best_score) {
			best_data = data;
			best_score = score;
		}
	}
	if (!best_data) {
		const char *prop;
		int size;

		pr_err("\n unrecognized device tree list:\n[ ");

		prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
		if (prop) {
			while (size > 0) {
				printk("'%s' ", prop);
				size -= strlen(prop) + 1;
				prop += strlen(prop) + 1;
			}
		}
		printk("]\n\n");
		return NULL;
	}

	pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());

	return best_data;
}
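
/*
 * Editorial sketch (not in the upstream file): the general shape of a
 * get_next_compat() callback. Architectures typically walk a table of
 * machine descriptors; here a single hypothetical entry is handed out once.
 */
static const void * __init __maybe_unused
example_get_next_compat(const char *const **match)
{
	static const char *const example_compat[] = { "vendor,board", NULL };
	static bool done;

	if (done)
		return NULL;
	done = true;
	*match = example_compat;
	return example_compat;	/* any non-NULL cookie identifying the machine */
}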

#ifdef CONFIG_BLK_DEV_INITRD
#ifndef __early_init_dt_declare_initrd
static void __early_init_dt_declare_initrd(unsigned long start,
					    unsigned long end)
{
	initrd_start = (unsigned long)__va(start);
	initrd_end = (unsigned long)__va(end);
	initrd_below_start_ok = 1;
}
#endif

/**
 * early_init_dt_check_for_initrd - Decode initrd location from flat tree
 * @node: reference to node containing initrd location ('chosen')
 */
static void __init early_init_dt_check_for_initrd(unsigned long node)
{
	u64 start, end;
	int len;
	const __be32 *prop;

	pr_debug("Looking for initrd properties... ");

	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
	if (!prop)
		return;
	start = of_read_number(prop, len/4);

	prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
	if (!prop)
		return;
	end = of_read_number(prop, len/4);

	__early_init_dt_declare_initrd(start, end);

	pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n",
		 (unsigned long long)start, (unsigned long long)end);
}
#else
static inline void early_init_dt_check_for_initrd(unsigned long node)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */

#ifdef CONFIG_SERIAL_EARLYCON

int __init early_init_dt_scan_chosen_stdout(void)
{
	int offset;
	const char *p, *q, *options = NULL;
	int l;
	const struct earlycon_id **p_match;
	const void *fdt = initial_boot_params;

	offset = fdt_path_offset(fdt, "/chosen");
	if (offset < 0)
		offset = fdt_path_offset(fdt, "/chosen@0");
	if (offset < 0)
		return -ENOENT;

	p = fdt_getprop(fdt, offset, "stdout-path", &l);
	if (!p)
		p = fdt_getprop(fdt, offset, "linux,stdout-path", &l);
	if (!p || !l)
		return -ENOENT;

	q = strchrnul(p, ':');
	if (*q != '\0')
		options = q + 1;
	l = q - p;

	/* Get the node specified by stdout-path */
	offset = fdt_path_offset_namelen(fdt, p, l);
	if (offset < 0) {
		pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
		return 0;
	}

	for (p_match = __earlycon_table; p_match < __earlycon_table_end;
	     p_match++) {
		const struct earlycon_id *match = *p_match;

		if (!match->compatible[0])
			continue;

		if (fdt_node_check_compatible(fdt, offset, match->compatible))
			continue;

		of_setup_earlycon(match, offset, options);
		return 0;
	}
	return -ENODEV;
}
#endif

/**
 * early_init_dt_scan_root - fetch the top level address and size cells
 */
int __init early_init_dt_scan_root(unsigned long node, const char *uname,
				   int depth, void *data)
{
	const __be32 *prop;

	if (depth != 0)
		return 0;

	dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
	dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	if (prop)
		dt_root_size_cells = be32_to_cpup(prop);
	pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (prop)
		dt_root_addr_cells = be32_to_cpup(prop);
	pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);

	/* break now */
	return 1;
}

u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
{
	const __be32 *p = *cellp;

	*cellp = p + s;
	return of_read_number(p, s);
}
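
/*
 * Editorial sketch (not in the upstream file): decoding one (base, size)
 * pair from a raw big-endian cell array, as a "reg" property with
 * #address-cells = 2 and #size-cells = 1 would encode base 0x80000000 and
 * a 64 MiB size.
 */
static void __init __maybe_unused example_decode_reg_entry(void)
{
	__be32 cells[3];
	const __be32 *p = cells;
	u64 base, size;

	cells[0] = cpu_to_be32(0);
	cells[1] = cpu_to_be32(0x80000000);	/* base: two address cells */
	cells[2] = cpu_to_be32(0x04000000);	/* size: one size cell (64 MiB) */

	base = dt_mem_next_cell(2, &p);		/* == 0x80000000 */
	size = dt_mem_next_cell(1, &p);		/* == 0x04000000 */

	pr_debug("example reg entry: base=0x%llx size=0x%llx\n", base, size);
}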

/**
 * early_init_dt_scan_memory - Look for and parse memory nodes
 */
int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
				     int depth, void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *reg, *endp;
	int l;
	bool hotpluggable;

	/* We are scanning "memory" nodes only */
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
	if (reg == NULL)
		reg = of_get_flat_dt_prop(node, "reg", &l);
	if (reg == NULL)
		return 0;

	endp = reg + (l / sizeof(__be32));
	hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);

	pr_debug("memory scan node %s, reg size %d,\n", uname, l);

	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		u64 base, size;

		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		if (size == 0)
			continue;
		pr_debug(" - %llx , %llx\n", (unsigned long long)base,
			 (unsigned long long)size);

		early_init_dt_add_memory_arch(base, size);

		if (!hotpluggable)
			continue;

		if (early_init_dt_mark_hotplug_memory_arch(base, size))
			pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
				base, base + size);
	}

	return 0;
}

int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
				     int depth, void *data)
{
	int l;
	const char *p;

	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);

	if (depth != 1 || !data ||
	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
		return 0;

	early_init_dt_check_for_initrd(node);

	/* Retrieve command line */
	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0)
		strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE));

	/*
	 * CONFIG_CMDLINE is meant to be a default in case nothing else
	 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
	 * is set in which case we override whatever was found earlier.
	 */
#ifdef CONFIG_CMDLINE
#if defined(CONFIG_CMDLINE_EXTEND)
	strlcat(data, " ", COMMAND_LINE_SIZE);
	strlcat(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
	strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#else
	/* No arguments from boot loader, use kernel's cmdline */
	if (!((char *)data)[0])
		strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
#endif /* CONFIG_CMDLINE */

	pr_debug("Command line is: %s\n", (char *)data);

	/* break now */
	return 1;
}

#ifdef CONFIG_HAVE_MEMBLOCK
#ifndef MIN_MEMBLOCK_ADDR
#define MIN_MEMBLOCK_ADDR	__pa(PAGE_OFFSET)
#endif
#ifndef MAX_MEMBLOCK_ADDR
#define MAX_MEMBLOCK_ADDR	((phys_addr_t)~0)
#endif

void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
{
	const u64 phys_offset = MIN_MEMBLOCK_ADDR;

	if (!PAGE_ALIGNED(base)) {
		if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
			pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
				base, base + size);
			return;
		}
		size -= PAGE_SIZE - (base & ~PAGE_MASK);
		base = PAGE_ALIGN(base);
	}
	size &= PAGE_MASK;

	if (base > MAX_MEMBLOCK_ADDR) {
		pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
			   base, base + size);
		return;
	}

	if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
		pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
			   ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
		size = MAX_MEMBLOCK_ADDR - base + 1;
	}

	if (base + size < phys_offset) {
		pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
			   base, base + size);
		return;
	}
	if (base < phys_offset) {
		pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
			   base, phys_offset);
		size -= phys_offset - base;
		base = phys_offset;
	}
	memblock_add(base, size);
}

int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size)
{
	return memblock_mark_hotplug(base, size);
}

int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
					phys_addr_t size, bool nomap)
{
	if (nomap)
		return memblock_remove(base, size);
	return memblock_reserve(base, size);
}
#else
void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
{
	WARN_ON(1);
}

int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size)
{
	return -ENOSYS;
}

int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
					phys_addr_t size, bool nomap)
{
	pr_err("Reserved memory not supported, ignoring range %pa - %pa%s\n",
	       &base, &size, nomap ? " (nomap)" : "");
	return -ENOSYS;
}
#endif

static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	return memblock_virt_alloc(size, align);
}

bool __init early_init_dt_verify(void *params)
{
	if (!params)
		return false;

	/* check device tree validity */
	if (fdt_check_header(params))
		return false;

	/* Setup flat device-tree pointer */
	initial_boot_params = params;
	of_fdt_crc32 = crc32_be(~0, initial_boot_params,
				fdt_totalsize(initial_boot_params));
	return true;
}

void __init early_init_dt_scan_nodes(void)
{
	/* Retrieve various information from the /chosen node */
	of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);

	/* Initialize {size,address}-cells info */
	of_scan_flat_dt(early_init_dt_scan_root, NULL);

	/* Setup memory, calling early_init_dt_add_memory_arch */
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
}

bool __init early_init_dt_scan(void *params)
{
	bool status;

	status = early_init_dt_verify(params);
	if (!status)
		return false;

	early_init_dt_scan_nodes();
	return true;
}
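
/*
 * Editorial sketch (not in the upstream file): the typical early-boot
 * sequence as driven by architecture code. "dt_virt" stands for the virtual
 * address of the bootloader-provided blob and is hypothetical.
 */
static void __init __maybe_unused example_boot_flow(void *dt_virt)
{
	/* Verify the header, then parse /chosen, root cells and memory nodes */
	if (!early_init_dt_scan(dt_virt))
		return;

	/* Later, once memblock can back the allocations, build device_nodes */
	unflatten_device_tree();
}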

/**
 * unflatten_device_tree - create tree of device_nodes from flat blob
 *
 * unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used.
 */
void __init unflatten_device_tree(void)
{
	__unflatten_device_tree(initial_boot_params, NULL, &of_root,
				early_init_dt_alloc_memory_arch, false);

	/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
	of_alias_scan(early_init_dt_alloc_memory_arch);

	unittest_unflatten_overlay_base();
}

/**
 * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
 *
 * Copies and unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used. This should only be used when the FDT memory has not been
 * reserved, such as when the FDT is built into the kernel init section.
 * If the FDT memory is already reserved then unflatten_device_tree()
 * should be used instead.
 */
void __init unflatten_and_copy_device_tree(void)
{
	int size;
	void *dt;

	if (!initial_boot_params) {
		pr_warn("No valid device tree found, continuing without\n");
		return;
	}

	size = fdt_totalsize(initial_boot_params);
	dt = early_init_dt_alloc_memory_arch(size,
					     roundup_pow_of_two(FDT_V17_SIZE));

	if (dt) {
		memcpy(dt, initial_boot_params, size);
		initial_boot_params = dt;
	}
	unflatten_device_tree();
}

#ifdef CONFIG_SYSFS
static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	memcpy(buf, initial_boot_params + off, count);
	return count;
}

static int __init of_fdt_raw_init(void)
{
	static struct bin_attribute of_fdt_raw_attr =
		__BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);

	if (!initial_boot_params)
		return 0;

	if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
				     fdt_totalsize(initial_boot_params))) {
		pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
		return 0;
	}
	of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
	return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
}
late_initcall(of_fdt_raw_init);
#endif

#endif /* CONFIG_OF_EARLY_FLATTREE */