/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */

#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/highuid.h>
#include <linux/cred.h>
#include <linux/securebits.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/projid.h>
#include <linux/fs_struct.h>

static struct kmem_cache *user_ns_cachep __read_mostly;
static DEFINE_MUTEX(userns_state_mutex);

static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *map);

static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
	/* Start with the same capabilities as init but useless for doing
	 * anything as the capabilities are bound to the new user namespace.
	 */
	cred->securebits = SECUREBITS_DEFAULT;
	cred->cap_inheritable = CAP_EMPTY_SET;
	cred->cap_permitted = CAP_FULL_SET;
	cred->cap_effective = CAP_FULL_SET;
	cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
	key_put(cred->request_key_auth);
	cred->request_key_auth = NULL;
#endif
	/* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
	cred->user_ns = user_ns;
}

/*
 * Create a new user namespace, deriving the creator from the user in the
 * passed credentials, and replacing that user with the new root user for the
 * new namespace.
 *
 * This is called by copy_creds(), which will finish setting the target task's
 * credentials.
 */
int create_user_ns(struct cred *new)
{
	struct user_namespace *ns, *parent_ns = new->user_ns;
	kuid_t owner = new->euid;
	kgid_t group = new->egid;
	int ret;

	if (parent_ns->level > 32)
		return -EUSERS;

	/*
	 * Verify that we can not violate the policy of which files
	 * may be accessed that is specified by the root directory,
	 * by verifying that the root directory is at the root of the
	 * mount namespace which allows all files to be accessed.
	 */
	if (current_chrooted())
		return -EPERM;

	/* The creator needs a mapping in the parent user namespace
	 * or else we won't be able to reasonably tell userspace who
	 * created a user_namespace.
	 */
	if (!kuid_has_mapping(parent_ns, owner) ||
	    !kgid_has_mapping(parent_ns, group))
		return -EPERM;

	ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
	if (!ns)
		return -ENOMEM;

	ret = ns_alloc_inum(&ns->ns);
	if (ret) {
		kmem_cache_free(user_ns_cachep, ns);
		return ret;
	}
	ns->ns.ops = &userns_operations;

	atomic_set(&ns->count, 1);
	/* Leave the new->user_ns reference with the new user namespace. */
	ns->parent = parent_ns;
	ns->level = parent_ns->level + 1;
	ns->owner = owner;
	ns->group = group;

	/* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
	mutex_lock(&userns_state_mutex);
	ns->flags = parent_ns->flags;
	mutex_unlock(&userns_state_mutex);

	set_cred_user_ns(new, ns);

#ifdef CONFIG_PERSISTENT_KEYRINGS
	init_rwsem(&ns->persistent_keyring_register_sem);
#endif
	return 0;
}

int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
{
	struct cred *cred;
	int err = -ENOMEM;

	if (!(unshare_flags & CLONE_NEWUSER))
		return 0;

	cred = prepare_creds();
	if (cred) {
		err = create_user_ns(cred);
		if (err)
			put_cred(cred);
		else
			*new_cred = cred;
	}

	return err;
}

void free_user_ns(struct user_namespace *ns)
{
	struct user_namespace *parent;

	do {
		parent = ns->parent;
#ifdef CONFIG_PERSISTENT_KEYRINGS
		key_put(ns->persistent_keyring_register);
#endif
		ns_free_inum(&ns->ns);
		kmem_cache_free(user_ns_cachep, ns);
		ns = parent;
	} while (atomic_dec_and_test(&parent->count));
}
EXPORT_SYMBOL(free_user_ns);
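
/*
 * Map a contiguous range of ids [id, id + count - 1] from this namespace
 * down to the kernel/parent id space.  The whole range must fall within a
 * single extent; otherwise (u32) -1 is returned.
 */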
static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
{
	unsigned idx, extents;
	u32 first, last, id2;

	id2 = id + count - 1;

	/* Find the matching extent */
	extents = map->nr_extents;
	smp_rmb();
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last &&
		    (id2 >= first && id2 <= last))
			break;
	}
	/* Map the id or note failure */
	if (idx < extents)
		id = (id - first) + map->extent[idx].lower_first;
	else
		id = (u32) -1;

	return id;
}
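
/*
 * Map a single id from this namespace down to the kernel/parent id space,
 * or return (u32) -1 if no extent covers it.
 */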
static u32 map_id_down(struct uid_gid_map *map, u32 id)
{
	unsigned idx, extents;
	u32 first, last;

	/* Find the matching extent */
	extents = map->nr_extents;
	smp_rmb();
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last)
			break;
	}
	/* Map the id or note failure */
	if (idx < extents)
		id = (id - first) + map->extent[idx].lower_first;
	else
		id = (u32) -1;

	return id;
}
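
/*
 * Map a single kernel/parent id back up into this namespace (the inverse of
 * map_id_down): search the extents by lower_first and return the
 * in-namespace id, or (u32) -1 if no extent covers it.
 */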
static u32 map_id_up(struct uid_gid_map *map, u32 id)
{
	unsigned idx, extents;
	u32 first, last;

	/* Find the matching extent */
	extents = map->nr_extents;
	smp_rmb();
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].lower_first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last)
			break;
	}
	/* Map the id or note failure */
	if (idx < extents)
		id = (id - first) + map->extent[idx].first;
	else
		id = (u32) -1;

	return id;
}

/**
 * make_kuid - Map a user-namespace uid pair into a kuid.
 * @ns:  User namespace that the uid is in
 * @uid: User identifier
 *
 * Maps a user-namespace uid pair into a kernel internal kuid,
 * and returns that kuid.
 *
 * When there is no mapping defined for the user-namespace uid
 * pair INVALID_UID is returned.  Callers are expected to test
 * for and handle INVALID_UID being returned.  INVALID_UID
 * may be tested for using uid_valid().
 */
kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
{
	/* Map the uid to a global kernel uid */
	return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
}
EXPORT_SYMBOL(make_kuid);

/**
 * from_kuid - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kuid has no mapping in @targ (uid_t)-1 is returned.
 */
uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
{
	/* Map the uid from a global kernel uid */
	return map_id_up(&targ->uid_map, __kuid_val(kuid));
}
EXPORT_SYMBOL(from_kuid);

/**
 * from_kuid_munged - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kuid, from_kuid_munged never fails and always
 * returns a valid uid.  This makes from_kuid_munged appropriate
 * for use in syscalls like stat and getuid where failing the
 * system call and failing to provide a valid uid are not
 * options.
 *
 * If @kuid has no mapping in @targ overflowuid is returned.
 */
uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
{
	uid_t uid;

	uid = from_kuid(targ, kuid);
	if (uid == (uid_t) -1)
		uid = overflowuid;
	return uid;
}
EXPORT_SYMBOL(from_kuid_munged);

/**
 * make_kgid - Map a user-namespace gid pair into a kgid.
 * @ns:  User namespace that the gid is in
 * @gid: Group identifier
 *
 * Maps a user-namespace gid pair into a kernel internal kgid,
 * and returns that kgid.
 *
 * When there is no mapping defined for the user-namespace gid
 * pair INVALID_GID is returned.  Callers are expected to test
 * for and handle INVALID_GID being returned.  INVALID_GID may be
 * tested for using gid_valid().
 */
kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
{
	/* Map the gid to a global kernel gid */
	return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
}
EXPORT_SYMBOL(make_kgid);

/**
 * from_kgid - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kgid has no mapping in @targ (gid_t)-1 is returned.
 */
gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
{
	/* Map the gid from a global kernel gid */
	return map_id_up(&targ->gid_map, __kgid_val(kgid));
}
EXPORT_SYMBOL(from_kgid);

/**
 * from_kgid_munged - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kgid, from_kgid_munged never fails and always
 * returns a valid gid.  This makes from_kgid_munged appropriate
 * for use in syscalls like stat and getgid where failing the
 * system call and failing to provide a valid gid are not options.
 *
 * If @kgid has no mapping in @targ overflowgid is returned.
 */
gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
{
	gid_t gid;

	gid = from_kgid(targ, kgid);
	if (gid == (gid_t) -1)
		gid = overflowgid;
	return gid;
}
EXPORT_SYMBOL(from_kgid_munged);

/**
 * make_kprojid - Map a user-namespace projid pair into a kprojid.
 * @ns:     User namespace that the projid is in
 * @projid: Project identifier
 *
 * Maps a user-namespace projid pair into a kernel internal kprojid,
 * and returns that kprojid.
 *
 * When there is no mapping defined for the user-namespace projid
 * pair INVALID_PROJID is returned.  Callers are expected to test
 * for and handle INVALID_PROJID being returned.  INVALID_PROJID
 * may be tested for using projid_valid().
 */
kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
{
	/* Map the projid to a global kernel projid */
	return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
}
EXPORT_SYMBOL(make_kprojid);

/**
 * from_kprojid - Create a projid from a kprojid user-namespace pair.
 * @targ:    The user namespace we want a projid in.
 * @kprojid: The kernel internal project identifier to start with.
 *
 * Map @kprojid into the user-namespace specified by @targ and
 * return the resulting projid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kprojid has no mapping in @targ (projid_t)-1 is returned.
 */
projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
{
	/* Map the projid from a global kernel projid */
	return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
}
EXPORT_SYMBOL(from_kprojid);

/**
 * from_kprojid_munged - Create a projid from a kprojid user-namespace pair.
 * @targ:    The user namespace we want a projid in.
 * @kprojid: The kernel internal projid to start with.
 *
 * Map @kprojid into the user-namespace specified by @targ and
 * return the resulting projid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kprojid, from_kprojid_munged never fails and always
 * returns a valid projid.  This makes from_kprojid_munged
 * appropriate for use in syscalls like stat where failing the
 * system call and failing to provide a valid projid are not
 * options.
 *
 * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
 */
projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
{
	projid_t projid;

	projid = from_kprojid(targ, kprojid);
	if (projid == (projid_t) -1)
		projid = OVERFLOW_PROJID;
	return projid;
}
EXPORT_SYMBOL(from_kprojid_munged);
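
/*
 * seq_file ->show() callbacks for /proc/<pid>/uid_map, gid_map and
 * projid_map.  Each extent is printed as "first lower_first count", with
 * the lower ids expressed relative to the opener's user namespace (or to
 * the parent namespace when the map is read from inside the namespace
 * itself).
 */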
static int uid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	uid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		   extent->first,
		   lower,
		   extent->count);

	return 0;
}

static int gid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	gid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		   extent->first,
		   lower,
		   extent->count);

	return 0;
}

static int projid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	projid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		   extent->first,
		   lower,
		   extent->count);

	return 0;
}
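
/*
 * seq_file iteration helpers: m_start() returns the extent at position
 * *ppos, or NULL once all extents of the map have been emitted; m_next()
 * simply advances the position and restarts the lookup.
 */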
static void *m_start(struct seq_file *seq, loff_t *ppos,
		     struct uid_gid_map *map)
{
	struct uid_gid_extent *extent = NULL;
	loff_t pos = *ppos;

	if (pos < map->nr_extents)
		extent = &map->extent[pos];

	return extent;
}

static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->uid_map);
}

static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->gid_map);
}

static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->projid_map);
}

static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return seq->op->start(seq, pos);
}

static void m_stop(struct seq_file *seq, void *v)
{
	return;
}

const struct seq_operations proc_uid_seq_operations = {
	.start = uid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = uid_m_show,
};

const struct seq_operations proc_gid_seq_operations = {
	.start = gid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = gid_m_show,
};

const struct seq_operations proc_projid_seq_operations = {
	.start = projid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = projid_m_show,
};
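
/*
 * Check whether either the upper (in-namespace) or the lower range of a
 * candidate extent intersects any extent already accepted into new_map.
 * Overlapping extents would make the mapping ambiguous, so such writes
 * are rejected.
 */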
static bool mappings_overlap(struct uid_gid_map *new_map,
			     struct uid_gid_extent *extent)
{
	u32 upper_first, lower_first, upper_last, lower_last;
	unsigned idx;

	upper_first = extent->first;
	lower_first = extent->lower_first;
	upper_last = upper_first + extent->count - 1;
	lower_last = lower_first + extent->count - 1;

	for (idx = 0; idx < new_map->nr_extents; idx++) {
		u32 prev_upper_first, prev_lower_first;
		u32 prev_upper_last, prev_lower_last;
		struct uid_gid_extent *prev;

		prev = &new_map->extent[idx];

		prev_upper_first = prev->first;
		prev_lower_first = prev->lower_first;
		prev_upper_last = prev_upper_first + prev->count - 1;
		prev_lower_last = prev_lower_first + prev->count - 1;

		/* Does the upper range intersect a previous extent? */
		if ((prev_upper_first <= upper_last) &&
		    (prev_upper_last >= upper_first))
			return true;

		/* Does the lower range intersect a previous extent? */
		if ((prev_lower_first <= lower_last) &&
		    (prev_lower_last >= lower_first))
			return true;
	}
	return false;
}
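
/*
 * map_write - common guts of writing /proc/<pid>/{uid,gid,projid}_map.
 * Parses up to UID_GID_MAP_MAX_EXTENTS lines of "first lower_first count",
 * checks the writer's privileges via new_idmap_permitted(), translates the
 * lower ids through the parent's map into the kernel id space and installs
 * the result.  A map may only be written once.
 */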
static ssize_t map_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos,
			 int cap_setid,
			 struct uid_gid_map *map,
			 struct uid_gid_map *parent_map)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct uid_gid_map new_map;
	unsigned idx;
	struct uid_gid_extent *extent = NULL;
	unsigned long page = 0;
	char *kbuf, *pos, *next_line;
	ssize_t ret = -EINVAL;

	/*
	 * The userns_state_mutex serializes all writes to any given map.
	 *
	 * Any map is only ever written once.
	 *
	 * An id map fits within 1 cache line on most architectures.
	 *
	 * On read nothing needs to be done unless you are on an
	 * architecture with a crazy cache coherency model like alpha.
	 *
	 * There is a one time data dependency between reading the
	 * count of the extents and the values of the extents.  The
	 * desired behavior is to see the values of the extents that
	 * were written before the count of the extents.
	 *
	 * To achieve this smp_wmb() is used to guarantee the write
	 * order and smp_rmb() is used to guarantee the read order, so
	 * that architectures with weak memory models do not return
	 * stale data.
	 */
	mutex_lock(&userns_state_mutex);

	ret = -EPERM;
	/* Only allow one successful write to the map */
	if (map->nr_extents != 0)
		goto out;

	/*
	 * Adjusting namespace settings requires capabilities on the target.
	 */
	if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
		goto out;

	/* Get a buffer */
	ret = -ENOMEM;
	page = __get_free_page(GFP_TEMPORARY);
	kbuf = (char *) page;
	if (!page)
		goto out;

	/* Only allow < page size writes at the beginning of the file */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= PAGE_SIZE))
		goto out;

	/* Slurp in the user data */
	ret = -EFAULT;
	if (copy_from_user(kbuf, buf, count))
		goto out;
	kbuf[count] = '\0';

	/* Parse the user data */
	ret = -EINVAL;
	pos = kbuf;
	new_map.nr_extents = 0;
	for (; pos; pos = next_line) {
		extent = &new_map.extent[new_map.nr_extents];

		/* Find the end of line and ensure I don't look past it */
		next_line = strchr(pos, '\n');
		if (next_line) {
			*next_line = '\0';
			next_line++;
			if (*next_line == '\0')
				next_line = NULL;
		}

		pos = skip_spaces(pos);
		extent->first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent->lower_first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent->count = simple_strtoul(pos, &pos, 10);
		if (*pos && !isspace(*pos))
			goto out;

		/* Verify there is no trailing junk on the line */
		pos = skip_spaces(pos);
		if (*pos != '\0')
			goto out;

		/* Verify we have been given valid starting values */
		if ((extent->first == (u32) -1) ||
		    (extent->lower_first == (u32) -1))
			goto out;

		/* Verify count is not zero and does not cause the
		 * extent to wrap
		 */
		if ((extent->first + extent->count) <= extent->first)
			goto out;
		if ((extent->lower_first + extent->count) <=
		     extent->lower_first)
			goto out;

		/* Do the ranges in extent overlap any previous extents? */
		if (mappings_overlap(&new_map, extent))
			goto out;

		new_map.nr_extents++;

		/* Fail if the file contains too many extents */
		if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) &&
		    (next_line != NULL))
			goto out;
	}
	/* Be very certain the new map actually exists */
	if (new_map.nr_extents == 0)
		goto out;

	ret = -EPERM;
	/* Validate the user is allowed to use the user ids mapped to. */
	if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
		goto out;

	/* Map the lower ids from the parent user namespace to the
	 * kernel global id space.
	 */
	for (idx = 0; idx < new_map.nr_extents; idx++) {
		u32 lower_first;
		extent = &new_map.extent[idx];

		lower_first = map_id_range_down(parent_map,
						extent->lower_first,
						extent->count);

		/* Fail if we can not map the specified extent to
		 * the kernel global id space.
		 */
		if (lower_first == (u32) -1)
			goto out;

		extent->lower_first = lower_first;
	}

	/* Install the map */
	memcpy(map->extent, new_map.extent,
	       new_map.nr_extents * sizeof(new_map.extent[0]));
	smp_wmb();
	map->nr_extents = new_map.nr_extents;

	*ppos = count;
	ret = count;
out:
	mutex_unlock(&userns_state_mutex);
	if (page)
		free_page(page);
	return ret;
}
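
/*
 * Writers for /proc/<pid>/uid_map, gid_map and projid_map.  The initial
 * user namespace has no parent and its maps may never be written; the
 * opener must also live in the namespace being configured or in its
 * parent.
 */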
ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
			   size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	return map_write(file, buf, size, ppos, CAP_SETUID,
			 &ns->uid_map, &ns->parent->uid_map);
}

ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
			   size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	return map_write(file, buf, size, ppos, CAP_SETGID,
			 &ns->gid_map, &ns->parent->gid_map);
}

ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
			      size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	/* Anyone can set any valid project id, no capability needed */
	return map_write(file, buf, size, ppos, -1,
			 &ns->projid_map, &ns->parent->projid_map);
}
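
/*
 * new_idmap_permitted - policy check for installing a new id mapping.
 *
 * Without privilege only a single one-id extent is allowed, written by the
 * namespace owner and mapping onto the writer's own euid (for uid_map) or
 * egid (for gid_map, and only while setgroups() is denied).  Mappings that
 * need no privilege at all (projid_map) are always allowed.  Anything else
 * requires the writer and the opener of the map file to hold the relevant
 * capability (CAP_SETUID or CAP_SETGID) over the parent user namespace.
 */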
static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *new_map)
{
	const struct cred *cred = file->f_cred;

	/* Don't allow mappings that would allow anything that wouldn't
	 * be allowed without the establishment of unprivileged mappings.
	 */
	if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
	    uid_eq(ns->owner, cred->euid)) {
		u32 id = new_map->extent[0].lower_first;
		if (cap_setid == CAP_SETUID) {
			kuid_t uid = make_kuid(ns->parent, id);
			if (uid_eq(uid, cred->euid))
				return true;
		} else if (cap_setid == CAP_SETGID) {
			kgid_t gid = make_kgid(ns->parent, id);
			if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
			    gid_eq(gid, cred->egid))
				return true;
		}
	}

	/* Allow anyone to set a mapping that doesn't require privilege */
	if (!cap_valid(cap_setid))
		return true;

	/* Allow the specified ids if we have the appropriate capability
	 * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
	 * And the opener of the id file also had the appropriate capability.
	 */
	if (ns_capable(ns->parent, cap_setid) &&
	    file_ns_capable(file, ns->parent, cap_setid))
		return true;

	return false;
}
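
/*
 * /proc/<pid>/setgroups: reports and controls whether setgroups(2) may be
 * used inside this user namespace.  "deny" must be written before a gid
 * mapping is installed and cannot be reverted afterwards.
 */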
int proc_setgroups_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	unsigned long userns_flags = ACCESS_ONCE(ns->flags);

	seq_printf(seq, "%s\n",
		   (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
		   "allow" : "deny");
	return 0;
}

ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	char kbuf[8], *pos;
	bool setgroups_allowed;
	ssize_t ret;

	/* Only allow a very narrow range of strings to be written */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= sizeof(kbuf)))
		goto out;

	/* What was written? */
	ret = -EFAULT;
	if (copy_from_user(kbuf, buf, count))
		goto out;
	kbuf[count] = '\0';
	pos = kbuf;

	/* What is being requested? */
	ret = -EINVAL;
	if (strncmp(pos, "allow", 5) == 0) {
		pos += 5;
		setgroups_allowed = true;
	}
	else if (strncmp(pos, "deny", 4) == 0) {
		pos += 4;
		setgroups_allowed = false;
	}
	else
		goto out;

	/* Verify there is no trailing junk on the line */
	pos = skip_spaces(pos);
	if (*pos != '\0')
		goto out;

	ret = -EPERM;
	mutex_lock(&userns_state_mutex);
	if (setgroups_allowed) {
		/* Enabling setgroups after setgroups has been disabled
		 * is not allowed.
		 */
		if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
			goto out_unlock;
	} else {
		/* Permanently disabling setgroups after setgroups has
		 * been enabled by writing the gid_map is not allowed.
		 */
		if (ns->gid_map.nr_extents != 0)
			goto out_unlock;
		ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
	}
	mutex_unlock(&userns_state_mutex);

	/* Report a successful write */
	*ppos = count;
	ret = count;
out:
	return ret;
out_unlock:
	mutex_unlock(&userns_state_mutex);
	goto out;
}
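
/*
 * userns_may_setgroups - may setgroups(2) be called in this namespace?
 * Requires that a gid mapping has been established and that setgroups has
 * not been disabled via /proc/<pid>/setgroups.
 */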
bool userns_may_setgroups(const struct user_namespace *ns)
{
	bool allowed;

	mutex_lock(&userns_state_mutex);
	/* It is not safe to use setgroups until a gid mapping in
	 * the user namespace has been established.
	 */
	allowed = ns->gid_map.nr_extents != 0;
	/* Is setgroups allowed? */
	allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
	mutex_unlock(&userns_state_mutex);

	return allowed;
}

static inline struct user_namespace *to_user_ns(struct ns_common *ns)
{
	return container_of(ns, struct user_namespace, ns);
}

static struct ns_common *userns_get(struct task_struct *task)
{
	struct user_namespace *user_ns;

	rcu_read_lock();
	user_ns = get_user_ns(__task_cred(task)->user_ns);
	rcu_read_unlock();

	return user_ns ? &user_ns->ns : NULL;
}

static void userns_put(struct ns_common *ns)
{
	put_user_ns(to_user_ns(ns));
}
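
/*
 * userns_install - enter an existing user namespace via setns(2).  The
 * caller must be single threaded, must not share its fs_struct, must hold
 * CAP_SYS_ADMIN in the target namespace, and may not re-enter its current
 * namespace (which could otherwise be used to regain capabilities).
 */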
static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct user_namespace *user_ns = to_user_ns(ns);
	struct cred *cred;

	/* Don't allow gaining capabilities by reentering
	 * the same user namespace.
	 */
	if (user_ns == current_user_ns())
		return -EINVAL;

	/* Threaded processes may not enter a different user namespace */
	if (atomic_read(&current->mm->mm_users) > 1)
		return -EINVAL;

	if (current->fs->users != 1)
		return -EINVAL;

	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cred = prepare_creds();
	if (!cred)
		return -ENOMEM;

	put_user_ns(cred->user_ns);
	set_cred_user_ns(cred, get_user_ns(user_ns));

	return commit_creds(cred);
}

const struct proc_ns_operations userns_operations = {
	.name		= "user",
	.type		= CLONE_NEWUSER,
	.get		= userns_get,
	.put		= userns_put,
	.install	= userns_install,
};

static __init int user_namespaces_init(void)
{
	user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
	return 0;
}
subsys_initcall(user_namespaces_init);