logger.c

/*
 * drivers/misc/logger.c
 *
 * A Logging Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/sched.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/time.h>
#include "logger.h"

#include <asm/ioctls.h>
/*
 * struct logger_log - represents a specific log, such as 'main' or 'radio'
 *
 * This structure lives from module insertion until module removal, so it does
 * not need additional reference counting. The structure is protected by the
 * mutex 'mutex'.
 */
struct logger_log {
	unsigned char *buffer;		/* the ring buffer itself */
	struct miscdevice misc;		/* misc device representing the log */
	wait_queue_head_t wq;		/* wait queue for readers */
	struct list_head readers;	/* this log's readers */
	struct mutex mutex;		/* mutex protecting buffer */
	size_t w_off;			/* current write head offset */
	size_t head;			/* new readers start here */
	size_t size;			/* size of the log */
};
/*
 * struct logger_reader - a logging device open for reading
 *
 * This object lives from open to release, so we don't need additional
 * reference counting. The structure is protected by log->mutex.
 */
struct logger_reader {
	struct logger_log *log;		/* associated log */
	struct list_head list;		/* entry in logger_log's list */
	size_t r_off;			/* current read head offset */
	bool r_all;			/* reader can read all entries */
	int r_ver;			/* reader ABI version */
};
/* logger_offset - returns index 'n' into the log via (optimized) modulus */
#define logger_offset(n)	((n) & (log->size - 1))
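
/*
 * Example: the logs defined below use a 256 KB (0x40000 byte) buffer, so
 * logger_offset(0x40005) == (0x40005 & 0x3ffff) == 0x5.  The mask is only
 * a valid modulus because the buffer size is a power of two.
 */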

/*
 * file_get_log - Given a file structure, return the associated log
 *
 * This isn't aesthetic. We have several goals:
 *
 *	1) Need to quickly obtain the associated log during an I/O operation
 *	2) Readers need to maintain state (logger_reader)
 *	3) Writers need to be very fast (open() should be a near no-op)
 *
 * In the reader case, we can trivially go file->logger_reader->logger_log.
 * For a writer, we don't want to maintain a logger_reader, so we just go
 * file->logger_log. Thus what file->private_data points at depends on whether
 * or not the file was opened for reading. This function hides that dirtiness.
 */
static inline struct logger_log *file_get_log(struct file *file)
{
	if (file->f_mode & FMODE_READ) {
		struct logger_reader *reader = file->private_data;
		return reader->log;
	} else
		return file->private_data;
}

/*
 * get_entry_header - returns a pointer to the logger_entry header within
 * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must
 * be provided. Typically the return value will be a pointer within
 * 'log->buffer'. However, a pointer to 'scratch' may be returned if
 * the log entry spans the end and beginning of the circular buffer.
 */
static struct logger_entry *get_entry_header(struct logger_log *log,
		size_t off, struct logger_entry *scratch)
{
	size_t len = min(sizeof(struct logger_entry), log->size - off);
	if (len != sizeof(struct logger_entry)) {
		memcpy(((void *) scratch), log->buffer + off, len);
		memcpy(((void *) scratch) + len, log->buffer,
			sizeof(struct logger_entry) - len);
		return scratch;
	}

	return (struct logger_entry *) (log->buffer + off);
}

/*
 * get_entry_msg_len - Grabs the length of the message of the entry
 * starting from 'off'.
 *
 * Caller needs to hold log->mutex.
 */
static __u32 get_entry_msg_len(struct logger_log *log, size_t off)
{
	struct logger_entry scratch;
	struct logger_entry *entry;

	entry = get_entry_header(log, off, &scratch);
	return entry->len;
}

static size_t get_user_hdr_len(int ver)
{
	if (ver < 2)
		return sizeof(struct user_logger_entry_compat);
	else
		return sizeof(struct logger_entry);
}
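
/*
 * copy_header_to_user - copies the entry header to the user-space buffer
 * 'buf' in the layout the reader's ABI version expects.  Version 1 readers
 * receive the smaller user_logger_entry_compat header, which does not carry
 * the euid and hdr_size fields of the version 2 struct logger_entry.
 */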
static ssize_t copy_header_to_user(int ver, struct logger_entry *entry,
					char __user *buf)
{
	void *hdr;
	size_t hdr_len;
	struct user_logger_entry_compat v1;

	if (ver < 2) {
		v1.len = entry->len;
		v1.__pad = 0;
		v1.pid = entry->pid;
		v1.tid = entry->tid;
		v1.sec = entry->sec;
		v1.nsec = entry->nsec;
		hdr = &v1;
		hdr_len = sizeof(struct user_logger_entry_compat);
	} else {
		hdr = entry;
		hdr_len = sizeof(struct logger_entry);
	}

	return copy_to_user(buf, hdr, hdr_len);
}

/*
 * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
 * user-space buffer 'buf'. Returns 'count' on success.
 *
 * Caller must hold log->mutex.
 */
static ssize_t do_read_log_to_user(struct logger_log *log,
				   struct logger_reader *reader,
				   char __user *buf,
				   size_t count)
{
	struct logger_entry scratch;
	struct logger_entry *entry;
	size_t len;
	size_t msg_start;

	/*
	 * First, copy the header to userspace, using the version of
	 * the header requested
	 */
	entry = get_entry_header(log, reader->r_off, &scratch);
	if (copy_header_to_user(reader->r_ver, entry, buf))
		return -EFAULT;

	count -= get_user_hdr_len(reader->r_ver);
	buf += get_user_hdr_len(reader->r_ver);
	msg_start = logger_offset(reader->r_off + sizeof(struct logger_entry));

	/*
	 * We read from the msg in two disjoint operations. First, we read from
	 * the current msg head offset up to 'count' bytes or to the end of
	 * the log, whichever comes first.
	 */
	len = min(count, log->size - msg_start);
	if (copy_to_user(buf, log->buffer + msg_start, len))
		return -EFAULT;

	/*
	 * Second, we read any remaining bytes, starting back at the head of
	 * the log.
	 */
	if (count != len)
		if (copy_to_user(buf + len, log->buffer, count - len))
			return -EFAULT;

	reader->r_off = logger_offset(reader->r_off +
		sizeof(struct logger_entry) + count);

	return count + get_user_hdr_len(reader->r_ver);
}

/*
 * get_next_entry_by_uid - Starting at 'off', returns an offset into
 * 'log->buffer' which contains the first entry readable by 'euid'
 */
static size_t get_next_entry_by_uid(struct logger_log *log,
		size_t off, uid_t euid)
{
	while (off != log->w_off) {
		struct logger_entry *entry;
		struct logger_entry scratch;
		size_t next_len;

		entry = get_entry_header(log, off, &scratch);

		if (entry->euid == euid)
			return off;

		next_len = sizeof(struct logger_entry) + entry->len;
		off = logger_offset(off + next_len);
	}

	return off;
}

/*
 * logger_read - our log's read() method
 *
 * Behavior:
 *
 *	- O_NONBLOCK works
 *	- If there are no log entries to read, blocks until log is written to
 *	- Atomically reads exactly one log entry
 *
 * Will set errno to EINVAL if read buffer is insufficient to hold next entry.
 */
static ssize_t logger_read(struct file *file, char __user *buf,
			   size_t count, loff_t *pos)
{
	struct logger_reader *reader = file->private_data;
	struct logger_log *log = reader->log;
	ssize_t ret;
	DEFINE_WAIT(wait);

start:
	while (1) {
		prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&log->mutex);
		ret = (log->w_off == reader->r_off);
		mutex_unlock(&log->mutex);
		if (!ret)
			break;

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&log->wq, &wait);
	if (ret)
		return ret;

	mutex_lock(&log->mutex);

	if (!reader->r_all)
		reader->r_off = get_next_entry_by_uid(log,
			reader->r_off, current_euid());

	/* is there still something to read or did we race? */
	if (unlikely(log->w_off == reader->r_off)) {
		mutex_unlock(&log->mutex);
		goto start;
	}

	/* get the size of the next entry */
	ret = get_user_hdr_len(reader->r_ver) +
		get_entry_msg_len(log, reader->r_off);
	if (count < ret) {
		ret = -EINVAL;
		goto out;
	}

	/* get exactly one entry from the log */
	ret = do_read_log_to_user(log, reader, buf, ret);

out:
	mutex_unlock(&log->mutex);

	return ret;
}

/*
 * get_next_entry - return the offset of the first valid entry at least 'len'
 * bytes after 'off'.
 *
 * Caller must hold log->mutex.
 */
static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
{
	size_t count = 0;

	do {
		size_t nr = sizeof(struct logger_entry) +
			get_entry_msg_len(log, off);
		off = logger_offset(off + nr);
		count += nr;
	} while (count < len);

	return off;
}

/*
 * clock_interval - is a < c < b in mod-space? Put another way, does the line
 * from a to b cross c?
 */
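/*
 * For example, clock_interval(90, 10, 95) is true: the interval from 90
 * wrapping around to 10 crosses 95.  clock_interval(10, 30, 95) is false,
 * since 95 lies outside (10, 30].
 */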
static inline int clock_interval(size_t a, size_t b, size_t c)
{
	if (b < a) {
		if (a < c || b >= c)
			return 1;
	} else {
		if (a < c && b >= c)
			return 1;
	}

	return 0;
}

/*
 * fix_up_readers - walk the list of all readers and "fix up" any who were
 * lapped by the writer; also do the same for the default "start head".
 * We do this by "pulling forward" the readers and start head to the first
 * entry after the new write head.
 *
 * The caller needs to hold log->mutex.
 */
static void fix_up_readers(struct logger_log *log, size_t len)
{
	size_t old = log->w_off;
	size_t new = logger_offset(old + len);
	struct logger_reader *reader;

	if (clock_interval(old, new, log->head))
		log->head = get_next_entry(log, log->head, len);

	list_for_each_entry(reader, &log->readers, list)
		if (clock_interval(old, new, reader->r_off))
			reader->r_off = get_next_entry(log, reader->r_off, len);
}

/*
 * do_write_log - writes 'count' bytes from 'buf' to 'log'
 *
 * The caller needs to hold log->mutex.
 */
static void do_write_log(struct logger_log *log, const void *buf, size_t count)
{
	size_t len;

	len = min(count, log->size - log->w_off);
	memcpy(log->buffer + log->w_off, buf, len);

	if (count != len)
		memcpy(log->buffer, buf + len, count - len);

	log->w_off = logger_offset(log->w_off + count);
}

/*
 * do_write_log_from_user - writes 'count' bytes from the user-space buffer
 * 'buf' to the log 'log'
 *
 * The caller needs to hold log->mutex.
 *
 * Returns 'count' on success, negative error code on failure.
 */
static ssize_t do_write_log_from_user(struct logger_log *log,
				      const void __user *buf, size_t count)
{
	size_t len;

	len = min(count, log->size - log->w_off);
	if (len && copy_from_user(log->buffer + log->w_off, buf, len))
		return -EFAULT;

	if (count != len)
		if (copy_from_user(log->buffer, buf + len, count - len))
			return -EFAULT;

	log->w_off = logger_offset(log->w_off + count);

	return count;
}

/*
 * logger_aio_write - our write method, implementing support for write(),
 * writev(), and aio_write(). Writes are our fast path, and we try to optimize
 * them above all else.
 */
ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t ppos)
{
	struct logger_log *log = file_get_log(iocb->ki_filp);
	size_t orig = log->w_off;
	struct logger_entry header;
	struct timespec now;
	ssize_t ret = 0;

	now = current_kernel_time();

	header.pid = current->tgid;
	header.tid = current->pid;
	header.sec = now.tv_sec;
	header.nsec = now.tv_nsec;
	header.euid = current_euid();
	header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
	header.hdr_size = sizeof(struct logger_entry);

	/* null writes succeed, return zero */
	if (unlikely(!header.len))
		return 0;

	mutex_lock(&log->mutex);

	/*
	 * Fix up any readers, pulling them forward to the first readable
	 * entry after (what will be) the new write offset. We do this now
	 * because if we partially fail, we can end up with clobbered log
	 * entries that encroach on readable buffer.
	 */
	fix_up_readers(log, sizeof(struct logger_entry) + header.len);

	do_write_log(log, &header, sizeof(struct logger_entry));

	while (nr_segs-- > 0) {
		size_t len;
		ssize_t nr;

		/* figure out how much of this vector we can keep */
		len = min_t(size_t, iov->iov_len, header.len - ret);

		/* write out this segment's payload */
		nr = do_write_log_from_user(log, iov->iov_base, len);
		if (unlikely(nr < 0)) {
			log->w_off = orig;
			mutex_unlock(&log->mutex);
			return nr;
		}

		iov++;
		ret += nr;
	}

	mutex_unlock(&log->mutex);

	/* wake up any blocked readers */
	wake_up_interruptible(&log->wq);

	return ret;
}

static struct logger_log *get_log_from_minor(int);

/*
 * logger_open - the log's open() file operation
 *
 * Note how near a no-op this is in the write-only case. Keep it that way!
 */
static int logger_open(struct inode *inode, struct file *file)
{
	struct logger_log *log;
	int ret;

	ret = nonseekable_open(inode, file);
	if (ret)
		return ret;

	log = get_log_from_minor(MINOR(inode->i_rdev));
	if (!log)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		struct logger_reader *reader;

		reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
		if (!reader)
			return -ENOMEM;

		reader->log = log;
		reader->r_ver = 1;
		reader->r_all = in_egroup_p(inode->i_gid) ||
			capable(CAP_SYSLOG);

		INIT_LIST_HEAD(&reader->list);

		mutex_lock(&log->mutex);
		reader->r_off = log->head;
		list_add_tail(&reader->list, &log->readers);
		mutex_unlock(&log->mutex);

		file->private_data = reader;
	} else
		file->private_data = log;

	return 0;
}

/*
 * logger_release - the log's release file operation
 *
 * Note this is a total no-op in the write-only case. Keep it that way!
 */
static int logger_release(struct inode *ignored, struct file *file)
{
	if (file->f_mode & FMODE_READ) {
		struct logger_reader *reader = file->private_data;
		struct logger_log *log = reader->log;

		/* the readers list is protected by log->mutex (see above) */
		mutex_lock(&log->mutex);
		list_del(&reader->list);
		mutex_unlock(&log->mutex);

		kfree(reader);
	}

	return 0;
}

/*
 * logger_poll - the log's poll file operation, for poll/select/epoll
 *
 * Note we always return POLLOUT, because you can always write() to the log.
 * Note also that, strictly speaking, a return value of POLLIN does not
 * guarantee that the log is readable without blocking, as there is a small
 * chance that the writer can lap the reader in the interim between poll()
 * returning and the read() request.
 */
static unsigned int logger_poll(struct file *file, poll_table *wait)
{
	struct logger_reader *reader;
	struct logger_log *log;
	unsigned int ret = POLLOUT | POLLWRNORM;

	if (!(file->f_mode & FMODE_READ))
		return ret;

	reader = file->private_data;
	log = reader->log;

	poll_wait(file, &log->wq, wait);

	mutex_lock(&log->mutex);
	if (!reader->r_all)
		reader->r_off = get_next_entry_by_uid(log,
			reader->r_off, current_euid());

	if (log->w_off != reader->r_off)
		ret |= POLLIN | POLLRDNORM;
	mutex_unlock(&log->mutex);

	return ret;
}

static long logger_set_version(struct logger_reader *reader, void __user *arg)
{
	int version;

	if (copy_from_user(&version, arg, sizeof(int)))
		return -EFAULT;

	if ((version < 1) || (version > 2))
		return -EINVAL;

	reader->r_ver = version;
	return 0;
}

static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct logger_log *log = file_get_log(file);
	struct logger_reader *reader;
	long ret = -EINVAL;
	void __user *argp = (void __user *) arg;

	mutex_lock(&log->mutex);

	switch (cmd) {
	case LOGGER_GET_LOG_BUF_SIZE:
		ret = log->size;
		break;
	case LOGGER_GET_LOG_LEN:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		if (log->w_off >= reader->r_off)
			ret = log->w_off - reader->r_off;
		else
			ret = (log->size - reader->r_off) + log->w_off;
		break;
	case LOGGER_GET_NEXT_ENTRY_LEN:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;

		if (!reader->r_all)
			reader->r_off = get_next_entry_by_uid(log,
				reader->r_off, current_euid());

		if (log->w_off != reader->r_off)
			ret = get_user_hdr_len(reader->r_ver) +
				get_entry_msg_len(log, reader->r_off);
		else
			ret = 0;
		break;
	case LOGGER_FLUSH_LOG:
		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EBADF;
			break;
		}
		list_for_each_entry(reader, &log->readers, list)
			reader->r_off = log->w_off;
		log->head = log->w_off;
		ret = 0;
		break;
	case LOGGER_GET_VERSION:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		ret = reader->r_ver;
		break;
	case LOGGER_SET_VERSION:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		ret = logger_set_version(reader, argp);
		break;
	}

	mutex_unlock(&log->mutex);

	return ret;
}
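
/*
 * Example user-space use of the ioctl interface (a minimal sketch; the
 * "/dev/log/main" path is the conventional Android node for the 'main' log
 * and is an assumption of this example -- this driver only registers misc
 * devices by name):
 *
 *	int fd = open("/dev/log/main", O_RDONLY);
 *	int buf_size = ioctl(fd, LOGGER_GET_LOG_BUF_SIZE);	// total ring size
 *	int unread = ioctl(fd, LOGGER_GET_LOG_LEN);		// bytes not yet read
 */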

static const struct file_operations logger_fops = {
	.owner = THIS_MODULE,
	.read = logger_read,
	.aio_write = logger_aio_write,
	.poll = logger_poll,
	.unlocked_ioctl = logger_ioctl,
	.compat_ioctl = logger_ioctl,
	.open = logger_open,
	.release = logger_release,
};
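
/*
 * Note: no .write hook is set; on the kernels this driver targets the VFS
 * falls back to do_sync_write(), which routes plain write() calls through
 * .aio_write above.
 */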

/*
 * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
 * must be a power of two, and greater than
 * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)).
 */
#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
static unsigned char _buf_ ## VAR[SIZE]; \
static struct logger_log VAR = { \
	.buffer = _buf_ ## VAR, \
	.misc = { \
		.minor = MISC_DYNAMIC_MINOR, \
		.name = NAME, \
		.fops = &logger_fops, \
		.parent = NULL, \
	}, \
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
	.readers = LIST_HEAD_INIT(VAR .readers), \
	.mutex = __MUTEX_INITIALIZER(VAR .mutex), \
	.w_off = 0, \
	.head = 0, \
	.size = SIZE, \
};

DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 256*1024)
DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 256*1024)
DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 256*1024)
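
/*
 * Each DEFINE_LOGGER_DEVICE() use above expands to a 256 KB static ring
 * buffer plus a struct logger_log (e.g. 'log_main') whose embedded misc
 * device requests a dynamically assigned minor number.
 */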

static struct logger_log *get_log_from_minor(int minor)
{
	if (log_main.misc.minor == minor)
		return &log_main;
	if (log_events.misc.minor == minor)
		return &log_events;
	if (log_radio.misc.minor == minor)
		return &log_radio;
	if (log_system.misc.minor == minor)
		return &log_system;
	return NULL;
}

static int __init init_log(struct logger_log *log)
{
	int ret;

	ret = misc_register(&log->misc);
	if (unlikely(ret)) {
		printk(KERN_ERR "logger: failed to register misc "
		       "device for log '%s'!\n", log->misc.name);
		return ret;
	}

	printk(KERN_INFO "logger: created %luK log '%s'\n",
	       (unsigned long) log->size >> 10, log->misc.name);

	return 0;
}

static int __init logger_init(void)
{
	int ret;

	ret = init_log(&log_main);
	if (unlikely(ret))
		goto out;

	ret = init_log(&log_events);
	if (unlikely(ret))
		goto out;

	ret = init_log(&log_radio);
	if (unlikely(ret))
		goto out;

	ret = init_log(&log_system);
	if (unlikely(ret))
		goto out;

out:
	return ret;
}
device_initcall(logger_init);