seq_clientmgr.c

  1. /*
  2. * ALSA sequencer Client Manager
  3. * Copyright (c) 1998-2001 by Frank van de Pol <fvdpol@coil.demon.nl>
  4. * Jaroslav Kysela <perex@perex.cz>
  5. * Takashi Iwai <tiwai@suse.de>
  6. *
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  21. *
  22. */
  23. #include <linux/init.h>
  24. #include <linux/slab.h>
  25. #include <sound/core.h>
  26. #include <sound/minors.h>
  27. #include <linux/kmod.h>
  28. #include <sound/seq_kernel.h>
  29. #include "seq_clientmgr.h"
  30. #include "seq_memory.h"
  31. #include "seq_queue.h"
  32. #include "seq_timer.h"
  33. #include "seq_info.h"
  34. #include "seq_system.h"
  35. #include <sound/seq_device.h>
  36. #ifdef CONFIG_COMPAT
  37. #include <linux/compat.h>
  38. #endif
  39. /* Client Manager
  40. * this module handles the connections of userland and kernel clients
  41. *
  42. */
  43. /*
  44. * There are four ranges of client numbers (last two shared):
  45. * 0..15: global clients
  46. * 16..127: statically allocated client numbers for cards 0..27
  47. * 128..191: dynamically allocated client numbers for cards 28..31
  48. * 128..191: dynamically allocated client numbers for applications
  49. */
  50. /* number of kernel non-card clients */
  51. #define SNDRV_SEQ_GLOBAL_CLIENTS 16
  53. /* clients per card, for static clients */
  53. #define SNDRV_SEQ_CLIENTS_PER_CARD 4
  54. /* dynamically allocated client numbers (both kernel drivers and user space) */
  55. #define SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN 128
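/*
 * Illustration (not from the original source): with the constants above, the
 * static client number of a sound card and the reverse mapping used by
 * snd_seq_client_use_ptr() below are simply
 *
 *   clientid = SNDRV_SEQ_GLOBAL_CLIENTS + card * SNDRV_SEQ_CLIENTS_PER_CARD + n;
 *   card     = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) / SNDRV_SEQ_CLIENTS_PER_CARD;
 *
 * for n in 0..SNDRV_SEQ_CLIENTS_PER_CARD-1.
 */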
  56. #define SNDRV_SEQ_LFLG_INPUT 0x0001
  57. #define SNDRV_SEQ_LFLG_OUTPUT 0x0002
  58. #define SNDRV_SEQ_LFLG_OPEN (SNDRV_SEQ_LFLG_INPUT|SNDRV_SEQ_LFLG_OUTPUT)
  59. static DEFINE_SPINLOCK(clients_lock);
  60. static DEFINE_MUTEX(register_mutex);
  61. /*
  62. * client table
  63. */
  64. static char clienttablock[SNDRV_SEQ_MAX_CLIENTS];
  65. static struct snd_seq_client *clienttab[SNDRV_SEQ_MAX_CLIENTS];
  66. static struct snd_seq_usage client_usage;
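/*
 * Note: clienttab[] holds the pointer for each active client number,
 * clienttablock[] marks a number as temporarily blocked while its client is
 * being freed (see seq_free_client1() below), and client_usage tracks the
 * current and peak number of registered clients.
 */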
  67. /*
  68. * prototypes
  69. */
  70. static int bounce_error_event(struct snd_seq_client *client,
  71. struct snd_seq_event *event,
  72. int err, int atomic, int hop);
  73. static int snd_seq_deliver_single_event(struct snd_seq_client *client,
  74. struct snd_seq_event *event,
  75. int filter, int atomic, int hop);
  76. /*
  77. */
  78. static inline mm_segment_t snd_enter_user(void)
  79. {
  80. mm_segment_t fs = get_fs();
  81. set_fs(get_ds());
  82. return fs;
  83. }
  84. static inline void snd_leave_user(mm_segment_t fs)
  85. {
  86. set_fs(fs);
  87. }
  88. /*
  89. */
  90. static inline unsigned short snd_seq_file_flags(struct file *file)
  91. {
  92. switch (file->f_mode & (FMODE_READ | FMODE_WRITE)) {
  93. case FMODE_WRITE:
  94. return SNDRV_SEQ_LFLG_OUTPUT;
  95. case FMODE_READ:
  96. return SNDRV_SEQ_LFLG_INPUT;
  97. default:
  98. return SNDRV_SEQ_LFLG_OPEN;
  99. }
  100. }
  101. static inline int snd_seq_write_pool_allocated(struct snd_seq_client *client)
  102. {
  103. return snd_seq_total_cells(client->pool) > 0;
  104. }
  105. /* return pointer to client structure for specified id */
  106. static struct snd_seq_client *clientptr(int clientid)
  107. {
  108. if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
  109. snd_printd("Seq: oops. Trying to get pointer to client %d\n",
  110. clientid);
  111. return NULL;
  112. }
  113. return clienttab[clientid];
  114. }
  115. struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
  116. {
  117. unsigned long flags;
  118. struct snd_seq_client *client;
  119. if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
  120. snd_printd("Seq: oops. Trying to get pointer to client %d\n",
  121. clientid);
  122. return NULL;
  123. }
  124. spin_lock_irqsave(&clients_lock, flags);
  125. client = clientptr(clientid);
  126. if (client)
  127. goto __lock;
  128. if (clienttablock[clientid]) {
  129. spin_unlock_irqrestore(&clients_lock, flags);
  130. return NULL;
  131. }
  132. spin_unlock_irqrestore(&clients_lock, flags);
  133. #ifdef CONFIG_MODULES
  134. if (!in_interrupt()) {
  135. static char client_requested[SNDRV_SEQ_GLOBAL_CLIENTS];
  136. static char card_requested[SNDRV_CARDS];
  137. if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) {
  138. int idx;
  139. if (!client_requested[clientid]) {
  140. client_requested[clientid] = 1;
  141. for (idx = 0; idx < 15; idx++) {
  142. if (seq_client_load[idx] < 0)
  143. break;
  144. if (seq_client_load[idx] == clientid) {
  145. request_module("snd-seq-client-%i",
  146. clientid);
  147. break;
  148. }
  149. }
  150. }
  151. } else if (clientid < SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN) {
  152. int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) /
  153. SNDRV_SEQ_CLIENTS_PER_CARD;
  154. if (card < snd_ecards_limit) {
  155. if (! card_requested[card]) {
  156. card_requested[card] = 1;
  157. snd_request_card(card);
  158. }
  159. snd_seq_device_load_drivers();
  160. }
  161. }
  162. spin_lock_irqsave(&clients_lock, flags);
  163. client = clientptr(clientid);
  164. if (client)
  165. goto __lock;
  166. spin_unlock_irqrestore(&clients_lock, flags);
  167. }
  168. #endif
  169. return NULL;
  170. __lock:
  171. snd_use_lock_use(&client->use_lock);
  172. spin_unlock_irqrestore(&clients_lock, flags);
  173. return client;
  174. }
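/*
 * Note: snd_seq_client_use_ptr() takes a reference on the client's use_lock;
 * every successful lookup must be balanced with snd_seq_client_unlock(), as
 * done by all callers in this file.
 */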
  175. static void usage_alloc(struct snd_seq_usage *res, int num)
  176. {
  177. res->cur += num;
  178. if (res->cur > res->peak)
  179. res->peak = res->cur;
  180. }
  181. static void usage_free(struct snd_seq_usage *res, int num)
  182. {
  183. res->cur -= num;
  184. }
  185. /* initialise data structures */
  186. int __init client_init_data(void)
  187. {
  188. /* zap out the client table */
  189. memset(&clienttablock, 0, sizeof(clienttablock));
  190. memset(&clienttab, 0, sizeof(clienttab));
  191. return 0;
  192. }
  193. static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
  194. {
  195. unsigned long flags;
  196. int c;
  197. struct snd_seq_client *client;
  198. /* init client data */
  199. client = kzalloc(sizeof(*client), GFP_KERNEL);
  200. if (client == NULL)
  201. return NULL;
  202. client->pool = snd_seq_pool_new(poolsize);
  203. if (client->pool == NULL) {
  204. kfree(client);
  205. return NULL;
  206. }
  207. client->type = NO_CLIENT;
  208. snd_use_lock_init(&client->use_lock);
  209. rwlock_init(&client->ports_lock);
  210. mutex_init(&client->ports_mutex);
  211. INIT_LIST_HEAD(&client->ports_list_head);
  212. /* find free slot in the client table */
  213. spin_lock_irqsave(&clients_lock, flags);
  214. if (client_index < 0) {
  215. for (c = SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN;
  216. c < SNDRV_SEQ_MAX_CLIENTS;
  217. c++) {
  218. if (clienttab[c] || clienttablock[c])
  219. continue;
  220. clienttab[client->number = c] = client;
  221. spin_unlock_irqrestore(&clients_lock, flags);
  222. return client;
  223. }
  224. } else {
  225. if (clienttab[client_index] == NULL && !clienttablock[client_index]) {
  226. clienttab[client->number = client_index] = client;
  227. spin_unlock_irqrestore(&clients_lock, flags);
  228. return client;
  229. }
  230. }
  231. spin_unlock_irqrestore(&clients_lock, flags);
  232. snd_seq_pool_delete(&client->pool);
  233. kfree(client);
  234. return NULL; /* no free slot found or busy, return failure code */
  235. }
  236. static int seq_free_client1(struct snd_seq_client *client)
  237. {
  238. unsigned long flags;
  239. if (!client)
  240. return 0;
  241. snd_seq_delete_all_ports(client);
  242. snd_seq_queue_client_leave(client->number);
  243. spin_lock_irqsave(&clients_lock, flags);
  244. clienttablock[client->number] = 1;
  245. clienttab[client->number] = NULL;
  246. spin_unlock_irqrestore(&clients_lock, flags);
  247. snd_use_lock_sync(&client->use_lock);
  248. snd_seq_queue_client_termination(client->number);
  249. if (client->pool)
  250. snd_seq_pool_delete(&client->pool);
  251. spin_lock_irqsave(&clients_lock, flags);
  252. clienttablock[client->number] = 0;
  253. spin_unlock_irqrestore(&clients_lock, flags);
  254. return 0;
  255. }
  256. static void seq_free_client(struct snd_seq_client * client)
  257. {
  258. mutex_lock(&register_mutex);
  259. switch (client->type) {
  260. case NO_CLIENT:
  261. snd_printk(KERN_WARNING "Seq: Trying to free unused client %d\n",
  262. client->number);
  263. break;
  264. case USER_CLIENT:
  265. case KERNEL_CLIENT:
  266. seq_free_client1(client);
  267. usage_free(&client_usage, 1);
  268. break;
  269. default:
  270. snd_printk(KERN_ERR "Seq: Trying to free client %d with undefined type = %d\n",
  271. client->number, client->type);
  272. }
  273. mutex_unlock(&register_mutex);
  274. snd_seq_system_client_ev_client_exit(client->number);
  275. }
  276. /* -------------------------------------------------------- */
  277. /* create a user client */
  278. static int snd_seq_open(struct inode *inode, struct file *file)
  279. {
  280. int c, mode; /* client id */
  281. struct snd_seq_client *client;
  282. struct snd_seq_user_client *user;
  283. int err;
  284. err = nonseekable_open(inode, file);
  285. if (err < 0)
  286. return err;
  287. if (mutex_lock_interruptible(&register_mutex))
  288. return -ERESTARTSYS;
  289. client = seq_create_client1(-1, SNDRV_SEQ_DEFAULT_EVENTS);
  290. if (client == NULL) {
  291. mutex_unlock(&register_mutex);
  292. return -ENOMEM; /* failure code */
  293. }
  294. mode = snd_seq_file_flags(file);
  295. if (mode & SNDRV_SEQ_LFLG_INPUT)
  296. client->accept_input = 1;
  297. if (mode & SNDRV_SEQ_LFLG_OUTPUT)
  298. client->accept_output = 1;
  299. user = &client->data.user;
  300. user->fifo = NULL;
  301. user->fifo_pool_size = 0;
  302. if (mode & SNDRV_SEQ_LFLG_INPUT) {
  303. user->fifo_pool_size = SNDRV_SEQ_DEFAULT_CLIENT_EVENTS;
  304. user->fifo = snd_seq_fifo_new(user->fifo_pool_size);
  305. if (user->fifo == NULL) {
  306. seq_free_client1(client);
  307. kfree(client);
  308. mutex_unlock(&register_mutex);
  309. return -ENOMEM;
  310. }
  311. }
  312. usage_alloc(&client_usage, 1);
  313. client->type = USER_CLIENT;
  314. mutex_unlock(&register_mutex);
  315. c = client->number;
  316. file->private_data = client;
  317. /* fill client data */
  318. user->file = file;
  319. sprintf(client->name, "Client-%d", c);
  320. /* make others aware of this new client */
  321. snd_seq_system_client_ev_client_start(c);
  322. return 0;
  323. }
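/*
 * Note: each open() of the sequencer device creates a fresh client in the
 * dynamic number range (seq_create_client1() is called with -1), and the new
 * client is then announced to the others through the system client.
 */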
  324. /* delete a user client */
  325. static int snd_seq_release(struct inode *inode, struct file *file)
  326. {
  327. struct snd_seq_client *client = file->private_data;
  328. if (client) {
  329. seq_free_client(client);
  330. if (client->data.user.fifo)
  331. snd_seq_fifo_delete(&client->data.user.fifo);
  332. kfree(client);
  333. }
  334. return 0;
  335. }
  336. /* handle client read() */
  337. /* possible error values:
  338. * -ENXIO invalid client or file open mode
  339. * -ENOSPC FIFO overflow (the flag is cleared after this error report)
  340. * -EINVAL not enough user-space buffer to write the whole event
  341. * -EFAULT seg. fault during copy to user space
  342. */
  343. static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count,
  344. loff_t *offset)
  345. {
  346. struct snd_seq_client *client = file->private_data;
  347. struct snd_seq_fifo *fifo;
  348. int err;
  349. long result = 0;
  350. struct snd_seq_event_cell *cell;
  351. if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT))
  352. return -ENXIO;
  353. if (!access_ok(VERIFY_WRITE, buf, count))
  354. return -EFAULT;
  355. /* check client structures are in place */
  356. if (snd_BUG_ON(!client))
  357. return -ENXIO;
  358. if (!client->accept_input || (fifo = client->data.user.fifo) == NULL)
  359. return -ENXIO;
  360. if (atomic_read(&fifo->overflow) > 0) {
  361. /* buffer overflow is detected */
  362. snd_seq_fifo_clear(fifo);
  363. /* return error code */
  364. return -ENOSPC;
  365. }
  366. cell = NULL;
  367. err = 0;
  368. snd_seq_fifo_lock(fifo);
  369. /* while data available in queue */
  370. while (count >= sizeof(struct snd_seq_event)) {
  371. int nonblock;
  372. nonblock = (file->f_flags & O_NONBLOCK) || result > 0;
  373. if ((err = snd_seq_fifo_cell_out(fifo, &cell, nonblock)) < 0) {
  374. break;
  375. }
  376. if (snd_seq_ev_is_variable(&cell->event)) {
  377. struct snd_seq_event tmpev;
  378. tmpev = cell->event;
  379. tmpev.data.ext.len &= ~SNDRV_SEQ_EXT_MASK;
  380. if (copy_to_user(buf, &tmpev, sizeof(struct snd_seq_event))) {
  381. err = -EFAULT;
  382. break;
  383. }
  384. count -= sizeof(struct snd_seq_event);
  385. buf += sizeof(struct snd_seq_event);
  386. err = snd_seq_expand_var_event(&cell->event, count,
  387. (char __force *)buf, 0,
  388. sizeof(struct snd_seq_event));
  389. if (err < 0)
  390. break;
  391. result += err;
  392. count -= err;
  393. buf += err;
  394. } else {
  395. if (copy_to_user(buf, &cell->event, sizeof(struct snd_seq_event))) {
  396. err = -EFAULT;
  397. break;
  398. }
  399. count -= sizeof(struct snd_seq_event);
  400. buf += sizeof(struct snd_seq_event);
  401. }
  402. snd_seq_cell_free(cell);
  403. cell = NULL; /* to be sure */
  404. result += sizeof(struct snd_seq_event);
  405. }
  406. if (err < 0) {
  407. if (cell)
  408. snd_seq_fifo_cell_putback(fifo, cell);
  409. if (err == -EAGAIN && result > 0)
  410. err = 0;
  411. }
  412. snd_seq_fifo_unlock(fifo);
  413. return (err < 0) ? err : result;
  414. }
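/*
 * Note: for a variable-length event, the buffer returned by read() contains
 * the fixed-size struct snd_seq_event header (with the SNDRV_SEQ_EXT_* flag
 * bits cleared from data.ext.len) immediately followed by the expanded
 * external data; the next event starts right after that payload.
 */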
  415. /*
  416. * check access permission to the port
  417. */
  418. static int check_port_perm(struct snd_seq_client_port *port, unsigned int flags)
  419. {
  420. if ((port->capability & flags) != flags)
  421. return 0;
  422. return flags;
  423. }
  424. /*
  425. * check if the destination client is available, and return the pointer
  426. * if filter is non-zero, client filter bitmap is tested.
  427. */
  428. static struct snd_seq_client *get_event_dest_client(struct snd_seq_event *event,
  429. int filter)
  430. {
  431. struct snd_seq_client *dest;
  432. dest = snd_seq_client_use_ptr(event->dest.client);
  433. if (dest == NULL)
  434. return NULL;
  435. if (! dest->accept_input)
  436. goto __not_avail;
  437. if ((dest->filter & SNDRV_SEQ_FILTER_USE_EVENT) &&
  438. ! test_bit(event->type, dest->event_filter))
  439. goto __not_avail;
  440. if (filter && !(dest->filter & filter))
  441. goto __not_avail;
  442. return dest; /* ok - accessible */
  443. __not_avail:
  444. snd_seq_client_unlock(dest);
  445. return NULL;
  446. }
  447. /*
  448. * Return the error event.
  449. *
  450. * If the receiver client is a user client, the original event is
  451. * encapsulated in SNDRV_SEQ_EVENT_BOUNCE as variable length event. If
  452. * the original event is also variable length, the external data is
  453. * copied after the event record.
  454. * If the receiver client is a kernel client, the original event is
  455. * quoted in SNDRV_SEQ_EVENT_KERNEL_ERROR, since this requires no extra
  456. * kmalloc.
  457. */
  458. static int bounce_error_event(struct snd_seq_client *client,
  459. struct snd_seq_event *event,
  460. int err, int atomic, int hop)
  461. {
  462. struct snd_seq_event bounce_ev;
  463. int result;
  464. if (client == NULL ||
  465. ! (client->filter & SNDRV_SEQ_FILTER_BOUNCE) ||
  466. ! client->accept_input)
  467. return 0; /* ignored */
  468. /* set up quoted error */
  469. memset(&bounce_ev, 0, sizeof(bounce_ev));
  470. bounce_ev.type = SNDRV_SEQ_EVENT_KERNEL_ERROR;
  471. bounce_ev.flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;
  472. bounce_ev.queue = SNDRV_SEQ_QUEUE_DIRECT;
  473. bounce_ev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
  474. bounce_ev.source.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE;
  475. bounce_ev.dest.client = client->number;
  476. bounce_ev.dest.port = event->source.port;
  477. bounce_ev.data.quote.origin = event->dest;
  478. bounce_ev.data.quote.event = event;
  479. bounce_ev.data.quote.value = -err; /* use positive value */
  480. result = snd_seq_deliver_single_event(NULL, &bounce_ev, 0, atomic, hop + 1);
  481. if (result < 0) {
  482. client->event_lost++;
  483. return result;
  484. }
  485. return result;
  486. }
  487. /*
  488. * rewrite the time-stamp of the event record with the current time
  489. * of the given queue.
  490. * return non-zero if updated.
  491. */
  492. static int update_timestamp_of_queue(struct snd_seq_event *event,
  493. int queue, int real_time)
  494. {
  495. struct snd_seq_queue *q;
  496. q = queueptr(queue);
  497. if (! q)
  498. return 0;
  499. event->queue = queue;
  500. event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
  501. if (real_time) {
  502. event->time.time = snd_seq_timer_get_cur_time(q->timer);
  503. event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
  504. } else {
  505. event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
  506. event->flags |= SNDRV_SEQ_TIME_STAMP_TICK;
  507. }
  508. queuefree(q);
  509. return 1;
  510. }
  511. /*
  512. * deliver an event to the specified destination.
  513. * if filter is non-zero, client filter bitmap is tested.
  514. *
  515. * RETURN VALUE: 0 : if succeeded
  516. * <0 : error
  517. */
  518. static int snd_seq_deliver_single_event(struct snd_seq_client *client,
  519. struct snd_seq_event *event,
  520. int filter, int atomic, int hop)
  521. {
  522. struct snd_seq_client *dest = NULL;
  523. struct snd_seq_client_port *dest_port = NULL;
  524. int result = -ENOENT;
  525. int direct;
  526. direct = snd_seq_ev_is_direct(event);
  527. dest = get_event_dest_client(event, filter);
  528. if (dest == NULL)
  529. goto __skip;
  530. dest_port = snd_seq_port_use_ptr(dest, event->dest.port);
  531. if (dest_port == NULL)
  532. goto __skip;
  533. /* check permission */
  534. if (! check_port_perm(dest_port, SNDRV_SEQ_PORT_CAP_WRITE)) {
  535. result = -EPERM;
  536. goto __skip;
  537. }
  538. if (dest_port->timestamping)
  539. update_timestamp_of_queue(event, dest_port->time_queue,
  540. dest_port->time_real);
  541. switch (dest->type) {
  542. case USER_CLIENT:
  543. if (dest->data.user.fifo)
  544. result = snd_seq_fifo_event_in(dest->data.user.fifo, event);
  545. break;
  546. case KERNEL_CLIENT:
  547. if (dest_port->event_input == NULL)
  548. break;
  549. result = dest_port->event_input(event, direct,
  550. dest_port->private_data,
  551. atomic, hop);
  552. break;
  553. default:
  554. break;
  555. }
  556. __skip:
  557. if (dest_port)
  558. snd_seq_port_unlock(dest_port);
  559. if (dest)
  560. snd_seq_client_unlock(dest);
  561. if (result < 0 && !direct) {
  562. result = bounce_error_event(client, event, result, atomic, hop);
  563. }
  564. return result;
  565. }
  566. /*
  567. * send the event to all subscribers:
  568. */
  569. static int deliver_to_subscribers(struct snd_seq_client *client,
  570. struct snd_seq_event *event,
  571. int atomic, int hop)
  572. {
  573. struct snd_seq_subscribers *subs;
  574. int err = 0, num_ev = 0;
  575. struct snd_seq_event event_saved;
  576. struct snd_seq_client_port *src_port;
  577. struct snd_seq_port_subs_info *grp;
  578. src_port = snd_seq_port_use_ptr(client, event->source.port);
  579. if (src_port == NULL)
  580. return -EINVAL; /* invalid source port */
  581. /* save original event record */
  582. event_saved = *event;
  583. grp = &src_port->c_src;
  584. /* lock list */
  585. if (atomic)
  586. read_lock(&grp->list_lock);
  587. else
  588. down_read(&grp->list_mutex);
  589. list_for_each_entry(subs, &grp->list_head, src_list) {
  590. event->dest = subs->info.dest;
  591. if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
  592. /* convert the time stamp according to the subscription flags */
  593. update_timestamp_of_queue(event, subs->info.queue,
  594. subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL);
  595. err = snd_seq_deliver_single_event(client, event,
  596. 0, atomic, hop);
  597. if (err < 0)
  598. break;
  599. num_ev++;
  600. /* restore original event record */
  601. *event = event_saved;
  602. }
  603. if (atomic)
  604. read_unlock(&grp->list_lock);
  605. else
  606. up_read(&grp->list_mutex);
  607. *event = event_saved; /* restore */
  608. snd_seq_port_unlock(src_port);
  609. return (err < 0) ? err : num_ev;
  610. }
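/*
 * Note: the subscriber list is walked under grp->list_lock when delivering
 * from atomic context (e.g. a timer interrupt) and under the grp->list_mutex
 * read-write semaphore otherwise, so delivery from atomic context never
 * sleeps.
 */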
  611. #ifdef SUPPORT_BROADCAST
  612. /*
  613. * broadcast to all ports:
  614. */
  615. static int port_broadcast_event(struct snd_seq_client *client,
  616. struct snd_seq_event *event,
  617. int atomic, int hop)
  618. {
  619. int num_ev = 0, err = 0;
  620. struct snd_seq_client *dest_client;
  621. struct snd_seq_client_port *port;
  622. dest_client = get_event_dest_client(event, SNDRV_SEQ_FILTER_BROADCAST);
  623. if (dest_client == NULL)
  624. return 0; /* no matching destination */
  625. read_lock(&dest_client->ports_lock);
  626. list_for_each_entry(port, &dest_client->ports_list_head, list) {
  627. event->dest.port = port->addr.port;
  628. /* pass NULL as source client to avoid error bounce */
  629. err = snd_seq_deliver_single_event(NULL, event,
  630. SNDRV_SEQ_FILTER_BROADCAST,
  631. atomic, hop);
  632. if (err < 0)
  633. break;
  634. num_ev++;
  635. }
  636. read_unlock(&dest_client->ports_lock);
  637. snd_seq_client_unlock(dest_client);
  638. event->dest.port = SNDRV_SEQ_ADDRESS_BROADCAST; /* restore */
  639. return (err < 0) ? err : num_ev;
  640. }
  641. /*
  642. * send the event to all clients:
  643. * if destination port is also ADDRESS_BROADCAST, deliver to all ports.
  644. */
  645. static int broadcast_event(struct snd_seq_client *client,
  646. struct snd_seq_event *event, int atomic, int hop)
  647. {
  648. int err = 0, num_ev = 0;
  649. int dest;
  650. struct snd_seq_addr addr;
  651. addr = event->dest; /* save */
  652. for (dest = 0; dest < SNDRV_SEQ_MAX_CLIENTS; dest++) {
  653. /* don't send to itself */
  654. if (dest == client->number)
  655. continue;
  656. event->dest.client = dest;
  657. event->dest.port = addr.port;
  658. if (addr.port == SNDRV_SEQ_ADDRESS_BROADCAST)
  659. err = port_broadcast_event(client, event, atomic, hop);
  660. else
  661. /* pass NULL as source client to avoid error bounce */
  662. err = snd_seq_deliver_single_event(NULL, event,
  663. SNDRV_SEQ_FILTER_BROADCAST,
  664. atomic, hop);
  665. if (err < 0)
  666. break;
  667. num_ev += err;
  668. }
  669. event->dest = addr; /* restore */
  670. return (err < 0) ? err : num_ev;
  671. }
  672. /* multicast - not supported yet */
  673. static int multicast_event(struct snd_seq_client *client, struct snd_seq_event *event,
  674. int atomic, int hop)
  675. {
  676. snd_printd("seq: multicast not supported yet.\n");
  677. return 0; /* ignored */
  678. }
  679. #endif /* SUPPORT_BROADCAST */
  680. /* deliver an event to the destination port(s).
  681. * if the event is to subscribers or broadcast, the event is dispatched
  682. * to multiple targets.
  683. *
  684. * RETURN VALUE: n > 0 : the number of delivered events.
  685. * n == 0 : the event was not passed to any client.
  686. * n < 0 : error - event was not processed.
  687. */
  688. static int snd_seq_deliver_event(struct snd_seq_client *client, struct snd_seq_event *event,
  689. int atomic, int hop)
  690. {
  691. int result;
  692. hop++;
  693. if (hop >= SNDRV_SEQ_MAX_HOPS) {
  694. snd_printd("too long delivery path (%d:%d->%d:%d)\n",
  695. event->source.client, event->source.port,
  696. event->dest.client, event->dest.port);
  697. return -EMLINK;
  698. }
  699. if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS ||
  700. event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS)
  701. result = deliver_to_subscribers(client, event, atomic, hop);
  702. #ifdef SUPPORT_BROADCAST
  703. else if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST ||
  704. event->dest.client == SNDRV_SEQ_ADDRESS_BROADCAST)
  705. result = broadcast_event(client, event, atomic, hop);
  706. else if (event->dest.client >= SNDRV_SEQ_MAX_CLIENTS)
  707. result = multicast_event(client, event, atomic, hop);
  708. else if (event->dest.port == SNDRV_SEQ_ADDRESS_BROADCAST)
  709. result = port_broadcast_event(client, event, atomic, hop);
  710. #endif
  711. else
  712. result = snd_seq_deliver_single_event(client, event, 0, atomic, hop);
  713. return result;
  714. }
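/*
 * Note: the hop counter is incremented on every delivery step and guards
 * against subscription loops between clients; once SNDRV_SEQ_MAX_HOPS is
 * reached the event is dropped with -EMLINK.
 */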
  715. /*
  716. * dispatch an event cell:
  717. * This function is called only from queue check routines in timer
  718. * interrupts or after an event has been enqueued.
  719. * The event cell shall be released or re-queued in this function.
  720. *
  721. * RETURN VALUE: n > 0 : the number of delivered events.
  722. * n == 0 : the event was not passed to any client.
  723. * n < 0 : error - event was not processed.
  724. */
  725. int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
  726. {
  727. struct snd_seq_client *client;
  728. int result;
  729. if (snd_BUG_ON(!cell))
  730. return -EINVAL;
  731. client = snd_seq_client_use_ptr(cell->event.source.client);
  732. if (client == NULL) {
  733. snd_seq_cell_free(cell); /* release this cell */
  734. return -EINVAL;
  735. }
  736. if (cell->event.type == SNDRV_SEQ_EVENT_NOTE) {
  737. /* NOTE event:
  738. * the event cell is re-used as a NOTE-OFF event and
  739. * enqueued again.
  740. */
  741. struct snd_seq_event tmpev, *ev;
  742. /* reserve this event to enqueue note-off later */
  743. tmpev = cell->event;
  744. tmpev.type = SNDRV_SEQ_EVENT_NOTEON;
  745. result = snd_seq_deliver_event(client, &tmpev, atomic, hop);
  746. /*
  747. * This was originally a note event. We now re-use the
  748. * cell for the note-off event.
  749. */
  750. ev = &cell->event;
  751. ev->type = SNDRV_SEQ_EVENT_NOTEOFF;
  752. ev->flags |= SNDRV_SEQ_PRIORITY_HIGH;
  753. /* add the duration time */
  754. switch (ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) {
  755. case SNDRV_SEQ_TIME_STAMP_TICK:
  756. ev->time.tick += ev->data.note.duration;
  757. break;
  758. case SNDRV_SEQ_TIME_STAMP_REAL:
  759. /* unit for duration is ms */
  760. ev->time.time.tv_nsec += 1000000 * (ev->data.note.duration % 1000);
  761. ev->time.time.tv_sec += ev->data.note.duration / 1000 +
  762. ev->time.time.tv_nsec / 1000000000;
  763. ev->time.time.tv_nsec %= 1000000000;
  764. break;
  765. }
  766. ev->data.note.velocity = ev->data.note.off_velocity;
  767. /* Now queue this cell as the note off event */
  768. if (snd_seq_enqueue_event(cell, atomic, hop) < 0)
  769. snd_seq_cell_free(cell); /* release this cell */
  770. } else {
  771. /* Normal events:
  772. * event cell is freed after processing the event
  773. */
  774. result = snd_seq_deliver_event(client, &cell->event, atomic, hop);
  775. snd_seq_cell_free(cell);
  776. }
  777. snd_seq_client_unlock(client);
  778. return result;
  779. }
  780. /* Allocate a cell from the client pool and enqueue it to the queue:
  781. * if pool is empty and blocking is TRUE, sleep until a new cell is
  782. * available.
  783. */
  784. static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
  785. struct snd_seq_event *event,
  786. struct file *file, int blocking,
  787. int atomic, int hop)
  788. {
  789. struct snd_seq_event_cell *cell;
  790. int err;
  791. /* special queue values - force direct passing */
  792. if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
  793. event->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
  794. event->queue = SNDRV_SEQ_QUEUE_DIRECT;
  795. } else
  796. #ifdef SUPPORT_BROADCAST
  797. if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST) {
  798. event->dest.client = SNDRV_SEQ_ADDRESS_BROADCAST;
  799. event->queue = SNDRV_SEQ_QUEUE_DIRECT;
  800. }
  801. #endif
  802. if (event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
  803. /* check presence of source port */
  804. struct snd_seq_client_port *src_port = snd_seq_port_use_ptr(client, event->source.port);
  805. if (src_port == NULL)
  806. return -EINVAL;
  807. snd_seq_port_unlock(src_port);
  808. }
  809. /* direct event processing without enqueueing */
  810. if (snd_seq_ev_is_direct(event)) {
  811. if (event->type == SNDRV_SEQ_EVENT_NOTE)
  812. return -EINVAL; /* this event must be enqueued! */
  813. return snd_seq_deliver_event(client, event, atomic, hop);
  814. }
  815. /* Not direct, normal queuing */
  816. if (snd_seq_queue_is_used(event->queue, client->number) <= 0)
  817. return -EINVAL; /* invalid queue */
  818. if (! snd_seq_write_pool_allocated(client))
  819. return -ENXIO; /* output pool is not allocated */
  820. /* allocate an event cell */
  821. err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
  822. if (err < 0)
  823. return err;
  824. /* we got a cell. enqueue it. */
  825. if ((err = snd_seq_enqueue_event(cell, atomic, hop)) < 0) {
  826. snd_seq_cell_free(cell);
  827. return err;
  828. }
  829. return 0;
  830. }
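/*
 * Note: cell allocation may sleep only when the caller requested blocking
 * behaviour and we are not in atomic context (hence the "!blocking || atomic"
 * nonblock argument to snd_seq_event_dup()); direct events are delivered
 * immediately above and never consume a pool cell.
 */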
  831. /*
  832. * check validity of event type and data length.
  833. * return non-zero if invalid.
  834. */
  835. static int check_event_type_and_length(struct snd_seq_event *ev)
  836. {
  837. switch (snd_seq_ev_length_type(ev)) {
  838. case SNDRV_SEQ_EVENT_LENGTH_FIXED:
  839. if (snd_seq_ev_is_variable_type(ev))
  840. return -EINVAL;
  841. break;
  842. case SNDRV_SEQ_EVENT_LENGTH_VARIABLE:
  843. if (! snd_seq_ev_is_variable_type(ev) ||
  844. (ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK) >= SNDRV_SEQ_MAX_EVENT_LEN)
  845. return -EINVAL;
  846. break;
  847. case SNDRV_SEQ_EVENT_LENGTH_VARUSR:
  848. if (! snd_seq_ev_is_direct(ev))
  849. return -EINVAL;
  850. break;
  851. }
  852. return 0;
  853. }
  854. /* handle write() */
  855. /* possible error values:
  856. * -ENXIO invalid client or file open mode
  857. * -ENOMEM malloc failed
  858. * -EFAULT seg. fault during copy from user space
  859. * -EINVAL invalid event
  860. * -EAGAIN no space in output pool
  861. * -EINTR interrupted while sleeping
  862. * -EMLINK too many hops
  863. * others depends on return value from driver callback
  864. */
  865. static ssize_t snd_seq_write(struct file *file, const char __user *buf,
  866. size_t count, loff_t *offset)
  867. {
  868. struct snd_seq_client *client = file->private_data;
  869. int written = 0, len;
  870. int err = -EINVAL;
  871. struct snd_seq_event event;
  872. if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
  873. return -ENXIO;
  874. /* check client structures are in place */
  875. if (snd_BUG_ON(!client))
  876. return -ENXIO;
  877. if (!client->accept_output || client->pool == NULL)
  878. return -ENXIO;
  879. /* allocate the pool now if the pool is not allocated yet */
  880. if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
  881. if (snd_seq_pool_init(client->pool) < 0)
  882. return -ENOMEM;
  883. }
  884. /* only process whole events */
  885. while (count >= sizeof(struct snd_seq_event)) {
  886. /* Read in the event header from the user */
  887. len = sizeof(event);
  888. if (copy_from_user(&event, buf, len)) {
  889. err = -EFAULT;
  890. break;
  891. }
  892. event.source.client = client->number; /* fill in client number */
  893. /* Check for extension data length */
  894. if (check_event_type_and_length(&event)) {
  895. err = -EINVAL;
  896. break;
  897. }
  898. /* check for special events */
  899. if (event.type == SNDRV_SEQ_EVENT_NONE)
  900. goto __skip_event;
  901. else if (snd_seq_ev_is_reserved(&event)) {
  902. err = -EINVAL;
  903. break;
  904. }
  905. if (snd_seq_ev_is_variable(&event)) {
  906. int extlen = event.data.ext.len & ~SNDRV_SEQ_EXT_MASK;
  907. if ((size_t)(extlen + len) > count) {
  908. /* back out, will get an error this time or next */
  909. err = -EINVAL;
  910. break;
  911. }
  912. /* set user space pointer */
  913. event.data.ext.len = extlen | SNDRV_SEQ_EXT_USRPTR;
  914. event.data.ext.ptr = (char __force *)buf
  915. + sizeof(struct snd_seq_event);
  916. len += extlen; /* increment data length */
  917. } else {
  918. #ifdef CONFIG_COMPAT
  919. if (client->convert32 && snd_seq_ev_is_varusr(&event)) {
  920. void *ptr = (void __force *)compat_ptr(event.data.raw32.d[1]);
  921. event.data.ext.ptr = ptr;
  922. }
  923. #endif
  924. }
  925. /* ok, enqueue it */
  926. err = snd_seq_client_enqueue_event(client, &event, file,
  927. !(file->f_flags & O_NONBLOCK),
  928. 0, 0);
  929. if (err < 0)
  930. break;
  931. __skip_event:
  932. /* Update pointers and counts */
  933. count -= len;
  934. buf += len;
  935. written += len;
  936. }
  937. return written ? written : err;
  938. }
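/*
 * A rough userspace sketch (not from the original source) of how a
 * variable-length event is laid out for write(): the payload of data.ext.len
 * bytes must directly follow the fixed-size header in the same buffer,
 * matching the length check and pointer setup above.
 *
 *   struct snd_seq_event ev;
 *   memset(&ev, 0, sizeof(ev));
 *   ev.type = SNDRV_SEQ_EVENT_SYSEX;
 *   ev.flags = SNDRV_SEQ_EVENT_LENGTH_VARIABLE;
 *   ev.data.ext.len = data_len;                 // payload length, flag bits clear
 *   memcpy(buf, &ev, sizeof(ev));               // header first
 *   memcpy(buf + sizeof(ev), data, data_len);   // payload right after it
 *   write(seq_fd, buf, sizeof(ev) + data_len);
 *
 * Here buf, data, data_len and seq_fd are assumed to be set up by the caller;
 * destination address and queue fields are omitted for brevity.
 */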
  939. /*
  940. * handle polling
  941. */
  942. static unsigned int snd_seq_poll(struct file *file, poll_table * wait)
  943. {
  944. struct snd_seq_client *client = file->private_data;
  945. unsigned int mask = 0;
  946. /* check client structures are in place */
  947. if (snd_BUG_ON(!client))
  948. return -ENXIO;
  949. if ((snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT) &&
  950. client->data.user.fifo) {
  951. /* check if data is available in the outqueue */
  952. if (snd_seq_fifo_poll_wait(client->data.user.fifo, file, wait))
  953. mask |= POLLIN | POLLRDNORM;
  954. }
  955. if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) {
  956. /* check if data is available in the pool */
  957. if (!snd_seq_write_pool_allocated(client) ||
  958. snd_seq_pool_poll_wait(client->pool, file, wait))
  959. mask |= POLLOUT | POLLWRNORM;
  960. }
  961. return mask;
  962. }
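/*
 * Note: poll() reports POLLIN|POLLRDNORM when the input FIFO has events
 * queued, and POLLOUT|POLLWRNORM when the output pool still has room (or has
 * not been allocated yet), mirroring the read() and write() paths above.
 */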
  963. /*-----------------------------------------------------*/
  964. /* SYSTEM_INFO ioctl() */
  965. static int snd_seq_ioctl_system_info(struct snd_seq_client *client, void __user *arg)
  966. {
  967. struct snd_seq_system_info info;
  968. memset(&info, 0, sizeof(info));
  969. /* fill the info fields */
  970. info.queues = SNDRV_SEQ_MAX_QUEUES;
  971. info.clients = SNDRV_SEQ_MAX_CLIENTS;
  972. info.ports = 256; /* fixed limit */
  973. info.channels = 256; /* fixed limit */
  974. info.cur_clients = client_usage.cur;
  975. info.cur_queues = snd_seq_queue_get_cur_queues();
  976. if (copy_to_user(arg, &info, sizeof(info)))
  977. return -EFAULT;
  978. return 0;
  979. }
  980. /* RUNNING_MODE ioctl() */
  981. static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void __user *arg)
  982. {
  983. struct snd_seq_running_info info;
  984. struct snd_seq_client *cptr;
  985. int err = 0;
  986. if (copy_from_user(&info, arg, sizeof(info)))
  987. return -EFAULT;
  988. /* requested client number */
  989. cptr = snd_seq_client_use_ptr(info.client);
  990. if (cptr == NULL)
  991. return -ENOENT; /* don't change !!! */
  992. #ifdef SNDRV_BIG_ENDIAN
  993. if (! info.big_endian) {
  994. err = -EINVAL;
  995. goto __err;
  996. }
  997. #else
  998. if (info.big_endian) {
  999. err = -EINVAL;
  1000. goto __err;
  1001. }
  1002. #endif
  1003. if (info.cpu_mode > sizeof(long)) {
  1004. err = -EINVAL;
  1005. goto __err;
  1006. }
  1007. cptr->convert32 = (info.cpu_mode < sizeof(long));
  1008. __err:
  1009. snd_seq_client_unlock(cptr);
  1010. return err;
  1011. }
  1012. /* CLIENT_INFO ioctl() */
  1013. static void get_client_info(struct snd_seq_client *cptr,
  1014. struct snd_seq_client_info *info)
  1015. {
  1016. info->client = cptr->number;
  1017. /* fill the info fields */
  1018. info->type = cptr->type;
  1019. strcpy(info->name, cptr->name);
  1020. info->filter = cptr->filter;
  1021. info->event_lost = cptr->event_lost;
  1022. memcpy(info->event_filter, cptr->event_filter, 32);
  1023. info->num_ports = cptr->num_ports;
  1024. memset(info->reserved, 0, sizeof(info->reserved));
  1025. }
  1026. static int snd_seq_ioctl_get_client_info(struct snd_seq_client *client,
  1027. void __user *arg)
  1028. {
  1029. struct snd_seq_client *cptr;
  1030. struct snd_seq_client_info client_info;
  1031. if (copy_from_user(&client_info, arg, sizeof(client_info)))
  1032. return -EFAULT;
  1033. /* requested client number */
  1034. cptr = snd_seq_client_use_ptr(client_info.client);
  1035. if (cptr == NULL)
  1036. return -ENOENT; /* don't change !!! */
  1037. get_client_info(cptr, &client_info);
  1038. snd_seq_client_unlock(cptr);
  1039. if (copy_to_user(arg, &client_info, sizeof(client_info)))
  1040. return -EFAULT;
  1041. return 0;
  1042. }
  1043. /* CLIENT_INFO ioctl() */
  1044. static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
  1045. void __user *arg)
  1046. {
  1047. struct snd_seq_client_info client_info;
  1048. if (copy_from_user(&client_info, arg, sizeof(client_info)))
  1049. return -EFAULT;
  1050. /* it is not allowed to set the info fields for another client */
  1051. if (client->number != client_info.client)
  1052. return -EPERM;
  1053. /* also client type must be set now */
  1054. if (client->type != client_info.type)
  1055. return -EINVAL;
  1056. /* fill the info fields */
  1057. if (client_info.name[0])
  1058. strlcpy(client->name, client_info.name, sizeof(client->name));
  1059. client->filter = client_info.filter;
  1060. client->event_lost = client_info.event_lost;
  1061. memcpy(client->event_filter, client_info.event_filter, 32);
  1062. return 0;
  1063. }
  1064. /*
  1065. * CREATE PORT ioctl()
  1066. */
  1067. static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
  1068. void __user *arg)
  1069. {
  1070. struct snd_seq_client_port *port;
  1071. struct snd_seq_port_info info;
  1072. struct snd_seq_port_callback *callback;
  1073. if (copy_from_user(&info, arg, sizeof(info)))
  1074. return -EFAULT;
  1075. /* it is not allowed to create the port for another client */
  1076. if (info.addr.client != client->number)
  1077. return -EPERM;
  1078. port = snd_seq_create_port(client, (info.flags & SNDRV_SEQ_PORT_FLG_GIVEN_PORT) ? info.addr.port : -1);
  1079. if (port == NULL)
  1080. return -ENOMEM;
  1081. if (client->type == USER_CLIENT && info.kernel) {
  1082. snd_seq_delete_port(client, port->addr.port);
  1083. return -EINVAL;
  1084. }
  1085. if (client->type == KERNEL_CLIENT) {
  1086. if ((callback = info.kernel) != NULL) {
  1087. if (callback->owner)
  1088. port->owner = callback->owner;
  1089. port->private_data = callback->private_data;
  1090. port->private_free = callback->private_free;
  1091. port->callback_all = callback->callback_all;
  1092. port->event_input = callback->event_input;
  1093. port->c_src.open = callback->subscribe;
  1094. port->c_src.close = callback->unsubscribe;
  1095. port->c_dest.open = callback->use;
  1096. port->c_dest.close = callback->unuse;
  1097. }
  1098. }
  1099. info.addr = port->addr;
  1100. snd_seq_set_port_info(port, &info);
  1101. snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
  1102. if (copy_to_user(arg, &info, sizeof(info)))
  1103. return -EFAULT;
  1104. return 0;
  1105. }
  1106. /*
  1107. * DELETE PORT ioctl()
  1108. */
  1109. static int snd_seq_ioctl_delete_port(struct snd_seq_client *client,
  1110. void __user *arg)
  1111. {
  1112. struct snd_seq_port_info info;
  1113. int err;
  1114. /* set passed parameters */
  1115. if (copy_from_user(&info, arg, sizeof(info)))
  1116. return -EFAULT;
  1117. /* it is not allowed to remove the port for another client */
  1118. if (info.addr.client != client->number)
  1119. return -EPERM;
  1120. err = snd_seq_delete_port(client, info.addr.port);
  1121. if (err >= 0)
  1122. snd_seq_system_client_ev_port_exit(client->number, info.addr.port);
  1123. return err;
  1124. }
  1125. /*
  1126. * GET_PORT_INFO ioctl() (on any client)
  1127. */
  1128. static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client,
  1129. void __user *arg)
  1130. {
  1131. struct snd_seq_client *cptr;
  1132. struct snd_seq_client_port *port;
  1133. struct snd_seq_port_info info;
  1134. if (copy_from_user(&info, arg, sizeof(info)))
  1135. return -EFAULT;
  1136. cptr = snd_seq_client_use_ptr(info.addr.client);
  1137. if (cptr == NULL)
  1138. return -ENXIO;
  1139. port = snd_seq_port_use_ptr(cptr, info.addr.port);
  1140. if (port == NULL) {
  1141. snd_seq_client_unlock(cptr);
  1142. return -ENOENT; /* don't change */
  1143. }
  1144. /* get port info */
  1145. snd_seq_get_port_info(port, &info);
  1146. snd_seq_port_unlock(port);
  1147. snd_seq_client_unlock(cptr);
  1148. if (copy_to_user(arg, &info, sizeof(info)))
  1149. return -EFAULT;
  1150. return 0;
  1151. }
  1152. /*
  1153. * SET_PORT_INFO ioctl() (only ports on this/own client)
  1154. */
  1155. static int snd_seq_ioctl_set_port_info(struct snd_seq_client *client,
  1156. void __user *arg)
  1157. {
  1158. struct snd_seq_client_port *port;
  1159. struct snd_seq_port_info info;
  1160. if (copy_from_user(&info, arg, sizeof(info)))
  1161. return -EFAULT;
  1162. if (info.addr.client != client->number) /* only set our own ports ! */
  1163. return -EPERM;
  1164. port = snd_seq_port_use_ptr(client, info.addr.port);
  1165. if (port) {
  1166. snd_seq_set_port_info(port, &info);
  1167. snd_seq_port_unlock(port);
  1168. }
  1169. return 0;
  1170. }
  1171. /*
  1172. * port subscription (connection)
  1173. */
  1174. #define PERM_RD (SNDRV_SEQ_PORT_CAP_READ|SNDRV_SEQ_PORT_CAP_SUBS_READ)
  1175. #define PERM_WR (SNDRV_SEQ_PORT_CAP_WRITE|SNDRV_SEQ_PORT_CAP_SUBS_WRITE)
  1176. static int check_subscription_permission(struct snd_seq_client *client,
  1177. struct snd_seq_client_port *sport,
  1178. struct snd_seq_client_port *dport,
  1179. struct snd_seq_port_subscribe *subs)
  1180. {
  1181. if (client->number != subs->sender.client &&
  1182. client->number != subs->dest.client) {
  1183. /* connection by third client - check export permission */
  1184. if (check_port_perm(sport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
  1185. return -EPERM;
  1186. if (check_port_perm(dport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
  1187. return -EPERM;
  1188. }
  1189. /* check read permission */
  1190. /* if sender or receiver is the subscribing client itself,
  1191. * no permission check is necessary
  1192. */
  1193. if (client->number != subs->sender.client) {
  1194. if (! check_port_perm(sport, PERM_RD))
  1195. return -EPERM;
  1196. }
  1197. /* check write permission */
  1198. if (client->number != subs->dest.client) {
  1199. if (! check_port_perm(dport, PERM_WR))
  1200. return -EPERM;
  1201. }
  1202. return 0;
  1203. }
  1204. /*
  1205. * send a subscription notify event to a user client:
  1206. * the client must be a user client.
  1207. */
  1208. int snd_seq_client_notify_subscription(int client, int port,
  1209. struct snd_seq_port_subscribe *info,
  1210. int evtype)
  1211. {
  1212. struct snd_seq_event event;
  1213. memset(&event, 0, sizeof(event));
  1214. event.type = evtype;
  1215. event.data.connect.dest = info->dest;
  1216. event.data.connect.sender = info->sender;
  1217. return snd_seq_system_notify(client, port, &event); /* non-atomic */
  1218. }
  1219. /*
  1220. * add to port's subscription list IOCTL interface
  1221. */
  1222. static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client,
  1223. void __user *arg)
  1224. {
  1225. int result = -EINVAL;
  1226. struct snd_seq_client *receiver = NULL, *sender = NULL;
  1227. struct snd_seq_client_port *sport = NULL, *dport = NULL;
  1228. struct snd_seq_port_subscribe subs;
	if (copy_from_user(&subs, arg, sizeof(subs)))
		return -EFAULT;

	if ((receiver = snd_seq_client_use_ptr(subs.dest.client)) == NULL)
		goto __end;
	if ((sender = snd_seq_client_use_ptr(subs.sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs.sender.port)) == NULL)
		goto __end;
	if ((dport = snd_seq_port_use_ptr(receiver, subs.dest.port)) == NULL)
		goto __end;

	result = check_subscription_permission(client, sport, dport, &subs);
	if (result < 0)
		goto __end;

	/* connect them */
	result = snd_seq_port_connect(client, sender, sport, receiver, dport, &subs);
	if (! result) /* broadcast announce */
		snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
						   &subs, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED);
 __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (dport)
		snd_seq_port_unlock(dport);
	if (sender)
		snd_seq_client_unlock(sender);
	if (receiver)
		snd_seq_client_unlock(receiver);
	return result;
}

/*
 * remove from port's subscription list
 */
static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
					  void __user *arg)
{
	int result = -ENXIO;
	struct snd_seq_client *receiver = NULL, *sender = NULL;
	struct snd_seq_client_port *sport = NULL, *dport = NULL;
	struct snd_seq_port_subscribe subs;

	if (copy_from_user(&subs, arg, sizeof(subs)))
		return -EFAULT;

	if ((receiver = snd_seq_client_use_ptr(subs.dest.client)) == NULL)
		goto __end;
	if ((sender = snd_seq_client_use_ptr(subs.sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs.sender.port)) == NULL)
		goto __end;
	if ((dport = snd_seq_port_use_ptr(receiver, subs.dest.port)) == NULL)
		goto __end;

	result = check_subscription_permission(client, sport, dport, &subs);
	if (result < 0)
		goto __end;

	result = snd_seq_port_disconnect(client, sender, sport, receiver, dport, &subs);
	if (! result) /* broadcast announce */
		snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
						   &subs, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED);
 __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (dport)
		snd_seq_port_unlock(dport);
	if (sender)
		snd_seq_client_unlock(sender);
	if (receiver)
		snd_seq_client_unlock(receiver);
	return result;
}

/* CREATE_QUEUE ioctl() */
static int snd_seq_ioctl_create_queue(struct snd_seq_client *client,
				      void __user *arg)
{
	struct snd_seq_queue_info info;
	int result;
	struct snd_seq_queue *q;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	result = snd_seq_queue_alloc(client->number, info.locked, info.flags);
	if (result < 0)
		return result;

	q = queueptr(result);
	if (q == NULL)
		return -EINVAL;

	info.queue = q->queue;
	info.locked = q->locked;
	info.owner = q->owner;

	/* set queue name */
	if (! info.name[0])
		snprintf(info.name, sizeof(info.name), "Queue-%d", q->queue);
	strlcpy(q->name, info.name, sizeof(q->name));
	queuefree(q);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
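
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * the allocated queue number is written back into the same struct, so a
 * client typically does roughly
 *
 *	struct snd_seq_queue_info qi;
 *	memset(&qi, 0, sizeof(qi));
 *	strcpy(qi.name, "my queue");
 *	if (ioctl(seq_fd, SNDRV_SEQ_IOCTL_CREATE_QUEUE, &qi) == 0)
 *		printf("allocated queue %d\n", qi.queue);
 *
 * where seq_fd is an open sequencer device file descriptor.
 */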

/* DELETE_QUEUE ioctl() */
static int snd_seq_ioctl_delete_queue(struct snd_seq_client *client,
				      void __user *arg)
{
	struct snd_seq_queue_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	return snd_seq_queue_delete(client->number, info.queue);
}

/* GET_QUEUE_INFO ioctl() */
static int snd_seq_ioctl_get_queue_info(struct snd_seq_client *client,
					void __user *arg)
{
	struct snd_seq_queue_info info;
	struct snd_seq_queue *q;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	q = queueptr(info.queue);
	if (q == NULL)
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	info.queue = q->queue;
	info.owner = q->owner;
	info.locked = q->locked;
	strlcpy(info.name, q->name, sizeof(info.name));
	queuefree(q);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

/* SET_QUEUE_INFO ioctl() */
static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
					void __user *arg)
{
	struct snd_seq_queue_info info;
	struct snd_seq_queue *q;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	if (info.owner != client->number)
		return -EINVAL;

	/* change owner/locked permission */
	if (snd_seq_queue_check_access(info.queue, client->number)) {
		if (snd_seq_queue_set_owner(info.queue, client->number, info.locked) < 0)
			return -EPERM;
		if (info.locked)
			snd_seq_queue_use(info.queue, client->number, 1);
	} else {
		return -EPERM;
	}

	q = queueptr(info.queue);
	if (! q)
		return -EINVAL;
	if (q->owner != client->number) {
		queuefree(q);
		return -EPERM;
	}
	strlcpy(q->name, info.name, sizeof(q->name));
	queuefree(q);

	return 0;
}

/* GET_NAMED_QUEUE ioctl() */
static int snd_seq_ioctl_get_named_queue(struct snd_seq_client *client, void __user *arg)
{
	struct snd_seq_queue_info info;
	struct snd_seq_queue *q;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	q = snd_seq_queue_find_name(info.name);
	if (q == NULL)
		return -EINVAL;
	info.queue = q->queue;
	info.owner = q->owner;
	info.locked = q->locked;
	queuefree(q);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

/* GET_QUEUE_STATUS ioctl() */
static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
					  void __user *arg)
{
	struct snd_seq_queue_status status;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	if (copy_from_user(&status, arg, sizeof(status)))
		return -EFAULT;

	queue = queueptr(status.queue);
	if (queue == NULL)
		return -EINVAL;
	memset(&status, 0, sizeof(status));
	status.queue = queue->queue;

	tmr = queue->timer;
	status.events = queue->tickq->cells + queue->timeq->cells;

	status.time = snd_seq_timer_get_cur_time(tmr);
	status.tick = snd_seq_timer_get_cur_tick(tmr);

	status.running = tmr->running;

	status.flags = queue->flags;
	queuefree(queue);

	if (copy_to_user(arg, &status, sizeof(status)))
		return -EFAULT;
	return 0;
}

/* GET_QUEUE_TEMPO ioctl() */
static int snd_seq_ioctl_get_queue_tempo(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_queue_tempo tempo;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	if (copy_from_user(&tempo, arg, sizeof(tempo)))
		return -EFAULT;

	queue = queueptr(tempo.queue);
	if (queue == NULL)
		return -EINVAL;
	memset(&tempo, 0, sizeof(tempo));
	tempo.queue = queue->queue;

	tmr = queue->timer;
	tempo.tempo = tmr->tempo;
	tempo.ppq = tmr->ppq;
	tempo.skew_value = tmr->skew;
	tempo.skew_base = tmr->skew_base;
	queuefree(queue);

	if (copy_to_user(arg, &tempo, sizeof(tempo)))
		return -EFAULT;
	return 0;
}

/* SET_QUEUE_TEMPO ioctl() */
int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo)
{
	if (!snd_seq_queue_check_access(tempo->queue, client))
		return -EPERM;
	return snd_seq_queue_timer_set_tempo(tempo->queue, client, tempo);
}

EXPORT_SYMBOL(snd_seq_set_queue_tempo);
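
/*
 * Illustrative kernel-side sketch (an assumption, not from this file):
 * setting a queue to 120 BPM at 96 PPQ, where tempo is expressed in
 * microseconds per quarter note:
 *
 *	struct snd_seq_queue_tempo t;
 *	memset(&t, 0, sizeof(t));
 *	t.queue = my_queue;	(a queue the client may access)
 *	t.tempo = 500000;	(60,000,000 / 120 BPM)
 *	t.ppq = 96;
 *	err = snd_seq_set_queue_tempo(my_client, &t);
 */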

static int snd_seq_ioctl_set_queue_tempo(struct snd_seq_client *client,
					 void __user *arg)
{
	int result;
	struct snd_seq_queue_tempo tempo;

	if (copy_from_user(&tempo, arg, sizeof(tempo)))
		return -EFAULT;

	result = snd_seq_set_queue_tempo(client->number, &tempo);
	return result < 0 ? result : 0;
}

/* GET_QUEUE_TIMER ioctl() */
static int snd_seq_ioctl_get_queue_timer(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_queue_timer timer;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	if (copy_from_user(&timer, arg, sizeof(timer)))
		return -EFAULT;

	queue = queueptr(timer.queue);
	if (queue == NULL)
		return -EINVAL;

	if (mutex_lock_interruptible(&queue->timer_mutex)) {
		queuefree(queue);
		return -ERESTARTSYS;
	}
	tmr = queue->timer;
	memset(&timer, 0, sizeof(timer));
	timer.queue = queue->queue;

	timer.type = tmr->type;
	if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
		timer.u.alsa.id = tmr->alsa_id;
		timer.u.alsa.resolution = tmr->preferred_resolution;
	}
	mutex_unlock(&queue->timer_mutex);
	queuefree(queue);

	if (copy_to_user(arg, &timer, sizeof(timer)))
		return -EFAULT;
	return 0;
}

/* SET_QUEUE_TIMER ioctl() */
static int snd_seq_ioctl_set_queue_timer(struct snd_seq_client *client,
					 void __user *arg)
{
	int result = 0;
	struct snd_seq_queue_timer timer;

	if (copy_from_user(&timer, arg, sizeof(timer)))
		return -EFAULT;

	if (timer.type != SNDRV_SEQ_TIMER_ALSA)
		return -EINVAL;

	if (snd_seq_queue_check_access(timer.queue, client->number)) {
		struct snd_seq_queue *q;
		struct snd_seq_timer *tmr;

		q = queueptr(timer.queue);
		if (q == NULL)
			return -ENXIO;
		if (mutex_lock_interruptible(&q->timer_mutex)) {
			queuefree(q);
			return -ERESTARTSYS;
		}
		tmr = q->timer;
		snd_seq_queue_timer_close(timer.queue);
		tmr->type = timer.type;
		if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
			tmr->alsa_id = timer.u.alsa.id;
			tmr->preferred_resolution = timer.u.alsa.resolution;
		}
		result = snd_seq_queue_timer_open(timer.queue);
		mutex_unlock(&q->timer_mutex);
		queuefree(q);
	} else {
		return -EPERM;
	}

	return result;
}

/* GET_QUEUE_CLIENT ioctl() */
static int snd_seq_ioctl_get_queue_client(struct snd_seq_client *client,
					  void __user *arg)
{
	struct snd_seq_queue_client info;
	int used;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	used = snd_seq_queue_is_used(info.queue, client->number);
	if (used < 0)
		return -EINVAL;
	info.used = used;
	info.client = client->number;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/* SET_QUEUE_CLIENT ioctl() */
static int snd_seq_ioctl_set_queue_client(struct snd_seq_client *client,
					  void __user *arg)
{
	int err;
	struct snd_seq_queue_client info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	if (info.used >= 0) {
		err = snd_seq_queue_use(info.queue, client->number, info.used);
		if (err < 0)
			return err;
	}

	return snd_seq_ioctl_get_queue_client(client, arg);
}

/* GET_CLIENT_POOL ioctl() */
static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_client_pool info;
	struct snd_seq_client *cptr;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	cptr = snd_seq_client_use_ptr(info.client);
	if (cptr == NULL)
		return -ENOENT;
	memset(&info, 0, sizeof(info));
	info.output_pool = cptr->pool->size;
	info.output_room = cptr->pool->room;
	info.output_free = info.output_pool;
	info.output_free = snd_seq_unused_cells(cptr->pool);
	if (cptr->type == USER_CLIENT) {
		info.input_pool = cptr->data.user.fifo_pool_size;
		info.input_free = info.input_pool;
		if (cptr->data.user.fifo)
			info.input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
	} else {
		info.input_pool = 0;
		info.input_free = 0;
	}
	snd_seq_client_unlock(cptr);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/* SET_CLIENT_POOL ioctl() */
static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_client_pool info;
	int rc;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	if (client->number != info.client)
		return -EINVAL; /* can't change other clients */

	if (info.output_pool >= 1 && info.output_pool <= SNDRV_SEQ_MAX_EVENTS &&
	    (! snd_seq_write_pool_allocated(client) ||
	     info.output_pool != client->pool->size)) {
		if (snd_seq_write_pool_allocated(client)) {
			/* remove all existing cells */
			snd_seq_queue_client_leave_cells(client->number);
			snd_seq_pool_done(client->pool);
		}
		client->pool->size = info.output_pool;
		rc = snd_seq_pool_init(client->pool);
		if (rc < 0)
			return rc;
	}
	if (client->type == USER_CLIENT && client->data.user.fifo != NULL &&
	    info.input_pool >= 1 &&
	    info.input_pool <= SNDRV_SEQ_MAX_CLIENT_EVENTS &&
	    info.input_pool != client->data.user.fifo_pool_size) {
		/* change pool size */
		rc = snd_seq_fifo_resize(client->data.user.fifo, info.input_pool);
		if (rc < 0)
			return rc;
		client->data.user.fifo_pool_size = info.input_pool;
	}
	if (info.output_room >= 1 &&
	    info.output_room <= client->pool->size) {
		client->pool->room = info.output_room;
	}

	return snd_seq_ioctl_get_client_pool(client, arg);
}
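
/*
 * Note: resizing the output pool above first removes any cells the client
 * still owns on the queues (snd_seq_queue_client_leave_cells()), i.e.
 * pending output events are discarded before the pool is torn down and
 * reinitialized with the new size.
 */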

/* REMOVE_EVENTS ioctl() */
static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
				       void __user *arg)
{
	struct snd_seq_remove_events info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	/*
	 * Input mostly not implemented XXX.
	 */
	if (info.remove_mode & SNDRV_SEQ_REMOVE_INPUT) {
		/*
		 * No restrictions, so for a user client we can clear
		 * the whole fifo
		 */
		if (client->type == USER_CLIENT)
			snd_seq_fifo_clear(client->data.user.fifo);
	}

	if (info.remove_mode & SNDRV_SEQ_REMOVE_OUTPUT)
		snd_seq_queue_remove_cells(client->number, &info);

	return 0;
}

/*
 * get subscription info
 */
static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
					  void __user *arg)
{
	int result;
	struct snd_seq_client *sender = NULL;
	struct snd_seq_client_port *sport = NULL;
	struct snd_seq_port_subscribe subs;
	struct snd_seq_subscribers *p;

	if (copy_from_user(&subs, arg, sizeof(subs)))
		return -EFAULT;

	result = -EINVAL;
	if ((sender = snd_seq_client_use_ptr(subs.sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs.sender.port)) == NULL)
		goto __end;
	p = snd_seq_port_get_subscription(&sport->c_src, &subs.dest);
	if (p) {
		result = 0;
		subs = p->info;
	} else
		result = -ENOENT;

 __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (sender)
		snd_seq_client_unlock(sender);
	if (result >= 0) {
		if (copy_to_user(arg, &subs, sizeof(subs)))
			return -EFAULT;
	}
	return result;
}

/*
 * get subscription info - check only its presence
 */
static int snd_seq_ioctl_query_subs(struct snd_seq_client *client,
				    void __user *arg)
{
	int result = -ENXIO;
	struct snd_seq_client *cptr = NULL;
	struct snd_seq_client_port *port = NULL;
	struct snd_seq_query_subs subs;
	struct snd_seq_port_subs_info *group;
	struct list_head *p;
	int i;

	if (copy_from_user(&subs, arg, sizeof(subs)))
		return -EFAULT;

	if ((cptr = snd_seq_client_use_ptr(subs.root.client)) == NULL)
		goto __end;
	if ((port = snd_seq_port_use_ptr(cptr, subs.root.port)) == NULL)
		goto __end;

	switch (subs.type) {
	case SNDRV_SEQ_QUERY_SUBS_READ:
		group = &port->c_src;
		break;
	case SNDRV_SEQ_QUERY_SUBS_WRITE:
		group = &port->c_dest;
		break;
	default:
		goto __end;
	}

	down_read(&group->list_mutex);
	/* search for the subscriber */
	subs.num_subs = group->count;
	i = 0;
	result = -ENOENT;
	list_for_each(p, &group->list_head) {
		if (i++ == subs.index) {
			/* found! */
			struct snd_seq_subscribers *s;
			if (subs.type == SNDRV_SEQ_QUERY_SUBS_READ) {
				s = list_entry(p, struct snd_seq_subscribers, src_list);
				subs.addr = s->info.dest;
			} else {
				s = list_entry(p, struct snd_seq_subscribers, dest_list);
				subs.addr = s->info.sender;
			}
			subs.flags = s->info.flags;
			subs.queue = s->info.queue;
			result = 0;
			break;
		}
	}
	up_read(&group->list_mutex);

 __end:
	if (port)
		snd_seq_port_unlock(port);
	if (cptr)
		snd_seq_client_unlock(cptr);
	if (result >= 0) {
		if (copy_to_user(arg, &subs, sizeof(subs)))
			return -EFAULT;
	}
	return result;
}
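
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * the query above is indexed, so a client enumerates a port's readers by
 * bumping 'index' until the ioctl returns -ENOENT:
 *
 *	struct snd_seq_query_subs q;
 *	memset(&q, 0, sizeof(q));
 *	q.root = my_addr;	(the client:port being inspected)
 *	q.type = SNDRV_SEQ_QUERY_SUBS_READ;
 *	for (q.index = 0;
 *	     ioctl(seq_fd, SNDRV_SEQ_IOCTL_QUERY_SUBS, &q) == 0;
 *	     q.index++)
 *		printf("-> %d:%d\n", q.addr.client, q.addr.port);
 */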

/*
 * query next client
 */
static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
					   void __user *arg)
{
	struct snd_seq_client *cptr = NULL;
	struct snd_seq_client_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	/* search for next client */
	info.client++;
	if (info.client < 0)
		info.client = 0;
	for (; info.client < SNDRV_SEQ_MAX_CLIENTS; info.client++) {
		cptr = snd_seq_client_use_ptr(info.client);
		if (cptr)
			break; /* found */
	}
	if (cptr == NULL)
		return -ENOENT;

	get_client_info(cptr, &info);
	snd_seq_client_unlock(cptr);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
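
/*
 * Note: the caller passes the last client number it has seen (or -1 to
 * start from the beginning); the search resumes at the next number and
 * -ENOENT marks the end of the enumeration.  QUERY_NEXT_PORT below
 * follows the same convention for port numbers.
 */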

/*
 * query next port
 */
static int snd_seq_ioctl_query_next_port(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_client *cptr;
	struct snd_seq_client_port *port = NULL;
	struct snd_seq_port_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;
	cptr = snd_seq_client_use_ptr(info.addr.client);
	if (cptr == NULL)
		return -ENXIO;

	/* search for next port */
	info.addr.port++;
	port = snd_seq_port_query_nearest(cptr, &info);
	if (port == NULL) {
		snd_seq_client_unlock(cptr);
		return -ENOENT;
	}

	/* get port info */
	info.addr = port->addr;
	snd_seq_get_port_info(port, &info);
	snd_seq_port_unlock(port);
	snd_seq_client_unlock(cptr);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/* -------------------------------------------------------- */

static struct seq_ioctl_table {
	unsigned int cmd;
	int (*func)(struct snd_seq_client *client, void __user * arg);
} ioctl_tables[] = {
	{ SNDRV_SEQ_IOCTL_SYSTEM_INFO, snd_seq_ioctl_system_info },
	{ SNDRV_SEQ_IOCTL_RUNNING_MODE, snd_seq_ioctl_running_mode },
	{ SNDRV_SEQ_IOCTL_GET_CLIENT_INFO, snd_seq_ioctl_get_client_info },
	{ SNDRV_SEQ_IOCTL_SET_CLIENT_INFO, snd_seq_ioctl_set_client_info },
	{ SNDRV_SEQ_IOCTL_CREATE_PORT, snd_seq_ioctl_create_port },
	{ SNDRV_SEQ_IOCTL_DELETE_PORT, snd_seq_ioctl_delete_port },
	{ SNDRV_SEQ_IOCTL_GET_PORT_INFO, snd_seq_ioctl_get_port_info },
	{ SNDRV_SEQ_IOCTL_SET_PORT_INFO, snd_seq_ioctl_set_port_info },
	{ SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT, snd_seq_ioctl_subscribe_port },
	{ SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT, snd_seq_ioctl_unsubscribe_port },
	{ SNDRV_SEQ_IOCTL_CREATE_QUEUE, snd_seq_ioctl_create_queue },
	{ SNDRV_SEQ_IOCTL_DELETE_QUEUE, snd_seq_ioctl_delete_queue },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_INFO, snd_seq_ioctl_get_queue_info },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_INFO, snd_seq_ioctl_set_queue_info },
	{ SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE, snd_seq_ioctl_get_named_queue },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS, snd_seq_ioctl_get_queue_status },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO, snd_seq_ioctl_get_queue_tempo },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO, snd_seq_ioctl_set_queue_tempo },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER, snd_seq_ioctl_get_queue_timer },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER, snd_seq_ioctl_set_queue_timer },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT, snd_seq_ioctl_get_queue_client },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT, snd_seq_ioctl_set_queue_client },
	{ SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, snd_seq_ioctl_get_client_pool },
	{ SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, snd_seq_ioctl_set_client_pool },
	{ SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION, snd_seq_ioctl_get_subscription },
	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT, snd_seq_ioctl_query_next_client },
	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT, snd_seq_ioctl_query_next_port },
	{ SNDRV_SEQ_IOCTL_REMOVE_EVENTS, snd_seq_ioctl_remove_events },
	{ SNDRV_SEQ_IOCTL_QUERY_SUBS, snd_seq_ioctl_query_subs },
	{ 0, NULL },
};

static int snd_seq_do_ioctl(struct snd_seq_client *client, unsigned int cmd,
			    void __user *arg)
{
	struct seq_ioctl_table *p;

	switch (cmd) {
	case SNDRV_SEQ_IOCTL_PVERSION:
		/* return sequencer version number */
		return put_user(SNDRV_SEQ_VERSION, (int __user *)arg) ? -EFAULT : 0;
	case SNDRV_SEQ_IOCTL_CLIENT_ID:
		/* return the id of this client */
		return put_user(client->number, (int __user *)arg) ? -EFAULT : 0;
	}

	if (! arg)
		return -EFAULT;
	for (p = ioctl_tables; p->cmd; p++) {
		if (p->cmd == cmd)
			return p->func(client, arg);
	}
	snd_printd("seq unknown ioctl() 0x%x (type='%c', number=0x%02x)\n",
		   cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
	return -ENOTTY;
}
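
/*
 * Note: PVERSION and CLIENT_ID are answered inline above because they only
 * copy a single integer back to the caller; every other command is looked
 * up in ioctl_tables[] and handed the client pointer plus the raw user
 * argument, which each handler copies in and validates itself.
 */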

static long snd_seq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct snd_seq_client *client = file->private_data;

	if (snd_BUG_ON(!client))
		return -ENXIO;

	return snd_seq_do_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
#include "seq_compat.c"
#else
#define snd_seq_ioctl_compat	NULL
#endif

/* -------------------------------------------------------- */

/* exported to kernel modules */
int snd_seq_create_kernel_client(struct snd_card *card, int client_index,
				 const char *name_fmt, ...)
{
	struct snd_seq_client *client;
	va_list args;

	if (snd_BUG_ON(in_interrupt()))
		return -EBUSY;

	if (card && client_index >= SNDRV_SEQ_CLIENTS_PER_CARD)
		return -EINVAL;
	if (card == NULL && client_index >= SNDRV_SEQ_GLOBAL_CLIENTS)
		return -EINVAL;

	if (mutex_lock_interruptible(&register_mutex))
		return -ERESTARTSYS;

	if (card) {
		client_index += SNDRV_SEQ_GLOBAL_CLIENTS
			+ card->number * SNDRV_SEQ_CLIENTS_PER_CARD;
		if (client_index >= SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN)
			client_index = -1;
	}

	/* empty write queue as default */
	client = seq_create_client1(client_index, 0);
	if (client == NULL) {
		mutex_unlock(&register_mutex);
		return -EBUSY;	/* failure code */
	}
	usage_alloc(&client_usage, 1);

	client->accept_input = 1;
	client->accept_output = 1;

	va_start(args, name_fmt);
	vsnprintf(client->name, sizeof(client->name), name_fmt, args);
	va_end(args);

	client->type = KERNEL_CLIENT;
	mutex_unlock(&register_mutex);

	/* make others aware of this new client */
	snd_seq_system_client_ev_client_start(client->number);

	/* return client number to caller */
	return client->number;
}

EXPORT_SYMBOL(snd_seq_create_kernel_client);
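
/*
 * Illustrative sketch (an assumption, not from this file): a driver
 * typically pairs this with snd_seq_kernel_client_ctl() to create its
 * ports, e.g.
 *
 *	int my_client;
 *	struct snd_seq_port_info pinfo;
 *
 *	my_client = snd_seq_create_kernel_client(card, 0, "My MIDI Device");
 *	if (my_client < 0)
 *		return my_client;
 *	memset(&pinfo, 0, sizeof(pinfo));
 *	pinfo.addr.client = my_client;
 *	strcpy(pinfo.name, "My MIDI Port");
 *	pinfo.capability = SNDRV_SEQ_PORT_CAP_READ | SNDRV_SEQ_PORT_CAP_SUBS_READ;
 *	snd_seq_kernel_client_ctl(my_client, SNDRV_SEQ_IOCTL_CREATE_PORT, &pinfo);
 */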

/* exported to kernel modules */
int snd_seq_delete_kernel_client(int client)
{
	struct snd_seq_client *ptr;

	if (snd_BUG_ON(in_interrupt()))
		return -EBUSY;

	ptr = clientptr(client);
	if (ptr == NULL)
		return -EINVAL;

	seq_free_client(ptr);
	kfree(ptr);
	return 0;
}

EXPORT_SYMBOL(snd_seq_delete_kernel_client);

/* skeleton to enqueue event, called from snd_seq_kernel_client_enqueue
 * and snd_seq_kernel_client_enqueue_blocking
 */
static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
				 struct file *file, int blocking,
				 int atomic, int hop)
{
	struct snd_seq_client *cptr;
	int result;

	if (snd_BUG_ON(!ev))
		return -EINVAL;

	if (ev->type == SNDRV_SEQ_EVENT_NONE)
		return 0; /* ignore this */
	if (ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR)
		return -EINVAL; /* quoted events can't be enqueued */

	/* fill in client number */
	ev->source.client = client;

	if (check_event_type_and_length(ev))
		return -EINVAL;

	cptr = snd_seq_client_use_ptr(client);
	if (cptr == NULL)
		return -EINVAL;

	if (! cptr->accept_output)
		result = -EPERM;
	else /* send it */
		result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);

	snd_seq_client_unlock(cptr);
	return result;
}

/*
 * exported, called by kernel clients to enqueue events (w/o blocking)
 *
 * RETURN VALUE: zero on success, negative on error
 */
int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event * ev,
				  int atomic, int hop)
{
	return kernel_client_enqueue(client, ev, NULL, 0, atomic, hop);
}

EXPORT_SYMBOL(snd_seq_kernel_client_enqueue);

/*
 * exported, called by kernel clients to enqueue events (with blocking)
 *
 * RETURN VALUE: zero on success, negative on error
 */
int snd_seq_kernel_client_enqueue_blocking(int client, struct snd_seq_event * ev,
					   struct file *file,
					   int atomic, int hop)
{
	return kernel_client_enqueue(client, ev, file, 1, atomic, hop);
}

EXPORT_SYMBOL(snd_seq_kernel_client_enqueue_blocking);

/*
 * exported, called by kernel clients to dispatch events directly to other
 * clients, bypassing the queues.  Event time-stamp will be updated.
 *
 * RETURN VALUE: negative = delivery failed,
 *		 zero, or positive: the number of delivered events
 */
int snd_seq_kernel_client_dispatch(int client, struct snd_seq_event * ev,
				   int atomic, int hop)
{
	struct snd_seq_client *cptr;
	int result;

	if (snd_BUG_ON(!ev))
		return -EINVAL;

	/* fill in client number */
	ev->queue = SNDRV_SEQ_QUEUE_DIRECT;
	ev->source.client = client;

	if (check_event_type_and_length(ev))
		return -EINVAL;

	cptr = snd_seq_client_use_ptr(client);
	if (cptr == NULL)
		return -EINVAL;

	if (!cptr->accept_output)
		result = -EPERM;
	else
		result = snd_seq_deliver_event(cptr, ev, atomic, hop);

	snd_seq_client_unlock(cptr);
	return result;
}

EXPORT_SYMBOL(snd_seq_kernel_client_dispatch);
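
/*
 * Illustrative sketch (an assumption, not from this file): a kernel client
 * pushing a note-on straight to whoever is subscribed to its port:
 *
 *	struct snd_seq_event ev;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.type = SNDRV_SEQ_EVENT_NOTEON;
 *	ev.source.port = my_port;
 *	ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
 *	ev.data.note.channel = 0;
 *	ev.data.note.note = 60;
 *	ev.data.note.velocity = 100;
 *	snd_seq_kernel_client_dispatch(my_client, &ev, 0, 0);
 *
 * source.client and queue are filled in by the function itself, as above.
 */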

/*
 * exported, called by kernel clients to perform the same functions as the
 * userland ioctl()
 */
int snd_seq_kernel_client_ctl(int clientid, unsigned int cmd, void *arg)
{
	struct snd_seq_client *client;
	mm_segment_t fs;
	int result;

	client = clientptr(clientid);
	if (client == NULL)
		return -ENXIO;
	fs = snd_enter_user();
	result = snd_seq_do_ioctl(client, cmd, (void __force __user *)arg);
	snd_leave_user(fs);
	return result;
}

EXPORT_SYMBOL(snd_seq_kernel_client_ctl);

/* exported (for OSS emulator) */
int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table *wait)
{
	struct snd_seq_client *client;

	client = clientptr(clientid);
	if (client == NULL)
		return -ENXIO;

	if (! snd_seq_write_pool_allocated(client))
		return 1;
	if (snd_seq_pool_poll_wait(client->pool, file, wait))
		return 1;
	return 0;
}

EXPORT_SYMBOL(snd_seq_kernel_client_write_poll);
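
/*
 * Note (my understanding of the helper, see seq_memory.c): the poll helper
 * above reports "writable" (returns 1) when the client has no output pool
 * allocated yet or when snd_seq_pool_poll_wait() indicates there is room
 * in the pool; the OSS sequencer emulation uses this to implement
 * poll()/select() on its write side.
 */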

/*---------------------------------------------------------------------------*/

#ifdef CONFIG_PROC_FS
/*
 *  /proc interface
 */
static void snd_seq_info_dump_subscribers(struct snd_info_buffer *buffer,
					  struct snd_seq_port_subs_info *group,
					  int is_src, char *msg)
{
	struct list_head *p;
	struct snd_seq_subscribers *s;
	int count = 0;

	down_read(&group->list_mutex);
	if (list_empty(&group->list_head)) {
		up_read(&group->list_mutex);
		return;
	}
	snd_iprintf(buffer, msg);
	list_for_each(p, &group->list_head) {
		if (is_src)
			s = list_entry(p, struct snd_seq_subscribers, src_list);
		else
			s = list_entry(p, struct snd_seq_subscribers, dest_list);
		if (count++)
			snd_iprintf(buffer, ", ");
		snd_iprintf(buffer, "%d:%d",
			    is_src ? s->info.dest.client : s->info.sender.client,
			    is_src ? s->info.dest.port : s->info.sender.port);
		if (s->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
			snd_iprintf(buffer, "[%c:%d]",
				    ((s->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL) ? 'r' : 't'),
				    s->info.queue);
		if (group->exclusive)
			snd_iprintf(buffer, "[ex]");
	}
	up_read(&group->list_mutex);
	snd_iprintf(buffer, "\n");
}

#define FLAG_PERM_RD(perm) ((perm) & SNDRV_SEQ_PORT_CAP_READ ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_READ ? 'R' : 'r') : '-')
#define FLAG_PERM_WR(perm) ((perm) & SNDRV_SEQ_PORT_CAP_WRITE ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_WRITE ? 'W' : 'w') : '-')
#define FLAG_PERM_EX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_NO_EXPORT ? '-' : 'e')
#define FLAG_PERM_DUPLEX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_DUPLEX ? 'X' : '-')
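
/*
 * The four characters printed per port in /proc read as follows:
 *   'R'/'W'  readable/writable and open to subscription,
 *   'r'/'w'  readable/writable by direct access only (no SUBS capability),
 *   '-'      capability absent,
 *   'e'      exportable to other clients (CAP_NO_EXPORT not set),
 *   'X'      duplex port.
 */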

static void snd_seq_info_dump_ports(struct snd_info_buffer *buffer,
				    struct snd_seq_client *client)
{
	struct snd_seq_client_port *p;

	mutex_lock(&client->ports_mutex);
	list_for_each_entry(p, &client->ports_list_head, list) {
		snd_iprintf(buffer, "  Port %3d : \"%s\" (%c%c%c%c)\n",
			    p->addr.port, p->name,
			    FLAG_PERM_RD(p->capability),
			    FLAG_PERM_WR(p->capability),
			    FLAG_PERM_EX(p->capability),
			    FLAG_PERM_DUPLEX(p->capability));
		snd_seq_info_dump_subscribers(buffer, &p->c_src, 1, "    Connecting To: ");
		snd_seq_info_dump_subscribers(buffer, &p->c_dest, 0, "    Connected From: ");
	}
	mutex_unlock(&client->ports_mutex);
}

/* exported to seq_info.c */
void snd_seq_info_clients_read(struct snd_info_entry *entry,
			       struct snd_info_buffer *buffer)
{
	int c;
	struct snd_seq_client *client;

	snd_iprintf(buffer, "Client info\n");
	snd_iprintf(buffer, "  cur  clients : %d\n", client_usage.cur);
	snd_iprintf(buffer, "  peak clients : %d\n", client_usage.peak);
	snd_iprintf(buffer, "  max  clients : %d\n", SNDRV_SEQ_MAX_CLIENTS);
	snd_iprintf(buffer, "\n");

	/* list the client table */
	for (c = 0; c < SNDRV_SEQ_MAX_CLIENTS; c++) {
		client = snd_seq_client_use_ptr(c);
		if (client == NULL)
			continue;
		if (client->type == NO_CLIENT) {
			snd_seq_client_unlock(client);
			continue;
		}

		snd_iprintf(buffer, "Client %3d : \"%s\" [%s]\n",
			    c, client->name,
			    client->type == USER_CLIENT ? "User" : "Kernel");
		snd_seq_info_dump_ports(buffer, client);
		if (snd_seq_write_pool_allocated(client)) {
			snd_iprintf(buffer, "  Output pool :\n");
			snd_seq_info_pool(buffer, client->pool, "    ");
		}
		if (client->type == USER_CLIENT && client->data.user.fifo &&
		    client->data.user.fifo->pool) {
			snd_iprintf(buffer, "  Input pool :\n");
			snd_seq_info_pool(buffer, client->data.user.fifo->pool, "    ");
		}
		snd_seq_client_unlock(client);
	}
}
#endif /* CONFIG_PROC_FS */

/*---------------------------------------------------------------------------*/

/*
 *  REGISTRATION PART
 */

static const struct file_operations snd_seq_f_ops =
{
	.owner =	THIS_MODULE,
	.read =		snd_seq_read,
	.write =	snd_seq_write,
	.open =		snd_seq_open,
	.release =	snd_seq_release,
	.llseek =	no_llseek,
	.poll =		snd_seq_poll,
	.unlocked_ioctl =	snd_seq_ioctl,
	.compat_ioctl =	snd_seq_ioctl_compat,
};

/*
 * register sequencer device
 */
int __init snd_sequencer_device_init(void)
{
	int err;

	if (mutex_lock_interruptible(&register_mutex))
		return -ERESTARTSYS;

	if ((err = snd_register_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0,
				       &snd_seq_f_ops, NULL, "seq")) < 0) {
		mutex_unlock(&register_mutex);
		return err;
	}

	mutex_unlock(&register_mutex);

	return 0;
}

/*
 * unregister sequencer device
 */
void __exit snd_sequencer_device_done(void)
{
	snd_unregister_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0);
}