4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * Implements upper edge functions for Bridge message module.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
18 #include <linux/types.h>
20 /* ----------------------------------- DSP/BIOS Bridge */
21 #include <dspbridge/dbdefs.h>
23 /* ----------------------------------- OS Adaptation Layer */
24 #include <dspbridge/sync.h>
26 /* ----------------------------------- Platform Manager */
27 #include <dspbridge/dev.h>
29 /* ----------------------------------- Others */
30 #include <dspbridge/io_sm.h>
32 /* ----------------------------------- This */
34 #include <dspbridge/dspmsg.h>
36 /* ----------------------------------- Function Prototypes */
/* Forward declarations for the file-local helpers defined at the bottom
 * of this file. */
37 static int add_new_msg(struct list_head *msg_list);
38 static void delete_msg_mgr(struct msg_mgr *hmsg_mgr);
39 static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp);
40 static void free_msg_list(struct list_head *msg_list);
43 * ======== bridge_msg_create ========
44 * Create an object to manage message queues. Only one of these objects
45 * can exist per device object.
/*
 * msg_man:      out parameter — receives the newly allocated manager.
 * hdev_obj:     device object whose IO manager this manager uses.
 * msg_callback: stored in on_exit; invoked when a node exits.
 *
 * NOTE(review): several lines (argument-check error return, allocation
 * failure return, final return) are missing from this extract — confirm
 * the exact error codes against the full file.
 */
47 int bridge_msg_create(struct msg_mgr **msg_man,
48 struct dev_object *hdev_obj,
49 msg_onexit msg_callback)
51 struct msg_mgr *msg_mgr_obj;
52 struct io_mgr *hio_mgr;
/* Reject NULL out-pointer, callback, or device object up front. */
55 if (!msg_man || !msg_callback || !hdev_obj)
/* Fetch the IO manager; it is used later to schedule DPC transfers. */
58 dev_get_io_mgr(hdev_obj, &hio_mgr);
63 /* Allocate msg_ctrl manager object */
64 msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);
68 msg_mgr_obj->on_exit = msg_callback;
69 msg_mgr_obj->iomgr = hio_mgr;
70 /* List of MSG_QUEUEs */
71 INIT_LIST_HEAD(&msg_mgr_obj->queue_list);
73 * Queues of message frames for messages to the DSP. Message
74 * frames will only be added to the free queue when a
75 * msg_queue object is created.
77 INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list);
78 INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list);
/* This lock protects all three lists above and every queue's lists;
 * it is taken with spin_lock_bh() throughout this file. */
79 spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
82 * Create an event to be used by bridge_msg_put() in waiting
83 * for an available free frame from the message manager.
85 msg_mgr_obj->sync_event =
86 kzalloc(sizeof(struct sync_object), GFP_KERNEL);
87 if (!msg_mgr_obj->sync_event) {
91 sync_init_event(msg_mgr_obj->sync_event);
/* Success: hand the fully initialized manager back to the caller. */
93 *msg_man = msg_mgr_obj;
99 * ======== bridge_msg_create_queue ========
100 * Create a msg_queue for sending/receiving messages to/from a node
/*
 * hmsg_mgr: owning message manager.
 * msgq:     out parameter — receives the new queue.
 * msgq_id:  node environment id (may be patched later via
 *           bridge_msg_set_queue_id()).
 * max_msgs: number of message frames to pre-allocate for this queue.
 * arg:      opaque node handle stored in the queue.
 *
 * NOTE(review): the error returns after each allocation-failure check,
 * and the declarations of `status`/`i`, are missing from this extract —
 * confirm against the full file.
 */
103 int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, struct msg_queue **msgq,
104 u32 msgq_id, u32 max_msgs, void *arg)
107 u32 num_allocated = 0;
108 struct msg_queue *msg_q;
111 if (!hmsg_mgr || msgq == NULL)
115 /* Allocate msg_queue object */
116 msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
120 msg_q->max_msgs = max_msgs;
121 msg_q->msg_mgr = hmsg_mgr;
122 msg_q->arg = arg; /* Node handle */
123 msg_q->msgq_id = msgq_id; /* Node env (not valid yet) */
124 /* Queues of Message frames for messages from the DSP */
125 INIT_LIST_HEAD(&msg_q->msg_free_list);
126 INIT_LIST_HEAD(&msg_q->msg_used_list);
128 /* Create event that will be signalled when a message from
129 * the DSP is available. */
130 msg_q->sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
131 if (!msg_q->sync_event) {
136 sync_init_event(msg_q->sync_event);
138 /* Create a notification list for message ready notification. */
139 msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
140 if (!msg_q->ntfy_obj) {
144 ntfy_init(msg_q->ntfy_obj);
146 /* Create events that will be used to synchronize cleanup
147 * when the object is deleted. sync_done will be set to
148 * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
149 * will be set by the unblocked thread to signal that it
150 * is unblocked and will no longer reference the object. */
151 msg_q->sync_done = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
152 if (!msg_q->sync_done) {
156 sync_init_event(msg_q->sync_done);
158 msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
159 if (!msg_q->sync_done_ack) {
163 sync_init_event(msg_q->sync_done_ack);
165 /* Enter critical section */
166 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
167 /* Initialize message frames and put in appropriate queues */
/* Each iteration allocates one to-DSP frame (manager's free list) and
 * one from-DSP frame (this queue's free list); stops on first failure. */
168 for (i = 0; i < max_msgs && !status; i++) {
169 status = add_new_msg(&hmsg_mgr->msg_free_list);
172 status = add_new_msg(&msg_q->msg_free_list);
/* NOTE(review): the lock is released here on the failure path and
 * re-taken implicitly by the success path below; the branch between
 * the two is elided in this extract. */
176 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Success: publish the queue on the manager's queue list. */
180 list_add_tail(&msg_q->list_elem, &hmsg_mgr->queue_list);
182 /* Signal that free frames are now available */
183 if (!list_empty(&hmsg_mgr->msg_free_list))
184 sync_set_event(hmsg_mgr->sync_event);
186 /* Exit critical section */
187 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Failure cleanup: release the partially built queue and however many
 * to-DSP frames were added on its behalf. */
191 delete_msg_queue(msg_q, num_allocated);
196 * ======== bridge_msg_delete ========
197 * Delete a msg_ctrl manager allocated in bridge_msg_create().
/* Thin wrapper: all teardown work is done by delete_msg_mgr(). */
199 void bridge_msg_delete(struct msg_mgr *hmsg_mgr)
201 delete_msg_mgr(hmsg_mgr);
205 * ======== bridge_msg_delete_queue ========
206 * Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
/*
 * Marks the queue done, unblocks any threads waiting inside
 * bridge_msg_get()/bridge_msg_put() and waits for each to acknowledge,
 * then unlinks and frees the queue under the manager lock.
 * NOTE(review): the declaration of `io_msg_pend` is elided in this
 * extract — confirm against the full file.
 */
208 void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
210 struct msg_mgr *hmsg_mgr;
213 if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
216 hmsg_mgr = msg_queue_obj->msg_mgr;
/* From this point on, get/put on this queue refuse to block. */
217 msg_queue_obj->done = true;
218 /* Unblock all threads blocked in MSG_Get() or MSG_Put(). */
219 io_msg_pend = msg_queue_obj->io_msg_pend;
220 while (io_msg_pend) {
/* Wake one blocked thread ... */
222 sync_set_event(msg_queue_obj->sync_done);
223 /* Wait for acknowledgement */
224 sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE);
/* ... and re-read the count: each woken thread decrements it. */
225 io_msg_pend = msg_queue_obj->io_msg_pend;
227 /* Remove message queue from hmsg_mgr->queue_list */
228 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
229 list_del(&msg_queue_obj->list_elem);
230 /* Free the message queue object */
231 delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
/* No free frames left for senders: clear the manager's event. */
232 if (list_empty(&hmsg_mgr->msg_free_list))
233 sync_reset_event(hmsg_mgr->sync_event);
234 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
238 * ======== bridge_msg_get ========
239 * Get a message from a msg_ctrl queue.
/*
 * Copies one message from the queue's used list into *pmsg, blocking up
 * to utimeout for one to arrive.
 * NOTE(review): the declarations of `status`/`index`, the early returns,
 * and the fast-path return are elided in this extract — confirm against
 * the full file.
 */
241 int bridge_msg_get(struct msg_queue *msg_queue_obj,
242 struct dsp_msg *pmsg, u32 utimeout)
244 struct msg_frame *msg_frame_obj;
245 struct msg_mgr *hmsg_mgr;
246 struct sync_object *syncs[2];
250 if (!msg_queue_obj || pmsg == NULL)
253 hmsg_mgr = msg_queue_obj->msg_mgr;
255 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
256 /* If a message is already there, get it */
257 if (!list_empty(&msg_queue_obj->msg_used_list)) {
258 msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
259 struct msg_frame, list_elem);
260 list_del(&msg_frame_obj->list_elem);
/* Copy the payload out under the lock, then recycle the frame
 * onto this queue's free list. */
261 *pmsg = msg_frame_obj->msg_data.msg;
262 list_add_tail(&msg_frame_obj->list_elem,
263 &msg_queue_obj->msg_free_list);
/* No messages left: clear the "message ready" event. */
264 if (list_empty(&msg_queue_obj->msg_used_list))
265 sync_reset_event(msg_queue_obj->sync_event);
266 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Queue is being torn down: refuse to block on it. */
270 if (msg_queue_obj->done) {
271 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Record that this thread may block inside the object, so that
 * bridge_msg_delete_queue() can wait for it to leave. */
274 msg_queue_obj->io_msg_pend++;
275 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
278 * Wait til message is available, timeout, or done. We don't
279 * have to schedule the DPC, since the DSP will send messages
280 * when they are available.
282 syncs[0] = msg_queue_obj->sync_event;
283 syncs[1] = msg_queue_obj->sync_done;
284 status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);
286 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
287 if (msg_queue_obj->done) {
288 msg_queue_obj->io_msg_pend--;
289 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
291 * Signal that we're not going to access msg_queue_obj
292 * anymore, so it can be deleted.
294 sync_set_event(msg_queue_obj->sync_done_ack);
297 if (!status && !list_empty(&msg_queue_obj->msg_used_list)) {
298 /* Get msg from used list */
299 msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
300 struct msg_frame, list_elem);
301 list_del(&msg_frame_obj->list_elem);
302 /* Copy message into pmsg and put frame on the free list */
303 *pmsg = msg_frame_obj->msg_data.msg;
304 list_add_tail(&msg_frame_obj->list_elem,
305 &msg_queue_obj->msg_free_list);
/* No longer blocked inside the object. */
307 msg_queue_obj->io_msg_pend--;
308 /* Reset the event if there are still queued messages */
309 if (!list_empty(&msg_queue_obj->msg_used_list))
310 sync_set_event(msg_queue_obj->sync_event);
312 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
318 * ======== bridge_msg_put ========
319 * Put a message onto a msg_ctrl queue.
/*
 * Copies *pmsg into a free to-DSP frame on the manager, tags it with
 * this queue's id, queues it on the manager's used list and schedules
 * the IO DPC; blocks up to utimeout for a free frame.
 * NOTE(review): the declarations of `status`/`index` and the various
 * return statements are elided in this extract — confirm against the
 * full file.
 */
321 int bridge_msg_put(struct msg_queue *msg_queue_obj,
322 const struct dsp_msg *pmsg, u32 utimeout)
324 struct msg_frame *msg_frame_obj;
325 struct msg_mgr *hmsg_mgr;
326 struct sync_object *syncs[2];
330 if (!msg_queue_obj || !pmsg || !msg_queue_obj->msg_mgr)
333 hmsg_mgr = msg_queue_obj->msg_mgr;
335 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
337 /* If a message frame is available, use it */
338 if (!list_empty(&hmsg_mgr->msg_free_list)) {
339 msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
340 struct msg_frame, list_elem);
341 list_del(&msg_frame_obj->list_elem);
/* Fill the frame and tag it with the destination queue id. */
342 msg_frame_obj->msg_data.msg = *pmsg;
343 msg_frame_obj->msg_data.msgq_id =
344 msg_queue_obj->msgq_id;
345 list_add_tail(&msg_frame_obj->list_elem,
346 &hmsg_mgr->msg_used_list);
347 hmsg_mgr->msgs_pending++;
/* Last free frame consumed: clear the free-frame event. */
349 if (list_empty(&hmsg_mgr->msg_free_list))
350 sync_reset_event(hmsg_mgr->sync_event);
352 /* Release critical section before scheduling DPC */
353 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
354 /* Schedule a DPC, to do the actual data transfer: */
355 iosm_schedule(hmsg_mgr->iomgr);
/* Queue is being torn down: refuse to block on it. */
359 if (msg_queue_obj->done) {
360 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Record that this thread may block inside the object, so that
 * bridge_msg_delete_queue() can wait for it to leave. */
363 msg_queue_obj->io_msg_pend++;
365 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
367 /* Wait til a free message frame is available, timeout, or done */
368 syncs[0] = hmsg_mgr->sync_event;
369 syncs[1] = msg_queue_obj->sync_done;
370 status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);
374 /* Enter critical section */
375 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
376 if (msg_queue_obj->done) {
377 msg_queue_obj->io_msg_pend--;
378 /* Exit critical section */
379 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
381 * Signal that we're not going to access msg_queue_obj
382 * anymore, so it can be deleted.
384 sync_set_event(msg_queue_obj->sync_done_ack);
/* Woken but another sender raced us to the frame. */
388 if (list_empty(&hmsg_mgr->msg_free_list)) {
389 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
393 /* Get msg from free list */
394 msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
395 struct msg_frame, list_elem);
397 * Copy message into pmsg and put frame on the
400 list_del(&msg_frame_obj->list_elem);
401 msg_frame_obj->msg_data.msg = *pmsg;
402 msg_frame_obj->msg_data.msgq_id = msg_queue_obj->msgq_id;
403 list_add_tail(&msg_frame_obj->list_elem, &hmsg_mgr->msg_used_list);
404 hmsg_mgr->msgs_pending++;
406 * Schedule a DPC, to do the actual
409 iosm_schedule(hmsg_mgr->iomgr);
/* No longer blocked inside the object. */
411 msg_queue_obj->io_msg_pend--;
412 /* Reset event if there are still frames available */
413 if (!list_empty(&hmsg_mgr->msg_free_list))
414 sync_set_event(hmsg_mgr->sync_event);
416 /* Exit critical section */
417 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
423 * ======== bridge_msg_register_notify ========
/*
 * Register (event_mask == DSP_NODEMESSAGEREADY) or unregister
 * (event_mask == 0) hnotification for message-ready notification on
 * msg_queue_obj.  Only DSP_SIGNALEVENT is accepted as notify_type.
 * NOTE(review): the error-return statements and the branch selecting
 * register vs. unregister are elided in this extract — confirm against
 * the full file.
 */
425 int bridge_msg_register_notify(struct msg_queue *msg_queue_obj,
426 u32 event_mask, u32 notify_type,
427 struct dsp_notification *hnotification)
431 if (!msg_queue_obj || !hnotification) {
436 if (!(event_mask == DSP_NODEMESSAGEREADY || event_mask == 0)) {
441 if (notify_type != DSP_SIGNALEVENT) {
447 status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification,
448 event_mask, notify_type);
450 status = ntfy_unregister(msg_queue_obj->ntfy_obj,
453 if (status == -EINVAL) {
454 /* Not registered. Ok, since we couldn't have known. Node
455 * notifications are split between node state change handled
456 * by NODE, and message ready handled by msg_ctrl. */
464 * ======== bridge_msg_set_queue_id ========
/* Late-bind the node environment id to an already created queue.
 * NOTE(review): a NULL check on msg_queue_obj appears to be elided in
 * this extract — confirm against the full file. */
466 void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
469 * A message queue must be created when a node is allocated,
470 * so that node_register_notify() can be called before the node
471 * is created. Since we don't know the node environment until the
472 * node is created, we need this function to set msg_queue_obj->msgq_id
473 * to the node environment, after the node is created.
476 msg_queue_obj->msgq_id = msgq_id;
480 * ======== add_new_msg ========
481 * Must be called in message manager critical section.
/* Allocate one zeroed message frame and append it to msg_list.
 * GFP_ATOMIC because callers hold msg_mgr_lock (spin_lock_bh).
 * NOTE(review): the allocation-failure return is elided in this
 * extract — confirm the error code against the full file. */
483 static int add_new_msg(struct list_head *msg_list)
485 struct msg_frame *pmsg;
487 pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
491 list_add_tail(&pmsg->list_elem, msg_list);
497 * ======== delete_msg_mgr ========
/* Free the manager's message frame lists and its sync event.
 * NOTE(review): the NULL guard and the final kfree of hmsg_mgr itself
 * are elided in this extract — confirm against the full file. */
499 static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
504 /* FIXME: free elements from queue_list? */
505 free_msg_list(&hmsg_mgr->msg_free_list);
506 free_msg_list(&hmsg_mgr->msg_used_list);
507 kfree(hmsg_mgr->sync_event);
512 * ======== delete_msg_queue ========
/*
 * Free a message queue and return num_to_dsp frames that were added to
 * the manager's free list on its behalf.
 * NOTE(review): the declaration/initialization of the loop counter `i`
 * and some braces are elided in this extract — confirm against the
 * full file.
 */
514 static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
516 struct msg_mgr *hmsg_mgr;
517 struct msg_frame *pmsg, *tmp;
520 if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
523 hmsg_mgr = msg_queue_obj->msg_mgr;
525 /* Pull off num_to_dsp message frames from Msg manager and free */
527 list_for_each_entry_safe(pmsg, tmp, &hmsg_mgr->msg_free_list,
529 list_del(&pmsg->list_elem);
/* Stop once num_to_dsp frames have been reclaimed. */
531 if (i++ >= num_to_dsp)
/* Free this queue's own (from-DSP) frame lists. */
535 free_msg_list(&msg_queue_obj->msg_free_list);
536 free_msg_list(&msg_queue_obj->msg_used_list);
/* ntfy_obj may be NULL if creation failed before it was allocated. */
538 if (msg_queue_obj->ntfy_obj) {
539 ntfy_delete(msg_queue_obj->ntfy_obj);
540 kfree(msg_queue_obj->ntfy_obj);
/* kfree(NULL) is a no-op, so partially constructed queues are safe. */
543 kfree(msg_queue_obj->sync_event);
544 kfree(msg_queue_obj->sync_done);
545 kfree(msg_queue_obj->sync_done_ack);
547 kfree(msg_queue_obj);
551 * ======== free_msg_list ========
553 static void free_msg_list(struct list_head *msg_list)
555 struct msg_frame *pmsg, *tmp;
560 list_for_each_entry_safe(pmsg, tmp, msg_list, list_elem) {
561 list_del(&pmsg->list_elem);