diff --git a/src/ipc/ipc4/handler.c b/src/ipc/ipc4/handler.c
index dfd4969e1f0d..0ab182e84d04 100644
--- a/src/ipc/ipc4/handler.c
+++ b/src/ipc/ipc4/handler.c
@@ -65,9 +65,9 @@ struct ipc4_msg_data {
 static struct ipc4_msg_data msg_data;
 
 /* fw sends a fw ipc message to send the status of the last host ipc message */
-static struct ipc_msg msg_reply;
+static struct ipc_msg msg_reply = {0, 0, 0, 0, LIST_INIT(msg_reply.list)};
 
-static struct ipc_msg msg_notify;
+static struct ipc_msg msg_notify = {0, 0, 0, 0, LIST_INIT(msg_notify.list)};
 
 /*
  * Global IPC Operations.
@@ -1235,28 +1235,15 @@ void ipc_send_panic_notification(void)
 
 #ifdef CONFIG_LOG_BACKEND_ADSP_MTRACE
 
-static bool is_notification_queued(void)
+static bool is_notification_queued(struct ipc_msg *msg)
 {
 	struct ipc *ipc = ipc_get();
-	struct list_item *slist;
-	struct ipc_msg *msg;
 	k_spinlock_key_t key;
 	bool queued = false;
 
 	key = k_spin_lock(&ipc->lock);
-	if (list_is_empty(&ipc->msg_list))
-		goto out;
-
-	list_for_item(slist, &ipc->msg_list) {
-		msg = container_of(slist, struct ipc_msg, list);
-
-		if (msg == &msg_notify) {
-			queued = true;
-			break;
-		}
-	}
-
-out:
+	if (!list_is_empty(&msg->list))
+		queued = true;
 	k_spin_unlock(&ipc->lock, key);
 
 	return queued;
@@ -1265,13 +1252,12 @@ static bool is_notification_queued(void)
 void ipc_send_buffer_status_notify(void)
 {
 	/* a single msg_notify object is used */
-	if (is_notification_queued())
+	if (is_notification_queued(&msg_notify))
 		return;
 
 	msg_notify.header = SOF_IPC4_NOTIF_HEADER(SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS);
 	msg_notify.extension = 0;
 	msg_notify.tx_size = 0;
-	list_init(&msg_notify.list);
 
 	tr_dbg(&ipc_tr, "tx-notify\t: %#x|%#x", msg_notify.header, msg_notify.extension);
 
@@ -1303,7 +1289,6 @@ void ipc_cmd(struct ipc_cmd_hdr *_hdr)
 	msg_reply.tx_size = 0;
 	msg_reply.header = in->primary.dat;
 	msg_reply.extension = in->extension.dat;
-	list_init(&msg_reply.list);
 
 	target = in->primary.r.msg_tgt;
 
@@ -1330,9 +1315,63 @@ void ipc_cmd(struct ipc_cmd_hdr *_hdr)
 		char *data = ipc->comp_data;
 		struct ipc4_message_reply reply;
 
+		/* Process flow and time stamp for IPC4 msg processed on secondary core :
+		 * core 0 (primary core)                             core x (secondary core)
+		 * # IPC msg thread         #IPC delayed worker      #core x idc thread
+		 * ipc_task_ops.run()
+		 *   ipc_do_cmd()
+		 *     msg_reply.header = in->primary.dat
+		 *     ipc4_process_on_core(x)
+		 *       mask |= SECONDARY_CORE
+		 *       idc_send_message()
+		 * Case 1:
+		 * // Ipc msg processed by secondary core           idc_ipc()
+		 * if ((mask & SECONDARY_CORE))                        ipc_cmd()
+		 *   return;                                           ipc_msg_send()
+		 *                                                       mask &= ~SECONDARY_CORE
+		 *
+		 *                          ipc_platform_send_msg
+		 * ----------------------------------------------------------------------------
+		 * Case 2:
+		 *                                                   idc_ipc()
+		 *                                                     ipc_cmd()
+		 *                                                       //Prepare reply msg
+		 *                                                       msg_reply.header =
+		 *                                                         reply.primary.dat;
+		 *                                                       ipc_msg_send()
+		 *                                                       mask &= ~SECONDARY_CORE
+		 *
+		 * if ((mask & IPC_TASK_SECONDARY_CORE))
+		 *   return;
+		 * // Ipc reply msg was prepared, so return
+		 * if (msg_reply.header != in->primary.dat)
+		 *   return;
+		 *                          ipc_platform_send_msg
+		 * ----------------------------------------------------------------------------
+		 * Case 3:
+		 *                                                   idc_ipc()
+		 *                                                     ipc_cmd()
+		 *                                                       //Prepare reply msg
+		 *                                                       msg_reply.header =
+		 *                                                         reply.primary.dat;
+		 *                                                       ipc_msg_send()
+		 *                                                       mask &= ~SECONDARY_CORE
+		 *
+		 *                          ipc_platform_send_msg
+		 *
+		 * if ((mask & IPC_TASK_SECONDARY_CORE))
+		 *   return;
+		 * // Ipc reply msg was prepared, so return
+		 * if (msg_reply.header != in->primary.dat)
+		 *   return;
+		 */
+
 		/* Reply prepared by secondary core */
 		if ((ipc->task_mask & IPC_TASK_SECONDARY_CORE) && cpu_is_primary(cpu_get_id()))
 			return;
+		/* Reply has been prepared by secondary core */
+		if (msg_reply.header != in->primary.dat)
+			return;
 
 		/* Do not send reply for SET_DX if we are going to enter D3
 		 * The reply is going to be sent as part of the power down
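
Note on the new queued-state check: the patch replaces the O(n) walk of
ipc->msg_list with an O(1) test of the message's own list node. That test is
sound only if a node that is not on any list is always self-linked, which is
why the two static objects now get LIST_INIT initializers and why the
per-send list_init() calls are dropped: re-initializing msg_notify.list or
msg_reply.list while the message was still linked into ipc->msg_list would
risk leaving stale neighbour pointers in the queue. SOF's list_item_del()
re-initializes the node it removes, so list_is_empty(&msg->list) is true
exactly when the message is idle. Below is a minimal, self-contained sketch
of that invariant; the list helpers are illustrative re-implementations, and
the reduced struct ipc_msg and the local msg_list head are stand-ins for the
firmware's definitions, not the real ones:

#include <stdbool.h>
#include <stdio.h>

/* Minimal model of SOF's circular doubly-linked list. The names mirror
 * SOF's list.h, but the bodies here are illustrative re-implementations.
 */
struct list_item {
	struct list_item *prev;
	struct list_item *next;
};

#define LIST_INIT(head) { &(head), &(head) }

static void list_init(struct list_item *list)
{
	list->prev = list;
	list->next = list;
}

static bool list_is_empty(struct list_item *list)
{
	return list->next == list;
}

/* Link item in before the list head, i.e. a tail append. */
static void list_item_append(struct list_item *item, struct list_item *list)
{
	item->prev = list->prev;
	item->next = list;
	list->prev->next = item;
	list->prev = item;
}

/* Unlink item and re-point it at itself. SOF's list_item_del() also
 * re-initializes the removed node, which is exactly the property the
 * patched is_notification_queued() depends on.
 */
static void list_item_del(struct list_item *item)
{
	item->next->prev = item->prev;
	item->prev->next = item->next;
	list_init(item);
}

/* Stand-ins for the firmware objects: a reduced struct ipc_msg and a
 * local queue head playing the role of ipc->msg_list.
 */
struct ipc_msg {
	unsigned int header;
	struct list_item list;
};

static struct ipc_msg msg_notify = { 0, LIST_INIT(msg_notify.list) };
static struct list_item msg_list = LIST_INIT(msg_list);

int main(void)
{
	/* Self-linked from the static initializer: reads as "not queued"
	 * even before the first send, with no runtime list_init() needed.
	 */
	printf("queued at boot:       %d\n", !list_is_empty(&msg_notify.list)); /* 0 */

	list_item_append(&msg_notify.list, &msg_list);     /* ~ ipc_msg_send() */
	printf("queued after send:    %d\n", !list_is_empty(&msg_notify.list)); /* 1 */

	list_item_del(&msg_notify.list);                   /* ~ transmit done */
	printf("queued after tx done: %d\n", !list_is_empty(&msg_notify.list)); /* 0 */

	return 0;
}

The same invariant is what lets ipc_cmd() drop list_init(&msg_reply.list):
with the static initializer, msg_reply.list is valid from boot, and the new
msg_reply.header != in->primary.dat guard (Cases 2 and 3 in the flow comment
above) stops the primary core from sending a duplicate reply that a
secondary core has already prepared and queued.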