@@ -221,18 +221,6 @@ static void hda_dma_get_dbg_vals(struct dma_chan_data *chan,
 #define hda_dma_ptr_trace(...)
 #endif
 
-static void hda_dma_l1_entry_notify(void *arg, enum notify_id type, void *data)
-{
-	/* Notify about Host DMA usage */
-	pm_runtime_get(PM_RUNTIME_HOST_DMA_L1, 0);
-}
-
-static void hda_dma_l1_exit_notify(void *arg, enum notify_id type, void *data)
-{
-	/* Force Host DMA to exit L1 */
-	pm_runtime_put(PM_RUNTIME_HOST_DMA_L1, 0);
-}
-
 static inline int hda_dma_is_buffer_full(struct dma_chan_data *chan)
 {
 	return dma_chan_reg_read(chan, DGCS) & DGCS_BF;
@@ -293,7 +281,6 @@ static int hda_dma_wait_for_buffer_empty(struct dma_chan_data *chan)
 
 static void hda_dma_post_copy(struct dma_chan_data *chan, int bytes)
 {
-	struct hda_chan_data *hda_chan = dma_chan_get_data(chan);
 	struct dma_cb_data next = {
 		.channel = chan,
 		.elem = { .size = bytes },
@@ -309,9 +296,8 @@ static void hda_dma_post_copy(struct dma_chan_data *chan, int bytes)
 		 */
 		hda_dma_inc_fp(chan, bytes);
 
-		/* Force Host DMA to exit L1 if scheduled on DMA */
-		if (!hda_chan->irq_disabled)
-			pm_runtime_put(PM_RUNTIME_HOST_DMA_L1, 0);
+		/* Force Host DMA to exit L1 */
+		pm_runtime_put(PM_RUNTIME_HOST_DMA_L1, 0);
 	} else {
 		/*
 		 * set BFPI to let link gateway know we have read size,
@@ -336,60 +322,10 @@ static int hda_dma_link_copy_ch(struct dma_chan_data *chan, int bytes)
 	return 0;
 }
 
-static int hda_dma_host_start(struct dma_chan_data *channel)
-{
-	struct hda_chan_data *hda_chan = dma_chan_get_data(channel);
-	int ret = 0;
-
-	/* Force Host DMA to exit L1 only on start*/
-	if (!(hda_chan->state & HDA_STATE_RELEASE))
-		pm_runtime_put(PM_RUNTIME_HOST_DMA_L1, 0);
-
-	if (!hda_chan->irq_disabled)
-		return ret;
-
-	/* Inform about Host DMA usage */
-	ret = notifier_register(NULL, scheduler_get_data(SOF_SCHEDULE_LL_TIMER),
-				NOTIFIER_ID_LL_PRE_RUN, hda_dma_l1_entry_notify,
-				NOTIFIER_FLAG_AGGREGATE);
-	if (ret < 0)
-		tr_err(&hdma_tr, "hda-dmac: %d channel %d, cannot register notification %d",
-		       channel->dma->plat_data.id, channel->index,
-		       ret);
-
-	/* Register common L1 exit for all channels */
-	ret = notifier_register(NULL, scheduler_get_data(SOF_SCHEDULE_LL_TIMER),
-				NOTIFIER_ID_LL_POST_RUN, hda_dma_l1_exit_notify,
-				NOTIFIER_FLAG_AGGREGATE);
-	if (ret < 0)
-		tr_err(&hdma_tr, "hda-dmac: %d channel %d, cannot register notification %d",
-		       channel->dma->plat_data.id, channel->index,
-		       ret);
-
-	return ret;
-}
-
-static void hda_dma_host_stop(struct dma_chan_data *channel)
-{
-	struct hda_chan_data *hda_chan = dma_chan_get_data(channel);
-
-	if (!hda_chan->irq_disabled)
-		return;
-
-	/* Unregister L1 entry */
-	notifier_unregister(NULL, scheduler_get_data(SOF_SCHEDULE_LL_TIMER),
-			    NOTIFIER_ID_LL_PRE_RUN);
-
-	/* Unregister L1 exit */
-	notifier_unregister(NULL, scheduler_get_data(SOF_SCHEDULE_LL_TIMER),
-			    NOTIFIER_ID_LL_POST_RUN);
-}
-
 /* lock should be held by caller */
 static int hda_dma_enable_unlock(struct dma_chan_data *channel)
 {
 	struct hda_chan_data *hda_chan;
-	int ret;
 
 	tr_info(&hdma_tr, "hda-dmac: %d channel %d -> enable",
 		channel->dma->plat_data.id, channel->index);
@@ -404,14 +340,6 @@ static int hda_dma_enable_unlock(struct dma_chan_data *channel)
 	hda_chan = dma_chan_get_data(channel);
 	hda_chan->desc_avail = channel->desc_count;
 
-	if (channel->direction == DMA_DIR_HMEM_TO_LMEM ||
-	    channel->direction == DMA_DIR_LMEM_TO_HMEM) {
-		pm_runtime_get(PM_RUNTIME_HOST_DMA_L1, 0);
-		ret = hda_dma_host_start(channel);
-		if (ret < 0)
-			return ret;
-	}
-
 	/* start link output transfer now */
 	if (channel->direction == DMA_DIR_MEM_TO_DEV &&
 	    !(hda_chan->state & HDA_STATE_RELEASE))
@@ -436,7 +364,6 @@ static int hda_dma_link_copy(struct dma_chan_data *channel, int bytes,
 static int hda_dma_host_copy(struct dma_chan_data *channel, int bytes,
 			     uint32_t flags)
 {
-	struct hda_chan_data *hda_chan = dma_chan_get_data(channel);
 	int ret;
 
 	tr_dbg(&hdma_tr, "hda-dmac: %d channel %d -> copy 0x%x bytes",
@@ -445,8 +372,7 @@ static int hda_dma_host_copy(struct dma_chan_data *channel, int bytes,
 	hda_dma_get_dbg_vals(channel, HDA_DBG_PRE, HDA_DBG_HOST);
 
 	/* Register Host DMA usage */
-	if (!hda_chan->irq_disabled)
-		pm_runtime_get(PM_RUNTIME_HOST_DMA_L1, 0);
+	pm_runtime_get(PM_RUNTIME_HOST_DMA_L1, 0);
 
 	/* blocking mode copy */
 	if (flags & DMA_COPY_BLOCKING) {
@@ -580,10 +506,6 @@ static int hda_dma_release(struct dma_chan_data *channel)
 	 */
 	hda_chan->state |= HDA_STATE_RELEASE;
 
-	if (channel->direction == DMA_DIR_HMEM_TO_LMEM ||
-	    channel->direction == DMA_DIR_LMEM_TO_HMEM)
-		ret = hda_dma_host_start(channel);
-
 	irq_local_enable(flags);
 	return ret;
 }
@@ -622,10 +544,6 @@ static int hda_dma_stop(struct dma_chan_data *channel)
 	tr_info(&hdma_tr, "hda-dmac: %d channel %d -> stop",
 		channel->dma->plat_data.id, channel->index);
 
-	if (channel->direction == DMA_DIR_HMEM_TO_LMEM ||
-	    channel->direction == DMA_DIR_LMEM_TO_HMEM)
-		hda_dma_host_stop(channel);
-
 	/* disable the channel */
 	dma_chan_reg_update_bits(channel, DGCS, DGCS_GEN | DGCS_FIFORDY, 0);
 	channel->status = COMP_STATE_PREPARE;