author		Hemant Kumar <hemantk@codeaurora.org>	2019-12-04 17:42:18 -0800
committer	Gerrit - the friendly Code Review server <code-review@localhost>	2019-12-11 13:21:04 -0800
commit		98dfdf7904ee4ca4931c21b4ab14ec7023a0e775 (patch)
tree		c04ebc88e4499f24a6229b1fb37f6e96dce11380
parent		9a7e667c3bbe32915ce336fca5147af1f96fa259 (diff)
mhi: core: Check for queued request instead of available request
Tags: LA.UM.8.1.r1-13000-sm8150.0, LA.UM.8.1.r1-12800-sm8150.0, LA.UM.8.1.r1-12600-sm8150.0
commit 741a49c45b3f2dac ("mhi: core: Handle RSC minimum credit requirement")
checks the available space in the transfer ring against the minimum RSC
credit requirement. This causes a data stall when the transfer ring is
almost full and an OOB event is received: the minimum credit requirement
is not met, so the host never rings the doorbell and the device never
posts an event for the host to process.

Fix this by checking the number of queued requests against the minimum
credit requirement, which allows the host to ring the doorbell when the
ring is almost full and an OOB event is received.

Change-Id: I3ab88aff97d78db003c49e888802e77878c95fa2
Signed-off-by: Hemant Kumar <hemantk@codeaurora.org>
-rw-r--r--	drivers/bus/mhi/core/mhi_main.c	18
1 file changed, 11 insertions(+), 7 deletions(-)
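To make the logic change easier to follow, here is a minimal standalone sketch of the doorbell decision before and after this patch. It is illustrative only: the helper names, the ring size, and the MHI_RSC_MIN_CREDITS value are assumptions for the example, not the driver's actual definitions; the real change is in the hunks below.

/* Illustrative sketch only -- not the driver code. */
#include <stdbool.h>
#include <stdio.h>

#define MHI_RSC_MIN_CREDITS	8	/* assumed value, for illustration */

/*
 * Old check: compares the number of *free* descriptors against the
 * minimum credit. With the ring almost full, n_free is small, the
 * condition fails, the doorbell is never rung and transfers stall.
 */
static bool should_ring_db_old(int n_free)
{
	return n_free >= MHI_RSC_MIN_CREDITS;
}

/*
 * New check: compares the number of *queued* descriptors (ring
 * elements minus free ones) against the minimum credit, so a nearly
 * full ring still rings the doorbell after an OOB event.
 */
static bool should_ring_db_new(int ring_elements, int n_free)
{
	int n_queued = ring_elements - n_free;

	return n_queued >= MHI_RSC_MIN_CREDITS;
}

int main(void)
{
	int ring_elements = 256;	/* assumed ring size */
	int n_free = 2;			/* ring almost full: the stall case */

	printf("old check rings DB: %d\n", should_ring_db_old(n_free));
	printf("new check rings DB: %d\n",
	       should_ring_db_new(ring_elements, n_free));

	return 0;
}

With these assumed numbers the old check skips the doorbell while the new one rings it, which is exactly the stall scenario the commit message describes.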
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index 213abb12fcf1..0670924094c5 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -449,7 +449,7 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
 	struct mhi_buf_info *buf_info;
 	struct mhi_tre *mhi_tre;
 	bool ring_db = true;
-	int nr_tre;
+	int n_free_tre, n_queued_tre;
 
 	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
 		return -ENOMEM;
@@ -493,9 +493,12 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
 		 * on RSC channel IPA HW has a minimum credit requirement before
 		 * switching to DB mode
 		 */
-		nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+		n_free_tre = mhi_get_no_free_descriptors(mhi_dev,
+							 DMA_FROM_DEVICE);
+		n_queued_tre = tre_ring->elements - n_free_tre;
 		read_lock_bh(&mhi_chan->lock);
-		if (mhi_chan->db_cfg.db_mode && nr_tre < MHI_RSC_MIN_CREDITS)
+		if (mhi_chan->db_cfg.db_mode &&
+		    n_queued_tre < MHI_RSC_MIN_CREDITS)
 			ring_db = false;
 		read_unlock_bh(&mhi_chan->lock);
 	} else {
@@ -910,7 +913,7 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 	struct mhi_result result;
 	unsigned long flags = 0;
 	bool ring_db = true;
-	int nr_tre;
+	int n_free_tre, n_queued_tre;
 
 	ev_code = MHI_TRE_GET_EV_CODE(event);
 	buf_ring = &mhi_chan->buf_ring;
@@ -1011,9 +1014,10 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 		 * switching to DB mode
 		 */
 		if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA) {
-			nr_tre = mhi_get_no_free_descriptors(mhi_chan->mhi_dev,
-							     DMA_FROM_DEVICE);
-			if (nr_tre < MHI_RSC_MIN_CREDITS)
+			n_free_tre = mhi_get_no_free_descriptors(
+					mhi_chan->mhi_dev, DMA_FROM_DEVICE);
+			n_queued_tre = tre_ring->elements - n_free_tre;
+			if (n_queued_tre < MHI_RSC_MIN_CREDITS)
 				ring_db = false;
 		}