blk-mq: merge blk_mq_alloc_reserved_request into blk_mq_alloc_request
Instead of having two almost identical copies of the same code, just let the callers pass in the reserved flag directly.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 6fca6a611c
commit 4ce01dd1a0
3 changed files with 6 additions and 20 deletions
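For reference, a quick caller-side sketch of the merged calling convention. The wrapper functions below are hypothetical and not part of this commit; they only illustrate the new reserved argument:

#include <linux/blk-mq.h>

/*
 * Hypothetical illustration only: with the merged API, an ordinary
 * request passes reserved = false, while a request that should come
 * from the reserved tag pool passes reserved = true (what
 * blk_mq_alloc_reserved_request() used to do).  Requests are still
 * freed with blk_mq_free_request() as before.
 */
static struct request *example_alloc_normal(struct request_queue *q)
{
        return blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
}

static struct request *example_alloc_reserved(struct request_queue *q)
{
        /* was: blk_mq_alloc_reserved_request(q, READ, GFP_KERNEL); */
        return blk_mq_alloc_request(q, READ, GFP_KERNEL, true);
}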
diff --git a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1173,7 +1173,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
         if (q->mq_ops)
-                return blk_mq_alloc_request(q, rw, gfp_mask);
+                return blk_mq_alloc_request(q, rw, gfp_mask, false);
         else
                 return blk_old_get_request(q, rw, gfp_mask);
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -294,35 +294,21 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
         return rq;
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
+                bool reserved)
 {
         struct request *rq;
 
         if (blk_mq_queue_enter(q))
                 return NULL;
 
-        rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
+        rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
         if (rq)
                 blk_mq_put_ctx(rq->mq_ctx);
         return rq;
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
 
-struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
-                                              gfp_t gfp)
-{
-        struct request *rq;
-
-        if (blk_mq_queue_enter(q))
-                return NULL;
-
-        rq = blk_mq_alloc_request_pinned(q, rw, gfp, true);
-        if (rq)
-                blk_mq_put_ctx(rq->mq_ctx);
-        return rq;
-}
-EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
-
 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
                                   struct blk_mq_ctx *ctx, struct request *rq)
 {
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -160,8 +160,8 @@ void blk_mq_insert_request(struct request *, bool, bool, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
-struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+                gfp_t gfp, bool reserved);
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
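A caller that still wants the old helper's behaviour can express it as a trivial wrapper around the merged allocator. This is an illustrative sketch only, not something this commit adds, and the wrapper name is made up:

#include <linux/blk-mq.h>

/*
 * Hypothetical compatibility wrapper, not part of this commit: the removed
 * blk_mq_alloc_reserved_request() is equivalent to calling the merged
 * allocator with reserved = true.
 */
static inline struct request *
my_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp)
{
        return blk_mq_alloc_request(q, rw, gfp, true);
}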