/*
 * Single hardware queue variant. This will attempt to use any per-process
 * plug for merging and IO deferral.
 */
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
        callq  0000000000002fe5
        push   %r15
        push   %r14
        push   %r13
        push   %r12
        mov    %rdi,%r13
        push   %rbp
        push   %rbx
        mov    $0x1,%ebx
        sub    $0x48,%rsp
const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
        mov    0x14(%rsi),%eax
        mov    %gs:0x28,%rcx
        mov    %rcx,0x40(%rsp)
        xor    %ecx,%ecx
        mov    %rsi,0x20(%rsp)
const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
        mov    %eax,%edx
        shr    $0x1d,%edx
        test   %edx,%edx
        je     0000000000003024
        mov    %eax,%ebx
        shr    $0x3,%ebx
        and    $0x1,%ebx
unsigned int request_count = 0;
struct blk_map_ctx data;
struct request *rq;
blk_qc_t cookie;

blk_queue_bounce(q, &bio);
        lea    0x20(%rsp),%rsi
const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
        and    $0x300,%eax
blk_queue_bounce(q, &bio);
        mov    %r13,%rdi
        mov    %eax,%ebp
struct blk_plug *plug;
unsigned int request_count = 0;
        movl   $0x0,0x2c(%rsp)
blk_queue_bounce(q, &bio);
        callq  0000000000003040

if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
        mov    0x20(%rsp),%rdi
        callq  000000000000304a
        test   %al,%al
        je     0000000000003060
        mov    0x20(%rsp),%rdi
        callq  0000000000003058
        test   %eax,%eax
        jne    0000000000003133
bio_io_error(bio);
return BLK_QC_T_NONE;
}

blk_queue_split(q, &bio, q->bio_split);
        mov    0x8b0(%r13),%rdx
        lea    0x20(%rsp),%rsi
        mov    %r13,%rdi
        callq  0000000000003074

if (!is_flush_fua && !blk_queue_nomerges(q)) {
        test   %ebp,%ebp
        jne    0000000000003170
}

static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
return ((1UL << (nr & (BITS_PER_LONG-1))) & (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
        mov    0x510(%r13),%rax
        test   $0x1,%ah
        je     00000000000031f4
if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
return BLK_QC_T_NONE;
} else
request_count = blk_plug_queued_count(q);
        mov    %r13,%rdi
        callq  0000000000003094
        mov    %eax,0x2c(%rsp)

rq = blk_mq_map_request(q, bio, &data);
        mov    0x20(%rsp),%rsi
        lea    0x30(%rsp),%rdx
        mov    %r13,%rdi
        callq  0000000000000e70
if (unlikely(!rq))
        test   %rax,%rax
rq = blk_mq_map_request(q, bio, &data);
        mov    %rax,%rbp
if (unlikely(!rq))
        je     0000000000003210
return BLK_QC_T_NONE;

cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
        mov    0x30(%rsp),%rax
        mov    %rax,(%rsp)
return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
return tag | (queue_num << BLK_QC_T_SHIFT);
        mov    0x144(%rax),%eax
        mov    %eax,0x8(%rsp)
        mov    %eax,%r12d
DECLARE_PER_CPU(struct task_struct *, current_task);

static __always_inline struct task_struct *get_current(void)
{
return this_cpu_read_stable(current_task);
        mov    %gs:0x0,%rax
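The interleaved source above shows the submission cookie being packed from the driver tag and the hardware queue number (return tag | (queue_num << BLK_QC_T_SHIFT);, matching the shl $0x10 / or 0x100(%rbp) pair just below). A minimal standalone sketch of that packing, assuming a shift of 16; the SKETCH_* names are illustrative stand-ins, not the kernel definitions:

#include <stdio.h>

#define SKETCH_QC_T_SHIFT 16u        /* stands in for BLK_QC_T_SHIFT */
#define SKETCH_QC_T_NONE  (~0u)      /* stands in for BLK_QC_T_NONE  */

/* Pack a driver tag and a hardware queue index into one submission cookie. */
static unsigned int sketch_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
        return tag | (queue_num << SKETCH_QC_T_SHIFT);
}

int main(void)
{
        /* e.g. tag 0x2a on hardware queue 0 -> cookie 0x0000002a */
        printf("cookie=0x%08x\n", sketch_tag_to_qc_t(0x2a, 0));
        printf("none=0x%08x\n", SKETCH_QC_T_NONE);
        return 0;
}

The caller can later tell whether a valid cookie was returned by comparing against the all-ones "none" value, which is why the error paths below load $0xffffffff into the return register.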
/*
 * A task plug currently exists. Since this is completely lockless,
 * utilize that to temporarily store requests until the task is
 * either done or scheduled away.
 */
plug = current->plug;
        mov    0x750(%rax),%r14
        shl    $0x10,%r12d
        or     0x100(%rbp),%r12d
if (plug) {
        test   %r14,%r14
        je     000000000000327e
}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
init_request_from_bio(rq, bio);
        mov    0x20(%rsp),%rsi
        mov    %rbp,%rdi
        callq  00000000000030fd
blk_account_io_start(rq, 1);
        mov    $0x1,%esi
        mov    %rbp,%rdi
        callq  000000000000310a
blk_mq_bio_to_request(rq, bio);
if (!request_count)
        mov    0x2c(%rsp),%eax
        test   %eax,%eax
        je     000000000000322f
trace_block_plug(q);

blk_mq_put_ctx(data.ctx);

if (request_count >= BLK_MAX_REQUEST_COUNT) {
        cmpl   $0xf,0x2c(%rsp)
        ja     000000000000321b
        mov    0x18(%r14),%rsi
blk_flush_plug_list(plug, false);
trace_block_plug(q);
}

list_add_tail(&rq->queuelist, &plug->mq_list);
        lea    0x10(%r14),%rdx
        mov    %rbp,%rdi
        callq  0000000000003131
return cookie;
        jmp    000000000000314a
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
bio_io_error(bio);
        mov    0x20(%rsp),%rdi
return BLK_QC_T_NONE;
        mov    $0xffffffff,%r12d
extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
bio->bi_error = -EIO;
        movl   $0xfffffffb,0x10(%rdi)
bio_endio(bio);
        callq  000000000000314a
blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
}

blk_mq_put_ctx(data.ctx);

return cookie;
}
        mov    0x40(%rsp),%rcx
        xor    %gs:0x28,%rcx
        mov    %r12d,%eax
        jne    000000000000348e
        add    $0x48,%rsp
        pop    %rbx
        pop    %rbp
        pop    %r12
        pop    %r13
        pop    %r14
        pop    %r15
        retq
} else
request_count = blk_plug_queued_count(q);
        mov    %r13,%rdi
        callq  0000000000003178
rq = blk_mq_map_request(q, bio, &data);
        mov    0x20(%rsp),%rsi
        lea    0x30(%rsp),%rdx
        mov    %r13,%rdi
request_count = blk_plug_queued_count(q);
        mov    %eax,0x2c(%rsp)
rq = blk_mq_map_request(q, bio, &data);
        callq  0000000000000e70
if (unlikely(!rq))
        test   %rax,%rax
        je     0000000000003210
return BLK_QC_T_NONE;

cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
        mov    0x30(%rsp),%rdx
}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
init_request_from_bio(rq, bio);
        mov    0x20(%rsp),%rsi
        mov    %rax,%rdi
        mov    %rax,(%rsp)
        mov    0x144(%rdx),%r12d
        shl    $0x10,%r12d
        or     0x100(%rax),%r12d
        callq  00000000000031bb
blk_account_io_start(rq, 1);
        mov    (%rsp),%rax
        mov    $0x1,%esi
        mov    %rax,%rdi
        callq  00000000000031cc
cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);

if (unlikely(is_flush_fua)) {
blk_mq_bio_to_request(rq, bio);
blk_insert_flush(rq);
        mov    (%rsp),%rax
        mov    %rax,%rdi
        callq  00000000000031d8
if (unlikely(!rq))
return BLK_QC_T_NONE;

cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);

if (unlikely(is_flush_fua)) {
        mov    $0x1,%esi
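The plug branch above defers the freshly built request onto the per-task plug list instead of dispatching it, and drains the list first once it is full (the cmpl $0xf / ja pair suggests BLK_MAX_REQUEST_COUNT is 16 in this build). A small userspace model of that behaviour, with toy_* names standing in for the kernel structures and with the actual driver hand-off elided:

#include <stddef.h>

#define TOY_MAX_REQUEST_COUNT 16     /* assumed value of BLK_MAX_REQUEST_COUNT */

struct toy_request {
        struct toy_request *next;
};

struct toy_plug {
        struct toy_request *head;    /* deferred requests, FIFO order */
        struct toy_request *tail;
        unsigned int queued;
};

/* Stand-in for blk_flush_plug_list(): hand everything to the driver. */
void toy_flush_plug_list(struct toy_plug *plug)
{
        plug->head = plug->tail = NULL;
        plug->queued = 0;
}

/* Defer a request on the task's plug instead of dispatching it now. */
void toy_plug_request(struct toy_plug *plug, struct toy_request *rq)
{
        if (plug->queued >= TOY_MAX_REQUEST_COUNT)
                toy_flush_plug_list(plug);   /* drain before queueing more */

        rq->next = NULL;                     /* list_add_tail() equivalent */
        if (plug->tail)
                plug->tail->next = rq;
        else
                plug->head = rq;
        plug->tail = rq;
        plug->queued++;
}

Keeping the requests on a per-task list is what makes the path lockless, as the comment in the listing notes: only the owning task touches its own plug until it is flushed.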
/*
 * For a SYNC request, send it to the hardware immediately. For
 * an ASYNC request, just ensure that we run it later on. The
 * latter allows for merging opportunities and more efficient
 * dispatching.
 */
run_queue:
blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        xor    $0x1,%ebx
        mov    0x30(%rsp),%rdi
        or     %esi,%ebx
        movzbl %bl,%esi
        callq  00000000000031ef
}

blk_mq_put_ctx(data.ctx);
return cookie;
        jmpq   000000000000314a
}

blk_queue_split(q, &bio, q->bio_split);

if (!is_flush_fua && !blk_queue_nomerges(q)) {
if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
        mov    0x20(%rsp),%rsi
        lea    0x2c(%rsp),%rdx
        xor    %ecx,%ecx
        mov    %r13,%rdi
        callq  0000000000003208
        test   %al,%al
        je     0000000000003098
return BLK_QC_T_NONE;
        mov    $0xffffffff,%r12d
        jmpq   000000000000314a
trace_block_plug(q);

blk_mq_put_ctx(data.ctx);

if (request_count >= BLK_MAX_REQUEST_COUNT) {
blk_flush_plug_list(plug, false);
        xor    %esi,%esi
        mov    %r14,%rdi
        callq  0000000000003225
        nopl   0x0(%rax,%rax,1)
#include
#include

static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:"
        jmpq   0000000000003121
        nopl   0x0(%rax,%rax,1)
        jmpq   0000000000003116
 *
 * Plug the request queue @q. Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,
        mov    %gs:0x0(%rip),%eax        # 0000000000003240
        mov    %eax,%eax
static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
bool oldbit;

asm volatile("bt %2,%1\n\t"
        bt     %rax,0x0(%rip)        # 000000000000324a
        setb   %al
        test   %al,%al
        je     0000000000003121
        mov    0x0(%rip),%rbx        # 000000000000325c
        test   %rbx,%rbx
        je     0000000000003279
        mov    (%rbx),%rax
        mov    0x8(%rbx),%rdi
        add    $0x18,%rbx
        mov    %r13,%rsi
        callq  *%rax
        mov    (%rbx),%rax
        test   %rax,%rax
        jne    0000000000003264
        jmpq   0000000000003121
list_add_tail(&rq->queuelist, &plug->mq_list);
return cookie;
}

if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
        mov    0x38(%rsp),%rax
        mov    0x20(%rsp),%r13
        mov    %rax,0x8(%rsp)
blk_account_io_start(rq, 1);
}

static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
{
return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
        mov    (%rsp),%rax
        testb  $0x1,0xa8(%rax)
        je     0000000000003326
!blk_queue_nomerges(hctx->queue);
        mov    0xb0(%rax),%rax
        mov    %rax,0x18(%rsp)
}

static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
return ((1UL << (nr & (BITS_PER_LONG-1))) & (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
        mov    0x510(%rax),%rax
return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
        test   $0x1,%ah
        jne    0000000000003326
return true;
}

static inline bool bio_mergeable(struct bio *bio)
{
if (bio->bi_opf & REQ_NOMERGE_FLAGS)
        testl  $0x100e300,0x14(%r13)
        jne    0000000000003326
        mov    0x8(%rsp),%r15
        mov    %r15,%rdi
        callq  00000000000032cd
struct blk_mq_ctx *ctx, struct bio *bio)
{
struct request *rq;
int checked = 8;

list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
        mov    0x10(%r15),%r14
        mov    %r15,%rax
        mov    $0x8,%r15d
        add    $0x8,%rax
        mov    %rax,0x10(%rsp)
        cmp    %rax,%r14
        je     000000000000330c
int el_ret;

if (!checked--)
break;

if (!blk_rq_merge_ok(rq, bio))
        mov    %r13,%rsi
        mov    %r14,%rdi
        callq  00000000000032f3
        test   %al,%al
        jne    0000000000003419
list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
        mov    0x8(%r14),%r14
        cmp    0x10(%rsp),%r14
        je     000000000000330c
if (!checked--)
        sub    $0x1,%r15d
        jne    00000000000032e8
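The loop above is the bounded merge scan: the per-CPU software context's pending list is walked backwards (newest request first), at most eight candidates are examined ("int checked = 8"), and the scan stops at the first request the bio can be merged into. A compact model of that scan, under the simplifying assumption that only back merges are attempted here; the toy_* types and predicates are stand-ins, not kernel APIs:

#include <stdbool.h>

struct toy_bio {
        unsigned long long sector;   /* first sector the bio touches */
        unsigned int nr_sectors;
};

struct toy_rq {
        struct toy_rq *prev;         /* walked newest-to-oldest, like the reverse scan */
        unsigned long long sector;
        unsigned int nr_sectors;
};

/* Back merge: the bio starts exactly where the request currently ends. */
bool toy_attempt_back_merge(struct toy_rq *rq, const struct toy_bio *bio)
{
        if (rq->sector + rq->nr_sectors != bio->sector)
                return false;
        rq->nr_sectors += bio->nr_sectors;
        return true;
}

/* Scan at most 8 pending requests; true means the bio was absorbed. */
bool toy_merge_queue_io(struct toy_rq *newest, const struct toy_bio *bio)
{
        int checked = 8;
        struct toy_rq *rq;

        for (rq = newest; rq; rq = rq->prev) {
                if (!checked--)
                        break;
                if (toy_attempt_back_merge(rq, bio))
                        return true;    /* merged; no new request needed */
        }
        return false;                   /* caller inserts a brand-new request */
}

Capping the scan keeps the submission path cheap: a long pending list is not searched exhaustively just to save one request allocation.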
}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
init_request_from_bio(rq, bio);
        mov    %r13,%rsi
        mov    %rbp,%rdi
        callq  0000000000003317
blk_account_io_start(rq, 1);
        mov    $0x1,%esi
        mov    %rbp,%rdi
        callq  0000000000003324
        jmp    0000000000003348
}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
init_request_from_bio(rq, bio);
        mov    %r13,%rsi
        mov    %rbp,%rdi
        callq  0000000000003331
blk_account_io_start(rq, 1);
        mov    %rbp,%rdi
        mov    $0x1,%esi
        callq  000000000000333e
        mov    0x8(%rsp),%rdi
        callq  0000000000003348
struct request *rq, bool at_head)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;

trace_block_rq_insert(hctx->queue, rq);
        mov    (%rsp),%rax
}

static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
struct request *rq, bool at_head)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
        mov    0x38(%rbp),%r13
trace_block_rq_insert(hctx->queue, rq);
        mov    0xb0(%rax),%r14
        nopl   0x0(%rax,%rax,1)
        mov    0x10(%r13),%rsi
if (at_head)
list_add(&rq->queuelist, &ctx->rq_list);
else
list_add_tail(&rq->queuelist, &ctx->rq_list);
        lea    0x8(%r13),%rdx
        mov    %rbp,%rdi
        callq  000000000000336c
__blk_mq_insert_req_list(hctx, rq, at_head);
blk_mq_hctx_mark_pending(hctx, ctx);
        mov    (%rsp),%rdi
        lea    0x44(%r13),%rsi
        callq  0000000000000ab0
        mov    0x8(%rsp),%rdi
        callq  *0x0
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
do_raw_spin_unlock(lock);
preempt_enable();
        xor    %esi,%esi
        jmpq   00000000000031dd
        mov    %gs:0x0(%rip),%eax        # 0000000000003393
        mov    %eax,%eax
static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
bool oldbit;

asm volatile("bt %2,%1\n\t"
        bt     %rax,0x0(%rip)        # 000000000000339d
        setb   %al
        test   %al,%al
        je     0000000000003116
        mov    0x0(%rip),%rbx        # 00000000000033af
        test   %rbx,%rbx
        je     00000000000033cc
        mov    (%rbx),%rax
        mov    0x8(%rbx),%rdi
        add    $0x18,%rbx
        mov    %r13,%rsi
        callq  *%rax
        mov    (%rbx),%rax
        test   %rax,%rax
        jne    00000000000033b7
        jmpq   0000000000003116
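When no merge is possible, the request is inserted on the software context's rq_list (head or tail depending on at_head) and the hardware context is told that this context now has pending work, which is what blk_mq_hctx_mark_pending() does with its context bitmap. A simplified model of that bookkeeping, assuming at most one machine word of contexts; the toy_* names are illustrative, not kernel structures:

#include <stdbool.h>
#include <stddef.h>

struct toy_req {
        struct toy_req *next;
};

struct toy_swctx {
        struct toy_req *head;        /* per-CPU software context's pending list */
        struct toy_req *tail;
        unsigned int index;          /* this context's bit in the hctx map */
};

struct toy_hwctx {
        unsigned long pending;       /* one bit per software context with work */
};

void toy_insert_request(struct toy_hwctx *hctx, struct toy_swctx *ctx,
                        struct toy_req *rq, bool at_head)
{
        if (at_head) {                       /* list_add(): dispatched first */
                rq->next = ctx->head;
                ctx->head = rq;
                if (!ctx->tail)
                        ctx->tail = rq;
        } else {                             /* list_add_tail(): dispatched last */
                rq->next = NULL;
                if (ctx->tail)
                        ctx->tail->next = rq;
                else
                        ctx->head = rq;
                ctx->tail = rq;
        }
        hctx->pending |= 1UL << ctx->index;  /* remember this ctx has pending work */
}

A later queue run only has to scan the set bits to find which software contexts to drain, instead of touching every per-CPU list.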
 * Called immediately before block operation request @rq is inserted
 * into queue @q. The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,
        mov    %gs:0x0(%rip),%eax        # 00000000000033d8
        mov    %eax,%eax
        bt     %rax,0x0(%rip)        # 00000000000033e2
        setb   %al
        test   %al,%al
        je     000000000000335c
        mov    0x0(%rip),%r15        # 00000000000033f4
        test   %r15,%r15
        je     0000000000003414
        mov    (%r15),%rcx
        mov    0x8(%r15),%rdi
        add    $0x18,%r15
        mov    %rbp,%rdx
        mov    %r14,%rsi
        callq  *%rcx
        mov    (%r15),%rcx
        test   %rcx,%rcx
        jne    00000000000033fc
        jmpq   000000000000335c
break;

if (!blk_rq_merge_ok(rq, bio))
continue;

el_ret = blk_try_merge(rq, bio);
        mov    %r13,%rsi
        mov    %r14,%rdi
        callq  0000000000003424
if (el_ret == ELEVATOR_BACK_MERGE) {
        cmp    $0x2,%eax
        je     0000000000003474
if (bio_attempt_back_merge(q, rq, bio)) {
ctx->rq_merged++;
return true;
}
break;
} else if (el_ret == ELEVATOR_FRONT_MERGE) {
        cmp    $0x1,%eax
        jne    00000000000032fb
if (bio_attempt_front_merge(q, rq, bio)) {
        mov    0x18(%rsp),%rdi
        mov    %r13,%rdx
        mov    %r14,%rsi
        callq  0000000000003442
        test   %al,%al
        je     000000000000330c
ctx->rq_merged++;
        mov    0x8(%rsp),%rax
        mov    0x8(%rsp),%rdi
        addq   $0x1,0x58(%rax)
        callq  *0x0
blk_mq_bio_to_request(rq, bio);
goto insert_rq;
}

spin_unlock(&ctx->lock);
__blk_mq_free_request(hctx, ctx, rq);
        mov    %rdi,%rsi
        mov    (%rsp),%rdi
        mov    %rbp,%rdx
        callq  00000000000002f0 <__blk_mq_free_request>
return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
put_cpu();
        jmpq   000000000000314a
if (el_ret == ELEVATOR_BACK_MERGE) {
if (bio_attempt_back_merge(q, rq, bio)) {
        mov    0x18(%rsp),%rdi
        mov    %r13,%rdx
        mov    %r14,%rsi
        callq  0000000000003484
        test   %al,%al
        je     000000000000330c
        jmp    000000000000344a
blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
}

blk_mq_put_ctx(data.ctx);

return cookie;
}
        callq  0000000000003493
        data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
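The tail of the scan classifies each candidate with blk_try_merge(): the cmp $0x2 branch handles a back merge and the cmp $0x1 branch a front merge, matching ELEVATOR_BACK_MERGE == 2 and ELEVATOR_FRONT_MERGE == 1, and a successful attempt bumps ctx->rq_merged (the addq $0x1,0x58(%rax)). A standalone model of that decision, with toy_* names as stand-ins and the bio/request extents reduced to plain sector ranges:

#include <stdbool.h>

enum toy_merge_type {
        TOY_NO_MERGE    = 0,
        TOY_FRONT_MERGE = 1,         /* analogous to ELEVATOR_FRONT_MERGE */
        TOY_BACK_MERGE  = 2,         /* analogous to ELEVATOR_BACK_MERGE  */
};

struct toy_span {
        unsigned long long sector;
        unsigned int nr_sectors;
};

/* Classify how a bio could attach to an existing request, if at all. */
enum toy_merge_type toy_try_merge(const struct toy_span *rq, const struct toy_span *bio)
{
        if (rq->sector + rq->nr_sectors == bio->sector)
                return TOY_BACK_MERGE;
        if (bio->sector + bio->nr_sectors == rq->sector)
                return TOY_FRONT_MERGE;
        return TOY_NO_MERGE;
}

/* Perform the merge and count it, mirroring "ctx->rq_merged++" above. */
bool toy_attempt_merge(struct toy_span *rq, const struct toy_span *bio,
                       unsigned long *rq_merged)
{
        switch (toy_try_merge(rq, bio)) {
        case TOY_BACK_MERGE:                 /* grow the request at its tail */
                rq->nr_sectors += bio->nr_sectors;
                break;
        case TOY_FRONT_MERGE:                /* grow the request at its head */
                rq->sector = bio->sector;
                rq->nr_sectors += bio->nr_sectors;
                break;
        default:
                return false;
        }
        (*rq_merged)++;
        return true;
}

If neither merge applies, the listing falls back to turning the bio into its own request, inserting it, and finally kicking the hardware queue with blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua), so synchronous and flush I/O is dispatched immediately while async I/O is left for a later run where more merging can happen.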