#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;
struct blk_flush_queue;

struct blk_mq_cpu_notifier {
	struct list_head list;
	void *data;
	int (*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_ctxmap {
	unsigned int size;
	unsigned int bits_per_word;
	struct blk_align_bitmap *map;
};

struct blk_mq_hw_ctx {
	struct {
		spinlock_t lock;
		struct list_head dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long state;		/* BLK_MQ_S_* flags */
	struct delayed_work run_work;
	struct delayed_work delay_work;
	cpumask_var_t cpumask;
	int next_cpu;
	int next_cpu_batch;

	unsigned long flags;		/* BLK_MQ_F_* flags */

	struct request_queue *queue;
	struct blk_flush_queue *fq;

	void *driver_data;

	struct blk_mq_ctxmap ctx_map;

	unsigned int nr_ctx;
	struct blk_mq_ctx **ctxs;

	atomic_t wait_index;

	struct blk_mq_tags *tags;

	unsigned long queued;
	unsigned long run;
#define BLK_MQ_MAX_DISPATCH_ORDER 10
	unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int numa_node;
	unsigned int queue_num;

	atomic_t nr_active;

	struct blk_mq_cpu_notifier cpu_notifier;
	struct kobject kobj;
};

struct blk_mq_tag_set {
	struct blk_mq_ops *ops;
	unsigned int nr_hw_queues;
	unsigned int queue_depth;	/* max hw supported */
	unsigned int reserved_tags;
	unsigned int cmd_size;		/* per-request extra data */
	int numa_node;
	unsigned int timeout;
	unsigned int flags;		/* BLK_MQ_F_* */
	void *driver_data;

	struct blk_mq_tags **tags;

	struct mutex tag_list_lock;
	struct list_head tag_list;
};

struct blk_mq_queue_data {
	struct request *rq;
	struct list_head *list;
	bool last;
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn *queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn *map_queue;

	/*
	 * Called on request timeout
	 */
	timeout_fn *timeout;

	softirq_done_fn *complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn *init_hctx;
	exit_hctx_fn *exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver-specific data.
	 *
	 * A tag greater than or equal to queue_depth is used for setting
	 * up the flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn *init_request;
	exit_request_fn *exit_request;
};
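/*
 * Usage sketch (not part of this API): a minimal, hypothetical driver
 * wiring the ops and tag set together. All mydrv_* names and struct
 * mydrv_cmd are invented for illustration; error handling is omitted.
 *
 *	static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *				  const struct blk_mq_queue_data *bd)
 *	{
 *		struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 *
 *		blk_mq_start_request(bd->rq);
 *		// issue cmd to hardware; call blk_mq_complete_request()
 *		// from the completion interrupt later
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 *
 *	static struct blk_mq_ops mydrv_mq_ops = {
 *		.queue_rq	= mydrv_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *	};
 *
 *	static struct blk_mq_tag_set mydrv_tag_set;
 *
 *	// at probe time: describe the tag space, then create the queue
 *	mydrv_tag_set.ops = &mydrv_mq_ops;
 *	mydrv_tag_set.nr_hw_queues = 1;
 *	mydrv_tag_set.queue_depth = 64;
 *	mydrv_tag_set.numa_node = NUMA_NO_NODE;
 *	mydrv_tag_set.cmd_size = sizeof(struct mydrv_cmd); // PDU after each rq
 *	mydrv_tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *	if (!blk_mq_alloc_tag_set(&mydrv_tag_set))
 *		q = blk_mq_init_queue(&mydrv_tag_set);
 */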
enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_SYSFS_UP	= 1 << 3,
	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};

#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_finish_init(struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_free_request(struct request *rq);
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		gfp_t gfp, bool reserved);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
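/*
 * Worked example (derived from the helpers above): a request with tag 9
 * on hardware queue 2 yields a unique tag of (2 << 16) | 9 = 0x00020009;
 * blk_mq_unique_tag_to_hwq() recovers 2 and blk_mq_unique_tag_to_tag()
 * recovers 9. This gives drivers a single 32-bit value that identifies
 * a request across all hardware queues of a device.
 */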
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);

/*
 * Driver command data is immediately after the request, so subtract the
 * request size to get back to the original request, and add the request
 * size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)					\
	for ((i) = 0; (i) < (q)->nr_queues &&				\
	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

#define blk_ctx_sum(q, sum)						\
({									\
	struct blk_mq_ctx *__x;						\
	unsigned int __ret = 0, __i;					\
									\
	queue_for_each_ctx((q), __x, __i)				\
		__ret += sum;						\
	__ret;								\
})

#endif