@@ -95,6 +95,7 @@ struct nvme_dev;
95
95
struct nvme_queue ;
96
96
97
97
static void nvme_dev_disable (struct nvme_dev * dev , bool shutdown );
98
+ static bool __nvme_disable_io_queues (struct nvme_dev * dev , u8 opcode );
98
99
99
100
/*
100
101
* Represents an NVM Express device. Each nvme_dev is a PCI function.
@@ -1420,6 +1421,14 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1420
1421
return 0 ;
1421
1422
}
1422
1423
1424
/*
 * nvme_suspend_io_queues - suspend every I/O queue on the device.
 *
 * Walks dev->queues from the highest-numbered queue down to index 1,
 * calling nvme_suspend_queue() on each.  The loop condition (i > 0)
 * deliberately skips index 0, the admin queue, which callers suspend
 * separately (see nvme_dev_disable(), which follows this call with
 * nvme_suspend_queue(&dev->queues[0])).
 */
static void nvme_suspend_io_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
		nvme_suspend_queue(&dev->queues[i]);
}
1423
1432
static void nvme_disable_admin_queue (struct nvme_dev * dev , bool shutdown )
1424
1433
{
1425
1434
struct nvme_queue * nvmeq = & dev -> queues [0 ];
@@ -2134,6 +2143,12 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2134
2143
return result ;
2135
2144
}
2136
2145
2146
/*
 * nvme_disable_io_queues - delete all I/O queues on the controller.
 *
 * First issues Delete I/O Submission Queue commands for every I/O queue;
 * only if that pass succeeds (presumably __nvme_disable_io_queues()
 * returns true on success — its body is not visible here, TODO confirm)
 * are the matching Delete I/O Completion Queue commands sent.  The
 * SQ-before-CQ ordering follows the NVMe spec, which requires a
 * completion queue's submission queues to be deleted before the CQ
 * itself.
 */
static void nvme_disable_io_queues(struct nvme_dev *dev)
{
	if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq))
		__nvme_disable_io_queues(dev, nvme_admin_delete_cq);
}
2137
2152
static int nvme_setup_io_queues (struct nvme_dev * dev )
2138
2153
{
2139
2154
struct nvme_queue * adminq = & dev -> queues [0 ];
@@ -2170,6 +2185,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
2170
2185
} while (1 );
2171
2186
adminq -> q_db = dev -> dbs ;
2172
2187
2188
+ retry :
2173
2189
/* Deregister the admin queue's interrupt */
2174
2190
pci_free_irq (pdev , 0 , adminq );
2175
2191
@@ -2187,25 +2203,34 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
2187
2203
result = max (result - 1 , 1 );
2188
2204
dev -> max_qid = result + dev -> io_queues [HCTX_TYPE_POLL ];
2189
2205
2190
- dev_info (dev -> ctrl .device , "%d/%d/%d default/read/poll queues\n" ,
2191
- dev -> io_queues [HCTX_TYPE_DEFAULT ],
2192
- dev -> io_queues [HCTX_TYPE_READ ],
2193
- dev -> io_queues [HCTX_TYPE_POLL ]);
2194
-
2195
2206
/*
2196
2207
* Should investigate if there's a performance win from allocating
2197
2208
* more queues than interrupt vectors; it might allow the submission
2198
2209
* path to scale better, even if the receive path is limited by the
2199
2210
* number of interrupts.
2200
2211
*/
2201
-
2202
2212
result = queue_request_irq (adminq );
2203
2213
if (result ) {
2204
2214
adminq -> cq_vector = -1 ;
2205
2215
return result ;
2206
2216
}
2207
2217
set_bit (NVMEQ_ENABLED , & adminq -> flags );
2208
- return nvme_create_io_queues (dev );
2218
+
2219
+ result = nvme_create_io_queues (dev );
2220
+ if (result || dev -> online_queues < 2 )
2221
+ return result ;
2222
+
2223
+ if (dev -> online_queues - 1 < dev -> max_qid ) {
2224
+ nr_io_queues = dev -> online_queues - 1 ;
2225
+ nvme_disable_io_queues (dev );
2226
+ nvme_suspend_io_queues (dev );
2227
+ goto retry ;
2228
+ }
2229
+ dev_info (dev -> ctrl .device , "%d/%d/%d default/read/poll queues\n" ,
2230
+ dev -> io_queues [HCTX_TYPE_DEFAULT ],
2231
+ dev -> io_queues [HCTX_TYPE_READ ],
2232
+ dev -> io_queues [HCTX_TYPE_POLL ]);
2233
+ return 0 ;
2209
2234
}
2210
2235
2211
2236
static void nvme_del_queue_end (struct request * req , blk_status_t error )
@@ -2250,7 +2275,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
2250
2275
return 0 ;
2251
2276
}
2252
2277
2253
- static bool nvme_disable_io_queues (struct nvme_dev * dev , u8 opcode )
2278
+ static bool __nvme_disable_io_queues (struct nvme_dev * dev , u8 opcode )
2254
2279
{
2255
2280
int nr_queues = dev -> online_queues - 1 , sent = 0 ;
2256
2281
unsigned long timeout ;
@@ -2411,7 +2436,6 @@ static void nvme_pci_disable(struct nvme_dev *dev)
2411
2436
2412
2437
static void nvme_dev_disable (struct nvme_dev * dev , bool shutdown )
2413
2438
{
2414
- int i ;
2415
2439
bool dead = true;
2416
2440
struct pci_dev * pdev = to_pci_dev (dev -> dev );
2417
2441
@@ -2438,13 +2462,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
2438
2462
nvme_stop_queues (& dev -> ctrl );
2439
2463
2440
2464
if (!dead && dev -> ctrl .queue_count > 0 ) {
2441
- if (nvme_disable_io_queues (dev , nvme_admin_delete_sq ))
2442
- nvme_disable_io_queues (dev , nvme_admin_delete_cq );
2465
+ nvme_disable_io_queues (dev );
2443
2466
nvme_disable_admin_queue (dev , shutdown );
2444
2467
}
2445
- for (i = dev -> ctrl .queue_count - 1 ; i >= 0 ; i -- )
2446
- nvme_suspend_queue (& dev -> queues [i ]);
2447
-
2468
+ nvme_suspend_io_queues (dev );
2469
+ nvme_suspend_queue (& dev -> queues [0 ]);
2448
2470
nvme_pci_disable (dev );
2449
2471
2450
2472
blk_mq_tagset_busy_iter (& dev -> tagset , nvme_cancel_request , & dev -> ctrl );
0 commit comments