@@ -207,6 +207,9 @@ struct blkfront_info
 	struct blk_mq_tag_set tag_set;
 	struct blkfront_ring_info *rinfo;
 	unsigned int nr_rings;
+	/* Save incomplete reqs and bios for migration. */
+	struct list_head requests;
+	struct bio_list bio_list;
 };
 
 static unsigned int nr_minors;
@@ -2002,69 +2005,22 @@ static int blkif_recover(struct blkfront_info *info)
 {
 	unsigned int i, r_index;
 	struct request *req, *n;
-	struct blk_shadow *copy;
 	int rc;
 	struct bio *bio, *cloned_bio;
-	struct bio_list bio_list, merge_bio;
 	unsigned int segs, offset;
 	int pending, size;
 	struct split_bio *split_bio;
-	struct list_head requests;
 
 	blkfront_gather_backend_features(info);
 	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
 	blk_queue_max_segments(info->rq, segs);
-	bio_list_init(&bio_list);
-	INIT_LIST_HEAD(&requests);
 
 	for (r_index = 0; r_index < info->nr_rings; r_index++) {
-		struct blkfront_ring_info *rinfo;
-
-		rinfo = &info->rinfo[r_index];
-		/* Stage 1: Make a safe copy of the shadow state. */
-		copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
-			       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
-		if (!copy)
-			return -ENOMEM;
-
-		/* Stage 2: Set up free list. */
-		memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
-		for (i = 0; i < BLK_RING_SIZE(info); i++)
-			rinfo->shadow[i].req.u.rw.id = i + 1;
-		rinfo->shadow_free = rinfo->ring.req_prod_pvt;
-		rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
+		struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
 
 		rc = blkfront_setup_indirect(rinfo);
-		if (rc) {
-			kfree(copy);
+		if (rc)
 			return rc;
-		}
-
-		for (i = 0; i < BLK_RING_SIZE(info); i++) {
-			/* Not in use? */
-			if (!copy[i].request)
-				continue;
-
-			/*
-			 * Get the bios in the request so we can re-queue them.
-			 */
-			if (copy[i].request->cmd_flags &
-			    (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
-				/*
-				 * Flush operations don't contain bios, so
-				 * we need to requeue the whole request
-				 */
-				list_add(&copy[i].request->queuelist, &requests);
-				continue;
-			}
-			merge_bio.head = copy[i].request->bio;
-			merge_bio.tail = copy[i].request->biotail;
-			bio_list_merge(&bio_list, &merge_bio);
-			copy[i].request->bio = NULL;
-			blk_end_request_all(copy[i].request, 0);
-		}
-
-		kfree(copy);
 	}
 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
@@ -2079,15 +2035,15 @@ static int blkif_recover(struct blkfront_info *info)
 		kick_pending_request_queues(rinfo);
 	}
 
-	list_for_each_entry_safe(req, n, &requests, queuelist) {
+	list_for_each_entry_safe(req, n, &info->requests, queuelist) {
 		/* Requeue pending requests (flush or discard) */
 		list_del_init(&req->queuelist);
 		BUG_ON(req->nr_phys_segments > segs);
 		blk_mq_requeue_request(req);
 	}
 	blk_mq_kick_requeue_list(info->rq);
 
-	while ((bio = bio_list_pop(&bio_list)) != NULL) {
+	while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
 		/* Traverse the list of pending bios and re-queue them */
 		if (bio_segments(bio) > segs) {
 			/*
@@ -2133,9 +2089,42 @@ static int blkfront_resume(struct xenbus_device *dev)
 {
 	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
 	int err = 0;
+	unsigned int i, j;
 
 	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
 
+	bio_list_init(&info->bio_list);
+	INIT_LIST_HEAD(&info->requests);
+	for (i = 0; i < info->nr_rings; i++) {
+		struct blkfront_ring_info *rinfo = &info->rinfo[i];
+		struct bio_list merge_bio;
+		struct blk_shadow *shadow = rinfo->shadow;
+
+		for (j = 0; j < BLK_RING_SIZE(info); j++) {
+			/* Not in use? */
+			if (!shadow[j].request)
+				continue;
+
+			/*
+			 * Get the bios in the request so we can re-queue them.
+			 */
+			if (shadow[j].request->cmd_flags &
+					(REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+				/*
+				 * Flush operations don't contain bios, so
+				 * we need to requeue the whole request
+				 */
+				list_add(&shadow[j].request->queuelist, &info->requests);
+				continue;
+			}
+			merge_bio.head = shadow[j].request->bio;
+			merge_bio.tail = shadow[j].request->biotail;
+			bio_list_merge(&info->bio_list, &merge_bio);
+			shadow[j].request->bio = NULL;
+			blk_mq_end_request(shadow[j].request, 0);
+		}
+	}
+
 	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 
 	err = negotiate_mq(info);
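
The hunks above splice each in-flight request's bio chain onto a single driver-owned list before the rings are torn down, then drain that list and resubmit the bios after resume. Below is a minimal user-space sketch of that head/tail splice-and-drain pattern; the mock_* names are invented for illustration and only approximate the kernel's struct bio_list, bio_list_merge() and bio_list_pop() helpers.

/* save_restore_sketch.c -- illustrative only, not kernel code. */
#include <stdio.h>

struct mock_bio {
	int sector;                /* stand-in for the real bio payload */
	struct mock_bio *bi_next;  /* singly linked, like struct bio */
};

/* Head/tail pair, mirroring struct bio_list: O(1) append and splice. */
struct mock_bio_list {
	struct mock_bio *head;
	struct mock_bio *tail;
};

static void mock_bio_list_init(struct mock_bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

/* Splice bl2 onto the end of bl, like bio_list_merge(). */
static void mock_bio_list_merge(struct mock_bio_list *bl,
				struct mock_bio_list *bl2)
{
	if (!bl2->head)
		return;
	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;
	bl->tail = bl2->tail;
}

/* Detach and return the first bio, like bio_list_pop(). */
static struct mock_bio *mock_bio_list_pop(struct mock_bio_list *bl)
{
	struct mock_bio *bio = bl->head;

	if (bio) {
		bl->head = bio->bi_next;
		if (!bl->head)
			bl->tail = NULL;
		bio->bi_next = NULL;
	}
	return bio;
}

int main(void)
{
	struct mock_bio bios[4] = {
		{ .sector = 0 }, { .sector = 8 }, { .sector = 16 }, { .sector = 24 }
	};
	struct mock_bio_list saved, per_req;
	struct mock_bio *bio;
	int i;

	mock_bio_list_init(&saved);

	/* "Suspend": pretend each pair of bios belongs to one in-flight
	 * request and splice its chain onto the saved list. */
	for (i = 0; i < 4; i += 2) {
		bios[i].bi_next = &bios[i + 1];
		bios[i + 1].bi_next = NULL;
		per_req.head = &bios[i];
		per_req.tail = &bios[i + 1];
		mock_bio_list_merge(&saved, &per_req);
	}

	/* "Resume": drain the saved list and resubmit each bio. */
	while ((bio = mock_bio_list_pop(&saved)) != NULL)
		printf("resubmitting bio for sector %d\n", bio->sector);

	return 0;
}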