x86/dma_bounce: revert r289834 and r289836 · freebsd/freebsd-src@a45cd7e · GitHub
[go: up one dir, main page]

Skip to content

Commit a45cd7e

Browse files
author
royger
committed
x86/dma_bounce: revert r289834 and r289836
The new load_ma implementation can cause dereferences when used with certain drivers, back it out until the reason is found: Fatal trap 12: page fault while in kernel mode cpuid = 11; apic id = 03 fault virtual address = 0x30 fault code = supervisor read data, page not present instruction pointer = 0x20:0xffffffff808a2d22 stack pointer = 0x28:0xfffffe07cc737710 frame pointer = 0x28:0xfffffe07cc737790 code segment = base 0x0, limit 0xfffff, type 0x1b = DPL 0, pres 1, long 1, def32 0, gran 1 processor eflags = interrupt enabled, resume, IOPL = 0 current process 8000 = 13 (g_down) trap number = 12 panic: page fault cpuid = 11 KDB: stack backtrace: #0 0xffffffff80641647 at kdb_backtrace+0x67 #1 0xffffffff80606762 at vpanic+0x182 #2 0xffffffff806067e3 at panic+0x43 #3 0xffffffff8084eef1 at trap_fatal+0x351 #4 0xffffffff8084f0e4 at trap_pfault+0x1e4 #5 0xffffffff8084e82f at trap+0x4bf #6 0xffffffff80830d57 at calltrap+0x8 #7 0xffffffff8063beab at _bus_dmamap_load_ccb+0x1fb #8 0xffffffff8063bc51 at bus_dmamap_load_ccb+0x91 #9 0xffffffff8042dcad at ata_dmaload+0x11d #10 0xffffffff8042df7e at ata_begin_transaction+0x7e #11 0xffffffff8042c18e at ataaction+0x9ce #12 0xffffffff802a220f at xpt_run_devq+0x5bf #13 0xffffffff802a17ad at xpt_action_default+0x94d #14 0xffffffff802c0024 at adastart+0x8b4 #15 0xffffffff802a2e93 at xpt_run_allocq+0x193 #16 0xffffffff802c0735 at adastrategy+0xf5 #17 0xffffffff80554206 at g_disk_start+0x426 Uptime: 2m29s git-svn-id: svn+ssh://svn.freebsd.org/base/head@290005 ccf9f872-aa2e-dd11-9fc8-001c23d0bc1f
1 parent 807f8c0 commit a45cd7e

File tree

2 files changed

+21
-193
lines changed

2 files changed

+21
-193
lines changed

sys/dev/xen/blkfront/blkfront.c

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -293,12 +293,8 @@ xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm)
293293
{
294294
int error;
295295

296-
if (cm->cm_bp != NULL)
297-
error = bus_dmamap_load_bio(sc->xbd_io_dmat, cm->cm_map,
298-
cm->cm_bp, xbd_queue_cb, cm, 0);
299-
else
300-
error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map,
301-
cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm, 0);
296+
error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map, cm->cm_data,
297+
cm->cm_datalen, xbd_queue_cb, cm, 0);
302298
if (error == EINPROGRESS) {
303299
/*
304300
* Maintain queuing order by freezing the queue. The next
@@ -358,6 +354,8 @@ xbd_bio_command(struct xbd_softc *sc)
358354
}
359355

360356
cm->cm_bp = bp;
357+
cm->cm_data = bp->bio_data;
358+
cm->cm_datalen = bp->bio_bcount;
361359
cm->cm_sector_number = (blkif_sector_t)bp->bio_pblkno;
362360

363361
switch (bp->bio_cmd) {
@@ -1011,7 +1009,7 @@ xbd_instance_create(struct xbd_softc *sc, blkif_sector_t sectors,
10111009

10121010
sc->xbd_disk->d_mediasize = sectors * sector_size;
10131011
sc->xbd_disk->d_maxsize = sc->xbd_max_request_size;
1014-
sc->xbd_disk->d_flags = DISKFLAG_UNMAPPED_BIO;
1012+
sc->xbd_disk->d_flags = 0;
10151013
if ((sc->xbd_flags & (XBDF_FLUSH|XBDF_BARRIER)) != 0) {
10161014
sc->xbd_disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
10171015
device_printf(sc->xbd_dev,

sys/x86/x86/busdma_bounce.c

Lines changed: 16 additions & 186 deletions
Original file line numberDiff line numberDiff line change
@@ -79,8 +79,8 @@ struct bounce_page {
7979
vm_offset_t vaddr; /* kva of bounce buffer */
8080
bus_addr_t busaddr; /* Physical address */
8181
vm_offset_t datavaddr; /* kva of client data */
82+
vm_page_t datapage; /* physical page of client data */
8283
vm_offset_t dataoffs; /* page offset of client data */
83-
vm_page_t datapage[2]; /* physical page(s) of client data */
8484
bus_size_t datacount; /* client data count */
8585
STAILQ_ENTRY(bounce_page) links;
8686
};
@@ -135,8 +135,8 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
135135
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
136136
int commit);
137137
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
138-
vm_offset_t vaddr, bus_addr_t addr1,
139-
bus_addr_t addr2, bus_size_t size);
138+
vm_offset_t vaddr, bus_addr_t addr,
139+
bus_size_t size);
140140
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
141141
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
142142
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
@@ -527,51 +527,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
527527
}
528528
}
529529

530-
static void
531-
_bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
532-
int ma_offs, bus_size_t buflen, int flags)
533-
{
534-
bus_size_t sg_len, max_sgsize;
535-
int page_index;
536-
vm_paddr_t paddr;
537-
538-
if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
539-
CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
540-
"alignment= %d", dmat->common.lowaddr,
541-
ptoa((vm_paddr_t)Maxmem),
542-
dmat->common.boundary, dmat->common.alignment);
543-
CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
544-
map, &nobounce_dmamap, map->pagesneeded);
545-
546-
/*
547-
* Count the number of bounce pages
548-
* needed in order to complete this transfer
549-
*/
550-
page_index = 0;
551-
while (buflen > 0) {
552-
paddr = ma[page_index]->phys_addr + ma_offs;
553-
sg_len = PAGE_SIZE - ma_offs;
554-
max_sgsize = MIN(buflen, dmat->common.maxsegsz);
555-
sg_len = MIN(sg_len, max_sgsize);
556-
if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
557-
sg_len = roundup2(sg_len,
558-
dmat->common.alignment);
559-
sg_len = MIN(sg_len, max_sgsize);
560-
KASSERT((sg_len & (dmat->common.alignment - 1))
561-
== 0, ("Segment size is not aligned"));
562-
map->pagesneeded++;
563-
}
564-
if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
565-
page_index++;
566-
ma_offs = (ma_offs + sg_len) & PAGE_MASK;
567-
KASSERT(buflen >= sg_len,
568-
("Segment length overruns original buffer"));
569-
buflen -= sg_len;
570-
}
571-
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
572-
}
573-
}
574-
575530
static int
576531
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
577532
{
@@ -677,7 +632,7 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
677632
map->pagesneeded != 0 &&
678633
bus_dma_run_filter(&dmat->common, curaddr)) {
679634
sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
680-
curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
635+
curaddr = add_bounce_page(dmat, map, 0, curaddr,
681636
sgsize);
682637
}
683638
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
@@ -746,7 +701,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
746701
bus_dma_run_filter(&dmat->common, curaddr)) {
747702
sgsize = roundup2(sgsize, dmat->common.alignment);
748703
sgsize = MIN(sgsize, max_sgsize);
749-
curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
704+
curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
750705
sgsize);
751706
} else {
752707
sgsize = MIN(sgsize, max_sgsize);
@@ -765,90 +720,6 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
765720
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
766721
}
767722

768-
static int
769-
bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
770-
struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags,
771-
bus_dma_segment_t *segs, int *segp)
772-
{
773-
vm_paddr_t paddr, next_paddr;
774-
int error, page_index;
775-
struct vm_page *page;
776-
bus_size_t sgsize, max_sgsize;
777-
778-
if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
779-
/*
780-
* If we have to keep the offset of each page this function
781-
* is not suitable, switch back to bus_dmamap_load_ma_triv
782-
* which is going to do the right thing in this case.
783-
*/
784-
error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
785-
flags, segs, segp);
786-
return (error);
787-
}
788-
789-
if (map == NULL)
790-
map = &nobounce_dmamap;
791-
792-
if (segs == NULL)
793-
segs = dmat->segments;
794-
795-
if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
796-
_bus_dmamap_count_ma(dmat, map, ma, ma_offs, buflen, flags);
797-
if (map->pagesneeded != 0) {
798-
error = _bus_dmamap_reserve_pages(dmat, map, flags);
799-
if (error)
800-
return (error);
801-
}
802-
}
803-
804-
page_index = 0;
805-
page = ma[0];
806-
while (buflen > 0) {
807-
/*
808-
* Compute the segment size, and adjust counts.
809-
*/
810-
page = ma[page_index];
811-
paddr = page->phys_addr + ma_offs;
812-
max_sgsize = MIN(buflen, dmat->common.maxsegsz);
813-
sgsize = PAGE_SIZE - ma_offs;
814-
if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
815-
map->pagesneeded != 0 &&
816-
bus_dma_run_filter(&dmat->common, paddr)) {
817-
sgsize = roundup2(sgsize, dmat->common.alignment);
818-
sgsize = MIN(sgsize, max_sgsize);
819-
KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
820-
("Segment size is not aligned"));
821-
/*
822-
* Check if two pages of the user provided buffer
823-
* are used.
824-
*/
825-
if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
826-
next_paddr = ma[page_index + 1]->phys_addr;
827-
else
828-
next_paddr = 0;
829-
paddr = add_bounce_page(dmat, map, 0, paddr,
830-
next_paddr, sgsize);
831-
} else {
832-
sgsize = MIN(sgsize, max_sgsize);
833-
}
834-
sgsize = _bus_dmamap_addseg(dmat, map, paddr, sgsize, segs,
835-
segp);
836-
if (sgsize == 0)
837-
break;
838-
KASSERT(buflen >= sgsize,
839-
("Segment length overruns original buffer"));
840-
buflen -= sgsize;
841-
if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
842-
page_index++;
843-
ma_offs = (ma_offs + sgsize) & PAGE_MASK;
844-
}
845-
846-
/*
847-
* Did we fit?
848-
*/
849-
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
850-
}
851-
852723
static void
853724
bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
854725
struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
@@ -892,7 +763,6 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
892763
{
893764
struct bounce_page *bpage;
894765
vm_offset_t datavaddr, tempvaddr;
895-
bus_size_t datacount1, datacount2;
896766

897767
if ((bpage = STAILQ_FIRST(&map->bpages)) == NULL)
898768
return;
@@ -908,35 +778,17 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
908778
while (bpage != NULL) {
909779
tempvaddr = 0;
910780
datavaddr = bpage->datavaddr;
911-
datacount1 = bpage->datacount;
912781
if (datavaddr == 0) {
913782
tempvaddr =
914-
pmap_quick_enter_page(bpage->datapage[0]);
783+
pmap_quick_enter_page(bpage->datapage);
915784
datavaddr = tempvaddr | bpage->dataoffs;
916-
datacount1 = min(PAGE_SIZE - bpage->dataoffs,
917-
datacount1);
918785
}
919786

920787
bcopy((void *)datavaddr,
921-
(void *)bpage->vaddr, datacount1);
788+
(void *)bpage->vaddr, bpage->datacount);
922789

923790
if (tempvaddr != 0)
924791
pmap_quick_remove_page(tempvaddr);
925-
926-
if (bpage->datapage[1] == 0)
927-
goto next_w;
928-
929-
/*
930-
* We are dealing with an unmapped buffer that expands
931-
* over two pages.
932-
*/
933-
datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
934-
datacount2 = bpage->datacount - datacount1;
935-
bcopy((void *)datavaddr,
936-
(void *)(bpage->vaddr + datacount1), datacount2);
937-
pmap_quick_remove_page(datavaddr);
938-
939-
next_w:
940792
bpage = STAILQ_NEXT(bpage, links);
941793
}
942794
dmat->bounce_zone->total_bounced++;
@@ -946,35 +798,17 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
946798
while (bpage != NULL) {
947799
tempvaddr = 0;
948800
datavaddr = bpage->datavaddr;
949-
datacount1 = bpage->datacount;
950801
if (datavaddr == 0) {
951802
tempvaddr =
952-
pmap_quick_enter_page(bpage->datapage[0]);
803+
pmap_quick_enter_page(bpage->datapage);
953804
datavaddr = tempvaddr | bpage->dataoffs;
954-
datacount1 = min(PAGE_SIZE - bpage->dataoffs,
955-
datacount1);
956805
}
957806

958-
bcopy((void *)bpage->vaddr, (void *)datavaddr,
959-
datacount1);
807+
bcopy((void *)bpage->vaddr,
808+
(void *)datavaddr, bpage->datacount);
960809

961810
if (tempvaddr != 0)
962811
pmap_quick_remove_page(tempvaddr);
963-
964-
if (bpage->datapage[1] == 0)
965-
goto next_r;
966-
967-
/*
968-
* We are dealing with an unmapped buffer that expands
969-
* over two pages.
970-
*/
971-
datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
972-
datacount2 = bpage->datacount - datacount1;
973-
bcopy((void *)(bpage->vaddr + datacount1),
974-
(void *)datavaddr, datacount2);
975-
pmap_quick_remove_page(datavaddr);
976-
977-
next_r:
978812
bpage = STAILQ_NEXT(bpage, links);
979813
}
980814
dmat->bounce_zone->total_bounced++;
@@ -1138,7 +972,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
1138972

1139973
static bus_addr_t
1140974
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
1141-
bus_addr_t addr1, bus_addr_t addr2, bus_size_t size)
975+
bus_addr_t addr, bus_size_t size)
1142976
{
1143977
struct bounce_zone *bz;
1144978
struct bounce_page *bpage;
@@ -1168,16 +1002,12 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
11681002

11691003
if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
11701004
/* Page offset needs to be preserved. */
1171-
bpage->vaddr |= addr1 & PAGE_MASK;
1172-
bpage->busaddr |= addr1 & PAGE_MASK;
1173-
KASSERT(addr2 == 0,
1174-
("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
1005+
bpage->vaddr |= addr & PAGE_MASK;
1006+
bpage->busaddr |= addr & PAGE_MASK;
11751007
}
11761008
bpage->datavaddr = vaddr;
1177-
bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
1178-
KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
1179-
bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2);
1180-
bpage->dataoffs = addr1 & PAGE_MASK;
1009+
bpage->datapage = PHYS_TO_VM_PAGE(addr);
1010+
bpage->dataoffs = addr & PAGE_MASK;
11811011
bpage->datacount = size;
11821012
STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
11831013
return (bpage->busaddr);
@@ -1249,7 +1079,7 @@ struct bus_dma_impl bus_dma_bounce_impl = {
12491079
.mem_free = bounce_bus_dmamem_free,
12501080
.load_phys = bounce_bus_dmamap_load_phys,
12511081
.load_buffer = bounce_bus_dmamap_load_buffer,
1252-
.load_ma = bounce_bus_dmamap_load_ma,
1082+
.load_ma = bus_dmamap_load_ma_triv,
12531083
.map_waitok = bounce_bus_dmamap_waitok,
12541084
.map_complete = bounce_bus_dmamap_complete,
12551085
.map_unload = bounce_bus_dmamap_unload,

0 commit comments

Comments
 (0)
0