@@ -79,8 +79,8 @@ struct bounce_page {
 	vm_offset_t	vaddr;		/* kva of bounce buffer */
 	bus_addr_t	busaddr;	/* Physical address */
 	vm_offset_t	datavaddr;	/* kva of client data */
+	vm_page_t	datapage;	/* physical page of client data */
 	vm_offset_t	dataoffs;	/* page offset of client data */
-	vm_page_t	datapage[2];	/* physical page(s) of client data */
 	bus_size_t	datacount;	/* client data count */
 	STAILQ_ENTRY(bounce_page) links;
 };
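After this revert a bounce segment never straddles two pages of client data, so a single vm_page_t fully describes an unmapped client buffer. A hypothetical assertion (the helper name is illustrative, not part of the tree) captures the invariant that lets the two-element array go:

/*
 * Illustrative sketch only: for unmapped transfers (datavaddr == 0),
 * the bounced data must now fit entirely inside 'datapage'.
 */
static inline void
bounce_page_assert_single(struct bounce_page *bpage)
{

	KASSERT(bpage->dataoffs + bpage->datacount <= PAGE_SIZE,
	    ("bounce segment spans two client pages"));
}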
@@ -135,8 +135,8 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
 				int commit);
 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
-				  vm_offset_t vaddr, bus_addr_t addr1,
-				  bus_addr_t addr2, bus_size_t size);
+				  vm_offset_t vaddr, bus_addr_t addr,
+				  bus_size_t size);
 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
 int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
 static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
@@ -527,51 +527,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
 	}
 }
 
-static void
-_bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
-    int ma_offs, bus_size_t buflen, int flags)
-{
-	bus_size_t sg_len, max_sgsize;
-	int page_index;
-	vm_paddr_t paddr;
-
-	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
-		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
-		    "alignment= %d", dmat->common.lowaddr,
-		    ptoa((vm_paddr_t)Maxmem),
-		    dmat->common.boundary, dmat->common.alignment);
-		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
-		    map, &nobounce_dmamap, map->pagesneeded);
-
-		/*
-		 * Count the number of bounce pages
-		 * needed in order to complete this transfer
-		 */
-		page_index = 0;
-		while (buflen > 0) {
-			paddr = ma[page_index]->phys_addr + ma_offs;
-			sg_len = PAGE_SIZE - ma_offs;
-			max_sgsize = MIN(buflen, dmat->common.maxsegsz);
-			sg_len = MIN(sg_len, max_sgsize);
-			if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
-				sg_len = roundup2(sg_len,
-				    dmat->common.alignment);
-				sg_len = MIN(sg_len, max_sgsize);
-				KASSERT((sg_len & (dmat->common.alignment - 1))
-				    == 0, ("Segment size is not aligned"));
-				map->pagesneeded++;
-			}
-			if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
-				page_index++;
-			ma_offs = (ma_offs + sg_len) & PAGE_MASK;
-			KASSERT(buflen >= sg_len,
-			    ("Segment length overruns original buffer"));
-			buflen -= sg_len;
-		}
-		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
-	}
-}
-
 static int
 _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
 {
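Counting bounce pages for unmapped buffers no longer needs its own page-array walk: once .load_ma points at the trivial loader (last hunk), every page-sized chunk flows through the load_phys path, which counts one page at a time. A simplified sketch of that style of counting, with boundary and alignment handling deliberately elided:

/*
 * Simplified sketch of per-page counting on the load_phys path;
 * the in-tree code also honors alignment and boundary constraints.
 */
static void
count_bounce_pages_sketch(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen)
{
	vm_paddr_t curaddr;
	bus_size_t sgsize;

	for (curaddr = buf; buflen != 0; curaddr += sgsize, buflen -= sgsize) {
		sgsize = MIN(buflen, PAGE_SIZE - (curaddr & PAGE_MASK));
		if (bus_dma_run_filter(&dmat->common, curaddr))
			map->pagesneeded++;
	}
}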
@@ -677,7 +632,7 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
 			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
-			curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
+			curaddr = add_bounce_page(dmat, map, 0, curaddr,
 			    sgsize);
 		}
 		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
@@ -746,7 +701,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
 			sgsize = roundup2(sgsize, dmat->common.alignment);
 			sgsize = MIN(sgsize, max_sgsize);
-			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
+			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
 			    sgsize);
 		} else {
 			sgsize = MIN(sgsize, max_sgsize);
@@ -765,90 +720,6 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
 	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
 }
 
-static int
-bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
-    struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags,
-    bus_dma_segment_t *segs, int *segp)
-{
-	vm_paddr_t paddr, next_paddr;
-	int error, page_index;
-	struct vm_page *page;
-	bus_size_t sgsize, max_sgsize;
-
-	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
-		/*
-		 * If we have to keep the offset of each page this function
-		 * is not suitable, switch back to bus_dmamap_load_ma_triv
-		 * which is going to do the right thing in this case.
-		 */
-		error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
-		    flags, segs, segp);
-		return (error);
-	}
-
-	if (map == NULL)
-		map = &nobounce_dmamap;
-
-	if (segs == NULL)
-		segs = dmat->segments;
-
-	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
-		_bus_dmamap_count_ma(dmat, map, ma, ma_offs, buflen, flags);
-		if (map->pagesneeded != 0) {
-			error = _bus_dmamap_reserve_pages(dmat, map, flags);
-			if (error)
-				return (error);
-		}
-	}
-
-	page_index = 0;
-	page = ma[0];
-	while (buflen > 0) {
-		/*
-		 * Compute the segment size, and adjust counts.
-		 */
-		page = ma[page_index];
-		paddr = page->phys_addr + ma_offs;
-		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
-		sgsize = PAGE_SIZE - ma_offs;
-		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
-		    map->pagesneeded != 0 &&
-		    bus_dma_run_filter(&dmat->common, paddr)) {
-			sgsize = roundup2(sgsize, dmat->common.alignment);
-			sgsize = MIN(sgsize, max_sgsize);
-			KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
-			    ("Segment size is not aligned"));
-			/*
-			 * Check if two pages of the user provided buffer
-			 * are used.
-			 */
-			if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
-				next_paddr = ma[page_index + 1]->phys_addr;
-			else
-				next_paddr = 0;
-			paddr = add_bounce_page(dmat, map, 0, paddr,
-			    next_paddr, sgsize);
-		} else {
-			sgsize = MIN(sgsize, max_sgsize);
-		}
-		sgsize = _bus_dmamap_addseg(dmat, map, paddr, sgsize, segs,
-		    segp);
-		if (sgsize == 0)
-			break;
-		KASSERT(buflen >= sgsize,
-		    ("Segment length overruns original buffer"));
-		buflen -= sgsize;
-		if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
-			page_index++;
-		ma_offs = (ma_offs + sgsize) & PAGE_MASK;
-	}
-
-	/*
-	 * Did we fit?
-	 */
-	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
-}
-
 static void
 bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
     struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
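What replaces the removed loader is the generic trivial one (wired up in the final hunk): it walks the vm_page array and feeds each page-bounded chunk to the physical loader, so no segment, and hence no bounce page, can cover two client pages. A sketch of that decomposition, assuming the usual _bus_dmamap_load_phys entry point:

/*
 * Sketch of the trivial vm_page array loader: split the request at
 * page boundaries and load each piece as a physical range.
 */
int
bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr;
	bus_size_t len;
	int error, i;

	error = 0;
	for (i = 0; tlen > 0; i++, tlen -= len) {
		len = min(PAGE_SIZE - ma_offs, tlen);
		paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs;
		error = _bus_dmamap_load_phys(dmat, map, paddr, len,
		    flags, segs, segp);
		if (error != 0)
			break;
		ma_offs = 0;
	}
	return (error);
}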
@@ -892,7 +763,6 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 {
 	struct bounce_page *bpage;
 	vm_offset_t datavaddr, tempvaddr;
-	bus_size_t datacount1, datacount2;
 
 	if ((bpage = STAILQ_FIRST(&map->bpages)) == NULL)
 		return;
@@ -908,35 +778,17 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 		while (bpage != NULL) {
 			tempvaddr = 0;
 			datavaddr = bpage->datavaddr;
-			datacount1 = bpage->datacount;
 			if (datavaddr == 0) {
 				tempvaddr =
-				    pmap_quick_enter_page(bpage->datapage[0]);
+				    pmap_quick_enter_page(bpage->datapage);
 				datavaddr = tempvaddr | bpage->dataoffs;
-				datacount1 = min(PAGE_SIZE - bpage->dataoffs,
-				    datacount1);
 			}
 
 			bcopy((void *)datavaddr,
-			    (void *)bpage->vaddr, datacount1);
+			    (void *)bpage->vaddr, bpage->datacount);
 
 			if (tempvaddr != 0)
 				pmap_quick_remove_page(tempvaddr);
-
-			if (bpage->datapage[1] == 0)
-				goto next_w;
-
-			/*
-			 * We are dealing with an unmapped buffer that expands
-			 * over two pages.
-			 */
-			datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
-			datacount2 = bpage->datacount - datacount1;
-			bcopy((void *)datavaddr,
-			    (void *)(bpage->vaddr + datacount1), datacount2);
-			pmap_quick_remove_page(datavaddr);
-
-next_w:
 			bpage = STAILQ_NEXT(bpage, links);
 		}
 		dmat->bounce_zone->total_bounced++;
@@ -946,35 +798,17 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 		while (bpage != NULL) {
 			tempvaddr = 0;
 			datavaddr = bpage->datavaddr;
-			datacount1 = bpage->datacount;
 			if (datavaddr == 0) {
 				tempvaddr =
-				    pmap_quick_enter_page(bpage->datapage[0]);
+				    pmap_quick_enter_page(bpage->datapage);
 				datavaddr = tempvaddr | bpage->dataoffs;
-				datacount1 = min(PAGE_SIZE - bpage->dataoffs,
-				    datacount1);
 			}
 
-			bcopy((void *)bpage->vaddr, (void *)datavaddr,
-			    datacount1);
+			bcopy((void *)bpage->vaddr,
+			    (void *)datavaddr, bpage->datacount);
 
 			if (tempvaddr != 0)
 				pmap_quick_remove_page(tempvaddr);
-
-			if (bpage->datapage[1] == 0)
-				goto next_r;
-
-			/*
-			 * We are dealing with an unmapped buffer that expands
-			 * over two pages.
-			 */
-			datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
-			datacount2 = bpage->datacount - datacount1;
-			bcopy((void *)(bpage->vaddr + datacount1),
-			    (void *)datavaddr, datacount2);
-			pmap_quick_remove_page(datavaddr);
-
-next_r:
 			bpage = STAILQ_NEXT(bpage, links);
 		}
 		dmat->bounce_zone->total_bounced++;
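Both sync directions now collapse to one temporary mapping and a single bcopy(). Condensed, the unmapped-buffer case reads roughly as below; sync_bounce_sketch and the to_bounce flag are illustrative names, not in-tree code:

/*
 * Condensed sketch of one bounce-page sync for an unmapped buffer:
 * map the single client page, copy datacount bytes in one go, unmap.
 */
static void
sync_bounce_sketch(struct bounce_page *bpage, bool to_bounce)
{
	vm_offset_t tempvaddr, datavaddr;

	tempvaddr = pmap_quick_enter_page(bpage->datapage);
	datavaddr = tempvaddr | bpage->dataoffs;
	if (to_bounce)		/* PREWRITE: client -> bounce buffer */
		bcopy((void *)datavaddr, (void *)bpage->vaddr,
		    bpage->datacount);
	else			/* POSTREAD: bounce buffer -> client */
		bcopy((void *)bpage->vaddr, (void *)datavaddr,
		    bpage->datacount);
	pmap_quick_remove_page(tempvaddr);
}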
@@ -1138,7 +972,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
 
 static bus_addr_t
 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
-		bus_addr_t addr1, bus_addr_t addr2, bus_size_t size)
+		bus_addr_t addr, bus_size_t size)
 {
 	struct bounce_zone *bz;
 	struct bounce_page *bpage;
@@ -1168,16 +1002,12 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
 
 	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
 		/* Page offset needs to be preserved. */
-		bpage->vaddr |= addr1 & PAGE_MASK;
-		bpage->busaddr |= addr1 & PAGE_MASK;
-		KASSERT(addr2 == 0,
-	    ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
+		bpage->vaddr |= addr & PAGE_MASK;
+		bpage->busaddr |= addr & PAGE_MASK;
 	}
 	bpage->datavaddr = vaddr;
-	bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
-	KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
-	bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2);
-	bpage->dataoffs = addr1 & PAGE_MASK;
+	bpage->datapage = PHYS_TO_VM_PAGE(addr);
+	bpage->dataoffs = addr & PAGE_MASK;
 	bpage->datacount = size;
 	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
 	return (bpage->busaddr);
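Call sites shrink to match: with sgsize already clipped to the page holding curaddr, the bouncing path passes a single address, as in this condensed excerpt mirroring the load_phys hunk above:

	/* Condensed post-revert call pattern on a bouncing path. */
	if (bus_dma_run_filter(&dmat->common, curaddr)) {
		sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
		curaddr = add_bounce_page(dmat, map, 0, curaddr, sgsize);
	}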
@@ -1249,7 +1079,7 @@ struct bus_dma_impl bus_dma_bounce_impl = {
 	.mem_free = bounce_bus_dmamem_free,
 	.load_phys = bounce_bus_dmamap_load_phys,
 	.load_buffer = bounce_bus_dmamap_load_buffer,
-	.load_ma = bounce_bus_dmamap_load_ma,
+	.load_ma = bus_dmamap_load_ma_triv,
 	.map_waitok = bounce_bus_dmamap_waitok,
 	.map_complete = bounce_bus_dmamap_complete,
 	.map_unload = bounce_bus_dmamap_unload,