diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
index 3f68276eea5f..375246c43af6 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
@@ -24,6 +24,98 @@ __KERNEL_RCSID(0, "$NetBSD: i915_gem_phys.c,v 1.8 2021/12/19 12:45:43 riastradh
 #include "i915_gem_region.h"
 #include "i915_scatterlist.h"
 
+#ifdef __NetBSD__
+
+#include <uvm/uvm.h>
+#include <uvm/uvm_extern.h>
+
+#include <machine/pmap.h>	/* kvtopte, pmap_pte_clearbits */
+
+/*
+ * Version of bus_dmamem_map that uses pmap_kenter_pa, not pmap_enter,
+ * so that it isn't affected by pmap_page_protect on the physical
+ * address.  Adapted from sys/arch/x86/x86/bus_dma.c.
+ */
+static int
+bus_dmamem_kmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
+    size_t size, void **kvap, int flags)
+{
+	vaddr_t va;
+	bus_addr_t addr;
+	int curseg;
+	const uvm_flag_t kmflags =
+	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
+	u_int pmapflags = PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE;
+
+	CTASSERT(PAGE_SIZE == 4096);
+	printf("%s: t=%p segs=%p nsegs=%d size=0x%zx kvap=%p flags=0x%x\n",
+	    __func__, t, segs, nsegs, size, kvap, flags);
+
+	size = round_page(size);
+	if (flags & BUS_DMA_NOCACHE)
+		pmapflags |= PMAP_NOCACHE;
+
+	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
+
+	if (va == 0)
+		return ENOMEM;
+
+	printf("%s: va=%"PRIxVADDR"\n", __func__, va);
+	*kvap = (void *)va;
+
+	for (curseg = 0; curseg < nsegs; curseg++) {
+		printf("%s:%d size=0x%"PRIxBUSSIZE
+		    " curseg=%d addr=0x%"PRIxBUSADDR" len=0x%"PRIxBUSSIZE
+		    " end=0x%"PRIxBUSADDR"\n",
+		    __func__, __LINE__, size,
+		    curseg, segs[curseg].ds_addr, segs[curseg].ds_len,
+		    segs[curseg].ds_addr + segs[curseg].ds_len);
+		for (addr = segs[curseg].ds_addr;
+		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
+		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
+			printf("%s:%d: size=0x%"PRIxBUSSIZE
+			    " addr=0x%"PRIxBUSADDR,
+			    __func__, __LINE__, size, addr);
+			if (size == 0)
+				panic("bus_dmamem_kmap: size botch");
+			pmap_kenter_pa(va, addr,
+			    VM_PROT_READ | VM_PROT_WRITE,
+			    pmapflags);
+		}
+	}
+	pmap_update(pmap_kernel());
+
+	return 0;
+}
+
+static void
+bus_dmamem_kunmap(bus_dma_tag_t t, void *kva, size_t size)
+{
+	pt_entry_t *pte, opte;
+	vaddr_t va, sva, eva;
+
+	KASSERTMSG(((uintptr_t)kva & PGOFSET) == 0, "kva=%p", kva);
+
+	size = round_page(size);
+	sva = (vaddr_t)kva;
+	eva = sva + size;
+
+	/*
+	 * mark pages cacheable again.
+	 */
+	for (va = sva; va < eva; va += PAGE_SIZE) {
+		pte = kvtopte(va);
+		opte = *pte;
+		if ((opte & PTE_PCD) != 0)
+			pmap_pte_clearbits(pte, PTE_PCD);
+	}
+	pmap_kremove((vaddr_t)kva, size);
+	pmap_update(pmap_kernel());
+	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
+}
+
+#endif
+
 #include <linux/nbsd-namespace.h>
 
 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
@@ -65,7 +157,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	if (ret)
 		return -ENOMEM;
 	KASSERT(rsegs == 1);
-	ret = -bus_dmamem_map(dmat, &obj->mm.u.phys.seg, 1,
+	ret = -bus_dmamem_kmap(dmat, &obj->mm.u.phys.seg, 1,
 	    roundup_pow_of_two(obj->base.size), &vaddr,
 	    BUS_DMA_WAITOK|BUS_DMA_COHERENT);
 	if (ret)
@@ -83,7 +175,12 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	if (!st)
 		goto err_pci;
 
+#ifdef __NetBSD__
+	if (sg_alloc_table_from_bus_dmamem(st, dmat, &obj->mm.u.phys.seg, 1,
+		GFP_KERNEL))
+#else
 	if (sg_alloc_table(st, 1, GFP_KERNEL))
+#endif
 		goto err_st;
 
 	sg = st->sgl;
@@ -151,7 +248,7 @@ err_st:
 err_pci:
 #ifdef __NetBSD__
 	if (vaddr) {
-		bus_dmamem_unmap(dmat, vaddr,
+		bus_dmamem_kunmap(dmat, vaddr,
 		    roundup_pow_of_two(obj->base.size));
 	}
 	obj->mm.u.phys.kva = NULL;
@@ -225,7 +322,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 	kfree(pages);
 
 #ifdef __NetBSD__
-	bus_dmamem_unmap(dmat, obj->mm.u.phys.kva,
+	bus_dmamem_kunmap(dmat, obj->mm.u.phys.kva,
 	    roundup_pow_of_two(obj->base.size));
 	obj->mm.u.phys.kva = NULL;
 	bus_dmamem_free(dmat, &obj->mm.u.phys.seg, 1);