| author    | Suren A. Chilingaryan <csa@suren.me> | 2015-11-20 06:04:08 +0100 |
|-----------|--------------------------------------|---------------------------|
| committer | Suren A. Chilingaryan <csa@suren.me> | 2015-11-20 06:04:08 +0100 |
| commit    | 517ecf828e60e1e364c3ab6e67c2acd8a3c1b0c1 (patch) | |
| tree      | bc3349f5f682a578050150318f2500a6c8a7a63f | |
| parent    | 2bda41263f2464c271509b0bd9ea9062c239d851 (diff) | |
Support large DMA pages in IPEDMA
| -rw-r--r-- | dma/ipe.c           | 46 |
|------------|---------------------|----|
| -rw-r--r-- | dma/ipe_benchmark.c |  9 |
| -rw-r--r-- | dma/ipe_private.h   |  3 |
| -rw-r--r-- | driver/kmem.c       |  6 |

4 files changed, 39 insertions, 25 deletions
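In short: the DMA page size, previously fixed at compile time via IPEDMA_PAGE_SIZE, becomes a runtime setting (ctx->page_size) read from the dma_page_size register of the dmaconf bank and validated as a multiple of IPEDMA_PAGE_SIZE; the kernel driver additionally sets __GFP_COMP for multi-page allocations, presumably so that higher-order buffers are allocated as compound pages. The standalone sketch below illustrates the TLP-count arithmetic the patch now applies to the configurable page size. The concrete values of IPEDMA_CORES, IPEDMA_MAX_TLP_SIZE, and IPEDMA_PAGE_SIZE are illustrative assumptions, not taken from the project headers:

```c
/* Toy sketch (not project code): how the patched dma_ipe_start() derives the
 * value written to IPEDMA_REG_TLP_COUNT once the DMA page size is a runtime
 * setting. Constants below are assumed stand-ins for the real defines. */
#include <stdio.h>
#include <stddef.h>

#define IPEDMA_CORES        1        /* assumption: single DMA engine core */
#define IPEDMA_MAX_TLP_SIZE 256      /* assumption: cap applied in the patch */
#define IPEDMA_PAGE_SIZE    4096     /* assumption: default DMA page size */

int main(void) {
    size_t tlp_size = 128;           /* fallback used when PCIe link info is unavailable */
    if (tlp_size > IPEDMA_MAX_TLP_SIZE)
        tlp_size = IPEDMA_MAX_TLP_SIZE;

    /* dma_page_size may now request any multiple of IPEDMA_PAGE_SIZE; the
     * patch computes WR(IPEDMA_REG_TLP_COUNT, page_size / (tlp_size * IPEDMA_CORES)). */
    for (size_t page_size = IPEDMA_PAGE_SIZE; page_size <= 4 * IPEDMA_PAGE_SIZE; page_size += IPEDMA_PAGE_SIZE) {
        size_t tlp_count = page_size / (tlp_size * IPEDMA_CORES);
        printf("page_size=%zu -> TLP_COUNT=%zu\n", page_size, tlp_count);
    }
    return 0;
}
```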
```diff
--- a/dma/ipe.c
+++ b/dma/ipe.c
@@ -142,15 +142,32 @@ int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dm
     if (ctx->pages) return 0;
 
+#ifdef IPEDMA_TLP_SIZE
+	tlp_size = IPEDMA_TLP_SIZE;
+#else /* IPEDMA_TLP_SIZE */
+	link_info = pcilib_get_pcie_link_info(vctx->pcilib);
+	if (link_info) {
+	    tlp_size = 1<<link_info->payload;
+	    if (tlp_size > IPEDMA_MAX_TLP_SIZE)
+		tlp_size = IPEDMA_MAX_TLP_SIZE;
+	} else tlp_size = 128;
+#endif /* IPEDMA_TLP_SIZE */
+
     if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "dma_timeout", &value))
 	ctx->dma_timeout = value;
     else
 	ctx->dma_timeout = IPEDMA_DMA_TIMEOUT;
 
-    if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "dma_page_size", &value))
-	ctx->dma_page_size = value;
-    else
-	ctx->dma_page_size = IPEDMA_PAGE_SIZE;
+    if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "dma_page_size", &value)) {
+	if (value % IPEDMA_PAGE_SIZE) {
+	    pcilib_error("Invalid DMA page size (%lu) is configured", value);
+	    return PCILIB_ERROR_INVALID_ARGUMENT;
+	}
+	//if ((value)&&((value / (tlp_size * IPEDMA_CORES)) > ...seems no limit...)) { ... fail ... }
+
+	ctx->page_size = value;
+    } else
+	ctx->page_size = IPEDMA_PAGE_SIZE;
 
     if (!pcilib_read_register(ctx->dmactx.pcilib, "dmaconf", "dma_pages", &value))
 	ctx->dma_pages = value;
@@ -174,7 +191,7 @@ int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dm
     kflags = PCILIB_KMEM_FLAG_REUSE|PCILIB_KMEM_FLAG_EXCLUSIVE|PCILIB_KMEM_FLAG_HARDWARE|(ctx->preserve?PCILIB_KMEM_FLAG_PERSISTENT:0);
 
     pcilib_kmem_handle_t *desc = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_CONSISTENT, 1, IPEDMA_DESCRIPTOR_SIZE, IPEDMA_DESCRIPTOR_ALIGNMENT, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, 0x00), kflags);
-    pcilib_kmem_handle_t *pages = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_DMA_C2S_PAGE, IPEDMA_DMA_PAGES, 0, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, 0x00), kflags);
+    pcilib_kmem_handle_t *pages = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_DMA_C2S_PAGE, IPEDMA_DMA_PAGES, ctx->page_size, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, 0x00), kflags);
 
     if (!desc||!pages) {
 	if (pages) pcilib_free_kernel_memory(ctx->dmactx.pcilib, pages, 0);
@@ -212,6 +229,11 @@ int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dm
 	last_written_addr_ptr = desc_va + 2 * sizeof(uint32_t);
     }
 
+	// get page size if default size was used
+    if (!ctx->page_size) {
+	ctx->page_size = pcilib_kmem_get_block_size(ctx->dmactx.pcilib, pages, 0);
+    }
+
     if (preserve) {
 	ctx->reused = 1;
 	ctx->preserve = 1;
@@ -249,18 +271,8 @@ int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dm
 	if (ctx->mode64) address64 = 0x8000 | (0<<24);
 	else address64 = 0;
 
-#ifdef IPEDMA_TLP_SIZE
-	tlp_size = IPEDMA_TLP_SIZE;
-#else /* IPEDMA_TLP_SIZE */
-	link_info = pcilib_get_pcie_link_info(vctx->pcilib);
-	if (link_info) {
-	    tlp_size = 1<<link_info->payload;
-	    if (tlp_size > IPEDMA_MAX_TLP_SIZE)
-		tlp_size = IPEDMA_MAX_TLP_SIZE;
-	} else tlp_size = 128;
-#endif /* IPEDMA_TLP_SIZE */
         WR(IPEDMA_REG_TLP_SIZE,  address64 | (tlp_size>>2));
-        WR(IPEDMA_REG_TLP_COUNT, IPEDMA_PAGE_SIZE / (tlp_size * IPEDMA_CORES));
+        WR(IPEDMA_REG_TLP_COUNT, ctx->page_size / (tlp_size * IPEDMA_CORES));
 
 	    // Setting progress register threshold
 	WR(IPEDMA_REG_UPDATE_THRESHOLD, IPEDMA_DMA_PROGRESS_THRESHOLD);
@@ -325,7 +337,7 @@ int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dm
     ctx->desc = desc;
     ctx->pages = pages;
-    ctx->page_size = pcilib_kmem_get_block_size(ctx->dmactx.pcilib, pages, 0);
+
     ctx->ring_size = IPEDMA_DMA_PAGES;
 
     return 0;
diff --git a/dma/ipe_benchmark.c b/dma/ipe_benchmark.c
index 57e5646..937a848 100644
--- a/dma/ipe_benchmark.c
+++ b/dma/ipe_benchmark.c
@@ -74,11 +74,12 @@ double dma_ipe_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dm
     if ((dma != PCILIB_DMA_ENGINE_INVALID)&&(dma > 1)) return -1.;
 
-    if (size%IPEDMA_PAGE_SIZE) size = (1 + size / IPEDMA_PAGE_SIZE) * IPEDMA_PAGE_SIZE;
-
     err = dma_ipe_start(vctx, 0, PCILIB_DMA_FLAGS_DEFAULT);
     if (err) return err;
 
+    if (size%ctx->page_size) size = (1 + size / ctx->page_size) * ctx->page_size;
+
+
     if (getenv("PCILIB_BENCHMARK_HARDWARE"))
 	read_dma = dma_ipe_skim_dma_custom;
     else
@@ -102,9 +103,9 @@ double dma_ipe_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dm
 	pcilib_calc_deadline(&start, ctx->dma_timeout * IPEDMA_DMA_PAGES);
 
 #ifdef IPEDMA_BUG_LAST_READ
-	dma_buffer_space = (IPEDMA_DMA_PAGES - 2) * IPEDMA_PAGE_SIZE;
+	dma_buffer_space = (IPEDMA_DMA_PAGES - 2) * ctx->page_size;
 #else /* IPEDMA_BUG_LAST_READ */
-	dma_buffer_space = (IPEDMA_DMA_PAGES - 1) * IPEDMA_PAGE_SIZE;
+	dma_buffer_space = (IPEDMA_DMA_PAGES - 1) * ctx->page_size;
 #endif /* IPEDMA_BUG_LAST_READ */
 
 	// Allocate memory and prepare data
diff --git a/dma/ipe_private.h b/dma/ipe_private.h
index 655a485..e3cb217 100644
--- a/dma/ipe_private.h
+++ b/dma/ipe_private.h
@@ -95,12 +95,11 @@ struct ipe_dma_s {
     uint32_t dma_flags;			/**< Various operation flags, see IPEDMA_FLAG_* */
     size_t dma_timeout;			/**< DMA timeout, IPEDMA_DMA_TIMEOUT is used by default */
     size_t dma_pages;			/**< Number of DMA pages in ring buffer to allocate */
-    size_t dma_page_size;		/**< Size of a single DMA page */
 
     pcilib_kmem_handle_t *desc;		/**< in-memory status descriptor written by DMA engine upon operation progess */
     pcilib_kmem_handle_t *pages;	/**< collection of memory-locked pages for DMA operation */
 
-    size_t ring_size, page_size;
+    size_t ring_size, page_size;	/**< Number of pages in ring buffer and the size of a single DMA page */
     size_t last_read, last_written;
     uintptr_t last_read_addr;
diff --git a/driver/kmem.c b/driver/kmem.c
index 2c72e38..7539ae6 100644
--- a/driver/kmem.c
+++ b/driver/kmem.c
@@ -56,7 +56,7 @@ int pcidriver_kmem_alloc(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_han
 			return -EINVAL;
 		    }
 
-		    if ((kmem_handle->type&PCILIB_KMEM_TYPE_MASK) == PCILIB_KMEM_TYPE_PAGE) {
+		    if (((kmem_handle->type&PCILIB_KMEM_TYPE_MASK) == PCILIB_KMEM_TYPE_PAGE)&&(kmem_handle->size == 0)) {
 			    kmem_handle->size = kmem_entry->size;
 		    } else if (kmem_handle->size != kmem_entry->size) {
 			mod_info("Invalid size of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->size, kmem_handle->size);
@@ -151,10 +151,12 @@ int pcidriver_kmem_alloc(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_han
 		kmem_handle->size = PAGE_SIZE;
 	    else if (kmem_handle->size%PAGE_SIZE)
 		goto kmem_alloc_mem_fail;
+	    else
+		flags |= __GFP_COMP;
 
 	    retptr = (void*)__get_free_pages(flags, get_order(kmem_handle->size));
 	    kmem_entry->dma_handle = 0;
-
+
 	    if (retptr) {
 	        if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_S2C_PAGE) {
 		    kmem_entry->direction = PCI_DMA_TODEVICE;
```
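From the user side, a larger DMA page would be requested by writing the dma_page_size register before the engine is started. The sketch below assumes the pcilib register API is symmetric to the pcilib_read_register() calls visible in the diff; the device path and model name are placeholders, not values from this commit:

```c
/* Hedged usage sketch: configure a larger DMA page size via the dmaconf bank.
 * "/dev/fpga0" and "ipedma" are hypothetical arguments for illustration. */
#include <pcilib.h>

int configure_large_dma_pages(void) {
    pcilib_t *pci = pcilib_open("/dev/fpga0", "ipedma");
    if (!pci) return -1;

    /* Request 16 default-sized pages per DMA page; dma_ipe_start() rejects
     * any value that is not a multiple of IPEDMA_PAGE_SIZE. */
    int err = pcilib_write_register(pci, "dmaconf", "dma_page_size", 16 * 4096);

    pcilib_close(pci);
    return err;
}
```

Writing 0 (or leaving the register untouched) keeps the old behaviour: ctx->page_size falls back to IPEDMA_PAGE_SIZE, and after allocation it is refreshed from pcilib_kmem_get_block_size() so reused buffers report their actual size.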
