2
2
/* Store a 32-bit little-word into ring-descriptor memory at the given byte offset. */
#define NWL_RING_SET(ring, off, value) *(uint32_t*)((char*)(ring) + (off)) = (value)
3
3
/* Read-modify-write a 32-bit word at a byte offset: keep the bits selected by
 * 'keep', OR in 'value'.  NOTE: 'ring' and 'off' are evaluated twice. */
#define NWL_RING_UPDATE(ring, off, keep, value) *(uint32_t*)((char*)(ring) + (off)) = ((*(uint32_t*)((char*)(ring) + (off))) & (keep)) | (value)
5
static int dma_nwl_compute_read_s2c_pointers(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info, unsigned char *ring, uint32_t ring_pa) {
5
static int dma_nwl_compute_read_s2c_pointers(nwl_dma_t *ctx, pcilib_nwl_engine_context_t *ectx, unsigned char *ring, uint32_t ring_pa) {
8
char *base = info->base_addr;
8
const char *base = ectx->base_addr;
10
10
nwl_read_register(val, ctx, base, REG_SW_NEXT_BD);
11
11
if ((val < ring_pa)||((val - ring_pa) % PCILIB_NWL_DMA_DESCRIPTOR_SIZE)) {
27
27
return PCILIB_ERROR_INVALID_STATE;
30
info->tail = (val - ring_pa) / PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
31
if (info->tail >= PCILIB_NWL_DMA_PAGES) {
32
pcilib_warning("Inconsistent S2C DMA Ring buffer is found (REG_DMA_ENG_NEXT_BD register value (%zu) out of range)", info->tail);
30
ectx->tail = (val - ring_pa) / PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
31
if (ectx->tail >= PCILIB_NWL_DMA_PAGES) {
32
pcilib_warning("Inconsistent S2C DMA Ring buffer is found (REG_DMA_ENG_NEXT_BD register value (%zu) out of range)", ectx->tail);
33
33
return PCILIB_ERROR_INVALID_STATE;
37
printf("S2C: %lu %lu\n", info->tail, info->head);
37
printf("S2C: %lu %lu\n", ectx->tail, ectx->head);
38
38
#endif /* DEBUG_NWL */
43
static int dma_nwl_compute_read_c2s_pointers(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info, unsigned char *ring, uint32_t ring_pa) {
43
static int dma_nwl_compute_read_c2s_pointers(nwl_dma_t *ctx, pcilib_nwl_engine_context_t *ectx, unsigned char *ring, uint32_t ring_pa) {
46
char *base = info->base_addr;
46
const char *base = ectx->base_addr;
48
48
nwl_read_register(val, ctx, base, REG_SW_NEXT_BD);
49
49
if ((val < ring_pa)||((val - ring_pa) % PCILIB_NWL_DMA_DESCRIPTOR_SIZE)) {
52
52
return PCILIB_ERROR_INVALID_STATE;
55
info->head = (val - ring_pa) / PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
56
if (info->head >= PCILIB_NWL_DMA_PAGES) {
57
pcilib_warning("Inconsistent C2S DMA Ring buffer is found (REG_SW_NEXT_BD register value (%zu) out of range)", info->head);
55
ectx->head = (val - ring_pa) / PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
56
if (ectx->head >= PCILIB_NWL_DMA_PAGES) {
57
pcilib_warning("Inconsistent C2S DMA Ring buffer is found (REG_SW_NEXT_BD register value (%zu) out of range)", ectx->head);
58
58
return PCILIB_ERROR_INVALID_STATE;
61
info->tail = info->head + 1;
62
if (info->tail == PCILIB_NWL_DMA_PAGES) info->tail = 0;
61
ectx->tail = ectx->head + 1;
62
if (ectx->tail == PCILIB_NWL_DMA_PAGES) ectx->tail = 0;
65
printf("C2S: %lu %lu\n", info->tail, info->head);
65
printf("C2S: %lu %lu\n", ectx->tail, ectx->head);
66
66
#endif /* DEBUG_NWL */
72
static int dma_nwl_allocate_engine_buffers(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info) {
72
static int dma_nwl_allocate_engine_buffers(nwl_dma_t *ctx, pcilib_nwl_engine_context_t *ectx) {
82
82
pcilib_kmem_flags_t flags;
83
83
pcilib_kmem_type_t type;
85
char *base = info->base_addr;
85
char *base = ectx->base_addr;
87
if (info->pages) return 0;
87
if (ectx->pages) return 0;
89
89
// Or bidirectional specified by 0x0|addr, or read 0x0|addr and write 0x80|addr
90
type = (info->desc.direction == PCILIB_DMA_TO_DEVICE)?PCILIB_KMEM_TYPE_DMA_S2C_PAGE:PCILIB_KMEM_TYPE_DMA_C2S_PAGE;
91
sub_use = info->desc.addr|((info->desc.direction == PCILIB_DMA_TO_DEVICE)?0x80:0x00);
92
flags = PCILIB_KMEM_FLAG_REUSE|PCILIB_KMEM_FLAG_EXCLUSIVE|PCILIB_KMEM_FLAG_HARDWARE|(info->preserve?PCILIB_KMEM_FLAG_PERSISTENT:0);
90
type = (ectx->desc->direction == PCILIB_DMA_TO_DEVICE)?PCILIB_KMEM_TYPE_DMA_S2C_PAGE:PCILIB_KMEM_TYPE_DMA_C2S_PAGE;
91
sub_use = ectx->desc->addr|((ectx->desc->direction == PCILIB_DMA_TO_DEVICE)?0x80:0x00);
92
flags = PCILIB_KMEM_FLAG_REUSE|PCILIB_KMEM_FLAG_EXCLUSIVE|PCILIB_KMEM_FLAG_HARDWARE|(ectx->preserve?PCILIB_KMEM_FLAG_PERSISTENT:0);
94
pcilib_kmem_handle_t *ring = pcilib_alloc_kernel_memory(ctx->pcilib, PCILIB_KMEM_TYPE_CONSISTENT, 1, PCILIB_NWL_DMA_PAGES * PCILIB_NWL_DMA_DESCRIPTOR_SIZE, PCILIB_NWL_ALIGNMENT, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, sub_use), flags);
95
pcilib_kmem_handle_t *pages = pcilib_alloc_kernel_memory(ctx->pcilib, type, PCILIB_NWL_DMA_PAGES, 0, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, sub_use), flags);
94
pcilib_kmem_handle_t *ring = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_CONSISTENT, 1, PCILIB_NWL_DMA_PAGES * PCILIB_NWL_DMA_DESCRIPTOR_SIZE, PCILIB_NWL_ALIGNMENT, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, sub_use), flags);
95
pcilib_kmem_handle_t *pages = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, type, PCILIB_NWL_DMA_PAGES, 0, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, sub_use), flags);
97
97
if (!ring||!pages) {
98
if (pages) pcilib_free_kernel_memory(ctx->pcilib, pages, 0);
99
if (ring) pcilib_free_kernel_memory(ctx->pcilib, ring, 0);
98
if (pages) pcilib_free_kernel_memory(ctx->dmactx.pcilib, pages, 0);
99
if (ring) pcilib_free_kernel_memory(ctx->dmactx.pcilib, ring, 0);
100
100
return PCILIB_ERROR_MEMORY;
103
reuse_ring = pcilib_kmem_is_reused(ctx->pcilib, ring);
104
reuse_pages = pcilib_kmem_is_reused(ctx->pcilib, pages);
103
reuse_ring = pcilib_kmem_is_reused(ctx->dmactx.pcilib, ring);
104
reuse_pages = pcilib_kmem_is_reused(ctx->dmactx.pcilib, pages);
106
106
// I guess idea here was that we not need to check all that stuff during the second iteration
107
107
// which is basicaly true (shall we expect any driver-triggered changes or parallel accesses?)
108
108
// but still we need to set preserve flag (and that if we enforcing preservation --start-dma).
109
109
// Probably having checks anyway is not harming...
110
// if (!info->preserve) {
110
// if (!ectx->preserve) {
111
111
if (reuse_ring == reuse_pages) {
112
112
if (reuse_ring & PCILIB_KMEM_REUSE_PARTIAL) pcilib_warning("Inconsistent DMA buffers are found (only part of required buffers is available), reinitializing...");
113
113
else if (reuse_ring & PCILIB_KMEM_REUSE_REUSED) {
114
114
if ((reuse_ring & PCILIB_KMEM_REUSE_PERSISTENT) == 0) pcilib_warning("Lost DMA buffers are found (non-persistent mode), reinitializing...");
115
115
else if ((reuse_ring & PCILIB_KMEM_REUSE_HARDWARE) == 0) pcilib_warning("Lost DMA buffers are found (missing HW reference), reinitializing...");
117
nwl_read_register(val, ctx, info->base_addr, REG_DMA_ENG_CTRL_STATUS);
117
nwl_read_register(val, ctx, ectx->base_addr, REG_DMA_ENG_CTRL_STATUS);
119
119
if ((val&DMA_ENG_RUNNING) == 0) pcilib_warning("Lost DMA buffers are found (DMA engine is stopped), reinitializing...");
120
120
else preserve = 1;
127
unsigned char *data = (unsigned char*)pcilib_kmem_get_ua(ctx->pcilib, ring);
128
uint32_t ring_pa = pcilib_kmem_get_pa(ctx->pcilib, ring);
127
unsigned char *data = (unsigned char*)pcilib_kmem_get_ua(ctx->dmactx.pcilib, ring);
128
uint32_t ring_pa = pcilib_kmem_get_pa(ctx->dmactx.pcilib, ring);
131
if (info->desc.direction == PCILIB_DMA_FROM_DEVICE) err = dma_nwl_compute_read_c2s_pointers(ctx, info, data, ring_pa);
132
else err = dma_nwl_compute_read_s2c_pointers(ctx, info, data, ring_pa);
131
if (ectx->desc->direction == PCILIB_DMA_FROM_DEVICE) err = dma_nwl_compute_read_c2s_pointers(ctx, ectx, data, ring_pa);
132
else err = dma_nwl_compute_read_s2c_pointers(ctx, ectx, data, ring_pa);
134
134
if (err) preserve = 0;
139
buf_sz = pcilib_kmem_get_block_size(ctx->pcilib, pages, 0);
139
buf_sz = pcilib_kmem_get_block_size(ctx->dmactx.pcilib, pages, 0);
143
143
memset(data, 0, PCILIB_NWL_DMA_PAGES * PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
145
145
for (i = 0; i < PCILIB_NWL_DMA_PAGES; i++, data += PCILIB_NWL_DMA_DESCRIPTOR_SIZE) {
146
buf_pa = pcilib_kmem_get_block_pa(ctx->pcilib, pages, i);
147
buf_sz = pcilib_kmem_get_block_size(ctx->pcilib, pages, i);
146
buf_pa = pcilib_kmem_get_block_pa(ctx->dmactx.pcilib, pages, i);
147
buf_sz = pcilib_kmem_get_block_size(ctx->dmactx.pcilib, pages, i);
149
149
NWL_RING_SET(data, DMA_BD_NDESC_OFFSET, ring_pa + ((i + 1) % PCILIB_NWL_DMA_PAGES) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
150
150
NWL_RING_SET(data, DMA_BD_BUFAL_OFFSET, buf_pa&0xFFFFFFFF);
160
160
nwl_write_register(val, ctx, base, REG_DMA_ENG_NEXT_BD);
161
161
nwl_write_register(val, ctx, base, REG_SW_NEXT_BD);
169
info->page_size = buf_sz;
170
info->ring_size = PCILIB_NWL_DMA_PAGES;
169
ectx->page_size = buf_sz;
170
ectx->ring_size = PCILIB_NWL_DMA_PAGES;
176
static size_t dma_nwl_clean_buffers(nwl_dma_t * ctx, pcilib_nwl_engine_description_t *info) {
176
static size_t dma_nwl_clean_buffers(nwl_dma_t * ctx, pcilib_nwl_engine_context_t *ectx) {
180
unsigned char *ring = pcilib_kmem_get_ua(ctx->pcilib, info->ring);
181
ring += info->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
180
unsigned char *ring = pcilib_kmem_get_ua(ctx->dmactx.pcilib, ectx->ring);
181
ring += ectx->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
184
184
status = NWL_RING_GET(ring, DMA_BD_BUFL_STATUS_OFFSET)&DMA_BD_STATUS_MASK;
216
static size_t dma_nwl_get_next_buffer(nwl_dma_t * ctx, pcilib_nwl_engine_description_t *info, size_t n_buffers, pcilib_timeout_t timeout) {
216
static size_t dma_nwl_get_next_buffer(nwl_dma_t * ctx, pcilib_nwl_engine_context_t *ectx, size_t n_buffers, pcilib_timeout_t timeout) {
217
217
struct timeval start, cur;
219
219
size_t res, n = 0;
222
for (head = info->head; (((head + 1)%info->ring_size) != info->tail)&&(n < n_buffers); head++, n++);
223
if (n == n_buffers) return info->head;
222
for (head = ectx->head; (((head + 1)%ectx->ring_size) != ectx->tail)&&(n < n_buffers); head++, n++);
223
if (n == n_buffers) return ectx->head;
225
225
gettimeofday(&start, NULL);
227
res = dma_nwl_clean_buffers(ctx, info);
227
res = dma_nwl_clean_buffers(ctx, ectx);
228
228
if (res == (size_t)-1) return PCILIB_DMA_BUFFER_INVALID;
248
248
if (n < n_buffers) return PCILIB_DMA_BUFFER_INVALID;
253
static int dma_nwl_push_buffer(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info, size_t size, int eop, pcilib_timeout_t timeout) {
253
static int dma_nwl_push_buffer(nwl_dma_t *ctx, pcilib_nwl_engine_context_t *ectx, size_t size, int eop, pcilib_timeout_t timeout) {
257
unsigned char *ring = pcilib_kmem_get_ua(ctx->pcilib, info->ring);
258
uint32_t ring_pa = pcilib_kmem_get_pa(ctx->pcilib, info->ring);
257
unsigned char *ring = pcilib_kmem_get_ua(ctx->dmactx.pcilib, ectx->ring);
258
uint32_t ring_pa = pcilib_kmem_get_pa(ctx->dmactx.pcilib, ectx->ring);
260
ring += info->head * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
260
ring += ectx->head * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
263
if (!info->writting) {
263
if (!ectx->writting) {
264
264
flags |= DMA_BD_SOP_MASK;
268
268
flags |= DMA_BD_EOP_MASK;
272
272
NWL_RING_SET(ring, DMA_BD_BUFL_CTRL_OFFSET, size|flags);
273
273
NWL_RING_SET(ring, DMA_BD_BUFL_STATUS_OFFSET, size);
276
if (info->head == info->ring_size) info->head = 0;
276
if (ectx->head == ectx->ring_size) ectx->head = 0;
278
val = ring_pa + info->head * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
279
nwl_write_register(val, ctx, info->base_addr, REG_SW_NEXT_BD);
278
val = ring_pa + ectx->head * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
279
nwl_write_register(val, ctx, ectx->base_addr, REG_SW_NEXT_BD);
285
static size_t dma_nwl_wait_buffer(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info, size_t *size, int *eop, pcilib_timeout_t timeout) {
285
static size_t dma_nwl_wait_buffer(nwl_dma_t *ctx, pcilib_nwl_engine_context_t *ectx, size_t *size, int *eop, pcilib_timeout_t timeout) {
286
286
struct timeval start, cur;
287
287
uint32_t status_size, status;
289
unsigned char *ring = pcilib_kmem_get_ua(ctx->pcilib, info->ring);
289
unsigned char *ring = pcilib_kmem_get_ua(ctx->dmactx.pcilib, ectx->ring);
291
ring += info->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
291
ring += ectx->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
293
293
gettimeofday(&start, NULL);
329
329
// This function is not used now, but we may need it in the future
330
static int dma_nwl_is_overflown(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info) {
330
static int dma_nwl_is_overflown(nwl_dma_t *ctx, pcilib_nwl_engine_context_t *ectx) {
332
unsigned char *ring = pcilib_kmem_get_ua(ctx->pcilib, info->ring);
333
if (info->tail > 0) ring += (info->tail - 1) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
334
else ring += (info->ring_size - 1) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
332
unsigned char *ring = pcilib_kmem_get_ua(ctx->dmactx.pcilib, ectx->ring);
333
if (ectx->tail > 0) ring += (ectx->tail - 1) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
334
else ring += (ectx->ring_size - 1) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
336
336
status = NWL_RING_GET(ring, DMA_BD_BUFL_STATUS_OFFSET);
337
337
return status&DMA_BD_COMP_MASK?1:0;
341
static int dma_nwl_return_buffer(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info) {
341
static int dma_nwl_return_buffer(nwl_dma_t *ctx, pcilib_nwl_engine_context_t *ectx) {
344
unsigned char *ring = pcilib_kmem_get_ua(ctx->pcilib, info->ring);
345
uint32_t ring_pa = pcilib_kmem_get_pa(ctx->pcilib, info->ring);
346
size_t bufsz = pcilib_kmem_get_block_size(ctx->pcilib, info->pages, info->tail);
344
unsigned char *ring = pcilib_kmem_get_ua(ctx->dmactx.pcilib, ectx->ring);
345
uint32_t ring_pa = pcilib_kmem_get_pa(ctx->dmactx.pcilib, ectx->ring);
346
size_t bufsz = pcilib_kmem_get_block_size(ctx->dmactx.pcilib, ectx->pages, ectx->tail);
348
ring += info->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
348
ring += ectx->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
350
350
#ifdef NWL_GENERATE_DMA_IRQ
351
351
NWL_RING_SET(ring, DMA_BD_BUFL_CTRL_OFFSET, bufsz | DMA_BD_INT_ERROR_MASK | DMA_BD_INT_COMP_MASK);
356
356
NWL_RING_SET(ring, DMA_BD_BUFL_STATUS_OFFSET, 0);
358
val = ring_pa + info->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
359
nwl_write_register(val, ctx, info->base_addr, REG_SW_NEXT_BD);
358
val = ring_pa + ectx->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
359
nwl_write_register(val, ctx, ectx->base_addr, REG_SW_NEXT_BD);
362
if (info->tail == info->ring_size) info->tail = 0;
362
if (ectx->tail == ectx->ring_size) ectx->tail = 0;
369
369
uint32_t bstatus;
370
370
nwl_dma_t *ctx = (nwl_dma_t*)vctx;
371
pcilib_nwl_engine_description_t *info = ctx->engines + dma;
372
unsigned char *ring = (unsigned char*)pcilib_kmem_get_ua(ctx->pcilib, info->ring);
371
pcilib_nwl_engine_context_t *ectx = ctx->engines + dma;
372
unsigned char *ring = (unsigned char*)pcilib_kmem_get_ua(ctx->dmactx.pcilib, ectx->ring);
375
375
if (!status) return -1;
377
status->started = info->started;
378
status->ring_size = info->ring_size;
379
status->buffer_size = info->page_size;
380
status->ring_tail = info->tail;
377
status->started = ectx->started;
378
status->ring_size = ectx->ring_size;
379
status->buffer_size = ectx->page_size;
380
status->ring_tail = ectx->tail;
382
if (info->desc.direction == PCILIB_DMA_FROM_DEVICE) {
382
if (ectx->desc->direction == PCILIB_DMA_FROM_DEVICE) {
384
for (i = 0; i < info->ring_size; i++) {
384
for (i = 0; i < ectx->ring_size; i++) {
385
385
pos = status->ring_tail + i;
386
if (pos >= info->ring_size) pos -= info->ring_size;
386
if (pos >= ectx->ring_size) pos -= ectx->ring_size;
388
388
bstatus = NWL_RING_GET(ring + pos * PCILIB_NWL_DMA_DESCRIPTOR_SIZE, DMA_BD_BUFL_STATUS_OFFSET);
389
389
if ((bstatus&(DMA_BD_ERROR_MASK|DMA_BD_COMP_MASK)) == 0) break;
391
391
status->ring_head = pos;
393
status->ring_head = info->head;
393
status->ring_head = ectx->head;
398
for (i = 0; (i < info->ring_size)&&(i < n_buffers); i++) {
398
for (i = 0; (i < ectx->ring_size)&&(i < n_buffers); i++) {
399
399
bstatus = NWL_RING_GET(ring, DMA_BD_BUFL_STATUS_OFFSET);
401
401
buffers[i].error = bstatus & (DMA_BD_ERROR_MASK/*|DMA_BD_SHORT_MASK*/);