13
#include "nwl_private.h"
15
#include "nwl_defines.h"
17
#include "nwl_engine_buffers.h"
19
/**
 * Read the capability register of one NWL DMA engine and populate its
 * description (engine number, transfer direction, engine type, max
 * byte-count width).
 *
 * @param ctx  NWL DMA driver context
 * @param info engine description to fill; info->base_addr is set to \a base
 * @param base mapped base address of this engine's register block
 * @return 0 on success;
 *         PCILIB_ERROR_NOTAVAILABLE if no engine is present at this address;
 *         PCILIB_ERROR_INVALID_DATA if the reported engine number is out of range
 */
int dma_nwl_read_engine_config(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info, char *base) {
    uint32_t val;

    info->base_addr = base;

    nwl_read_register(val, ctx, base, REG_DMA_ENG_CAP);

	// A cleared "present" bit means no engine sits behind this register block
    if ((val & DMA_ENG_PRESENT_MASK) == 0) return PCILIB_ERROR_NOTAVAILABLE;

    info->desc.addr = (val & DMA_ENG_NUMBER) >> DMA_ENG_NUMBER_SHIFT;
	// NOTE(review): boundary test uses '>' — confirm the engines array really has
	// PCILIB_MAX_DMA_ENGINES + 1 slots, otherwise this should be '>='
    if ((info->desc.addr > PCILIB_MAX_DMA_ENGINES)||(info->desc.addr < 0)) return PCILIB_ERROR_INVALID_DATA;

	// Direction is encoded in the capability word: C2S means device-to-host
    switch (val & DMA_ENG_DIRECTION_MASK) {
     case DMA_ENG_C2S:
        info->desc.direction = PCILIB_DMA_FROM_DEVICE;
     break;
     default:
        info->desc.direction = PCILIB_DMA_TO_DEVICE;
    }

    switch (val & DMA_ENG_TYPE_MASK) {
     case DMA_ENG_BLOCK:
        info->desc.type = PCILIB_DMA_TYPE_BLOCK;
     break;
     case DMA_ENG_PACKET:
        info->desc.type = PCILIB_DMA_TYPE_PACKET;
     break;
     default:
        info->desc.type = PCILIB_DMA_TYPE_UNKNOWN;
    }

	// Width (in bits) of the buffer-descriptor byte-count field
    info->desc.addr_bits = (val & DMA_ENG_BD_MAX_BC) >> DMA_ENG_BD_MAX_BC_SHIFT;

    return 0;
}
57
/**
 * Start (and, unless the engine is being re-used, reset and re-program) a
 * single NWL DMA engine.
 *
 * Allocates the ring of buffer descriptors and the data pages, resets the
 * engine, programs the descriptor ring addresses, and enables the engine
 * (optionally with interrupts when NWL_GENERATE_DMA_IRQ is defined).
 * For a receive (FROM_DEVICE) engine all but one descriptor are handed to
 * the hardware immediately so it can start filling buffers.
 *
 * @param ctx driver context
 * @param dma engine index within ctx->engines
 * @return 0 on success (also when the engine was already started);
 *         an allocation/IRQ error code, or PCILIB_ERROR_TIMEOUT if the
 *         engine fails to leave the reset state in time
 */
int dma_nwl_start_engine(nwl_dma_t *ctx, pcilib_dma_engine_t dma) {
    int err;
    uint32_t val;
    uintptr_t ring_pa;
    struct timeval start, cur;

    pcilib_nwl_engine_description_t *info = ctx->engines + dma;
    char *base = ctx->engines[dma].base_addr;

    if (info->started) return 0;

	// This will only successed if there are no parallel access to DMA engine
    err = dma_nwl_allocate_engine_buffers(ctx, info);
    if (err) {
	    // mark started so dma_nwl_stop_engine actually performs the cleanup
	info->started = 1;
	dma_nwl_stop_engine(ctx, dma);
	return err;
    }

    if (info->reused) {
	    // Buffers were inherited from a previous session: keep hardware state
	info->preserve = 1;

	dma_nwl_acknowledge_irq((pcilib_dma_context_t*)ctx, PCILIB_DMA_IRQ, dma);

#ifdef NWL_GENERATE_DMA_IRQ
	dma_nwl_enable_engine_irq(ctx, dma);
#endif /* NWL_GENERATE_DMA_IRQ */
    } else {
	err = dma_nwl_disable_engine_irq(ctx, dma);
	if (err) {
	    info->started = 1;
	    dma_nwl_stop_engine(ctx, dma);
	    return err;
	}

	    // Disable Engine & Reseting
	val = DMA_ENG_DISABLE|DMA_ENG_USER_RESET;
	nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);

	    // Poll until the engine leaves the running state or we time out
	gettimeofday(&start, NULL);
	do {
	    nwl_read_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
	    gettimeofday(&cur, NULL);
	} while ((val & (DMA_ENG_STATE_MASK|DMA_ENG_USER_RESET))&&(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < PCILIB_REGISTER_TIMEOUT));

	if (val & (DMA_ENG_STATE_MASK|DMA_ENG_USER_RESET)) {
	    pcilib_error("Timeout during reset of DMA engine %i", info->desc.addr);

	    info->started = 1;
	    dma_nwl_stop_engine(ctx, dma);
	    return PCILIB_ERROR_TIMEOUT;
	}

	    // Now pulse the engine reset itself and wait for it to self-clear
	val = DMA_ENG_RESET;
	nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);

	gettimeofday(&start, NULL);
	do {
	    nwl_read_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
	    gettimeofday(&cur, NULL);
	} while ((val & DMA_ENG_RESET)&&(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < PCILIB_REGISTER_TIMEOUT));

	if (val & DMA_ENG_RESET) {
	    pcilib_error("Timeout during reset of DMA engine %i", info->desc.addr);

	    info->started = 1;
	    dma_nwl_stop_engine(ctx, dma);
	    return PCILIB_ERROR_TIMEOUT;
	}

	dma_nwl_acknowledge_irq((pcilib_dma_context_t*)ctx, PCILIB_DMA_IRQ, dma);

	    // Point both the hardware and software descriptor pointers at the ring start
	ring_pa = pcilib_kmem_get_pa(ctx->pcilib, info->ring);
	nwl_write_register(ring_pa, ctx, info->base_addr, REG_DMA_ENG_NEXT_BD);
	nwl_write_register(ring_pa, ctx, info->base_addr, REG_SW_NEXT_BD);

	__sync_synchronize();

	nwl_read_register(val, ctx, info->base_addr, REG_DMA_ENG_CTRL_STATUS);
	val |= (DMA_ENG_ENABLE);
	nwl_write_register(val, ctx, info->base_addr, REG_DMA_ENG_CTRL_STATUS);

	__sync_synchronize();

#ifdef NWL_GENERATE_DMA_IRQ
	dma_nwl_enable_engine_irq(ctx, dma);
#endif /* NWL_GENERATE_DMA_IRQ */

	if (info->desc.direction == PCILIB_DMA_FROM_DEVICE) {
		// Hand all descriptors except one to the hardware so it can receive
	    ring_pa += (info->ring_size - 1) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
	    nwl_write_register(ring_pa, ctx, info->base_addr, REG_SW_NEXT_BD);

	    info->tail = 0;
	    info->head = (info->ring_size - 1);
	} else {
	    info->tail = 0;
	    info->head = 0;
	}
    }

    info->started = 1;

    return 0;
}
165
/**
 * Stop a single NWL DMA engine and release its kernel buffers.
 *
 * Unless info->preserve is set, the engine is disabled and reset (stopping
 * alone is not sufficient) and its descriptor pointers are rewound; the
 * descriptor ring and data pages are then freed. When preserving, buffers
 * are returned with the REUSE flag so a later session can pick them up.
 *
 * @param ctx driver context
 * @param dma engine index within ctx->engines
 * @return 0 on success (also when the engine was not started);
 *         an error code from dma_nwl_disable_engine_irq otherwise
 */
int dma_nwl_stop_engine(nwl_dma_t *ctx, pcilib_dma_engine_t dma) {
    int err;
    uint32_t val;
    uintptr_t ring_pa;
    struct timeval start, cur;
    pcilib_kmem_flags_t flags;

    pcilib_nwl_engine_description_t *info = ctx->engines + dma;
    char *base = ctx->engines[dma].base_addr;

    if (!info->started) return 0;

    info->started = 0;

    err = dma_nwl_disable_engine_irq(ctx, dma);
    if (err) return err;

    if (!info->preserve) {
	    // Stopping DMA is not enough reset is required
	val = DMA_ENG_DISABLE|DMA_ENG_USER_RESET|DMA_ENG_RESET;
	nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);

	    // Wait until the engine reports it is no longer running (bounded poll)
	gettimeofday(&start, NULL);
	do {
	    nwl_read_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
	    gettimeofday(&cur, NULL);
	} while ((val & (DMA_ENG_RUNNING))&&(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < PCILIB_REGISTER_TIMEOUT));

	    // Rewind both descriptor pointers to the ring start
	if (info->ring) {
	    ring_pa = pcilib_kmem_get_pa(ctx->pcilib, info->ring);
	    nwl_write_register(ring_pa, ctx, info->base_addr, REG_DMA_ENG_NEXT_BD);
	    nwl_write_register(ring_pa, ctx, info->base_addr, REG_SW_NEXT_BD);
	}
    }

    dma_nwl_acknowledge_irq((pcilib_dma_context_t*)ctx, PCILIB_DMA_IRQ, dma);

	// Preserved buffers are kept around for re-use; otherwise drop persistence
    if (info->preserve) {
	flags = PCILIB_KMEM_FLAG_REUSE;
    } else {
	flags = PCILIB_KMEM_FLAG_HARDWARE|PCILIB_KMEM_FLAG_PERSISTENT;
    }

	// Clean buffers
    if (info->ring) {
	pcilib_free_kernel_memory(ctx->pcilib, info->ring, flags);
	info->ring = NULL;
    }

    if (info->pages) {
	pcilib_free_kernel_memory(ctx->pcilib, info->pages, flags);
	info->pages = NULL;
    }

    return 0;
}
223
/**
 * Write a data fragment to the device, splitting it into page-sized DMA
 * buffers and pushing each buffer to the engine.
 *
 * @param vctx    generic DMA context (actually an nwl_dma_t)
 * @param dma     engine index
 * @param addr    device address (unused by this engine type)
 * @param size    number of bytes to send from \a data
 * @param flags   PCILIB_DMA_FLAG_EOP marks the last block of a packet;
 *                PCILIB_DMA_FLAG_WAIT waits until all buffers are drained
 * @param timeout per-buffer timeout in microseconds
 * @param data    bytes to transmit
 * @param written if non-NULL, receives the number of bytes actually queued
 * @return 0 on success, PCILIB_ERROR_TIMEOUT if no buffer became free in
 *         time, or an error from dma_nwl_start/dma_nwl_push_buffer
 */
int dma_nwl_write_fragment(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uintptr_t addr, size_t size, pcilib_dma_flags_t flags, pcilib_timeout_t timeout, void *data, size_t *written) {
    int err;
    size_t pos;
    size_t bufnum;
    nwl_dma_t *ctx = (nwl_dma_t*)vctx;

    pcilib_nwl_engine_description_t *info = ctx->engines + dma;

    err = dma_nwl_start(vctx, dma, PCILIB_DMA_FLAGS_DEFAULT);
    if (err) return err;

    for (pos = 0; pos < size; pos += info->page_size) {
	int block_size = min2(size - pos, info->page_size);

	bufnum = dma_nwl_get_next_buffer(ctx, info, 1, timeout);
	if (bufnum == PCILIB_DMA_BUFFER_INVALID) {
	    if (written) *written = pos;
	    return PCILIB_ERROR_TIMEOUT;
	}

	void *buf = pcilib_kmem_get_block_ua(ctx->pcilib, info->pages, bufnum);

	pcilib_kmem_sync_block(ctx->pcilib, info->pages, PCILIB_KMEM_SYNC_FROMDEVICE, bufnum);
	memcpy(buf, data, block_size);
	pcilib_kmem_sync_block(ctx->pcilib, info->pages, PCILIB_KMEM_SYNC_TODEVICE, bufnum);

	    // EOP is set only on the final block of the fragment
	err = dma_nwl_push_buffer(ctx, info, block_size, (flags&PCILIB_DMA_FLAG_EOP)&&((pos + block_size) == size), timeout);
	if (err) {
	    if (written) *written = pos;
	    return err;
	}

	data = (char*)data + block_size;
    }

    if (written) *written = size;

    if (flags&PCILIB_DMA_FLAG_WAIT) {
	    // Wait until all but one buffer are returned, i.e. the engine is idle
	bufnum = dma_nwl_get_next_buffer(ctx, info, PCILIB_NWL_DMA_PAGES - 1, timeout);
	if (bufnum == PCILIB_DMA_BUFFER_INVALID) return PCILIB_ERROR_TIMEOUT;
    }

    return 0;
}
268
/**
 * Stream data from the device, delivering each received DMA buffer to the
 * caller-supplied callback until the callback asks to stop.
 *
 * The callback's return value both terminates the loop (0 stops after the
 * current buffer) and selects the wait policy for the next buffer via the
 * PCILIB_STREAMING_* flags.
 *
 * @param vctx    generic DMA context (actually an nwl_dma_t)
 * @param dma     engine index
 * @param addr    device address (unused by this engine type)
 * @param size    requested size hint (unused by this engine type)
 * @param flags   DMA flags
 * @param timeout timeout used when the callback requests STREAMING_WAIT
 * @param cb      callback receiving (cbattr, eop-flag, size, buffer)
 * @param cbattr  opaque pointer forwarded to \a cb
 * @return 0 on success or graceful timeout; PCILIB_ERROR_TIMEOUT when the
 *         callback demanded data (STREAMING_FAIL) but none arrived; a
 *         negative callback return is propagated as its positive error code
 */
int dma_nwl_stream_read(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uintptr_t addr, size_t size, pcilib_dma_flags_t flags, pcilib_timeout_t timeout, pcilib_dma_callback_t cb, void *cbattr) {
    int err, ret = PCILIB_STREAMING_REQ_PACKET;
    pcilib_timeout_t wait = 0;
    size_t bufnum;
    size_t bufsize;
    int eop;

    nwl_dma_t *ctx = (nwl_dma_t*)vctx;

    pcilib_nwl_engine_description_t *info = ctx->engines + dma;

    err = dma_nwl_start(vctx, dma, PCILIB_DMA_FLAGS_DEFAULT);
    if (err) return err;

    do {
	    // Wait policy for the next buffer is dictated by the callback's last answer
	switch (ret&PCILIB_STREAMING_TIMEOUT_MASK) {
	    case PCILIB_STREAMING_CONTINUE: wait = PCILIB_DMA_TIMEOUT; break;
	    case PCILIB_STREAMING_WAIT: wait = timeout; break;
//	    case PCILIB_STREAMING_CHECK: wait = 0; break;
	}

	bufnum = dma_nwl_wait_buffer(ctx, info, &bufsize, &eop, wait);
	if (bufnum == PCILIB_DMA_BUFFER_INVALID) {
		// Timing out is only an error if the callback insisted on more data
	    return (ret&PCILIB_STREAMING_FAIL)?PCILIB_ERROR_TIMEOUT:0;
	}

	    // EOP is not respected in IPE Camera
	if (ctx->dmactx.ignore_eop) eop = 1;

	pcilib_kmem_sync_block(ctx->pcilib, info->pages, PCILIB_KMEM_SYNC_FROMDEVICE, bufnum);
	void *buf = pcilib_kmem_get_block_ua(ctx->pcilib, info->pages, bufnum);
	ret = cb(cbattr, (eop?PCILIB_DMA_FLAG_EOP:0), bufsize, buf);
	if (ret < 0) return -ret;
//	DS: Fixme, it looks like we can avoid calling this for the sake of performance
//	pcilib_kmem_sync_block(ctx->pcilib, info->pages, PCILIB_KMEM_SYNC_TODEVICE, bufnum);
	dma_nwl_return_buffer(ctx, info);
    } while (ret);

    return 0;
}
314
/**
 * Wait until all outstanding DMA buffers of the engine have been drained,
 * i.e. PCILIB_NWL_DMA_PAGES - 1 buffers are free again.
 *
 * NOTE(review): the \a timeout parameter is ignored — the wait uses the
 * fixed PCILIB_DMA_TIMEOUT instead; confirm whether callers expect their
 * timeout to be honored here.
 *
 * @param ctx     driver context
 * @param dma     engine index
 * @param timeout requested timeout (currently unused, see note above)
 * @return 0 when the engine drained, PCILIB_ERROR_TIMEOUT otherwise
 */
int dma_nwl_wait_completion(nwl_dma_t * ctx, pcilib_dma_engine_t dma, pcilib_timeout_t timeout) {
    if (dma_nwl_get_next_buffer(ctx, ctx->engines + dma, PCILIB_NWL_DMA_PAGES - 1, PCILIB_DMA_TIMEOUT) == (PCILIB_NWL_DMA_PAGES - 1)) return 0;
    else return PCILIB_ERROR_TIMEOUT;
}