4
* @brief This file contains the functions handling user space memory.
5
* @author Guillermo Marcus
9
#include <linux/version.h>
10
#include <linux/string.h>
11
#include <linux/types.h>
12
#include <linux/list.h>
13
#include <linux/interrupt.h>
14
#include <linux/pci.h>
15
#include <linux/cdev.h>
16
#include <linux/wait.h>
18
#include <linux/pagemap.h>
19
#include <linux/sched.h>
21
#include "config.h" /* compile-time configuration */
22
#include "compat.h" /* compatibility definitions for older linux */
23
#include "pciDriver.h" /* external interface for the driver */
24
#include "common.h" /* internal definitions for all parts */
25
#include "umem.h" /* prototypes for kernel memory */
26
#include "sysfs.h" /* prototypes for sysfs */
30
* Reserve a new scatter/gather list and map it from memory to PCI bus addresses.
33
/*
 * pcidriver_umem_sgmap - pin a user-space buffer and map it for PCI DMA.
 *
 * Builds a page list from the caller's virtual range (umem_handle->vma /
 * ->size), locks the pages, fills a scatterlist, maps it with pci_map_sg(),
 * and caches the mapping in the device's umem list (plus a sysfs attribute).
 *
 * NOTE(review): this file is a garbled extraction — the interleaved bare
 * numbers are leftover line numbers, and several statements (braces, the
 * declarations of res/nr_pages/i/pages, the get_user_pages() call, the
 * return paths and the error-label cleanup tail) are missing from this
 * view. Code below is left byte-identical; only comments were added.
 */
int pcidriver_umem_sgmap(pcidriver_privdata_t *privdata, umem_handle_t *umem_handle)
37
struct scatterlist *sg = NULL;
38
pcidriver_umem_entry_t *umem_entry;
40
unsigned long count,offset,length;
43
* We do some checks first. Then, the following is necessary to create a
44
* Scatter/Gather list from a user memory area:
45
* - Determine the number of pages
46
* - Get the pages for the memory area
48
* - Create a scatter/gather list of the pages
49
* - Map the list from memory to PCI bus addresses
52
* - Create an entry on the umem list of the device, to cache the mapping.
53
* - Create a sysfs attribute that gives easy access to the SG list
57
if (umem_handle->size == 0)
60
/* Direction is ignored during mapping. */
61
/* We assume bidirectional buffers always, except when sync'ing */
63
/* calculate the number of pages */
64
/* ~PAGE_MASK == PAGE_SIZE-1, so this rounds the (possibly unaligned)
 * [vma, vma+size) byte span up to a whole number of pages. */
nr_pages = ((umem_handle->vma & ~PAGE_MASK) + umem_handle->size + ~PAGE_MASK) >> PAGE_SHIFT;
66
mod_info_dbg("nr_pages computed: %u\n", nr_pages);
68
/* Allocate space for the page information */
69
/* This can be very big, so we use vmalloc */
70
if ((pages = vmalloc(nr_pages * sizeof(*pages))) == NULL)
73
mod_info_dbg("allocated space for the pages.\n");
75
/* Allocate space for the scatterlist */
76
/* We do not know how many entries will be, but the maximum is nr_pages. */
77
/* This can be very big, so we use vmalloc */
78
if ((sg = vmalloc(nr_pages * sizeof(*sg))) == NULL)
79
goto umem_sgmap_pages;
81
sg_init_table(sg, nr_pages);
83
mod_info_dbg("allocated space for the SG list.\n");
85
/* Get the page information */
86
/* NOTE(review): "¤t" below is mojibake for "&current" (the HTML
 * entity &curren; corrupted the text) — this should read
 * down_read(&current->mm->mmap_sem). Fix the encoding before building. */
down_read(¤t->mm->mmap_sem);
93
/* NOTE(review): the get_user_pages() call that this argument belongs to
 * is missing from this extraction — presumably pins [vma, vma+size);
 * confirm against the original source. */
0, /* do not force, FIXME: shall I? */
96
/* NOTE(review): same "&current" mojibake as above. */
up_read(¤t->mm->mmap_sem);
98
/* Error, not all pages mapped */
99
if (res < (int)nr_pages) {
100
mod_info("Could not map all user pages (%d of %d)\n", res, nr_pages);
101
/* If only some pages could be mapped, we release those. If a real
102
* error occured, we set nr_pages to 0 */
103
nr_pages = (res > 0 ? res : 0);
104
goto umem_sgmap_unmap;
107
mod_info_dbg("Got the pages (%d).\n", res);
109
/* Lock the pages, then populate the SG list with the pages */
110
/* page0 is different: it may start mid-page, so it carries the
 * intra-page offset and a possibly shortened length */
111
if ( !PageReserved(pages[0]) )
112
compat_lock_page(pages[0]);
114
offset = (umem_handle->vma & ~PAGE_MASK);
115
length = (umem_handle->size > (PAGE_SIZE-offset) ? (PAGE_SIZE-offset) : umem_handle->size);
117
sg_set_page(&sg[0], pages[0], length, offset);
119
/* count = bytes still to cover after the first (partial) page */
count = umem_handle->size - length;
120
for(i=1;i<nr_pages;i++) {
121
/* Lock page first */
122
if ( !PageReserved(pages[i]) )
123
compat_lock_page(pages[i]);
125
/* Populate the list: full pages except possibly the last one */
126
sg_set_page(&sg[i], pages[i], ((count > PAGE_SIZE) ? PAGE_SIZE : count), 0);
127
count -= sg[i].length;
130
/* Use the page list to populate the SG list */
131
/* SG entries may be merged, res is the number of used entries */
132
/* We have originally nr_pages entries in the sg list */
133
if ((nents = pci_map_sg(privdata->pdev, sg, nr_pages, PCI_DMA_BIDIRECTIONAL)) == 0)
134
goto umem_sgmap_unmap;
136
mod_info_dbg("Mapped SG list (%d entries).\n", nents);
138
/* Add an entry to the umem_list of the device, and update the handle with the id */
139
/* Allocate space for the new umem entry */
140
if ((umem_entry = kmalloc(sizeof(*umem_entry), GFP_KERNEL)) == NULL)
141
goto umem_sgmap_entry;
143
/* Fill entry to be added to the umem list */
144
umem_entry->id = atomic_inc_return(&privdata->umem_count) - 1;
145
umem_entry->nr_pages = nr_pages; /* Will be needed when unmapping */
146
umem_entry->pages = pages;
147
umem_entry->nents = nents;
150
if (pcidriver_sysfs_initialize_umem(privdata, umem_entry->id, &(umem_entry->sysfs_attr)) != 0)
151
goto umem_sgmap_name_fail;
153
/* Add entry to the umem list */
154
spin_lock( &(privdata->umemlist_lock) );
155
list_add_tail( &(umem_entry->list), &(privdata->umem_list) );
156
spin_unlock( &(privdata->umemlist_lock) );
158
/* Update the Handle with the Handle ID of the entry */
159
umem_handle->handle_id = umem_entry->id;
163
/* --- error unwind path (goto-based cleanup); the success return and
 * some labels are missing from this extraction --- */
umem_sgmap_name_fail:
166
pci_unmap_sg( privdata->pdev, sg, nr_pages, PCI_DMA_BIDIRECTIONAL );
170
/* Unlock, dirty and release every page that was successfully pinned */
for(i=0;i<nr_pages;i++) {
171
if (PageLocked(pages[i]))
172
compat_unlock_page(pages[i]);
173
if (!PageReserved(pages[i]))
174
set_page_dirty(pages[i]);
175
page_cache_release(pages[i]);
187
* Unmap a scatter/gather list
190
/*
 * pcidriver_umem_sgunmap - undo pcidriver_umem_sgmap for one entry.
 *
 * Removes the sysfs attribute, unmaps the SG list from the PCI bus,
 * dirties/unlocks/releases the pinned pages, unlinks the entry from the
 * device's umem list, and frees the vmalloc'd page and SG arrays.
 *
 * NOTE(review): garbled extraction — closing braces, the declaration of
 * the loop index i, the kfree of umem_entry and the return statement are
 * not visible here. Code left byte-identical; only comments added.
 */
int pcidriver_umem_sgunmap(pcidriver_privdata_t *privdata, pcidriver_umem_entry_t *umem_entry)
193
pcidriver_sysfs_remove(privdata, &(umem_entry->sysfs_attr));
195
/* Unmap user memory */
196
pci_unmap_sg( privdata->pdev, umem_entry->sg, umem_entry->nr_pages, PCI_DMA_BIDIRECTIONAL );
198
/* Release the pages */
199
if (umem_entry->nr_pages > 0) {
200
for(i=0;i<(umem_entry->nr_pages);i++) {
201
/* Mark pages as Dirty and unlock it */
202
if ( !PageReserved( umem_entry->pages[i] )) {
203
/* Dirty so the kernel writes the DMA'd data back to backing store */
SetPageDirty( umem_entry->pages[i] );
204
compat_unlock_page(umem_entry->pages[i]);
206
/* and release it from the cache */
207
page_cache_release( umem_entry->pages[i] );
211
/* Remove the umem list entry (list mutation under the umemlist spinlock) */
212
spin_lock( &(privdata->umemlist_lock) );
213
list_del( &(umem_entry->list) );
214
spin_unlock( &(privdata->umemlist_lock) );
216
/* Release SG list and page list memory */
217
/* These two are in the vm area of the kernel */
218
vfree(umem_entry->pages);
219
vfree(umem_entry->sg);
221
/* Release umem_entry memory */
229
* Unmap all scatter/gather lists.
232
/*
 * pcidriver_umem_sgunmap_all - unmap every cached SG mapping of the device.
 *
 * Walks the device's umem list with list_for_each_safe because
 * pcidriver_umem_sgunmap() deletes the current entry from the list.
 *
 * NOTE(review): garbled extraction — the opening/closing braces and the
 * return statement are not visible here. Code left byte-identical.
 */
int pcidriver_umem_sgunmap_all(pcidriver_privdata_t *privdata)
234
struct list_head *ptr, *next;
235
pcidriver_umem_entry_t *umem_entry;
237
/* iterate safely over the entries and delete them */
238
list_for_each_safe( ptr, next, &(privdata->umem_list) ) {
239
umem_entry = list_entry(ptr, pcidriver_umem_entry_t, list );
240
pcidriver_umem_sgunmap( privdata, umem_entry ); /* spin lock inside! */
248
* Copies the scatter/gather list from kernelspace to userspace.
251
/*
 * pcidriver_umem_sgget - export a cached kernel SG list in user format.
 *
 * Looks up the umem entry by handle_id, validates that the caller's
 * umem_sglist has room, then copies DMA address/length pairs into it.
 * When umem_sglist->type == PCIDRIVER_SG_MERGED, physically contiguous
 * entries are coalesced into one. Two code paths exist, selected at
 * compile time: >= 2.6.24 uses the for_each_sg() iterator (chained
 * scatterlists), older kernels walk the array with a raw sg++ pointer.
 *
 * NOTE(review): garbled extraction — braces, #else/#endif, the
 * declarations of i/idx/cur_addr, idx increments, the "else" arms of the
 * merge logic, copy_to_user (if any) and return statements are not
 * visible here. Code left byte-identical; only comments added.
 */
int pcidriver_umem_sgget(pcidriver_privdata_t *privdata, umem_sglist_t *umem_sglist)
254
pcidriver_umem_entry_t *umem_entry;
255
struct scatterlist *sg;
258
unsigned int cur_size;
260
/* Find the associated umem_entry for this buffer */
261
umem_entry = pcidriver_umem_find_entry_id( privdata, umem_sglist->handle_id );
262
if (umem_entry == NULL)
263
return -EINVAL; /* umem_handle is not valid */
265
/* Check if passed SG list is enough */
266
if (umem_sglist->nents < umem_entry->nents)
267
return -EINVAL; /* sg has not enough entries */
269
/* Copy the SG list to the user format */
270
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
271
if (umem_sglist->type == PCIDRIVER_SG_MERGED) {
272
/* Merged mode: seed sg[0] with the first entry, then append or
 * coalesce each following entry */
for_each_sg(umem_entry->sg, sg, umem_entry->nents, i ) {
274
umem_sglist->sg[0].addr = sg_dma_address( sg );
275
umem_sglist->sg[0].size = sg_dma_len( sg );
279
cur_addr = sg_dma_address( sg );
280
cur_size = sg_dma_len( sg );
282
/* Check if entry fits after current entry */
283
if (cur_addr == (umem_sglist->sg[idx].addr + umem_sglist->sg[idx].size)) {
284
/* Contiguous with the previous chunk: extend it in place */
umem_sglist->sg[idx].size += cur_size;
288
/* Skip if the entry is zero-length (yes, it can happen.... at the end of the list) */
292
/* None of the above, add new entry */
294
umem_sglist->sg[idx].addr = cur_addr;
295
umem_sglist->sg[idx].size = cur_size;
298
/* Set the used size of the SG list */
299
umem_sglist->nents = idx+1;
301
/* Non-merged mode (>= 2.6.24): 1:1 copy of every SG entry */
for_each_sg(umem_entry->sg, sg, umem_entry->nents, i ) {
302
mod_info("entry: %d\n",i);
303
umem_sglist->sg[i].addr = sg_dma_address( sg );
304
umem_sglist->sg[i].size = sg_dma_len( sg );
307
/* Set the used size of the SG list */
308
/* Check if the last one is zero-length */
309
if ( umem_sglist->sg[ umem_entry->nents - 1].size == 0)
310
umem_sglist->nents = umem_entry->nents -1;
312
umem_sglist->nents = umem_entry->nents;
315
/* --- pre-2.6.24 variant: same logic, array-style sg++ iteration --- */
if (umem_sglist->type == PCIDRIVER_SG_MERGED) {
316
/* Merge entries that are contiguous into a single entry */
317
/* Non-optimal but fast for most cases */
318
/* First one always true */
320
umem_sglist->sg[0].addr = sg_dma_address( sg );
321
umem_sglist->sg[0].size = sg_dma_len( sg );
325
/* Iterate over the SG entries */
326
for(i=1; i< umem_entry->nents; i++, sg++ ) {
327
cur_addr = sg_dma_address( sg );
328
cur_size = sg_dma_len( sg );
330
/* Check if entry fits after current entry */
331
if (cur_addr == (umem_sglist->sg[idx].addr + umem_sglist->sg[idx].size)) {
332
umem_sglist->sg[idx].size += cur_size;
336
/* Skip if the entry is zero-length (yes, it can happen.... at the end of the list) */
340
/* None of the above, add new entry */
342
umem_sglist->sg[idx].addr = cur_addr;
343
umem_sglist->sg[idx].size = cur_size;
345
/* Set the used size of the SG list */
346
umem_sglist->nents = idx+1;
348
/* Assume pci_map_sg made a good job (ehem..) and just copy it.
349
* actually, now I assume it just gives them plainly to me. */
350
for(i=0, sg=umem_entry->sg ; i< umem_entry->nents; i++, sg++ ) {
351
umem_sglist->sg[i].addr = sg_dma_address( sg );
352
umem_sglist->sg[i].size = sg_dma_len( sg );
354
/* Set the used size of the SG list */
355
/* Check if the last one is zero-length */
356
if ( umem_sglist->sg[ umem_entry->nents - 1].size == 0)
357
umem_sglist->nents = umem_entry->nents -1;
359
umem_sglist->nents = umem_entry->nents;
368
* Sync user space memory from/to device
371
/*
 * pcidriver_umem_sync - synchronize a mapped user buffer with the device.
 *
 * Looks up the umem entry by handle_id and issues the DMA sync matching
 * umem_handle->dir: for-device before a device read, for-cpu after a
 * device write, or both for bidirectional. Kernels >= 2.6.11 use the
 * split for_device/for_cpu API; older kernels use pci_dma_sync_sg().
 *
 * NOTE(review): garbled extraction — braces, break statements, the
 * #else/#endif and the final return are not visible here; without the
 * missing breaks the cases would fall through, so confirm against the
 * original source. Code left byte-identical; only comments added.
 */
int pcidriver_umem_sync( pcidriver_privdata_t *privdata, umem_handle_t *umem_handle )
373
pcidriver_umem_entry_t *umem_entry;
375
/* Find the associated umem_entry for this buffer */
376
umem_entry = pcidriver_umem_find_entry_id( privdata, umem_handle->handle_id );
377
if (umem_entry == NULL)
378
return -EINVAL; /* umem_handle is not valid */
380
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
381
switch (umem_handle->dir) {
382
case PCIDRIVER_DMA_TODEVICE:
383
pci_dma_sync_sg_for_device( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_TODEVICE );
385
case PCIDRIVER_DMA_FROMDEVICE:
386
pci_dma_sync_sg_for_cpu( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_FROMDEVICE );
388
case PCIDRIVER_DMA_BIDIRECTIONAL:
389
pci_dma_sync_sg_for_device( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_BIDIRECTIONAL );
390
pci_dma_sync_sg_for_cpu( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_BIDIRECTIONAL );
393
/* default: unknown direction value */
return -EINVAL; /* wrong direction parameter */
396
/* Pre-2.6.11 fallback: single combined sync call per direction */
switch (umem_handle->dir) {
397
case PCIDRIVER_DMA_TODEVICE:
398
pci_dma_sync_sg( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_TODEVICE );
400
case PCIDRIVER_DMA_FROMDEVICE:
401
pci_dma_sync_sg( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_FROMDEVICE );
403
case PCIDRIVER_DMA_BIDIRECTIONAL:
404
pci_dma_sync_sg( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_BIDIRECTIONAL );
407
/* default: unknown direction value */
return -EINVAL; /* wrong direction parameter */
416
* Get the pcidriver_umem_entry_t structure for the given id.
418
* @param id ID of the umem entry to search for
421
/*
 * pcidriver_umem_find_entry_id - look up a umem entry by its id.
 *
 * Walks the device's umem list under umemlist_lock and, on a match,
 * releases the lock before handing the entry back.
 *
 * NOTE(review): garbled extraction — braces and both return statements
 * (return entry on match, return NULL after the final unlock) are not
 * visible here. The returned pointer is used after the lock is dropped;
 * callers rely on entries not being freed concurrently.
 */
pcidriver_umem_entry_t *pcidriver_umem_find_entry_id(pcidriver_privdata_t *privdata, int id)
423
struct list_head *ptr;
424
pcidriver_umem_entry_t *entry;
426
spin_lock(&(privdata->umemlist_lock));
427
list_for_each(ptr, &(privdata->umem_list)) {
428
entry = list_entry(ptr, pcidriver_umem_entry_t, list );
430
if (entry->id == id) {
431
/* Found: drop the lock before returning the entry */
spin_unlock( &(privdata->umemlist_lock) );
436
/* Not found: drop the lock (return NULL missing from this view) */
spin_unlock(&(privdata->umemlist_lock));