/**
 * @file kmem.c
 * @brief This file contains all functions dealing with kernel memory.
 * @author Guillermo Marcus
 */
9
#include <linux/version.h>
10
#include <linux/string.h>
11
#include <linux/types.h>
12
#include <linux/list.h>
13
#include <linux/interrupt.h>
14
#include <linux/pci.h>
15
#include <linux/cdev.h>
16
#include <linux/wait.h>
18
#include <linux/pagemap.h>
20
#include "config.h" /* compile-time configuration */
21
#include "compat.h" /* compatibility definitions for older linux */
22
#include "pciDriver.h" /* external interface for the driver */
23
#include "common.h" /* internal definitions for all parts */
24
#include "kmem.h" /* prototypes for kernel memory */
25
#include "sysfs.h" /* prototypes for sysfs */
29
* Allocates new kernel memory including the corresponding management structure, makes
30
* it available via sysfs if possible.
33
int pcidriver_kmem_alloc(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle)
35
pcidriver_kmem_entry_t *kmem_entry;
38
/* First, allocate zeroed memory for the kmem_entry */
39
if ((kmem_entry = kcalloc(1, sizeof(pcidriver_kmem_entry_t), GFP_KERNEL)) == NULL)
40
goto kmem_alloc_entry_fail;
42
/* Initialize the kmem_entry */
43
kmem_entry->id = atomic_inc_return(&privdata->kmem_count) - 1;
44
kmem_entry->size = kmem_handle->size;
45
kmem_handle->handle_id = kmem_entry->id;
47
/* Initialize sysfs if possible */
48
if (pcidriver_sysfs_initialize_kmem(privdata, kmem_entry->id, &(kmem_entry->sysfs_attr)) != 0)
49
goto kmem_alloc_mem_fail;
51
/* ...and allocate the DMA memory */
52
/* note this is a memory pair, referencing the same area: the cpu address (cpua)
53
* and the PCI bus address (pa). The CPU and PCI addresses may not be the same.
54
* The CPU sees only CPU addresses, while the device sees only PCI addresses.
55
* CPU address is used for the mmap (internal to the driver), and
56
* PCI address is the address passed to the DMA Controller in the device.
58
retptr = pci_alloc_consistent( privdata->pdev, kmem_handle->size, &(kmem_entry->dma_handle) );
60
goto kmem_alloc_mem_fail;
61
kmem_entry->cpua = (unsigned long)retptr;
62
kmem_handle->pa = (unsigned long)(kmem_entry->dma_handle);
64
set_pages_reserved_compat(kmem_entry->cpua, kmem_entry->size);
66
/* Add the kmem_entry to the list of the device */
67
spin_lock( &(privdata->kmemlist_lock) );
68
list_add_tail( &(kmem_entry->list), &(privdata->kmem_list) );
69
spin_unlock( &(privdata->kmemlist_lock) );
75
kmem_alloc_entry_fail:
81
* Called via sysfs, frees kernel memory and the corresponding management structure
84
int pcidriver_kmem_free( pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle )
86
pcidriver_kmem_entry_t *kmem_entry;
88
/* Find the associated kmem_entry for this buffer */
89
if ((kmem_entry = pcidriver_kmem_find_entry(privdata, kmem_handle)) == NULL)
90
return -EINVAL; /* kmem_handle is not valid */
92
return pcidriver_kmem_free_entry(privdata, kmem_entry);
97
* Called when cleaning up, frees all kernel memory and their corresponding management structure
100
int pcidriver_kmem_free_all(pcidriver_privdata_t *privdata)
102
struct list_head *ptr, *next;
103
pcidriver_kmem_entry_t *kmem_entry;
105
/* iterate safely over the entries and delete them */
106
list_for_each_safe(ptr, next, &(privdata->kmem_list)) {
107
kmem_entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
108
pcidriver_kmem_free_entry(privdata, kmem_entry); /* spin lock inside! */
116
* Synchronize memory to/from the device (or in both directions).
119
int pcidriver_kmem_sync( pcidriver_privdata_t *privdata, kmem_sync_t *kmem_sync )
121
pcidriver_kmem_entry_t *kmem_entry;
123
/* Find the associated kmem_entry for this buffer */
124
if ((kmem_entry = pcidriver_kmem_find_entry(privdata, &(kmem_sync->handle))) == NULL)
125
return -EINVAL; /* kmem_handle is not valid */
127
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
128
switch (kmem_sync->dir) {
129
case PCIDRIVER_DMA_TODEVICE:
130
pci_dma_sync_single_for_device( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, PCI_DMA_TODEVICE );
132
case PCIDRIVER_DMA_FROMDEVICE:
133
pci_dma_sync_single_for_cpu( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, PCI_DMA_FROMDEVICE );
135
case PCIDRIVER_DMA_BIDIRECTIONAL:
136
pci_dma_sync_single_for_device( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, PCI_DMA_BIDIRECTIONAL );
137
pci_dma_sync_single_for_cpu( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, PCI_DMA_BIDIRECTIONAL );
140
return -EINVAL; /* wrong direction parameter */
143
switch (kmem_sync->dir) {
144
case PCIDRIVER_DMA_TODEVICE:
145
pci_dma_sync_single( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, PCI_DMA_TODEVICE );
147
case PCIDRIVER_DMA_FROMDEVICE:
148
pci_dma_sync_single( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, PCI_DMA_FROMDEVICE );
150
case PCIDRIVER_DMA_BIDIRECTIONAL:
151
pci_dma_sync_single( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, PCI_DMA_BIDIRECTIONAL );
154
return -EINVAL; /* wrong direction parameter */
158
return 0; /* success */
163
* Free the given kmem_entry and its memory.
166
int pcidriver_kmem_free_entry(pcidriver_privdata_t *privdata, pcidriver_kmem_entry_t *kmem_entry)
168
pcidriver_sysfs_remove(privdata, &(kmem_entry->sysfs_attr));
170
/* Go over the pages of the kmem buffer, and mark them as not reserved */
172
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
174
* This code is DISABLED.
175
* Apparently, it is not needed to unreserve them. Doing so here
176
* hangs the machine. Why?
180
* http://lwn.net/Articles/161204/
181
* http://lists.openfabrics.org/pipermail/general/2007-March/034101.html
183
* I insist, this should be enabled, but doing so hangs the machine.
184
* Literature supports the point, and there is even a similar problem (see link)
185
* But this is not the case. It seems right to me. but obviously is not.
187
* Anyway, this goes away in kernel >=2.6.15.
189
unsigned long start = __pa(kmem_entry->cpua) >> PAGE_SHIFT;
190
unsigned long end = __pa(kmem_entry->cpua + kmem_entry->size) >> PAGE_SHIFT;
192
for(i=start;i<end;i++) {
193
struct page *kpage = pfn_to_page(i);
194
ClearPageReserved(kpage);
199
/* Release DMA memory */
200
pci_free_consistent( privdata->pdev, kmem_entry->size, (void *)(kmem_entry->cpua), kmem_entry->dma_handle );
202
/* Remove the kmem list entry */
203
spin_lock( &(privdata->kmemlist_lock) );
204
list_del( &(kmem_entry->list) );
205
spin_unlock( &(privdata->kmemlist_lock) );
207
/* Release kmem_entry memory */
215
* Find the corresponding kmem_entry for the given kmem_handle.
218
pcidriver_kmem_entry_t *pcidriver_kmem_find_entry(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle)
220
struct list_head *ptr;
221
pcidriver_kmem_entry_t *entry, *result = NULL;
223
/* should I implement it better using the handle_id? */
225
spin_lock(&(privdata->kmemlist_lock));
226
list_for_each(ptr, &(privdata->kmem_list)) {
227
entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
229
if (entry->dma_handle == kmem_handle->pa) {
235
spin_unlock(&(privdata->kmemlist_lock));
241
* find the corresponding kmem_entry for the given id.
244
pcidriver_kmem_entry_t *pcidriver_kmem_find_entry_id(pcidriver_privdata_t *privdata, int id)
246
struct list_head *ptr;
247
pcidriver_kmem_entry_t *entry, *result = NULL;
249
spin_lock(&(privdata->kmemlist_lock));
250
list_for_each(ptr, &(privdata->kmem_list)) {
251
entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
253
if (entry->id == id) {
259
spin_unlock(&(privdata->kmemlist_lock));
265
* mmap() kernel memory to userspace.
268
int pcidriver_mmap_kmem(pcidriver_privdata_t *privdata, struct vm_area_struct *vma)
270
unsigned long vma_size;
271
pcidriver_kmem_entry_t *kmem_entry;
274
mod_info_dbg("Entering mmap_kmem\n");
276
/* FIXME: Is this really right? Always just the latest one? Can't we identify one? */
277
/* Get latest entry on the kmem_list */
278
spin_lock(&(privdata->kmemlist_lock));
279
if (list_empty(&(privdata->kmem_list))) {
280
spin_unlock(&(privdata->kmemlist_lock));
281
mod_info("Trying to mmap a kernel memory buffer without creating it first!\n");
284
kmem_entry = list_entry(privdata->kmem_list.prev, pcidriver_kmem_entry_t, list);
285
spin_unlock(&(privdata->kmemlist_lock));
287
mod_info_dbg("Got kmem_entry with id: %d\n", kmem_entry->id);
290
vma_size = (vma->vm_end - vma->vm_start);
291
if ((vma_size != kmem_entry->size) &&
292
((kmem_entry->size < PAGE_SIZE) && (vma_size != PAGE_SIZE))) {
293
mod_info("kem_entry size(%lu) and vma size do not match(%lu)\n", kmem_entry->size, vma_size);
297
vma->vm_flags |= (VM_RESERVED);
299
#ifdef pgprot_noncached
300
// This is coherent memory, so it must not be cached.
301
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
304
mod_info_dbg("Mapping address %08lx / PFN %08lx\n",
305
virt_to_phys((void*)kmem_entry->cpua),
306
page_to_pfn(virt_to_page((void*)kmem_entry->cpua)));
308
ret = remap_pfn_range_cpua_compat(
316
mod_info("kmem remap failed: %d (%lx)\n", ret,kmem_entry->cpua);