2 * USB support based on Linux kernel source
4 * 2003-06-21 Georg Acher (georg@acher.org)
8 * 1) Forget all device interrupts, scheduling, semaphores, threads etc.
9 * 1a) Forget all DMA and PCI helper functions
10 * 2) Forget usbdevfs, procfs and ioctls
11 * 3) Emulate xHCI interrupts and root hub timer by polling
12 * 4) Emulate hub kernel thread by polling
13 * 5) Emulate synchronous USB-messages (usb_*_msg) with busy waiting
16 * 6) Remove code bloat
20 #include "../usb_wrapper.h"
24 static struct pci_dev
*pci_probe_dev
;
25 extern int (*thread_handler
)(void*);
26 extern void* thread_parm
;
28 struct my_irqs reg_irqs
[MAX_IRQS
];
34 struct timer_list
*main_timer_list
[MAX_TIMERS
];
35 struct dummy_process act_cur
={0};
36 struct dummy_process
*my_current
;
38 int (*thread_handler
)(void*);
42 static struct device_driver
*m_drivers
[MAX_DRVS
];
43 static int drvs_num
=0;
44 unsigned int LAST_USB_EVENT_TICK
;
46 NTSTATUS
init_dma(PUSBMP_DEVICE_EXTENSION pDevExt
);
48 /*------------------------------------------------------------------------*/
50 * Helper functions for top-level system
52 /*------------------------------------------------------------------------*/
53 void init_wrapper(struct pci_dev
*probe_dev
)
56 for(n
=0;n
<MAX_TIMERS
;n
++)
58 main_timer_list
[n
]=NULL
;
64 pci_probe_dev
=probe_dev
;
66 for(n
=0;n
<MAX_IRQS
;n
++)
68 reg_irqs
[n
].handler
=NULL
;
73 for(n
=0;n
<MAX_DRVS
;n
++)
76 init_dma(probe_dev
->dev_ext
);
78 /*------------------------------------------------------------------------*/
79 void handle_irqs(int irq
)
82 //printk("handle irqs\n");
83 for(n
=0;n
<MAX_IRQS
;n
++)
85 if (reg_irqs
[n
].handler
&& (irq
==reg_irqs
[n
].irq
|| irq
==-1))
86 reg_irqs
[n
].handler(reg_irqs
[n
].irq
,reg_irqs
[n
].data
,NULL
);
89 /*------------------------------------------------------------------------*/
90 void inc_jiffies(int n
)
94 /*------------------------------------------------------------------------*/
95 void do_all_timers(void)
98 for(n
=0;n
<MAX_TIMERS
;n
++)
100 if (main_timer_list
[n
] && main_timer_list
[n
]->function
)
102 void (*function
)(unsigned long)=main_timer_list
[n
]->function
;
103 unsigned long data
=main_timer_list
[n
]->data
;
105 if (main_timer_list
[n
]->expires
>1) {
106 main_timer_list
[n
]->expires
--;
109 main_timer_list
[n
]->expires
=0;
110 main_timer_list
[n
]=0; // remove timer
111 // Call Timer Function Data
117 /*------------------------------------------------------------------------*/
118 // Purpose: Remember thread procedure and data in global var
119 // ReactOS Purpose: Create real kernel thread
120 int my_kernel_thread(int STDCALL (*handler
)(void*), void* parm
, int flags
)
122 HANDLE hThread
= NULL
;
123 //thread_handler=handler;
125 //return 42; // PID :-)
127 ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL
);
129 PsCreateSystemThread(&hThread
,
134 (PKSTART_ROUTINE
)handler
,
137 DPRINT1("usbcore: Created system thread %d\n", (int)hThread
);
139 return (int)hThread
; // FIXME: Correct?
143 int my_kill_proc(int pid
, int signal
, int unk
)
147 // TODO: Implement actual process killing
149 hThread
= (HANDLE
)pid
;
155 /*------------------------------------------------------------------------*/
157 * As simple as possible, but as complete as necessary ...
159 /*------------------------------------------------------------------------*/
162 /* calls probe function for hotplug (which does device matching), this is the
163 only link between usbcore and the registered device drivers! */
164 int my_device_add(struct device
*dev
)
167 printk("drv_num %i %p %p\n",drvs_num
,m_drivers
[0]->probe
,m_drivers
[1]->probe
);
171 if (dev
->driver
->probe
)
172 return dev
->driver
->probe(dev
);
176 for(n
=0;n
<drvs_num
;n
++)
178 if (m_drivers
[n
]->probe
)
180 dev
->driver
=m_drivers
[n
];
181 printk("probe%i %p\n",n
,m_drivers
[n
]->probe
);
183 if (m_drivers
[n
]->probe(dev
) == 0)
195 /*------------------------------------------------------------------------*/
196 int my_driver_register(struct device_driver
*driver
)
199 if (drvs_num
<MAX_DRVS
)
201 printk("driver_register %i: %p %p",drvs_num
,driver
,driver
->probe
);
203 m_drivers
[drvs_num
++]=driver
;
208 /*------------------------------------------------------------------------*/
209 int my_device_unregister(struct device
*dev
)
211 if (dev
->driver
&& dev
->driver
->remove
)
212 dev
->driver
->remove(dev
);
216 /*------------------------------------------------------------------------*/
217 struct device
*my_get_device(struct device
*dev
)
221 /*------------------------------------------------------------------------*/
/* Intentionally a no-op: the wrapper keeps no per-device state that needs
 * initialisation. */
void my_device_initialize(struct device *dev)
{
}
225 /*------------------------------------------------------------------------*/
226 void my_wake_up(PKEVENT evnt
)
230 KeSetEvent(evnt
, 0, FALSE
); // Signal event
232 /*------------------------------------------------------------------------*/
233 void my_init_waitqueue_head(PKEVENT evnt
)
235 // this is used only in core/message.c, and it isn't needed there
236 //KeInitializeEvent(evnt, NotificationEvent, TRUE); // signalled state
238 /*------------------------------------------------------------------------*/
239 /* wait until woken up (only one wait allowed!) */
240 extern unsigned int LAST_USB_IRQ
;
242 int my_schedule_timeout(int x
)
247 //PULONG tmp_debug=NULL;
248 //extern unsigned int LAST_USB_EVENT_TICK;
250 //*tmp_debug = 0xFFAAFFAA;
252 printk("schedule_timeout: %d ms\n", x
);
254 //delay.QuadPart = -x*10000; // convert to 100ns units
255 //KeDelayExecutionThread(KernelMode, FALSE, &delay); //wait_us(1);
259 x = x*1000; // to us format
261 x
= 50; // it's enough for most purposes
265 KeQueryTickCount((LARGE_INTEGER
*)&HH
);//IoInputDword(0x8008);
266 //temp = HH - LAST_USB_EVENT_TICK;
268 //if (temp>(3579)) { //3579 = 1ms!
271 // LAST_USB_EVENT_TICK = HH;
279 delay
.QuadPart
= -10;
280 KeDelayExecutionThread(KernelMode
, FALSE
, &delay
); //wait_us(1);
282 //DPRINT("schedule_timeout(): time left: %d\n", x);
286 printk("schedule DONE!!!!!!\n");
290 /*------------------------------------------------------------------------*/
291 void my_wait_for_completion(struct completion
*x
)
297 extern unsigned int LAST_USB_EVENT_TICK
;
299 printk("wait for completion11, x=0x%08x\n", (DWORD
)x
);
302 n
= n
*1000; // to us format
304 while(!x
->done
&& (n
>0))
306 KeQueryTickCount((LARGE_INTEGER
*)&HH
);//IoInputDword(0x8008);
307 temp
= HH
- LAST_USB_EVENT_TICK
;
312 LAST_USB_EVENT_TICK
= HH
;
317 delay
.QuadPart
= -10;
318 KeDelayExecutionThread(KernelMode
, FALSE
, &delay
); //wait_us(1);
321 printk("wait for completion done %i\n",x
->done
);
324 /*------------------------------------------------------------------------*/
325 void my_init_completion(struct completion
*x
)
328 KeInitializeEvent(&x
->wait
, NotificationEvent
, FALSE
);
330 /*------------------------------------------------------------------------*/
331 void my_interruptible_sleep_on(PKEVENT evnt
)
333 KeWaitForSingleObject(evnt
, Executive
, KernelMode
, FALSE
, NULL
);
334 KeClearEvent(evnt
); // reset to not-signalled
336 /*------------------------------------------------------------------------*/
337 // Helper for pci_module_init
338 /*------------------------------------------------------------------------*/
339 int my_pci_module_init(struct pci_driver
*x
)
341 struct pci_dev
*dev
=pci_probe_dev
;
342 const struct pci_device_id
*id
=NULL
;
345 DPRINT1("PCI device not set!\n");
351 /*------------------------------------------------------------------------*/
352 struct pci_dev
*my_pci_find_slot(int a
,int b
)
356 /*------------------------------------------------------------------------*/
357 int my_pci_write_config_word(struct pci_dev
*dev
, int where
, u16 val
)
359 //dev->bus, dev->devfn, where, val
360 PUSBMP_DEVICE_EXTENSION dev_ext
= (PUSBMP_DEVICE_EXTENSION
)dev
->dev_ext
;
362 //FIXME: Is returning this value correct?
363 //FIXME: Mixing pci_dev and win structs isn't a good thing at all
364 return HalSetBusDataByOffset(PCIConfiguration
, dev
->bus
->number
, dev_ext
->SystemIoSlotNumber
, &val
, where
, sizeof(val
));
366 /*------------------------------------------------------------------------*/
367 int my_request_irq(unsigned int irq
,
368 int (*handler
)(int,void *, struct pt_regs
*),
369 unsigned long mode
, const char *desc
, void *data
)
371 if (num_irqs
<MAX_IRQS
)
373 reg_irqs
[num_irqs
].handler
=handler
;
374 reg_irqs
[num_irqs
].irq
=irq
;
375 reg_irqs
[num_irqs
].data
=data
;
382 /*------------------------------------------------------------------------*/
383 int my_free_irq(int irq
, void* p
)
388 /*------------------------------------------------------------------------*/
390 /*------------------------------------------------------------------------*/
391 kmem_cache_t
*my_kmem_cache_create(const char *tag
, size_t alloc_size
,
392 size_t offset
, unsigned long flags
,
396 //TODO: Take in account ctor and dtor - callbacks for alloc/free, flags and offset
397 //FIXME: We assume this cache is always NPaged
398 PNPAGED_LOOKASIDE_LIST Lookaside
;
399 ULONG Tag
=0x11223344; //FIXME: Make this from tag
401 Lookaside
= ExAllocatePool(NonPagedPool
, sizeof(NPAGED_LOOKASIDE_LIST
));
403 ExInitializeNPagedLookasideList(
412 return (kmem_cache_t
*)Lookaside
;
414 /*------------------------------------------------------------------------*/
415 BOOLEAN
my_kmem_cache_destroy(kmem_cache_t
*co
)
417 ExDeleteNPagedLookasideList((PNPAGED_LOOKASIDE_LIST
)co
);
422 /*------------------------------------------------------------------------*/
423 void *my_kmem_cache_alloc(kmem_cache_t
*co
, int flags
)
425 return ExAllocateFromNPagedLookasideList((PNPAGED_LOOKASIDE_LIST
)co
);
427 /*------------------------------------------------------------------------*/
428 void my_kmem_cache_free(kmem_cache_t
*co
, void *ptr
)
430 ExFreeToNPagedLookasideList((PNPAGED_LOOKASIDE_LIST
)co
, ptr
);
432 /*------------------------------------------------------------------------*/
433 // DMA support routines
434 /*------------------------------------------------------------------------*/
435 #ifdef USB_DMA_SINGLE_SUPPORT
436 static IO_ALLOCATION_ACTION NTAPI
MapRegisterCallback(PDEVICE_OBJECT DeviceObject
,
438 PVOID MapRegisterBase
,
443 init_dma(PUSBMP_DEVICE_EXTENSION pDevExt
)
445 // Prepare device descriptor structure
446 DEVICE_DESCRIPTION dd
;
447 #ifdef USB_DMA_SINGLE_SUPPORT
453 RtlZeroMemory( &dd
, sizeof(dd
) );
454 dd
.Version
= DEVICE_DESCRIPTION_VERSION
;
456 dd
.ScatterGather
= TRUE
;
457 dd
.DemandMode
= FALSE
;
458 dd
.AutoInitialize
= FALSE
;
459 dd
.Dma32BitAddresses
= TRUE
;
460 dd
.InterfaceType
= PCIBus
;
461 dd
.DmaChannel
= 0;//pDevExt->dmaChannel;
462 dd
.MaximumLength
= 128;//MAX_DMA_LENGTH;
463 dd
.DmaWidth
= Width32Bits
;
464 dd
.DmaSpeed
= MaximumDmaSpeed
;
466 // The following taken from Win2k DDB:
467 // "Compute the maximum number of mapping regs
468 // this device could possibly need. Since the
469 // transfer may not be paged aligned, add one
470 // to allow the max xfer size to span a page."
471 //pDevExt->mapRegisterCount = (MAX_DMA_LENGTH / PAGE_SIZE) + 1;
473 // TODO: Free it somewhere (PutDmaAdapter)
474 pDevExt
->pDmaAdapter
=
475 IoGetDmaAdapter( pDevExt
->PhysicalDeviceObject
,
477 &pDevExt
->mapRegisterCount
);
479 DPRINT1("IoGetDmaAdapter done 0x%X, mapRegisterCount=%d\n", pDevExt
->pDmaAdapter
, pDevExt
->mapRegisterCount
);
482 if (pDevExt
->pDmaAdapter
== NULL
)
483 return STATUS_INSUFFICIENT_RESOURCES
;
485 #ifdef USB_DMA_SINGLE_SUPPORT
486 /* Allocate buffer now */
487 pDevExt
->BufferSize
= pDevExt
->mapRegisterCount
* PAGE_SIZE
;
488 DPRINT1("Bufsize = %u\n", pDevExt
->BufferSize
);
489 pDevExt
->VirtualBuffer
= pDevExt
->pDmaAdapter
->DmaOperations
->AllocateCommonBuffer(
490 pDevExt
->pDmaAdapter
, pDevExt
->BufferSize
, &pDevExt
->Buffer
, FALSE
);
491 DPRINT1("Bufsize = %u, Buffer = 0x%x", pDevExt
->BufferSize
, pDevExt
->Buffer
.LowPart
);
493 if (!pDevExt
->VirtualBuffer
)
495 DPRINT1("Could not allocate buffer\n");
496 // should try again with smaller buffer...
497 return STATUS_INSUFFICIENT_RESOURCES
;
500 DPRINT1("Calling IoAllocateMdl()\n");
501 pDevExt
->Mdl
= IoAllocateMdl(pDevExt
->VirtualBuffer
, pDevExt
->BufferSize
, FALSE
, FALSE
, NULL
);
502 DPRINT1("Bufsize == %u\n", pDevExt
->BufferSize
);
506 DPRINT1("IoAllocateMdl() FAILED\n");
507 //TODO: Free the HAL buffer
508 return STATUS_INSUFFICIENT_RESOURCES
;
511 DPRINT1("VBuffer == 0x%x Mdl == %u Bufsize == %u\n", pDevExt
->VirtualBuffer
, pDevExt
->Mdl
, pDevExt
->BufferSize
);
513 DPRINT1("Calling MmBuildMdlForNonPagedPool\n");
514 MmBuildMdlForNonPagedPool(pDevExt
->Mdl
);
517 /* Get map registers for DMA */
518 KeInitializeEvent(&DMAEvent
, SynchronizationEvent
, FALSE
);
520 KeRaiseIrql(DISPATCH_LEVEL
, &OldIrql
);
521 // TODO: Free adapter channel somewhere
522 Status
= pDevExt
->pDmaAdapter
->DmaOperations
->AllocateAdapterChannel(pDevExt
->pDmaAdapter
,
523 pDevExt
->PhysicalDeviceObject
, pDevExt
->mapRegisterCount
, MapRegisterCallback
, &DMAEvent
);
524 KeLowerIrql(OldIrql
);
526 DPRINT1("VBuffer == 0x%x Bufsize == %u\n", pDevExt
->VirtualBuffer
, pDevExt
->BufferSize
);
527 KeWaitForSingleObject(&DMAEvent
, Executive
, KernelMode
, FALSE
, NULL
);
529 if(Status
!= STATUS_SUCCESS
)
531 DPRINT("init_dma(): unable to allocate adapter channels\n");
532 return STATUS_INSUFFICIENT_RESOURCES
;
535 return STATUS_SUCCESS
;
539 * FUNCTION: Acquire map registers in prep for DMA
541 * DeviceObject: unused
543 * MapRegisterBase: returned to blocked thread via a member var
544 * Context: contains a pointer to the right ControllerInfo
547 * KeepObject, because that's what the DDK says to do
549 #ifdef USB_DMA_SINGLE_SUPPORT
550 static IO_ALLOCATION_ACTION NTAPI
MapRegisterCallback(PDEVICE_OBJECT DeviceObject
,
552 PVOID MapRegisterBase
,
555 PUSBMP_DEVICE_EXTENSION pDevExt
= (PUSBMP_DEVICE_EXTENSION
)DeviceObject
->DeviceExtension
;
556 UNREFERENCED_PARAMETER(Irp
);
558 DPRINT("usb_linuxwrapper: MapRegisterCallback Called, base=0x%08x\n", MapRegisterBase
);
560 pDevExt
->MapRegisterBase
= MapRegisterBase
;
562 // signal that we are finished
563 KeSetEvent(Context
, 0, FALSE
);
565 return KeepObject
;//DeallocateObjectKeepRegisters;
569 void *my_dma_pool_alloc(struct dma_pool
*pool
, int gfp_flags
, dma_addr_t
*dma_handle
)
571 // HalAllocCommonBuffer
572 // But ideally IoGetDmaAdapter
574 DPRINT1("dma_pool_alloc() called\n");
579 pci_pool_create -- Creates a pool of pci consistent memory blocks, for dma.
581 struct pci_pool * pci_pool_create (const char * name, struct pci_dev * pdev, size_t size, size_t align, size_t allocation, int flags);
584 name - name of pool, for diagnostics
585 pdev - pci device that will be doing the DMA
586 size - size of the blocks in this pool.
587 align - alignment requirement for blocks; must be a power of two
588 allocation - returned blocks won't cross this boundary (or zero)
589 flags - SLAB_* flags (not all are supported).
592 Returns a pci allocation pool with the requested characteristics, or null if one can't be created.
593 Given one of these pools, pci_pool_alloc may be used to allocate memory. Such memory will all have
594 "consistent" DMA mappings, accessible by the device and its driver without using cache flushing
595 primitives. The actual size of blocks allocated may be larger than requested because of alignment.
596 If allocation is nonzero, objects returned from pci_pool_alloc won't cross that size boundary.
597 This is useful for devices which have addressing restrictions on individual DMA transfers, such
598 as not crossing boundaries of 4KBytes.
600 struct pci_pool
*my_pci_pool_create(const char * name
, struct pci_dev
* pdev
, size_t size
, size_t align
, size_t allocation
)
602 struct pci_pool
*retval
;
608 else if (size
< align
)
610 else if ((size
% align
) != 0) {
612 size
&= ~(align
- 1);
615 if (allocation
== 0) {
616 if (PAGE_SIZE
< size
)
619 allocation
= PAGE_SIZE
;
620 // FIXME: round up for less fragmentation
621 } else if (allocation
< size
)
624 retval
= ExAllocatePool(NonPagedPool
, sizeof(struct pci_pool
)); // Non-paged because could be
625 // accesses at IRQL < PASSIVE
627 // fill retval structure
628 strncpy (retval
->name
, name
, sizeof retval
->name
);
629 retval
->name
[sizeof retval
->name
- 1] = 0;
631 retval
->allocation
= allocation
;
633 retval
->blocks_per_page
= allocation
/ size
;
636 retval
->pages_allocated
= 0;
637 retval
->blocks_allocated
= 0;
639 DPRINT("pci_pool_create(): %s/%s size %d, %d/page (%d alloc)\n",
640 pdev
? pdev
->slot_name
: NULL
, retval
->name
, size
,
641 retval
->blocks_per_page
, allocation
);
648 pci_pool_alloc -- get a block of consistent memory
651 void * pci_pool_alloc (struct pci_pool * pool, int mem_flags, dma_addr_t * handle);
654 pool - pci pool that will produce the block
656 mem_flags - SLAB_KERNEL or SLAB_ATOMIC
658 handle - pointer to dma address of block
661 This returns the kernel virtual address of a currently unused block, and reports its dma
662 address through the handle. If such a memory block can't be allocated, null is returned.
664 void * my_pci_pool_alloc(struct pci_pool
* pool
, int mem_flags
, dma_addr_t
*dma_handle
)
667 PUSBMP_DEVICE_EXTENSION devExt
= (PUSBMP_DEVICE_EXTENSION
)pool
->pdev
->dev_ext
;
671 //DPRINT1("pci_pool_alloc() called, blocks already allocated=%d, dma_handle=%p\n", pool->blocks_allocated, dma_handle);
672 //ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
674 if (pool
->pages_allocated
== 0)
676 // we need to allocate at least one page
677 pool
->pages
[pool
->pages_allocated
].virtualAddress
=
678 devExt
->pDmaAdapter
->DmaOperations
->AllocateCommonBuffer(devExt
->pDmaAdapter
,
679 PAGE_SIZE
, &pool
->pages
[pool
->pages_allocated
].dmaAddress
, FALSE
); //FIXME: Cache-enabled?
681 // mark all blocks as free (bit=set)
682 memset(pool
->pages
[pool
->pages_allocated
].bitmap
, 0xFF, 128*sizeof(unsigned long));
684 /* FIXME: the next line replaces physical address by virtual address:
685 * this change is needed to boot VMWare, but I'm really not sure this
688 //pool->pages[pool->pages_allocated].dmaAddress.QuadPart = (ULONG_PTR)pool->pages[pool->pages_allocated].virtualAddress;
689 pool
->pages_allocated
++;
692 // search for a free block in all pages
693 for (page
=0; page
<pool
->pages_allocated
; page
++)
695 for (map
=0,i
=0; i
< pool
->blocks_per_page
; i
+= BITS_PER_LONG
, map
++)
697 if (pool
->pages
[page
].bitmap
[map
] == 0)
700 block
= ffz(~ pool
->pages
[page
].bitmap
[map
]);
702 if ((i
+ block
) < pool
->blocks_per_page
)
704 //DPRINT("pci_pool_alloc(): Allocating block %p:%d:%d:%d\n", pool, page, map, block);
705 clear_bit(block
, &pool
->pages
[page
].bitmap
[map
]);
706 offset
= (BITS_PER_LONG
* map
) + block
;
707 offset
*= pool
->size
;
713 //TODO: alloc page here then
714 DPRINT1("Panic!! We need one more page to be allocated, and Fireball doesn't want to alloc it!\n");
719 *dma_handle
= pool
->pages
[page
].dmaAddress
.QuadPart
+ offset
;
720 result
= (char *)pool
->pages
[page
].virtualAddress
+ offset
;
721 pool
->blocks_allocated
++;
728 pci_pool_free -- put block back into pci pool
731 void pci_pool_free (struct pci_pool * pool, void * vaddr, dma_addr_t dma);
735 pool - the pci pool holding the block
737 vaddr - virtual address of block
739 dma - dma address of block
742 Caller promises neither device nor driver will again touch this block unless it is first re-allocated.
744 void my_pci_pool_free (struct pci_pool
* pool
, void * vaddr
, dma_addr_t dma
)
746 int page
, block
, map
;
749 for (page
=0; page
<pool
->pages_allocated
; page
++)
751 if (dma
< pool
->pages
[page
].dmaAddress
.QuadPart
)
753 if (dma
< (pool
->pages
[page
].dmaAddress
.QuadPart
+ pool
->allocation
))
757 block
= dma
- pool
->pages
[page
].dmaAddress
.QuadPart
;
759 map
= block
/ BITS_PER_LONG
;
760 block
%= BITS_PER_LONG
;
763 set_bit (block
, &pool
->pages
[page
].bitmap
[map
]);
765 pool
->blocks_allocated
--;
766 //DPRINT("pci_pool_free(): alloc'd: %d\n", pool->blocks_allocated);
770 pci_pool_destroy -- destroys a pool of pci memory blocks.
773 void pci_pool_destroy (struct pci_pool * pool);
777 pool - pci pool that will be destroyed
780 Caller guarantees that no more memory from the pool is in use, and that nothing will try to
781 use the pool after this call.
783 void __inline__
my_pci_pool_destroy (struct pci_pool
* pool
)
785 DPRINT1("pci_pool_destroy(): alloc'd: %d, UNIMPLEMENTED\n", pool
->blocks_allocated
);
790 void *my_pci_alloc_consistent(struct pci_dev
*hwdev
, size_t size
, dma_addr_t
*dma_handle
)
792 PUSBMP_DEVICE_EXTENSION devExt
= (PUSBMP_DEVICE_EXTENSION
)hwdev
->dev_ext
;
793 DPRINT1("pci_alloc_consistent() size=%d\n", size
);
795 return devExt
->pDmaAdapter
->DmaOperations
->AllocateCommonBuffer(devExt
->pDmaAdapter
, size
, (PPHYSICAL_ADDRESS
)dma_handle
, FALSE
); //FIXME: Cache-enabled?
798 dma_addr_t
my_dma_map_single(struct device
*hwdev
, void *ptr
, size_t size
, enum dma_data_direction direction
)
800 //PHYSICAL_ADDRESS BaseAddress;
801 //PUSBMP_DEVICE_EXTENSION pDevExt = (PUSBMP_DEVICE_EXTENSION)hwdev->dev_ext;
802 //PUCHAR VirtualAddress = (PUCHAR) MmGetMdlVirtualAddress(pDevExt->Mdl);
803 //ULONG transferSize = size;
804 //BOOLEAN WriteToDevice;
806 //DPRINT1("dma_map_single() ptr=0x%lx, size=0x%x, dir=%d\n", ptr, size, direction);
807 /*ASSERT(pDevExt->BufferSize > size);
809 // FIXME: It must be an error if DMA_BIDIRECTIONAL trasnfer happens, since MSDN says
810 // the buffer is locked
811 if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
812 WriteToDevice = TRUE;
814 WriteToDevice = FALSE;
816 DPRINT1("IoMapTransfer\n");
817 BaseAddress = pDevExt->pDmaAdapter->DmaOperations->MapTransfer(pDevExt->pDmaAdapter,
819 pDevExt->MapRegisterBase,
820 (PUCHAR) MmGetMdlVirtualAddress(pDevExt->Mdl),
826 DPRINT1("Writing to the device...\n");
827 memcpy(VirtualAddress, ptr, size);
831 DPRINT1("Reading from the device...\n");
832 memcpy(ptr, VirtualAddress, size);
835 //DPRINT1("VBuffer == 0x%x (really 0x%x?) transf_size == %u\n", pDevExt->VirtualBuffer, MmGetPhysicalAddress(pDevExt->VirtualBuffer).LowPart, transferSize);
836 //DPRINT1("VBuffer == 0x%x (really 0x%x?) transf_size == %u\n", ptr, MmGetPhysicalAddress(ptr).LowPart, transferSize);
838 return MmGetPhysicalAddress(ptr
).QuadPart
;//BaseAddress.QuadPart; /* BIG HACK */
841 // 2.6 version of pci_unmap_single
842 //void my_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction)
843 void my_dma_unmap_single(struct device
*dev
, dma_addr_t dma_addr
, size_t size
, enum dma_data_direction direction
)
845 //DPRINT1("dma_unmap_single() called, nothing to do\n");
849 void my_dma_sync_single(struct device
*hwdev
,
850 dma_addr_t dma_handle
,
851 size_t size
, int direction
)
853 DPRINT1("dma_sync_single() called, UNIMPLEMENTED\n");
/* Synchronise a scatter/gather DMA mapping — UNIMPLEMENTED stub. */
void my_dma_sync_sg(struct device *hwdev,
                    struct scatterlist *sg,
                    int nelems, int direction)
{
	DPRINT1("dma_sync_sg() called, UNIMPLEMENTED\n");
}
866 int my_dma_map_sg(struct device
*hwdev
, struct scatterlist
*sg
, int nents
, enum dma_data_direction direction
)
868 DPRINT1("dma_map_sg() called, UNIMPLEMENTED\n");
872 void my_dma_unmap_sg(struct device
*hwdev
, struct scatterlist
*sg
, int nents
, enum dma_data_direction direction
)
874 DPRINT1("dma_unmap_sg() called, UNIMPLEMENTED\n");
878 /* forwarder ro dma_ equivalent */
879 void my_pci_unmap_single(struct pci_dev
*hwdev
, dma_addr_t dma_addr
, size_t size
, int direction
)
881 my_dma_unmap_single(&hwdev
->dev
, dma_addr
, size
, direction
);
885 /*------------------------------------------------------------------------*/
886 /* SPINLOCK routines */
887 /*------------------------------------------------------------------------*/
888 void my_spin_lock_init(spinlock_t
*sl
)
890 KeInitializeSpinLock(&sl
->SpinLock
);
893 void my_spin_lock(spinlock_t
*sl
)
895 //KeAcquireSpinLock(&sl->SpinLock, &sl->OldIrql);
898 void my_spin_unlock(spinlock_t
*sl
)
900 //KeReleaseSpinLock(&sl->SpinLock, sl->OldIrql);
903 void my_spin_lock_irqsave(spinlock_t
*sl
, int flags
)