/*
 * USB support based on Linux kernel source
 *
 * 2003-06-21 Georg Acher (georg@acher.org)
 *
 * Concept:
 *
 * 1) Forget all device interrupts, scheduling, semaphores, threads etc.
 * 1a) Forget all DMA and PCI helper functions
 * 2) Forget usbdevfs, procfs and ioctls
 * 3) Emulate OHCI interrupts and root hub timer by polling
 * 4) Emulate hub kernel thread by polling
 * 5) Emulate synchronous USB-messages (usb_*_msg) with busy waiting
 *
 * To be done:
 * 6) Remove code bloat
 *
 */

#include "../usb_wrapper.h"

/* internal state */

static struct pci_dev *pci_probe_dev;
extern int (*thread_handler)(void*);
extern void* thread_parm;

struct my_irqs reg_irqs[MAX_IRQS];
int num_irqs;
int need_wakeup;

int my_jiffies;

struct timer_list *main_timer_list[MAX_TIMERS];
struct dummy_process act_cur={0};
struct dummy_process *my_current;

int (*thread_handler)(void*);
void* thread_parm;

#define MAX_DRVS 8
static struct device_driver *m_drivers[MAX_DRVS];
static int drvs_num=0;
unsigned int LAST_USB_EVENT_TICK;

NTSTATUS init_dma(PUSBMP_DEVICE_EXTENSION pDevExt);

/*------------------------------------------------------------------------*/
/*
 * Helper functions for top-level system
 */
/*------------------------------------------------------------------------*/
void init_wrapper(struct pci_dev *probe_dev)
{
    int n;
    for(n=0;n<MAX_TIMERS;n++)
    {
        main_timer_list[n]=NULL;
    }

    my_jiffies=0;
    num_irqs=0;
    my_current=&act_cur;
    pci_probe_dev=probe_dev;

    for(n=0;n<MAX_IRQS;n++)
    {
        reg_irqs[n].handler=NULL;
        reg_irqs[n].irq=-1;
    }
    drvs_num=0;
    need_wakeup=0;
    for(n=0;n<MAX_DRVS;n++)
        m_drivers[n]=NULL;

    init_dma(probe_dev->dev_ext);
}
/*------------------------------------------------------------------------*/
void handle_irqs(int irq)
{
    int n;
    //printk("handle irqs\n");
    for(n=0;n<MAX_IRQS;n++)
    {
        if (reg_irqs[n].handler && (irq==reg_irqs[n].irq || irq==-1))
            reg_irqs[n].handler(reg_irqs[n].irq,reg_irqs[n].data,NULL);
    }
}
/*------------------------------------------------------------------------*/
void inc_jiffies(int n)
{
    my_jiffies+=n;
}
/*------------------------------------------------------------------------*/
void do_all_timers(void)
{
    int n;
    for(n=0;n<MAX_TIMERS;n++)
    {
        if (main_timer_list[n] && main_timer_list[n]->function)
        {
            void (*function)(unsigned long)=main_timer_list[n]->function;
            unsigned long data=main_timer_list[n]->data;

            if (main_timer_list[n]->expires>1) {
                main_timer_list[n]->expires--;
            } else {
                main_timer_list[n]->expires=0;
                main_timer_list[n]=NULL; // remove expired timer from the list
                // call the timer function with its data argument
                function(data);
            }
        }
    }
}
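/* Illustrative sketch (not part of the original file): how a caller would
 * arm one of the polled timers above. Each call to do_all_timers() acts as
 * one tick, so a timer with expires == ticks fires after roughly `ticks`
 * polling passes. The real add_timer/del_timer emulation lives elsewhere in
 * the wrapper; this helper name is an assumption.
 */
#if 0
static void example_arm_timer(struct timer_list *timer,
                              void (*function)(unsigned long),
                              unsigned long data, int ticks)
{
    int n;

    timer->function = function;
    timer->data = data;
    timer->expires = ticks;

    for (n = 0; n < MAX_TIMERS; n++)
    {
        if (main_timer_list[n] == NULL)
        {
            main_timer_list[n] = timer; // do_all_timers() will pick it up
            return;
        }
    }
    // no free slot: the timer never fires
}
#endif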
/*------------------------------------------------------------------------*/
// Purpose: Remember thread procedure and data in global var
// ReactOS Purpose: Create real kernel thread
int my_kernel_thread(int STDCALL (*handler)(void*), void* parm, int flags)
{
    HANDLE hThread = NULL;
    //thread_handler=handler;
    //thread_parm=parm;
    //return 42; // PID :-)

    ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    PsCreateSystemThread(&hThread,
        THREAD_ALL_ACCESS,
        NULL,
        NULL,
        NULL,
        (PKSTART_ROUTINE)handler,
        parm);

    DPRINT1("usbcore: Created system thread %d\n", (int)hThread);

    return (int)hThread; // FIXME: Correct?
}

// Kill the process/thread created by my_kernel_thread()
int my_kill_proc(int pid, int signal, int unk)
{
    HANDLE hThread;

    // TODO: Implement actual thread termination; for now we only
    // close the handle returned by my_kernel_thread()
    hThread = (HANDLE)pid;
    ZwClose(hThread);

    return 0;
}
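
/* Illustrative sketch (assumption, not original code): how usbcore's hub
 * code would use the two routines above, treating the thread handle
 * returned by my_kernel_thread() as a "pid".
 */
#if 0
static int STDCALL example_hub_thread(void *parm)
{
    // poll hub events until asked to stop
    return 0;
}

static void example_thread_lifecycle(void)
{
    int pid = my_kernel_thread(example_hub_thread, NULL, 0);
    if (pid)
        my_kill_proc(pid, 0, 0); // signal is ignored; only closes the handle
}
#endif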

/*------------------------------------------------------------------------*/
/* Device management
 * As simple as possible, but as complete as necessary ...
 */
/*------------------------------------------------------------------------*/


/* calls probe function for hotplug (which does device matching), this is the
   only link between usbcore and the registered device drivers! */
int my_device_add(struct device *dev)
{
    int n,found=0;
    // Note: don't dereference m_drivers[] entries before checking drvs_num
    printk("drv_num %i\n",drvs_num);

    if (dev->driver)
    {
        if (dev->driver->probe)
            return dev->driver->probe(dev);
    }
    else
    {
        for(n=0;n<drvs_num;n++)
        {
            if (m_drivers[n]->probe)
            {
                dev->driver=m_drivers[n];
                printk("probe%i %p\n",n,m_drivers[n]->probe);

                if (m_drivers[n]->probe(dev) == 0)
                {
                    // return 0;
                    found=1;
                }
            }
        }
        if (found) return 0;
    }
    dev->driver=NULL;
    return -ENODEV;
}
/*------------------------------------------------------------------------*/
int my_driver_register(struct device_driver *driver)
{

    if (drvs_num<MAX_DRVS)
    {
        printk("driver_register %i: %p %p\n",drvs_num,driver,driver->probe);

        m_drivers[drvs_num++]=driver;
        return 0;
    }
    return -1;
}
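/* Illustrative sketch (assumption): a host controller driver registers its
 * struct device_driver once, and my_device_add() later walks m_drivers[]
 * to find a probe() that accepts the new device.
 */
#if 0
static int example_probe(struct device *dev)
{
    return 0; // 0 = device accepted
}

static struct device_driver example_driver = {
    .probe = example_probe,
};

static void example_register_and_probe(struct device *dev)
{
    my_driver_register(&example_driver);
    my_device_add(dev); // ends up calling example_probe(dev)
}
#endif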
/*------------------------------------------------------------------------*/
int my_device_unregister(struct device *dev)
{
    if (dev->driver && dev->driver->remove)
        dev->driver->remove(dev);
    return 0;
}
/*------------------------------------------------------------------------*/
struct device *my_get_device(struct device *dev)
{
    return NULL;
}
/*------------------------------------------------------------------------*/
void my_device_initialize(struct device *dev)
{
}
/*------------------------------------------------------------------------*/
void my_wake_up(PKEVENT evnt)
{
    need_wakeup=1;

    KeSetEvent(evnt, 0, FALSE); // Signal event
}
/*------------------------------------------------------------------------*/
void my_init_waitqueue_head(PKEVENT evnt)
{
    // this is used only in core/message.c, and it isn't needed there
    //KeInitializeEvent(evnt, NotificationEvent, TRUE); // signalled state
}
/*------------------------------------------------------------------------*/
/* wait until woken up (only one wait allowed!) */
extern unsigned int LAST_USB_IRQ;

int my_schedule_timeout(int x)
{
    LONGLONG HH;
    //LONGLONG temp;
    LARGE_INTEGER delay;
    //PULONG tmp_debug=NULL;
    //extern unsigned int LAST_USB_EVENT_TICK;

    //*tmp_debug = 0xFFAAFFAA;

    printk("schedule_timeout: %d ms\n", x);

    //delay.QuadPart = -x*10000; // convert to 100ns units
    //KeDelayExecutionThread(KernelMode, FALSE, &delay); //wait_us(1);

    /*
    x+=5; // safety
    x = x*1000; // to us format
    */
    x = 50; // it's enough for most purposes

    while(x>0)
    {
        KeQueryTickCount((LARGE_INTEGER *)&HH);//IoInputDword(0x8008);
        //temp = HH - LAST_USB_EVENT_TICK;

        //if (temp>(3579)) { //3579 = 1ms!
        //if (temp>1000) {
            do_all_timers();
        //    LAST_USB_EVENT_TICK = HH;
        //}

        handle_irqs(-1);

        if (need_wakeup)
            break;

        delay.QuadPart = -10;
        KeDelayExecutionThread(KernelMode, FALSE, &delay); //wait_us(1);
        x-=1;
        //DPRINT("schedule_timeout(): time left: %d\n", x);
    }
    need_wakeup=0;

    printk("schedule DONE!!!!!!\n");

    return 0;//x;
}
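/* Illustrative sketch (assumption, not original code): how the synchronous
 * usb_*_msg() emulation drives the busy-wait above. An URB completion
 * handler, dispatched from handle_irqs() inside the polling loop, calls
 * my_wake_up(), which sets need_wakeup and breaks the loop.
 */
#if 0
static PKEVENT example_done_event;

static int example_urb_complete(int irq, void *data, struct pt_regs *regs)
{
    my_wake_up(example_done_event); // sets need_wakeup -> loop exits
    return 0;
}

static void example_wait_for_urb(void)
{
    my_schedule_timeout(500); // nominal ms; the loop above caps it at 50 passes
}
#endif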
/*------------------------------------------------------------------------*/
void my_wait_for_completion(struct completion *x)
{
    LONGLONG HH;
    LONGLONG temp;
    LARGE_INTEGER delay;
    int n;

    extern unsigned int LAST_USB_EVENT_TICK;

    printk("wait for completion, x=0x%08x\n", (DWORD)x);

    n = 10;
    n = n*1000; // to us format

    while(!x->done && (n>0))
    {
        KeQueryTickCount((LARGE_INTEGER *)&HH);//IoInputDword(0x8008);
        temp = HH - LAST_USB_EVENT_TICK;

        //if (temp>(3579)) {
        if (temp>(1000)) {
            // do_all_timers();
            LAST_USB_EVENT_TICK = HH;
        }

        // handle_irqs(-1);

        delay.QuadPart = -10;
        KeDelayExecutionThread(KernelMode, FALSE, &delay); //wait_us(1);
        n--;
    }
    printk("wait for completion done %i\n",x->done);

}
/*------------------------------------------------------------------------*/
void my_init_completion(struct completion *x)
{
    x->done=0;
    KeInitializeEvent(&x->wait, NotificationEvent, FALSE);
}
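/* Illustrative round trip (assumption: the wrapper's complete() counterpart,
 * defined elsewhere, increments x->done and signals x->wait):
 */
#if 0
static void example_completion_round_trip(void)
{
    struct completion done;

    my_init_completion(&done); // done.done = 0, event not signalled
    // ... submit an URB whose callback calls complete(&done) ...
    my_wait_for_completion(&done); // polls until done.done != 0 or ~10000 passes
}
#endif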
/*------------------------------------------------------------------------*/
void my_interruptible_sleep_on(PKEVENT evnt)
{
    KeWaitForSingleObject(evnt, Executive, KernelMode, FALSE, NULL);
    KeClearEvent(evnt); // reset to not-signalled
}
/*------------------------------------------------------------------------*/
// Helper for pci_module_init
/*------------------------------------------------------------------------*/
int my_pci_module_init(struct pci_driver *x)
{
    struct pci_dev *dev=pci_probe_dev;
    const struct pci_device_id *id=NULL;
    if (!pci_probe_dev)
    {
        DPRINT1("PCI device not set!\n");
        return 0;
    }
    x->probe(dev, id);
    return 0;
}
/*------------------------------------------------------------------------*/
struct pci_dev *my_pci_find_slot(int a,int b)
{
    return NULL;
}
/*------------------------------------------------------------------------*/
int my_pci_write_config_word(struct pci_dev *dev, int where, u16 val)
{
    //dev->bus, dev->devfn, where, val
    PUSBMP_DEVICE_EXTENSION dev_ext = (PUSBMP_DEVICE_EXTENSION)dev->dev_ext;

    //FIXME: Is returning this value correct?
    //FIXME: Mixing pci_dev and win structs isn't a good thing at all
    return HalSetBusDataByOffset(PCIConfiguration, dev->bus->number, dev_ext->SystemIoSlotNumber, &val, where, sizeof(val));
}
/*------------------------------------------------------------------------*/
int my_request_irq(unsigned int irq,
                   int (*handler)(int,void *, struct pt_regs *),
                   unsigned long mode, const char *desc, void *data)
{
    if (num_irqs<MAX_IRQS)
    {
        reg_irqs[num_irqs].handler=handler;
        reg_irqs[num_irqs].irq=irq;
        reg_irqs[num_irqs].data=data;
        num_irqs++;
        return 0;
    }

    return 1;
}
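/* Illustrative sketch (assumption): a host controller driver registers its
 * interrupt handler here, and the top-level polling loop then drives it via
 * handle_irqs(-1) instead of a real device interrupt. The vector number and
 * names below are arbitrary.
 */
#if 0
static int example_hc_irq(int irq, void *data, struct pt_regs *regs)
{
    // read controller status, run completed URBs ...
    return 0;
}

static void example_irq_usage(void *controller)
{
    my_request_irq(10, example_hc_irq, 0, "usb-hc", controller);
    handle_irqs(-1); // -1 = dispatch all registered handlers
}
#endif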
/*------------------------------------------------------------------------*/
int my_free_irq(int irq, void* p)
{
    /* No free... */
    return 0;
}
/*------------------------------------------------------------------------*/
// Lookaside funcs
/*------------------------------------------------------------------------*/
kmem_cache_t *my_kmem_cache_create(const char *tag, size_t alloc_size,
                                   size_t offset, unsigned long flags,
                                   void *ctor,
                                   void *dtor)
{
    //TODO: Take into account ctor and dtor - callbacks for alloc/free, flags and offset
    //FIXME: We assume this cache is always NPaged
    PNPAGED_LOOKASIDE_LIST Lookaside;
    ULONG Tag=0x11223344; //FIXME: Make this from tag

    Lookaside = ExAllocatePool(NonPagedPool, sizeof(NPAGED_LOOKASIDE_LIST));
    if (!Lookaside)
        return NULL;

    ExInitializeNPagedLookasideList(
        Lookaside,
        NULL,
        NULL,
        0,
        alloc_size,
        Tag,
        0);

    return (kmem_cache_t *)Lookaside;
}
/*------------------------------------------------------------------------*/
BOOLEAN my_kmem_cache_destroy(kmem_cache_t *co)
{
    ExDeleteNPagedLookasideList((PNPAGED_LOOKASIDE_LIST)co);

    ExFreePool(co);
    return FALSE;
}
/*------------------------------------------------------------------------*/
void *my_kmem_cache_alloc(kmem_cache_t *co, int flags)
{
    return ExAllocateFromNPagedLookasideList((PNPAGED_LOOKASIDE_LIST)co);
}
/*------------------------------------------------------------------------*/
void my_kmem_cache_free(kmem_cache_t *co, void *ptr)
{
    ExFreeToNPagedLookasideList((PNPAGED_LOOKASIDE_LIST)co, ptr);
}
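/* Illustrative sketch (assumption): the kmem_cache wrappers above map a
 * Linux slab cache onto one NPAGED_LOOKASIDE_LIST; a fixed-size
 * create/alloc/free/destroy cycle looks like this. Cache name and block
 * size are made up.
 */
#if 0
static void example_cache_usage(void)
{
    kmem_cache_t *cache;
    void *obj;

    cache = my_kmem_cache_create("urb_cache", 64, 0, 0, NULL, NULL);
    if (!cache)
        return;

    obj = my_kmem_cache_alloc(cache, 0); // ExAllocateFromNPagedLookasideList
    if (obj)
        my_kmem_cache_free(cache, obj);  // back to the lookaside list

    my_kmem_cache_destroy(cache);
}
#endif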
/*------------------------------------------------------------------------*/
// DMA support routines
/*------------------------------------------------------------------------*/
#ifdef USB_DMA_SINGLE_SUPPORT
static IO_ALLOCATION_ACTION NTAPI MapRegisterCallback(PDEVICE_OBJECT DeviceObject,
                                                      PIRP Irp,
                                                      PVOID MapRegisterBase,
                                                      PVOID Context);
#endif

NTSTATUS
init_dma(PUSBMP_DEVICE_EXTENSION pDevExt)
{
    // Prepare device descriptor structure
    DEVICE_DESCRIPTION dd;
#ifdef USB_DMA_SINGLE_SUPPORT
    KEVENT DMAEvent;
    KIRQL OldIrql;
    NTSTATUS Status;
#endif

    RtlZeroMemory( &dd, sizeof(dd) );
    dd.Version = DEVICE_DESCRIPTION_VERSION;
    dd.Master = TRUE;
    dd.ScatterGather = TRUE;
    dd.DemandMode = FALSE;
    dd.AutoInitialize = FALSE;
    dd.Dma32BitAddresses = TRUE;
    dd.InterfaceType = PCIBus;
    dd.DmaChannel = 0;//pDevExt->dmaChannel;
    dd.MaximumLength = 128;//MAX_DMA_LENGTH;
    dd.DmaWidth = Width32Bits;
    dd.DmaSpeed = MaximumDmaSpeed;

    // The following taken from Win2k DDB:
    // "Compute the maximum number of mapping regs
    // this device could possibly need. Since the
    // transfer may not be paged aligned, add one
    // to allow the max xfer size to span a page."
    //pDevExt->mapRegisterCount = (MAX_DMA_LENGTH / PAGE_SIZE) + 1;

    // TODO: Free it somewhere (PutDmaAdapter)
    pDevExt->pDmaAdapter =
        IoGetDmaAdapter( pDevExt->PhysicalDeviceObject,
            &dd,
            &pDevExt->mapRegisterCount);

    DPRINT1("IoGetDmaAdapter done 0x%X, mapRegisterCount=%d\n", pDevExt->pDmaAdapter, pDevExt->mapRegisterCount);

    // Bail out if we couldn't get an adapter object
    if (pDevExt->pDmaAdapter == NULL)
        return STATUS_INSUFFICIENT_RESOURCES;

#ifdef USB_DMA_SINGLE_SUPPORT
    /* Allocate buffer now */
    pDevExt->BufferSize = pDevExt->mapRegisterCount * PAGE_SIZE;
    DPRINT1("Bufsize = %u\n", pDevExt->BufferSize);
    pDevExt->VirtualBuffer = pDevExt->pDmaAdapter->DmaOperations->AllocateCommonBuffer(
        pDevExt->pDmaAdapter, pDevExt->BufferSize, &pDevExt->Buffer, FALSE);
    DPRINT1("Bufsize = %u, Buffer = 0x%x\n", pDevExt->BufferSize, pDevExt->Buffer.LowPart);

    if (!pDevExt->VirtualBuffer)
    {
        DPRINT1("Could not allocate buffer\n");
        // should try again with smaller buffer...
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    DPRINT1("Calling IoAllocateMdl()\n");
    pDevExt->Mdl = IoAllocateMdl(pDevExt->VirtualBuffer, pDevExt->BufferSize, FALSE, FALSE, NULL);
    DPRINT1("Bufsize == %u\n", pDevExt->BufferSize);

    if (!pDevExt->Mdl)
    {
        DPRINT1("IoAllocateMdl() FAILED\n");
        //TODO: Free the HAL buffer
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    DPRINT1("VBuffer == 0x%x Mdl == %u Bufsize == %u\n", pDevExt->VirtualBuffer, pDevExt->Mdl, pDevExt->BufferSize);

    DPRINT1("Calling MmBuildMdlForNonPagedPool\n");
    MmBuildMdlForNonPagedPool(pDevExt->Mdl);


    /* Get map registers for DMA */
    KeInitializeEvent(&DMAEvent, SynchronizationEvent, FALSE);

    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
    // TODO: Free adapter channel somewhere
    Status = pDevExt->pDmaAdapter->DmaOperations->AllocateAdapterChannel(pDevExt->pDmaAdapter,
        pDevExt->PhysicalDeviceObject, pDevExt->mapRegisterCount, MapRegisterCallback, &DMAEvent);
    KeLowerIrql(OldIrql);

    if (Status != STATUS_SUCCESS)
    {
        // MapRegisterCallback will never run on failure, so check the
        // status before waiting on the event (waiting first would deadlock)
        DPRINT("init_dma(): unable to allocate adapter channels\n");
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    DPRINT1("VBuffer == 0x%x Bufsize == %u\n", pDevExt->VirtualBuffer, pDevExt->BufferSize);
    KeWaitForSingleObject(&DMAEvent, Executive, KernelMode, FALSE, NULL);
#endif
    return STATUS_SUCCESS;
}
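
/* Illustrative sketch (assumption, not part of the original file): the
 * cleanup counterpart the TODOs above ask for -- release the MDL, the
 * common buffer and the adapter object. The helper name is made up.
 */
#if 0
static void example_free_dma(PUSBMP_DEVICE_EXTENSION pDevExt)
{
#ifdef USB_DMA_SINGLE_SUPPORT
    if (pDevExt->Mdl)
        IoFreeMdl(pDevExt->Mdl);

    if (pDevExt->VirtualBuffer)
        pDevExt->pDmaAdapter->DmaOperations->FreeCommonBuffer(
            pDevExt->pDmaAdapter, pDevExt->BufferSize,
            pDevExt->Buffer, pDevExt->VirtualBuffer, FALSE);
#endif
    if (pDevExt->pDmaAdapter)
        pDevExt->pDmaAdapter->DmaOperations->PutDmaAdapter(pDevExt->pDmaAdapter);
}
#endif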

/*
 * FUNCTION: Acquire map registers in prep for DMA
 * ARGUMENTS:
 *     DeviceObject: unused
 *     Irp: unused
 *     MapRegisterBase: returned to blocked thread via a member var
 *     Context: contains a pointer to the right ControllerInfo
 *     struct
 * RETURNS:
 *     KeepObject, because that's what the DDK says to do
 */
#ifdef USB_DMA_SINGLE_SUPPORT
static IO_ALLOCATION_ACTION NTAPI MapRegisterCallback(PDEVICE_OBJECT DeviceObject,
                                                      PIRP Irp,
                                                      PVOID MapRegisterBase,
                                                      PVOID Context)
{
    PUSBMP_DEVICE_EXTENSION pDevExt = (PUSBMP_DEVICE_EXTENSION)DeviceObject->DeviceExtension;
    UNREFERENCED_PARAMETER(Irp);

    DPRINT("usb_linuxwrapper: MapRegisterCallback Called, base=0x%08x\n", MapRegisterBase);

    pDevExt->MapRegisterBase = MapRegisterBase;

    // signal that we are finished
    KeSetEvent(Context, 0, FALSE);

    return KeepObject;//DeallocateObjectKeepRegisters;
}
#endif

void *my_dma_pool_alloc(struct dma_pool *pool, int gfp_flags, dma_addr_t *dma_handle)
{
    // HalAllocCommonBuffer
    // But ideally IoGetDmaAdapter

    DPRINT1("dma_pool_alloc() called\n");
    return NULL;
}

/*
   pci_pool_create -- Creates a pool of pci consistent memory blocks, for dma.

   struct pci_pool * pci_pool_create (const char * name, struct pci_dev * pdev, size_t size, size_t align, size_t allocation, int flags);

   Arguments:
      name - name of pool, for diagnostics
      pdev - pci device that will be doing the DMA
      size - size of the blocks in this pool.
      align - alignment requirement for blocks; must be a power of two
      allocation - returned blocks won't cross this boundary (or zero)
      flags - SLAB_* flags (not all are supported).

   Description:
      Returns a pci allocation pool with the requested characteristics, or null if one can't be created.
      Given one of these pools, pci_pool_alloc may be used to allocate memory. Such memory will all have
      "consistent" DMA mappings, accessible by the device and its driver without using cache flushing
      primitives. The actual size of blocks allocated may be larger than requested because of alignment.
      If allocation is nonzero, objects returned from pci_pool_alloc won't cross that size boundary.
      This is useful for devices which have addressing restrictions on individual DMA transfers, such
      as not crossing boundaries of 4KBytes.
*/
struct pci_pool *my_pci_pool_create(const char * name, struct pci_dev * pdev, size_t size, size_t align, size_t allocation)
{
    struct pci_pool *retval;

    if (align == 0)
        align = 1;
    if (size == 0)
        return 0;
    else if (size < align)
        size = align;
    else if ((size % align) != 0) {
        // round size up to the next multiple of align (align is a power of two)
        size += align - 1;
        size &= ~(align - 1);
    }

    if (allocation == 0) {
        if (PAGE_SIZE < size)
            allocation = size;
        else
            allocation = PAGE_SIZE;
        // FIXME: round up for less fragmentation
    } else if (allocation < size)
        return 0;

    // Non-paged because the pool could be accessed at IRQL >= DISPATCH_LEVEL
    retval = ExAllocatePool(NonPagedPool, sizeof(struct pci_pool));
    if (!retval)
        return 0;

    // fill retval structure
    strncpy (retval->name, name, sizeof retval->name);
    retval->name[sizeof retval->name - 1] = 0;

    retval->allocation = allocation;
    retval->size = size;
    retval->blocks_per_page = allocation / size;
    retval->pdev = pdev;

    retval->pages_allocated = 0;
    retval->blocks_allocated = 0;

    DPRINT("pci_pool_create(): %s/%s size %d, %d/page (%d alloc)\n",
        pdev ? pdev->slot_name : "(null)", retval->name, size,
        retval->blocks_per_page, allocation);

    return retval;
}
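/* Worked example (illustrative; pool name and sizes are made up): with
 * size=60 and align=32, the rounding above yields size=64; allocation=0
 * defaults to PAGE_SIZE, so on a 4 KB page blocks_per_page = 4096/64 = 64.
 */
#if 0
static struct pci_pool *example_create_td_pool(struct pci_dev *pdev)
{
    return my_pci_pool_create("td_pool", pdev, 60, 32, 0);
}
#endif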

/*
   Name:
      pci_pool_alloc -- get a block of consistent memory

   Synopsis:
      void * pci_pool_alloc (struct pci_pool * pool, int mem_flags, dma_addr_t * handle);

   Arguments:
      pool - pci pool that will produce the block

      mem_flags - SLAB_KERNEL or SLAB_ATOMIC

      handle - pointer to dma address of block

   Description:
      This returns the kernel virtual address of a currently unused block, and reports its dma
      address through the handle. If such a memory block can't be allocated, null is returned.
*/
void * my_pci_pool_alloc(struct pci_pool * pool, int mem_flags, dma_addr_t *dma_handle)
{
    PVOID result;
    PUSBMP_DEVICE_EXTENSION devExt = (PUSBMP_DEVICE_EXTENSION)pool->pdev->dev_ext;
    int page=0, offset;
    int map, i, block;

    //DPRINT1("pci_pool_alloc() called, blocks already allocated=%d, dma_handle=%p\n", pool->blocks_allocated, dma_handle);
    //ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    if (pool->pages_allocated == 0)
    {
        // we need to allocate at least one page
        pool->pages[pool->pages_allocated].virtualAddress =
            devExt->pDmaAdapter->DmaOperations->AllocateCommonBuffer(devExt->pDmaAdapter,
            PAGE_SIZE, &pool->pages[pool->pages_allocated].dmaAddress, FALSE); //FIXME: Cache-enabled?

        if (!pool->pages[pool->pages_allocated].virtualAddress)
            return NULL;

        // mark all blocks as free (bit=set)
        memset(pool->pages[pool->pages_allocated].bitmap, 0xFF, 128*sizeof(unsigned long));

        /* FIXME: the next line replaces physical address by virtual address:
         * this change is needed to boot VMWare, but I'm really not sure this
         * change is correct!
         */
        //pool->pages[pool->pages_allocated].dmaAddress.QuadPart = (ULONG_PTR)pool->pages[pool->pages_allocated].virtualAddress;
        pool->pages_allocated++;
    }

    // search for a free block in all pages
    for (page=0; page<pool->pages_allocated; page++)
    {
        for (map=0,i=0; i < pool->blocks_per_page; i+= BITS_PER_LONG, map++)
        {
            if (pool->pages[page].bitmap[map] == 0)
                continue;

            block = ffz(~ pool->pages[page].bitmap[map]);

            if ((i + block) < pool->blocks_per_page)
            {
                //DPRINT("pci_pool_alloc(): Allocating block %p:%d:%d:%d\n", pool, page, map, block);
                clear_bit(block, &pool->pages[page].bitmap[map]);
                offset = (BITS_PER_LONG * map) + block;
                offset *= pool->size;
                goto ready;
            }
        }
    }

    //TODO: alloc page here then
    DPRINT1("Panic!! We need one more page to be allocated, and Fireball doesn't want to alloc it!\n");
    offset = 0;
    return NULL;

ready:
    *dma_handle = pool->pages[page].dmaAddress.QuadPart + offset;
    result = (char *)pool->pages[page].virtualAddress + offset;
    pool->blocks_allocated++;

    return result;
}

/*
   Name:
      pci_pool_free -- put block back into pci pool

   Synopsis:
      void pci_pool_free (struct pci_pool * pool, void * vaddr, dma_addr_t dma);

   Arguments:
      pool - the pci pool holding the block

      vaddr - virtual address of block

      dma - dma address of block

   Description:
      Caller promises neither device nor driver will again touch this block unless it is first re-allocated.
*/
void my_pci_pool_free (struct pci_pool * pool, void * vaddr, dma_addr_t dma)
{
    int page, block, map;

    // Find the page containing this block
    for (page=0; page<pool->pages_allocated; page++)
    {
        if (dma < pool->pages[page].dmaAddress.QuadPart)
            continue;
        if (dma < (pool->pages[page].dmaAddress.QuadPart + pool->allocation))
            break;
    }

    if (page == pool->pages_allocated)
    {
        DPRINT1("pci_pool_free(): block does not belong to this pool\n");
        return;
    }

    block = dma - pool->pages[page].dmaAddress.QuadPart;
    block /= pool->size;
    map = block / BITS_PER_LONG;
    block %= BITS_PER_LONG;

    // mark as free
    set_bit (block, &pool->pages[page].bitmap[map]);

    pool->blocks_allocated--;
    //DPRINT("pci_pool_free(): alloc'd: %d\n", pool->blocks_allocated);
}
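
/* Illustrative sketch (assumption): a full pool round trip with the two
 * routines above. The dma handle is what gets written into host controller
 * descriptors; the virtual pointer is what the driver touches from the CPU.
 */
#if 0
static void example_pool_round_trip(struct pci_pool *pool)
{
    dma_addr_t dma;
    void *td = my_pci_pool_alloc(pool, 0, &dma);

    if (td)
    {
        // ... hand `dma` to the host controller, use `td` from the CPU ...
        my_pci_pool_free(pool, td, dma); // marks the bitmap bit free again
    }
}
#endif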

/*
   pci_pool_destroy -- destroys a pool of pci memory blocks.

   Synopsis:
      void pci_pool_destroy (struct pci_pool * pool);

   Arguments:
      pool - pci pool that will be destroyed

   Description:
      Caller guarantees that no more memory from the pool is in use, and that nothing will try to
      use the pool after this call.
*/
void __inline__ my_pci_pool_destroy (struct pci_pool * pool)
{
    DPRINT1("pci_pool_destroy(): alloc'd: %d, UNIMPLEMENTED\n", pool->blocks_allocated);

    ExFreePool(pool);
}

void *my_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
{
    PUSBMP_DEVICE_EXTENSION devExt = (PUSBMP_DEVICE_EXTENSION)hwdev->dev_ext;
    DPRINT1("pci_alloc_consistent() size=%d\n", size);

    return devExt->pDmaAdapter->DmaOperations->AllocateCommonBuffer(devExt->pDmaAdapter, size, (PPHYSICAL_ADDRESS)dma_handle, FALSE); //FIXME: Cache-enabled?
}

dma_addr_t my_dma_map_single(struct device *hwdev, void *ptr, size_t size, enum dma_data_direction direction)
{
    //PHYSICAL_ADDRESS BaseAddress;
    //PUSBMP_DEVICE_EXTENSION pDevExt = (PUSBMP_DEVICE_EXTENSION)hwdev->dev_ext;
    //PUCHAR VirtualAddress = (PUCHAR) MmGetMdlVirtualAddress(pDevExt->Mdl);
    //ULONG transferSize = size;
    //BOOLEAN WriteToDevice;

    //DPRINT1("dma_map_single() ptr=0x%lx, size=0x%x, dir=%d\n", ptr, size, direction);
    /*ASSERT(pDevExt->BufferSize > size);

    // FIXME: It must be an error if a DMA_BIDIRECTIONAL transfer happens, since MSDN says
    // the buffer is locked
    if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
        WriteToDevice = TRUE;
    else
        WriteToDevice = FALSE;

    DPRINT1("IoMapTransfer\n");
    BaseAddress = pDevExt->pDmaAdapter->DmaOperations->MapTransfer(pDevExt->pDmaAdapter,
        pDevExt->Mdl,
        pDevExt->MapRegisterBase,
        (PUCHAR) MmGetMdlVirtualAddress(pDevExt->Mdl),
        &transferSize,
        WriteToDevice);

    if (WriteToDevice)
    {
        DPRINT1("Writing to the device...\n");
        memcpy(VirtualAddress, ptr, size);
    }
    else
    {
        DPRINT1("Reading from the device...\n");
        memcpy(ptr, VirtualAddress, size);
    }*/

    //DPRINT1("VBuffer == 0x%x (really 0x%x?) transf_size == %u\n", pDevExt->VirtualBuffer, MmGetPhysicalAddress(pDevExt->VirtualBuffer).LowPart, transferSize);
    //DPRINT1("VBuffer == 0x%x (really 0x%x?) transf_size == %u\n", ptr, MmGetPhysicalAddress(ptr).LowPart, transferSize);

    return MmGetPhysicalAddress(ptr).QuadPart;//BaseAddress.QuadPart; /* BIG HACK */
}

// 2.6 version of pci_unmap_single
//void my_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction)
void my_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction)
{
    //DPRINT1("dma_unmap_single() called, nothing to do\n");
    /* nothing yet */
}

void my_dma_sync_single(struct device *hwdev,
                        dma_addr_t dma_handle,
                        size_t size, int direction)
{
    DPRINT1("dma_sync_single() called, UNIMPLEMENTED\n");
    /* nothing yet */
}

void my_dma_sync_sg(struct device *hwdev,
                    struct scatterlist *sg,
                    int nelems, int direction)
{
    DPRINT1("dma_sync_sg() called, UNIMPLEMENTED\n");
    /* nothing yet */
}


int my_dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, enum dma_data_direction direction)
{
    DPRINT1("dma_map_sg() called, UNIMPLEMENTED\n");
    return 0;
}

void my_dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, enum dma_data_direction direction)
{
    DPRINT1("dma_unmap_sg() called, UNIMPLEMENTED\n");
    /* nothing yet */
}

/* forwarder to dma_ equivalent */
void my_pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
{
    my_dma_unmap_single(&hwdev->dev, dma_addr, size, direction);
}


/*------------------------------------------------------------------------*/
/* SPINLOCK routines                                                      */
/*------------------------------------------------------------------------*/
void my_spin_lock_init(spinlock_t *sl)
{
    KeInitializeSpinLock(&sl->SpinLock);
}

void my_spin_lock(spinlock_t *sl)
{
    //KeAcquireSpinLock(&sl->SpinLock, &sl->OldIrql);
}

void my_spin_unlock(spinlock_t *sl)
{
    //KeReleaseSpinLock(&sl->SpinLock, sl->OldIrql);
}

void my_spin_lock_irqsave(spinlock_t *sl, int flags)
{
    my_spin_lock(sl);