2 * Copyright (C) 2014 Michael Brown <mbrown@fensystems.co.uk>.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 FILE_LICENCE ( GPL2_OR_LATER );
25 #include <ipxe/malloc.h>
27 #include <ipxe/cpuid.h>
30 #include <ipxe/xenver.h>
31 #include <ipxe/xenmem.h>
32 #include <ipxe/xenstore.h>
33 #include <ipxe/xenbus.h>
34 #include <ipxe/xengrant.h>
47 * @ret rc Return status code
/* Locate the hypervisor's CPUID leaf base: scan the candidate leaf
 * range for the magic vendor signature, record the matching base in
 * hvm->cpuid_base, and read the interface version from the leaf at
 * (base + HVM_CPUID_VERSION).
 *
 * NOTE(review): this excerpt is incomplete — variable declarations
 * and the return statements are not visible here. */
49 static int hvm_cpuid_base ( struct hvm_device *hvm ) {
/* Packed aggregate receiving the EBX/ECX/EDX signature bytes, so that
 * it can be memcmp()ed directly against HVM_CPUID_MAGIC */
54 } __attribute__ (( packed )) signature;
62 /* Scan for magic signature */
63 for ( base = HVM_CPUID_MIN ; base <= HVM_CPUID_MAX ;
64 base += HVM_CPUID_STEP ) {
/* Signature is carried in EBX:ECX:EDX; EAX is discarded */
65 cpuid ( base, &discard_eax, &signature.ebx, &signature.ecx,
67 if ( memcmp ( &signature, HVM_CPUID_MAGIC,
68 sizeof ( signature ) ) == 0 ) {
/* Record discovered base for subsequent CPUID queries */
69 hvm->cpuid_base = base;
/* Version leaf: major in bits 31:16, minor in bits 15:0 */
70 cpuid ( ( base + HVM_CPUID_VERSION ), &version,
71 &discard_ebx, &discard_ecx, &discard_edx );
72 DBGC2 ( hvm, "HVM using CPUID base %#08x (v%d.%d)\n",
73 base, ( version >> 16 ), ( version & 0xffff ) );
/* No candidate leaf matched: hypervisor not found */
78 DBGC ( hvm, "HVM could not find hypervisor\n" );
83 * Map hypercall page(s)
86 * @ret rc Return status code
/* Map the Xen hypercall page(s): query the page count and target MSR
 * via CPUID, allocate page-aligned memory for the hypercall
 * trampolines, install its physical address via the MSR, and verify
 * the mechanism works by issuing version hypercalls.
 *
 * NOTE(review): this excerpt is incomplete — some declarations,
 * error-path cleanup and return statements are not visible here. */
88 static int hvm_map_hypercall ( struct hvm_device *hvm ) {
93 physaddr_t hypercall_phys;
/* NOTE(review): declared static, presumably to keep the buffer out of
 * the caller's stack frame — confirm against upstream rationale */
95 static xen_extraversion_t extraversion;
99 /* Get number of hypercall pages and MSR to use */
100 cpuid ( ( hvm->cpuid_base + HVM_CPUID_PAGES ), &pages, &msr,
101 &discard_ecx, &discard_edx );
/* Allocate page-aligned buffer to hold all hypercall pages */
104 hvm->hypercall_len = ( pages * PAGE_SIZE );
105 hvm->xen.hypercall = malloc_dma ( hvm->hypercall_len, PAGE_SIZE );
106 if ( ! hvm->xen.hypercall ) {
107 DBGC ( hvm, "HVM could not allocate %d hypercall page(s)\n",
111 hypercall_phys = virt_to_phys ( hvm->xen.hypercall );
112 DBGC2 ( hvm, "HVM hypercall page(s) at [%#08lx,%#08lx) via MSR %#08x\n",
113 hypercall_phys, ( hypercall_phys + hvm->hypercall_len ), msr );
/* Tell the hypervisor (via the CPUID-advertised MSR) where to place
 * the hypercall trampoline code */
116 wrmsr ( msr, hypercall_phys );
118 /* Check that hypercall mechanism is working */
119 version = xenver_version ( &hvm->xen );
120 if ( ( xenrc = xenver_extraversion ( &hvm->xen, &extraversion ) ) != 0){
121 rc = -EXEN ( xenrc );
122 DBGC ( hvm, "HVM could not get extraversion: %s\n",
126 DBGC2 ( hvm, "HVM found Xen version %d.%d%s\n",
127 ( version >> 16 ), ( version & 0xffff ) , extraversion );
133 * Unmap hypercall page(s)
/* Release the hypercall page(s) allocated by hvm_map_hypercall() */
137 static void hvm_unmap_hypercall ( struct hvm_device *hvm ) {
/* Free the page-aligned buffer allocated with malloc_dma() */
140 free_dma ( hvm->xen.hypercall, hvm->hypercall_len );
144 * Allocate and map MMIO space
147 * @v space Source mapping space
148 * @v len Length (must be a multiple of PAGE_SIZE)
149 * @ret mmio MMIO space address, or NULL on error
/* Allocate a chunk of the device's PCI MMIO window and graft it into
 * the guest physical address space in the requested mapping space
 * (e.g. shared info page, grant table), page by page.  On a partial
 * failure, pages already added are removed again in reverse order.
 *
 * NOTE(review): this excerpt is incomplete — some declarations, the
 * success return and the error labels are not visible here. */
151 static void * hvm_ioremap ( struct hvm_device *hvm, unsigned int space,
153 struct xen_add_to_physmap add;
154 struct xen_remove_from_physmap remove;
155 unsigned int pages = ( len / PAGE_SIZE );
156 physaddr_t mmio_phys;
/* Length must be a whole number of pages */
163 assert ( ( len % PAGE_SIZE ) == 0 );
165 /* Check for available space */
166 if ( ( hvm->mmio_offset + len ) > hvm->mmio_len ) {
167 DBGC ( hvm, "HVM could not allocate %zd bytes of MMIO space "
168 "(%zd of %zd remaining)\n", len,
169 ( hvm->mmio_len - hvm->mmio_offset ), hvm->mmio_len );
/* Map the next unused chunk of the MMIO window */
174 mmio = ioremap ( ( hvm->mmio + hvm->mmio_offset ), len );
176 DBGC ( hvm, "HVM could not map MMIO space [%08lx,%08lx)\n",
177 ( hvm->mmio + hvm->mmio_offset ),
178 ( hvm->mmio + hvm->mmio_offset + len ) );
181 mmio_phys = virt_to_phys ( mmio );
183 /* Add to physical address space */
184 for ( i = 0 ; i < pages ; i++ ) {
185 add.domid = DOMID_SELF;
188 add.gpfn = ( ( mmio_phys / PAGE_SIZE ) + i );
189 if ( ( xenrc = xenmem_add_to_physmap ( &hvm->xen, &add ) ) !=0){
190 rc = -EXEN ( xenrc );
191 DBGC ( hvm, "HVM could not add space %d idx %d at "
192 "[%08lx,%08lx): %s\n", space, i,
193 ( mmio_phys + ( i * PAGE_SIZE ) ),
194 ( mmio_phys + ( ( i + 1 ) * PAGE_SIZE ) ),
196 goto err_add_to_physmap;
/* Consume this chunk of the MMIO window */
201 hvm->mmio_offset += len;
/* Error unwind: remove already-added pages in reverse order */
207 for ( i-- ; ( signed int ) i >= 0 ; i-- ) {
208 remove.domid = DOMID_SELF;
/* BUGFIX: was "add.gpfn = ..." — that left remove.gpfn uninitialized
 * while &remove is passed to xenmem_remove_from_physmap() below,
 * which is undefined behaviour and would remove a garbage page */
209 remove.gpfn = ( ( mmio_phys / PAGE_SIZE ) + i );
210 xenmem_remove_from_physmap ( &hvm->xen, &remove );
222 * @v mmio MMIO space address
223 * @v len Length (must be a multiple of PAGE_SIZE)
/* Undo hvm_ioremap(): remove each mapped page from the guest physical
 * address space and unmap the MMIO region.
 *
 * NOTE(review): this excerpt is incomplete — the actual unmap call
 * under "Unmap this space" and some declarations are not visible. */
225 static void hvm_iounmap ( struct hvm_device *hvm, void *mmio, size_t len ) {
226 struct xen_remove_from_physmap remove;
227 physaddr_t mmio_phys = virt_to_phys ( mmio );
228 unsigned int pages = ( len / PAGE_SIZE );
233 /* Unmap this space */
236 /* Remove from physical address space */
237 for ( i = 0 ; i < pages ; i++ ) {
238 remove.domid = DOMID_SELF;
239 remove.gpfn = ( ( mmio_phys / PAGE_SIZE ) + i );
240 if ( ( xenrc = xenmem_remove_from_physmap ( &hvm->xen,
242 rc = -EXEN ( xenrc );
243 DBGC ( hvm, "HVM could not remove space [%08lx,%08lx): "
244 "%s\n", ( mmio_phys + ( i * PAGE_SIZE ) ),
245 ( mmio_phys + ( ( i + 1 ) * PAGE_SIZE ) ),
/* Failure is logged but deliberately not propagated: this is a
 * teardown path with no caller able to act on the error */
247 /* Nothing we can do about this */
253 * Map shared info page
256 * @ret rc Return status code
/* Map the hypervisor shared info page into hvm->xen.shared via
 * hvm_ioremap(), and sanity-check the mapping by reading the
 * wallclock seconds field from it.
 *
 * NOTE(review): excerpt is incomplete — return statements and error
 * labels are not visible here. */
258 static int hvm_map_shared_info ( struct hvm_device *hvm ) {
259 physaddr_t shared_info_phys;
262 /* Map shared info page */
263 hvm->xen.shared = hvm_ioremap ( hvm, XENMAPSPACE_shared_info,
265 if ( ! hvm->xen.shared ) {
269 shared_info_phys = virt_to_phys ( hvm->xen.shared );
270 DBGC2 ( hvm, "HVM shared info page at [%#08lx,%#08lx)\n",
271 shared_info_phys, ( shared_info_phys + PAGE_SIZE ) );
/* Reading wc_sec through the mapping doubles as a smoke test that
 * the shared info page is actually accessible */
274 DBGC2 ( hvm, "HVM wallclock time is %d\n",
275 readl ( &hvm->xen.shared->wc_sec ) );
/* Error path: tear down the single-page mapping */
279 hvm_iounmap ( hvm, hvm->xen.shared, PAGE_SIZE );
285 * Unmap shared info page
/* Release the single-page shared info mapping created by
 * hvm_map_shared_info() */
289 static void hvm_unmap_shared_info ( struct hvm_device *hvm ) {
291 /* Unmap shared info page */
292 hvm_iounmap ( hvm, hvm->xen.shared, PAGE_SIZE );
299 * @ret rc Return status code
/* Initialise the grant table subsystem (which determines
 * hvm->xen.grant.len) and map the grant table itself via
 * hvm_ioremap().
 *
 * NOTE(review): excerpt is incomplete — return statements and error
 * labels are not visible here. */
301 static int hvm_map_grant ( struct hvm_device *hvm ) {
302 physaddr_t grant_phys;
305 /* Initialise grant table */
306 if ( ( rc = xengrant_init ( &hvm->xen ) ) != 0 ) {
307 DBGC ( hvm, "HVM could not initialise grant table: %s\n",
/* Map grant.len bytes (set up by xengrant_init() above) */
312 /* Map grant table */
313 hvm->xen.grant.table = hvm_ioremap ( hvm, XENMAPSPACE_grant_table,
314 hvm->xen.grant.len );
315 if ( ! hvm->xen.grant.table )
318 grant_phys = virt_to_phys ( hvm->xen.grant.table );
319 DBGC2 ( hvm, "HVM mapped grant table at [%08lx,%08lx)\n",
320 grant_phys, ( grant_phys + hvm->xen.grant.len ) );
/* Release the grant table mapping created by hvm_map_grant() */
329 static void hvm_unmap_grant ( struct hvm_device *hvm ) {
331 /* Unmap grant table */
332 hvm_iounmap ( hvm, hvm->xen.grant.table, hvm->xen.grant.len );
339 * @ret rc Return status code
/* Connect to XenStore: fetch its event channel and page frame number
 * via HVM parameters, map the XenStore interface page, and verify the
 * connection by reading this domain's "name" key.
 *
 * NOTE(review): excerpt is incomplete — some declarations, goto
 * targets and return statements are not visible here. */
341 static int hvm_map_xenstore ( struct hvm_device *hvm ) {
342 uint64_t xenstore_evtchn;
343 uint64_t xenstore_pfn;
344 physaddr_t xenstore_phys;
349 /* Get XenStore event channel */
350 if ( ( xenrc = xen_hvm_get_param ( &hvm->xen, HVM_PARAM_STORE_EVTCHN,
351 &xenstore_evtchn ) ) != 0 ) {
352 rc = -EXEN ( xenrc );
353 DBGC ( hvm, "HVM could not get XenStore event channel: %s\n",
/* Narrowing assignment from the 64-bit parameter value */
357 hvm->xen.store.port = xenstore_evtchn;
359 /* Get XenStore PFN */
360 if ( ( xenrc = xen_hvm_get_param ( &hvm->xen, HVM_PARAM_STORE_PFN,
361 &xenstore_pfn ) ) != 0 ) {
362 rc = -EXEN ( xenrc );
363 DBGC ( hvm, "HVM could not get XenStore PFN: %s\n",
/* Convert page frame number to a physical address */
367 xenstore_phys = ( xenstore_pfn * PAGE_SIZE );
370 hvm->xen.store.intf = ioremap ( xenstore_phys, PAGE_SIZE );
371 if ( ! hvm->xen.store.intf ) {
372 DBGC ( hvm, "HVM could not map XenStore at [%08lx,%08lx)\n",
373 xenstore_phys, ( xenstore_phys + PAGE_SIZE ) );
376 DBGC2 ( hvm, "HVM mapped XenStore at [%08lx,%08lx) with event port "
377 "%d\n", xenstore_phys, ( xenstore_phys + PAGE_SIZE ),
378 hvm->xen.store.port );
380 /* Check that XenStore is working */
381 if ( ( rc = xenstore_read ( &hvm->xen, &name, "name", NULL ) ) != 0 ) {
382 DBGC ( hvm, "HVM could not read domain name: %s\n",
386 DBGC2 ( hvm, "HVM running in domain \"%s\"\n", name );
/* Release the XenStore interface page mapping created by
 * hvm_map_xenstore() */
397 static void hvm_unmap_xenstore ( struct hvm_device *hvm ) {
400 iounmap ( hvm->xen.store.intf );
407 * @ret rc Return status code
/* PCI probe entry point: allocate the per-device structure, record
 * the MMIO BAR, attach to the hypervisor (hypercalls, shared info,
 * grant table, XenStore) in order, then probe the Xen bus for child
 * devices.  Error handling uses the usual goto-unwind pattern; the
 * unwind steps mirror hvm_remove().
 *
 * NOTE(review): excerpt is incomplete — allocation-failure handling,
 * goto labels and return statements are not visible here. */
409 static int hvm_probe ( struct pci_device *pci ) {
410 struct hvm_device *hvm;
413 /* Allocate and initialise structure */
414 hvm = zalloc ( sizeof ( *hvm ) );
/* Record the MMIO window used by hvm_ioremap() for Xen mappings */
419 hvm->mmio = pci_bar_start ( pci, HVM_MMIO_BAR );
420 hvm->mmio_len = pci_bar_size ( pci, HVM_MMIO_BAR );
421 DBGC2 ( hvm, "HVM has MMIO space [%08lx,%08lx)\n",
422 hvm->mmio, ( hvm->mmio + hvm->mmio_len ) );
424 /* Fix up PCI device */
425 adjust_pci_device ( pci );
/* Attachment order matters: each step depends on the previous one
 * (hypercalls before shared info, grant table before XenStore use) */
427 /* Attach to hypervisor */
428 if ( ( rc = hvm_cpuid_base ( hvm ) ) != 0 )
430 if ( ( rc = hvm_map_hypercall ( hvm ) ) != 0 )
431 goto err_map_hypercall;
432 if ( ( rc = hvm_map_shared_info ( hvm ) ) != 0 )
433 goto err_map_shared_info;
434 if ( ( rc = hvm_map_grant ( hvm ) ) != 0 )
436 if ( ( rc = hvm_map_xenstore ( hvm ) ) != 0 )
437 goto err_map_xenstore;
439 /* Probe Xen devices */
440 if ( ( rc = xenbus_probe ( &hvm->xen, &pci->dev ) ) != 0 ) {
441 DBGC ( hvm, "HVM could not probe Xen bus: %s\n",
443 goto err_xenbus_probe;
446 pci_set_drvdata ( pci, hvm );
/* Error unwind: tear down in reverse order of attachment */
449 xenbus_remove ( &hvm->xen, &pci->dev );
451 hvm_unmap_xenstore ( hvm );
453 hvm_unmap_grant ( hvm );
455 hvm_unmap_shared_info ( hvm );
457 hvm_unmap_hypercall ( hvm );
/* PCI remove entry point: detach from the hypervisor in strict
 * reverse order of hvm_probe()'s attachment sequence */
470 static void hvm_remove ( struct pci_device *pci ) {
471 struct hvm_device *hvm = pci_get_drvdata ( pci );
473 xenbus_remove ( &hvm->xen, &pci->dev );
474 hvm_unmap_xenstore ( hvm );
475 hvm_unmap_grant ( hvm );
476 hvm_unmap_shared_info ( hvm );
477 hvm_unmap_hypercall ( hvm );
481 /** PCI device IDs */
/* Vendor 0x5853 with device IDs 0x0001/0x0002 — NOTE(review):
 * presumably the Xen platform PCI device IDs; confirm against the
 * Xen platform device documentation */
482 static struct pci_device_id hvm_ids[] = {
483 PCI_ROM ( 0x5853, 0x0001, "hvm", "hvm", 0 ),
484 PCI_ROM ( 0x5853, 0x0002, "hvm2", "hvm2", 0 ),
/* PCI driver registration binding the ID table above to the
 * probe/remove entry points.
 * NOTE(review): excerpt is incomplete — the .ids and .probe
 * initialisers are not visible here. */
488 struct pci_driver hvm_driver __pci_driver = {
/* Element count derived from the array itself, so the table can grow
 * without touching this initialiser */
490 .id_count = ( sizeof ( hvm_ids ) / sizeof ( hvm_ids[0] ) ),
492 .remove = hvm_remove,
495 /* Drag in netfront driver */
/* Force linkage of the netfront object so that Xen network devices
 * discovered via xenbus_probe() have a driver available */
496 REQUIRE_OBJECT ( netfront );