/*
 * SCLP Support
 *
 * Copyright IBM, Corp. 2012
 *
 * Authors:
 *  Christian Borntraeger <borntraeger@de.ibm.com>
 *  Heinz Graalfs <graalfs@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version.  See the COPYING file in the top-level directory.
 *
 */

#include "cpu.h"
#include "sysemu/kvm.h"
#include "exec/memory.h"
#include "sysemu/sysemu.h"
#include "exec/address-spaces.h"
#include "qemu/config-file.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/event-facility.h"
#include "hw/s390x/s390-pci-bus.h"

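/*
 * The event facility is created as a child property of the machine object
 * (see s390_sclp_init()); for a child property, op->opaque points at the
 * child object itself.
 */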
static inline SCLPEventFacility *get_event_facility(void)
{
    ObjectProperty *op = object_property_find(qdev_get_machine(),
                                              TYPE_SCLP_EVENT_FACILITY,
                                              NULL);
    assert(op);
    return op->opaque;
}

/* Provide information about the configuration, CPUs and storage */
static void read_SCP_info(SCCB *sccb)
{
    ReadInfo *read_info = (ReadInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    CPUState *cpu;
    int cpu_count = 0;
    int i = 0;
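    /* log2 of the smallest storage increment, 1 MB (2^20) */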
    int increment_size = 20;
    int rnsize, rnmax;
    QemuOpts *opts = qemu_opts_find(qemu_find_opts("memory"), NULL);
    int slots = qemu_opt_get_number(opts, "slots", 0);
    int max_avail_slots = s390_get_memslot_count(kvm_state);

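    /* never report more hotplug slots than KVM memslots can back */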
    if (slots > max_avail_slots) {
        slots = max_avail_slots;
    }

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    /* CPU information */
    read_info->entries_cpu = cpu_to_be16(cpu_count);
    read_info->offset_cpu = cpu_to_be16(offsetof(ReadInfo, entries));
    read_info->highest_cpu = cpu_to_be16(max_cpus);

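    /* one entry per online CPU; the entry type is always zero here */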
    for (i = 0; i < cpu_count; i++) {
        read_info->entries[i].address = i;
        read_info->entries[i].type = 0;
    }

    read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO |
                                        SCLP_HAS_PCI_RECONFIG);

    /*
     * The storage increment size is a multiple of 1M and is a power of 2.
     * The number of storage increments must be MAX_STORAGE_INCREMENTS or
     * fewer.
     */
    while ((ram_size >> increment_size) > MAX_STORAGE_INCREMENTS) {
        increment_size++;
    }
    rnmax = ram_size >> increment_size;

    /* Memory Hotplug is only supported for the ccw machine type */
    if (mhd) {
        while ((mhd->standby_mem_size >> increment_size) >
               MAX_STORAGE_INCREMENTS) {
            increment_size++;
        }
        assert(increment_size == mhd->increment_size);

        mhd->standby_subregion_size = MEM_SECTION_SIZE;
        /*
         * Grow the (power of 2) subregion size until the standby area fits
         * into the remaining slots; one slot is already used for core memory.
         */
        if (slots > 0) {
            while ((mhd->standby_subregion_size * (slots - 1)
                    < mhd->standby_mem_size)) {
                mhd->standby_subregion_size = mhd->standby_subregion_size << 1;
            }
        }
        /*
         * Initialize mapping of guest standby memory sections indicating which
         * are and are not online. Assume all standby memory begins offline.
         */
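        /*
         * The map holds one byte per MEM_SECTION_SIZE section, rounded up
         * to cover whole subregions.
         */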
        if (!mhd->standby_state_map) {
            if (mhd->standby_mem_size % mhd->standby_subregion_size) {
                mhd->standby_state_map = g_malloc0((mhd->standby_mem_size /
                                             mhd->standby_subregion_size + 1) *
                                             (mhd->standby_subregion_size /
                                             MEM_SECTION_SIZE));
            } else {
                mhd->standby_state_map = g_malloc0(mhd->standby_mem_size /
                                                   MEM_SECTION_SIZE);
            }
        }
        mhd->padded_ram_size = ram_size + mhd->pad_size;
        mhd->rzm = 1 << mhd->increment_size;
        rnmax = ((ram_size + mhd->standby_mem_size + mhd->pad_size)
                 >> mhd->increment_size);

        read_info->facilities |= cpu_to_be64(SCLP_FC_ASSIGN_ATTACH_READ_STOR);
    }

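    /*
     * Values that do not fit into the short legacy fields (rnsize is one
     * byte, rnmax two bytes) are reported via rnsize2/rnmax2 instead.
     */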
    rnsize = 1 << (increment_size - 20);
    if (rnsize <= 128) {
        read_info->rnsize = rnsize;
    } else {
        read_info->rnsize = 0;
        read_info->rnsize2 = cpu_to_be32(rnsize);
    }

    if (rnmax < 0x10000) {
        read_info->rnmax = cpu_to_be16(rnmax);
    } else {
        read_info->rnmax = cpu_to_be16(0);
        read_info->rnmax2 = cpu_to_be64(rnmax);
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

static void read_storage_element0_info(SCCB *sccb)
{
    int i, assigned;
    int subincrement_id = SCLP_STARTING_SUBINCREMENT_ID;
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    assert(mhd);

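    /* more than 0xffff increments cannot be described in the 16-bit fields */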
    if ((ram_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding core memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    assigned = ram_size >> mhd->increment_size;
    storage_info->assigned = cpu_to_be16(assigned);

    for (i = 0; i < assigned; i++) {
        storage_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

static void read_storage_element1_info(SCCB *sccb)
{
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    assert(mhd);

    if ((mhd->standby_mem_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding standby memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    storage_info->assigned = cpu_to_be16(mhd->standby_mem_size >>
                                         mhd->increment_size);
    storage_info->standby = cpu_to_be16(mhd->standby_mem_size >>
                                        mhd->increment_size);
    sccb->h.response_code = cpu_to_be16(SCLP_RC_STANDBY_READ_COMPLETION);
}

static void attach_storage_element(SCCB *sccb, uint16_t element)
{
    int i, assigned, subincrement_id;
    AttachStorageElement *attach_info = (AttachStorageElement *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    assert(mhd);

    if (element != 1) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

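    /*
     * Standby subincrement ids continue where the core ids reported by
     * read_storage_element0_info() leave off.
     */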
    assigned = mhd->standby_mem_size >> mhd->increment_size;
    attach_info->assigned = cpu_to_be16(assigned);
    subincrement_id = ((ram_size >> mhd->increment_size) << 16)
                      + SCLP_STARTING_SUBINCREMENT_ID;
    for (i = 0; i < assigned; i++) {
        attach_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

static void assign_storage(SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    uint64_t this_subregion_size;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    assert(mhd);
    /* the increment number is stored big-endian in the SCCB */
    ram_addr_t assign_addr = (be16_to_cpu(assign_info->rn) - 1) * mhd->rzm;
    MemoryRegion *sysmem = get_system_memory();

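    /* only section-aligned addresses inside the standby area are valid */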
    if ((assign_addr % MEM_SECTION_SIZE == 0) &&
        (assign_addr >= mhd->padded_ram_size)) {
        /* Re-use existing memory region if found */
        mr = memory_region_find(sysmem, assign_addr, 1).mr;
        if (!mr) {
            MemoryRegion *standby_ram = g_new(MemoryRegion, 1);

            /* align assign_addr down to the start of its standby subregion */
            ram_addr_t offset = assign_addr -
                                (assign_addr - mhd->padded_ram_size)
                                % mhd->standby_subregion_size;

            /* strlen("standby.ram") + up to 4 digits (KVM memory slots) + NUL */
            char id[16];
            snprintf(id, 16, "standby.ram%d",
                     (int)((offset - mhd->padded_ram_size) /
                     mhd->standby_subregion_size) + 1);

            /* Allocate a subregion of the calculated standby_subregion_size */
            if (offset + mhd->standby_subregion_size >
                mhd->padded_ram_size + mhd->standby_mem_size) {
                this_subregion_size = mhd->padded_ram_size +
                                      mhd->standby_mem_size - offset;
            } else {
                this_subregion_size = mhd->standby_subregion_size;
            }

            memory_region_init_ram(standby_ram, NULL, id, this_subregion_size,
                                   &error_abort);
            vmstate_register_ram_global(standby_ram);
            memory_region_add_subregion(sysmem, offset, standby_ram);
        }
        /* The specified subregion is no longer in standby */
        mhd->standby_state_map[(assign_addr - mhd->padded_ram_size)
                               / MEM_SECTION_SIZE] = 1;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

static void unassign_storage(SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    assert(mhd);
    /* the increment number is stored big-endian in the SCCB */
    ram_addr_t unassign_addr = (be16_to_cpu(assign_info->rn) - 1) * mhd->rzm;
    MemoryRegion *sysmem = get_system_memory();

    /* only act on section-aligned addresses inside the standby area */
    if ((unassign_addr % MEM_SECTION_SIZE == 0) &&
        (unassign_addr >= mhd->padded_ram_size)) {
        mhd->standby_state_map[(unassign_addr -
                           mhd->padded_ram_size) / MEM_SECTION_SIZE] = 0;

        /* find the specified memory region and destroy it */
        mr = memory_region_find(sysmem, unassign_addr, 1).mr;
        if (mr) {
            int i;
            int is_removable = 1;
            ram_addr_t map_offset = (unassign_addr - mhd->padded_ram_size -
                                     (unassign_addr - mhd->padded_ram_size)
                                     % mhd->standby_subregion_size);
            /*
             * The region may only be removed once every section it covers
             * has been returned to standby.
             */
            for (i = 0;
                 i < (mhd->standby_subregion_size / MEM_SECTION_SIZE);
                 i++) {
                if (mhd->standby_state_map[i + map_offset / MEM_SECTION_SIZE]) {
                    is_removable = 0;
                    break;
                }
            }
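            /*
             * The MemoryRegion was allocated with g_new() and initialized in
             * place, so the struct itself must be freed after unparenting.
             */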
            if (is_removable) {
                memory_region_del_subregion(sysmem, mr);
                object_unparent(OBJECT(mr));
                g_free(mr);
            }
        }
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}

/* Provide information about the CPU */
static void sclp_read_cpu_info(SCCB *sccb)
{
    ReadCpuInfo *cpu_info = (ReadCpuInfo *) sccb;
    CPUState *cpu;
    int cpu_count = 0;
    int i = 0;

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    cpu_info->nr_configured = cpu_to_be16(cpu_count);
    cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries));
    cpu_info->nr_standby = cpu_to_be16(0);

    /*
     * The standby entries follow the configured entries.  Compute the
     * offset from host-endian values; the struct fields above are already
     * big-endian and must not be used in arithmetic directly.
     */
    cpu_info->offset_standby = cpu_to_be16(offsetof(ReadCpuInfo, entries)
        + cpu_count * sizeof(CPUEntry));

    for (i = 0; i < cpu_count; i++) {
        cpu_info->entries[i].address = i;
        cpu_info->entries[i].type = 0;
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}

static void sclp_execute(SCCB *sccb, uint32_t code)
{
    SCLPEventFacility *ef = get_event_facility();
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    switch (code & SCLP_CMD_CODE_MASK) {
    case SCLP_CMDW_READ_SCP_INFO:
    case SCLP_CMDW_READ_SCP_INFO_FORCED:
        read_SCP_info(sccb);
        break;
    case SCLP_CMDW_READ_CPU_INFO:
        sclp_read_cpu_info(sccb);
        break;
    case SCLP_READ_STORAGE_ELEMENT_INFO:
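        /* the storage element number lives in bits 8-15 of the command */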
        if (code & 0xff00) {
            read_storage_element1_info(sccb);
        } else {
            read_storage_element0_info(sccb);
        }
        break;
    case SCLP_ATTACH_STORAGE_ELEMENT:
        attach_storage_element(sccb, (code & 0xff00) >> 8);
        break;
    case SCLP_ASSIGN_STORAGE:
        assign_storage(sccb);
        break;
    case SCLP_UNASSIGN_STORAGE:
        unassign_storage(sccb);
        break;
    case SCLP_CMDW_CONFIGURE_PCI:
        s390_pci_sclp_configure(1, sccb);
        break;
    case SCLP_CMDW_DECONFIGURE_PCI:
        s390_pci_sclp_configure(0, sccb);
        break;
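    /* all other commands are handled by the event facility */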
    default:
        efc->command_handler(ef, sccb, code);
        break;
    }
}

int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code)
{
    int r = 0;
    SCCB work_sccb;

    hwaddr sccb_len = sizeof(SCCB);

    /* some basic checks; failures are reported as program exceptions */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        r = -PGM_PRIVILEGED;
        goto out;
    }
    if (cpu_physical_memory_is_io(sccb)) {
        r = -PGM_ADDRESSING;
        goto out;
    }
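    /*
     * The SCCB must be 8-byte aligned, below 2 GB, and must not overlap
     * the lowcore or the prefix area.
     */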
    if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa
        || (sccb & ~0x7ffffff8UL) != 0) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    /*
     * We want to work on a private copy of the sccb, to prevent guests
     * from playing dirty tricks by modifying the memory content after
     * the host has checked the values.
     */
    cpu_physical_memory_read(sccb, &work_sccb, sccb_len);

    /* Valid sccb sizes */
    if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) ||
        be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    sclp_execute((SCCB *)&work_sccb, code);

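    /* write the (possibly modified) SCCB back to guest memory */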
    cpu_physical_memory_write(sccb, &work_sccb,
                              be16_to_cpu(work_sccb.h.length));

    sclp_service_interrupt(sccb);

out:
    return r;
}

void sclp_service_interrupt(uint32_t sccb)
{
    SCLPEventFacility *ef = get_event_facility();
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

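    /* the low two bits of the parameter are flag bits, not address bits */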
    uint32_t param = sccb & ~3;

    /* Indicate whether an event is still pending */
    param |= efc->event_pending(ef) ? 1 : 0;

    if (!param) {
        /* nothing to notify the guest about, skip the interrupt */
        return;
    }
    s390_sclp_extint(param);
}

/* qemu object creation and initialization functions */

void s390_sclp_init(void)
{
    DeviceState *dev = qdev_create(NULL, TYPE_SCLP_EVENT_FACILITY);

    object_property_add_child(qdev_get_machine(), TYPE_SCLP_EVENT_FACILITY,
                              OBJECT(dev), NULL);
    qdev_init_nofail(dev);
}

sclpMemoryHotplugDev *init_sclp_memory_hotplug_dev(void)
{
    DeviceState *dev;

    dev = qdev_create(NULL, TYPE_SCLP_MEMORY_HOTPLUG_DEV);
    object_property_add_child(qdev_get_machine(),
                              TYPE_SCLP_MEMORY_HOTPLUG_DEV,
                              OBJECT(dev), NULL);
    qdev_init_nofail(dev);
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

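/* returns NULL for machine types that do not create the hotplug device */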
sclpMemoryHotplugDev *get_sclp_memory_hotplug_dev(void)
{
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}

static void sclp_memory_hotplug_dev_class_init(ObjectClass *klass,
                                               void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static const TypeInfo sclp_memory_hotplug_dev_info = {
    .name = TYPE_SCLP_MEMORY_HOTPLUG_DEV,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(sclpMemoryHotplugDev),
    .class_init = sclp_memory_hotplug_dev_class_init,
};

static void register_types(void)
{
    type_register_static(&sclp_memory_hotplug_dev_info);
}
type_init(register_types);