/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"
#include "trace.h"
#define CXL_NUM_MINORS 256 /* Total to reserve */
#define CXL_DEV_MINORS 13  /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */

#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS)
#define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
#define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1)
#define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2)
#define CXL_AFU_MKDEV_D(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_D(afu))
#define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu))
#define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu))

#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
#define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3)

#define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0)
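
/*
 * Worked example of the minor number layout (illustrative; not from the
 * original source): with CXL_DEV_MINORS = 13, adapter 1 owns minors
 * 13..25. Minor 13 is the card control device, and AFU slice 2 on that
 * adapter gets:
 *
 *   CXL_AFU_MINOR_D = 13 + 1 + (3 * 2) = 20  (dedicated)
 *   CXL_AFU_MINOR_M = 20 + 1           = 21  (master)
 *   CXL_AFU_MINOR_S = 20 + 2           = 22  (shared)
 *
 * Going the other way, CXL_DEVT_ADAPTER() of minors 20..22 is 20/13 = 1
 * and CXL_DEVT_AFU() is (20 % 13 - 1) / 3 = 2, recovering the
 * adapter/slice pair.
 */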
static dev_t cxl_dev;

static struct class *cxl_class;
static int __afu_open(struct inode *inode, struct file *file, bool master)
{
        struct cxl *adapter;
        struct cxl_afu *afu;
        struct cxl_context *ctx;
        int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
        int slice = CXL_DEVT_AFU(inode->i_rdev);
        int rc = -ENODEV;

        pr_devel("afu_open afu%i.%i\n", slice, adapter_num);

        if (!(adapter = get_cxl_adapter(adapter_num)))
                return -ENODEV;

        if (slice > adapter->slices)
                goto err_put_adapter;

        spin_lock(&adapter->afu_list_lock);
        if (!(afu = adapter->afu[slice])) {
                spin_unlock(&adapter->afu_list_lock);
                goto err_put_adapter;
        }

        /*
         * taking a ref to the afu so that it doesn't go away
         * for the rest of the function. This ref is released before
         * we return.
         */
        cxl_afu_get(afu);
        spin_unlock(&adapter->afu_list_lock);

        if (!afu->current_mode)
                goto err_put_afu;

        if (!cxl_adapter_link_ok(adapter)) {
                rc = -EIO;
                goto err_put_afu;
        }

        if (!(ctx = cxl_context_alloc())) {
                rc = -ENOMEM;
                goto err_put_afu;
        }

        if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
                goto err_put_afu;

        pr_devel("afu_open pe: %i\n", ctx->pe);
        file->private_data = ctx;
        cxl_ctx_get();

        /* indicate success */
        rc = 0;

err_put_afu:
        /* release the ref taken earlier */
        cxl_afu_put(afu);
err_put_adapter:
        put_device(&adapter->dev);
        return rc;
}
int afu_open(struct inode *inode, struct file *file)
{
        return __afu_open(inode, file, false);
}
static int afu_master_open(struct inode *inode, struct file *file)
{
        return __afu_open(inode, file, true);
}
int afu_release(struct inode *inode, struct file *file)
{
        struct cxl_context *ctx = file->private_data;

        pr_devel("%s: closing cxl file descriptor. pe: %i\n",
                 __func__, ctx->pe);
        cxl_context_detach(ctx);

        /*
         * Delete the context's mapping pointer, unless it's created by the
         * kernel API, in which case leave it so it can be freed by reclaim_ctx()
         */
        if (!ctx->kernelapi) {
                mutex_lock(&ctx->mapping_lock);
                ctx->mapping = NULL;
                mutex_unlock(&ctx->mapping_lock);
        }

        /*
         * At this point all bottom halves have finished and we should be
         * getting no more IRQs from the hardware for this context. Once it's
         * removed from the IDR (and RCU synchronised) it's safe to free the
         * context.
         */
        cxl_context_free(ctx);

        return 0;
}
static long afu_ioctl_start_work(struct cxl_context *ctx,
                                 struct cxl_ioctl_start_work __user *uwork)
{
        struct cxl_ioctl_start_work work;
        u64 amr = 0;
        int rc;

        pr_devel("%s: pe: %i\n", __func__, ctx->pe);

        /* Do this outside the status_mutex to avoid a circular dependency with
         * the locking in cxl_mmap_fault() */
        if (copy_from_user(&work, uwork,
                           sizeof(struct cxl_ioctl_start_work)))
                return -EFAULT;

        mutex_lock(&ctx->status_mutex);
        if (ctx->status != OPENED) {
                rc = -EIO;
                goto out;
        }

        /*
         * if any of the reserved fields are set or any of the unused
         * flags are set it's invalid
         */
        if (work.reserved1 || work.reserved2 || work.reserved3 ||
            work.reserved4 || work.reserved5 || work.reserved6 ||
            (work.flags & ~CXL_START_WORK_ALL)) {
                rc = -EINVAL;
                goto out;
        }

        if (!(work.flags & CXL_START_WORK_NUM_IRQS))
                work.num_interrupts = ctx->afu->pp_irqs;
        else if ((work.num_interrupts < ctx->afu->pp_irqs) ||
                 (work.num_interrupts > ctx->afu->irqs_max)) {
                rc = -EINVAL;
                goto out;
        }
        if ((rc = afu_register_irqs(ctx, work.num_interrupts)))
                goto out;

        if (work.flags & CXL_START_WORK_AMR)
                amr = work.amr & mfspr(SPRN_UAMOR);

        ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);

        /*
         * We grab the PID here and not in the file open to allow for the case
         * where a process (master, some daemon, etc) has opened the chardev on
         * behalf of another process, so the AFU's mm gets bound to the process
         * that performs this ioctl and not the process that opened the file.
         * Also we grab the PID of the group leader so that if the task that
         * has performed the attach operation exits, the mm context of the
         * process is still accessible.
         */
        ctx->pid = get_task_pid(current, PIDTYPE_PID);
        ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);

        trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);

        if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor,
                                     amr))) {
                afu_release_irqs(ctx, ctx);
                goto out;
        }

        ctx->status = STARTED;
        rc = 0;
out:
        mutex_unlock(&ctx->status_mutex);
        return rc;
}
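
/*
 * Illustrative userspace sketch of the start-work flow (not part of this
 * driver; the struct and ioctl names come from the cxl uapi header, and
 * the WED value is an AFU-specific placeholder):
 *
 *	int fd = open("/dev/cxl/afu0.0s", O_RDWR);
 *	struct cxl_ioctl_start_work work = { 0 };
 *	__u32 pe;
 *
 *	work.work_element_descriptor = wed;	// AFU-defined WED
 *	if (ioctl(fd, CXL_IOCTL_START_WORK, &work))
 *		err(1, "CXL_IOCTL_START_WORK");
 *	ioctl(fd, CXL_IOCTL_GET_PROCESS_ELEMENT, &pe);	// PE handle
 *
 * Leaving flags clear means the driver defaults num_interrupts to
 * ctx->afu->pp_irqs, per the CXL_START_WORK_NUM_IRQS handling above.
 */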
static long afu_ioctl_process_element(struct cxl_context *ctx,
                                      int __user *upe)
{
        pr_devel("%s: pe: %i\n", __func__, ctx->pe);

        if (copy_to_user(upe, &ctx->pe, sizeof(__u32)))
                return -EFAULT;

        return 0;
}

static long afu_ioctl_get_afu_id(struct cxl_context *ctx,
                                 struct cxl_afu_id __user *upafuid)
{
        struct cxl_afu_id afuid = { 0 };

        afuid.card_id = ctx->afu->adapter->adapter_num;
        afuid.afu_offset = ctx->afu->slice;
        afuid.afu_mode = ctx->afu->current_mode;

        /* set the flag bit in case the afu is a slave */
        if (ctx->afu->current_mode == CXL_MODE_DIRECTED && !ctx->master)
                afuid.flags |= CXL_AFUID_FLAG_SLAVE;

        if (copy_to_user(upafuid, &afuid, sizeof(afuid)))
                return -EFAULT;

        return 0;
}
long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct cxl_context *ctx = file->private_data;

        if (ctx->status == CLOSED)
                return -EIO;

        if (!cxl_adapter_link_ok(ctx->afu->adapter))
                return -EIO;

        pr_devel("afu_ioctl\n");
        switch (cmd) {
        case CXL_IOCTL_START_WORK:
                return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
        case CXL_IOCTL_GET_PROCESS_ELEMENT:
                return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
        case CXL_IOCTL_GET_AFU_ID:
                return afu_ioctl_get_afu_id(ctx, (struct cxl_afu_id __user *)arg);
        }
        return -EINVAL;
}

static long afu_compat_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        return afu_ioctl(file, cmd, arg);
}
int afu_mmap(struct file *file, struct vm_area_struct *vm)
{
        struct cxl_context *ctx = file->private_data;

        /* AFU must be started before we can MMIO */
        if (ctx->status != STARTED)
                return -EIO;

        if (!cxl_adapter_link_ok(ctx->afu->adapter))
                return -EIO;

        return cxl_context_iomap(ctx, vm);
}
unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
        struct cxl_context *ctx = file->private_data;
        int mask = 0;
        unsigned long flags;

        poll_wait(file, &ctx->wq, poll);

        pr_devel("afu_poll wait done pe: %i\n", ctx->pe);

        spin_lock_irqsave(&ctx->lock, flags);
        if (ctx->pending_irq || ctx->pending_fault ||
            ctx->pending_afu_err)
                mask |= POLLIN | POLLRDNORM;
        else if (ctx->status == CLOSED)
                /* Only error on closed when there are no further events
                 * pending */
                mask |= POLLERR;
        spin_unlock_irqrestore(&ctx->lock, flags);

        pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);

        return mask;
}
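
/*
 * Illustrative poll() usage from userspace (not part of this driver):
 * POLLIN reports a pending AFU interrupt, fault or error; POLLERR is only
 * raised once the context is closed and fully drained.
 *
 *	struct pollfd pfd = { .fd = afu_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		;	// an event is ready; see the read sketch below
 */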
static inline int ctx_event_pending(struct cxl_context *ctx)
{
        return (ctx->pending_irq || ctx->pending_fault ||
                ctx->pending_afu_err || (ctx->status == CLOSED));
}
ssize_t afu_read(struct file *file, char __user *buf, size_t count,
                 loff_t *off)
{
        struct cxl_context *ctx = file->private_data;
        struct cxl_event event;
        unsigned long flags;
        int rc;
        DEFINE_WAIT(wait);

        if (!cxl_adapter_link_ok(ctx->afu->adapter))
                return -EIO;

        if (count < CXL_READ_MIN_SIZE)
                return -EINVAL;

        spin_lock_irqsave(&ctx->lock, flags);

        for (;;) {
                prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
                if (ctx_event_pending(ctx))
                        break;

                if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
                        rc = -EIO;
                        goto out;
                }

                if (file->f_flags & O_NONBLOCK) {
                        rc = -EAGAIN;
                        goto out;
                }

                if (signal_pending(current)) {
                        rc = -ERESTARTSYS;
                        goto out;
                }

                spin_unlock_irqrestore(&ctx->lock, flags);
                pr_devel("afu_read going to sleep...\n");
                schedule();
                pr_devel("afu_read woken up\n");
                spin_lock_irqsave(&ctx->lock, flags);
        }

        finish_wait(&ctx->wq, &wait);

        memset(&event, 0, sizeof(event));
        event.header.process_element = ctx->pe;
        event.header.size = sizeof(struct cxl_event_header);
        if (ctx->pending_irq) {
                pr_devel("afu_read delivering AFU interrupt\n");
                event.header.size += sizeof(struct cxl_event_afu_interrupt);
                event.header.type = CXL_EVENT_AFU_INTERRUPT;
                event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1;
                clear_bit(event.irq.irq - 1, ctx->irq_bitmap);
                if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count))
                        ctx->pending_irq = false;
        } else if (ctx->pending_fault) {
                pr_devel("afu_read delivering data storage fault\n");
                event.header.size += sizeof(struct cxl_event_data_storage);
                event.header.type = CXL_EVENT_DATA_STORAGE;
                event.fault.addr = ctx->fault_addr;
                event.fault.dsisr = ctx->fault_dsisr;
                ctx->pending_fault = false;
        } else if (ctx->pending_afu_err) {
                pr_devel("afu_read delivering afu error\n");
                event.header.size += sizeof(struct cxl_event_afu_error);
                event.header.type = CXL_EVENT_AFU_ERROR;
                event.afu_error.error = ctx->afu_err;
                ctx->pending_afu_err = false;
        } else if (ctx->status == CLOSED) {
                pr_devel("afu_read fatal error\n");
                spin_unlock_irqrestore(&ctx->lock, flags);
                return -EIO;
        } else
                WARN(1, "afu_read must be buggy\n");

        spin_unlock_irqrestore(&ctx->lock, flags);

        if (copy_to_user(buf, &event, event.header.size))
                return -EFAULT;
        return event.header.size;

out:
        finish_wait(&ctx->wq, &wait);
        spin_unlock_irqrestore(&ctx->lock, flags);
        return rc;
}
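
/*
 * Illustrative userspace consumer of the event stream (not part of this
 * driver): events are variable-sized, so read into a buffer of at least
 * CXL_READ_MIN_SIZE and switch on the header type. The handle_* calls
 * are hypothetical application handlers.
 *
 *	struct cxl_event event;
 *	ssize_t n = read(afu_fd, &event, sizeof(event));
 *
 *	if (n >= (ssize_t)sizeof(struct cxl_event_header)) {
 *		switch (event.header.type) {
 *		case CXL_EVENT_AFU_INTERRUPT:
 *			handle_irq(event.irq.irq);
 *			break;
 *		case CXL_EVENT_DATA_STORAGE:
 *			handle_fault(event.fault.addr, event.fault.dsisr);
 *			break;
 *		case CXL_EVENT_AFU_ERROR:
 *			handle_err(event.afu_error.error);
 *			break;
 *		}
 *	}
 */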
/*
 * Note: if this is updated, we need to update api.c to patch the new ones in
 * too
 */
const struct file_operations afu_fops = {
        .owner          = THIS_MODULE,
        .open           = afu_open,
        .poll           = afu_poll,
        .read           = afu_read,
        .release        = afu_release,
        .unlocked_ioctl = afu_ioctl,
        .compat_ioctl   = afu_compat_ioctl,
        .mmap           = afu_mmap,
};

static const struct file_operations afu_master_fops = {
        .owner          = THIS_MODULE,
        .open           = afu_master_open,
        .poll           = afu_poll,
        .read           = afu_read,
        .release        = afu_release,
        .unlocked_ioctl = afu_ioctl,
        .compat_ioctl   = afu_compat_ioctl,
        .mmap           = afu_mmap,
};
static char *cxl_devnode(struct device *dev, umode_t *mode)
{
        if (CXL_DEVT_IS_CARD(dev->devt)) {
                /*
                 * These minor numbers will eventually be used to program the
                 * PSL and AFUs once we have dynamic reprogramming support
                 */
                return NULL;
        }
        return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}
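
/*
 * Illustrative result of the devnode callback above (assuming standard
 * udev behaviour): AFU devices named "afu0.0d"/"afu0.0m"/"afu0.0s" get
 * nodes under /dev/cxl/, e.g. /dev/cxl/afu0.0s for the shared device of
 * AFU 0 on card 0, while card devices keep the default /dev naming until
 * dynamic reprogramming support exists.
 */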
extern struct class *cxl_class;
static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev,
                           struct device **chardev, char *postfix, char *desc,
                           const struct file_operations *fops)
{
        struct device *dev;
        int rc;

        cdev_init(cdev, fops);
        if ((rc = cdev_add(cdev, devt, 1))) {
                dev_err(&afu->dev, "Unable to add %s chardev: %i\n", desc, rc);
                return rc;
        }

        dev = device_create(cxl_class, &afu->dev, devt, afu,
                            "afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix);
        if (IS_ERR(dev)) {
                rc = PTR_ERR(dev);
                dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc);
                goto err;
        }

        *chardev = dev;

        return 0;
err:
        cdev_del(cdev);
        return rc;
}

int cxl_chardev_d_afu_add(struct cxl_afu *afu)
{
        return cxl_add_chardev(afu, CXL_AFU_MKDEV_D(afu), &afu->afu_cdev_d,
                               &afu->chardev_d, "d", "dedicated",
                               &afu_master_fops); /* Uses master fops */
}
int cxl_chardev_m_afu_add(struct cxl_afu *afu)
{
        return cxl_add_chardev(afu, CXL_AFU_MKDEV_M(afu), &afu->afu_cdev_m,
                               &afu->chardev_m, "m", "master",
                               &afu_master_fops);
}

int cxl_chardev_s_afu_add(struct cxl_afu *afu)
{
        return cxl_add_chardev(afu, CXL_AFU_MKDEV_S(afu), &afu->afu_cdev_s,
                               &afu->chardev_s, "s", "shared",
                               &afu_fops);
}
void cxl_chardev_afu_remove(struct cxl_afu *afu)
{
        if (afu->chardev_d) {
                cdev_del(&afu->afu_cdev_d);
                device_unregister(afu->chardev_d);
                afu->chardev_d = NULL;
        }
        if (afu->chardev_m) {
                cdev_del(&afu->afu_cdev_m);
                device_unregister(afu->chardev_m);
                afu->chardev_m = NULL;
        }
        if (afu->chardev_s) {
                cdev_del(&afu->afu_cdev_s);
                device_unregister(afu->chardev_s);
                afu->chardev_s = NULL;
        }
}
int cxl_register_afu(struct cxl_afu *afu)
{
        afu->dev.class = cxl_class;

        return device_register(&afu->dev);
}

int cxl_register_adapter(struct cxl *adapter)
{
        adapter->dev.class = cxl_class;

        /*
         * Future: When we support dynamically reprogramming the PSL & AFU we
         * will expose the interface to do that via a chardev:
         * adapter->dev.devt = CXL_CARD_MKDEV(adapter);
         */

        return device_register(&adapter->dev);
}
int __init cxl_file_init(void)
{
        int rc;

        /*
         * If these change we really need to update API. Either change some
         * flags or update API version number CXL_API_VERSION.
         */
        BUILD_BUG_ON(CXL_API_VERSION != 2);
        BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
        BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
        BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
        BUILD_BUG_ON(sizeof(struct cxl_event_data_storage) != 32);
        BUILD_BUG_ON(sizeof(struct cxl_event_afu_error) != 16);

        if ((rc = alloc_chrdev_region(&cxl_dev, 0, CXL_NUM_MINORS, "cxl"))) {
                pr_err("Unable to allocate CXL major number: %i\n", rc);
                return rc;
        }

        pr_devel("CXL device allocated, MAJOR %i\n", MAJOR(cxl_dev));

        cxl_class = class_create(THIS_MODULE, "cxl");
        if (IS_ERR(cxl_class)) {
                pr_err("Unable to create CXL class\n");
                rc = PTR_ERR(cxl_class);
                goto err;
        }
        cxl_class->devnode = cxl_devnode;

        return 0;

err:
        unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
        return rc;
}
void cxl_file_exit(void)
{
        unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
        class_destroy(cxl_class);
}