These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
kvmfornfv.git: kernel/drivers/block/xen-blkback/common.h
index f620b5d..c929ae2 100644
 #include <asm/pgalloc.h>
 #include <asm/hypervisor.h>
 #include <xen/grant_table.h>
+#include <xen/page.h>
 #include <xen/xenbus.h>
 #include <xen/interface/io/ring.h>
 #include <xen/interface/io/blkif.h>
 #include <xen/interface/io/protocols.h>
 
+extern unsigned int xen_blkif_max_ring_order;
 /*
  * This is the maximum number of segments that would be allowed in indirect
  * requests. This value will also be passed to the frontend.
  */
 #define MAX_INDIRECT_SEGMENTS 256
 
-#define SEGS_PER_INDIRECT_FRAME \
-       (PAGE_SIZE/sizeof(struct blkif_request_segment))
+/*
+ * Xen uses 4K pages. The guest may use a different page size (4K or 64K).
+ * Number of Xen pages per segment:
+ */
+#define XEN_PAGES_PER_SEGMENT   (PAGE_SIZE / XEN_PAGE_SIZE)
+
+#define XEN_PAGES_PER_INDIRECT_FRAME \
+       (XEN_PAGE_SIZE/sizeof(struct blkif_request_segment))
+#define SEGS_PER_INDIRECT_FRAME        \
+       (XEN_PAGES_PER_INDIRECT_FRAME / XEN_PAGES_PER_SEGMENT)
+
 #define MAX_INDIRECT_PAGES \
        ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
-#define INDIRECT_PAGES(_segs) \
-       ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
+#define INDIRECT_PAGES(_segs) DIV_ROUND_UP(_segs, XEN_PAGES_PER_INDIRECT_FRAME)
 
 /* Not a real protocol.  Used to generate ring structs which contain
  * the elements common to all protocols only.  This way we get a
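
The net effect of the new macros: an indirect frame is always one 4K Xen page of blkif_request_segment entries, while a backend segment now covers a full backend page. Below is a minimal userspace sketch of the arithmetic, assuming XEN_PAGE_SIZE is 4096 and sizeof(struct blkif_request_segment) is 8 bytes (a 32-bit grant reference plus two sector offsets, padded); the helper show() is ours, not the driver's:

    /* Recompute the patch's macros for both supported granularities. */
    #include <stdio.h>

    #define XEN_PAGE_SIZE         4096u
    #define SEG_ENTRY_SIZE        8u    /* assumed sizeof(struct blkif_request_segment) */
    #define MAX_INDIRECT_SEGMENTS 256u

    static void show(unsigned int page_size)
    {
            unsigned int pages_per_seg   = page_size / XEN_PAGE_SIZE;
            unsigned int pages_per_frame = XEN_PAGE_SIZE / SEG_ENTRY_SIZE;
            unsigned int segs_per_frame  = pages_per_frame / pages_per_seg;
            unsigned int ind_pages =
                    (MAX_INDIRECT_SEGMENTS + segs_per_frame - 1) / segs_per_frame;

            printf("PAGE_SIZE=%u: %u Xen page(s)/segment, %u segs/indirect frame, %u indirect page(s)\n",
                   page_size, pages_per_seg, segs_per_frame, ind_pages);
    }

    int main(void)
    {
            show(4096);     /* -> 1, 512, 1 */
            show(65536);    /* -> 16, 32, 8 */
            return 0;
    }

So a 4K backend keeps the old numbers (512 segments per indirect frame, a single indirect page for 256 segments), while a 64K backend drops to 32 segments per frame and needs 8 indirect pages for the same 256 segments.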
@@ -248,7 +258,7 @@ struct backend_info;
 #define PERSISTENT_GNT_WAS_ACTIVE      1
 
 /* Number of requests that fit in a single page of the ring */
-#define XEN_BLKIF_REQS                 32
+#define XEN_BLKIF_REQS_PER_PAGE                32
 
 struct persistent_gnt {
        struct page *page;
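
The rename from XEN_BLKIF_REQS to XEN_BLKIF_REQS_PER_PAGE accompanies multi-page ring support: 32 is the number of requests that fit in a single 4K ring page, and the total pool of in-flight requests scales with how many pages back the ring. A hedged sketch of that scaling; the helper name is hypothetical:

    #define XEN_BLKIF_REQS_PER_PAGE 32

    /* Hypothetical helper: the pending-request pool scales with ring pages. */
    static inline unsigned int xen_blkif_total_reqs(unsigned int nr_ring_pages)
    {
            return XEN_BLKIF_REQS_PER_PAGE * nr_ring_pages;
    }
    /* ring order 0 -> 1 page -> 32 requests; order 2 -> 4 pages -> 128 */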
@@ -320,6 +330,7 @@ struct xen_blkif {
        struct work_struct      free_work;
        /* Thread shutdown wait queue. */
        wait_queue_head_t       shutdown_wq;
+       unsigned int nr_ring_pages;
 };
 
 struct seg_buf {
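
The new nr_ring_pages field records how many shared pages back this ring, while the extern xen_blkif_max_ring_order declared at the top of the header caps the ring-page order a frontend may request. A sketch of that negotiation under those assumptions; the helper is illustrative, not the driver's actual xenbus code:

    #include <linux/errno.h>

    extern unsigned int xen_blkif_max_ring_order;   /* module parameter, from this patch */

    /* Illustrative: reject an oversized order, then record the page count. */
    static int negotiate_ring_order(struct xen_blkif *blkif, unsigned int ring_order)
    {
            if (ring_order > xen_blkif_max_ring_order)
                    return -EINVAL;                    /* frontend asked for too much */
            blkif->nr_ring_pages = 1U << ring_order;   /* order 0 => 1 page */
            return 0;
    }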
@@ -343,7 +354,7 @@ struct grant_page {
 struct pending_req {
        struct xen_blkif        *blkif;
        u64                     id;
-       int                     nr_pages;
+       int                     nr_segs;
        atomic_t                pendcnt;
        unsigned short          operation;
        int                     status;
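
The nr_pages field always counted request segments, and once the backend page size may differ from Xen's 4K granularity the two quantities diverge, so nr_segs is the accurate name. An illustrative relation only, assuming each segment spans XEN_PAGES_PER_SEGMENT Xen pages as the macros above define:

    /* Illustrative only: the grant footprint follows from the segment count. */
    static inline unsigned int req_xen_pages(const struct pending_req *req)
    {
            return req->nr_segs * XEN_PAGES_PER_SEGMENT;
    }
    /* 4K backend: 11 segs -> 11 Xen pages; 64K backend: 11 segs -> 176 */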
@@ -397,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                                        struct blkif_x86_32_request *src)
 {
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-       dst->operation = src->operation;
-       switch (src->operation) {
+       dst->operation = READ_ONCE(src->operation);
+       switch (dst->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        case BLKIF_OP_WRITE_BARRIER:
@@ -445,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                                        struct blkif_x86_64_request *src)
 {
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-       dst->operation = src->operation;
-       switch (src->operation) {
+       dst->operation = READ_ONCE(src->operation);
+       switch (dst->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        case BLKIF_OP_WRITE_BARRIER:
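
Both conversion helpers used to fetch src->operation twice from the shared ring: once for the copy and once for the switch. The ring is writable by the frontend, so a malicious guest could change the value between the two loads and have the backend validate one operation but act on another; this is the double-fetch class addressed by XSA-155 (CVE-2015-8550). READ_ONCE() forces a single load, and the switch then dispatches on the backend's private copy. A self-contained sketch of the pattern; the struct and function names are illustrative:

    #include <linux/compiler.h>     /* READ_ONCE() */

    struct shared_hdr { unsigned char operation; };  /* stand-in for the ring entry */

    static void copy_hdr(struct shared_hdr *dst, const struct shared_hdr *src)
    {
            /*
             * BAD (old code): two loads from frontend-writable memory,
             * so the value tested by the switch may differ from the
             * value copied:
             *
             *      dst->operation = src->operation;
             *      switch (src->operation) { ... }
             */
            dst->operation = READ_ONCE(src->operation);  /* exactly one load */
            switch (dst->operation) {                    /* dispatch on the copy */
            default:
                    break;
            }
    }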