These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
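The hunks below drop the old dwarf2 CFI annotation macros (CFI_STARTPROC/CFI_ENDPROC, pushl_cfi_reg, popl_cfi_reg, popl_cfi, CFI_ADJUST_CFA_OFFSET) from the 32-bit checksum assembly and keep only the plain push/pop instructions, following the upstream removal of <asm/dwarf2.h>. As a rough sketch, assuming the pre-removal asm/dwarf2.h definitions with CONFIG_AS_CFI enabled, saving and restoring %esi changes roughly like this (the executable instructions are identical; only the DWARF unwind metadata is dropped):

    # Before: macro form, expanded by the old asm/dwarf2.h
    pushl_cfi_reg esi               # roughly expanded to:
    #   pushl %esi                  #   the actual push
    #   .cfi_adjust_cfa_offset 4    #   unwind info: CFA moved by 4 bytes
    #   .cfi_rel_offset esi, 0      #   unwind info: %esi saved at CFA+0
    # ...
    popl_cfi_reg esi                # roughly popl %esi plus matching .cfi_* notes

    # After this patch: only the real instructions remain
    pushl %esi
    # ...
    popl %esi

CFI_STARTPROC/CFI_ENDPROC similarly mapped to .cfi_startproc/.cfi_endproc, so removing the wrappers changes no generated machine code, only the unwind tables emitted for these routines.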
diff --git a/kernel/arch/x86/lib/checksum_32.S b/kernel/arch/x86/lib/checksum_32.S
index 9bc944a..c1e6232 100644
--- a/kernel/arch/x86/lib/checksum_32.S
+++ b/kernel/arch/x86/lib/checksum_32.S
@@ -26,7 +26,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
                                
@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
           * alignment for the unrolled loop.
           */           
 ENTRY(csum_partial)
-       CFI_STARTPROC
-       pushl_cfi_reg esi
-       pushl_cfi_reg ebx
+       pushl %esi
+       pushl %ebx
        movl 20(%esp),%eax      # Function arg: unsigned int sum
        movl 16(%esp),%ecx      # Function arg: int len
        movl 12(%esp),%esi      # Function arg: unsigned char *buff
@@ -129,10 +127,9 @@ ENTRY(csum_partial)
        jz 8f
        roll $8, %eax
 8:
-       popl_cfi_reg ebx
-       popl_cfi_reg esi
+       popl %ebx
+       popl %esi
        ret
-       CFI_ENDPROC
 ENDPROC(csum_partial)
 
 #else
@@ -140,9 +137,8 @@ ENDPROC(csum_partial)
 /* Version for PentiumII/PPro */
 
 ENTRY(csum_partial)
-       CFI_STARTPROC
-       pushl_cfi_reg esi
-       pushl_cfi_reg ebx
+       pushl %esi
+       pushl %ebx
        movl 20(%esp),%eax      # Function arg: unsigned int sum
        movl 16(%esp),%ecx      # Function arg: int len
        movl 12(%esp),%esi      # Function arg: const unsigned char *buf
@@ -249,10 +245,9 @@ ENTRY(csum_partial)
        jz 90f
        roll $8, %eax
 90: 
-       popl_cfi_reg ebx
-       popl_cfi_reg esi
+       popl %ebx
+       popl %esi
        ret
-       CFI_ENDPROC
 ENDPROC(csum_partial)
                                
 #endif
@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
 #define FP             12
                
 ENTRY(csum_partial_copy_generic)
-       CFI_STARTPROC
        subl  $4,%esp   
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl_cfi_reg edi
-       pushl_cfi_reg esi
-       pushl_cfi_reg ebx
+       pushl %edi
+       pushl %esi
+       pushl %ebx
        movl ARGBASE+16(%esp),%eax      # sum
        movl ARGBASE+12(%esp),%ecx      # len
        movl ARGBASE+4(%esp),%esi       # src
@@ -401,12 +394,11 @@ DST(      movb %cl, (%edi)        )
 
 .previous
 
-       popl_cfi_reg ebx
-       popl_cfi_reg esi
-       popl_cfi_reg edi
-       popl_cfi %ecx                   # equivalent to addl $4,%esp
+       popl %ebx
+       popl %esi
+       popl %edi
+       popl %ecx                       # equivalent to addl $4,%esp
        ret     
-       CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
 
 #else
@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic)
 #define ARGBASE 12
                
 ENTRY(csum_partial_copy_generic)
-       CFI_STARTPROC
-       pushl_cfi_reg ebx
-       pushl_cfi_reg edi
-       pushl_cfi_reg esi
+       pushl %ebx
+       pushl %edi
+       pushl %esi
        movl ARGBASE+4(%esp),%esi       #src
        movl ARGBASE+8(%esp),%edi       #dst    
        movl ARGBASE+12(%esp),%ecx      #len
@@ -489,11 +480,10 @@ DST(      movb %dl, (%edi)         )
        jmp  7b                 
 .previous                              
 
-       popl_cfi_reg esi
-       popl_cfi_reg edi
-       popl_cfi_reg ebx
+       popl %esi
+       popl %edi
+       popl %ebx
        ret
-       CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
                                
 #undef ROUND