
aarch64: Fix a few misindented lines

Signed-off-by: Martin Storsjö <martin@martin.st>
Martin Storsjö 8 months ago
commit b137347278
2 changed files with 9 additions and 9 deletions
  1. libswresample/aarch64/resample.S (+4 -4)
  2. libswscale/aarch64/yuv2rgb_neon.S (+5 -5)

+ 4 - 4
libswresample/aarch64/resample.S

@@ -30,7 +30,7 @@ function ff_resample_common_apply_filter_x4_float_neon, export=1
         faddp           v0.4s, v0.4s, v0.4s                            // pair adding of the 4x32-bit accumulated values
         faddp           v0.4s, v0.4s, v0.4s                            // pair adding of the 4x32-bit accumulated values
         st1             {v0.s}[0], [x0], #4                            // write accumulator
-    ret
+        ret
 endfunc

 function ff_resample_common_apply_filter_x8_float_neon, export=1
@@ -46,7 +46,7 @@ function ff_resample_common_apply_filter_x8_float_neon, export=1
         faddp           v0.4s, v0.4s, v0.4s                            // pair adding of the 4x32-bit accumulated values
         faddp           v0.4s, v0.4s, v0.4s                            // pair adding of the 4x32-bit accumulated values
         st1             {v0.s}[0], [x0], #4                            // write accumulator
-    ret
+        ret
 endfunc

 function ff_resample_common_apply_filter_x4_s16_neon, export=1
@@ -59,7 +59,7 @@ function ff_resample_common_apply_filter_x4_s16_neon, export=1
         addp            v0.4s, v0.4s, v0.4s                            // pair adding of the 4x32-bit accumulated values
         addp            v0.4s, v0.4s, v0.4s                            // pair adding of the 4x32-bit accumulated values
         st1             {v0.s}[0], [x0], #4                            // write accumulator
-    ret
+        ret
 endfunc

 function ff_resample_common_apply_filter_x8_s16_neon, export=1
@@ -73,5 +73,5 @@ function ff_resample_common_apply_filter_x8_s16_neon, export=1
         addp            v0.4s, v0.4s, v0.4s                            // pair adding of the 4x32-bit accumulated values
         addp            v0.4s, v0.4s, v0.4s                            // pair adding of the 4x32-bit accumulated values
         st1             {v0.s}[0], [x0], #4                            // write accumulator
-    ret
+        ret
 endfunc

+ 5 - 5
libswscale/aarch64/yuv2rgb_neon.S

@@ -126,7 +126,7 @@
 .endm

 .macro load_chroma_yuv422p
-    load_chroma_yuv420p
+        load_chroma_yuv420p
 .endm

 .macro increment_nv12
@@ -136,7 +136,7 @@
 .endm

 .macro increment_nv21
-    increment_nv12
+        increment_nv12
 .endm

 .macro increment_yuv420p
@@ -182,7 +182,7 @@ function ff_\ifmt\()_to_\ofmt\()_neon, export=1
         mov             w8, w0                                          // w8 = width
 2:
         movi            v5.8h, #4, lsl #8                               // 128 * (1<<3)
-    load_chroma_\ifmt
+        load_chroma_\ifmt
         sub             v18.8h, v18.8h, v5.8h                           // U*(1<<3) - 128*(1<<3)
         sub             v19.8h, v19.8h, v5.8h                           // V*(1<<3) - 128*(1<<3)
         sqdmulh         v20.8h, v19.8h, v1.h[0]                         // V * v2r            (R)
@@ -237,11 +237,11 @@ function ff_\ifmt\()_to_\ofmt\()_neon, export=1
         add             x15, x15, w16, sxtw                             // dst2 += padding2
 .endif
         add             x4, x4, w5, sxtw                                // srcY += paddingY
-    increment_\ifmt
+        increment_\ifmt
         subs            w1, w1, #1                                      // height -= 1
         b.gt            1b
         mov             w0, w9
-    ret
+        ret
 endfunc
 .endm