@ hevcdsp_sao_neon.S — ARM NEON SAO (sample adaptive offset) filters for HEVC, 8-bit depth
/*
 * Copyright (c) 2017 Meng Wang <wangmeng.kids@bytedance.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
  20. #include "libavutil/arm/asm.S"
  21. #include "neon.S"
  22. function ff_hevc_sao_band_filter_neon_8, export=1
  23. push {r4-r10}
  24. ldr r5, [sp, #28] // width
  25. ldr r4, [sp, #32] // height
  26. ldr r8, [sp, #36] // offset_table
  27. vpush {d8-d15}
  28. mov r12, r4 // r12 = height
  29. mov r6, r0 // r6 = r0 = dst
  30. mov r7, r1 // r7 = r1 = src
  31. vldm r8, {q0-q3}
  32. vmov.u16 q15, #1
  33. vmov.u8 q14, #32
  34. 0: pld [r1]
  35. cmp r5, #4
  36. beq 4f
  37. 8: subs r4, #1
  38. vld1.8 {d16}, [r1], r3
  39. vshr.u8 d17, d16, #3 // index = [src>>3]
  40. vshll.u8 q9, d17, #1 // lowIndex = 2*index
  41. vadd.u16 q11, q9, q15 // highIndex = (2*index+1) << 8
  42. vshl.u16 q10, q11, #8 // q10: highIndex; q9: lowIndex;
  43. vadd.u16 q10, q9 // combine high and low index;
  44. // Look-up Table Round 1; index range: 0-15
  45. vtbx.8 d24, {q0-q1}, d20
  46. vtbx.8 d25, {q0-q1}, d21
  47. // Look-up Table Round 2; index range: 16-31
  48. vsub.u8 q10, q14 // Look-up with 8bit
  49. vtbx.8 d24, {q2-q3}, d20
  50. vtbx.8 d25, {q2-q3}, d21
  51. vaddw.u8 q13, q12, d16
  52. vqmovun.s16 d8, q13
  53. vst1.8 d8, [r0], r2
  54. bne 8b
  55. subs r5, #8
  56. beq 99f
  57. mov r4, r12
  58. add r6, #8
  59. mov r0, r6
  60. add r7, #8
  61. mov r1, r7
  62. b 0b
  63. 4: subs r4, #1
  64. vld1.32 {d16[0]}, [r1], r3
  65. vshr.u8 d17, d16, #3 // src>>3
  66. vshll.u8 q9, d17, #1 // lowIndex = 2*index
  67. vadd.u16 q11, q9, q15 // highIndex = (2*index+1) << 8
  68. vshl.u16 q10, q11, #8 // q10: highIndex; q9: lowIndex;
  69. vadd.u16 q10, q9 // combine high and low index;
  70. // Look-up Table Round 1; index range: 0-15
  71. vtbx.8 d24, {q0-q1}, d20
  72. vtbx.8 d25, {q0-q1}, d21
  73. // Look-up Table Round 2; index range: 16-32
  74. vsub.u8 q10, q14 // Look-up with 8bit
  75. vtbx.8 d24, {q2-q3}, d20
  76. vtbx.8 d25, {q2-q3}, d21
  77. vaddw.u8 q13, q12, d16
  78. vqmovun.s16 d14, q13
  79. vst1.32 d14[0], [r0], r2
  80. bne 4b
  81. b 99f
  82. 99:
  83. vpop {d8-d15}
  84. pop {r4-r10}
  85. bx lr
  86. endfunc
  87. function ff_hevc_sao_edge_filter_neon_8, export=1
  88. push {r4-r11}
  89. ldr r5, [sp, #32] // width
  90. ldr r4, [sp, #36] // height
  91. ldr r8, [sp, #40] // a_stride
  92. ldr r9, [sp, #44] // b_stride
  93. ldr r10, [sp, #48] // sao_offset_val
  94. ldr r11, [sp, #52] // edge_idx
  95. vpush {d8-d15}
  96. mov r12, r4 // r12 = height
  97. mov r6, r0 // r6 = r0 = dst
  98. mov r7, r1 // r7 = r1 = src
  99. vld1.8 {d0}, [r11] // edge_idx table load in d0 5x8bit
  100. vld1.16 {q1}, [r10] // sao_offset_val table load in q1, 5x16bit
  101. vmov.u8 d1, #2
  102. vmov.u16 q2, #1
  103. 0: mov r10, r1
  104. add r10, r8 // src[x + a_stride]
  105. mov r11, r1
  106. add r11, r9 // src[x + b_stride]
  107. pld [r1]
  108. cmp r5, #4
  109. beq 4f
  110. 8: subs r4, #1
  111. vld1.8 {d16}, [r1], r3 // src[x] 8x8bit
  112. vld1.8 {d17}, [r10], r3 // src[x + a_stride]
  113. vld1.8 {d18}, [r11], r3 // src[x + b_stride]
  114. vcgt.u8 d8, d16, d17
  115. vshr.u8 d9, d8, #7
  116. vclt.u8 d8, d16, d17
  117. vadd.u8 d8, d9 // diff0
  118. vcgt.u8 d10, d16, d18
  119. vshr.u8 d11, d10, #7
  120. vclt.u8 d10, d16, d18
  121. vadd.u8 d10, d11 // diff1
  122. vadd.s8 d8, d10
  123. vadd.s8 d8, d1
  124. vtbx.8 d9, {d0}, d8 // offset_val
  125. vshll.u8 q6, d9, #1 // lowIndex
  126. vadd.u16 q7, q6, q2
  127. vshl.u16 q10, q7, #8 // highIndex
  128. vadd.u16 q10, q6 // combine lowIndex and highIndex, offset_val
  129. vtbx.8 d22, {q1}, d20
  130. vtbx.8 d23, {q1}, d21
  131. vaddw.u8 q12, q11, d16
  132. vqmovun.s16 d26, q12
  133. vst1.8 d26, [r0], r2
  134. bne 8b
  135. subs r5, #8
  136. beq 99f
  137. mov r4, r12
  138. add r6, #8
  139. mov r0, r6
  140. add r7, #8
  141. mov r1, r7
  142. b 0b
  143. 4: subs r4, #1
  144. vld1.32 {d16[0]}, [r1], r3
  145. vld1.32 {d17[0]}, [r10], r3 // src[x + a_stride]
  146. vld1.32 {d18[0]}, [r11], r3 // src[x + b_stride]
  147. vcgt.u8 d8, d16, d17
  148. vshr.u8 d9, d8, #7
  149. vclt.u8 d8, d16, d17
  150. vadd.u8 d8, d9 // diff0
  151. vcgt.u8 d10, d16, d18
  152. vshr.u8 d11, d10, #7
  153. vclt.u8 d10, d16, d18
  154. vadd.u8 d10, d11 // diff1
  155. vadd.s8 d8, d10
  156. vadd.s8 d8, d1
  157. vtbx.8 d9, {d0}, d8 // offset_val
  158. vshll.u8 q6, d9, #1 // lowIndex
  159. vadd.u16 q7, q6, q2
  160. vshl.u16 q10, q7, #8 // highIndex
  161. vadd.u16 q10, q6 // combine lowIndex and highIndex, offset_val
  162. vtbx.8 d22, {q1}, d20
  163. vtbx.8 d23, {q1}, d21
  164. vaddw.u8 q12, q11, d16
  165. vqmovun.s16 d26, q12
  166. vst1.32 d26[0], [r0], r2
  167. bne 4b
  168. b 99f
  169. 99:
  170. vpop {d8-d15}
  171. pop {r4-r11}
  172. bx lr
  173. endfunc