Diffstat (limited to 'firmware/asm/arm/lcd-as-memframe.S')
-rw-r--r--  firmware/asm/arm/lcd-as-memframe.S  591
1 file changed, 0 insertions(+), 591 deletions(-)
diff --git a/firmware/asm/arm/lcd-as-memframe.S b/firmware/asm/arm/lcd-as-memframe.S
index 52ab0447c2..4bbae6fc0a 100644
--- a/firmware/asm/arm/lcd-as-memframe.S
+++ b/firmware/asm/arm/lcd-as-memframe.S
@@ -99,594 +99,3 @@ lcd_copy_buffer_rect: @
bgt 10b @ copy line @
ldmpc regs=r4-r11 @ restore regs and return
.size lcd_copy_buffer_rect, .-lcd_copy_buffer_rect
-
-
-/****************************************************************************
- * void lcd_write_yuv420_lines(fb_data *dst,
- * unsigned char const * const src[3],
- * int width,
- * int stride);
- *
- * |R| |1.000000 -0.000001 1.402000| |Y'|
- * |G| = |1.000000 -0.334136 -0.714136| |Pb|
- * |B| |1.000000 1.772000 0.000000| |Pr|
- * Scaled, normalized, rounded and tweaked to yield RGB 565:
- * |R| |74 0 101| |Y' - 16| >> 9
- * |G| = |74 -24 -51| |Cb - 128| >> 8
- * |B| |74 128 0| |Cr - 128| >> 9
- *
- * Write four RGB565 pixels in the following order on each loop:
- * 1 3 + > down
- * 2 4 \/ left
- */
- .section .icode.lcd_write_yuv420_lines, "ax", %progbits
- .align 2
- .global lcd_write_yuv420_lines
- .type lcd_write_yuv420_lines, %function
-lcd_write_yuv420_lines:
- @ r0 = dst
- @ r1 = yuv_src
- @ r2 = width
- @ r3 = stride
- stmfd sp!, { r4-r10, lr } @ save non-scratch
- ldmia r1, { r4, r5, r6 } @ r4 = yuv_src[0] = Y'_p
- @ r5 = yuv_src[1] = Cb_p
- @ r6 = yuv_src[2] = Cr_p
- @ r1 = scratch
- sub r3, r3, #1 @
-10: @ loop line @
- ldrb r7, [r4], #1 @ r7 = *Y'_p++;
- ldrb r8, [r5], #1 @ r8 = *Cb_p++;
- ldrb r9, [r6], #1 @ r9 = *Cr_p++;
- @
- sub r7, r7, #16 @ r7 = Y = (Y' - 16)*74
- add r12, r7, r7, asl #2 @ actually (Y' - 16)*37 and shift right
- add r7, r12, r7, asl #5 @ by one less when adding - same for all
- @
- sub r8, r8, #128 @ Cb -= 128
- sub r9, r9, #128 @ Cr -= 128
- @
- add r10, r9, r9, asl #1 @ r10 = Cr*51 + Cb*24
- add r10, r10, r10, asl #4 @
- add r10, r10, r8, asl #3 @
- add r10, r10, r8, asl #4 @
- @
- add lr, r9, r9, asl #2 @ r9 = Cr*101
- add lr, lr, r9, asl #5 @
- add r9, lr, r9, asl #6 @
- @
- add r8, r8, #2 @ r8 = bu = (Cb*128 + 256) >> 9
- mov r8, r8, asr #2 @
- add r9, r9, #256 @ r9 = rv = (r9 + 256) >> 9
- mov r9, r9, asr #9 @
- rsb r10, r10, #128 @ r10 = guv = (-r10 + 128) >> 8
- mov r10, r10, asr #8 @
- @ compute R, G, and B
- add r1, r8, r7, asr #8 @ r1 = b = (Y >> 9) + bu
- add lr, r9, r7, asr #8 @ lr = r = (Y >> 9) + rv
- add r7, r10, r7, asr #7 @ r7 = g = (Y >> 8) + guv
- @
-#if ARM_ARCH >= 6
- usat r1, #5, r1 @ clamp b
- usat lr, #5, lr @ clamp r
- usat r7, #6, r7 @ clamp g
-#else
- orr r12, r1, lr @ check if clamping is needed...
- orr r12, r12, r7, asr #1 @ ...at all
- cmp r12, #31 @
- bls 15f @ no clamp @
- cmp r1, #31 @ clamp b
- mvnhi r1, r1, asr #31 @
- andhi r1, r1, #31 @
- cmp lr, #31 @ clamp r
- mvnhi lr, lr, asr #31 @
- andhi lr, lr, #31 @
- cmp r7, #63 @ clamp g
- mvnhi r7, r7, asr #31 @
- andhi r7, r7, #63 @
-15: @ no clamp @
-#endif
- @
- ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride)
- @
- orr r1, r1, r7, lsl #5 @ r1 = b | (g << 5)
- orr r1, r1, lr, lsl #11 @ r1 |= (r << 11)
-
-#if LCD_WIDTH >= LCD_HEIGHT
- strh r1, [r0] @
-#elif LCD_WIDTH < 256
- strh r1, [r0], #LCD_WIDTH @ store pixel
-#else
- strh r1, [r0] @
-#endif
- @
- sub r7, r12, #16 @ r7 = Y = (Y' - 16)*74
- add r12, r7, r7, asl #2 @
- add r7, r12, r7, asl #5 @
- @ compute R, G, and B
- add r1, r8, r7, asr #8 @ r1 = b = (Y >> 9) + bu
- add lr, r9, r7, asr #8 @ lr = r = (Y >> 9) + rv
- add r7, r10, r7, asr #7 @ r7 = g = (Y >> 8) + guv
- @
-#if ARM_ARCH >= 6
- usat r1, #5, r1 @ clamp b
- usat lr, #5, lr @ clamp r
- usat r7, #6, r7 @ clamp g
-#else
- orr r12, r1, lr @ check if clamping is needed...
- orr r12, r12, r7, asr #1 @ ...at all
- cmp r12, #31 @
- bls 15f @ no clamp @
- cmp r1, #31 @ clamp b
- mvnhi r1, r1, asr #31 @
- andhi r1, r1, #31 @
- cmp lr, #31 @ clamp r
- mvnhi lr, lr, asr #31 @
- andhi lr, lr, #31 @
- cmp r7, #63 @ clamp g
- mvnhi r7, r7, asr #31 @
- andhi r7, r7, #63 @
-15: @ no clamp @
-#endif
- @
- ldrb r12, [r4], #1 @ r12 = Y' = *(Y'_p++)
- @
- orr r1, r1, lr, lsl #11 @ r1 = b | (r << 11)
- orr r1, r1, r7, lsl #5 @ r1 |= (g << 5)
-
-#if LCD_WIDTH >= LCD_HEIGHT
- add r0, r0, #2*LCD_WIDTH @
- strh r1, [r0] @ store pixel
- sub r0, r0, #2*LCD_WIDTH @
-#elif LCD_WIDTH < 256
- strh r1, [r0, #-LCD_WIDTH-2] @ store pixel
-#else
- strh r1, [r0, #-2] @
- add r0, r0, #LCD_WIDTH @
-#endif
- @
- sub r7, r12, #16 @ r7 = Y = (Y' - 16)*74
- add r12, r7, r7, asl #2 @
- add r7, r12, r7, asl #5 @
- @ compute R, G, and B
- add r1, r8, r7, asr #8 @ r1 = b = (Y >> 9) + bu
- add lr, r9, r7, asr #8 @ lr = r = (Y >> 9) + rv
- add r7, r10, r7, asr #7 @ r7 = g = (Y >> 8) + guv
- @
-#if ARM_ARCH >= 6
- usat r1, #5, r1 @ clamp b
- usat lr, #5, lr @ clamp r
- usat r7, #6, r7 @ clamp g
-#else
- orr r12, r1, lr @ check if clamping is needed...
- orr r12, r12, r7, asr #1 @ ...at all
- cmp r12, #31 @
- bls 15f @ no clamp @
- cmp r1, #31 @ clamp b
- mvnhi r1, r1, asr #31 @
- andhi r1, r1, #31 @
- cmp lr, #31 @ clamp r
- mvnhi lr, lr, asr #31 @
- andhi lr, lr, #31 @
- cmp r7, #63 @ clamp g
- mvnhi r7, r7, asr #31 @
- andhi r7, r7, #63 @
-15: @ no clamp @
-#endif
- @
- ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride)
- @
- orr r1, r1, r7, lsl #5 @ r1 = b | (g << 5)
- orr r1, r1, lr, lsl #11 @ r1 |= (r << 11)
-
-#if LCD_WIDTH >= LCD_HEIGHT
- strh r1, [r0, #2]
-#elif LCD_WIDTH < 256
- strh r1, [r0, #LCD_WIDTH]! @ store pixel
-#else
- strh r1, [r0] @
-#endif
- @
- sub r7, r12, #16 @ r7 = Y = (Y' - 16)*74
- add r12, r7, r7, asl #2 @
- add r7, r12, r7, asl #5 @
- @ compute R, G, and B
- add r1, r8, r7, asr #8 @ r1 = b = (Y >> 9) + bu
- add lr, r9, r7, asr #8 @ lr = r = (Y >> 9) + rv
- add r7, r10, r7, asr #7 @ r7 = g = (Y >> 8) + guv
- @
-#if ARM_ARCH >= 6
- usat r1, #5, r1 @ clamp b
- usat lr, #5, lr @ clamp r
- usat r7, #6, r7 @ clamp g
-#else
- orr r12, r1, lr @ check if clamping is needed...
- orr r12, r12, r7, asr #1 @ ...at all
- cmp r12, #31 @
- bls 15f @ no clamp @
- cmp r1, #31 @ clamp b
- mvnhi r1, r1, asr #31 @
- andhi r1, r1, #31 @
- cmp lr, #31 @ clamp r
- mvnhi lr, lr, asr #31 @
- andhi lr, lr, #31 @
- cmp r7, #63 @ clamp g
- mvnhi r7, r7, asr #31 @
- andhi r7, r7, #63 @
-15: @ no clamp @
-#endif
- @
- orr r12, r1, lr, lsl #11 @ r12 = b | (r << 11)
- orr r12, r12, r7, lsl #5 @ r12 |= (g << 5)
-
-#if LCD_WIDTH >= LCD_HEIGHT
- add r0, r0, #2*LCD_WIDTH
- strh r12, [r0, #2]
-#if LCD_WIDTH <= 512
- sub r0, r0, #(2*LCD_WIDTH)-4
-#else
- sub r0, r0, #(2*LCD_WIDTH)
- add r0, r0, #4
-#endif
-#else
- strh r12, [r0, #-2] @ store pixel
-#if LCD_WIDTH < 256
- add r0, r0, #2*LCD_WIDTH @
-#else
- add r0, r0, #LCD_WIDTH @
-#endif
-#endif
- @
- subs r2, r2, #2 @ subtract block from width
- bgt 10b @ loop line @
- @
- ldmpc regs=r4-r10 @ restore registers and return
- .ltorg @ dump constant pool
- .size lcd_write_yuv420_lines, .-lcd_write_yuv420_lines
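For reference, the fixed-point math that the removed lcd_write_yuv420_lines implemented can be sketched in C roughly as follows. The coefficients 74/101/24/51/128 and the >>9 / >>8 shifts come from the header comment above; the function and helper names are illustrative only, not part of the Rockbox API, and arithmetic right shifts are assumed as in the asm.

    #include <stdint.h>

    /* Clamp x into [0, max]; stands in for the usat / mvnhi+andhi sequences. */
    static inline int clamp_u(int x, int max)
    {
        if (x < 0)   return 0;
        if (x > max) return max;
        return x;
    }

    /* One Y' sample plus the shared Cb/Cr of its 2x2 block -> RGB565:
     *   R = (74*(Y'-16)               + 101*(Cr-128)) >> 9
     *   G = (74*(Y'-16) - 24*(Cb-128) -  51*(Cr-128)) >> 8
     *   B = (74*(Y'-16) + 128*(Cb-128)              ) >> 9
     */
    static uint16_t yuv_to_rgb565(int yp, int cb, int cr)
    {
        int y   = (yp - 16) * 74;
        int bu  = (128 * (cb - 128) + 256) >> 9;   /* blue chroma term */
        int rv  = (101 * (cr - 128) + 256) >> 9;   /* red chroma term  */
        int guv = (-(24 * (cb - 128) + 51 * (cr - 128)) + 128) >> 8;

        int b = clamp_u((y >> 9) + bu,  31);
        int r = clamp_u((y >> 9) + rv,  31);
        int g = clamp_u((y >> 8) + guv, 63);

        return (uint16_t)((r << 11) | (g << 5) | b);
    }

As in the asm, one Cb/Cr pair serves the four Y' samples of a 2x2 block, so bu, rv and guv only need to be computed once per loop iteration.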
-
-
-/****************************************************************************
- * void lcd_write_yuv420_lines_odither(fb_data *dst,
- * unsigned char const * const src[3],
- * int width,
- * int stride,
- * int x_screen,
- * int y_screen);
- *
- * |R| |1.000000 -0.000001 1.402000| |Y'|
- * |G| = |1.000000 -0.334136 -0.714136| |Pb|
- * |B| |1.000000 1.772000 0.000000| |Pr|
- * Red scaled at twice g & b but at same precision to place it in correct
- * bit position after multiply and leave instruction count lower.
- * |R| |298 0 408| |Y' - 16|
- * |G| = |149 -49 -104| |Cb - 128|
- * |B| |149 258 0| |Cr - 128|
- *
- * Write four RGB565 pixels in the following order on each loop:
- * 1 3 + > down
- * 2 4 \/ left
- *
- * Kernel pattern (raw|rotated|use order):
- * 5 3 4 2      2 6 3 7      row0     row2        > down
- * 1 7 0 6  |   4 0 5 1  |   2 4 6 0  3 5 7 1     col0  left
- * 4 2 5 3  |   3 7 2 6  |   3 5 7 1  2 4 6 0     col2  \/
- * 0 6 1 7      5 1 4 0
- */
- .section .icode.lcd_write_yuv420_lines_odither, "ax", %progbits
- .align 2
- .global lcd_write_yuv420_lines_odither
- .type lcd_write_yuv420_lines_odither, %function
-lcd_write_yuv420_lines_odither:
- @ r0 = dst
- @ r1 = yuv_src
- @ r2 = width
- @ r3 = stride
- @ [sp] = x_screen
- @ [sp+4] = y_screen
- stmfd sp!, { r4-r11, lr } @ save non-scratch
- ldmia r1, { r4, r5, r6 } @ r4 = yuv_src[0] = Y'_p
- @ r5 = yuv_src[1] = Cb_p
- @ r6 = yuv_src[2] = Cr_p
- @
- sub r3, r3, #1 @
- add r1, sp, #36 @ Line up pattern and kernel quadrant
- ldmia r1, { r12, r14 } @
- eor r14, r14, r12 @
- and r14, r14, #0x2 @
- mov r14, r14, lsl #6 @ 0x00 or 0x80
-10: @ loop line @
- @
- ldrb r7, [r4], #1 @ r7 = *Y'_p++;
- ldrb r8, [r5], #1 @ r8 = *Cb_p++;
- ldrb r9, [r6], #1 @ r9 = *Cr_p++;
- @
- eor r14, r14, #0x80 @ flip pattern quadrant
- @
- sub r7, r7, #16 @ r7 = Y = (Y' - 16)*149
- add r12, r7, r7, asl #2 @
- add r12, r12, r12, asl #4 @
- add r7, r12, r7, asl #6 @
- @
- sub r8, r8, #128 @ Cb -= 128
- sub r9, r9, #128 @ Cr -= 128
- @
- add r10, r8, r8, asl #4 @ r10 = guv = Cr*104 + Cb*49
- add r10, r10, r8, asl #5 @
- add r10, r10, r9, asl #3 @
- add r10, r10, r9, asl #5 @
- add r10, r10, r9, asl #6 @
- @
- mov r8, r8, asl #1 @ r8 = bu = Cb*258
- add r8, r8, r8, asl #7 @
- @
- add r9, r9, r9, asl #1 @ r9 = rv = Cr*408
- add r9, r9, r9, asl #4 @
- mov r9, r9, asl #3 @
- @
- @ compute R, G, and B
- add r1, r8, r7 @ r1 = b' = Y + bu
- add r11, r9, r7, asl #1 @ r11 = r' = Y*2 + rv
- rsb r7, r10, r7 @ r7 = g' = Y + guv
- @
- @ r8 = bu, r9 = rv, r10 = guv
- @
- sub r12, r1, r1, lsr #5 @ r1 = 31/32*b + b/256
- add r1, r12, r1, lsr #8 @
- @
- sub r12, r11, r11, lsr #5 @ r11 = 31/32*r + r/256
- add r11, r12, r11, lsr #8 @
- @
- sub r12, r7, r7, lsr #6 @ r7 = 63/64*g + g/256
- add r7, r12, r7, lsr #8 @
- @
- add r12, r14, #0x100 @
- @
- add r1, r1, r12 @ b = r1 + delta
- add r11, r11, r12, lsl #1 @ r = r11 + delta*2
- add r7, r7, r12, lsr #1 @ g = r7 + delta/2
- @
-#if ARM_ARCH >= 6
- usat r11, #5, r11, asr #11 @ clamp r
- usat r7, #6, r7, asr #9 @ clamp g
- usat r1, #5, r1, asr #10 @ clamp b
- @
- ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride)
- @
- orr r1, r1, r11, lsl #11 @ r1 = b | (r << 11)
- orr r1, r1, r7, lsl #5 @ r1 |= (g << 5)
-#else
- orr r12, r1, r11, asr #1 @ check if clamping is needed...
- orr r12, r12, r7 @ ...at all
- movs r12, r12, asr #15 @
- beq 15f @ no clamp @
- movs r12, r1, asr #15 @ clamp b
- mvnne r1, r12, lsr #15 @
- andne r1, r1, #0x7c00 @ mask b only if clamped
- movs r12, r11, asr #16 @ clamp r
- mvnne r11, r12, lsr #16 @
- movs r12, r7, asr #15 @ clamp g
- mvnne r7, r12, lsr #15 @
-15: @ no clamp @
- @
- ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride)
- @
- and r11, r11, #0xf800 @ pack pixel
- and r7, r7, #0x7e00 @ r1 = pixel = (r & 0xf800) |
- orr r11, r11, r7, lsr #4 @ ((g & 0x7e00) >> 4) |
- orr r1, r11, r1, lsr #10 @ (b >> 10)
-#endif
- @
-#if LCD_WIDTH >= LCD_HEIGHT
- strh r1, [r0] @
-#elif LCD_WIDTH < 256
- strh r1, [r0], #LCD_WIDTH @ store pixel
-#else
- strh r1, [r0] @
-#endif
- @
- sub r7, r12, #16 @ r7 = Y = (Y' - 16)*149
- add r12, r7, r7, asl #2 @
- add r12, r12, r12, asl #4 @
- add r7, r12, r7, asl #6 @
- @ compute R, G, and B
- add r1, r8, r7 @ r1 = b' = Y + bu
- add r11, r9, r7, asl #1 @ r11 = r' = Y*2 + rv
- rsb r7, r10, r7 @ r7 = g' = Y + guv
- @
- sub r12, r1, r1, lsr #5 @ r1 = 31/32*b' + b'/256
- add r1, r12, r1, lsr #8 @
- @
- sub r12, r11, r11, lsr #5 @ r11 = 31/32*r' + r'/256
- add r11, r12, r11, lsr #8 @
- @
- sub r12, r7, r7, lsr #6 @ r7 = 63/64*g' + g'/256
- add r7, r12, r7, lsr #8 @
- @
- add r12, r14, #0x200 @
- @
- add r1, r1, r12 @ b = r1 + delta
- add r11, r11, r12, lsl #1 @ r = r11 + delta*2
- add r7, r7, r12, lsr #1 @ g = r7 + delta/2
- @
-#if ARM_ARCH >= 6
- usat r11, #5, r11, asr #11 @ clamp r
- usat r7, #6, r7, asr #9 @ clamp g
- usat r1, #5, r1, asr #10 @ clamp b
- @
- ldrb r12, [r4], #1 @ r12 = Y' = *(Y'_p++)
- @
- orr r1, r1, r11, lsl #11 @ r1 = b | (r << 11)
- orr r1, r1, r7, lsl #5 @ r1 |= (g << 5)
-#else
- orr r12, r1, r11, asr #1 @ check if clamping is needed...
- orr r12, r12, r7 @ ...at all
- movs r12, r12, asr #15 @
- beq 15f @ no clamp @
- movs r12, r1, asr #15 @ clamp b
- mvnne r1, r12, lsr #15 @
- andne r1, r1, #0x7c00 @ mask b only if clamped
- movs r12, r11, asr #16 @ clamp r
- mvnne r11, r12, lsr #16 @
- movs r12, r7, asr #15 @ clamp g
- mvnne r7, r12, lsr #15 @
-15: @ no clamp @
- @
- ldrb r12, [r4], #1 @ r12 = Y' = *(Y'_p++)
- @
- and r11, r11, #0xf800 @ pack pixel
- and r7, r7, #0x7e00 @ r1 = pixel = (r & 0xf800) |
- orr r11, r11, r7, lsr #4 @ ((g & 0x7e00) >> 4) |
- orr r1, r11, r1, lsr #10 @ (b >> 10)
-#endif
- @
-#if LCD_WIDTH >= LCD_HEIGHT
- add r0, r0, #2*LCD_WIDTH @
- strh r1, [r0] @ store pixel
- sub r0, r0, #2*LCD_WIDTH @
-#elif LCD_WIDTH < 256
- strh r1, [r0, #-LCD_WIDTH-2] @ store pixel
-#else
- strh r1, [r0, #-2] @ store pixel
- add r0, r0, #LCD_WIDTH @
-#endif
- @
- sub r7, r12, #16 @ r7 = Y = (Y' - 16)*149
- add r12, r7, r7, asl #2 @
- add r12, r12, r12, asl #4 @
- add r7, r12, r7, asl #6 @
- @ compute R, G, and B
- add r1, r8, r7 @ r1 = b' = Y + bu
- add r11, r9, r7, asl #1 @ r11 = r' = Y*2 + rv
- rsb r7, r10, r7 @ r7 = g' = Y + guv
- @
- @ r8 = bu, r9 = rv, r10 = guv
- @
- sub r12, r1, r1, lsr #5 @ r1 = 31/32*b' + b'/256
- add r1, r12, r1, lsr #8 @
- @
- sub r12, r11, r11, lsr #5 @ r11 = 31/32*r' + r'/256
- add r11, r12, r11, lsr #8 @
- @
- sub r12, r7, r7, lsr #6 @ r7 = 63/64*g' + g'/256
- add r7, r12, r7, lsr #8 @
- @
- add r12, r14, #0x300 @
- @
- add r1, r1, r12 @ b = r1 + delta
- add r11, r11, r12, lsl #1 @ r = r11 + delta*2
- add r7, r7, r12, lsr #1 @ g = r7 + delta/2
- @
-#if ARM_ARCH >= 6
- usat r11, #5, r11, asr #11 @ clamp r
- usat r7, #6, r7, asr #9 @ clamp g
- usat r1, #5, r1, asr #10 @ clamp b
- @
- ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride)
- @
- orr r1, r1, r11, lsl #11 @ r1 = b | (r << 11)
- orr r1, r1, r7, lsl #5 @ r1 |= (g << 5)
-#else
- orr r12, r1, r11, asr #1 @ check if clamping is needed...
- orr r12, r12, r7 @ ...at all
- movs r12, r12, asr #15 @
- beq 15f @ no clamp @
- movs r12, r1, asr #15 @ clamp b
- mvnne r1, r12, lsr #15 @
- andne r1, r1, #0x7c00 @ mask b only if clamped
- movs r12, r11, asr #16 @ clamp r
- mvnne r11, r12, lsr #16 @
- movs r12, r7, asr #15 @ clamp g
- mvnne r7, r12, lsr #15 @
-15: @ no clamp @
- @
- ldrb r12, [r4, r3] @ r12 = Y' = *(Y'_p + stride)
- @
- and r11, r11, #0xf800 @ pack pixel
- and r7, r7, #0x7e00 @ r1 = pixel = (r & 0xf800) |
- orr r11, r11, r7, lsr #4 @ ((g & 0x7e00) >> 4) |
- orr r1, r11, r1, lsr #10 @ (b >> 10)
-#endif
- @
-#if LCD_WIDTH >= LCD_HEIGHT
- strh r1, [r0, #2]
-#elif LCD_WIDTH < 256
- strh r1, [r0, #LCD_WIDTH]! @ store pixel
-#else
- strh r1, [r0] @
-#endif
-
- sub r7, r12, #16 @ r7 = Y = (Y' - 16)*149
- add r12, r7, r7, asl #2 @
- add r12, r12, r12, asl #4 @
- add r7, r12, r7, asl #6 @
- @ compute R, G, and B
- add r1, r8, r7 @ r1 = b' = Y + bu
- add r11, r9, r7, asl #1 @ r11 = r' = Y*2 + rv
- rsb r7, r10, r7 @ r7 = g' = Y + guv
- @
- sub r12, r1, r1, lsr #5 @ r1 = 31/32*b + b/256
- add r1, r12, r1, lsr #8 @
- @
- sub r12, r11, r11, lsr #5 @ r11 = 31/32*r + r/256
- add r11, r12, r11, lsr #8 @
- @
- sub r12, r7, r7, lsr #6 @ r7 = 63/64*g + g/256
- add r7, r12, r7, lsr #8 @
- @
- @ This element is zero - use r14 @
- @
- add r1, r1, r14 @ b = r1 + delta
- add r11, r11, r14, lsl #1 @ r = r11 + delta*2
- add r7, r7, r14, lsr #1 @ g = r7 + delta/2
- @
-#if ARM_ARCH >= 6
- usat r11, #5, r11, asr #11 @ clamp r
- usat r7, #6, r7, asr #9 @ clamp g
- usat r1, #5, r1, asr #10 @ clamp b
- @
- orr r1, r1, r11, lsl #11 @ r1 = b | (r << 11)
- orr r1, r1, r7, lsl #5 @ r1 |= (g << 5)
-#else
- orr r12, r1, r11, asr #1 @ check if clamping is needed...
- orr r12, r12, r7 @ ...at all
- movs r12, r12, asr #15 @
- beq 15f @ no clamp @
- movs r12, r1, asr #15 @ clamp b
- mvnne r1, r12, lsr #15 @
- andne r1, r1, #0x7c00 @ mask b only if clamped
- movs r12, r11, asr #16 @ clamp r
- mvnne r11, r12, lsr #16 @
- movs r12, r7, asr #15 @ clamp g
- mvnne r7, r12, lsr #15 @
-15: @ no clamp @
- @
- and r11, r11, #0xf800 @ pack pixel
- and r7, r7, #0x7e00 @ r1 = pixel = (r & 0xf800) |
- orr r11, r11, r7, lsr #4 @ ((g & 0x7e00) >> 4) |
- orr r1, r11, r1, lsr #10 @ (b >> 10)
-#endif
- @
-#if LCD_WIDTH >= LCD_HEIGHT
- add r0, r0, #2*LCD_WIDTH
- strh r1, [r0, #2] @ store pixel
-#if LCD_WIDTH <= 512
- sub r0, r0, #(2*LCD_WIDTH)-4
-#else
- sub r0, r0, #(2*LCD_WIDTH)
- add r0, r0, #4
-#endif
-#else
- strh r1, [r0, #-2] @ store pixel
-#if LCD_WIDTH < 256
- add r0, r0, #2*LCD_WIDTH @
-#else
- add r0, r0, #LCD_WIDTH @
-#endif
-#endif
- @
- subs r2, r2, #2 @ subtract block from width
- bgt 10b @ loop line @
- @
- ldmpc regs=r4-r11 @ restore registers and return
- .ltorg @ dump constant pool
- .size lcd_write_yuv420_lines_odither, .-lcd_write_yuv420_lines_odither
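The removed dithered variant follows the same structure with the 149/258/408/49/104 coefficients and an ordered-dither bias added before the final truncation to 5/6/5 bits. Below is a simplified C sketch of the per-pixel arithmetic only; the 4x4 kernel traversal driven by x_screen/y_screen and the asm's lsr-based scaling are not reproduced, the names are illustrative, and delta is assumed to be the kernel value already selected for the current pixel.

    #include <stdint.h>

    static inline int clamp_u(int x, int max)
    {
        if (x < 0)   return 0;
        if (x > max) return max;
        return x;
    }

    /* Scaled matrix from the header comment (red at twice the g/b scale so it
     * already sits at the RGB565 bit position after the multiply):
     *   r' = 298*(Y'-16)                + 408*(Cr-128)
     *   g' = 149*(Y'-16) -  49*(Cb-128) - 104*(Cr-128)
     *   b' = 149*(Y'-16) + 258*(Cb-128)
     */
    static uint16_t yuv_to_rgb565_dither(int yp, int cb, int cr, int delta)
    {
        int y   = (yp - 16) * 149;
        int bu  = 258 * (cb - 128);
        int rv  = 408 * (cr - 128);
        int guv =  49 * (cb - 128) + 104 * (cr - 128);

        int b = y + bu;         /* 5-bit component scaled by 2^10 */
        int r = 2 * y + rv;     /* 5-bit component scaled by 2^11 */
        int g = y - guv;        /* 6-bit component scaled by 2^9  */

        /* 31/32 + 1/256 scaling (63/64 + 1/256 for green), as in the asm */
        b = b - (b >> 5) + (b >> 8);
        r = r - (r >> 5) + (r >> 8);
        g = g - (g >> 6) + (g >> 8);

        b += delta;             /* dither bias, weighted per component */
        r += delta << 1;
        g += delta >> 1;

        return (uint16_t)((clamp_u(r >> 11, 31) << 11) |
                          (clamp_u(g >>  9, 63) <<  5) |
                           clamp_u(b >> 10, 31));
    }

In the asm, delta is built from a base of 0x00 or 0x80 selected by bit 1 of x_screen ^ y_screen (flipped on each 2x2 block along the line) plus a per-pixel offset of 0x100, 0x200, 0x300 or 0 for the four pixels of the block.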