#!/usr/bin/perl
##
##  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
##
##  Use of this source code is governed by a BSD-style license
##  that can be found in the LICENSE file in the root of the source
##  tree. An additional intellectual property rights grant can be found
##  in the file PATENTS.  All contributing project authors may
##  be found in the AUTHORS file in the root of the source tree.
##

package thumb;

use strict;
use warnings;

# FixThumbInstructions($line, $short_branches)
#
# Rewrite ARM-mode assembly constructs into equivalent sequences that are
# valid when assembling for Thumb-2, by applying a series of in-place s///
# substitutions.
#
# NOTE(review): despite taking two arguments, this sub never reads $_[0];
# every substitution below operates on the global $_.  Callers are expected
# to invoke it as thumb::FixThumbInstructions($_, ...) so the mutation lands
# on their current line — passing any other variable has no effect on it.
# Kept as-is (including the ($$) prototype) to preserve the interface;
# confirm against callers (e.g. ads2gas) before changing.
#
# $short_branches selects an adjusted shift amount in the "addlt pc" jump
# table expansion below (1 when branch offsets are halved, 0 otherwise).
#
# Returns the result of the final s/// (the match count of the "mov pc, lr"
# rewrite); callers ignore the return value.
sub FixThumbInstructions($$)
{
    my $short_branches = $_[1];
    my $branch_shift_offset = $short_branches ? 1 : 0;

    # Write additions with shifts, such as "add r10, r11, lsl #8",
    # in three operand form, "add r10, r10, r11, lsl #8".
    s/(add\s+)(r\d+),\s*(r\d+),\s*(lsl #\d+)/$1$2, $2, $3, $4/g;

    # Convert additions with a non-constant shift into a sequence
    # with left shift, addition and a right shift (to restore the
    # register to the original value). Currently the right shift
    # isn't necessary in the code base since the values in these
    # registers aren't used, but doing the shift for consistency.
    # This converts instructions such as "add r12, r12, r5, lsl r4"
    # into the sequence "lsl r5, r4", "add r12, r12, r5", "lsr r5, r4".
    s/^(\s*)(add)(\s+)(r\d+),\s*(r\d+),\s*(r\d+),\s*lsl (r\d+)/$1lsl$3$6, $7\n$1$2$3$4, $5, $6\n$1lsr$3$6, $7/g;

    # Convert loads with right shifts in the indexing into a
    # sequence of an add, load and sub. This converts
    # "ldrb r4, [r9, lr, asr #1]" into "add r9, r9, lr, asr #1",
    # "ldrb r9, [r9]", "sub r9, r9, lr, asr #1".
    s/^(\s*)(ldrb)(\s+)(r\d+),\s*\[(\w+),\s*(\w+),\s*(asr #\d+)\]/$1add $3$5, $5, $6, $7\n$1$2$3$4, [$5]\n$1sub $3$5, $5, $6, $7/g;

    # Convert register indexing with writeback into a separate add
    # instruction. This converts "ldrb r12, [r1, r2]!" into
    # "ldrb r12, [r1, r2]", "add r1, r1, r2".
    s/^(\s*)(ldrb)(\s+)(r\d+),\s*\[(\w+),\s*(\w+)\]!/$1$2$3$4, [$5, $6]\n$1add $3$5, $6/g;

    # Convert negative register indexing into separate sub/add instructions.
    # This converts "ldrne r4, [src, -pstep, lsl #1]" into
    # "subne src, src, pstep, lsl #1", "ldrne r4, [src]",
    # "addne src, src, pstep, lsl #1". In a couple of cases where
    # this is used, it's used for two subsequent load instructions,
    # where a hand-written version of it could merge two subsequent
    # add and sub instructions.
    s/^(\s*)((ldr|str|pld)(ne)?)(\s+)(r\d+,\s*)?\[(\w+), -([^\]]+)\]/$1sub$4$5$7, $7, $8\n$1$2$5$6\[$7\]\n$1add$4$5$7, $7, $8/g;

    # Convert register post indexing to a separate add instruction.
    # This converts "ldrneb r9, [r0], r2" into "ldrneb r9, [r0]",
    # "add r0, r2".
    s/^(\s*)((ldr|str)(ne)?[bhd]?)(\s+)(\w+),(\s*\w+,)?\s*\[(\w+)\],\s*(\w+)/$1$2$5$6,$7 [$8]\n$1add$4$5$8, $8, $9/g;

    # Convert a conditional addition to the pc register into a series of
    # instructions. This converts "addlt pc, pc, r3, lsl #2" into
    # "itttt lt", "movlt.n r12, pc", "addlt.w r12, #12",
    # "addlt.w r12, r12, r3, lsl #2", "movlt.n pc, r12".
    # This assumes that r12 is free at this point.
    s/^(\s*)addlt(\s+)pc,\s*pc,\s*(\w+),\s*lsl\s*#(\d+)/$1itttt$2lt\n$1movlt.n$2r12, pc\n$1addlt.w$2r12, #12\n$1addlt.w$2r12, r12, $3, lsl #($4-$branch_shift_offset)\n$1movlt.n$2pc, r12/g;

    # Convert "mov pc, lr" into "bx lr", since the former only works
    # for switching from arm to thumb (and only in armv7), but not
    # from thumb to arm.
    s/mov(\s*)pc\s*,\s*lr/bx$1lr/g;
}

1;