#!/usr/bin/perl
##
## Copyright (c) 2013 The WebM project authors. All Rights Reserved.
##
## Use of this source code is governed by a BSD-style license
## that can be found in the LICENSE file in the root of the source
## tree. An additional intellectual property rights grant can be found
## in the file PATENTS. All contributing project authors may
## be found in the AUTHORS file in the root of the source tree.
##

package thumb;

sub FixThumbInstructions($$)
{
    my $short_branches = $_[1];
    my $branch_shift_offset = $short_branches ? 1 : 0;
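
    # Note: the substitutions below act on the global $_, so the caller is
    # expected to have the line being translated in $_ as well as in the
    # first argument (which is not read directly here). $branch_shift_offset
    # is only used in the pc-relative jump expansion further down: when the
    # caller asks for short branches, each branch-table entry is presumably
    # half as wide, so the table index shift is reduced by one.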

    # Write additions with shifts, such as "add r10, r11, lsl #8",
    # in three operand form, "add r10, r10, r11, lsl #8".
    s/(add\s+)(r\d+),\s*(r\d+),\s*(lsl #\d+)/$1$2, $2, $3, $4/g;
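    # (Presumably the two-operand shorthand with a shifted register operand
    # is not accepted by the assembler when targeting Thumb-2, so the
    # destination register is spelled out explicitly.)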

    # Convert additions with a non-constant shift into a sequence
    # of a left shift, an addition and a right shift (to restore the
    # register to its original value). Currently the right shift
    # isn't necessary in the code base since the values in these
    # registers aren't used afterwards, but the shift is done for
    # consistency.
    # This converts instructions such as "add r12, r12, r5, lsl r4"
    # into the sequence "lsl r5, r4", "add r12, r12, r5", "lsr r5, r4".
    s/^(\s*)(add)(\s+)(r\d+),\s*(r\d+),\s*(r\d+),\s*lsl (r\d+)/$1lsl$3$6, $7\n$1$2$3$4, $5, $6\n$1lsr$3$6, $7/g;
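    # (ARM allows a register-controlled shift on the second operand of add,
    # but Thumb-2 only allows an immediate shift there, hence the separate
    # lsl/lsr instructions.)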

    # Convert loads with right shifts in the indexing into a
    # sequence of an add, load and sub. This converts
    # "ldrb r4, [r9, lr, asr #1]" into "add r9, r9, lr, asr #1",
    # "ldrb r4, [r9]", "sub r9, r9, lr, asr #1".
    s/^(\s*)(ldrb)(\s+)(r\d+),\s*\[(\w+),\s*(\w+),\s*(asr #\d+)\]/$1add $3$5, $5, $6, $7\n$1$2$3$4, [$5]\n$1sub $3$5, $5, $6, $7/g;
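    # (Thumb-2 register-offset loads only support an optional "lsl #0-3" on
    # the index register, not asr, hence the explicit add/sub around a plain
    # load.)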

    # Convert register indexing with writeback into a separate add
    # instruction. This converts "ldrb r12, [r1, r2]!" into
    # "ldrb r12, [r1, r2]", "add r1, r2".
    s/^(\s*)(ldrb)(\s+)(r\d+),\s*\[(\w+),\s*(\w+)\]!/$1$2$3$4, [$5, $6]\n$1add $3$5, $6/g;
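    # (Pre-indexed addressing with writeback in Thumb-2 only takes an
    # immediate offset, so the register-offset base update is done with an
    # explicit add instead.)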

    # Convert negative register indexing into separate sub/add instructions.
    # This converts "ldrne r4, [src, -pstep, lsl #1]" into
    # "subne src, src, pstep, lsl #1", "ldrne r4, [src]",
    # "addne src, src, pstep, lsl #1". In a couple of places this is
    # used for two consecutive load instructions, where a hand-written
    # version could merge the back-to-back add and sub instructions.
    s/^(\s*)((ldr|str|pld)(ne)?)(\s+)(r\d+,\s*)?\[(\w+), -([^\]]+)\]/$1sub$4$5$7, $7, $8\n$1$2$5$6\[$7\]\n$1add$4$5$7, $7, $8/g;
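    # (Thumb-2 register-offset addressing always adds the index register;
    # there is no subtracted form, so the subtraction is made explicit.)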

    # Convert register post indexing to a separate add instruction.
    # This converts "ldrneb r9, [r0], r2" into "ldrneb r9, [r0]",
    # "addne r0, r0, r2".
    s/^(\s*)((ldr|str)(ne)?[bhd]?)(\s+)(\w+),(\s*\w+,)?\s*\[(\w+)\],\s*(\w+)/$1$2$5$6,$7 [$8]\n$1add$4$5$8, $8, $9/g;
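    # (Post-indexed addressing in Thumb-2 only takes an immediate offset,
    # not a register, so the base update becomes an explicit add. The
    # condition, if any, is carried over to that add.)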

    # Convert a conditional addition to the pc register into a series of
    # instructions. This converts "addlt pc, pc, r3, lsl #2" into
    # "itttt lt", "movlt.n r12, pc", "addlt.w r12, #12",
    # "addlt.w r12, r12, r3, lsl #2", "movlt.n pc, r12".
    # This assumes that r12 is free at this point.
    s/^(\s*)addlt(\s+)pc,\s*pc,\s*(\w+),\s*lsl\s*#(\d+)/$1itttt$2lt\n$1movlt.n$2r12, pc\n$1addlt.w$2r12, #12\n$1addlt.w$2r12, r12, $3, lsl #($4-$branch_shift_offset)\n$1movlt.n$2pc, r12/g;
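    # (The constant #12 and the $branch_shift_offset adjustment presumably
    # compensate for the different pc-read semantics and instruction sizes
    # in Thumb compared to the original ARM "add pc, pc, ...", so that the
    # computed jump still lands on the intended branch-table entry. With
    # short branches enabled the shift is emitted as e.g. "lsl #(2-1)".)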

    # Convert "mov pc, lr" into "bx lr", since the former only works
    # for switching from arm to thumb (and only in armv7), but not
    # from thumb to arm.
    s/mov(\s*)pc\s*,\s*lr/bx$1lr/g;
}
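
# A minimal usage sketch (an assumption about the caller, e.g. an
# ads2gas-style converter, not something defined in this file): the
# substitutions above rely on the current line being in $_, so a caller
# would typically do something like
#
#     use thumb;                               # assuming thumb.pm is on @INC
#     while (<STDIN>) {
#         thumb::FixThumbInstructions($_, 0);  # rewrites $_ in place
#         print;
#     }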

1;