security/nss/lib/freebl/mpi/mpi_arm.c

author:      Michael Schloh von Bennewitz <michael@schloh.com>
date:        Thu, 22 Jan 2015 13:21:57 +0100
branch:      TOR_BUG_9701
changeset:   15:b8a032363ba2
permissions: -rw-r--r--

Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* This inline assembly version is for the 32-bit ARM platform only */

#if !defined(__arm__)
#error "This is for ARM only"
#endif

/* The inline assembly version does not work with 16-bit Thumb */
#if (!defined(__thumb__) || defined(__thumb2__)) && !defined(__ARM_ARCH_3__)

#include "mpi-priv.h"

#ifdef MP_ASSEMBLY_MULTIPLY
void s_mpv_mul_d(const mp_digit *a, mp_size a_len, mp_digit b, mp_digit *c)
{
    __asm__ __volatile__(
        "mov r5, #0\n"
#ifdef __thumb2__
        "cbz %1, 2f\n"
#else
        "cmp %1, r5\n" /* r5 is 0 now */
        "beq 2f\n"
#endif

        "1:\n"
        "mov r4, #0\n"
        "ldr r6, [%0], #4\n"
        "umlal r5, r4, r6, %2\n"
        "str r5, [%3], #4\n"
        "mov r5, r4\n"

        "subs %1, #1\n"
        "bne 1b\n"

        "2:\n"
        "str r5, [%3]\n"
        :
        : "r"(a), "r"(a_len), "r"(b), "r"(c)
        : "memory", "cc", "%r4", "%r5", "%r6");
}
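
/*
 * For reference, a minimal portable C sketch of what the assembly above
 * computes (the _ref name is illustrative, not part of mpi-priv.h): the low
 * digits of a * b go to c[0..a_len-1] and the final carry is written to
 * c[a_len]. It assumes mp_digit is 32 bits wide, as it is on 32-bit ARM.
 */
static void s_mpv_mul_d_ref(const mp_digit *a, mp_size a_len, mp_digit b, mp_digit *c)
{
    mp_digit carry = 0;
    mp_size i;

    for (i = 0; i < a_len; i++) {
        mp_word w = (mp_word)a[i] * b + carry; /* 32x32 -> 64-bit multiply plus carry */
        c[i] = (mp_digit)w;                    /* low half of the result */
        carry = (mp_digit)(w >> 32);           /* high half carries into the next digit */
    }
    c[a_len] = carry;                          /* store the final carry digit */
}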

void s_mpv_mul_d_add(const mp_digit *a, mp_size a_len, mp_digit b, mp_digit *c)
{
    __asm__ __volatile__(
        "mov r5, #0\n"
#ifdef __thumb2__
        "cbz %1, 2f\n"
#else
        "cmp %1, r5\n" /* r5 is 0 now */
        "beq 2f\n"
#endif

        "1:\n"
        "mov r4, #0\n"
        "ldr r6, [%3]\n"
        "adds r5, r6\n"
        "adc r4, r4, #0\n"

        "ldr r6, [%0], #4\n"
        "umlal r5, r4, r6, %2\n"
        "str r5, [%3], #4\n"
        "mov r5, r4\n"

        "subs %1, #1\n"
        "bne 1b\n"

        "2:\n"
        "str r5, [%3]\n"
        :
        : "r"(a), "r"(a_len), "r"(b), "r"(c)
        : "memory", "cc", "%r4", "%r5", "%r6");
}
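
/*
 * Reference sketch of the operation above (illustrative name, same 32-bit
 * mp_digit assumption): the existing digits of c are folded into the
 * product, i.e. c[0..a_len-1] += a * b, and the final carry overwrites
 * c[a_len] rather than being added to it.
 */
static void s_mpv_mul_d_add_ref(const mp_digit *a, mp_size a_len, mp_digit b, mp_digit *c)
{
    mp_digit carry = 0;
    mp_size i;

    for (i = 0; i < a_len; i++) {
        mp_word w = (mp_word)a[i] * b + c[i] + carry; /* product + old digit + carry */
        c[i] = (mp_digit)w;
        carry = (mp_digit)(w >> 32);
    }
    c[a_len] = carry; /* carry is stored beyond a_len, not propagated further */
}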

void s_mpv_mul_d_add_prop(const mp_digit *a, mp_size a_len, mp_digit b, mp_digit *c)
{
    if (!a_len)
        return;

    __asm__ __volatile__(
        "mov r5, #0\n"

        "1:\n"
        "mov r4, #0\n"
        "ldr r6, [%3]\n"
        "adds r5, r6\n"
        "adc r4, r4, #0\n"
        "ldr r6, [%0], #4\n"
        "umlal r5, r4, r6, %2\n"
        "str r5, [%3], #4\n"
        "mov r5, r4\n"

        "subs %1, #1\n"
        "bne 1b\n"

#ifdef __thumb2__
        "cbz r4, 3f\n"
#else
        "cmp r4, #0\n"
        "beq 3f\n"
#endif

        "2:\n"
        "mov r4, #0\n"
        "ldr r6, [%3]\n"
        "adds r5, r6\n"
        "adc r4, r4, #0\n"
        "str r5, [%3], #4\n"
        "movs r5, r4\n"
        "bne 2b\n"

        "3:\n"
        :
        : "r"(a), "r"(a_len), "r"(b), "r"(c)
        : "memory", "cc", "%r4", "%r5", "%r6");
}
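
/*
 * Reference sketch of the operation above (illustrative name, same 32-bit
 * mp_digit assumption): like s_mpv_mul_d_add, but the final carry is not
 * simply stored at c[a_len]; it ripples through as many higher digits of c
 * as needed, so the caller must size c accordingly.
 */
static void s_mpv_mul_d_add_prop_ref(const mp_digit *a, mp_size a_len, mp_digit b, mp_digit *c)
{
    mp_digit carry = 0;
    mp_size i;

    for (i = 0; i < a_len; i++) {
        mp_word w = (mp_word)a[i] * b + c[i] + carry;
        c[i] = (mp_digit)w;
        carry = (mp_digit)(w >> 32);
    }
    while (carry) { /* ripple the remaining carry into higher digits of c */
        mp_word w = (mp_word)c[i] + carry;
        c[i++] = (mp_digit)w;
        carry = (mp_digit)(w >> 32);
    }
}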
#endif

#ifdef MP_ASSEMBLY_SQUARE
void s_mpv_sqr_add_prop(const mp_digit *pa, mp_size a_len, mp_digit *ps)
{
    if (!a_len)
        return;

    __asm__ __volatile__(
        "mov r3, #0\n"

        "1:\n"
        "mov r4, #0\n"
        "ldr r6, [%0], #4\n"
        "ldr r5, [%2]\n"
        "adds r3, r5\n"
        "adc r4, r4, #0\n"
        "umlal r3, r4, r6, r6\n" /* w = r3:r4 */
        "str r3, [%2], #4\n"

        "ldr r5, [%2]\n"
        "adds r3, r4, r5\n"
        "mov r4, #0\n"
        "adc r4, r4, #0\n"
        "str r3, [%2], #4\n"
        "mov r3, r4\n"

        "subs %1, #1\n"
        "bne 1b\n"

#ifdef __thumb2__
        "cbz r3, 3f\n"
#else
        "cmp r3, #0\n"
        "beq 3f\n"
#endif

        "2:\n"
        "mov r4, #0\n"
        "ldr r5, [%2]\n"
        "adds r3, r5\n"
        "adc r4, r4, #0\n"
        "str r3, [%2], #4\n"
        "movs r3, r4\n"
        "bne 2b\n"

        "3:"
        :
        : "r"(pa), "r"(a_len), "r"(ps)
        : "memory", "cc", "%r3", "%r4", "%r5", "%r6");
}
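
/*
 * Reference sketch of the operation above (illustrative name, same 32-bit
 * mp_digit assumption): for each input digit pa[i], the 64-bit square
 * pa[i] * pa[i] is added into the output digit pair ps[2*i], ps[2*i+1],
 * and any carry left after the last pair ripples into higher digits of ps.
 */
static void s_mpv_sqr_add_prop_ref(const mp_digit *pa, mp_size a_len, mp_digit *ps)
{
    mp_digit carry = 0;
    mp_size i;

    for (i = 0; i < a_len; i++) {
        mp_word w = (mp_word)pa[i] * pa[i] + ps[0] + carry; /* square plus low output digit */
        ps[0] = (mp_digit)w;
        w = (w >> 32) + ps[1];                              /* high half plus next output digit */
        ps[1] = (mp_digit)w;
        carry = (mp_digit)(w >> 32);                        /* at most 1 here */
        ps += 2;
    }
    while (carry) { /* ripple any remaining carry */
        mp_word w = (mp_word)*ps + carry;
        *ps++ = (mp_digit)w;
        carry = (mp_digit)(w >> 32);
    }
}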
#endif
#endif
