browser/components/translation/cld2/internal/port.h

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Thu, 22 Jan 2015 13:21:57 +0100
branch       TOR_BUG_9701
changeset    15:b8a032363ba2
permissions  -rw-r--r--

Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6

// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//
// These are weird things we need to do to get this compiling on
// random systems [subset].

#ifndef BASE_PORT_H_
#define BASE_PORT_H_

#include <string.h>         // for memcpy()
#include "integral_types.h"

namespace CLD2 {

// Portable handling of unaligned loads, stores, and copies.
// On some platforms, like ARM, the copy functions can be more efficient
// than a load and a store.
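//
// Illustrative contrast (not part of the original header): the unsafe
// pattern these macros replace is a direct cast-and-dereference,
//
//   uint32 v = *reinterpret_cast<const uint32 *>(p);  // may trap or be
//                                                     // slow if p is
//                                                     // misaligned
//
// which only some architectures can issue safely; each branch below
// substitutes a legal equivalent for its target.
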
#if defined(ARCH_PIII) || defined(ARCH_ATHLON) || defined(ARCH_K8) || defined(_ARCH_PPC)

// x86 and x86-64 can perform unaligned loads/stores directly;
// modern PowerPC hardware can also do unaligned integer loads and stores;
// but note: the FPU still sends unaligned loads and stores to a trap handler!

#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))

#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))

#elif defined(__arm__) && \
      !defined(__ARM_ARCH_5__) && \
      !defined(__ARM_ARCH_5T__) && \
      !defined(__ARM_ARCH_5TE__) && \
      !defined(__ARM_ARCH_5TEJ__) && \
      !defined(__ARM_ARCH_6__) && \
      !defined(__ARM_ARCH_6J__) && \
      !defined(__ARM_ARCH_6K__) && \
      !defined(__ARM_ARCH_6Z__) && \
      !defined(__ARM_ARCH_6ZK__) && \
      !defined(__ARM_ARCH_6T2__)

// ARMv7 and newer support native unaligned accesses, but only of 16-bit
// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
// do an unaligned read and rotate the words around a bit, or do the reads very
// slowly (trip through kernel mode). There's no simple #define that says just
// “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
// sub-architectures. Newer gcc (>= 4.6) sets an __ARM_FEATURE_UNALIGNED
// #define, so in time, maybe we can move on to that.
//
// This is a mess, but there's not much we can do about it.
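//
// A sketch of that simpler test (illustrative only, not used here; it
// assumes a toolchain that defines the ACLE feature macro):
//
//   #if defined(__arm__) && defined(__ARM_FEATURE_UNALIGNED)
//   // ... take the 16/32-bit direct-access branch ...
//   #endif
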
#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))

#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))

// TODO(sesse): NEON supports unaligned 64-bit loads and stores.
// See if that would be more efficient on platforms supporting it,
// at least for copies.
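//
// A hypothetical NEON version of such a copy (a sketch only; the name
// UNALIGNED_COPY64 and the use of <arm_neon.h> are assumptions, not part
// of this header):
//
//   #include <arm_neon.h>
//   inline void UNALIGNED_COPY64(const void *src, void *dst) {
//     // vld1_u8/vst1_u8 do 8-byte loads/stores with no alignment
//     // requirement on the pointers.
//     vst1_u8(static_cast<uint8_t *>(dst),
//             vld1_u8(static_cast<const uint8_t *>(src)));
//   }
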
inline uint64 UNALIGNED_LOAD64(const void *p) {
  uint64 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline void UNALIGNED_STORE64(void *p, uint64 v) {
  memcpy(p, &v, sizeof v);
}

#else

#define NEED_ALIGNED_LOADS

// These functions are provided for architectures that don't support
// unaligned loads and stores.

inline uint16 UNALIGNED_LOAD16(const void *p) {
  uint16 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint32 UNALIGNED_LOAD32(const void *p) {
  uint32 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint64 UNALIGNED_LOAD64(const void *p) {
  uint64 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline void UNALIGNED_STORE16(void *p, uint16 v) {
  memcpy(p, &v, sizeof v);
}

inline void UNALIGNED_STORE32(void *p, uint32 v) {
  memcpy(p, &v, sizeof v);
}

inline void UNALIGNED_STORE64(void *p, uint64 v) {
  memcpy(p, &v, sizeof v);
}

#endif

}       // End namespace CLD2

#endif  // BASE_PORT_H_

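A minimal usage sketch (not part of the file; the function name and buffer
here are hypothetical). Inside the CLD2 namespace the same spelling compiles
whichever branch of the #if was selected, since the fallback branches define
real functions rather than macros:

#include "port.h"

namespace CLD2 {
uint32 ReadShifted(unsigned char *buf) {
  // buf + 1 is deliberately misaligned; both calls below are
  // well-defined on every supported architecture.
  uint32 v = UNALIGNED_LOAD32(buf + 1);
  UNALIGNED_STORE32(buf + 2, v);
  return v;
}
}  // End namespace CLD2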
