browser/components/translation/cld2/internal/port.h

Wed, 31 Dec 2014 06:09:35 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 06:09:35 +0100
changeset 0
6474c204b198
permissions
-rw-r--r--

Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//
// These are weird things we need to do to get this compiling on
// random systems [subset].
michael@0 19 #ifndef BASE_PORT_H_
michael@0 20 #define BASE_PORT_H_
michael@0 21
michael@0 22 #include <string.h> // for memcpy()
michael@0 23 #include "integral_types.h"
michael@0 24
michael@0 25 namespace CLD2 {
michael@0 26
michael@0 27 // Portable handling of unaligned loads, stores, and copies.
michael@0 28 // On some platforms, like ARM, the copy functions can be more efficient
michael@0 29 // then a load and a store.
michael@0 30
michael@0 31 #if defined(ARCH_PIII) || defined(ARCH_ATHLON) || defined(ARCH_K8) || defined(_ARCH_PPC)
michael@0 32
michael@0 33 // x86 and x86-64 can perform unaligned loads/stores directly;
michael@0 34 // modern PowerPC hardware can also do unaligned integer loads and stores;
michael@0 35 // but note: the FPU still sends unaligned loads and stores to a trap handler!
michael@0 36
michael@0 37 #define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
michael@0 38 #define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
michael@0 39 #define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
michael@0 40
michael@0 41 #define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
michael@0 42 #define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
michael@0 43 #define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
michael@0 44
michael@0 45 #elif defined(__arm__) && \
michael@0 46 !defined(__ARM_ARCH_5__) && \
michael@0 47 !defined(__ARM_ARCH_5T__) && \
michael@0 48 !defined(__ARM_ARCH_5TE__) && \
michael@0 49 !defined(__ARM_ARCH_5TEJ__) && \
michael@0 50 !defined(__ARM_ARCH_6__) && \
michael@0 51 !defined(__ARM_ARCH_6J__) && \
michael@0 52 !defined(__ARM_ARCH_6K__) && \
michael@0 53 !defined(__ARM_ARCH_6Z__) && \
michael@0 54 !defined(__ARM_ARCH_6ZK__) && \
michael@0 55 !defined(__ARM_ARCH_6T2__)
michael@0 56
michael@0 57 // ARMv7 and newer support native unaligned accesses, but only of 16-bit
michael@0 58 // and 32-bit values (not 64-bit); older versions either raise a fatal signal,
michael@0 59 // do an unaligned read and rotate the words around a bit, or do the reads very
michael@0 60 // slowly (trip through kernel mode). There's no simple #define that says just
michael@0 61 // “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
michael@0 62 // sub-architectures. Newer gcc (>= 4.6) set an __ARM_FEATURE_ALIGNED #define,
michael@0 63 // so in time, maybe we can move on to that.
michael@0 64 //
michael@0 65 // This is a mess, but there's not much we can do about it.
michael@0 66
michael@0 67 #define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
michael@0 68 #define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
michael@0 69
michael@0 70 #define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
michael@0 71 #define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
michael@0 72
michael@0 73 // TODO(sesse): NEON supports unaligned 64-bit loads and stores.
michael@0 74 // See if that would be more efficient on platforms supporting it,
michael@0 75 // at least for copies.
michael@0 76
michael@0 77 inline uint64 UNALIGNED_LOAD64(const void *p) {
michael@0 78 uint64 t;
michael@0 79 memcpy(&t, p, sizeof t);
michael@0 80 return t;
michael@0 81 }
michael@0 82
michael@0 83 inline void UNALIGNED_STORE64(void *p, uint64 v) {
michael@0 84 memcpy(p, &v, sizeof v);
michael@0 85 }
michael@0 86
michael@0 87 #else
michael@0 88
michael@0 89 #define NEED_ALIGNED_LOADS
michael@0 90
michael@0 91 // These functions are provided for architectures that don't support
michael@0 92 // unaligned loads and stores.
michael@0 93
michael@0 94 inline uint16 UNALIGNED_LOAD16(const void *p) {
michael@0 95 uint16 t;
michael@0 96 memcpy(&t, p, sizeof t);
michael@0 97 return t;
michael@0 98 }
michael@0 99
michael@0 100 inline uint32 UNALIGNED_LOAD32(const void *p) {
michael@0 101 uint32 t;
michael@0 102 memcpy(&t, p, sizeof t);
michael@0 103 return t;
michael@0 104 }
michael@0 105
michael@0 106 inline uint64 UNALIGNED_LOAD64(const void *p) {
michael@0 107 uint64 t;
michael@0 108 memcpy(&t, p, sizeof t);
michael@0 109 return t;
michael@0 110 }
michael@0 111
michael@0 112 inline void UNALIGNED_STORE16(void *p, uint16 v) {
michael@0 113 memcpy(p, &v, sizeof v);
michael@0 114 }
michael@0 115
michael@0 116 inline void UNALIGNED_STORE32(void *p, uint32 v) {
michael@0 117 memcpy(p, &v, sizeof v);
michael@0 118 }
michael@0 119
michael@0 120 inline void UNALIGNED_STORE64(void *p, uint64 v) {
michael@0 121 memcpy(p, &v, sizeof v);
michael@0 122 }
michael@0 123
michael@0 124 #endif
michael@0 125
michael@0 126 } // End namespace CLD2
michael@0 127
michael@0 128 #endif // BASE_PORT_H_

mercurial