browser/components/translation/cld2/internal/port.h

changeset 0:6474c204b198
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/browser/components/translation/cld2/internal/port.h	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,128 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//
+// These are weird things we need to do to get this compiling on
+// random systems [subset].
+
+#ifndef BASE_PORT_H_
+#define BASE_PORT_H_
+
+#include <string.h>         // for memcpy()
+#include "integral_types.h"
+
+namespace CLD2 {
+
+// Portable handling of unaligned loads, stores, and copies.
+// On some platforms, like ARM, the copy functions can be more efficient
+// than a load and a store.
+
+#if defined(ARCH_PIII) || defined(ARCH_ATHLON) || defined(ARCH_K8) || defined(_ARCH_PPC)
+
+// x86 and x86-64 can perform unaligned loads/stores directly;
+// modern PowerPC hardware can also do unaligned integer loads and stores;
+// but note: the FPU still sends unaligned loads and stores to a trap handler!
+
+#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
+#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
+#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
+
+#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
+#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
+#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
+
+#elif defined(__arm__) && \
+      !defined(__ARM_ARCH_5__) && \
+      !defined(__ARM_ARCH_5T__) && \
+      !defined(__ARM_ARCH_5TE__) && \
+      !defined(__ARM_ARCH_5TEJ__) && \
+      !defined(__ARM_ARCH_6__) && \
+      !defined(__ARM_ARCH_6J__) && \
+      !defined(__ARM_ARCH_6K__) && \
+      !defined(__ARM_ARCH_6Z__) && \
+      !defined(__ARM_ARCH_6ZK__) && \
+      !defined(__ARM_ARCH_6T2__)
+
+// ARMv7 and newer support native unaligned accesses, but only of 16-bit
+// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
+// do an unaligned read and rotate the words around a bit, or do the reads very
+// slowly (trip through kernel mode). There's no simple #define that says just
+// "ARMv7 or higher", so we have to filter away all ARMv5 and ARMv6
+// sub-architectures. Newer gcc (>= 4.6) sets an __ARM_FEATURE_ALIGNED #define,
+// so in time, maybe we can move on to that.
+//
+// This is a mess, but there's not much we can do about it.
+
+#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
+#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
+
+#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
+#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
+
+// TODO(sesse): NEON supports unaligned 64-bit loads and stores.
+// See if that would be more efficient on platforms supporting it,
+// at least for copies.
+
+inline uint64 UNALIGNED_LOAD64(const void *p) {
+  uint64 t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline void UNALIGNED_STORE64(void *p, uint64 v) {
+  memcpy(p, &v, sizeof v);
+}
+
+#else
+
+#define NEED_ALIGNED_LOADS
+
+// These functions are provided for architectures that don't support
+// unaligned loads and stores.
+
+inline uint16 UNALIGNED_LOAD16(const void *p) {
+  uint16 t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint32 UNALIGNED_LOAD32(const void *p) {
+  uint32 t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint64 UNALIGNED_LOAD64(const void *p) {
+  uint64 t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline void UNALIGNED_STORE16(void *p, uint16 v) {
+  memcpy(p, &v, sizeof v);
+}
+
+inline void UNALIGNED_STORE32(void *p, uint32 v) {
+  memcpy(p, &v, sizeof v);
+}
+
+inline void UNALIGNED_STORE64(void *p, uint64 v) {
+  memcpy(p, &v, sizeof v);
+}
+
+#endif
+
+}       // End namespace CLD2
+
+#endif  // BASE_PORT_H_
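
Usage note (not part of the changeset): a minimal sketch of how a caller might
use these accessors. The file and the helper name ReadU32At are hypothetical;
only port.h and its UNALIGNED_LOAD32 come from the header above. On x86/x86-64
the macro expands to a direct load; strict-alignment targets take the
memcpy-based fallback.

// port_example.cc -- illustrative only; names here are hypothetical.
#include <stdio.h>
#include "port.h"

namespace CLD2 {
// Read a 32-bit value from a possibly misaligned byte offset. Defined inside
// namespace CLD2 so the fallback inline functions are found unqualified.
inline uint32 ReadU32At(const unsigned char* buf, int offset) {
  return UNALIGNED_LOAD32(buf + offset);  // safe regardless of alignment
}
}  // namespace CLD2

int main() {
  unsigned char buf[8] = {0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0};
  // Offset 1 is misaligned for a 4-byte load; the header handles it portably.
  unsigned v = (unsigned)CLD2::ReadU32At(buf, 1);
  printf("0x%08x\n", v);  // prints 0x12345678 on a little-endian machine
  return 0;
}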
