security/sandbox/chromium/base/strings/utf_string_conversion_utils.h

changeset 6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/security/sandbox/chromium/base/strings/utf_string_conversion_utils.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,97 @@
     1.4 +// Copyright (c) 2011 The Chromium Authors. All rights reserved.
     1.5 +// Use of this source code is governed by a BSD-style license that can be
     1.6 +// found in the LICENSE file.
     1.7 +
     1.8 +#ifndef BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
     1.9 +#define BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
    1.10 +
    1.11 +// This should only be used by the various UTF string conversion files.
    1.12 +
    1.13 +#include "base/base_export.h"
    1.14 +#include "base/strings/string16.h"
    1.15 +
    1.16 +namespace base {
    1.17 +
    1.18 +inline bool IsValidCodepoint(uint32 code_point) {
    1.19 +  // Excludes the surrogate code points ([0xD800, 0xDFFF]) and
    1.20 +  // codepoints larger than 0x10FFFF (the highest codepoint allowed).
    1.21 +  // Non-characters and unassigned codepoints are allowed.
    1.22 +  return code_point < 0xD800u ||
    1.23 +         (code_point >= 0xE000u && code_point <= 0x10FFFFu);
    1.24 +}
    1.25 +
    1.26 +inline bool IsValidCharacter(uint32 code_point) {
    1.27 +  // Excludes non-characters (U+FDD0..U+FDEF, and all codepoints ending in
    1.28 +  // 0xFFFE or 0xFFFF) from the set of valid code points.
    1.29 +  return code_point < 0xD800u || (code_point >= 0xE000u &&
    1.30 +      code_point < 0xFDD0u) || (code_point > 0xFDEFu &&
    1.31 +      code_point <= 0x10FFFFu && (code_point & 0xFFFEu) != 0xFFFEu);
    1.32 +}
    1.33 +
    1.34 +// ReadUnicodeCharacter --------------------------------------------------------
    1.35 +
    1.36 +// Reads a UTF-8 stream, placing the next code point into the given output
    1.37 +// |*code_point|. |src| represents the entire string to read, and |*char_index|
    1.38 +// is the character offset within the string to start reading at. |*char_index|
    1.39 +// will be updated to index the last character read, such that incrementing it
    1.40 +// (as in a for loop) will take the reader to the next character.
    1.41 +//
    1.42 +// Returns true on success. On false, |*code_point| will be invalid.
    1.43 +BASE_EXPORT bool ReadUnicodeCharacter(const char* src,
    1.44 +                                      int32 src_len,
    1.45 +                                      int32* char_index,
    1.46 +                                      uint32* code_point_out);
    1.47 +
    1.48 +// Reads a UTF-16 character. The usage is the same as the 8-bit version above.
    1.49 +BASE_EXPORT bool ReadUnicodeCharacter(const char16* src,
    1.50 +                                      int32 src_len,
    1.51 +                                      int32* char_index,
    1.52 +                                      uint32* code_point);
    1.53 +
    1.54 +#if defined(WCHAR_T_IS_UTF32)
     1.55 +// Reads a UTF-32 character. The usage is the same as the 8-bit version above.
    1.56 +BASE_EXPORT bool ReadUnicodeCharacter(const wchar_t* src,
    1.57 +                                      int32 src_len,
    1.58 +                                      int32* char_index,
    1.59 +                                      uint32* code_point);
    1.60 +#endif  // defined(WCHAR_T_IS_UTF32)
    1.61 +
    1.62 +// WriteUnicodeCharacter -------------------------------------------------------
    1.63 +
    1.64 +// Appends a UTF-8 character to the given 8-bit string.  Returns the number of
    1.65 +// bytes written.
    1.66 +// TODO(brettw) Bug 79631: This function should not be exposed.
    1.67 +BASE_EXPORT size_t WriteUnicodeCharacter(uint32 code_point,
    1.68 +                                         std::string* output);
    1.69 +
    1.70 +// Appends the given code point as a UTF-16 character to the given 16-bit
    1.71 +// string.  Returns the number of 16-bit values written.
    1.72 +BASE_EXPORT size_t WriteUnicodeCharacter(uint32 code_point, string16* output);
    1.73 +
    1.74 +#if defined(WCHAR_T_IS_UTF32)
    1.75 +// Appends the given UTF-32 character to the given 32-bit string.  Returns the
    1.76 +// number of 32-bit values written.
    1.77 +inline size_t WriteUnicodeCharacter(uint32 code_point, std::wstring* output) {
    1.78 +  // This is the easy case, just append the character.
    1.79 +  output->push_back(code_point);
    1.80 +  return 1;
    1.81 +}
    1.82 +#endif  // defined(WCHAR_T_IS_UTF32)
    1.83 +
    1.84 +// Generalized Unicode converter -----------------------------------------------
    1.85 +
    1.86 +// Guesses the length of the output in UTF-8 in bytes, clears that output
    1.87 +// string, and reserves that amount of space.  We assume that the input
    1.88 +// character types are unsigned, which will be true for UTF-16 and -32 on our
    1.89 +// systems.
    1.90 +template<typename CHAR>
    1.91 +void PrepareForUTF8Output(const CHAR* src, size_t src_len, std::string* output);
    1.92 +
    1.93 +// Prepares an output buffer (containing either UTF-16 or -32 data) given some
    1.94 +// UTF-8 input that will be converted to it.  See PrepareForUTF8Output().
    1.95 +template<typename STRING>
    1.96 +void PrepareForUTF16Or32Output(const char* src, size_t src_len, STRING* output);
    1.97 +
    1.98 +}  // namespace base
    1.99 +
   1.100 +#endif  // BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
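The header above only declares the read/write primitives; the conversion files it says it serves drive them with a generic decode-and-re-encode loop. The sketch below is not part of the changeset: ConvertUnicodeSketch is an illustrative name, and the int32/uint32 and string16 types are assumed to come from the base headers included above. It shows the intended usage pattern: ReadUnicodeCharacter() leaves |i| on the last code unit it consumed, so the loop's ++i steps to the next character, and WriteUnicodeCharacter() re-encodes the code point into whatever destination string type is passed.

#include "base/strings/utf_string_conversion_utils.h"

// Sketch only: decode each code point from |src| and re-encode it into
// |output|, substituting U+FFFD (the replacement character) on error.
template <typename SrcChar, typename DestString>
bool ConvertUnicodeSketch(const SrcChar* src, size_t src_len,
                          DestString* output) {
  bool success = true;
  int32 src_len32 = static_cast<int32>(src_len);
  for (int32 i = 0; i < src_len32; ++i) {
    uint32 code_point;
    if (base::ReadUnicodeCharacter(src, src_len32, &i, &code_point)) {
      base::WriteUnicodeCharacter(code_point, output);
    } else {
      base::WriteUnicodeCharacter(0xFFFDu, output);
      success = false;
    }
  }
  return success;
}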

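The two inline predicates differ only in their treatment of Unicode noncharacters: IsValidCodepoint() accepts anything that is not a surrogate and not above U+10FFFF, while IsValidCharacter() additionally rejects U+FDD0..U+FDEF and every code point ending in 0xFFFE or 0xFFFF. A small self-check like the one below (again a sketch, assuming this header and its base/ dependencies are on the include path) makes the distinction concrete.

#include <assert.h>
#include "base/strings/utf_string_conversion_utils.h"

void CheckValidityPredicates() {
  // Surrogates and values above U+10FFFF fail both predicates.
  assert(!base::IsValidCodepoint(0xD800u));
  assert(!base::IsValidCharacter(0x110000u));
  // Noncharacters are structurally valid code points but not valid characters.
  assert(base::IsValidCodepoint(0xFDD0u) && !base::IsValidCharacter(0xFDD0u));
  assert(base::IsValidCodepoint(0x1FFFEu) && !base::IsValidCharacter(0x1FFFEu));
  // An ordinary assigned character passes both.
  assert(base::IsValidCodepoint(0x1F600u) && base::IsValidCharacter(0x1F600u));
}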