/*
*******************************************************************************
*   Copyright (C) 1996-2013, International Business Machines
*   Corporation and others.  All Rights Reserved.
*******************************************************************************
*   file name:  ucol.cpp
*   encoding:   US-ASCII
*   tab size:   8 (not used)
*   indentation:4
*
*   Modification history
*   Date        Name      Comments
*   1996-1999   various members of ICU team maintained C API for collation framework
*   02/16/2001  synwee    Added internal method getPrevSpecialCE
*   03/01/2001  synwee    Added maxexpansion functionality.
*   03/16/2001  weiv      Collation framework is rewritten in C and made UCA compliant
*/

#include "unicode/utypes.h"

#if !UCONFIG_NO_COLLATION

#include "unicode/bytestream.h"
#include "unicode/coleitr.h"
#include "unicode/unorm.h"
#include "unicode/udata.h"
#include "unicode/ustring.h"
#include "unicode/utf8.h"

#include "ucol_imp.h"
#include "bocsu.h"

#include "normalizer2impl.h"
#include "unorm_it.h"
#include "umutex.h"
#include "cmemory.h"
#include "ucln_in.h"
#include "cstring.h"
#include "utracimp.h"
#include "putilimp.h"
#include "uassert.h"
#include "unicode/coll.h"

#ifdef UCOL_DEBUG
#include <stdio.h>
#endif

U_NAMESPACE_USE

#define LENGTHOF(array) (int32_t)(sizeof(array)/sizeof((array)[0]))

#define LAST_BYTE_MASK_           0xFF
#define SECOND_LAST_BYTE_SHIFT_   8

#define ZERO_CC_LIMIT_            0xC0

// These are static pointers to the NFC/NFD implementation instance.
// Each of them is always the same between calls to u_cleanup
// and therefore writing to it is not synchronized.
// They are cleaned in ucol_cleanup
static const Normalizer2 *g_nfd = NULL;
static const Normalizer2Impl *g_nfcImpl = NULL;

// These are values from the UCA required for
// implicit generation and suppressing sort key compression.
// They should regularly be in the UCA, but if one
// is running without UCA, it could be a problem.
static const int32_t maxRegularPrimary  = 0x7A;
static const int32_t minImplicitPrimary = 0xE0;
static const int32_t maxImplicitPrimary = 0xE4;

U_CDECL_BEGIN
static UBool U_CALLCONV
ucol_cleanup(void)
{
    g_nfd = NULL;
    g_nfcImpl = NULL;
    return TRUE;
}

static int32_t U_CALLCONV
_getFoldingOffset(uint32_t data) {
    return (int32_t)(data&0xFFFFFF);
}

U_CDECL_END

static inline
UBool initializeNFD(UErrorCode *status) {
    if (g_nfd != NULL) {
        return TRUE;
    } else {
        // The result is constant, until the library is reloaded.
        g_nfd = Normalizer2Factory::getNFDInstance(*status);
        ucln_i18n_registerCleanup(UCLN_I18N_UCOL, ucol_cleanup);
        return U_SUCCESS(*status);
    }
}

// init FCD data
static inline
UBool initializeFCD(UErrorCode *status) {
    if (g_nfcImpl != NULL) {
        return TRUE;
    } else {
        // The result is constant, until the library is reloaded.
        g_nfcImpl = Normalizer2Factory::getNFCImpl(*status);
        // Note: Alternatively, we could also store this pointer in each collIterate struct,
        // same as Normalizer2Factory::getImpl(collIterate->nfd).
        ucln_i18n_registerCleanup(UCLN_I18N_UCOL, ucol_cleanup);
        return U_SUCCESS(*status);
    }
}

static
inline void IInit_collIterate(const UCollator *collator, const UChar *sourceString,
                              int32_t sourceLen, collIterate *s,
                              UErrorCode *status)
{
    (s)->string = (s)->pos = sourceString;
    (s)->origFlags = 0;
    (s)->flags = 0;
    if (sourceLen >= 0) {
        s->flags |= UCOL_ITER_HASLEN;
        (s)->endp = (UChar *)sourceString+sourceLen;
    }
    else {
        /* change to enable easier checking for end of string for fcdPosition */
        (s)->endp = NULL;
    }
    (s)->extendCEs = NULL;
    (s)->extendCEsSize = 0;
    (s)->CEpos = (s)->toReturn = (s)->CEs;
    (s)->offsetBuffer = NULL;
    (s)->offsetBufferSize = 0;
    (s)->offsetReturn = (s)->offsetStore = NULL;
    (s)->offsetRepeatCount = (s)->offsetRepeatValue = 0;
    (s)->coll = (collator);
    if (initializeNFD(status)) {
        (s)->nfd = g_nfd;
    } else {
        return;
    }
    (s)->fcdPosition = 0;
    if(collator->normalizationMode == UCOL_ON) {
        (s)->flags |= UCOL_ITER_NORM;
    }
    if(collator->hiraganaQ == UCOL_ON && collator->strength >= UCOL_QUATERNARY) {
        (s)->flags |= UCOL_HIRAGANA_Q;
    }
    (s)->iterator = NULL;
    //(s)->iteratorIndex = 0;
}

U_CAPI void U_EXPORT2
uprv_init_collIterate(const UCollator *collator, const UChar *sourceString,
                      int32_t sourceLen, collIterate *s,
                      UErrorCode *status) {
    /* Out-of-line version for use from other files.
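       An illustrative usage sketch, not part of the original comment; the names
       coll, text and textLen are hypothetical, and the loop assumes the
       ucol_getNextCE() entry point from ucol_imp.h:

           collIterate s;
           UErrorCode ec = U_ZERO_ERROR;
           uprv_init_collIterate(coll, text, textLen, &s, &ec);
           uint32_t ce;
           while (U_SUCCESS(ec) &&
                  (ce = ucol_getNextCE(coll, &s, &ec)) != UCOL_NO_MORE_CES) {
               // consume the collation element
           }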
    */
    IInit_collIterate(collator, sourceString, sourceLen, s, status);
}

U_CAPI collIterate * U_EXPORT2
uprv_new_collIterate(UErrorCode *status) {
    if(U_FAILURE(*status)) {
        return NULL;
    }
    collIterate *s = new collIterate;
    if(s == NULL) {
        *status = U_MEMORY_ALLOCATION_ERROR;
        return NULL;
    }
    return s;
}

U_CAPI void U_EXPORT2
uprv_delete_collIterate(collIterate *s) {
    delete s;
}

U_CAPI UBool U_EXPORT2
uprv_collIterateAtEnd(collIterate *s) {
    return s == NULL || s->pos == s->endp;
}

/**
* Backup the state of the collIterate struct data
* @param data collIterate to backup
* @param backup storage
*/
static
inline void backupState(const collIterate *data, collIterateState *backup)
{
    backup->fcdPosition = data->fcdPosition;
    backup->flags = data->flags;
    backup->origFlags = data->origFlags;
    backup->pos = data->pos;
    backup->bufferaddress = data->writableBuffer.getBuffer();
    backup->buffersize = data->writableBuffer.length();
    backup->iteratorMove = 0;
    backup->iteratorIndex = 0;
    if(data->iterator != NULL) {
        //backup->iteratorIndex = data->iterator->getIndex(data->iterator, UITER_CURRENT);
        backup->iteratorIndex = data->iterator->getState(data->iterator);
        // now we try to fix up if we're using a normalizing iterator and we get UITER_NO_STATE
        if(backup->iteratorIndex == UITER_NO_STATE) {
            while((backup->iteratorIndex = data->iterator->getState(data->iterator)) == UITER_NO_STATE) {
                backup->iteratorMove++;
                data->iterator->move(data->iterator, -1, UITER_CURRENT);
            }
            data->iterator->move(data->iterator, backup->iteratorMove, UITER_CURRENT);
        }
    }
}

/**
* Loads the state into the collIterate struct data
* @param data collIterate to load the state into
* @param backup storage
* @param forwards boolean to indicate if forwards iteration is used,
*        false indicates backwards iteration
*/
static
inline void loadState(collIterate *data, const collIterateState *backup,
                      UBool forwards)
{
    UErrorCode status = U_ZERO_ERROR;
    data->flags = backup->flags;
    data->origFlags = backup->origFlags;
    if(data->iterator != NULL) {
        //data->iterator->move(data->iterator, backup->iteratorIndex, UITER_ZERO);
        data->iterator->setState(data->iterator, backup->iteratorIndex, &status);
        if(backup->iteratorMove != 0) {
            data->iterator->move(data->iterator, backup->iteratorMove, UITER_CURRENT);
        }
    }
    data->pos = backup->pos;

    if ((data->flags & UCOL_ITER_INNORMBUF) &&
        data->writableBuffer.getBuffer() != backup->bufferaddress) {
        /*
        this is when a new buffer has been reallocated and we'll have to
        calculate the new position.
        note the new buffer has to contain the contents of the old buffer.
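        Illustrative example (numbers invented for this note): if pos was
        5 UChars past the start of the old buffer, the forwards branch below
        re-bases it to 5 UChars past the start of the new buffer; the
        backwards branch instead preserves the distance from the end, so a
        pos that was 3 UChars before the end of an 8-UChar buffer becomes
        9 UChars past the start of a 12-UChar replacement buffer.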
        */
        if (forwards) {
            data->pos = data->writableBuffer.getTerminatedBuffer() +
                (data->pos - backup->bufferaddress);
        }
        else {
            /* backwards direction */
            int32_t temp = backup->buffersize -
                (int32_t)(data->pos - backup->bufferaddress);
            data->pos = data->writableBuffer.getTerminatedBuffer() + (data->writableBuffer.length() - temp);
        }
    }
    if ((data->flags & UCOL_ITER_INNORMBUF) == 0) {
        /*
        this is a little tricky.
        if we are initially not in the normalization buffer, even if we
        normalize in the later stage, the data in the buffer will be
        ignored, since we skip back up to the data string.
        however if we are already in the normalization buffer, any
        further normalization will pull data into the normalization
        buffer and modify the fcdPosition.
        since we are keeping the data in the buffer for use, the
        fcdPosition can not be reverted back.
        arrgghh....
        */
        data->fcdPosition = backup->fcdPosition;
    }
}

static UBool
reallocCEs(collIterate *data, int32_t newCapacity) {
    uint32_t *oldCEs = data->extendCEs;
    if(oldCEs == NULL) {
        oldCEs = data->CEs;
    }
    int32_t length = data->CEpos - oldCEs;
    uint32_t *newCEs = (uint32_t *)uprv_malloc(newCapacity * 4);
    if(newCEs == NULL) {
        return FALSE;
    }
    uprv_memcpy(newCEs, oldCEs, length * 4);
    uprv_free(data->extendCEs);
    data->extendCEs = newCEs;
    data->extendCEsSize = newCapacity;
    data->CEpos = newCEs + length;
    return TRUE;
}

static UBool
increaseCEsCapacity(collIterate *data) {
    int32_t oldCapacity;
    if(data->extendCEs != NULL) {
        oldCapacity = data->extendCEsSize;
    } else {
        oldCapacity = LENGTHOF(data->CEs);
    }
    return reallocCEs(data, 2 * oldCapacity);
}

static UBool
ensureCEsCapacity(collIterate *data, int32_t minCapacity) {
    int32_t oldCapacity;
    if(data->extendCEs != NULL) {
        oldCapacity = data->extendCEsSize;
    } else {
        oldCapacity = LENGTHOF(data->CEs);
    }
    if(minCapacity <= oldCapacity) {
        return TRUE;
    }
    oldCapacity *= 2;
    return reallocCEs(data, minCapacity > oldCapacity ? minCapacity : oldCapacity);
}

void collIterate::appendOffset(int32_t offset, UErrorCode &errorCode) {
    if(U_FAILURE(errorCode)) {
        return;
    }
    int32_t length = offsetStore == NULL ?
        0 : (int32_t)(offsetStore - offsetBuffer);
    U_ASSERT(length >= offsetBufferSize || offsetStore != NULL);
    if(length >= offsetBufferSize) {
        int32_t newCapacity = 2 * offsetBufferSize + UCOL_EXPAND_CE_BUFFER_SIZE;
        int32_t *newBuffer = static_cast<int32_t *>(uprv_malloc(newCapacity * 4));
        if(newBuffer == NULL) {
            errorCode = U_MEMORY_ALLOCATION_ERROR;
            return;
        }
        if(length > 0) {
            uprv_memcpy(newBuffer, offsetBuffer, length * 4);
        }
        uprv_free(offsetBuffer);
        offsetBuffer = newBuffer;
        offsetStore = offsetBuffer + length;
        offsetBufferSize = newCapacity;
    }
    *offsetStore++ = offset;
}

/*
 * collIter_eos()
 *      Checks for a collIterate being positioned at the end of
 *      its source string.
 *
 */
static
inline UBool collIter_eos(collIterate *s) {
    if(s->flags & UCOL_USE_ITERATOR) {
        return !(s->iterator->hasNext(s->iterator));
    }
    if ((s->flags & UCOL_ITER_HASLEN) == 0 && *s->pos != 0) {
        // Null terminated string, but not at null, so not at end.
        //   Whether in main or normalization buffer doesn't matter.
        return FALSE;
    }

    // String with length.  Can't be in normalization buffer, which is always
    //  null terminated.
    if (s->flags & UCOL_ITER_HASLEN) {
        return (s->pos == s->endp);
    }

    // We are at a null termination, could be either normalization buffer or main string.
    if ((s->flags & UCOL_ITER_INNORMBUF) == 0) {
        // At null at end of main string.
        return TRUE;
    }

    // At null at end of normalization buffer.  Need to check whether there are
    //   any characters left in the main buffer.
    if(s->origFlags & UCOL_USE_ITERATOR) {
        return !(s->iterator->hasNext(s->iterator));
    } else if ((s->origFlags & UCOL_ITER_HASLEN) == 0) {
        // Null terminated main string.  fcdPosition is the 'return' position into main buf.
        return (*s->fcdPosition == 0);
    }
    else {
        // Main string with an end pointer.
        return s->fcdPosition == s->endp;
    }
}

/*
 * collIter_bos()
 *      Checks for a collIterate being positioned at the start of
 *      its source string.
michael@0: * michael@0: */ michael@0: static michael@0: inline UBool collIter_bos(collIterate *source) { michael@0: // if we're going backwards, we need to know whether there is more in the michael@0: // iterator, even if we are in the side buffer michael@0: if(source->flags & UCOL_USE_ITERATOR || source->origFlags & UCOL_USE_ITERATOR) { michael@0: return !source->iterator->hasPrevious(source->iterator); michael@0: } michael@0: if (source->pos <= source->string || michael@0: ((source->flags & UCOL_ITER_INNORMBUF) && michael@0: *(source->pos - 1) == 0 && source->fcdPosition == NULL)) { michael@0: return TRUE; michael@0: } michael@0: return FALSE; michael@0: } michael@0: michael@0: /*static michael@0: inline UBool collIter_SimpleBos(collIterate *source) { michael@0: // if we're going backwards, we need to know whether there is more in the michael@0: // iterator, even if we are in the side buffer michael@0: if(source->flags & UCOL_USE_ITERATOR || source->origFlags & UCOL_USE_ITERATOR) { michael@0: return !source->iterator->hasPrevious(source->iterator); michael@0: } michael@0: if (source->pos == source->string) { michael@0: return TRUE; michael@0: } michael@0: return FALSE; michael@0: }*/ michael@0: //return (data->pos == data->string) || michael@0: michael@0: michael@0: /****************************************************************************/ michael@0: /* Following are the open/close functions */ michael@0: /* */ michael@0: /****************************************************************************/ michael@0: michael@0: static UCollator* michael@0: ucol_initFromBinary(const uint8_t *bin, int32_t length, michael@0: const UCollator *base, michael@0: UCollator *fillIn, michael@0: UErrorCode *status) michael@0: { michael@0: UCollator *result = fillIn; michael@0: if(U_FAILURE(*status)) { michael@0: return NULL; michael@0: } michael@0: /* michael@0: if(base == NULL) { michael@0: // we don't support null base yet michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR; michael@0: return NULL; michael@0: } michael@0: */ michael@0: // We need these and we could be running without UCA michael@0: uprv_uca_initImplicitConstants(status); michael@0: UCATableHeader *colData = (UCATableHeader *)bin; michael@0: // do we want version check here? 
We're trying to figure out whether collators are compatible michael@0: if((base && (uprv_memcmp(colData->UCAVersion, base->image->UCAVersion, sizeof(UVersionInfo)) != 0 || michael@0: uprv_memcmp(colData->UCDVersion, base->image->UCDVersion, sizeof(UVersionInfo)) != 0)) || michael@0: colData->version[0] != UCOL_BUILDER_VERSION) michael@0: { michael@0: *status = U_COLLATOR_VERSION_MISMATCH; michael@0: return NULL; michael@0: } michael@0: else { michael@0: if((uint32_t)length > (paddedsize(sizeof(UCATableHeader)) + paddedsize(sizeof(UColOptionSet)))) { michael@0: result = ucol_initCollator((const UCATableHeader *)bin, result, base, status); michael@0: if(U_FAILURE(*status)){ michael@0: return NULL; michael@0: } michael@0: result->hasRealData = TRUE; michael@0: } michael@0: else { michael@0: if(base) { michael@0: result = ucol_initCollator(base->image, result, base, status); michael@0: ucol_setOptionsFromHeader(result, (UColOptionSet *)(bin+((const UCATableHeader *)bin)->options), status); michael@0: if(U_FAILURE(*status)){ michael@0: return NULL; michael@0: } michael@0: result->hasRealData = FALSE; michael@0: } michael@0: else { michael@0: *status = U_USELESS_COLLATOR_ERROR; michael@0: return NULL; michael@0: } michael@0: } michael@0: result->freeImageOnClose = FALSE; michael@0: } michael@0: result->actualLocale = NULL; michael@0: result->validLocale = NULL; michael@0: result->requestedLocale = NULL; michael@0: result->rules = NULL; michael@0: result->rulesLength = 0; michael@0: result->freeRulesOnClose = FALSE; michael@0: result->ucaRules = NULL; michael@0: return result; michael@0: } michael@0: michael@0: U_CAPI UCollator* U_EXPORT2 michael@0: ucol_openBinary(const uint8_t *bin, int32_t length, michael@0: const UCollator *base, michael@0: UErrorCode *status) michael@0: { michael@0: return ucol_initFromBinary(bin, length, base, NULL, status); michael@0: } michael@0: michael@0: U_CAPI int32_t U_EXPORT2 michael@0: ucol_cloneBinary(const UCollator *coll, michael@0: uint8_t *buffer, int32_t capacity, michael@0: UErrorCode *status) michael@0: { michael@0: int32_t length = 0; michael@0: if(U_FAILURE(*status)) { michael@0: return length; michael@0: } michael@0: if(capacity < 0) { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR; michael@0: return length; michael@0: } michael@0: if(coll->hasRealData == TRUE) { michael@0: length = coll->image->size; michael@0: if(length <= capacity) { michael@0: uprv_memcpy(buffer, coll->image, length); michael@0: } else { michael@0: *status = U_BUFFER_OVERFLOW_ERROR; michael@0: } michael@0: } else { michael@0: length = (int32_t)(paddedsize(sizeof(UCATableHeader))+paddedsize(sizeof(UColOptionSet))); michael@0: if(length <= capacity) { michael@0: /* build the UCATableHeader with minimal entries */ michael@0: /* do not copy the header from the UCA file because its values are wrong! 
*/ michael@0: /* uprv_memcpy(result, UCA->image, sizeof(UCATableHeader)); */ michael@0: michael@0: /* reset everything */ michael@0: uprv_memset(buffer, 0, length); michael@0: michael@0: /* set the tailoring-specific values */ michael@0: UCATableHeader *myData = (UCATableHeader *)buffer; michael@0: myData->size = length; michael@0: michael@0: /* offset for the options, the only part of the data that is present after the header */ michael@0: myData->options = sizeof(UCATableHeader); michael@0: michael@0: /* need to always set the expansion value for an upper bound of the options */ michael@0: myData->expansion = myData->options + sizeof(UColOptionSet); michael@0: michael@0: myData->magic = UCOL_HEADER_MAGIC; michael@0: myData->isBigEndian = U_IS_BIG_ENDIAN; michael@0: myData->charSetFamily = U_CHARSET_FAMILY; michael@0: michael@0: /* copy UCA's version; genrb will override all but the builder version with tailoring data */ michael@0: uprv_memcpy(myData->version, coll->image->version, sizeof(UVersionInfo)); michael@0: michael@0: uprv_memcpy(myData->UCAVersion, coll->image->UCAVersion, sizeof(UVersionInfo)); michael@0: uprv_memcpy(myData->UCDVersion, coll->image->UCDVersion, sizeof(UVersionInfo)); michael@0: uprv_memcpy(myData->formatVersion, coll->image->formatVersion, sizeof(UVersionInfo)); michael@0: myData->jamoSpecial = coll->image->jamoSpecial; michael@0: michael@0: /* copy the collator options */ michael@0: uprv_memcpy(buffer+paddedsize(sizeof(UCATableHeader)), coll->options, sizeof(UColOptionSet)); michael@0: } else { michael@0: *status = U_BUFFER_OVERFLOW_ERROR; michael@0: } michael@0: } michael@0: return length; michael@0: } michael@0: michael@0: U_CAPI UCollator* U_EXPORT2 michael@0: ucol_safeClone(const UCollator *coll, void * /*stackBuffer*/, int32_t * pBufferSize, UErrorCode *status) michael@0: { michael@0: UCollator * localCollator; michael@0: int32_t bufferSizeNeeded = (int32_t)sizeof(UCollator); michael@0: int32_t imageSize = 0; michael@0: int32_t rulesSize = 0; michael@0: int32_t rulesPadding = 0; michael@0: int32_t defaultReorderCodesSize = 0; michael@0: int32_t reorderCodesSize = 0; michael@0: uint8_t *image; michael@0: UChar *rules; michael@0: int32_t* defaultReorderCodes; michael@0: int32_t* reorderCodes; michael@0: uint8_t* leadBytePermutationTable; michael@0: UBool imageAllocated = FALSE; michael@0: michael@0: if (status == NULL || U_FAILURE(*status)){ michael@0: return NULL; michael@0: } michael@0: if (coll == NULL) { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR; michael@0: return NULL; michael@0: } michael@0: michael@0: if (coll->rules && coll->freeRulesOnClose) { michael@0: rulesSize = (int32_t)(coll->rulesLength + 1)*sizeof(UChar); michael@0: rulesPadding = (int32_t)(bufferSizeNeeded % sizeof(UChar)); michael@0: bufferSizeNeeded += rulesSize + rulesPadding; michael@0: } michael@0: // no padding for alignment needed from here since the next two are 4 byte quantities michael@0: if (coll->defaultReorderCodes) { michael@0: defaultReorderCodesSize = coll->defaultReorderCodesLength * sizeof(int32_t); michael@0: bufferSizeNeeded += defaultReorderCodesSize; michael@0: } michael@0: if (coll->reorderCodes) { michael@0: reorderCodesSize = coll->reorderCodesLength * sizeof(int32_t); michael@0: bufferSizeNeeded += reorderCodesSize; michael@0: } michael@0: if (coll->leadBytePermutationTable) { michael@0: bufferSizeNeeded += 256 * sizeof(uint8_t); michael@0: } michael@0: michael@0: if (pBufferSize != NULL) { michael@0: int32_t inputSize = *pBufferSize; michael@0: *pBufferSize = 
1; michael@0: if (inputSize == 0) { michael@0: return NULL; // preflighting for deprecated functionality michael@0: } michael@0: } michael@0: michael@0: char *stackBufferChars = (char *)uprv_malloc(bufferSizeNeeded); michael@0: // Null pointer check. michael@0: if (stackBufferChars == NULL) { michael@0: *status = U_MEMORY_ALLOCATION_ERROR; michael@0: return NULL; michael@0: } michael@0: *status = U_SAFECLONE_ALLOCATED_WARNING; michael@0: michael@0: localCollator = (UCollator *)stackBufferChars; michael@0: rules = (UChar *)(stackBufferChars + sizeof(UCollator) + rulesPadding); michael@0: defaultReorderCodes = (int32_t*)((uint8_t*)rules + rulesSize); michael@0: reorderCodes = (int32_t*)((uint8_t*)defaultReorderCodes + defaultReorderCodesSize); michael@0: leadBytePermutationTable = (uint8_t*)reorderCodes + reorderCodesSize; michael@0: michael@0: { michael@0: UErrorCode tempStatus = U_ZERO_ERROR; michael@0: imageSize = ucol_cloneBinary(coll, NULL, 0, &tempStatus); michael@0: } michael@0: if (coll->freeImageOnClose) { michael@0: image = (uint8_t *)uprv_malloc(imageSize); michael@0: // Null pointer check michael@0: if (image == NULL) { michael@0: *status = U_MEMORY_ALLOCATION_ERROR; michael@0: return NULL; michael@0: } michael@0: ucol_cloneBinary(coll, image, imageSize, status); michael@0: imageAllocated = TRUE; michael@0: } michael@0: else { michael@0: image = (uint8_t *)coll->image; michael@0: } michael@0: localCollator = ucol_initFromBinary(image, imageSize, coll->UCA, localCollator, status); michael@0: if (U_FAILURE(*status)) { michael@0: return NULL; michael@0: } michael@0: michael@0: if (coll->rules) { michael@0: if (coll->freeRulesOnClose) { michael@0: localCollator->rules = u_strcpy(rules, coll->rules); michael@0: //bufferEnd += rulesSize; michael@0: } michael@0: else { michael@0: localCollator->rules = coll->rules; michael@0: } michael@0: localCollator->freeRulesOnClose = FALSE; michael@0: localCollator->rulesLength = coll->rulesLength; michael@0: } michael@0: michael@0: // collator reordering michael@0: if (coll->defaultReorderCodes) { michael@0: localCollator->defaultReorderCodes = michael@0: (int32_t*) uprv_memcpy(defaultReorderCodes, coll->defaultReorderCodes, coll->defaultReorderCodesLength * sizeof(int32_t)); michael@0: localCollator->defaultReorderCodesLength = coll->defaultReorderCodesLength; michael@0: localCollator->freeDefaultReorderCodesOnClose = FALSE; michael@0: } michael@0: if (coll->reorderCodes) { michael@0: localCollator->reorderCodes = michael@0: (int32_t*)uprv_memcpy(reorderCodes, coll->reorderCodes, coll->reorderCodesLength * sizeof(int32_t)); michael@0: localCollator->reorderCodesLength = coll->reorderCodesLength; michael@0: localCollator->freeReorderCodesOnClose = FALSE; michael@0: } michael@0: if (coll->leadBytePermutationTable) { michael@0: localCollator->leadBytePermutationTable = michael@0: (uint8_t*) uprv_memcpy(leadBytePermutationTable, coll->leadBytePermutationTable, 256); michael@0: localCollator->freeLeadBytePermutationTableOnClose = FALSE; michael@0: } michael@0: michael@0: int32_t i; michael@0: for(i = 0; i < UCOL_ATTRIBUTE_COUNT; i++) { michael@0: ucol_setAttribute(localCollator, (UColAttribute)i, ucol_getAttribute(coll, (UColAttribute)i, status), status); michael@0: } michael@0: // zero copies of pointers michael@0: localCollator->actualLocale = NULL; michael@0: localCollator->validLocale = NULL; michael@0: localCollator->requestedLocale = NULL; michael@0: localCollator->ucaRules = coll->ucaRules; // There should only be one copy here. 
michael@0: localCollator->freeOnClose = TRUE; michael@0: localCollator->freeImageOnClose = imageAllocated; michael@0: return localCollator; michael@0: } michael@0: michael@0: U_CAPI void U_EXPORT2 michael@0: ucol_close(UCollator *coll) michael@0: { michael@0: UTRACE_ENTRY_OC(UTRACE_UCOL_CLOSE); michael@0: UTRACE_DATA1(UTRACE_INFO, "coll = %p", coll); michael@0: if(coll != NULL) { michael@0: // these are always owned by each UCollator struct, michael@0: // so we always free them michael@0: if(coll->validLocale != NULL) { michael@0: uprv_free(coll->validLocale); michael@0: } michael@0: if(coll->actualLocale != NULL) { michael@0: uprv_free(coll->actualLocale); michael@0: } michael@0: if(coll->requestedLocale != NULL) { michael@0: uprv_free(coll->requestedLocale); michael@0: } michael@0: if(coll->latinOneCEs != NULL) { michael@0: uprv_free(coll->latinOneCEs); michael@0: } michael@0: if(coll->options != NULL && coll->freeOptionsOnClose) { michael@0: uprv_free(coll->options); michael@0: } michael@0: if(coll->rules != NULL && coll->freeRulesOnClose) { michael@0: uprv_free((UChar *)coll->rules); michael@0: } michael@0: if(coll->image != NULL && coll->freeImageOnClose) { michael@0: uprv_free((UCATableHeader *)coll->image); michael@0: } michael@0: michael@0: if(coll->leadBytePermutationTable != NULL && coll->freeLeadBytePermutationTableOnClose == TRUE) { michael@0: uprv_free(coll->leadBytePermutationTable); michael@0: } michael@0: if(coll->defaultReorderCodes != NULL && coll->freeDefaultReorderCodesOnClose == TRUE) { michael@0: uprv_free(coll->defaultReorderCodes); michael@0: } michael@0: if(coll->reorderCodes != NULL && coll->freeReorderCodesOnClose == TRUE) { michael@0: uprv_free(coll->reorderCodes); michael@0: } michael@0: michael@0: if(coll->delegate != NULL) { michael@0: delete (Collator*)coll->delegate; michael@0: } michael@0: michael@0: /* Here, it would be advisable to close: */ michael@0: /* - UData for UCA (unless we stuff it in the root resb */ michael@0: /* Again, do we need additional housekeeping... HMMM! 
*/ michael@0: UTRACE_DATA1(UTRACE_INFO, "coll->freeOnClose: %d", coll->freeOnClose); michael@0: if(coll->freeOnClose){ michael@0: /* for safeClone, if freeOnClose is FALSE, michael@0: don't free the other instance data */ michael@0: uprv_free(coll); michael@0: } michael@0: } michael@0: UTRACE_EXIT(); michael@0: } michael@0: michael@0: void ucol_setOptionsFromHeader(UCollator* result, UColOptionSet * opts, UErrorCode *status) { michael@0: if(U_FAILURE(*status)) { michael@0: return; michael@0: } michael@0: result->caseFirst = (UColAttributeValue)opts->caseFirst; michael@0: result->caseLevel = (UColAttributeValue)opts->caseLevel; michael@0: result->frenchCollation = (UColAttributeValue)opts->frenchCollation; michael@0: result->normalizationMode = (UColAttributeValue)opts->normalizationMode; michael@0: if(result->normalizationMode == UCOL_ON && !initializeFCD(status)) { michael@0: return; michael@0: } michael@0: result->strength = (UColAttributeValue)opts->strength; michael@0: result->variableTopValue = opts->variableTopValue; michael@0: result->alternateHandling = (UColAttributeValue)opts->alternateHandling; michael@0: result->hiraganaQ = (UColAttributeValue)opts->hiraganaQ; michael@0: result->numericCollation = (UColAttributeValue)opts->numericCollation; michael@0: result->caseFirstisDefault = TRUE; michael@0: result->caseLevelisDefault = TRUE; michael@0: result->frenchCollationisDefault = TRUE; michael@0: result->normalizationModeisDefault = TRUE; michael@0: result->strengthisDefault = TRUE; michael@0: result->variableTopValueisDefault = TRUE; michael@0: result->alternateHandlingisDefault = TRUE; michael@0: result->hiraganaQisDefault = TRUE; michael@0: result->numericCollationisDefault = TRUE; michael@0: michael@0: ucol_updateInternalState(result, status); michael@0: michael@0: result->options = opts; michael@0: } michael@0: michael@0: michael@0: /** michael@0: * Approximate determination if a character is at a contraction end. michael@0: * Guaranteed to be TRUE if a character is at the end of a contraction, michael@0: * otherwise it is not deterministic. michael@0: * @param c character to be determined michael@0: * @param coll collator michael@0: */ michael@0: static michael@0: inline UBool ucol_contractionEndCP(UChar c, const UCollator *coll) { michael@0: if (c < coll->minContrEndCP) { michael@0: return FALSE; michael@0: } michael@0: michael@0: int32_t hash = c; michael@0: uint8_t htbyte; michael@0: if (hash >= UCOL_UNSAFECP_TABLE_SIZE*8) { michael@0: if (U16_IS_TRAIL(c)) { michael@0: return TRUE; michael@0: } michael@0: hash = (hash & UCOL_UNSAFECP_TABLE_MASK) + 256; michael@0: } michael@0: htbyte = coll->contrEndCP[hash>>3]; michael@0: return (((htbyte >> (hash & 7)) & 1) == 1); michael@0: } michael@0: michael@0: michael@0: michael@0: /* michael@0: * i_getCombiningClass() michael@0: * A fast, at least partly inline version of u_getCombiningClass() michael@0: * This is a candidate for further optimization. Used heavily michael@0: * in contraction processing. 
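 *
 *    Added illustrative note: for the bulk of text this returns 0 without the
 *    u_getCombiningClass() lookup, e.g. for 'A' (U+0041 < 0x300). Only BMP code
 *    points at or above U+0300 that the collator flags as unsafe, plus all
 *    supplementary code points, fall through to the real lookup; U+0301, for
 *    instance, then yields its combining class 230 in typical collators.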
michael@0: */ michael@0: static michael@0: inline uint8_t i_getCombiningClass(UChar32 c, const UCollator *coll) { michael@0: uint8_t sCC = 0; michael@0: if ((c >= 0x300 && ucol_unsafeCP(c, coll)) || c > 0xFFFF) { michael@0: sCC = u_getCombiningClass(c); michael@0: } michael@0: return sCC; michael@0: } michael@0: michael@0: UCollator* ucol_initCollator(const UCATableHeader *image, UCollator *fillIn, const UCollator *UCA, UErrorCode *status) { michael@0: UChar c; michael@0: UCollator *result = fillIn; michael@0: if(U_FAILURE(*status) || image == NULL) { michael@0: return NULL; michael@0: } michael@0: michael@0: if(result == NULL) { michael@0: result = (UCollator *)uprv_malloc(sizeof(UCollator)); michael@0: if(result == NULL) { michael@0: *status = U_MEMORY_ALLOCATION_ERROR; michael@0: return result; michael@0: } michael@0: result->freeOnClose = TRUE; michael@0: } else { michael@0: result->freeOnClose = FALSE; michael@0: } michael@0: michael@0: result->delegate = NULL; michael@0: michael@0: result->image = image; michael@0: result->mapping.getFoldingOffset = _getFoldingOffset; michael@0: const uint8_t *mapping = (uint8_t*)result->image+result->image->mappingPosition; michael@0: utrie_unserialize(&result->mapping, mapping, result->image->endExpansionCE - result->image->mappingPosition, status); michael@0: if(U_FAILURE(*status)) { michael@0: if(result->freeOnClose == TRUE) { michael@0: uprv_free(result); michael@0: result = NULL; michael@0: } michael@0: return result; michael@0: } michael@0: michael@0: result->latinOneMapping = UTRIE_GET32_LATIN1(&result->mapping); michael@0: result->contractionCEs = (uint32_t*)((uint8_t*)result->image+result->image->contractionCEs); michael@0: result->contractionIndex = (UChar*)((uint8_t*)result->image+result->image->contractionIndex); michael@0: result->expansion = (uint32_t*)((uint8_t*)result->image+result->image->expansion); michael@0: result->rules = NULL; michael@0: result->rulesLength = 0; michael@0: result->freeRulesOnClose = FALSE; michael@0: result->defaultReorderCodes = NULL; michael@0: result->defaultReorderCodesLength = 0; michael@0: result->freeDefaultReorderCodesOnClose = FALSE; michael@0: result->reorderCodes = NULL; michael@0: result->reorderCodesLength = 0; michael@0: result->freeReorderCodesOnClose = FALSE; michael@0: result->leadBytePermutationTable = NULL; michael@0: result->freeLeadBytePermutationTableOnClose = FALSE; michael@0: michael@0: /* get the version info from UCATableHeader and populate the Collator struct*/ michael@0: result->dataVersion[0] = result->image->version[0]; /* UCA Builder version*/ michael@0: result->dataVersion[1] = result->image->version[1]; /* UCA Tailoring rules version*/ michael@0: result->dataVersion[2] = 0; michael@0: result->dataVersion[3] = 0; michael@0: michael@0: result->unsafeCP = (uint8_t *)result->image + result->image->unsafeCP; michael@0: result->minUnsafeCP = 0; michael@0: for (c=0; c<0x300; c++) { // Find the smallest unsafe char. michael@0: if (ucol_unsafeCP(c, result)) break; michael@0: } michael@0: result->minUnsafeCP = c; michael@0: michael@0: result->contrEndCP = (uint8_t *)result->image + result->image->contrEndCP; michael@0: result->minContrEndCP = 0; michael@0: for (c=0; c<0x300; c++) { // Find the Contraction-ending char. 
michael@0: if (ucol_contractionEndCP(c, result)) break; michael@0: } michael@0: result->minContrEndCP = c; michael@0: michael@0: /* max expansion tables */ michael@0: result->endExpansionCE = (uint32_t*)((uint8_t*)result->image + michael@0: result->image->endExpansionCE); michael@0: result->lastEndExpansionCE = result->endExpansionCE + michael@0: result->image->endExpansionCECount - 1; michael@0: result->expansionCESize = (uint8_t*)result->image + michael@0: result->image->expansionCESize; michael@0: michael@0: michael@0: //result->errorCode = *status; michael@0: michael@0: result->latinOneCEs = NULL; michael@0: michael@0: result->latinOneRegenTable = FALSE; michael@0: result->latinOneFailed = FALSE; michael@0: result->UCA = UCA; michael@0: michael@0: /* Normally these will be set correctly later. This is the default if you use UCA or the default. */ michael@0: result->ucaRules = NULL; michael@0: result->actualLocale = NULL; michael@0: result->validLocale = NULL; michael@0: result->requestedLocale = NULL; michael@0: result->hasRealData = FALSE; // real data lives in .dat file... michael@0: result->freeImageOnClose = FALSE; michael@0: michael@0: /* set attributes */ michael@0: ucol_setOptionsFromHeader( michael@0: result, michael@0: (UColOptionSet*)((uint8_t*)result->image+result->image->options), michael@0: status); michael@0: result->freeOptionsOnClose = FALSE; michael@0: michael@0: return result; michael@0: } michael@0: michael@0: /* new Mark's code */ michael@0: michael@0: /** michael@0: * For generation of Implicit CEs michael@0: * @author Davis michael@0: * michael@0: * Cleaned up so that changes can be made more easily. michael@0: * Old values: michael@0: # First Implicit: E26A792D michael@0: # Last Implicit: E3DC70C0 michael@0: # First CJK: E0030300 michael@0: # Last CJK: E0A9DD00 michael@0: # First CJK_A: E0A9DF00 michael@0: # Last CJK_A: E0DE3100 michael@0: */ michael@0: /* Following is a port of Mark's code for new treatment of implicits. michael@0: * It is positioned here, since ucol_initUCA need to initialize the michael@0: * variables below according to the data in the fractional UCA. michael@0: */ michael@0: michael@0: /** michael@0: * Function used to: michael@0: * a) collapse the 2 different Han ranges from UCA into one (in the right order), and michael@0: * b) bump any non-CJK characters by 10FFFF. michael@0: * The relevant blocks are: michael@0: * A: 4E00..9FFF; CJK Unified Ideographs michael@0: * F900..FAFF; CJK Compatibility Ideographs michael@0: * B: 3400..4DBF; CJK Unified Ideographs Extension A michael@0: * 20000..XX; CJK Unified Ideographs Extension B (and others later on) michael@0: * As long as michael@0: * no new B characters are allocated between 4E00 and FAFF, and michael@0: * no new A characters are outside of this range, michael@0: * (very high probability) this simple code will work. michael@0: * The reordered blocks are: michael@0: * Block1 is CJK michael@0: * Block2 is CJK_COMPAT_USED michael@0: * Block3 is CJK_A michael@0: * (all contiguous) michael@0: * Any other CJK gets its normal code point michael@0: * Any non-CJK gets +10FFFF michael@0: * When we reorder Block1, we make sure that it is at the very start, michael@0: * so that it will use a 3-byte form. michael@0: * Warning: the we only pick up the compatibility characters that are michael@0: * NOT decomposed, so that block is smaller! 
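 *
 * Worked example (illustrative only, using the CJK_* constants defined below):
 *   swapCJK(0x4E00)  = 0x0000    start of Block1 (CJK)
 *   swapCJK(0xFA0E)  = 0x51CD    Block2 starts right after Block1 (0x9FCD - 0x4E00)
 *   swapCJK(0x3400)  = 0x51EF    Block3 (CJK_A) follows the 0x22 compat characters
 *   swapCJK(0x20000) = 0x20000   Extension B keeps its code point
 *   swapCJK(0x0041)  = 0x110041  non-CJK: bumped by 0x110000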
michael@0: */ michael@0: michael@0: // CONSTANTS michael@0: static const UChar32 michael@0: NON_CJK_OFFSET = 0x110000, michael@0: UCOL_MAX_INPUT = 0x220001; // 2 * Unicode range + 2 michael@0: michael@0: /** michael@0: * Precomputed by initImplicitConstants() michael@0: */ michael@0: static int32_t michael@0: final3Multiplier = 0, michael@0: final4Multiplier = 0, michael@0: final3Count = 0, michael@0: final4Count = 0, michael@0: medialCount = 0, michael@0: min3Primary = 0, michael@0: min4Primary = 0, michael@0: max4Primary = 0, michael@0: minTrail = 0, michael@0: maxTrail = 0, michael@0: max3Trail = 0, michael@0: max4Trail = 0, michael@0: min4Boundary = 0; michael@0: michael@0: static const UChar32 michael@0: // 4E00;;Lo;0;L;;;;;N;;;;; michael@0: // 9FCC;;Lo;0;L;;;;;N;;;;; (Unicode 6.1) michael@0: CJK_BASE = 0x4E00, michael@0: CJK_LIMIT = 0x9FCC+1, michael@0: // Unified CJK ideographs in the compatibility ideographs block. michael@0: CJK_COMPAT_USED_BASE = 0xFA0E, michael@0: CJK_COMPAT_USED_LIMIT = 0xFA2F+1, michael@0: // 3400;;Lo;0;L;;;;;N;;;;; michael@0: // 4DB5;;Lo;0;L;;;;;N;;;;; michael@0: CJK_A_BASE = 0x3400, michael@0: CJK_A_LIMIT = 0x4DB5+1, michael@0: // 20000;;Lo;0;L;;;;;N;;;;; michael@0: // 2A6D6;;Lo;0;L;;;;;N;;;;; michael@0: CJK_B_BASE = 0x20000, michael@0: CJK_B_LIMIT = 0x2A6D6+1, michael@0: // 2A700;;Lo;0;L;;;;;N;;;;; michael@0: // 2B734;;Lo;0;L;;;;;N;;;;; michael@0: CJK_C_BASE = 0x2A700, michael@0: CJK_C_LIMIT = 0x2B734+1, michael@0: // 2B740;;Lo;0;L;;;;;N;;;;; michael@0: // 2B81D;;Lo;0;L;;;;;N;;;;; michael@0: CJK_D_BASE = 0x2B740, michael@0: CJK_D_LIMIT = 0x2B81D+1; michael@0: // when adding to this list, look for all occurrences (in project) michael@0: // of CJK_C_BASE and CJK_C_LIMIT, etc. to check for code that needs changing!!!! michael@0: michael@0: static UChar32 swapCJK(UChar32 i) { michael@0: if (i < CJK_A_BASE) { michael@0: // non-CJK michael@0: } else if (i < CJK_A_LIMIT) { michael@0: // Extension A has lower code points than the original Unihan+compat michael@0: // but sorts higher. 
michael@0: return i - CJK_A_BASE michael@0: + (CJK_LIMIT - CJK_BASE) michael@0: + (CJK_COMPAT_USED_LIMIT - CJK_COMPAT_USED_BASE); michael@0: } else if (i < CJK_BASE) { michael@0: // non-CJK michael@0: } else if (i < CJK_LIMIT) { michael@0: return i - CJK_BASE; michael@0: } else if (i < CJK_COMPAT_USED_BASE) { michael@0: // non-CJK michael@0: } else if (i < CJK_COMPAT_USED_LIMIT) { michael@0: return i - CJK_COMPAT_USED_BASE michael@0: + (CJK_LIMIT - CJK_BASE); michael@0: } else if (i < CJK_B_BASE) { michael@0: // non-CJK michael@0: } else if (i < CJK_B_LIMIT) { michael@0: return i; // non-BMP-CJK michael@0: } else if (i < CJK_C_BASE) { michael@0: // non-CJK michael@0: } else if (i < CJK_C_LIMIT) { michael@0: return i; // non-BMP-CJK michael@0: } else if (i < CJK_D_BASE) { michael@0: // non-CJK michael@0: } else if (i < CJK_D_LIMIT) { michael@0: return i; // non-BMP-CJK michael@0: } michael@0: return i + NON_CJK_OFFSET; // non-CJK michael@0: } michael@0: michael@0: U_CAPI UChar32 U_EXPORT2 michael@0: uprv_uca_getRawFromCodePoint(UChar32 i) { michael@0: return swapCJK(i)+1; michael@0: } michael@0: michael@0: U_CAPI UChar32 U_EXPORT2 michael@0: uprv_uca_getCodePointFromRaw(UChar32 i) { michael@0: i--; michael@0: UChar32 result = 0; michael@0: if(i >= NON_CJK_OFFSET) { michael@0: result = i - NON_CJK_OFFSET; michael@0: } else if(i >= CJK_B_BASE) { michael@0: result = i; michael@0: } else if(i < CJK_A_LIMIT + (CJK_LIMIT - CJK_BASE) + (CJK_COMPAT_USED_LIMIT - CJK_COMPAT_USED_BASE)) { // rest of CJKs, compacted michael@0: if(i < CJK_LIMIT - CJK_BASE) { michael@0: result = i + CJK_BASE; michael@0: } else if(i < (CJK_LIMIT - CJK_BASE) + (CJK_COMPAT_USED_LIMIT - CJK_COMPAT_USED_BASE)) { michael@0: result = i + CJK_COMPAT_USED_BASE - (CJK_LIMIT - CJK_BASE); michael@0: } else { michael@0: result = i + CJK_A_BASE - (CJK_LIMIT - CJK_BASE) - (CJK_COMPAT_USED_LIMIT - CJK_COMPAT_USED_BASE); michael@0: } michael@0: } else { michael@0: result = -1; michael@0: } michael@0: return result; michael@0: } michael@0: michael@0: // GET IMPLICIT PRIMARY WEIGHTS michael@0: // Return value is left justified primary key michael@0: U_CAPI uint32_t U_EXPORT2 michael@0: uprv_uca_getImplicitFromRaw(UChar32 cp) { michael@0: /* michael@0: if (cp < 0 || cp > UCOL_MAX_INPUT) { michael@0: throw new IllegalArgumentException("Code point out of range " + Utility.hex(cp)); michael@0: } michael@0: */ michael@0: int32_t last0 = cp - min4Boundary; michael@0: if (last0 < 0) { michael@0: int32_t last1 = cp / final3Count; michael@0: last0 = cp % final3Count; michael@0: michael@0: int32_t last2 = last1 / medialCount; michael@0: last1 %= medialCount; michael@0: michael@0: last0 = minTrail + last0*final3Multiplier; // spread out, leaving gap at start michael@0: last1 = minTrail + last1; // offset michael@0: last2 = min3Primary + last2; // offset michael@0: /* michael@0: if (last2 >= min4Primary) { michael@0: throw new IllegalArgumentException("4-byte out of range: " + Utility.hex(cp) + ", " + Utility.hex(last2)); michael@0: } michael@0: */ michael@0: return (last2 << 24) + (last1 << 16) + (last0 << 8); michael@0: } else { michael@0: int32_t last1 = last0 / final4Count; michael@0: last0 %= final4Count; michael@0: michael@0: int32_t last2 = last1 / medialCount; michael@0: last1 %= medialCount; michael@0: michael@0: int32_t last3 = last2 / medialCount; michael@0: last2 %= medialCount; michael@0: michael@0: last0 = minTrail + last0*final4Multiplier; // spread out, leaving gap at start michael@0: last1 = minTrail + last1; // offset michael@0: 
last2 = minTrail + last2; // offset michael@0: last3 = min4Primary + last3; // offset michael@0: /* michael@0: if (last3 > max4Primary) { michael@0: throw new IllegalArgumentException("4-byte out of range: " + Utility.hex(cp) + ", " + Utility.hex(last3)); michael@0: } michael@0: */ michael@0: return (last3 << 24) + (last2 << 16) + (last1 << 8) + last0; michael@0: } michael@0: } michael@0: michael@0: static uint32_t U_EXPORT2 michael@0: uprv_uca_getImplicitPrimary(UChar32 cp) { michael@0: //fprintf(stdout, "Incoming: %04x\n", cp); michael@0: //if (DEBUG) System.out.println("Incoming: " + Utility.hex(cp)); michael@0: michael@0: cp = swapCJK(cp); michael@0: cp++; michael@0: // we now have a range of numbers from 0 to 21FFFF. michael@0: michael@0: //if (DEBUG) System.out.println("CJK swapped: " + Utility.hex(cp)); michael@0: //fprintf(stdout, "CJK swapped: %04x\n", cp); michael@0: michael@0: return uprv_uca_getImplicitFromRaw(cp); michael@0: } michael@0: michael@0: /** michael@0: * Converts implicit CE into raw integer ("code point") michael@0: * @param implicit michael@0: * @return -1 if illegal format michael@0: */ michael@0: U_CAPI UChar32 U_EXPORT2 michael@0: uprv_uca_getRawFromImplicit(uint32_t implicit) { michael@0: UChar32 result; michael@0: UChar32 b3 = implicit & 0xFF; michael@0: UChar32 b2 = (implicit >> 8) & 0xFF; michael@0: UChar32 b1 = (implicit >> 16) & 0xFF; michael@0: UChar32 b0 = (implicit >> 24) & 0xFF; michael@0: michael@0: // simple parameter checks michael@0: if (b0 < min3Primary || b0 > max4Primary michael@0: || b1 < minTrail || b1 > maxTrail) michael@0: return -1; michael@0: // normal offsets michael@0: b1 -= minTrail; michael@0: michael@0: // take care of the final values, and compose michael@0: if (b0 < min4Primary) { michael@0: if (b2 < minTrail || b2 > max3Trail || b3 != 0) michael@0: return -1; michael@0: b2 -= minTrail; michael@0: UChar32 remainder = b2 % final3Multiplier; michael@0: if (remainder != 0) michael@0: return -1; michael@0: b0 -= min3Primary; michael@0: b2 /= final3Multiplier; michael@0: result = ((b0 * medialCount) + b1) * final3Count + b2; michael@0: } else { michael@0: if (b2 < minTrail || b2 > maxTrail michael@0: || b3 < minTrail || b3 > max4Trail) michael@0: return -1; michael@0: b2 -= minTrail; michael@0: b3 -= minTrail; michael@0: UChar32 remainder = b3 % final4Multiplier; michael@0: if (remainder != 0) michael@0: return -1; michael@0: b3 /= final4Multiplier; michael@0: b0 -= min4Primary; michael@0: result = (((b0 * medialCount) + b1) * medialCount + b2) * final4Count + b3 + min4Boundary; michael@0: } michael@0: // final check michael@0: if (result < 0 || result > UCOL_MAX_INPUT) michael@0: return -1; michael@0: return result; michael@0: } michael@0: michael@0: michael@0: static inline int32_t divideAndRoundUp(int a, int b) { michael@0: return 1 + (a-1)/b; michael@0: } michael@0: michael@0: /* this function is either called from initUCA or from genUCA before michael@0: * doing canonical closure for the UCA. michael@0: */ michael@0: michael@0: /** michael@0: * Set up to generate implicits. michael@0: * Maintenance Note: this function may end up being called more than once, due michael@0: * to threading races during initialization. Make sure that michael@0: * none of the Constants is ever transiently assigned an michael@0: * incorrect value. 
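 *
 * Worked example (illustrative, with the values passed in by
 * uprv_uca_initImplicitConstants(): minPrimary=0xE0, maxPrimary=0xE4,
 * minTrail=0x04, maxTrail=0xFE, gap3=1, primaries3count=1):
 * final3Multiplier=2, final3Count=125, medialCount=251, so the single
 * 3-byte lead byte 0xE0 covers 251*125 = 31375 raw values (min4Boundary),
 * and lead bytes 0xE1..0xE4 carry 4-byte forms for the rest of the
 * 0..0x220001 raw range.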
michael@0: * @param minPrimary michael@0: * @param maxPrimary michael@0: * @param minTrail final byte michael@0: * @param maxTrail final byte michael@0: * @param gap3 the gap we leave for tailoring for 3-byte forms michael@0: * @param gap4 the gap we leave for tailoring for 4-byte forms michael@0: */ michael@0: static void initImplicitConstants(int minPrimary, int maxPrimary, michael@0: int minTrailIn, int maxTrailIn, michael@0: int gap3, int primaries3count, michael@0: UErrorCode *status) { michael@0: // some simple parameter checks michael@0: if ((minPrimary < 0 || minPrimary >= maxPrimary || maxPrimary > 0xFF) michael@0: || (minTrailIn < 0 || minTrailIn >= maxTrailIn || maxTrailIn > 0xFF) michael@0: || (primaries3count < 1)) michael@0: { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR; michael@0: return; michael@0: }; michael@0: michael@0: minTrail = minTrailIn; michael@0: maxTrail = maxTrailIn; michael@0: michael@0: min3Primary = minPrimary; michael@0: max4Primary = maxPrimary; michael@0: // compute constants for use later. michael@0: // number of values we can use in trailing bytes michael@0: // leave room for empty values between AND above, e.g. if gap = 2 michael@0: // range 3..7 => +3 -4 -5 -6 -7: so 1 value michael@0: // range 3..8 => +3 -4 -5 +6 -7 -8: so 2 values michael@0: // range 3..9 => +3 -4 -5 +6 -7 -8 -9: so 2 values michael@0: final3Multiplier = gap3 + 1; michael@0: final3Count = (maxTrail - minTrail + 1) / final3Multiplier; michael@0: max3Trail = minTrail + (final3Count - 1) * final3Multiplier; michael@0: michael@0: // medials can use full range michael@0: medialCount = (maxTrail - minTrail + 1); michael@0: // find out how many values fit in each form michael@0: int32_t threeByteCount = medialCount * final3Count; michael@0: // now determine where the 3/4 boundary is. michael@0: // we use 3 bytes below the boundary, and 4 above michael@0: int32_t primariesAvailable = maxPrimary - minPrimary + 1; michael@0: int32_t primaries4count = primariesAvailable - primaries3count; michael@0: michael@0: michael@0: int32_t min3ByteCoverage = primaries3count * threeByteCount; michael@0: min4Primary = minPrimary + primaries3count; michael@0: min4Boundary = min3ByteCoverage; michael@0: // Now expand out the multiplier for the 4 bytes, and redo. michael@0: michael@0: int32_t totalNeeded = UCOL_MAX_INPUT - min4Boundary; michael@0: int32_t neededPerPrimaryByte = divideAndRoundUp(totalNeeded, primaries4count); michael@0: int32_t neededPerFinalByte = divideAndRoundUp(neededPerPrimaryByte, medialCount * medialCount); michael@0: int32_t gap4 = (maxTrail - minTrail - 1) / neededPerFinalByte; michael@0: if (gap4 < 1) { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR; michael@0: return; michael@0: } michael@0: final4Multiplier = gap4 + 1; michael@0: final4Count = neededPerFinalByte; michael@0: max4Trail = minTrail + (final4Count - 1) * final4Multiplier; michael@0: } michael@0: michael@0: /** michael@0: * Supply parameters for generating implicit CEs michael@0: */ michael@0: U_CAPI void U_EXPORT2 michael@0: uprv_uca_initImplicitConstants(UErrorCode *status) { michael@0: // 13 is the largest 4-byte gap we can use without getting 2 four-byte forms. michael@0: //initImplicitConstants(minPrimary, maxPrimary, 0x04, 0xFE, 1, 1, status); michael@0: initImplicitConstants(minImplicitPrimary, maxImplicitPrimary, 0x04, 0xFE, 1, 1, status); michael@0: } michael@0: michael@0: michael@0: /* collIterNormalize Incremental Normalization happens here. 
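                       Added illustrative example, assuming standard Unicode
                       combining classes: the two-character sequence
                       <U+1E0B (d with dot above), U+0323 (combining dot below)>
                       is not FCD, because the decomposition of U+1E0B ends in
                       dot above (ccc 230) while U+0323 leads with ccc 220;
                       collIterFCD() flags this, and this function then writes
                       the NFD form <0064 0323 0307> into the writable buffer,
                       so that CEs are fetched from the reordered text.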
*/ michael@0: /* pick up the range of chars identifed by FCD, */ michael@0: /* normalize it into the collIterate's writable buffer, */ michael@0: /* switch the collIterate's state to use the writable buffer. */ michael@0: /* */ michael@0: static michael@0: void collIterNormalize(collIterate *collationSource) michael@0: { michael@0: UErrorCode status = U_ZERO_ERROR; michael@0: const UChar *srcP = collationSource->pos - 1; /* Start of chars to normalize */ michael@0: const UChar *endP = collationSource->fcdPosition; /* End of region to normalize+1 */ michael@0: michael@0: collationSource->nfd->normalize(UnicodeString(FALSE, srcP, (int32_t)(endP - srcP)), michael@0: collationSource->writableBuffer, michael@0: status); michael@0: if (U_FAILURE(status)) { michael@0: #ifdef UCOL_DEBUG michael@0: fprintf(stderr, "collIterNormalize(), NFD failed, status = %s\n", u_errorName(status)); michael@0: #endif michael@0: return; michael@0: } michael@0: michael@0: collationSource->pos = collationSource->writableBuffer.getTerminatedBuffer(); michael@0: collationSource->origFlags = collationSource->flags; michael@0: collationSource->flags |= UCOL_ITER_INNORMBUF; michael@0: collationSource->flags &= ~(UCOL_ITER_NORM | UCOL_ITER_HASLEN | UCOL_USE_ITERATOR); michael@0: } michael@0: michael@0: michael@0: // This function takes the iterator and extracts normalized stuff up to the next boundary michael@0: // It is similar in the end results to the collIterNormalize, but for the cases when we michael@0: // use an iterator michael@0: /*static michael@0: inline void normalizeIterator(collIterate *collationSource) { michael@0: UErrorCode status = U_ZERO_ERROR; michael@0: UBool wasNormalized = FALSE; michael@0: //int32_t iterIndex = collationSource->iterator->getIndex(collationSource->iterator, UITER_CURRENT); michael@0: uint32_t iterIndex = collationSource->iterator->getState(collationSource->iterator); michael@0: int32_t normLen = unorm_next(collationSource->iterator, collationSource->writableBuffer, michael@0: (int32_t)collationSource->writableBufSize, UNORM_FCD, 0, TRUE, &wasNormalized, &status); michael@0: if(status == U_BUFFER_OVERFLOW_ERROR || normLen == (int32_t)collationSource->writableBufSize) { michael@0: // reallocate and terminate michael@0: if(!u_growBufferFromStatic(collationSource->stackWritableBuffer, michael@0: &collationSource->writableBuffer, michael@0: (int32_t *)&collationSource->writableBufSize, normLen + 1, michael@0: 0) michael@0: ) { michael@0: #ifdef UCOL_DEBUG michael@0: fprintf(stderr, "normalizeIterator(), out of memory\n"); michael@0: #endif michael@0: return; michael@0: } michael@0: status = U_ZERO_ERROR; michael@0: //collationSource->iterator->move(collationSource->iterator, iterIndex, UITER_ZERO); michael@0: collationSource->iterator->setState(collationSource->iterator, iterIndex, &status); michael@0: normLen = unorm_next(collationSource->iterator, collationSource->writableBuffer, michael@0: (int32_t)collationSource->writableBufSize, UNORM_FCD, 0, TRUE, &wasNormalized, &status); michael@0: } michael@0: // Terminate the buffer - we already checked that it is big enough michael@0: collationSource->writableBuffer[normLen] = 0; michael@0: if(collationSource->writableBuffer != collationSource->stackWritableBuffer) { michael@0: collationSource->flags |= UCOL_ITER_ALLOCATED; michael@0: } michael@0: collationSource->pos = collationSource->writableBuffer; michael@0: collationSource->origFlags = collationSource->flags; michael@0: collationSource->flags |= UCOL_ITER_INNORMBUF; michael@0: 
collationSource->flags &= ~(UCOL_ITER_NORM | UCOL_ITER_HASLEN | UCOL_USE_ITERATOR); michael@0: }*/ michael@0: michael@0: michael@0: /* Incremental FCD check and normalize */ michael@0: /* Called from getNextCE when normalization state is suspect. */ michael@0: /* When entering, the state is known to be this: */ michael@0: /* o We are working in the main buffer of the collIterate, not the side */ michael@0: /* writable buffer. When in the side buffer, normalization mode is always off, */ michael@0: /* so we won't get here. */ michael@0: /* o The leading combining class from the current character is 0 or */ michael@0: /* the trailing combining class of the previous char was zero. */ michael@0: /* True because the previous call to this function will have always exited */ michael@0: /* that way, and we get called for every char where cc might be non-zero. */ michael@0: static michael@0: inline UBool collIterFCD(collIterate *collationSource) { michael@0: const UChar *srcP, *endP; michael@0: uint8_t leadingCC; michael@0: uint8_t prevTrailingCC = 0; michael@0: uint16_t fcd; michael@0: UBool needNormalize = FALSE; michael@0: michael@0: srcP = collationSource->pos-1; michael@0: michael@0: if (collationSource->flags & UCOL_ITER_HASLEN) { michael@0: endP = collationSource->endp; michael@0: } else { michael@0: endP = NULL; michael@0: } michael@0: michael@0: // Get the trailing combining class of the current character. If it's zero, we are OK. michael@0: fcd = g_nfcImpl->nextFCD16(srcP, endP); michael@0: if (fcd != 0) { michael@0: prevTrailingCC = (uint8_t)(fcd & LAST_BYTE_MASK_); michael@0: michael@0: if (prevTrailingCC != 0) { michael@0: // The current char has a non-zero trailing CC. Scan forward until we find michael@0: // a char with a leading cc of zero. michael@0: while (endP == NULL || srcP != endP) michael@0: { michael@0: const UChar *savedSrcP = srcP; michael@0: michael@0: fcd = g_nfcImpl->nextFCD16(srcP, endP); michael@0: leadingCC = (uint8_t)(fcd >> SECOND_LAST_BYTE_SHIFT_); michael@0: if (leadingCC == 0) { michael@0: srcP = savedSrcP; // Hit char that is not part of combining sequence. michael@0: // back up over it. (Could be surrogate pair!) 
michael@0: break; michael@0: } michael@0: michael@0: if (leadingCC < prevTrailingCC) { michael@0: needNormalize = TRUE; michael@0: } michael@0: michael@0: prevTrailingCC = (uint8_t)(fcd & LAST_BYTE_MASK_); michael@0: } michael@0: } michael@0: } michael@0: michael@0: collationSource->fcdPosition = (UChar *)srcP; michael@0: michael@0: return needNormalize; michael@0: } michael@0: michael@0: /****************************************************************************/ michael@0: /* Following are the CE retrieval functions */ michael@0: /* */ michael@0: /****************************************************************************/ michael@0: michael@0: static uint32_t getImplicit(UChar32 cp, collIterate *collationSource); michael@0: static uint32_t getPrevImplicit(UChar32 cp, collIterate *collationSource); michael@0: michael@0: /* there should be a macro version of this function in the header file */ michael@0: /* This is the first function that tries to fetch a collation element */ michael@0: /* If it's not succesfull or it encounters a more difficult situation */ michael@0: /* some more sofisticated and slower functions are invoked */ michael@0: static michael@0: inline uint32_t ucol_IGetNextCE(const UCollator *coll, collIterate *collationSource, UErrorCode *status) { michael@0: uint32_t order = 0; michael@0: if (collationSource->CEpos > collationSource->toReturn) { /* Are there any CEs from previous expansions? */ michael@0: order = *(collationSource->toReturn++); /* if so, return them */ michael@0: if(collationSource->CEpos == collationSource->toReturn) { michael@0: collationSource->CEpos = collationSource->toReturn = collationSource->extendCEs ? collationSource->extendCEs : collationSource->CEs; michael@0: } michael@0: return order; michael@0: } michael@0: michael@0: UChar ch = 0; michael@0: collationSource->offsetReturn = NULL; michael@0: michael@0: do { michael@0: for (;;) /* Loop handles case when incremental normalize switches */ michael@0: { /* to or from the side buffer / original string, and we */ michael@0: /* need to start again to get the next character. */ michael@0: michael@0: if ((collationSource->flags & (UCOL_ITER_HASLEN | UCOL_ITER_INNORMBUF | UCOL_ITER_NORM | UCOL_HIRAGANA_Q | UCOL_USE_ITERATOR)) == 0) michael@0: { michael@0: // The source string is null terminated and we're not working from the side buffer, michael@0: // and we're not normalizing. This is the fast path. michael@0: // (We can be in the side buffer for Thai pre-vowel reordering even when not normalizing.) michael@0: ch = *collationSource->pos++; michael@0: if (ch != 0) { michael@0: break; michael@0: } michael@0: else { michael@0: return UCOL_NO_MORE_CES; michael@0: } michael@0: } michael@0: michael@0: if (collationSource->flags & UCOL_ITER_HASLEN) { michael@0: // Normal path for strings when length is specified. michael@0: // (We can't be in side buffer because it is always null terminated.) michael@0: if (collationSource->pos >= collationSource->endp) { michael@0: // Ran off of the end of the main source string. We're done. michael@0: return UCOL_NO_MORE_CES; michael@0: } michael@0: ch = *collationSource->pos++; michael@0: } michael@0: else if(collationSource->flags & UCOL_USE_ITERATOR) { michael@0: UChar32 iterCh = collationSource->iterator->next(collationSource->iterator); michael@0: if(iterCh == U_SENTINEL) { michael@0: return UCOL_NO_MORE_CES; michael@0: } michael@0: ch = (UChar)iterCh; michael@0: } michael@0: else michael@0: { michael@0: // Null terminated string. 
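            // Note: in this branch the text is NUL-terminated, but we may be reading
            // either the original string or the side normalization buffer; when a NUL
            // is read below, the UCOL_ITER_INNORMBUF flag tells the two cases apart.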
michael@0: ch = *collationSource->pos++; michael@0: if (ch == 0) { michael@0: // Ran off end of buffer. michael@0: if ((collationSource->flags & UCOL_ITER_INNORMBUF) == 0) { michael@0: // Ran off end of main string. backing up one character. michael@0: collationSource->pos--; michael@0: return UCOL_NO_MORE_CES; michael@0: } michael@0: else michael@0: { michael@0: // Hit null in the normalize side buffer. michael@0: // Usually this means the end of the normalized data, michael@0: // except for one odd case: a null followed by combining chars, michael@0: // which is the case if we are at the start of the buffer. michael@0: if (collationSource->pos == collationSource->writableBuffer.getBuffer()+1) { michael@0: break; michael@0: } michael@0: michael@0: // Null marked end of side buffer. michael@0: // Revert to the main string and michael@0: // loop back to top to try again to get a character. michael@0: collationSource->pos = collationSource->fcdPosition; michael@0: collationSource->flags = collationSource->origFlags; michael@0: continue; michael@0: } michael@0: } michael@0: } michael@0: michael@0: if(collationSource->flags&UCOL_HIRAGANA_Q) { michael@0: /* Codepoints \u3099-\u309C are both Hiragana and Katakana. Set the flag michael@0: * based on whether the previous codepoint was Hiragana or Katakana. michael@0: */ michael@0: if(((ch>=0x3040 && ch<=0x3096) || (ch >= 0x309d && ch <= 0x309f)) || michael@0: ((collationSource->flags & UCOL_WAS_HIRAGANA) && (ch >= 0x3099 && ch <= 0x309C))) { michael@0: collationSource->flags |= UCOL_WAS_HIRAGANA; michael@0: } else { michael@0: collationSource->flags &= ~UCOL_WAS_HIRAGANA; michael@0: } michael@0: } michael@0: michael@0: // We've got a character. See if there's any fcd and/or normalization stuff to do. michael@0: // Note that UCOL_ITER_NORM flag is always zero when we are in the side buffer. michael@0: if ((collationSource->flags & UCOL_ITER_NORM) == 0) { michael@0: break; michael@0: } michael@0: michael@0: if (collationSource->fcdPosition >= collationSource->pos) { michael@0: // An earlier FCD check has already covered the current character. michael@0: // We can go ahead and process this char. michael@0: break; michael@0: } michael@0: michael@0: if (ch < ZERO_CC_LIMIT_ ) { michael@0: // Fast fcd safe path. Trailing combining class == 0. This char is OK. michael@0: break; michael@0: } michael@0: michael@0: if (ch < NFC_ZERO_CC_BLOCK_LIMIT_) { michael@0: // We need to peek at the next character in order to tell if we are FCD michael@0: if ((collationSource->flags & UCOL_ITER_HASLEN) && collationSource->pos >= collationSource->endp) { michael@0: // We are at the last char of source string. michael@0: // It is always OK for FCD check. michael@0: break; michael@0: } michael@0: michael@0: // Not at last char of source string (or we'll check against terminating null). Do the FCD fast test michael@0: if (*collationSource->pos < NFC_ZERO_CC_BLOCK_LIMIT_) { michael@0: break; michael@0: } michael@0: } michael@0: michael@0: michael@0: // Need a more complete FCD check and possible normalization. michael@0: if (collIterFCD(collationSource)) { michael@0: collIterNormalize(collationSource); michael@0: } michael@0: if ((collationSource->flags & UCOL_ITER_INNORMBUF) == 0) { michael@0: // No normalization was needed. Go ahead and process the char we already had. michael@0: break; michael@0: } michael@0: michael@0: // Some normalization happened. Next loop iteration will pick up a char michael@0: // from the normalization buffer. 
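            // A worked example of when this slow path fires (illustrative; U+0301 has
            // ccc=230 and U+0328 has ccc=202): in "a" U+0301 U+0328 the two marks are
            // out of canonical order, so collIterFCD() asks for normalization and
            // collIterNormalize() places the NFD-reordered run (U+0328 U+0301) into the
            // side buffer; "a" U+0328 U+0301 is already FCD and is processed in place.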
michael@0: michael@0: } // end for (;;) michael@0: michael@0: michael@0: if (ch <= 0xFF) { michael@0: /* For latin-1 characters we never need to fall back to the UCA table */ michael@0: /* because all of the UCA data is replicated in the latinOneMapping array */ michael@0: order = coll->latinOneMapping[ch]; michael@0: if (order > UCOL_NOT_FOUND) { michael@0: order = ucol_prv_getSpecialCE(coll, ch, order, collationSource, status); michael@0: } michael@0: } michael@0: else michael@0: { michael@0: // Always use UCA for Han, Hangul michael@0: // (Han extension A is before main Han block) michael@0: // **** Han compatibility chars ?? **** michael@0: if ((collationSource->flags & UCOL_FORCE_HAN_IMPLICIT) != 0 && michael@0: (ch >= UCOL_FIRST_HAN_A && ch <= UCOL_LAST_HANGUL)) { michael@0: if (ch > UCOL_LAST_HAN && ch < UCOL_FIRST_HANGUL) { michael@0: // between the two target ranges; do normal lookup michael@0: // **** this range is YI, Modifier tone letters, **** michael@0: // **** Latin-D, Syloti Nagari, Phags-pa. **** michael@0: // **** Latin-D might be tailored, so we need to **** michael@0: // **** do the normal lookup for these guys. **** michael@0: order = UTRIE_GET32_FROM_LEAD(&coll->mapping, ch); michael@0: } else { michael@0: // in one of the target ranges; use UCA michael@0: order = UCOL_NOT_FOUND; michael@0: } michael@0: } else { michael@0: order = UTRIE_GET32_FROM_LEAD(&coll->mapping, ch); michael@0: } michael@0: michael@0: if(order > UCOL_NOT_FOUND) { /* if a CE is special */ michael@0: order = ucol_prv_getSpecialCE(coll, ch, order, collationSource, status); /* and try to get the special CE */ michael@0: } michael@0: michael@0: if(order == UCOL_NOT_FOUND && coll->UCA) { /* We couldn't find a good CE in the tailoring */ michael@0: /* if we got here, the codepoint MUST be over 0xFF - so we look directly in the trie */ michael@0: order = UTRIE_GET32_FROM_LEAD(&coll->UCA->mapping, ch); michael@0: michael@0: if(order > UCOL_NOT_FOUND) { /* UCA also gives us a special CE */ michael@0: order = ucol_prv_getSpecialCE(coll->UCA, ch, order, collationSource, status); michael@0: } michael@0: } michael@0: } michael@0: } while ( order == UCOL_IGNORABLE && ch >= UCOL_FIRST_HANGUL && ch <= UCOL_LAST_HANGUL ); michael@0: michael@0: if(order == UCOL_NOT_FOUND) { michael@0: order = getImplicit(ch, collationSource); michael@0: } michael@0: return order; /* return the CE */ michael@0: } michael@0: michael@0: /* ucol_getNextCE, out-of-line version for use from other files. */ michael@0: U_CAPI uint32_t U_EXPORT2 michael@0: ucol_getNextCE(const UCollator *coll, collIterate *collationSource, UErrorCode *status) { michael@0: return ucol_IGetNextCE(coll, collationSource, status); michael@0: } michael@0: michael@0: michael@0: /** michael@0: * Incremental previous normalization happens here. Pick up the range of chars michael@0: * identified by FCD, normalize it into the collIterate's writable buffer, michael@0: * switch the collIterate's state to use the writable buffer.
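 *
 * Condensed sketch of the buffer switch performed by the body below (the real
 * code adds offset bookkeeping on top of this; pStart, pEnd, normLen and status
 * are the locals defined there):
 *
 *     data->nfd->normalize(UnicodeString(FALSE, pStart, (int32_t)((pEnd - pStart) + 1)),
 *                          data->writableBuffer, status);
 *     data->writableBuffer.insert(0, (UChar)0);   // leading NUL marks the buffer start
 *     data->pos    = data->writableBuffer.getTerminatedBuffer() + 1 + normLen;
 *     data->flags |= UCOL_ITER_INNORMBUF;
 *     data->flags &= ~(UCOL_ITER_NORM | UCOL_ITER_HASLEN);
 *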
michael@0: * @param data collation iterator data michael@0: */ michael@0: static michael@0: void collPrevIterNormalize(collIterate *data) michael@0: { michael@0: UErrorCode status = U_ZERO_ERROR; michael@0: const UChar *pEnd = data->pos; /* End normalize + 1 */ michael@0: const UChar *pStart; michael@0: michael@0: /* Start normalize */ michael@0: if (data->fcdPosition == NULL) { michael@0: pStart = data->string; michael@0: } michael@0: else { michael@0: pStart = data->fcdPosition + 1; michael@0: } michael@0: michael@0: int32_t normLen = michael@0: data->nfd->normalize(UnicodeString(FALSE, pStart, (int32_t)((pEnd - pStart) + 1)), michael@0: data->writableBuffer, michael@0: status). michael@0: length(); michael@0: if(U_FAILURE(status)) { michael@0: return; michael@0: } michael@0: /* michael@0: this puts the null termination in front of the normalized string instead michael@0: of the end michael@0: */ michael@0: data->writableBuffer.insert(0, (UChar)0); michael@0: michael@0: /* michael@0: * The usual case at this point is that we've got a base michael@0: * character followed by marks that were normalized. If michael@0: * fcdPosition is NULL, that means that we backed up to michael@0: * the beginning of the string and there's no base character. michael@0: * michael@0: * Forward processing will usually normalize when it sees michael@0: * the first mark, so that mark will get its natural offset michael@0: * and the rest will get the offset of the character following michael@0: * the marks. The base character will also get its natural offset. michael@0: * michael@0: * We write the offset of the base character, if there is one, michael@0: * followed by the offset of the first mark and then the offsets michael@0: * of the rest of the marks. michael@0: */ michael@0: int32_t firstMarkOffset = 0; michael@0: int32_t trailOffset = (int32_t)(data->pos - data->string + 1); michael@0: int32_t trailCount = normLen - 1; michael@0: michael@0: if (data->fcdPosition != NULL) { michael@0: int32_t baseOffset = (int32_t)(data->fcdPosition - data->string); michael@0: UChar baseChar = *data->fcdPosition; michael@0: michael@0: firstMarkOffset = baseOffset + 1; michael@0: michael@0: /* michael@0: * If the base character is the start of a contraction, forward processing michael@0: * will normalize the marks while checking for the contraction, which means michael@0: * that the offset of the first mark will be the same as the other marks.
michael@0: * michael@0: * **** THIS IS PROBABLY NOT A COMPLETE TEST **** michael@0: */ michael@0: if (baseChar >= 0x100) { michael@0: uint32_t baseOrder = UTRIE_GET32_FROM_LEAD(&data->coll->mapping, baseChar); michael@0: michael@0: if (baseOrder == UCOL_NOT_FOUND && data->coll->UCA) { michael@0: baseOrder = UTRIE_GET32_FROM_LEAD(&data->coll->UCA->mapping, baseChar); michael@0: } michael@0: michael@0: if (baseOrder > UCOL_NOT_FOUND && getCETag(baseOrder) == CONTRACTION_TAG) { michael@0: firstMarkOffset = trailOffset; michael@0: } michael@0: } michael@0: michael@0: data->appendOffset(baseOffset, status); michael@0: } michael@0: michael@0: data->appendOffset(firstMarkOffset, status); michael@0: michael@0: for (int32_t i = 0; i < trailCount; i += 1) { michael@0: data->appendOffset(trailOffset, status); michael@0: } michael@0: michael@0: data->offsetRepeatValue = trailOffset; michael@0: michael@0: data->offsetReturn = data->offsetStore - 1; michael@0: if (data->offsetReturn == data->offsetBuffer) { michael@0: data->offsetStore = data->offsetBuffer; michael@0: } michael@0: michael@0: data->pos = data->writableBuffer.getTerminatedBuffer() + 1 + normLen; michael@0: data->origFlags = data->flags; michael@0: data->flags |= UCOL_ITER_INNORMBUF; michael@0: data->flags &= ~(UCOL_ITER_NORM | UCOL_ITER_HASLEN); michael@0: } michael@0: michael@0: michael@0: /** michael@0: * Incremental FCD check for previous iteration and normalize. Called from michael@0: * getPrevCE when normalization state is suspect. michael@0: * When entering, the state is known to be this: michael@0: * o We are working in the main buffer of the collIterate, not the side michael@0: * writable buffer. When in the side buffer, normalization mode is always michael@0: * off, so we won't get here. michael@0: * o The leading combining class from the current character is 0 or the michael@0: * trailing combining class of the previous char was zero. michael@0: * True because the previous call to this function will have always exited michael@0: * that way, and we get called for every char where cc might be non-zero. michael@0: * @param data collation iterate struct michael@0: * @return normalization status, TRUE for normalization to be done, FALSE michael@0: * otherwise michael@0: */ michael@0: static michael@0: inline UBool collPrevIterFCD(collIterate *data) michael@0: { michael@0: const UChar *src, *start; michael@0: uint8_t leadingCC; michael@0: uint8_t trailingCC = 0; michael@0: uint16_t fcd; michael@0: UBool result = FALSE; michael@0: michael@0: start = data->string; michael@0: src = data->pos + 1; michael@0: michael@0: /* Get the trailing combining class of the current character. */ michael@0: fcd = g_nfcImpl->previousFCD16(start, src); michael@0: michael@0: leadingCC = (uint8_t)(fcd >> SECOND_LAST_BYTE_SHIFT_); michael@0: michael@0: if (leadingCC != 0) { michael@0: /* michael@0: The current char has a non-zero leading combining class. michael@0: Scan backward until we find a char with a trailing cc of zero. 
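        Whenever the leading class of the later character is smaller than the
        trailing class of the character before it, the marks are out of canonical
        order and the caller must normalize the region (this is the
        leadingCC < trailingCC test in the loop below).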
michael@0: */ michael@0: for (;;) michael@0: { michael@0: if (start == src) { michael@0: data->fcdPosition = NULL; michael@0: return result; michael@0: } michael@0: michael@0: fcd = g_nfcImpl->previousFCD16(start, src); michael@0: michael@0: trailingCC = (uint8_t)(fcd & LAST_BYTE_MASK_); michael@0: michael@0: if (trailingCC == 0) { michael@0: break; michael@0: } michael@0: michael@0: if (leadingCC < trailingCC) { michael@0: result = TRUE; michael@0: } michael@0: michael@0: leadingCC = (uint8_t)(fcd >> SECOND_LAST_BYTE_SHIFT_); michael@0: } michael@0: } michael@0: michael@0: data->fcdPosition = (UChar *)src; michael@0: michael@0: return result; michael@0: } michael@0: michael@0: /** gets a code unit from the string at a given offset michael@0: * Handles both normal and iterative cases. michael@0: * No error checking - caller beware! michael@0: */ michael@0: static inline michael@0: UChar peekCodeUnit(collIterate *source, int32_t offset) { michael@0: if(source->pos != NULL) { michael@0: return *(source->pos + offset); michael@0: } else if(source->iterator != NULL) { michael@0: UChar32 c; michael@0: if(offset != 0) { michael@0: source->iterator->move(source->iterator, offset, UITER_CURRENT); michael@0: c = source->iterator->next(source->iterator); michael@0: source->iterator->move(source->iterator, -offset-1, UITER_CURRENT); michael@0: } else { michael@0: c = source->iterator->current(source->iterator); michael@0: } michael@0: return c >= 0 ? (UChar)c : 0xfffd; // If the caller works properly, we should never see c<0. michael@0: } else { michael@0: return 0xfffd; michael@0: } michael@0: } michael@0: michael@0: // Code point version. Treats the offset as a _code point_ delta. michael@0: // We cannot use U16_FWD_1_UNSAFE and similar because we might not have well-formed UTF-16. michael@0: // We cannot use U16_FWD_1 and similar because we do not know the start and limit of the buffer. michael@0: static inline michael@0: UChar32 peekCodePoint(collIterate *source, int32_t offset) { michael@0: UChar32 c; michael@0: if(source->pos != NULL) { michael@0: const UChar *p = source->pos; michael@0: if(offset >= 0) { michael@0: // Skip forward over (offset-1) code points. michael@0: while(--offset >= 0) { michael@0: if(U16_IS_LEAD(*p++) && U16_IS_TRAIL(*p)) { michael@0: ++p; michael@0: } michael@0: } michael@0: // Read the code point there. michael@0: c = *p++; michael@0: UChar trail; michael@0: if(U16_IS_LEAD(c) && U16_IS_TRAIL(trail = *p)) { michael@0: c = U16_GET_SUPPLEMENTARY(c, trail); michael@0: } michael@0: } else /* offset<0 */ { michael@0: // Skip backward over (offset-1) code points. michael@0: while(++offset < 0) { michael@0: if(U16_IS_TRAIL(*--p) && U16_IS_LEAD(*(p - 1))) { michael@0: --p; michael@0: } michael@0: } michael@0: // Read the code point before that. michael@0: c = *--p; michael@0: UChar lead; michael@0: if(U16_IS_TRAIL(c) && U16_IS_LEAD(lead = *(p - 1))) { michael@0: c = U16_GET_SUPPLEMENTARY(lead, c); michael@0: } michael@0: } michael@0: } else if(source->iterator != NULL) { michael@0: if(offset >= 0) { michael@0: // Skip forward over (offset-1) code points. michael@0: int32_t fwd = offset; michael@0: while(fwd-- > 0) { michael@0: uiter_next32(source->iterator); michael@0: } michael@0: // Read the code point there. michael@0: c = uiter_current32(source->iterator); michael@0: // Return to the starting point, skipping backward over (offset-1) code points. 
michael@0: while(offset-- > 0) { michael@0: uiter_previous32(source->iterator); michael@0: } michael@0: } else /* offset<0 */ { michael@0: // Read backward, reading offset code points, remember only the last-read one. michael@0: int32_t back = offset; michael@0: do { michael@0: c = uiter_previous32(source->iterator); michael@0: } while(++back < 0); michael@0: // Return to the starting position, skipping forward over offset code points. michael@0: do { michael@0: uiter_next32(source->iterator); michael@0: } while(++offset < 0); michael@0: } michael@0: } else { michael@0: c = U_SENTINEL; michael@0: } michael@0: return c; michael@0: } michael@0: michael@0: /** michael@0: * Determines if we are at the start of the data string in the backwards michael@0: * collation iterator michael@0: * @param data collation iterator michael@0: * @return TRUE if we are at the start michael@0: */ michael@0: static michael@0: inline UBool isAtStartPrevIterate(collIterate *data) { michael@0: if(data->pos == NULL && data->iterator != NULL) { michael@0: return !data->iterator->hasPrevious(data->iterator); michael@0: } michael@0: //return (collIter_bos(data)) || michael@0: return (data->pos == data->string) || michael@0: ((data->flags & UCOL_ITER_INNORMBUF) && (data->pos != NULL) && michael@0: *(data->pos - 1) == 0 && data->fcdPosition == NULL); michael@0: } michael@0: michael@0: static michael@0: inline void goBackOne(collIterate *data) { michael@0: # if 0 michael@0: // somehow, it looks like we need to keep iterator synced up michael@0: // at all times, as above. michael@0: if(data->pos) { michael@0: data->pos--; michael@0: } michael@0: if(data->iterator) { michael@0: data->iterator->previous(data->iterator); michael@0: } michael@0: #endif michael@0: if(data->iterator && (data->flags & UCOL_USE_ITERATOR)) { michael@0: data->iterator->previous(data->iterator); michael@0: } michael@0: if(data->pos) { michael@0: data->pos --; michael@0: } michael@0: } michael@0: michael@0: /** michael@0: * Inline function that gets a simple CE. michael@0: * So what it does is that it will first check the expansion buffer. If the michael@0: * expansion buffer is not empty, ie the end pointer to the expansion buffer michael@0: * is different from the string pointer, we return the collation element at the michael@0: * return pointer and decrement it. michael@0: * For more complicated CEs it resorts to getComplicatedCE. 
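 *
 * Condensed sketch of the expansion-buffer check described above (mirrors the
 * first branch of the body; the real code also resets CEpos when the buffer
 * drains):
 *
 *     if (data->toReturn > (data->extendCEs ? data->extendCEs : data->CEs)) {
 *         data->toReturn -= 1;
 *         return *(data->toReturn);      // a CE left over from an earlier expansion
 *     }
 *     // otherwise fetch the previous character and compute a fresh CE
 *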
michael@0: * @param coll collator data michael@0: * @param data collation iterator struct michael@0: * @param status error status michael@0: */ michael@0: static michael@0: inline uint32_t ucol_IGetPrevCE(const UCollator *coll, collIterate *data, michael@0: UErrorCode *status) michael@0: { michael@0: uint32_t result = (uint32_t)UCOL_NULLORDER; michael@0: michael@0: if (data->offsetReturn != NULL) { michael@0: if (data->offsetRepeatCount > 0) { michael@0: data->offsetRepeatCount -= 1; michael@0: } else { michael@0: if (data->offsetReturn == data->offsetBuffer) { michael@0: data->offsetReturn = NULL; michael@0: data->offsetStore = data->offsetBuffer; michael@0: } else { michael@0: data->offsetReturn -= 1; michael@0: } michael@0: } michael@0: } michael@0: michael@0: if ((data->extendCEs && data->toReturn > data->extendCEs) || michael@0: (!data->extendCEs && data->toReturn > data->CEs)) michael@0: { michael@0: data->toReturn -= 1; michael@0: result = *(data->toReturn); michael@0: if (data->CEs == data->toReturn || data->extendCEs == data->toReturn) { michael@0: data->CEpos = data->toReturn; michael@0: } michael@0: } michael@0: else { michael@0: UChar ch = 0; michael@0: michael@0: do { michael@0: /* michael@0: Loop handles case when incremental normalize switches to or from the michael@0: side buffer / original string, and we need to start again to get the michael@0: next character. michael@0: */ michael@0: for (;;) { michael@0: if (data->flags & UCOL_ITER_HASLEN) { michael@0: /* michael@0: Normal path for strings when length is specified. michael@0: Not in side buffer because it is always null terminated. michael@0: */ michael@0: if (data->pos <= data->string) { michael@0: /* End of the main source string */ michael@0: return UCOL_NO_MORE_CES; michael@0: } michael@0: data->pos --; michael@0: ch = *data->pos; michael@0: } michael@0: // we are using an iterator to go back. Pray for us! michael@0: else if (data->flags & UCOL_USE_ITERATOR) { michael@0: UChar32 iterCh = data->iterator->previous(data->iterator); michael@0: if(iterCh == U_SENTINEL) { michael@0: return UCOL_NO_MORE_CES; michael@0: } else { michael@0: ch = (UChar)iterCh; michael@0: } michael@0: } michael@0: else { michael@0: data->pos --; michael@0: ch = *data->pos; michael@0: /* we are in the side buffer. */ michael@0: if (ch == 0) { michael@0: /* michael@0: At the start of the normalize side buffer. michael@0: Go back to string. michael@0: Because pointer points to the last accessed character, michael@0: hence we have to increment it by one here. michael@0: */ michael@0: data->flags = data->origFlags; michael@0: data->offsetRepeatValue = 0; michael@0: michael@0: if (data->fcdPosition == NULL) { michael@0: data->pos = data->string; michael@0: return UCOL_NO_MORE_CES; michael@0: } michael@0: else { michael@0: data->pos = data->fcdPosition + 1; michael@0: } michael@0: michael@0: continue; michael@0: } michael@0: } michael@0: michael@0: if(data->flags&UCOL_HIRAGANA_Q) { michael@0: if(ch>=0x3040 && ch<=0x309f) { michael@0: data->flags |= UCOL_WAS_HIRAGANA; michael@0: } else { michael@0: data->flags &= ~UCOL_WAS_HIRAGANA; michael@0: } michael@0: } michael@0: michael@0: /* michael@0: * got a character to determine if there's fcd and/or normalization michael@0: * stuff to do. michael@0: * if the current character is not fcd. michael@0: * if current character is at the start of the string michael@0: * Trailing combining class == 0. 
michael@0: * Note if pos is in the writablebuffer, norm is always 0 michael@0: */ michael@0: if (ch < ZERO_CC_LIMIT_ || michael@0: // this should propel us out of the loop in the iterator case michael@0: (data->flags & UCOL_ITER_NORM) == 0 || michael@0: (data->fcdPosition != NULL && data->fcdPosition <= data->pos) michael@0: || data->string == data->pos) { michael@0: break; michael@0: } michael@0: michael@0: if (ch < NFC_ZERO_CC_BLOCK_LIMIT_) { michael@0: /* if next character is FCD */ michael@0: if (data->pos == data->string) { michael@0: /* First char of string is always OK for FCD check */ michael@0: break; michael@0: } michael@0: michael@0: /* Not first char of string, do the FCD fast test */ michael@0: if (*(data->pos - 1) < NFC_ZERO_CC_BLOCK_LIMIT_) { michael@0: break; michael@0: } michael@0: } michael@0: michael@0: /* Need a more complete FCD check and possible normalization. */ michael@0: if (collPrevIterFCD(data)) { michael@0: collPrevIterNormalize(data); michael@0: } michael@0: michael@0: if ((data->flags & UCOL_ITER_INNORMBUF) == 0) { michael@0: /* No normalization. Go ahead and process the char. */ michael@0: break; michael@0: } michael@0: michael@0: /* michael@0: Some normalization happened. michael@0: Next loop picks up a char from the normalization buffer. michael@0: */ michael@0: } michael@0: michael@0: /* attempt to handle contractions, after removal of the backwards michael@0: contraction michael@0: */ michael@0: if (ucol_contractionEndCP(ch, coll) && !isAtStartPrevIterate(data)) { michael@0: result = ucol_prv_getSpecialPrevCE(coll, ch, UCOL_CONTRACTION, data, status); michael@0: } else { michael@0: if (ch <= 0xFF) { michael@0: result = coll->latinOneMapping[ch]; michael@0: } michael@0: else { michael@0: // Always use UCA for [3400..9FFF], [AC00..D7AF] michael@0: // **** [FA0E..FA2F] ?? **** michael@0: if ((data->flags & UCOL_FORCE_HAN_IMPLICIT) != 0 && michael@0: (ch >= 0x3400 && ch <= 0xD7AF)) { michael@0: if (ch > 0x9FFF && ch < 0xAC00) { michael@0: // between the two target ranges; do normal lookup michael@0: // **** this range is YI, Modifier tone letters, **** michael@0: // **** Latin-D, Syloti Nagari, Phagas-pa. **** michael@0: // **** Latin-D might be tailored, so we need to **** michael@0: // **** do the normal lookup for these guys. 
**** michael@0: result = UTRIE_GET32_FROM_LEAD(&coll->mapping, ch); michael@0: } else { michael@0: result = UCOL_NOT_FOUND; michael@0: } michael@0: } else { michael@0: result = UTRIE_GET32_FROM_LEAD(&coll->mapping, ch); michael@0: } michael@0: } michael@0: if (result > UCOL_NOT_FOUND) { michael@0: result = ucol_prv_getSpecialPrevCE(coll, ch, result, data, status); michael@0: } michael@0: if (result == UCOL_NOT_FOUND) { // Not found in master list michael@0: if (!isAtStartPrevIterate(data) && michael@0: ucol_contractionEndCP(ch, data->coll)) michael@0: { michael@0: result = UCOL_CONTRACTION; michael@0: } else { michael@0: if(coll->UCA) { michael@0: result = UTRIE_GET32_FROM_LEAD(&coll->UCA->mapping, ch); michael@0: } michael@0: } michael@0: michael@0: if (result > UCOL_NOT_FOUND) { michael@0: if(coll->UCA) { michael@0: result = ucol_prv_getSpecialPrevCE(coll->UCA, ch, result, data, status); michael@0: } michael@0: } michael@0: } michael@0: } michael@0: } while ( result == UCOL_IGNORABLE && ch >= UCOL_FIRST_HANGUL && ch <= UCOL_LAST_HANGUL ); michael@0: michael@0: if(result == UCOL_NOT_FOUND) { michael@0: result = getPrevImplicit(ch, data); michael@0: } michael@0: } michael@0: michael@0: return result; michael@0: } michael@0: michael@0: michael@0: /* ucol_getPrevCE, out-of-line version for use from other files. */ michael@0: U_CFUNC uint32_t U_EXPORT2 michael@0: ucol_getPrevCE(const UCollator *coll, collIterate *data, michael@0: UErrorCode *status) { michael@0: return ucol_IGetPrevCE(coll, data, status); michael@0: } michael@0: michael@0: michael@0: /* this should be connected to special Jamo handling */ michael@0: U_CFUNC uint32_t U_EXPORT2 michael@0: ucol_getFirstCE(const UCollator *coll, UChar u, UErrorCode *status) { michael@0: collIterate colIt; michael@0: IInit_collIterate(coll, &u, 1, &colIt, status); michael@0: if(U_FAILURE(*status)) { michael@0: return 0; michael@0: } michael@0: return ucol_IGetNextCE(coll, &colIt, status); michael@0: } michael@0: michael@0: /** michael@0: * Inserts the argument character into the end of the buffer pushing back the michael@0: * null terminator. michael@0: * @param data collIterate struct data michael@0: * @param ch character to be appended michael@0: * @return the position of the new addition michael@0: */ michael@0: static michael@0: inline const UChar * insertBufferEnd(collIterate *data, UChar ch) michael@0: { michael@0: int32_t oldLength = data->writableBuffer.length(); michael@0: return data->writableBuffer.append(ch).getTerminatedBuffer() + oldLength; michael@0: } michael@0: michael@0: /** michael@0: * Inserts the argument string into the end of the buffer pushing back the michael@0: * null terminator. michael@0: * @param data collIterate struct data michael@0: * @param string to be appended michael@0: * @param length of the string to be appended michael@0: * @return the position of the new addition michael@0: */ michael@0: static michael@0: inline const UChar * insertBufferEnd(collIterate *data, const UChar *str, int32_t length) michael@0: { michael@0: int32_t oldLength = data->writableBuffer.length(); michael@0: return data->writableBuffer.append(str, length).getTerminatedBuffer() + oldLength; michael@0: } michael@0: michael@0: /** michael@0: * Special normalization function for contraction in the forwards iterator. michael@0: * This normalization sequence will place the current character at source->pos michael@0: * and its following normalized sequence into the buffer. michael@0: * The fcd position, pos will be changed. 
michael@0: * pos will now point to positions in the buffer. michael@0: * Flags will be changed accordingly. michael@0: * @param data collation iterator data michael@0: */ michael@0: static michael@0: inline void normalizeNextContraction(collIterate *data) michael@0: { michael@0: int32_t strsize; michael@0: UErrorCode status = U_ZERO_ERROR; michael@0: /* because the pointer points to the next character */ michael@0: const UChar *pStart = data->pos - 1; michael@0: const UChar *pEnd; michael@0: michael@0: if ((data->flags & UCOL_ITER_INNORMBUF) == 0) { michael@0: data->writableBuffer.setTo(*(pStart - 1)); michael@0: strsize = 1; michael@0: } michael@0: else { michael@0: strsize = data->writableBuffer.length(); michael@0: } michael@0: michael@0: pEnd = data->fcdPosition; michael@0: michael@0: data->writableBuffer.append( michael@0: data->nfd->normalize(UnicodeString(FALSE, pStart, (int32_t)(pEnd - pStart)), status)); michael@0: if(U_FAILURE(status)) { michael@0: return; michael@0: } michael@0: michael@0: data->pos = data->writableBuffer.getTerminatedBuffer() + strsize; michael@0: data->origFlags = data->flags; michael@0: data->flags |= UCOL_ITER_INNORMBUF; michael@0: data->flags &= ~(UCOL_ITER_NORM | UCOL_ITER_HASLEN); michael@0: } michael@0: michael@0: /** michael@0: * Contraction character management function that returns the next character michael@0: * for the forwards iterator. michael@0: * Does nothing if the next character is in buffer and not the first character michael@0: * in it. michael@0: * Else it checks next character in data string to see if it is normalizable. michael@0: * If it is not, the character is simply copied into the buffer, else michael@0: * the whole normalized substring is copied into the buffer, including the michael@0: * current character. michael@0: * @param data collation element iterator data michael@0: * @return next character michael@0: */ michael@0: static michael@0: inline UChar getNextNormalizedChar(collIterate *data) michael@0: { michael@0: UChar nextch; michael@0: UChar ch; michael@0: // Here we need to add the iterator code. One problem is the way michael@0: // end of string is handled. If we just return next char, it could michael@0: // be the sentinel. Most of the cases already check for this, but we michael@0: // need to be sure. michael@0: if ((data->flags & (UCOL_ITER_NORM | UCOL_ITER_INNORMBUF)) == 0 ) { michael@0: /* if no normalization and not in buffer. */ michael@0: if(data->flags & UCOL_USE_ITERATOR) { michael@0: return (UChar)data->iterator->next(data->iterator); michael@0: } else { michael@0: return *(data->pos ++); michael@0: } michael@0: } michael@0: michael@0: //if (data->flags & UCOL_ITER_NORM && data->flags & UCOL_USE_ITERATOR) { michael@0: //normalizeIterator(data); michael@0: //} michael@0: michael@0: UBool innormbuf = (UBool)(data->flags & UCOL_ITER_INNORMBUF); michael@0: if ((innormbuf && *data->pos != 0) || michael@0: (data->fcdPosition != NULL && !innormbuf && michael@0: data->pos < data->fcdPosition)) { michael@0: /* michael@0: if next character is in normalized buffer, no further normalization michael@0: is required michael@0: */ michael@0: return *(data->pos ++); michael@0: } michael@0: michael@0: if (data->flags & UCOL_ITER_HASLEN) { michael@0: /* in data string */ michael@0: if (data->pos + 1 == data->endp) { michael@0: return *(data->pos ++); michael@0: } michael@0: } michael@0: else { michael@0: if (innormbuf) { michael@0: // inside the normalization buffer, but at the end michael@0: // (since we encountered zero). 
This means, in the michael@0: // case we're using char iterator, that we need to michael@0: // do another round of normalization. michael@0: //if(data->origFlags & UCOL_USE_ITERATOR) { michael@0: // we need to restore original flags, michael@0: // otherwise, we'll lose them michael@0: //data->flags = data->origFlags; michael@0: //normalizeIterator(data); michael@0: //return *(data->pos++); michael@0: //} else { michael@0: /* michael@0: in writable buffer, at this point fcdPosition can not be michael@0: pointing to the end of the data string. see contracting tag. michael@0: */ michael@0: if(data->fcdPosition) { michael@0: if (*(data->fcdPosition + 1) == 0 || michael@0: data->fcdPosition + 1 == data->endp) { michael@0: /* at the end of the string, dump it into the normalizer */ michael@0: data->pos = insertBufferEnd(data, *(data->fcdPosition)) + 1; michael@0: // Check if data->pos received a null pointer michael@0: if (data->pos == NULL) { michael@0: return (UChar)-1; // Return to indicate error. michael@0: } michael@0: return *(data->fcdPosition ++); michael@0: } michael@0: data->pos = data->fcdPosition; michael@0: } else if(data->origFlags & UCOL_USE_ITERATOR) { michael@0: // if we are here, we're using a normalizing iterator. michael@0: // we should just continue further. michael@0: data->flags = data->origFlags; michael@0: data->pos = NULL; michael@0: return (UChar)data->iterator->next(data->iterator); michael@0: } michael@0: //} michael@0: } michael@0: else { michael@0: if (*(data->pos + 1) == 0) { michael@0: return *(data->pos ++); michael@0: } michael@0: } michael@0: } michael@0: michael@0: ch = *data->pos ++; michael@0: nextch = *data->pos; michael@0: michael@0: /* michael@0: * if the current character is not fcd. michael@0: * Trailing combining class == 0. michael@0: */ michael@0: if ((data->fcdPosition == NULL || data->fcdPosition < data->pos) && michael@0: (nextch >= NFC_ZERO_CC_BLOCK_LIMIT_ || michael@0: ch >= NFC_ZERO_CC_BLOCK_LIMIT_)) { michael@0: /* michael@0: Need a more complete FCD check and possible normalization. michael@0: normalize substring will be appended to buffer michael@0: */ michael@0: if (collIterFCD(data)) { michael@0: normalizeNextContraction(data); michael@0: return *(data->pos ++); michael@0: } michael@0: else if (innormbuf) { michael@0: /* fcdposition shifted even when there's no normalization, if we michael@0: don't input the rest into this, we'll get the wrong position when michael@0: we reach the end of the writableBuffer */ michael@0: int32_t length = (int32_t)(data->fcdPosition - data->pos + 1); michael@0: data->pos = insertBufferEnd(data, data->pos - 1, length); michael@0: // Check if data->pos received a null pointer michael@0: if (data->pos == NULL) { michael@0: return (UChar)-1; // Return to indicate error. michael@0: } michael@0: return *(data->pos ++); michael@0: } michael@0: } michael@0: michael@0: if (innormbuf) { michael@0: /* michael@0: no normalization is to be done hence only one character will be michael@0: appended to the buffer. michael@0: */ michael@0: data->pos = insertBufferEnd(data, ch) + 1; michael@0: // Check if data->pos received a null pointer michael@0: if (data->pos == NULL) { michael@0: return (UChar)-1; // Return to indicate error. 
michael@0: } michael@0: } michael@0: michael@0: /* points back to the pos in string */ michael@0: return ch; michael@0: } michael@0: michael@0: michael@0: michael@0: /** michael@0: * Function to copy the buffer into writableBuffer and sets the fcd position to michael@0: * the correct position michael@0: * @param source data string source michael@0: * @param buffer character buffer michael@0: */ michael@0: static michael@0: inline void setDiscontiguosAttribute(collIterate *source, const UnicodeString &buffer) michael@0: { michael@0: /* okay confusing part here. to ensure that the skipped characters are michael@0: considered later, we need to place it in the appropriate position in the michael@0: normalization buffer and reassign the pos pointer. simple case if pos michael@0: reside in string, simply copy to normalization buffer and michael@0: fcdposition = pos, pos = start of normalization buffer. if pos in michael@0: normalization buffer, we'll insert the copy infront of pos and point pos michael@0: to the start of the normalization buffer. why am i doing these copies? michael@0: well, so that the whole chunk of codes in the getNextCE, ucol_prv_getSpecialCE does michael@0: not require any changes, which be really painful. */ michael@0: if (source->flags & UCOL_ITER_INNORMBUF) { michael@0: int32_t replaceLength = source->pos - source->writableBuffer.getBuffer(); michael@0: source->writableBuffer.replace(0, replaceLength, buffer); michael@0: } michael@0: else { michael@0: source->fcdPosition = source->pos; michael@0: source->origFlags = source->flags; michael@0: source->flags |= UCOL_ITER_INNORMBUF; michael@0: source->flags &= ~(UCOL_ITER_NORM | UCOL_ITER_HASLEN | UCOL_USE_ITERATOR); michael@0: source->writableBuffer = buffer; michael@0: } michael@0: michael@0: source->pos = source->writableBuffer.getTerminatedBuffer(); michael@0: } michael@0: michael@0: /** michael@0: * Function to get the discontiguos collation element within the source. michael@0: * Note this function will set the position to the appropriate places. 
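 * A discontiguous match can succeed even when other combining marks intervene
 * between the base character and the mark that completes the contraction; the
 * characters that get skipped over are collected in a temporary buffer and
 * handed to setDiscontiguosAttribute() so they still receive their own CEs
 * afterwards.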
michael@0: * @param coll current collator used michael@0: * @param source data string source michael@0: * @param constart index to the start character in the contraction table michael@0: * @return discontiguos collation element offset michael@0: */ michael@0: static michael@0: uint32_t getDiscontiguous(const UCollator *coll, collIterate *source, michael@0: const UChar *constart) michael@0: { michael@0: /* source->pos currently points to the second combining character after michael@0: the start character */ michael@0: const UChar *temppos = source->pos; michael@0: UnicodeString buffer; michael@0: const UChar *tempconstart = constart; michael@0: uint8_t tempflags = source->flags; michael@0: UBool multicontraction = FALSE; michael@0: collIterateState discState; michael@0: michael@0: backupState(source, &discState); michael@0: michael@0: buffer.setTo(peekCodePoint(source, -1)); michael@0: for (;;) { michael@0: UChar *UCharOffset; michael@0: UChar schar, michael@0: tchar; michael@0: uint32_t result; michael@0: michael@0: if (((source->flags & UCOL_ITER_HASLEN) && source->pos >= source->endp) michael@0: || (peekCodeUnit(source, 0) == 0 && michael@0: //|| (*source->pos == 0 && michael@0: ((source->flags & UCOL_ITER_INNORMBUF) == 0 || michael@0: source->fcdPosition == NULL || michael@0: source->fcdPosition == source->endp || michael@0: *(source->fcdPosition) == 0 || michael@0: u_getCombiningClass(*(source->fcdPosition)) == 0)) || michael@0: /* end of string in null terminated string or stopped by a michael@0: null character, note fcd does not always point to a base michael@0: character after the discontiguos change */ michael@0: u_getCombiningClass(peekCodePoint(source, 0)) == 0) { michael@0: //u_getCombiningClass(*(source->pos)) == 0) { michael@0: //constart = (UChar *)coll->image + getContractOffset(CE); michael@0: if (multicontraction) { michael@0: source->pos = temppos - 1; michael@0: setDiscontiguosAttribute(source, buffer); michael@0: return *(coll->contractionCEs + michael@0: (tempconstart - coll->contractionIndex)); michael@0: } michael@0: constart = tempconstart; michael@0: break; michael@0: } michael@0: michael@0: UCharOffset = (UChar *)(tempconstart + 1); /* skip the backward offset*/ michael@0: schar = getNextNormalizedChar(source); michael@0: michael@0: while (schar > (tchar = *UCharOffset)) { michael@0: UCharOffset++; michael@0: } michael@0: michael@0: if (schar != tchar) { michael@0: /* not the correct codepoint. 
we stuff the current codepoint into michael@0: the discontiguos buffer and try the next character */ michael@0: buffer.append(schar); michael@0: continue; michael@0: } michael@0: else { michael@0: if (u_getCombiningClass(schar) == michael@0: u_getCombiningClass(peekCodePoint(source, -2))) { michael@0: buffer.append(schar); michael@0: continue; michael@0: } michael@0: result = *(coll->contractionCEs + michael@0: (UCharOffset - coll->contractionIndex)); michael@0: } michael@0: michael@0: if (result == UCOL_NOT_FOUND) { michael@0: break; michael@0: } else if (isContraction(result)) { michael@0: /* this is a multi-contraction*/ michael@0: tempconstart = (UChar *)coll->image + getContractOffset(result); michael@0: if (*(coll->contractionCEs + (constart - coll->contractionIndex)) michael@0: != UCOL_NOT_FOUND) { michael@0: multicontraction = TRUE; michael@0: temppos = source->pos + 1; michael@0: } michael@0: } else { michael@0: setDiscontiguosAttribute(source, buffer); michael@0: return result; michael@0: } michael@0: } michael@0: michael@0: /* no problems simply reverting just like that, michael@0: if we are in string before getting into this function, points back to michael@0: string hence no problem. michael@0: if we are in normalization buffer before getting into this function, michael@0: since we'll never use another normalization within this function, we michael@0: know that fcdposition points to a base character. the normalization buffer michael@0: never change, hence this revert works. */ michael@0: loadState(source, &discState, TRUE); michael@0: goBackOne(source); michael@0: michael@0: //source->pos = temppos - 1; michael@0: source->flags = tempflags; michael@0: return *(coll->contractionCEs + (constart - coll->contractionIndex)); michael@0: } michael@0: michael@0: /* now uses Mark's getImplicitPrimary code */ michael@0: static michael@0: inline uint32_t getImplicit(UChar32 cp, collIterate *collationSource) { michael@0: uint32_t r = uprv_uca_getImplicitPrimary(cp); michael@0: *(collationSource->CEpos++) = ((r & 0x0000FFFF)<<16) | 0x000000C0; michael@0: collationSource->offsetRepeatCount += 1; michael@0: return (r & UCOL_PRIMARYMASK) | 0x00000505; // This was 'order' michael@0: } michael@0: michael@0: /** michael@0: * Inserts the argument character into the front of the buffer replacing the michael@0: * front null terminator. michael@0: * @param data collation element iterator data michael@0: * @param ch character to be appended michael@0: */ michael@0: static michael@0: inline void insertBufferFront(collIterate *data, UChar ch) michael@0: { michael@0: data->pos = data->writableBuffer.setCharAt(0, ch).insert(0, (UChar)0).getTerminatedBuffer() + 2; michael@0: } michael@0: michael@0: /** michael@0: * Special normalization function for contraction in the previous iterator. michael@0: * This normalization sequence will place the current character at source->pos michael@0: * and its following normalized sequence into the buffer. michael@0: * The fcd position, pos will be changed. michael@0: * pos will now point to positions in the buffer. michael@0: * Flags will be changed accordingly. 
michael@0: * @param data collation iterator data michael@0: */ michael@0: static michael@0: inline void normalizePrevContraction(collIterate *data, UErrorCode *status) michael@0: { michael@0: const UChar *pEnd = data->pos + 1; /* End normalize + 1 */ michael@0: const UChar *pStart; michael@0: michael@0: UnicodeString endOfBuffer; michael@0: if (data->flags & UCOL_ITER_HASLEN) { michael@0: /* michael@0: normalization buffer not used yet, we'll pull down the next michael@0: character into the end of the buffer michael@0: */ michael@0: endOfBuffer.setTo(*pEnd); michael@0: } michael@0: else { michael@0: endOfBuffer.setTo(data->writableBuffer, 1); // after the leading NUL michael@0: } michael@0: michael@0: if (data->fcdPosition == NULL) { michael@0: pStart = data->string; michael@0: } michael@0: else { michael@0: pStart = data->fcdPosition + 1; michael@0: } michael@0: int32_t normLen = michael@0: data->nfd->normalize(UnicodeString(FALSE, pStart, (int32_t)(pEnd - pStart)), michael@0: data->writableBuffer, michael@0: *status). michael@0: length(); michael@0: if(U_FAILURE(*status)) { michael@0: return; michael@0: } michael@0: /* michael@0: this puts the null termination infront of the normalized string instead michael@0: of the end michael@0: */ michael@0: data->pos = michael@0: data->writableBuffer.insert(0, (UChar)0).append(endOfBuffer).getTerminatedBuffer() + michael@0: 1 + normLen; michael@0: data->origFlags = data->flags; michael@0: data->flags |= UCOL_ITER_INNORMBUF; michael@0: data->flags &= ~(UCOL_ITER_NORM | UCOL_ITER_HASLEN); michael@0: } michael@0: michael@0: /** michael@0: * Contraction character management function that returns the previous character michael@0: * for the backwards iterator. michael@0: * Does nothing if the previous character is in buffer and not the first michael@0: * character in it. michael@0: * Else it checks previous character in data string to see if it is michael@0: * normalizable. michael@0: * If it is not, the character is simply copied into the buffer, else michael@0: * the whole normalized substring is copied into the buffer, including the michael@0: * current character. michael@0: * @param data collation element iterator data michael@0: * @return previous character michael@0: */ michael@0: static michael@0: inline UChar getPrevNormalizedChar(collIterate *data, UErrorCode *status) michael@0: { michael@0: UChar prevch; michael@0: UChar ch; michael@0: const UChar *start; michael@0: UBool innormbuf = (UBool)(data->flags & UCOL_ITER_INNORMBUF); michael@0: if ((data->flags & (UCOL_ITER_NORM | UCOL_ITER_INNORMBUF)) == 0 || michael@0: (innormbuf && *(data->pos - 1) != 0)) { michael@0: /* michael@0: if no normalization. michael@0: if previous character is in normalized buffer, no further normalization michael@0: is required michael@0: */ michael@0: if(data->flags & UCOL_USE_ITERATOR) { michael@0: data->iterator->move(data->iterator, -1, UITER_CURRENT); michael@0: return (UChar)data->iterator->next(data->iterator); michael@0: } else { michael@0: return *(data->pos - 1); michael@0: } michael@0: } michael@0: michael@0: start = data->pos; michael@0: if ((data->fcdPosition==NULL)||(data->flags & UCOL_ITER_HASLEN)) { michael@0: /* in data string */ michael@0: if ((start - 1) == data->string) { michael@0: return *(start - 1); michael@0: } michael@0: start --; michael@0: ch = *start; michael@0: prevch = *(start - 1); michael@0: } michael@0: else { michael@0: /* michael@0: in writable buffer, at this point fcdPosition can not be NULL. michael@0: see contracting tag. 
michael@0: */ michael@0: if (data->fcdPosition == data->string) { michael@0: /* at the start of the string, just dump it into the normalizer */ michael@0: insertBufferFront(data, *(data->fcdPosition)); michael@0: data->fcdPosition = NULL; michael@0: return *(data->pos - 1); michael@0: } michael@0: start = data->fcdPosition; michael@0: ch = *start; michael@0: prevch = *(start - 1); michael@0: } michael@0: /* michael@0: * if the current character is not fcd. michael@0: * Trailing combining class == 0. michael@0: */ michael@0: if (data->fcdPosition > start && michael@0: (ch >= NFC_ZERO_CC_BLOCK_LIMIT_ || prevch >= NFC_ZERO_CC_BLOCK_LIMIT_)) michael@0: { michael@0: /* michael@0: Need a more complete FCD check and possible normalization. michael@0: normalize substring will be appended to buffer michael@0: */ michael@0: const UChar *backuppos = data->pos; michael@0: data->pos = start; michael@0: if (collPrevIterFCD(data)) { michael@0: normalizePrevContraction(data, status); michael@0: return *(data->pos - 1); michael@0: } michael@0: data->pos = backuppos; michael@0: data->fcdPosition ++; michael@0: } michael@0: michael@0: if (innormbuf) { michael@0: /* michael@0: no normalization is to be done hence only one character will be michael@0: appended to the buffer. michael@0: */ michael@0: insertBufferFront(data, ch); michael@0: data->fcdPosition --; michael@0: } michael@0: michael@0: return ch; michael@0: } michael@0: michael@0: /* This function handles the special CEs like contractions, expansions, surrogates, Thai */ michael@0: /* It is called by getNextCE */ michael@0: michael@0: /* The following should be even */ michael@0: #define UCOL_MAX_DIGITS_FOR_NUMBER 254 michael@0: michael@0: uint32_t ucol_prv_getSpecialCE(const UCollator *coll, UChar ch, uint32_t CE, collIterate *source, UErrorCode *status) { michael@0: collIterateState entryState; michael@0: backupState(source, &entryState); michael@0: UChar32 cp = ch; michael@0: michael@0: for (;;) { michael@0: // This loop will repeat only in the case of contractions, and only when a contraction michael@0: // is found and the first CE resulting from that contraction is itself a special michael@0: // (an expansion, for example.) All other special CE types are fully handled the michael@0: // first time through, and the loop exits. michael@0: michael@0: const uint32_t *CEOffset = NULL; michael@0: switch(getCETag(CE)) { michael@0: case NOT_FOUND_TAG: michael@0: /* This one is not found, and we'll let somebody else bother about it... no more games */ michael@0: return CE; michael@0: case SPEC_PROC_TAG: michael@0: { michael@0: // Special processing is getting a CE that is preceded by a certain prefix michael@0: // Currently this is only needed for optimizing Japanese length and iteration marks. michael@0: // When we encouter a special processing tag, we go backwards and try to see if michael@0: // we have a match. michael@0: // Contraction tables are used - so the whole process is not unlike contraction. michael@0: // prefix data is stored backwards in the table. michael@0: const UChar *UCharOffset; michael@0: UChar schar, tchar; michael@0: collIterateState prefixState; michael@0: backupState(source, &prefixState); michael@0: loadState(source, &entryState, TRUE); michael@0: goBackOne(source); // We want to look at the point where we entered - actually one michael@0: // before that... 
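            // Concretely this is how, e.g., the Japanese length mark U+30FC ends up
            // with a CE that depends on the kana written in front of it: we walk
            // backwards through the prefix (pre-context) table until a stored prefix
            // matches, or give up and fall back to the state we entered with.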
michael@0: michael@0: for(;;) { michael@0: // This loop will run once per source string character, for as long as we michael@0: // are matching a potential contraction sequence michael@0: michael@0: // First we position ourselves at the begining of contraction sequence michael@0: const UChar *ContractionStart = UCharOffset = (UChar *)coll->image+getContractOffset(CE); michael@0: if (collIter_bos(source)) { michael@0: CE = *(coll->contractionCEs + (UCharOffset - coll->contractionIndex)); michael@0: break; michael@0: } michael@0: schar = getPrevNormalizedChar(source, status); michael@0: goBackOne(source); michael@0: michael@0: while(schar > (tchar = *UCharOffset)) { /* since the contraction codepoints should be ordered, we skip all that are smaller */ michael@0: UCharOffset++; michael@0: } michael@0: michael@0: if (schar == tchar) { michael@0: // Found the source string char in the table. michael@0: // Pick up the corresponding CE from the table. michael@0: CE = *(coll->contractionCEs + michael@0: (UCharOffset - coll->contractionIndex)); michael@0: } michael@0: else michael@0: { michael@0: // Source string char was not in the table. michael@0: // We have not found the prefix. michael@0: CE = *(coll->contractionCEs + michael@0: (ContractionStart - coll->contractionIndex)); michael@0: } michael@0: michael@0: if(!isPrefix(CE)) { michael@0: // The source string char was in the contraction table, and the corresponding michael@0: // CE is not a prefix CE. We found the prefix, break michael@0: // out of loop, this CE will end up being returned. This is the normal michael@0: // way out of prefix handling when the source actually contained michael@0: // the prefix. michael@0: break; michael@0: } michael@0: } michael@0: if(CE != UCOL_NOT_FOUND) { // we found something and we can merilly continue michael@0: loadState(source, &prefixState, TRUE); michael@0: if(source->origFlags & UCOL_USE_ITERATOR) { michael@0: source->flags = source->origFlags; michael@0: } michael@0: } else { // prefix search was a failure, we have to backup all the way to the start michael@0: loadState(source, &entryState, TRUE); michael@0: } michael@0: break; michael@0: } michael@0: case CONTRACTION_TAG: michael@0: { michael@0: /* This should handle contractions */ michael@0: collIterateState state; michael@0: backupState(source, &state); michael@0: uint32_t firstCE = *(coll->contractionCEs + ((UChar *)coll->image+getContractOffset(CE) - coll->contractionIndex)); //UCOL_NOT_FOUND; michael@0: const UChar *UCharOffset; michael@0: UChar schar, tchar; michael@0: michael@0: for (;;) { michael@0: /* This loop will run once per source string character, for as long as we */ michael@0: /* are matching a potential contraction sequence */ michael@0: michael@0: /* First we position ourselves at the begining of contraction sequence */ michael@0: const UChar *ContractionStart = UCharOffset = (UChar *)coll->image+getContractOffset(CE); michael@0: michael@0: if (collIter_eos(source)) { michael@0: // Ran off the end of the source string. michael@0: CE = *(coll->contractionCEs + (UCharOffset - coll->contractionIndex)); michael@0: // So we'll pick whatever we have at the point... michael@0: if (CE == UCOL_NOT_FOUND) { michael@0: // back up the source over all the chars we scanned going into this contraction. 
michael@0: CE = firstCE; michael@0: loadState(source, &state, TRUE); michael@0: if(source->origFlags & UCOL_USE_ITERATOR) { michael@0: source->flags = source->origFlags; michael@0: } michael@0: } michael@0: break; michael@0: } michael@0: michael@0: uint8_t maxCC = (uint8_t)(*(UCharOffset)&0xFF); /*get the discontiguos stuff */ /* skip the backward offset, see above */ michael@0: uint8_t allSame = (uint8_t)(*(UCharOffset++)>>8); michael@0: michael@0: schar = getNextNormalizedChar(source); michael@0: while(schar > (tchar = *UCharOffset)) { /* since the contraction codepoints should be ordered, we skip all that are smaller */ michael@0: UCharOffset++; michael@0: } michael@0: michael@0: if (schar == tchar) { michael@0: // Found the source string char in the contraction table. michael@0: // Pick up the corresponding CE from the table. michael@0: CE = *(coll->contractionCEs + michael@0: (UCharOffset - coll->contractionIndex)); michael@0: } michael@0: else michael@0: { michael@0: // Source string char was not in contraction table. michael@0: // Unless we have a discontiguous contraction, we have finished michael@0: // with this contraction. michael@0: // in order to do the proper detection, we michael@0: // need to see if we're dealing with a supplementary michael@0: /* We test whether the next two char are surrogate pairs. michael@0: * This test is done if the iterator is not NULL. michael@0: * If there is no surrogate pair, the iterator michael@0: * goes back one if needed. */ michael@0: UChar32 miss = schar; michael@0: if (source->iterator) { michael@0: UChar32 surrNextChar; /* the next char in the iteration to test */ michael@0: int32_t prevPos; /* holds the previous position before move forward of the source iterator */ michael@0: if(U16_IS_LEAD(schar) && source->iterator->hasNext(source->iterator)) { michael@0: prevPos = source->iterator->index; michael@0: surrNextChar = getNextNormalizedChar(source); michael@0: if (U16_IS_TRAIL(surrNextChar)) { michael@0: miss = U16_GET_SUPPLEMENTARY(schar, surrNextChar); michael@0: } else if (prevPos < source->iterator->index){ michael@0: goBackOne(source); michael@0: } michael@0: } michael@0: } else if (U16_IS_LEAD(schar)) { michael@0: miss = U16_GET_SUPPLEMENTARY(schar, getNextNormalizedChar(source)); michael@0: } michael@0: michael@0: uint8_t sCC; michael@0: if (miss < 0x300 || michael@0: maxCC == 0 || michael@0: (sCC = i_getCombiningClass(miss, coll)) == 0 || michael@0: sCC>maxCC || michael@0: (allSame != 0 && sCC == maxCC) || michael@0: collIter_eos(source)) michael@0: { michael@0: // Contraction can not be discontiguous. michael@0: goBackOne(source); // back up the source string by one, michael@0: // because the character we just looked at was michael@0: // not part of the contraction. */ michael@0: if(U_IS_SUPPLEMENTARY(miss)) { michael@0: goBackOne(source); michael@0: } michael@0: CE = *(coll->contractionCEs + michael@0: (ContractionStart - coll->contractionIndex)); michael@0: } else { michael@0: // michael@0: // Contraction is possibly discontiguous. 
michael@0: // Scan more of source string looking for a match michael@0: // michael@0: UChar tempchar; michael@0: /* find the next character if schar is not a base character michael@0: and we are not yet at the end of the string */ michael@0: tempchar = getNextNormalizedChar(source); michael@0: // probably need another supplementary thingie here michael@0: goBackOne(source); michael@0: if (i_getCombiningClass(tempchar, coll) == 0) { michael@0: goBackOne(source); michael@0: if(U_IS_SUPPLEMENTARY(miss)) { michael@0: goBackOne(source); michael@0: } michael@0: /* Spit out the last char of the string, wasn't tasty enough */ michael@0: CE = *(coll->contractionCEs + michael@0: (ContractionStart - coll->contractionIndex)); michael@0: } else { michael@0: CE = getDiscontiguous(coll, source, ContractionStart); michael@0: } michael@0: } michael@0: } // else after if(schar == tchar) michael@0: michael@0: if(CE == UCOL_NOT_FOUND) { michael@0: /* The Source string did not match the contraction that we were checking. */ michael@0: /* Back up the source position to undo the effects of having partially */ michael@0: /* scanned through what ultimately proved to not be a contraction. */ michael@0: loadState(source, &state, TRUE); michael@0: CE = firstCE; michael@0: break; michael@0: } michael@0: michael@0: if(!isContraction(CE)) { michael@0: // The source string char was in the contraction table, and the corresponding michael@0: // CE is not a contraction CE. We completed the contraction, break michael@0: // out of loop, this CE will end up being returned. This is the normal michael@0: // way out of contraction handling when the source actually contained michael@0: // the contraction. michael@0: break; michael@0: } michael@0: michael@0: michael@0: // The source string char was in the contraction table, and the corresponding michael@0: // CE is IS a contraction CE. We will continue looping to check the source michael@0: // string for the remaining chars in the contraction. michael@0: uint32_t tempCE = *(coll->contractionCEs + (ContractionStart - coll->contractionIndex)); michael@0: if(tempCE != UCOL_NOT_FOUND) { michael@0: // We have scanned a a section of source string for which there is a michael@0: // CE from the contraction table. Remember the CE and scan position, so michael@0: // that we can return to this point if further scanning fails to michael@0: // match a longer contraction sequence. michael@0: firstCE = tempCE; michael@0: michael@0: goBackOne(source); michael@0: backupState(source, &state); michael@0: getNextNormalizedChar(source); michael@0: michael@0: // Another way to do this is: michael@0: //collIterateState tempState; michael@0: //backupState(source, &tempState); michael@0: //goBackOne(source); michael@0: //backupState(source, &state); michael@0: //loadState(source, &tempState, TRUE); michael@0: michael@0: // The problem is that for incomplete contractions we have to remember the previous michael@0: // position. Before, the only thing I needed to do was state.pos--; michael@0: // After iterator introduction and especially after introduction of normalizing michael@0: // iterators, it became much more difficult to decrease the saved state. michael@0: // I'm not yet sure which of the two methods above is faster. 
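/*
 * The condition earlier in this case (the "miss < 0x300 || maxCC == 0 || ..."
 * check) that decides whether a non-matching character may still take part in
 * a *discontiguous* contraction can be read as a single predicate: the
 * character must be a combining mark (>= U+0300 with ccc != 0), its class must
 * not exceed the row's maxCC (equality allowed only when the row's allSame
 * flag is 0), and more input must be available. Condensed sketch
 * (canContinueDiscontiguous is an illustrative name, not ICU API):
 *
 *     static UBool canContinueDiscontiguous(UChar32 c, uint8_t ccc,
 *                                           uint8_t maxCC, uint8_t allSame,
 *                                           UBool atEnd) {
 *         if (atEnd || c < 0x300 || maxCC == 0 || ccc == 0) { return FALSE; }
 *         if (ccc > maxCC) { return FALSE; }
 *         if (allSame != 0 && ccc == maxCC) { return FALSE; }
 *         return TRUE;
 *     }
 */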
michael@0: } michael@0: } // for(;;) michael@0: break; michael@0: } // case CONTRACTION_TAG: michael@0: case LONG_PRIMARY_TAG: michael@0: { michael@0: *(source->CEpos++) = ((CE & 0xFF)<<24)|UCOL_CONTINUATION_MARKER; michael@0: CE = ((CE & 0xFFFF00) << 8) | (UCOL_BYTE_COMMON << 8) | UCOL_BYTE_COMMON; michael@0: source->offsetRepeatCount += 1; michael@0: return CE; michael@0: } michael@0: case EXPANSION_TAG: michael@0: { michael@0: /* This should handle expansion. */ michael@0: /* NOTE: we can encounter both continuations and expansions in an expansion! */ michael@0: /* I have to decide where continuations are going to be dealt with */ michael@0: uint32_t size; michael@0: uint32_t i; /* general counter */ michael@0: michael@0: CEOffset = (uint32_t *)coll->image+getExpansionOffset(CE); /* find the offset to expansion table */ michael@0: size = getExpansionCount(CE); michael@0: CE = *CEOffset++; michael@0: //source->offsetRepeatCount = -1; michael@0: michael@0: if(size != 0) { /* if there are less than 16 elements in expansion, we don't terminate */ michael@0: for(i = 1; iCEpos++) = *CEOffset++; michael@0: source->offsetRepeatCount += 1; michael@0: } michael@0: } else { /* else, we do */ michael@0: while(*CEOffset != 0) { michael@0: *(source->CEpos++) = *CEOffset++; michael@0: source->offsetRepeatCount += 1; michael@0: } michael@0: } michael@0: michael@0: return CE; michael@0: } michael@0: case DIGIT_TAG: michael@0: { michael@0: /* michael@0: We do a check to see if we want to collate digits as numbers; if so we generate michael@0: a custom collation key. Otherwise we pull out the value stored in the expansion table. michael@0: */ michael@0: //uint32_t size; michael@0: uint32_t i; /* general counter */ michael@0: michael@0: if (source->coll->numericCollation == UCOL_ON){ michael@0: collIterateState digitState = {0,0,0,0,0,0,0,0,0}; michael@0: UChar32 char32 = 0; michael@0: int32_t digVal = 0; michael@0: michael@0: uint32_t digIndx = 0; michael@0: uint32_t endIndex = 0; michael@0: uint32_t trailingZeroIndex = 0; michael@0: michael@0: uint8_t collateVal = 0; michael@0: michael@0: UBool nonZeroValReached = FALSE; michael@0: michael@0: uint8_t numTempBuf[UCOL_MAX_DIGITS_FOR_NUMBER/2 + 3]; // I just need a temporary place to store my generated CEs. michael@0: /* michael@0: We parse the source string until we hit a char that's NOT a digit. michael@0: Use this u_charDigitValue. This might be slow because we have to michael@0: handle surrogates... michael@0: */ michael@0: /* michael@0: if (U16_IS_LEAD(ch)){ michael@0: if (!collIter_eos(source)) { michael@0: backupState(source, &digitState); michael@0: UChar trail = getNextNormalizedChar(source); michael@0: if(U16_IS_TRAIL(trail)) { michael@0: char32 = U16_GET_SUPPLEMENTARY(ch, trail); michael@0: } else { michael@0: loadState(source, &digitState, TRUE); michael@0: char32 = ch; michael@0: } michael@0: } else { michael@0: char32 = ch; michael@0: } michael@0: } else { michael@0: char32 = ch; michael@0: } michael@0: digVal = u_charDigitValue(char32); michael@0: */ michael@0: digVal = u_charDigitValue(cp); // if we have arrived here, we have michael@0: // already processed possible supplementaries that trigered the digit tag - michael@0: // all supplementaries are marked in the UCA. michael@0: /* michael@0: We pad a zero in front of the first element anyways. 
This takes michael@0: care of the (probably) most common case where people are sorting things followed michael@0: by a single digit michael@0: */ michael@0: digIndx++; michael@0: for(;;){ michael@0: // Make sure we have enough space. No longer needed; michael@0: // at this point digIndx now has a max value of UCOL_MAX_DIGITS_FOR_NUMBER michael@0: // (it has been pre-incremented) so we just ensure that numTempBuf is big enough michael@0: // (UCOL_MAX_DIGITS_FOR_NUMBER/2 + 3). michael@0: michael@0: // Skipping over leading zeroes. michael@0: if (digVal != 0) { michael@0: nonZeroValReached = TRUE; michael@0: } michael@0: if (nonZeroValReached) { michael@0: /* michael@0: We parse the digit string into base 100 numbers (this fits into a byte). michael@0: We only add to the buffer in twos, thus if we are parsing an odd character, michael@0: that serves as the 'tens' digit while the if we are parsing an even one, that michael@0: is the 'ones' digit. We dumped the parsed base 100 value (collateVal) into michael@0: a buffer. We multiply each collateVal by 2 (to give us room) and add 5 (to avoid michael@0: overlapping magic CE byte values). The last byte we subtract 1 to ensure it is less michael@0: than all the other bytes. michael@0: */ michael@0: michael@0: if (digIndx % 2 == 1){ michael@0: collateVal += (uint8_t)digVal; michael@0: michael@0: // We don't enter the low-order-digit case unless we've already seen michael@0: // the high order, or for the first digit, which is always non-zero. michael@0: if (collateVal != 0) michael@0: trailingZeroIndex = 0; michael@0: michael@0: numTempBuf[(digIndx/2) + 2] = collateVal*2 + 6; michael@0: collateVal = 0; michael@0: } michael@0: else{ michael@0: // We drop the collation value into the buffer so if we need to do michael@0: // a "front patch" we don't have to check to see if we're hitting the michael@0: // last element. michael@0: collateVal = (uint8_t)(digVal * 10); michael@0: michael@0: // Check for trailing zeroes. michael@0: if (collateVal == 0) michael@0: { michael@0: if (!trailingZeroIndex) michael@0: trailingZeroIndex = (digIndx/2) + 2; michael@0: } michael@0: else michael@0: trailingZeroIndex = 0; michael@0: michael@0: numTempBuf[(digIndx/2) + 2] = collateVal*2 + 6; michael@0: } michael@0: digIndx++; michael@0: } michael@0: michael@0: // Get next character. michael@0: if (!collIter_eos(source)){ michael@0: ch = getNextNormalizedChar(source); michael@0: if (U16_IS_LEAD(ch)){ michael@0: if (!collIter_eos(source)) { michael@0: backupState(source, &digitState); michael@0: UChar trail = getNextNormalizedChar(source); michael@0: if(U16_IS_TRAIL(trail)) { michael@0: char32 = U16_GET_SUPPLEMENTARY(ch, trail); michael@0: } else { michael@0: loadState(source, &digitState, TRUE); michael@0: char32 = ch; michael@0: } michael@0: } michael@0: } else { michael@0: char32 = ch; michael@0: } michael@0: michael@0: if ((digVal = u_charDigitValue(char32)) == -1 || digIndx > UCOL_MAX_DIGITS_FOR_NUMBER){ michael@0: // Resetting position to point to the next unprocessed char. We michael@0: // overshot it when doing our test/set for numbers. michael@0: if (char32 > 0xFFFF) { // For surrogates. 
michael@0: loadState(source, &digitState, TRUE); michael@0: //goBackOne(source); michael@0: } michael@0: goBackOne(source); michael@0: break; michael@0: } michael@0: } else { michael@0: break; michael@0: } michael@0: } michael@0: michael@0: if (nonZeroValReached == FALSE){ michael@0: digIndx = 2; michael@0: numTempBuf[2] = 6; michael@0: } michael@0: michael@0: endIndex = trailingZeroIndex ? trailingZeroIndex : ((digIndx/2) + 2) ; michael@0: if (digIndx % 2 != 0){ michael@0: /* michael@0: We missed a value. Since digIndx isn't even, stuck too many values into the buffer (this is what michael@0: we get for padding the first byte with a zero). "Front-patch" now by pushing all nybbles forward. michael@0: Doing it this way ensures that at least 50% of the time (statistically speaking) we'll only be doing a michael@0: single pass and optimizes for strings with single digits. I'm just assuming that's the more common case. michael@0: */ michael@0: michael@0: for(i = 2; i < endIndex; i++){ michael@0: numTempBuf[i] = (((((numTempBuf[i] - 6)/2) % 10) * 10) + michael@0: (((numTempBuf[i+1])-6)/2) / 10) * 2 + 6; michael@0: } michael@0: --digIndx; michael@0: } michael@0: michael@0: // Subtract one off of the last byte. michael@0: numTempBuf[endIndex-1] -= 1; michael@0: michael@0: /* michael@0: We want to skip over the first two slots in the buffer. The first slot michael@0: is reserved for the header byte UCOL_CODAN_PLACEHOLDER. The second slot is for the michael@0: sign/exponent byte: 0x80 + (decimalPos/2) & 7f. michael@0: */ michael@0: numTempBuf[0] = UCOL_CODAN_PLACEHOLDER; michael@0: numTempBuf[1] = (uint8_t)(0x80 + ((digIndx/2) & 0x7F)); michael@0: michael@0: // Now transfer the collation key to our collIterate struct. michael@0: // The total size for our collation key is endIndx bumped up to the next largest even value divided by two. michael@0: //size = ((endIndex+1) & ~1)/2; michael@0: CE = (((numTempBuf[0] << 8) | numTempBuf[1]) << UCOL_PRIMARYORDERSHIFT) | //Primary weight michael@0: (UCOL_BYTE_COMMON << UCOL_SECONDARYORDERSHIFT) | // Secondary weight michael@0: UCOL_BYTE_COMMON; // Tertiary weight. michael@0: i = 2; // Reset the index into the buffer. michael@0: while(i < endIndex) michael@0: { michael@0: uint32_t primWeight = numTempBuf[i++] << 8; michael@0: if ( i < endIndex) michael@0: primWeight |= numTempBuf[i++]; michael@0: *(source->CEpos++) = (primWeight << UCOL_PRIMARYORDERSHIFT) | UCOL_CONTINUATION_MARKER; michael@0: } michael@0: michael@0: } else { michael@0: // no numeric mode, we'll just switch to whatever we stashed and continue michael@0: CEOffset = (uint32_t *)coll->image+getExpansionOffset(CE); /* find the offset to expansion table */ michael@0: CE = *CEOffset++; michael@0: break; michael@0: } michael@0: return CE; michael@0: } michael@0: /* various implicits optimization */ michael@0: case IMPLICIT_TAG: /* everything that is not defined otherwise */ michael@0: /* UCA is filled with these. 
Tailorings are NOT_FOUND */ michael@0: return getImplicit(cp, source); michael@0: case CJK_IMPLICIT_TAG: /* 0x3400-0x4DB5, 0x4E00-0x9FA5, 0xF900-0xFA2D*/ michael@0: // TODO: remove CJK_IMPLICIT_TAG completely - handled by the getImplicit michael@0: return getImplicit(cp, source); michael@0: case HANGUL_SYLLABLE_TAG: /* AC00-D7AF*/ michael@0: { michael@0: static const uint32_t michael@0: SBase = 0xAC00, LBase = 0x1100, VBase = 0x1161, TBase = 0x11A7; michael@0: //const uint32_t LCount = 19; michael@0: static const uint32_t VCount = 21; michael@0: static const uint32_t TCount = 28; michael@0: //const uint32_t NCount = VCount * TCount; // 588 michael@0: //const uint32_t SCount = LCount * NCount; // 11172 michael@0: uint32_t L = ch - SBase; michael@0: michael@0: // divide into pieces michael@0: michael@0: uint32_t T = L % TCount; // we do it in this order since some compilers can do % and / in one operation michael@0: L /= TCount; michael@0: uint32_t V = L % VCount; michael@0: L /= VCount; michael@0: michael@0: // offset them michael@0: michael@0: L += LBase; michael@0: V += VBase; michael@0: T += TBase; michael@0: michael@0: // return the first CE, but first put the rest into the expansion buffer michael@0: if (!source->coll->image->jamoSpecial) { // FAST PATH michael@0: michael@0: *(source->CEpos++) = UTRIE_GET32_FROM_LEAD(&coll->mapping, V); michael@0: if (T != TBase) { michael@0: *(source->CEpos++) = UTRIE_GET32_FROM_LEAD(&coll->mapping, T); michael@0: } michael@0: michael@0: return UTRIE_GET32_FROM_LEAD(&coll->mapping, L); michael@0: michael@0: } else { // Jamo is Special michael@0: // Since Hanguls pass the FCD check, it is michael@0: // guaranteed that we won't be in michael@0: // the normalization buffer if something like this happens michael@0: michael@0: // However, if we are using a uchar iterator and normalization michael@0: // is ON, the Hangul that lead us here is going to be in that michael@0: // normalization buffer. Here we want to restore the uchar michael@0: // iterator state and pull out of the normalization buffer michael@0: if(source->iterator != NULL && source->flags & UCOL_ITER_INNORMBUF) { michael@0: source->flags = source->origFlags; // restore the iterator michael@0: source->pos = NULL; michael@0: } michael@0: michael@0: // Move Jamos into normalization buffer michael@0: UChar *buffer = source->writableBuffer.getBuffer(4); michael@0: int32_t bufferLength; michael@0: buffer[0] = (UChar)L; michael@0: buffer[1] = (UChar)V; michael@0: if (T != TBase) { michael@0: buffer[2] = (UChar)T; michael@0: bufferLength = 3; michael@0: } else { michael@0: bufferLength = 2; michael@0: } michael@0: source->writableBuffer.releaseBuffer(bufferLength); michael@0: michael@0: // Indicate where to continue in main input string after exhausting the writableBuffer michael@0: source->fcdPosition = source->pos; michael@0: michael@0: source->pos = source->writableBuffer.getTerminatedBuffer(); michael@0: source->origFlags = source->flags; michael@0: source->flags |= UCOL_ITER_INNORMBUF; michael@0: source->flags &= ~(UCOL_ITER_NORM | UCOL_ITER_HASLEN); michael@0: michael@0: return(UCOL_IGNORABLE); michael@0: } michael@0: } michael@0: case SURROGATE_TAG: michael@0: /* we encountered a leading surrogate. We shall get the CE by using the following code unit */ michael@0: /* two things can happen here: next code point can be a trailing surrogate - we will use it */ michael@0: /* to retrieve the CE, or it is not a trailing surrogate (or the string is done). 
In that case */ michael@0: /* we treat it like an unassigned code point. */ michael@0: { michael@0: UChar trail; michael@0: collIterateState state; michael@0: backupState(source, &state); michael@0: if (collIter_eos(source) || !(U16_IS_TRAIL((trail = getNextNormalizedChar(source))))) { michael@0: // we chould have stepped one char forward and it might have turned that it michael@0: // was not a trail surrogate. In that case, we have to backup. michael@0: loadState(source, &state, TRUE); michael@0: return UCOL_NOT_FOUND; michael@0: } else { michael@0: /* TODO: CE contain the data from the previous CE + the mask. It should at least be unmasked */ michael@0: CE = UTRIE_GET32_FROM_OFFSET_TRAIL(&coll->mapping, CE&0xFFFFFF, trail); michael@0: if(CE == UCOL_NOT_FOUND) { // there are tailored surrogates in this block, but not this one. michael@0: // We need to backup michael@0: loadState(source, &state, TRUE); michael@0: return CE; michael@0: } michael@0: // calculate the supplementary code point value, if surrogate was not tailored michael@0: cp = ((((uint32_t)ch)<<10UL)+(trail)-(((uint32_t)0xd800<<10UL)+0xdc00-0x10000)); michael@0: } michael@0: } michael@0: break; michael@0: case LEAD_SURROGATE_TAG: /* D800-DBFF*/ michael@0: UChar nextChar; michael@0: if( source->flags & UCOL_USE_ITERATOR) { michael@0: if(U_IS_TRAIL(nextChar = (UChar)source->iterator->current(source->iterator))) { michael@0: cp = U16_GET_SUPPLEMENTARY(ch, nextChar); michael@0: source->iterator->next(source->iterator); michael@0: return getImplicit(cp, source); michael@0: } michael@0: } else if((((source->flags & UCOL_ITER_HASLEN) == 0 ) || (source->posendp)) && michael@0: U_IS_TRAIL((nextChar=*source->pos))) { michael@0: cp = U16_GET_SUPPLEMENTARY(ch, nextChar); michael@0: source->pos++; michael@0: return getImplicit(cp, source); michael@0: } michael@0: return UCOL_NOT_FOUND; michael@0: case TRAIL_SURROGATE_TAG: /* DC00-DFFF*/ michael@0: return UCOL_NOT_FOUND; /* broken surrogate sequence */ michael@0: case CHARSET_TAG: michael@0: /* not yet implemented */ michael@0: /* probably after 1.8 */ michael@0: return UCOL_NOT_FOUND; michael@0: default: michael@0: *status = U_INTERNAL_PROGRAM_ERROR; michael@0: CE=0; michael@0: break; michael@0: } michael@0: if (CE <= UCOL_NOT_FOUND) break; michael@0: } michael@0: return CE; michael@0: } michael@0: michael@0: michael@0: /* now uses Mark's getImplicitPrimary code */ michael@0: static michael@0: inline uint32_t getPrevImplicit(UChar32 cp, collIterate *collationSource) { michael@0: uint32_t r = uprv_uca_getImplicitPrimary(cp); michael@0: michael@0: *(collationSource->CEpos++) = (r & UCOL_PRIMARYMASK) | 0x00000505; michael@0: collationSource->toReturn = collationSource->CEpos; michael@0: michael@0: // **** doesn't work if using iterator **** michael@0: if (collationSource->flags & UCOL_ITER_INNORMBUF) { michael@0: collationSource->offsetRepeatCount = 1; michael@0: } else { michael@0: int32_t firstOffset = (int32_t)(collationSource->pos - collationSource->string); michael@0: michael@0: UErrorCode errorCode = U_ZERO_ERROR; michael@0: collationSource->appendOffset(firstOffset, errorCode); michael@0: collationSource->appendOffset(firstOffset + 1, errorCode); michael@0: michael@0: collationSource->offsetReturn = collationSource->offsetStore - 1; michael@0: *(collationSource->offsetBuffer) = firstOffset; michael@0: if (collationSource->offsetReturn == collationSource->offsetBuffer) { michael@0: collationSource->offsetStore = collationSource->offsetBuffer; michael@0: } michael@0: } michael@0: 
michael@0: return ((r & 0x0000FFFF)<<16) | 0x000000C0; michael@0: } michael@0: michael@0: /** michael@0: * This function handles the special CEs like contractions, expansions, michael@0: * surrogates, Thai. michael@0: * It is called by both getPrevCE michael@0: */ michael@0: uint32_t ucol_prv_getSpecialPrevCE(const UCollator *coll, UChar ch, uint32_t CE, michael@0: collIterate *source, michael@0: UErrorCode *status) michael@0: { michael@0: const uint32_t *CEOffset = NULL; michael@0: UChar *UCharOffset = NULL; michael@0: UChar schar; michael@0: const UChar *constart = NULL; michael@0: uint32_t size; michael@0: UChar buffer[UCOL_MAX_BUFFER]; michael@0: uint32_t *endCEBuffer; michael@0: UChar *strbuffer; michael@0: int32_t noChars = 0; michael@0: int32_t CECount = 0; michael@0: michael@0: for(;;) michael@0: { michael@0: /* the only ces that loops are thai and contractions */ michael@0: switch (getCETag(CE)) michael@0: { michael@0: case NOT_FOUND_TAG: /* this tag always returns */ michael@0: return CE; michael@0: michael@0: case SPEC_PROC_TAG: michael@0: { michael@0: // Special processing is getting a CE that is preceded by a certain prefix michael@0: // Currently this is only needed for optimizing Japanese length and iteration marks. michael@0: // When we encouter a special processing tag, we go backwards and try to see if michael@0: // we have a match. michael@0: // Contraction tables are used - so the whole process is not unlike contraction. michael@0: // prefix data is stored backwards in the table. michael@0: const UChar *UCharOffset; michael@0: UChar schar, tchar; michael@0: collIterateState prefixState; michael@0: backupState(source, &prefixState); michael@0: for(;;) { michael@0: // This loop will run once per source string character, for as long as we michael@0: // are matching a potential contraction sequence michael@0: michael@0: // First we position ourselves at the begining of contraction sequence michael@0: const UChar *ContractionStart = UCharOffset = (UChar *)coll->image+getContractOffset(CE); michael@0: michael@0: if (collIter_bos(source)) { michael@0: CE = *(coll->contractionCEs + (UCharOffset - coll->contractionIndex)); michael@0: break; michael@0: } michael@0: schar = getPrevNormalizedChar(source, status); michael@0: goBackOne(source); michael@0: michael@0: while(schar > (tchar = *UCharOffset)) { /* since the contraction codepoints should be ordered, we skip all that are smaller */ michael@0: UCharOffset++; michael@0: } michael@0: michael@0: if (schar == tchar) { michael@0: // Found the source string char in the table. michael@0: // Pick up the corresponding CE from the table. michael@0: CE = *(coll->contractionCEs + michael@0: (UCharOffset - coll->contractionIndex)); michael@0: } michael@0: else michael@0: { michael@0: // if there is a completely ignorable code point in the middle of michael@0: // a prefix, we need to act as if it's not there michael@0: // assumption: 'real' noncharacters (*fffe, *ffff, fdd0-fdef are set to zero) michael@0: // lone surrogates cannot be set to zero as it would break other processing michael@0: uint32_t isZeroCE = UTRIE_GET32_FROM_LEAD(&coll->mapping, schar); michael@0: // it's easy for BMP code points michael@0: if(isZeroCE == 0) { michael@0: continue; michael@0: } else if(U16_IS_SURROGATE(schar)) { michael@0: // for supplementary code points, we have to check the next one michael@0: // situations where we are going to ignore michael@0: // 1. beginning of the string: schar is a lone surrogate michael@0: // 2. 
schar is a lone surrogate michael@0: // 3. schar is a trail surrogate in a valid surrogate sequence michael@0: // that is explicitly set to zero. michael@0: if (!collIter_bos(source)) { michael@0: UChar lead; michael@0: if(!U16_IS_SURROGATE_LEAD(schar) && U16_IS_LEAD(lead = getPrevNormalizedChar(source, status))) { michael@0: isZeroCE = UTRIE_GET32_FROM_LEAD(&coll->mapping, lead); michael@0: if(isSpecial(isZeroCE) && getCETag(isZeroCE) == SURROGATE_TAG) { michael@0: uint32_t finalCE = UTRIE_GET32_FROM_OFFSET_TRAIL(&coll->mapping, isZeroCE&0xFFFFFF, schar); michael@0: if(finalCE == 0) { michael@0: // this is a real, assigned completely ignorable code point michael@0: goBackOne(source); michael@0: continue; michael@0: } michael@0: } michael@0: } else { michael@0: // lone surrogate, treat like unassigned michael@0: return UCOL_NOT_FOUND; michael@0: } michael@0: } else { michael@0: // lone surrogate at the beggining, treat like unassigned michael@0: return UCOL_NOT_FOUND; michael@0: } michael@0: } michael@0: // Source string char was not in the table. michael@0: // We have not found the prefix. michael@0: CE = *(coll->contractionCEs + michael@0: (ContractionStart - coll->contractionIndex)); michael@0: } michael@0: michael@0: if(!isPrefix(CE)) { michael@0: // The source string char was in the contraction table, and the corresponding michael@0: // CE is not a prefix CE. We found the prefix, break michael@0: // out of loop, this CE will end up being returned. This is the normal michael@0: // way out of prefix handling when the source actually contained michael@0: // the prefix. michael@0: break; michael@0: } michael@0: } michael@0: loadState(source, &prefixState, TRUE); michael@0: break; michael@0: } michael@0: michael@0: case CONTRACTION_TAG: { michael@0: /* to ensure that the backwards and forwards iteration matches, we michael@0: take the current region of most possible match and pass it through michael@0: the forward iteration. this will ensure that the obstinate problem of michael@0: overlapping contractions will not occur. michael@0: */ michael@0: schar = peekCodeUnit(source, 0); michael@0: constart = (UChar *)coll->image + getContractOffset(CE); michael@0: if (isAtStartPrevIterate(source) michael@0: /* commented away contraction end checks after adding the checks michael@0: in getPrevCE */) { michael@0: /* start of string or this is not the end of any contraction */ michael@0: CE = *(coll->contractionCEs + michael@0: (constart - coll->contractionIndex)); michael@0: break; michael@0: } michael@0: strbuffer = buffer; michael@0: UCharOffset = strbuffer + (UCOL_MAX_BUFFER - 1); michael@0: *(UCharOffset --) = 0; michael@0: noChars = 0; michael@0: // have to swap thai characters michael@0: while (ucol_unsafeCP(schar, coll)) { michael@0: *(UCharOffset) = schar; michael@0: noChars++; michael@0: UCharOffset --; michael@0: schar = getPrevNormalizedChar(source, status); michael@0: goBackOne(source); michael@0: // TODO: when we exhaust the contraction buffer, michael@0: // it needs to get reallocated. The problem is michael@0: // that the size depends on the string which is michael@0: // not iterated over. However, since we're travelling michael@0: // backwards, we already had to set the iterator at michael@0: // the end - so we might as well know where we are? 
michael@0: if (UCharOffset + 1 == buffer) { michael@0: /* we have exhausted the buffer */ michael@0: int32_t newsize = 0; michael@0: if(source->pos) { // actually dealing with a position michael@0: newsize = (int32_t)(source->pos - source->string + 1); michael@0: } else { // iterator michael@0: newsize = 4 * UCOL_MAX_BUFFER; michael@0: } michael@0: strbuffer = (UChar *)uprv_malloc(sizeof(UChar) * michael@0: (newsize + UCOL_MAX_BUFFER)); michael@0: /* test for NULL */ michael@0: if (strbuffer == NULL) { michael@0: *status = U_MEMORY_ALLOCATION_ERROR; michael@0: return UCOL_NO_MORE_CES; michael@0: } michael@0: UCharOffset = strbuffer + newsize; michael@0: uprv_memcpy(UCharOffset, buffer, michael@0: UCOL_MAX_BUFFER * sizeof(UChar)); michael@0: UCharOffset --; michael@0: } michael@0: if ((source->pos && (source->pos == source->string || michael@0: ((source->flags & UCOL_ITER_INNORMBUF) && michael@0: *(source->pos - 1) == 0 && source->fcdPosition == NULL))) michael@0: || (source->iterator && !source->iterator->hasPrevious(source->iterator))) { michael@0: break; michael@0: } michael@0: } michael@0: /* adds the initial base character to the string */ michael@0: *(UCharOffset) = schar; michael@0: noChars++; michael@0: michael@0: int32_t offsetBias; michael@0: michael@0: // **** doesn't work if using iterator **** michael@0: if (source->flags & UCOL_ITER_INNORMBUF) { michael@0: offsetBias = -1; michael@0: } else { michael@0: offsetBias = (int32_t)(source->pos - source->string); michael@0: } michael@0: michael@0: /* a new collIterate is used to simplify things, since using the current michael@0: collIterate will mean that the forward and backwards iteration will michael@0: share and change the same buffers. we don't want to get into that. */ michael@0: collIterate temp; michael@0: int32_t rawOffset; michael@0: michael@0: IInit_collIterate(coll, UCharOffset, noChars, &temp, status); michael@0: if(U_FAILURE(*status)) { michael@0: return (uint32_t)UCOL_NULLORDER; michael@0: } michael@0: temp.flags &= ~UCOL_ITER_NORM; michael@0: temp.flags |= source->flags & UCOL_FORCE_HAN_IMPLICIT; michael@0: michael@0: rawOffset = (int32_t)(temp.pos - temp.string); // should always be zero? michael@0: CE = ucol_IGetNextCE(coll, &temp, status); michael@0: michael@0: if (source->extendCEs) { michael@0: endCEBuffer = source->extendCEs + source->extendCEsSize; michael@0: CECount = (int32_t)((source->CEpos - source->extendCEs)/sizeof(uint32_t)); michael@0: } else { michael@0: endCEBuffer = source->CEs + UCOL_EXPAND_CE_BUFFER_SIZE; michael@0: CECount = (int32_t)((source->CEpos - source->CEs)/sizeof(uint32_t)); michael@0: } michael@0: michael@0: while (CE != UCOL_NO_MORE_CES) { michael@0: *(source->CEpos ++) = CE; michael@0: michael@0: if (offsetBias >= 0) { michael@0: source->appendOffset(rawOffset + offsetBias, *status); michael@0: } michael@0: michael@0: CECount++; michael@0: if (source->CEpos == endCEBuffer) { michael@0: /* ran out of CE space, reallocate to new buffer. 
michael@0: If reallocation fails, reset pointers and bail out, michael@0: there's no guarantee of the right character position after michael@0: this bail*/ michael@0: if (!increaseCEsCapacity(source)) { michael@0: *status = U_MEMORY_ALLOCATION_ERROR; michael@0: break; michael@0: } michael@0: michael@0: endCEBuffer = source->extendCEs + source->extendCEsSize; michael@0: } michael@0: michael@0: if ((temp.flags & UCOL_ITER_INNORMBUF) != 0) { michael@0: rawOffset = (int32_t)(temp.fcdPosition - temp.string); michael@0: } else { michael@0: rawOffset = (int32_t)(temp.pos - temp.string); michael@0: } michael@0: michael@0: CE = ucol_IGetNextCE(coll, &temp, status); michael@0: } michael@0: michael@0: if (strbuffer != buffer) { michael@0: uprv_free(strbuffer); michael@0: } michael@0: if (U_FAILURE(*status)) { michael@0: return (uint32_t)UCOL_NULLORDER; michael@0: } michael@0: michael@0: if (source->offsetRepeatValue != 0) { michael@0: if (CECount > noChars) { michael@0: source->offsetRepeatCount += temp.offsetRepeatCount; michael@0: } else { michael@0: // **** does this really skip the right offsets? **** michael@0: source->offsetReturn -= (noChars - CECount); michael@0: } michael@0: } michael@0: michael@0: if (offsetBias >= 0) { michael@0: source->offsetReturn = source->offsetStore - 1; michael@0: if (source->offsetReturn == source->offsetBuffer) { michael@0: source->offsetStore = source->offsetBuffer; michael@0: } michael@0: } michael@0: michael@0: source->toReturn = source->CEpos - 1; michael@0: if (source->toReturn == source->CEs) { michael@0: source->CEpos = source->CEs; michael@0: } michael@0: michael@0: return *(source->toReturn); michael@0: } michael@0: case LONG_PRIMARY_TAG: michael@0: { michael@0: *(source->CEpos++) = ((CE & 0xFFFF00) << 8) | (UCOL_BYTE_COMMON << 8) | UCOL_BYTE_COMMON; michael@0: *(source->CEpos++) = ((CE & 0xFF)<<24)|UCOL_CONTINUATION_MARKER; michael@0: source->toReturn = source->CEpos - 1; michael@0: michael@0: if (source->flags & UCOL_ITER_INNORMBUF) { michael@0: source->offsetRepeatCount = 1; michael@0: } else { michael@0: int32_t firstOffset = (int32_t)(source->pos - source->string); michael@0: michael@0: source->appendOffset(firstOffset, *status); michael@0: source->appendOffset(firstOffset + 1, *status); michael@0: michael@0: source->offsetReturn = source->offsetStore - 1; michael@0: *(source->offsetBuffer) = firstOffset; michael@0: if (source->offsetReturn == source->offsetBuffer) { michael@0: source->offsetStore = source->offsetBuffer; michael@0: } michael@0: } michael@0: michael@0: michael@0: return *(source->toReturn); michael@0: } michael@0: michael@0: case EXPANSION_TAG: /* this tag always returns */ michael@0: { michael@0: /* michael@0: This should handle expansion. michael@0: NOTE: we can encounter both continuations and expansions in an expansion! michael@0: I have to decide where continuations are going to be dealt with michael@0: */ michael@0: int32_t firstOffset = (int32_t)(source->pos - source->string); michael@0: michael@0: // **** doesn't work if using iterator **** michael@0: if (source->offsetReturn != NULL) { michael@0: if (! 
(source->flags & UCOL_ITER_INNORMBUF) && source->offsetReturn == source->offsetBuffer) { michael@0: source->offsetStore = source->offsetBuffer; michael@0: }else { michael@0: firstOffset = -1; michael@0: } michael@0: } michael@0: michael@0: /* find the offset to expansion table */ michael@0: CEOffset = (uint32_t *)coll->image + getExpansionOffset(CE); michael@0: size = getExpansionCount(CE); michael@0: if (size != 0) { michael@0: /* michael@0: if there are less than 16 elements in expansion, we don't terminate michael@0: */ michael@0: uint32_t count; michael@0: michael@0: for (count = 0; count < size; count++) { michael@0: *(source->CEpos ++) = *CEOffset++; michael@0: michael@0: if (firstOffset >= 0) { michael@0: source->appendOffset(firstOffset + 1, *status); michael@0: } michael@0: } michael@0: } else { michael@0: /* else, we do */ michael@0: while (*CEOffset != 0) { michael@0: *(source->CEpos ++) = *CEOffset ++; michael@0: michael@0: if (firstOffset >= 0) { michael@0: source->appendOffset(firstOffset + 1, *status); michael@0: } michael@0: } michael@0: } michael@0: michael@0: if (firstOffset >= 0) { michael@0: source->offsetReturn = source->offsetStore - 1; michael@0: *(source->offsetBuffer) = firstOffset; michael@0: if (source->offsetReturn == source->offsetBuffer) { michael@0: source->offsetStore = source->offsetBuffer; michael@0: } michael@0: } else { michael@0: source->offsetRepeatCount += size - 1; michael@0: } michael@0: michael@0: source->toReturn = source->CEpos - 1; michael@0: // in case of one element expansion, we michael@0: // want to immediately return CEpos michael@0: if(source->toReturn == source->CEs) { michael@0: source->CEpos = source->CEs; michael@0: } michael@0: michael@0: return *(source->toReturn); michael@0: } michael@0: michael@0: case DIGIT_TAG: michael@0: { michael@0: /* michael@0: We do a check to see if we want to collate digits as numbers; if so we generate michael@0: a custom collation key. Otherwise we pull out the value stored in the expansion table. michael@0: */ michael@0: uint32_t i; /* general counter */ michael@0: michael@0: if (source->coll->numericCollation == UCOL_ON){ michael@0: uint32_t digIndx = 0; michael@0: uint32_t endIndex = 0; michael@0: uint32_t leadingZeroIndex = 0; michael@0: uint32_t trailingZeroCount = 0; michael@0: michael@0: uint8_t collateVal = 0; michael@0: michael@0: UBool nonZeroValReached = FALSE; michael@0: michael@0: uint8_t numTempBuf[UCOL_MAX_DIGITS_FOR_NUMBER/2 + 2]; // I just need a temporary place to store my generated CEs. michael@0: /* michael@0: We parse the source string until we hit a char that's NOT a digit. michael@0: Use this u_charDigitValue. This might be slow because we have to michael@0: handle surrogates... michael@0: */ michael@0: /* michael@0: We need to break up the digit string into collection elements of UCOL_MAX_DIGITS_FOR_NUMBER or less, michael@0: with any chunks smaller than that being on the right end of the digit string - i.e. the first collation michael@0: element we process when going backward. To determine how long that chunk might be, we may need to make michael@0: two passes through the loop that collects digits - one to see how long the string is (and how much is michael@0: leading zeros) to determine the length of that right-hand chunk, and a second (if the whole string has michael@0: more than UCOL_MAX_DIGITS_FOR_NUMBER non-leading-zero digits) to actually process that collation michael@0: element chunk after resetting the state to the initialState at the right side of the digit string. 
michael@0: */ michael@0: uint32_t ceLimit = 0; michael@0: UChar initial_ch = ch; michael@0: collIterateState initialState = {0,0,0,0,0,0,0,0,0}; michael@0: backupState(source, &initialState); michael@0: michael@0: for(;;) { michael@0: collIterateState state = {0,0,0,0,0,0,0,0,0}; michael@0: UChar32 char32 = 0; michael@0: int32_t digVal = 0; michael@0: michael@0: if (U16_IS_TRAIL (ch)) { michael@0: if (!collIter_bos(source)){ michael@0: UChar lead = getPrevNormalizedChar(source, status); michael@0: if(U16_IS_LEAD(lead)) { michael@0: char32 = U16_GET_SUPPLEMENTARY(lead,ch); michael@0: goBackOne(source); michael@0: } else { michael@0: char32 = ch; michael@0: } michael@0: } else { michael@0: char32 = ch; michael@0: } michael@0: } else { michael@0: char32 = ch; michael@0: } michael@0: digVal = u_charDigitValue(char32); michael@0: michael@0: for(;;) { michael@0: // Make sure we have enough space. No longer needed; michael@0: // at this point the largest value of digIndx when we need to save data in numTempBuf michael@0: // is UCOL_MAX_DIGITS_FOR_NUMBER-1 (digIndx is post-incremented) so we just ensure michael@0: // that numTempBuf is big enough (UCOL_MAX_DIGITS_FOR_NUMBER/2 + 2). michael@0: michael@0: // Skip over trailing zeroes, and keep a count of them. michael@0: if (digVal != 0) michael@0: nonZeroValReached = TRUE; michael@0: michael@0: if (nonZeroValReached) { michael@0: /* michael@0: We parse the digit string into base 100 numbers (this fits into a byte). michael@0: We only add to the buffer in twos, thus if we are parsing an odd character, michael@0: that serves as the 'tens' digit while the if we are parsing an even one, that michael@0: is the 'ones' digit. We dumped the parsed base 100 value (collateVal) into michael@0: a buffer. We multiply each collateVal by 2 (to give us room) and add 5 (to avoid michael@0: overlapping magic CE byte values). The last byte we subtract 1 to ensure it is less michael@0: than all the other bytes. michael@0: michael@0: Since we're doing in this reverse we want to put the first digit encountered into the michael@0: ones place and the second digit encountered into the tens place. michael@0: */ michael@0: michael@0: if ((digIndx + trailingZeroCount) % 2 == 1) { michael@0: // High-order digit case (tens place) michael@0: collateVal += (uint8_t)(digVal * 10); michael@0: michael@0: // We cannot set leadingZeroIndex unless it has been set for the michael@0: // low-order digit. Therefore, all we can do for the high-order michael@0: // digit is turn it off, never on. michael@0: // The only time we will have a high digit without a low is for michael@0: // the very first non-zero digit, so no zero check is necessary. michael@0: if (collateVal != 0) michael@0: leadingZeroIndex = 0; michael@0: michael@0: // The first pass through, digIndx may exceed the limit, but in that case michael@0: // we no longer care about numTempBuf contents since they will be discarded michael@0: if ( digIndx < UCOL_MAX_DIGITS_FOR_NUMBER ) { michael@0: numTempBuf[(digIndx/2) + 2] = collateVal*2 + 6; michael@0: } michael@0: collateVal = 0; michael@0: } else { michael@0: // Low-order digit case (ones place) michael@0: collateVal = (uint8_t)digVal; michael@0: michael@0: // Check for leading zeroes. michael@0: if (collateVal == 0) { michael@0: if (!leadingZeroIndex) michael@0: leadingZeroIndex = (digIndx/2) + 2; michael@0: } else michael@0: leadingZeroIndex = 0; michael@0: michael@0: // No need to write to buffer; the case of a last odd digit michael@0: // is handled below. 
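                            /*
                             * Worked example of the base-100 packing described above (the
                             * forward DIGIT_TAG case uses the same scheme, just scanning
                             * left-to-right): the digit string "1234" forms the pairs 12
                             * and 34; each pair value v is stored as v*2 + 6, and the byte
                             * for the least significant pair has 1 subtracted, marking it
                             * as the final byte of the number:
                             *
                             *     pair 12  ->  12*2 + 6 = 30            (0x1E)
                             *     pair 34  ->  34*2 + 6 = 74, - 1 = 73  (0x49)
                             *
                             * With the leading UCOL_CODAN_PLACEHOLDER byte and the exponent
                             * byte 0x80 + 2 = 0x82 (two base-100 digits, no leading zeroes)
                             * the primary weight bytes come out as
                             * UCOL_CODAN_PLACEHOLDER, 0x82, 0x1E, 0x49.
                             */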
michael@0: } michael@0: ++digIndx; michael@0: } else michael@0: ++trailingZeroCount; michael@0: michael@0: if (!collIter_bos(source)) { michael@0: ch = getPrevNormalizedChar(source, status); michael@0: //goBackOne(source); michael@0: if (U16_IS_TRAIL(ch)) { michael@0: backupState(source, &state); michael@0: if (!collIter_bos(source)) { michael@0: goBackOne(source); michael@0: UChar lead = getPrevNormalizedChar(source, status); michael@0: michael@0: if(U16_IS_LEAD(lead)) { michael@0: char32 = U16_GET_SUPPLEMENTARY(lead,ch); michael@0: } else { michael@0: loadState(source, &state, FALSE); michael@0: char32 = ch; michael@0: } michael@0: } michael@0: } else michael@0: char32 = ch; michael@0: michael@0: if ((digVal = u_charDigitValue(char32)) == -1 || (ceLimit > 0 && (digIndx + trailingZeroCount) >= ceLimit)) { michael@0: if (char32 > 0xFFFF) {// For surrogates. michael@0: loadState(source, &state, FALSE); michael@0: } michael@0: // Don't need to "reverse" the goBackOne call, michael@0: // as this points to the next position to process.. michael@0: //if (char32 > 0xFFFF) // For surrogates. michael@0: //getNextNormalizedChar(source); michael@0: break; michael@0: } michael@0: michael@0: goBackOne(source); michael@0: }else michael@0: break; michael@0: } michael@0: michael@0: if (digIndx + trailingZeroCount <= UCOL_MAX_DIGITS_FOR_NUMBER) { michael@0: // our collation element is not too big, go ahead and finish with it michael@0: break; michael@0: } michael@0: // our digit string is too long for a collation element; michael@0: // set the limit for it, reset the state and begin again michael@0: ceLimit = (digIndx + trailingZeroCount) % UCOL_MAX_DIGITS_FOR_NUMBER; michael@0: if ( ceLimit == 0 ) { michael@0: ceLimit = UCOL_MAX_DIGITS_FOR_NUMBER; michael@0: } michael@0: ch = initial_ch; michael@0: loadState(source, &initialState, FALSE); michael@0: digIndx = endIndex = leadingZeroIndex = trailingZeroCount = 0; michael@0: collateVal = 0; michael@0: nonZeroValReached = FALSE; michael@0: } michael@0: michael@0: if (! nonZeroValReached) { michael@0: digIndx = 2; michael@0: trailingZeroCount = 0; michael@0: numTempBuf[2] = 6; michael@0: } michael@0: michael@0: if ((digIndx + trailingZeroCount) % 2 != 0) { michael@0: numTempBuf[((digIndx)/2) + 2] = collateVal*2 + 6; michael@0: digIndx += 1; // The implicit leading zero michael@0: } michael@0: if (trailingZeroCount % 2 != 0) { michael@0: // We had to consume one trailing zero for the low digit michael@0: // of the least significant byte michael@0: digIndx += 1; // The trailing zero not in the exponent michael@0: trailingZeroCount -= 1; michael@0: } michael@0: michael@0: endIndex = leadingZeroIndex ? leadingZeroIndex : ((digIndx/2) + 2) ; michael@0: michael@0: // Subtract one off of the last byte. Really the first byte here, but it's reversed... michael@0: numTempBuf[2] -= 1; michael@0: michael@0: /* michael@0: We want to skip over the first two slots in the buffer. The first slot michael@0: is reserved for the header byte UCOL_CODAN_PLACEHOLDER. The second slot is for the michael@0: sign/exponent byte: 0x80 + (decimalPos/2) & 7f. michael@0: The exponent must be adjusted by the number of leading zeroes, and the number of michael@0: trailing zeroes. 
michael@0: */ michael@0: numTempBuf[0] = UCOL_CODAN_PLACEHOLDER; michael@0: uint32_t exponent = (digIndx+trailingZeroCount)/2; michael@0: if (leadingZeroIndex) michael@0: exponent -= ((digIndx/2) + 2 - leadingZeroIndex); michael@0: numTempBuf[1] = (uint8_t)(0x80 + (exponent & 0x7F)); michael@0: michael@0: // Now transfer the collation key to our collIterate struct. michael@0: // The total size for our collation key is half of endIndex, rounded up. michael@0: int32_t size = (endIndex+1)/2; michael@0: if(!ensureCEsCapacity(source, size)) { michael@0: return (uint32_t)UCOL_NULLORDER; michael@0: } michael@0: *(source->CEpos++) = (((numTempBuf[0] << 8) | numTempBuf[1]) << UCOL_PRIMARYORDERSHIFT) | //Primary weight michael@0: (UCOL_BYTE_COMMON << UCOL_SECONDARYORDERSHIFT) | // Secondary weight michael@0: UCOL_BYTE_COMMON; // Tertiary weight. michael@0: i = endIndex - 1; // Reset the index into the buffer. michael@0: while(i >= 2) { michael@0: uint32_t primWeight = numTempBuf[i--] << 8; michael@0: if ( i >= 2) michael@0: primWeight |= numTempBuf[i--]; michael@0: *(source->CEpos++) = (primWeight << UCOL_PRIMARYORDERSHIFT) | UCOL_CONTINUATION_MARKER; michael@0: } michael@0: michael@0: source->toReturn = source->CEpos -1; michael@0: return *(source->toReturn); michael@0: } else { michael@0: CEOffset = (uint32_t *)coll->image + getExpansionOffset(CE); michael@0: CE = *(CEOffset++); michael@0: break; michael@0: } michael@0: } michael@0: michael@0: case HANGUL_SYLLABLE_TAG: /* AC00-D7AF*/ michael@0: { michael@0: static const uint32_t michael@0: SBase = 0xAC00, LBase = 0x1100, VBase = 0x1161, TBase = 0x11A7; michael@0: //const uint32_t LCount = 19; michael@0: static const uint32_t VCount = 21; michael@0: static const uint32_t TCount = 28; michael@0: //const uint32_t NCount = VCount * TCount; /* 588 */ michael@0: //const uint32_t SCount = LCount * NCount; /* 11172 */ michael@0: michael@0: uint32_t L = ch - SBase; michael@0: /* michael@0: divide into pieces. 
michael@0: we do it in this order since some compilers can do % and / in one michael@0: operation michael@0: */ michael@0: uint32_t T = L % TCount; michael@0: L /= TCount; michael@0: uint32_t V = L % VCount; michael@0: L /= VCount; michael@0: michael@0: /* offset them */ michael@0: L += LBase; michael@0: V += VBase; michael@0: T += TBase; michael@0: michael@0: int32_t firstOffset = (int32_t)(source->pos - source->string); michael@0: source->appendOffset(firstOffset, *status); michael@0: michael@0: /* michael@0: * return the first CE, but first put the rest into the expansion buffer michael@0: */ michael@0: if (!source->coll->image->jamoSpecial) { michael@0: *(source->CEpos++) = UTRIE_GET32_FROM_LEAD(&coll->mapping, L); michael@0: *(source->CEpos++) = UTRIE_GET32_FROM_LEAD(&coll->mapping, V); michael@0: source->appendOffset(firstOffset + 1, *status); michael@0: michael@0: if (T != TBase) { michael@0: *(source->CEpos++) = UTRIE_GET32_FROM_LEAD(&coll->mapping, T); michael@0: source->appendOffset(firstOffset + 1, *status); michael@0: } michael@0: michael@0: source->toReturn = source->CEpos - 1; michael@0: michael@0: source->offsetReturn = source->offsetStore - 1; michael@0: if (source->offsetReturn == source->offsetBuffer) { michael@0: source->offsetStore = source->offsetBuffer; michael@0: } michael@0: michael@0: return *(source->toReturn); michael@0: } else { michael@0: // Since Hanguls pass the FCD check, it is michael@0: // guaranteed that we won't be in michael@0: // the normalization buffer if something like this happens michael@0: michael@0: // Move Jamos into normalization buffer michael@0: UChar *tempbuffer = source->writableBuffer.getBuffer(5); michael@0: int32_t tempbufferLength, jamoOffset; michael@0: tempbuffer[0] = 0; michael@0: tempbuffer[1] = (UChar)L; michael@0: tempbuffer[2] = (UChar)V; michael@0: if (T != TBase) { michael@0: tempbuffer[3] = (UChar)T; michael@0: tempbufferLength = 4; michael@0: } else { michael@0: tempbufferLength = 3; michael@0: } michael@0: source->writableBuffer.releaseBuffer(tempbufferLength); michael@0: michael@0: // Indicate where to continue in main input string after exhausting the writableBuffer michael@0: if (source->pos == source->string) { michael@0: jamoOffset = 0; michael@0: source->fcdPosition = NULL; michael@0: } else { michael@0: jamoOffset = source->pos - source->string; michael@0: source->fcdPosition = source->pos-1; michael@0: } michael@0: michael@0: // Append offsets for the additional chars michael@0: // (not the 0, and not the L whose offsets match the original Hangul) michael@0: int32_t jamoRemaining = tempbufferLength - 2; michael@0: jamoOffset++; // appended offsets should match end of original Hangul michael@0: while (jamoRemaining-- > 0) { michael@0: source->appendOffset(jamoOffset, *status); michael@0: } michael@0: michael@0: source->offsetRepeatValue = jamoOffset; michael@0: michael@0: source->offsetReturn = source->offsetStore - 1; michael@0: if (source->offsetReturn == source->offsetBuffer) { michael@0: source->offsetStore = source->offsetBuffer; michael@0: } michael@0: michael@0: source->pos = source->writableBuffer.getTerminatedBuffer() + tempbufferLength; michael@0: source->origFlags = source->flags; michael@0: source->flags |= UCOL_ITER_INNORMBUF; michael@0: source->flags &= ~(UCOL_ITER_NORM | UCOL_ITER_HASLEN); michael@0: michael@0: return(UCOL_IGNORABLE); michael@0: } michael@0: } michael@0: michael@0: case IMPLICIT_TAG: /* everything that is not defined otherwise */ michael@0: return getPrevImplicit(ch, source); 
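/*
 * The Hangul arithmetic used by the HANGUL_SYLLABLE_TAG cases above, as a
 * standalone sketch (decomposeHangul is an illustrative name, not ICU API):
 *
 *     #include <stdint.h>
 *     // Decomposes a precomposed syllable S (U+AC00..U+D7A3) into L, V and
 *     // optionally T jamo. Returns 2 or 3, the number of jamo written.
 *     static int decomposeHangul(uint32_t S, uint32_t jamo[3]) {
 *         const uint32_t SBase = 0xAC00, LBase = 0x1100, VBase = 0x1161, TBase = 0x11A7;
 *         const uint32_t VCount = 21, TCount = 28;
 *         uint32_t i = S - SBase;
 *         uint32_t T = i % TCount;  i /= TCount;
 *         uint32_t V = i % VCount;  i /= VCount;
 *         jamo[0] = LBase + i;
 *         jamo[1] = VBase + V;
 *         if (T == 0) { return 2; }
 *         jamo[2] = TBase + T;
 *         return 3;
 *     }
 *
 * For example, U+D55C decomposes to U+1112, U+1161, U+11AB.
 */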
michael@0: michael@0: // TODO: Remove CJK implicits as they are handled by the getImplicitPrimary function michael@0: case CJK_IMPLICIT_TAG: /* 0x3400-0x4DB5, 0x4E00-0x9FA5, 0xF900-0xFA2D*/ michael@0: return getPrevImplicit(ch, source); michael@0: michael@0: case SURROGATE_TAG: /* This is a surrogate pair */ michael@0: /* essentially an engaged lead surrogate. */ michael@0: /* if you have encountered it here, it means that a */ michael@0: /* broken sequence was encountered and this is an error */ michael@0: return UCOL_NOT_FOUND; michael@0: michael@0: case LEAD_SURROGATE_TAG: /* D800-DBFF*/ michael@0: return UCOL_NOT_FOUND; /* broken surrogate sequence */ michael@0: michael@0: case TRAIL_SURROGATE_TAG: /* DC00-DFFF*/ michael@0: { michael@0: UChar32 cp = 0; michael@0: UChar prevChar; michael@0: const UChar *prev; michael@0: if (isAtStartPrevIterate(source)) { michael@0: /* we are at the start of the string, wrong place to be at */ michael@0: return UCOL_NOT_FOUND; michael@0: } michael@0: if (source->pos != source->writableBuffer.getBuffer()) { michael@0: prev = source->pos - 1; michael@0: } else { michael@0: prev = source->fcdPosition; michael@0: } michael@0: prevChar = *prev; michael@0: michael@0: /* Handles Han and Supplementary characters here.*/ michael@0: if (U16_IS_LEAD(prevChar)) { michael@0: cp = ((((uint32_t)prevChar)<<10UL)+(ch)-(((uint32_t)0xd800<<10UL)+0xdc00-0x10000)); michael@0: source->pos = prev; michael@0: } else { michael@0: return UCOL_NOT_FOUND; /* like unassigned */ michael@0: } michael@0: michael@0: return getPrevImplicit(cp, source); michael@0: } michael@0: michael@0: /* UCA is filled with these. Tailorings are NOT_FOUND */ michael@0: /* not yet implemented */ michael@0: case CHARSET_TAG: /* this tag always returns */ michael@0: /* probably after 1.8 */ michael@0: return UCOL_NOT_FOUND; michael@0: michael@0: default: /* this tag always returns */ michael@0: *status = U_INTERNAL_PROGRAM_ERROR; michael@0: CE=0; michael@0: break; michael@0: } michael@0: michael@0: if (CE <= UCOL_NOT_FOUND) { michael@0: break; michael@0: } michael@0: } michael@0: michael@0: return CE; michael@0: } michael@0: michael@0: /* This should really be a macro */ michael@0: /* This function is used to reverse parts of a buffer. 
We need this operation when doing continuation */ michael@0: /* secondaries in French */ michael@0: /* michael@0: void uprv_ucol_reverse_buffer(uint8_t *start, uint8_t *end) { michael@0: uint8_t temp; michael@0: while(start0 && src1[src1Length-1]!=0) || michael@0: src2==NULL || src2Length<-1 || src2Length==0 || (src2Length>0 && src2[src2Length-1]!=0) || michael@0: destCapacity<0 || (destCapacity>0 && dest==NULL) michael@0: ) { michael@0: /* error, attempt to write a zero byte and return 0 */ michael@0: if(dest!=NULL && destCapacity>0) { michael@0: *dest=0; michael@0: } michael@0: return 0; michael@0: } michael@0: michael@0: /* check lengths and capacity */ michael@0: if(src1Length<0) { michael@0: src1Length=(int32_t)uprv_strlen((const char *)src1)+1; michael@0: } michael@0: if(src2Length<0) { michael@0: src2Length=(int32_t)uprv_strlen((const char *)src2)+1; michael@0: } michael@0: michael@0: int32_t destLength=src1Length+src2Length; michael@0: if(destLength>destCapacity) { michael@0: /* the merged sort key does not fit into the destination */ michael@0: return destLength; michael@0: } michael@0: michael@0: /* merge the sort keys with the same number of levels */ michael@0: uint8_t *p=dest; michael@0: for(;;) { michael@0: /* copy level from src1 not including 00 or 01 */ michael@0: uint8_t b; michael@0: while((b=*src1)>=2) { michael@0: ++src1; michael@0: *p++=b; michael@0: } michael@0: michael@0: /* add a 02 merge separator */ michael@0: *p++=2; michael@0: michael@0: /* copy level from src2 not including 00 or 01 */ michael@0: while((b=*src2)>=2) { michael@0: ++src2; michael@0: *p++=b; michael@0: } michael@0: michael@0: /* if both sort keys have another level, then add a 01 level separator and continue */ michael@0: if(*src1==1 && *src2==1) { michael@0: ++src1; michael@0: ++src2; michael@0: *p++=1; michael@0: } else { michael@0: break; michael@0: } michael@0: } michael@0: michael@0: /* michael@0: * here, at least one sort key is finished now, but the other one michael@0: * might have some contents left from containing more levels; michael@0: * that contents is just appended to the result michael@0: */ michael@0: if(*src1!=0) { michael@0: /* src1 is not finished, therefore *src2==0, and src1 is appended */ michael@0: src2=src1; michael@0: } michael@0: /* append src2, "the other, unfinished sort key" */ michael@0: while((*p++=*src2++)!=0) {} michael@0: michael@0: /* the actual length might be less than destLength if either sort key contained illegally embedded zero bytes */ michael@0: return (int32_t)(p-dest); michael@0: } michael@0: michael@0: U_NAMESPACE_BEGIN michael@0: michael@0: class SortKeyByteSink : public ByteSink { michael@0: public: michael@0: SortKeyByteSink(char *dest, int32_t destCapacity) michael@0: : buffer_(dest), capacity_(destCapacity), michael@0: appended_(0) { michael@0: if (buffer_ == NULL) { michael@0: capacity_ = 0; michael@0: } else if(capacity_ < 0) { michael@0: buffer_ = NULL; michael@0: capacity_ = 0; michael@0: } michael@0: } michael@0: virtual ~SortKeyByteSink(); michael@0: michael@0: virtual void Append(const char *bytes, int32_t n); michael@0: void Append(uint32_t b) { michael@0: if (appended_ < capacity_ || Resize(1, appended_)) { michael@0: buffer_[appended_] = (char)b; michael@0: } michael@0: ++appended_; michael@0: } michael@0: void Append(uint32_t b1, uint32_t b2) { michael@0: int32_t a2 = appended_ + 2; michael@0: if (a2 <= capacity_ || Resize(2, appended_)) { michael@0: buffer_[appended_] = (char)b1; michael@0: buffer_[appended_ + 1] = (char)b2; 
michael@0: } else if(appended_ < capacity_) { michael@0: buffer_[appended_] = (char)b1; michael@0: } michael@0: appended_ = a2; michael@0: } michael@0: virtual char *GetAppendBuffer(int32_t min_capacity, michael@0: int32_t desired_capacity_hint, michael@0: char *scratch, int32_t scratch_capacity, michael@0: int32_t *result_capacity); michael@0: int32_t NumberOfBytesAppended() const { return appended_; } michael@0: /** @return FALSE if memory allocation failed */ michael@0: UBool IsOk() const { return buffer_ != NULL; } michael@0: michael@0: protected: michael@0: virtual void AppendBeyondCapacity(const char *bytes, int32_t n, int32_t length) = 0; michael@0: virtual UBool Resize(int32_t appendCapacity, int32_t length) = 0; michael@0: michael@0: void SetNotOk() { michael@0: buffer_ = NULL; michael@0: capacity_ = 0; michael@0: } michael@0: michael@0: char *buffer_; michael@0: int32_t capacity_; michael@0: int32_t appended_; michael@0: michael@0: private: michael@0: SortKeyByteSink(const SortKeyByteSink &); // copy constructor not implemented michael@0: SortKeyByteSink &operator=(const SortKeyByteSink &); // assignment operator not implemented michael@0: }; michael@0: michael@0: SortKeyByteSink::~SortKeyByteSink() {} michael@0: michael@0: void michael@0: SortKeyByteSink::Append(const char *bytes, int32_t n) { michael@0: if (n <= 0 || bytes == NULL) { michael@0: return; michael@0: } michael@0: int32_t length = appended_; michael@0: appended_ += n; michael@0: if ((buffer_ + length) == bytes) { michael@0: return; // the caller used GetAppendBuffer() and wrote the bytes already michael@0: } michael@0: int32_t available = capacity_ - length; michael@0: if (n <= available) { michael@0: uprv_memcpy(buffer_ + length, bytes, n); michael@0: } else { michael@0: AppendBeyondCapacity(bytes, n, length); michael@0: } michael@0: } michael@0: michael@0: char * michael@0: SortKeyByteSink::GetAppendBuffer(int32_t min_capacity, michael@0: int32_t desired_capacity_hint, michael@0: char *scratch, michael@0: int32_t scratch_capacity, michael@0: int32_t *result_capacity) { michael@0: if (min_capacity < 1 || scratch_capacity < min_capacity) { michael@0: *result_capacity = 0; michael@0: return NULL; michael@0: } michael@0: int32_t available = capacity_ - appended_; michael@0: if (available >= min_capacity) { michael@0: *result_capacity = available; michael@0: return buffer_ + appended_; michael@0: } else if (Resize(desired_capacity_hint, appended_)) { michael@0: *result_capacity = capacity_ - appended_; michael@0: return buffer_ + appended_; michael@0: } else { michael@0: *result_capacity = scratch_capacity; michael@0: return scratch; michael@0: } michael@0: } michael@0: michael@0: class FixedSortKeyByteSink : public SortKeyByteSink { michael@0: public: michael@0: FixedSortKeyByteSink(char *dest, int32_t destCapacity) michael@0: : SortKeyByteSink(dest, destCapacity) {} michael@0: virtual ~FixedSortKeyByteSink(); michael@0: michael@0: private: michael@0: virtual void AppendBeyondCapacity(const char *bytes, int32_t n, int32_t length); michael@0: virtual UBool Resize(int32_t appendCapacity, int32_t length); michael@0: }; michael@0: michael@0: FixedSortKeyByteSink::~FixedSortKeyByteSink() {} michael@0: michael@0: void michael@0: FixedSortKeyByteSink::AppendBeyondCapacity(const char *bytes, int32_t /*n*/, int32_t length) { michael@0: // buffer_ != NULL && bytes != NULL && n > 0 && appended_ > capacity_ michael@0: // Fill the buffer completely. 
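    /*
     * Note that SortKeyByteSink::Append() above keeps advancing appended_ even
     * once the bytes no longer fit, so NumberOfBytesAppended() always reports
     * the size the full sort key needs; this fixed-capacity subclass merely
     * truncates, while CollationKeyByteSink below grows its CollationKey
     * instead. Sketch of the resulting fill-then-measure pattern (internal
     * classes, illustrative use only):
     *
     *     char stack[64];
     *     FixedSortKeyByteSink sink(stack, (int32_t)sizeof(stack));
     *     // ... sort-key bytes are appended into the sink ...
     *     int32_t needed = sink.NumberOfBytesAppended();
     *     if (needed > (int32_t)sizeof(stack)) {
     *         // key was truncated; repeat the run with a buffer of 'needed' bytes
     *     }
     */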
michael@0: int32_t available = capacity_ - length; michael@0: if (available > 0) { michael@0: uprv_memcpy(buffer_ + length, bytes, available); michael@0: } michael@0: } michael@0: michael@0: UBool michael@0: FixedSortKeyByteSink::Resize(int32_t /*appendCapacity*/, int32_t /*length*/) { michael@0: return FALSE; michael@0: } michael@0: michael@0: class CollationKeyByteSink : public SortKeyByteSink { michael@0: public: michael@0: CollationKeyByteSink(CollationKey &key) michael@0: : SortKeyByteSink(reinterpret_cast(key.getBytes()), key.getCapacity()), michael@0: key_(key) {} michael@0: virtual ~CollationKeyByteSink(); michael@0: michael@0: private: michael@0: virtual void AppendBeyondCapacity(const char *bytes, int32_t n, int32_t length); michael@0: virtual UBool Resize(int32_t appendCapacity, int32_t length); michael@0: michael@0: CollationKey &key_; michael@0: }; michael@0: michael@0: CollationKeyByteSink::~CollationKeyByteSink() {} michael@0: michael@0: void michael@0: CollationKeyByteSink::AppendBeyondCapacity(const char *bytes, int32_t n, int32_t length) { michael@0: // buffer_ != NULL && bytes != NULL && n > 0 && appended_ > capacity_ michael@0: if (Resize(n, length)) { michael@0: uprv_memcpy(buffer_ + length, bytes, n); michael@0: } michael@0: } michael@0: michael@0: UBool michael@0: CollationKeyByteSink::Resize(int32_t appendCapacity, int32_t length) { michael@0: if (buffer_ == NULL) { michael@0: return FALSE; // allocation failed before already michael@0: } michael@0: int32_t newCapacity = 2 * capacity_; michael@0: int32_t altCapacity = length + 2 * appendCapacity; michael@0: if (newCapacity < altCapacity) { michael@0: newCapacity = altCapacity; michael@0: } michael@0: if (newCapacity < 200) { michael@0: newCapacity = 200; michael@0: } michael@0: uint8_t *newBuffer = key_.reallocate(newCapacity, length); michael@0: if (newBuffer == NULL) { michael@0: SetNotOk(); michael@0: return FALSE; michael@0: } michael@0: buffer_ = reinterpret_cast(newBuffer); michael@0: capacity_ = newCapacity; michael@0: return TRUE; michael@0: } michael@0: michael@0: /** michael@0: * uint8_t byte buffer, similar to CharString but simpler. 
michael@0: */ michael@0: class SortKeyLevel : public UMemory { michael@0: public: michael@0: SortKeyLevel() : len(0), ok(TRUE) {} michael@0: ~SortKeyLevel() {} michael@0: michael@0: /** @return FALSE if memory allocation failed */ michael@0: UBool isOk() const { return ok; } michael@0: UBool isEmpty() const { return len == 0; } michael@0: int32_t length() const { return len; } michael@0: const uint8_t *data() const { return buffer.getAlias(); } michael@0: uint8_t operator[](int32_t index) const { return buffer[index]; } michael@0: michael@0: void appendByte(uint32_t b); michael@0: michael@0: void appendTo(ByteSink &sink) const { michael@0: sink.Append(reinterpret_cast(buffer.getAlias()), len); michael@0: } michael@0: michael@0: uint8_t &lastByte() { michael@0: U_ASSERT(len > 0); michael@0: return buffer[len - 1]; michael@0: } michael@0: michael@0: uint8_t *getLastFewBytes(int32_t n) { michael@0: if (ok && len >= n) { michael@0: return buffer.getAlias() + len - n; michael@0: } else { michael@0: return NULL; michael@0: } michael@0: } michael@0: michael@0: private: michael@0: MaybeStackArray buffer; michael@0: int32_t len; michael@0: UBool ok; michael@0: michael@0: UBool ensureCapacity(int32_t appendCapacity); michael@0: michael@0: SortKeyLevel(const SortKeyLevel &other); // forbid copying of this class michael@0: SortKeyLevel &operator=(const SortKeyLevel &other); // forbid copying of this class michael@0: }; michael@0: michael@0: void SortKeyLevel::appendByte(uint32_t b) { michael@0: if(len < buffer.getCapacity() || ensureCapacity(1)) { michael@0: buffer[len++] = (uint8_t)b; michael@0: } michael@0: } michael@0: michael@0: UBool SortKeyLevel::ensureCapacity(int32_t appendCapacity) { michael@0: if(!ok) { michael@0: return FALSE; michael@0: } michael@0: int32_t newCapacity = 2 * buffer.getCapacity(); michael@0: int32_t altCapacity = len + 2 * appendCapacity; michael@0: if (newCapacity < altCapacity) { michael@0: newCapacity = altCapacity; michael@0: } michael@0: if (newCapacity < 200) { michael@0: newCapacity = 200; michael@0: } michael@0: if(buffer.resize(newCapacity, len)==NULL) { michael@0: return ok = FALSE; michael@0: } michael@0: return TRUE; michael@0: } michael@0: michael@0: U_NAMESPACE_END michael@0: michael@0: /* sortkey API */ michael@0: U_CAPI int32_t U_EXPORT2 michael@0: ucol_getSortKey(const UCollator *coll, michael@0: const UChar *source, michael@0: int32_t sourceLength, michael@0: uint8_t *result, michael@0: int32_t resultLength) michael@0: { michael@0: UTRACE_ENTRY(UTRACE_UCOL_GET_SORTKEY); michael@0: if (UTRACE_LEVEL(UTRACE_VERBOSE)) { michael@0: UTRACE_DATA3(UTRACE_VERBOSE, "coll=%p, source string = %vh ", coll, source, michael@0: ((sourceLength==-1 && source!=NULL) ? u_strlen(source) : sourceLength)); michael@0: } michael@0: michael@0: if(coll->delegate != NULL) { michael@0: return ((const Collator*)coll->delegate)->getSortKey(source, sourceLength, result, resultLength); michael@0: } michael@0: michael@0: UErrorCode status = U_ZERO_ERROR; michael@0: int32_t keySize = 0; michael@0: michael@0: if(source != NULL) { michael@0: // source == NULL is actually an error situation, but we would need to michael@0: // have an error code to return it. 
Until we introduce a new
michael@0:         // API, it stays like this
michael@0:
michael@0:         /* this uses the function pointer that is set in updateinternalstate */
michael@0:         /* currently, there are two funcs: */
michael@0:         /*ucol_calcSortKey(...);*/
michael@0:         /*ucol_calcSortKeySimpleTertiary(...);*/
michael@0:
michael@0:         uint8_t noDest[1] = { 0 };
michael@0:         if(result == NULL) {
michael@0:             // Distinguish pure preflighting from an allocation error.
michael@0:             result = noDest;
michael@0:             resultLength = 0;
michael@0:         }
michael@0:         FixedSortKeyByteSink sink(reinterpret_cast<char *>(result), resultLength);
michael@0:         coll->sortKeyGen(coll, source, sourceLength, sink, &status);
michael@0:         if(U_SUCCESS(status)) {
michael@0:             keySize = sink.NumberOfBytesAppended();
michael@0:         }
michael@0:     }
michael@0:     UTRACE_DATA2(UTRACE_VERBOSE, "Sort Key = %vb", result, keySize);
michael@0:     UTRACE_EXIT_STATUS(status);
michael@0:     return keySize;
michael@0: }
michael@0:
michael@0: U_CFUNC int32_t
michael@0: ucol_getCollationKey(const UCollator *coll,
michael@0:                      const UChar *source, int32_t sourceLength,
michael@0:                      CollationKey &key,
michael@0:                      UErrorCode &errorCode) {
michael@0:     CollationKeyByteSink sink(key);
michael@0:     coll->sortKeyGen(coll, source, sourceLength, sink, &errorCode);
michael@0:     return sink.NumberOfBytesAppended();
michael@0: }
michael@0:
michael@0: // Is this primary weight compressible?
michael@0: // Returns false for multi-lead-byte scripts (digits, Latin, Han, implicit).
michael@0: // TODO: This should use per-lead-byte flags from FractionalUCA.txt.
michael@0: static inline UBool
michael@0: isCompressible(const UCollator * /*coll*/, uint8_t primary1) {
michael@0:     return UCOL_BYTE_FIRST_NON_LATIN_PRIMARY <= primary1 && primary1 <= maxRegularPrimary;
michael@0: }
michael@0:
michael@0: static
michael@0: inline void doCaseShift(SortKeyLevel &cases, uint32_t &caseShift) {
michael@0:     if (caseShift == 0) {
michael@0:         cases.appendByte(UCOL_CASE_BYTE_START);
michael@0:         caseShift = UCOL_CASE_SHIFT_START;
michael@0:     }
michael@0: }
michael@0:
michael@0: // Packs the secondary buffer when processing French locale.
michael@0: static void
michael@0: packFrench(const uint8_t *secondaries, int32_t secsize, SortKeyByteSink &result) {
michael@0:     secondaries += secsize;  // We read the secondary-level bytes back to front.
michael@0:     uint8_t secondary;
michael@0:     int32_t count2 = 0;
michael@0:     int32_t i = 0;
michael@0:     // we use i here since the key size already accounts for terminators, so we'll discard the increment
michael@0:     for(i = 0; i<secsize; i++) {
michael@0:         secondary = *(secondaries-i-1);
michael@0:         /* This is compression code. */
michael@0:         if (secondary == UCOL_COMMON2) {
michael@0:             ++count2;
michael@0:         } else {
michael@0:             if (count2 > 0) {
michael@0:                 if (secondary > UCOL_COMMON2) { // not necessary for 4th level.
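/*
 * A minimal sketch of the usual two-call pattern with ucol_getSortKey():
 * the first call with a NULL/zero-capacity buffer only preflights and returns
 * the required length (see the noDest handling above), the second call fills
 * a buffer of exactly that size. The helper name getOwnedSortKey is
 * illustrative; outside of ICU code plain malloc()/free() work as well.
 */
#include "unicode/ucol.h"
#include "cmemory.h"

static uint8_t *
getOwnedSortKey(const UCollator *coll, const UChar *s, int32_t sLength,
                int32_t *pKeyLength) {
    int32_t needed = ucol_getSortKey(coll, s, sLength, NULL, 0);  /* preflight */
    if(needed <= 0) { return NULL; }
    uint8_t *key = (uint8_t *)uprv_malloc(needed);
    if(key == NULL) { return NULL; }
    int32_t written = ucol_getSortKey(coll, s, sLength, key, needed);
    if(written != needed) {  /* should not happen for unchanged input */
        uprv_free(key);
        return NULL;
    }
    if(pKeyLength != NULL) { *pKeyLength = written; }
    return key;  /* caller releases with uprv_free() */
}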
michael@0: while (count2 > UCOL_TOP_COUNT2) { michael@0: result.Append(UCOL_COMMON_TOP2 - UCOL_TOP_COUNT2); michael@0: count2 -= (uint32_t)UCOL_TOP_COUNT2; michael@0: } michael@0: result.Append(UCOL_COMMON_TOP2 - (count2-1)); michael@0: } else { michael@0: while (count2 > UCOL_BOT_COUNT2) { michael@0: result.Append(UCOL_COMMON_BOT2 + UCOL_BOT_COUNT2); michael@0: count2 -= (uint32_t)UCOL_BOT_COUNT2; michael@0: } michael@0: result.Append(UCOL_COMMON_BOT2 + (count2-1)); michael@0: } michael@0: count2 = 0; michael@0: } michael@0: result.Append(secondary); michael@0: } michael@0: } michael@0: if (count2 > 0) { michael@0: while (count2 > UCOL_BOT_COUNT2) { michael@0: result.Append(UCOL_COMMON_BOT2 + UCOL_BOT_COUNT2); michael@0: count2 -= (uint32_t)UCOL_BOT_COUNT2; michael@0: } michael@0: result.Append(UCOL_COMMON_BOT2 + (count2-1)); michael@0: } michael@0: } michael@0: michael@0: #define DEFAULT_ERROR_SIZE_FOR_CALCSORTKEY 0 michael@0: michael@0: /* This is the sortkey work horse function */ michael@0: U_CFUNC void U_CALLCONV michael@0: ucol_calcSortKey(const UCollator *coll, michael@0: const UChar *source, michael@0: int32_t sourceLength, michael@0: SortKeyByteSink &result, michael@0: UErrorCode *status) michael@0: { michael@0: if(U_FAILURE(*status)) { michael@0: return; michael@0: } michael@0: michael@0: SortKeyByteSink &primaries = result; michael@0: SortKeyLevel secondaries; michael@0: SortKeyLevel tertiaries; michael@0: SortKeyLevel cases; michael@0: SortKeyLevel quads; michael@0: michael@0: UnicodeString normSource; michael@0: michael@0: int32_t len = (sourceLength == -1 ? u_strlen(source) : sourceLength); michael@0: michael@0: UColAttributeValue strength = coll->strength; michael@0: michael@0: uint8_t compareSec = (uint8_t)((strength >= UCOL_SECONDARY)?0:0xFF); michael@0: uint8_t compareTer = (uint8_t)((strength >= UCOL_TERTIARY)?0:0xFF); michael@0: uint8_t compareQuad = (uint8_t)((strength >= UCOL_QUATERNARY)?0:0xFF); michael@0: UBool compareIdent = (strength == UCOL_IDENTICAL); michael@0: UBool doCase = (coll->caseLevel == UCOL_ON); michael@0: UBool isFrenchSec = (coll->frenchCollation == UCOL_ON) && (compareSec == 0); michael@0: UBool shifted = (coll->alternateHandling == UCOL_SHIFTED); michael@0: //UBool qShifted = shifted && (compareQuad == 0); michael@0: UBool doHiragana = (coll->hiraganaQ == UCOL_ON) && (compareQuad == 0); michael@0: michael@0: uint32_t variableTopValue = coll->variableTopValue; michael@0: // TODO: UCOL_COMMON_BOT4 should be a function of qShifted. If we have no michael@0: // qShifted, we don't need to set UCOL_COMMON_BOT4 so high. michael@0: uint8_t UCOL_COMMON_BOT4 = (uint8_t)((coll->variableTopValue>>8)+1); michael@0: uint8_t UCOL_HIRAGANA_QUAD = 0; michael@0: if(doHiragana) { michael@0: UCOL_HIRAGANA_QUAD=UCOL_COMMON_BOT4++; michael@0: /* allocate one more space for hiragana, value for hiragana */ michael@0: } michael@0: uint8_t UCOL_BOT_COUNT4 = (uint8_t)(0xFF - UCOL_COMMON_BOT4); michael@0: michael@0: /* support for special features like caselevel and funky secondaries */ michael@0: int32_t lastSecondaryLength = 0; michael@0: uint32_t caseShift = 0; michael@0: michael@0: /* If we need to normalize, we'll do it all at once at the beginning! 
*/ michael@0: const Normalizer2 *norm2; michael@0: if(compareIdent) { michael@0: norm2 = Normalizer2Factory::getNFDInstance(*status); michael@0: } else if(coll->normalizationMode != UCOL_OFF) { michael@0: norm2 = Normalizer2Factory::getFCDInstance(*status); michael@0: } else { michael@0: norm2 = NULL; michael@0: } michael@0: if(norm2 != NULL) { michael@0: normSource.setTo(FALSE, source, len); michael@0: int32_t qcYesLength = norm2->spanQuickCheckYes(normSource, *status); michael@0: if(qcYesLength != len) { michael@0: UnicodeString unnormalized = normSource.tempSubString(qcYesLength); michael@0: normSource.truncate(qcYesLength); michael@0: norm2->normalizeSecondAndAppend(normSource, unnormalized, *status); michael@0: source = normSource.getBuffer(); michael@0: len = normSource.length(); michael@0: } michael@0: } michael@0: collIterate s; michael@0: IInit_collIterate(coll, source, len, &s, status); michael@0: if(U_FAILURE(*status)) { michael@0: return; michael@0: } michael@0: s.flags &= ~UCOL_ITER_NORM; // source passed the FCD test or else was normalized. michael@0: michael@0: uint32_t order = 0; michael@0: michael@0: uint8_t primary1 = 0; michael@0: uint8_t primary2 = 0; michael@0: uint8_t secondary = 0; michael@0: uint8_t tertiary = 0; michael@0: uint8_t caseSwitch = coll->caseSwitch; michael@0: uint8_t tertiaryMask = coll->tertiaryMask; michael@0: int8_t tertiaryAddition = coll->tertiaryAddition; michael@0: uint8_t tertiaryTop = coll->tertiaryTop; michael@0: uint8_t tertiaryBottom = coll->tertiaryBottom; michael@0: uint8_t tertiaryCommon = coll->tertiaryCommon; michael@0: uint8_t caseBits = 0; michael@0: michael@0: UBool wasShifted = FALSE; michael@0: UBool notIsContinuation = FALSE; michael@0: michael@0: uint32_t count2 = 0, count3 = 0, count4 = 0; michael@0: uint8_t leadPrimary = 0; michael@0: michael@0: for(;;) { michael@0: order = ucol_IGetNextCE(coll, &s, status); michael@0: if(order == UCOL_NO_MORE_CES) { michael@0: break; michael@0: } michael@0: michael@0: if(order == 0) { michael@0: continue; michael@0: } michael@0: michael@0: notIsContinuation = !isContinuation(order); michael@0: michael@0: if(notIsContinuation) { michael@0: tertiary = (uint8_t)(order & UCOL_BYTE_SIZE_MASK); michael@0: } else { michael@0: tertiary = (uint8_t)((order & UCOL_REMOVE_CONTINUATION)); michael@0: } michael@0: michael@0: secondary = (uint8_t)((order >>= 8) & UCOL_BYTE_SIZE_MASK); michael@0: primary2 = (uint8_t)((order >>= 8) & UCOL_BYTE_SIZE_MASK); michael@0: primary1 = (uint8_t)(order >> 8); michael@0: michael@0: uint8_t originalPrimary1 = primary1; michael@0: if(notIsContinuation && coll->leadBytePermutationTable != NULL) { michael@0: primary1 = coll->leadBytePermutationTable[primary1]; michael@0: } michael@0: michael@0: if((shifted && ((notIsContinuation && order <= variableTopValue && primary1 > 0) michael@0: || (!notIsContinuation && wasShifted))) michael@0: || (wasShifted && primary1 == 0)) /* amendment to the UCA says that primary ignorables */ michael@0: { michael@0: /* and other ignorables should be removed if following a shifted code point */ michael@0: if(primary1 == 0) { /* if we were shifted and we got an ignorable code point */ michael@0: /* we should just completely ignore it */ michael@0: continue; michael@0: } michael@0: if(compareQuad == 0) { michael@0: if(count4 > 0) { michael@0: while (count4 > UCOL_BOT_COUNT4) { michael@0: quads.appendByte(UCOL_COMMON_BOT4 + UCOL_BOT_COUNT4); michael@0: count4 -= UCOL_BOT_COUNT4; michael@0: } michael@0: quads.appendByte(UCOL_COMMON_BOT4 + 
(count4-1)); michael@0: count4 = 0; michael@0: } michael@0: /* We are dealing with a variable and we're treating them as shifted */ michael@0: /* This is a shifted ignorable */ michael@0: if(primary1 != 0) { /* we need to check this since we could be in continuation */ michael@0: quads.appendByte(primary1); michael@0: } michael@0: if(primary2 != 0) { michael@0: quads.appendByte(primary2); michael@0: } michael@0: } michael@0: wasShifted = TRUE; michael@0: } else { michael@0: wasShifted = FALSE; michael@0: /* Note: This code assumes that the table is well built i.e. not having 0 bytes where they are not supposed to be. */ michael@0: /* Usually, we'll have non-zero primary1 & primary2, except in cases of a-z and friends, when primary2 will */ michael@0: /* regular and simple sortkey calc */ michael@0: if(primary1 != UCOL_IGNORABLE) { michael@0: if(notIsContinuation) { michael@0: if(leadPrimary == primary1) { michael@0: primaries.Append(primary2); michael@0: } else { michael@0: if(leadPrimary != 0) { michael@0: primaries.Append((primary1 > leadPrimary) ? UCOL_BYTE_UNSHIFTED_MAX : UCOL_BYTE_UNSHIFTED_MIN); michael@0: } michael@0: if(primary2 == UCOL_IGNORABLE) { michael@0: /* one byter, not compressed */ michael@0: primaries.Append(primary1); michael@0: leadPrimary = 0; michael@0: } else if(isCompressible(coll, originalPrimary1)) { michael@0: /* compress */ michael@0: primaries.Append(leadPrimary = primary1, primary2); michael@0: } else { michael@0: leadPrimary = 0; michael@0: primaries.Append(primary1, primary2); michael@0: } michael@0: } michael@0: } else { /* we are in continuation, so we're gonna add primary to the key don't care about compression */ michael@0: if(primary2 == UCOL_IGNORABLE) { michael@0: primaries.Append(primary1); michael@0: } else { michael@0: primaries.Append(primary1, primary2); michael@0: } michael@0: } michael@0: } michael@0: michael@0: if(secondary > compareSec) { michael@0: if(!isFrenchSec) { michael@0: /* This is compression code. */ michael@0: if (secondary == UCOL_COMMON2 && notIsContinuation) { michael@0: ++count2; michael@0: } else { michael@0: if (count2 > 0) { michael@0: if (secondary > UCOL_COMMON2) { // not necessary for 4th level. 
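/*
 * A stand-alone sketch of the run compression performed in the branch that
 * follows (and in packFrench() above): a run of `count` consecutive
 * UCOL_COMMON2 secondaries is folded into counted bytes. If the weight that
 * ends the run is above COMMON, a longer run must compare lower, so it counts
 * down from UCOL_COMMON_TOP2; if it is below COMMON (or the level ends), a
 * longer run must compare higher, so it counts up from UCOL_COMMON_BOT2.
 * The helper name flushCommon2Run is illustrative; it reuses the SortKeyLevel
 * buffer class and the UCOL_*2 constants from ucol_imp.h.
 */
static void
flushCommon2Run(uint32_t count, uint8_t followingWeight, SortKeyLevel &out) {
    if(count == 0) { return; }
    if(followingWeight > UCOL_COMMON2) {
        while(count > (uint32_t)UCOL_TOP_COUNT2) {
            out.appendByte(UCOL_COMMON_TOP2 - UCOL_TOP_COUNT2);
            count -= (uint32_t)UCOL_TOP_COUNT2;
        }
        out.appendByte(UCOL_COMMON_TOP2 - (count - 1));
    } else {
        while(count > (uint32_t)UCOL_BOT_COUNT2) {
            out.appendByte(UCOL_COMMON_BOT2 + UCOL_BOT_COUNT2);
            count -= (uint32_t)UCOL_BOT_COUNT2;
        }
        out.appendByte(UCOL_COMMON_BOT2 + (count - 1));
    }
}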
michael@0: while (count2 > UCOL_TOP_COUNT2) { michael@0: secondaries.appendByte(UCOL_COMMON_TOP2 - UCOL_TOP_COUNT2); michael@0: count2 -= (uint32_t)UCOL_TOP_COUNT2; michael@0: } michael@0: secondaries.appendByte(UCOL_COMMON_TOP2 - (count2-1)); michael@0: } else { michael@0: while (count2 > UCOL_BOT_COUNT2) { michael@0: secondaries.appendByte(UCOL_COMMON_BOT2 + UCOL_BOT_COUNT2); michael@0: count2 -= (uint32_t)UCOL_BOT_COUNT2; michael@0: } michael@0: secondaries.appendByte(UCOL_COMMON_BOT2 + (count2-1)); michael@0: } michael@0: count2 = 0; michael@0: } michael@0: secondaries.appendByte(secondary); michael@0: } michael@0: } else { michael@0: /* Do the special handling for French secondaries */ michael@0: /* We need to get continuation elements and do intermediate restore */ michael@0: /* abc1c2c3de with french secondaries need to be edc1c2c3ba NOT edc3c2c1ba */ michael@0: if(notIsContinuation) { michael@0: if (lastSecondaryLength > 1) { michael@0: uint8_t *frenchStartPtr = secondaries.getLastFewBytes(lastSecondaryLength); michael@0: if (frenchStartPtr != NULL) { michael@0: /* reverse secondaries from frenchStartPtr up to frenchEndPtr */ michael@0: uint8_t *frenchEndPtr = frenchStartPtr + lastSecondaryLength - 1; michael@0: uprv_ucol_reverse_buffer(uint8_t, frenchStartPtr, frenchEndPtr); michael@0: } michael@0: } michael@0: lastSecondaryLength = 1; michael@0: } else { michael@0: ++lastSecondaryLength; michael@0: } michael@0: secondaries.appendByte(secondary); michael@0: } michael@0: } michael@0: michael@0: if(doCase && (primary1 > 0 || strength >= UCOL_SECONDARY)) { michael@0: // do the case level if we need to do it. We don't want to calculate michael@0: // case level for primary ignorables if we have only primary strength and case level michael@0: // otherwise we would break well formedness of CEs michael@0: doCaseShift(cases, caseShift); michael@0: if(notIsContinuation) { michael@0: caseBits = (uint8_t)(tertiary & 0xC0); michael@0: michael@0: if(tertiary != 0) { michael@0: if(coll->caseFirst == UCOL_UPPER_FIRST) { michael@0: if((caseBits & 0xC0) == 0) { michael@0: cases.lastByte() |= 1 << (--caseShift); michael@0: } else { michael@0: cases.lastByte() |= 0 << (--caseShift); michael@0: /* second bit */ michael@0: doCaseShift(cases, caseShift); michael@0: cases.lastByte() |= ((caseBits>>6)&1) << (--caseShift); michael@0: } michael@0: } else { michael@0: if((caseBits & 0xC0) == 0) { michael@0: cases.lastByte() |= 0 << (--caseShift); michael@0: } else { michael@0: cases.lastByte() |= 1 << (--caseShift); michael@0: /* second bit */ michael@0: doCaseShift(cases, caseShift); michael@0: cases.lastByte() |= ((caseBits>>7)&1) << (--caseShift); michael@0: } michael@0: } michael@0: } michael@0: } michael@0: } else { michael@0: if(notIsContinuation) { michael@0: tertiary ^= caseSwitch; michael@0: } michael@0: } michael@0: michael@0: tertiary &= tertiaryMask; michael@0: if(tertiary > compareTer) { michael@0: /* This is compression code. 
*/ michael@0: /* sequence size check is included in the if clause */ michael@0: if (tertiary == tertiaryCommon && notIsContinuation) { michael@0: ++count3; michael@0: } else { michael@0: if(tertiary > tertiaryCommon && tertiaryCommon == UCOL_COMMON3_NORMAL) { michael@0: tertiary += tertiaryAddition; michael@0: } else if(tertiary <= tertiaryCommon && tertiaryCommon == UCOL_COMMON3_UPPERFIRST) { michael@0: tertiary -= tertiaryAddition; michael@0: } michael@0: if (count3 > 0) { michael@0: if ((tertiary > tertiaryCommon)) { michael@0: while (count3 > coll->tertiaryTopCount) { michael@0: tertiaries.appendByte(tertiaryTop - coll->tertiaryTopCount); michael@0: count3 -= (uint32_t)coll->tertiaryTopCount; michael@0: } michael@0: tertiaries.appendByte(tertiaryTop - (count3-1)); michael@0: } else { michael@0: while (count3 > coll->tertiaryBottomCount) { michael@0: tertiaries.appendByte(tertiaryBottom + coll->tertiaryBottomCount); michael@0: count3 -= (uint32_t)coll->tertiaryBottomCount; michael@0: } michael@0: tertiaries.appendByte(tertiaryBottom + (count3-1)); michael@0: } michael@0: count3 = 0; michael@0: } michael@0: tertiaries.appendByte(tertiary); michael@0: } michael@0: } michael@0: michael@0: if(/*qShifted*/(compareQuad==0) && notIsContinuation) { michael@0: if(s.flags & UCOL_WAS_HIRAGANA) { // This was Hiragana and we need to note it michael@0: if(count4>0) { // Close this part michael@0: while (count4 > UCOL_BOT_COUNT4) { michael@0: quads.appendByte(UCOL_COMMON_BOT4 + UCOL_BOT_COUNT4); michael@0: count4 -= UCOL_BOT_COUNT4; michael@0: } michael@0: quads.appendByte(UCOL_COMMON_BOT4 + (count4-1)); michael@0: count4 = 0; michael@0: } michael@0: quads.appendByte(UCOL_HIRAGANA_QUAD); // Add the Hiragana michael@0: } else { // This wasn't Hiragana, so we can continue adding stuff michael@0: count4++; michael@0: } michael@0: } michael@0: } michael@0: } michael@0: michael@0: /* Here, we are generally done with processing */ michael@0: /* bailing out would not be too productive */ michael@0: michael@0: UBool ok = TRUE; michael@0: if(U_SUCCESS(*status)) { michael@0: /* we have done all the CE's, now let's put them together to form a key */ michael@0: if(compareSec == 0) { michael@0: if (count2 > 0) { michael@0: while (count2 > UCOL_BOT_COUNT2) { michael@0: secondaries.appendByte(UCOL_COMMON_BOT2 + UCOL_BOT_COUNT2); michael@0: count2 -= (uint32_t)UCOL_BOT_COUNT2; michael@0: } michael@0: secondaries.appendByte(UCOL_COMMON_BOT2 + (count2-1)); michael@0: } michael@0: result.Append(UCOL_LEVELTERMINATOR); michael@0: if(!secondaries.isOk()) { michael@0: ok = FALSE; michael@0: } else if(!isFrenchSec) { michael@0: secondaries.appendTo(result); michael@0: } else { michael@0: // If there are any unresolved continuation secondaries, michael@0: // reverse them here so that we can reverse the whole secondary thing. 
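/*
 * A stand-alone sketch of the French secondary transformation on a plain byte
 * array, matching the comment earlier in this function: abc1c2c3de becomes
 * edc1c2c3ba, i.e. the level is reversed as a whole but each continuation run
 * keeps its original order. Step 1 mirrors the uprv_ucol_reverse_buffer()
 * calls above, step 2 mirrors packFrench() reading the buffer back to front.
 * The function name and the single (runStart, runLength) parameter pair are
 * illustrative; the real code handles any number of runs as it collects them.
 */
static void
frenchReverseSketch(uint8_t *sec, int32_t len, int32_t runStart, int32_t runLength) {
    /* step 1: pre-reverse the continuation run in place */
    for(int32_t a = runStart, b = runStart + runLength - 1; a < b; ++a, --b) {
        uint8_t t = sec[a]; sec[a] = sec[b]; sec[b] = t;
    }
    /* step 2: reverse the whole secondary level */
    for(int32_t a = 0, b = len - 1; a < b; ++a, --b) {
        uint8_t t = sec[a]; sec[a] = sec[b]; sec[b] = t;
    }
}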
michael@0: if (lastSecondaryLength > 1) { michael@0: uint8_t *frenchStartPtr = secondaries.getLastFewBytes(lastSecondaryLength); michael@0: if (frenchStartPtr != NULL) { michael@0: /* reverse secondaries from frenchStartPtr up to frenchEndPtr */ michael@0: uint8_t *frenchEndPtr = frenchStartPtr + lastSecondaryLength - 1; michael@0: uprv_ucol_reverse_buffer(uint8_t, frenchStartPtr, frenchEndPtr); michael@0: } michael@0: } michael@0: packFrench(secondaries.data(), secondaries.length(), result); michael@0: } michael@0: } michael@0: michael@0: if(doCase) { michael@0: ok &= cases.isOk(); michael@0: result.Append(UCOL_LEVELTERMINATOR); michael@0: cases.appendTo(result); michael@0: } michael@0: michael@0: if(compareTer == 0) { michael@0: if (count3 > 0) { michael@0: if (coll->tertiaryCommon != UCOL_COMMON_BOT3) { michael@0: while (count3 >= coll->tertiaryTopCount) { michael@0: tertiaries.appendByte(tertiaryTop - coll->tertiaryTopCount); michael@0: count3 -= (uint32_t)coll->tertiaryTopCount; michael@0: } michael@0: tertiaries.appendByte(tertiaryTop - count3); michael@0: } else { michael@0: while (count3 > coll->tertiaryBottomCount) { michael@0: tertiaries.appendByte(tertiaryBottom + coll->tertiaryBottomCount); michael@0: count3 -= (uint32_t)coll->tertiaryBottomCount; michael@0: } michael@0: tertiaries.appendByte(tertiaryBottom + (count3-1)); michael@0: } michael@0: } michael@0: ok &= tertiaries.isOk(); michael@0: result.Append(UCOL_LEVELTERMINATOR); michael@0: tertiaries.appendTo(result); michael@0: michael@0: if(compareQuad == 0/*qShifted == TRUE*/) { michael@0: if(count4 > 0) { michael@0: while (count4 > UCOL_BOT_COUNT4) { michael@0: quads.appendByte(UCOL_COMMON_BOT4 + UCOL_BOT_COUNT4); michael@0: count4 -= UCOL_BOT_COUNT4; michael@0: } michael@0: quads.appendByte(UCOL_COMMON_BOT4 + (count4-1)); michael@0: } michael@0: ok &= quads.isOk(); michael@0: result.Append(UCOL_LEVELTERMINATOR); michael@0: quads.appendTo(result); michael@0: } michael@0: michael@0: if(compareIdent) { michael@0: result.Append(UCOL_LEVELTERMINATOR); michael@0: u_writeIdenticalLevelRun(s.string, len, result); michael@0: } michael@0: } michael@0: result.Append(0); michael@0: } michael@0: michael@0: /* To avoid memory leak, free the offset buffer if necessary. */ michael@0: ucol_freeOffsetBuffer(&s); michael@0: michael@0: ok &= result.IsOk(); michael@0: if(!ok && U_SUCCESS(*status)) { *status = U_MEMORY_ALLOCATION_ERROR; } michael@0: } michael@0: michael@0: michael@0: U_CFUNC void U_CALLCONV michael@0: ucol_calcSortKeySimpleTertiary(const UCollator *coll, michael@0: const UChar *source, michael@0: int32_t sourceLength, michael@0: SortKeyByteSink &result, michael@0: UErrorCode *status) michael@0: { michael@0: U_ALIGN_CODE(16); michael@0: michael@0: if(U_FAILURE(*status)) { michael@0: return; michael@0: } michael@0: michael@0: SortKeyByteSink &primaries = result; michael@0: SortKeyLevel secondaries; michael@0: SortKeyLevel tertiaries; michael@0: michael@0: UnicodeString normSource; michael@0: michael@0: int32_t len = sourceLength; michael@0: michael@0: /* If we need to normalize, we'll do it all at once at the beginning! 
*/ michael@0: if(coll->normalizationMode != UCOL_OFF) { michael@0: normSource.setTo(len < 0, source, len); michael@0: const Normalizer2 *norm2 = Normalizer2Factory::getFCDInstance(*status); michael@0: int32_t qcYesLength = norm2->spanQuickCheckYes(normSource, *status); michael@0: if(qcYesLength != normSource.length()) { michael@0: UnicodeString unnormalized = normSource.tempSubString(qcYesLength); michael@0: normSource.truncate(qcYesLength); michael@0: norm2->normalizeSecondAndAppend(normSource, unnormalized, *status); michael@0: source = normSource.getBuffer(); michael@0: len = normSource.length(); michael@0: } michael@0: } michael@0: collIterate s; michael@0: IInit_collIterate(coll, (UChar *)source, len, &s, status); michael@0: if(U_FAILURE(*status)) { michael@0: return; michael@0: } michael@0: s.flags &= ~UCOL_ITER_NORM; // source passed the FCD test or else was normalized. michael@0: michael@0: uint32_t order = 0; michael@0: michael@0: uint8_t primary1 = 0; michael@0: uint8_t primary2 = 0; michael@0: uint8_t secondary = 0; michael@0: uint8_t tertiary = 0; michael@0: uint8_t caseSwitch = coll->caseSwitch; michael@0: uint8_t tertiaryMask = coll->tertiaryMask; michael@0: int8_t tertiaryAddition = coll->tertiaryAddition; michael@0: uint8_t tertiaryTop = coll->tertiaryTop; michael@0: uint8_t tertiaryBottom = coll->tertiaryBottom; michael@0: uint8_t tertiaryCommon = coll->tertiaryCommon; michael@0: michael@0: UBool notIsContinuation = FALSE; michael@0: michael@0: uint32_t count2 = 0, count3 = 0; michael@0: uint8_t leadPrimary = 0; michael@0: michael@0: for(;;) { michael@0: order = ucol_IGetNextCE(coll, &s, status); michael@0: michael@0: if(order == 0) { michael@0: continue; michael@0: } michael@0: michael@0: if(order == UCOL_NO_MORE_CES) { michael@0: break; michael@0: } michael@0: michael@0: notIsContinuation = !isContinuation(order); michael@0: michael@0: if(notIsContinuation) { michael@0: tertiary = (uint8_t)((order & tertiaryMask)); michael@0: } else { michael@0: tertiary = (uint8_t)((order & UCOL_REMOVE_CONTINUATION)); michael@0: } michael@0: michael@0: secondary = (uint8_t)((order >>= 8) & UCOL_BYTE_SIZE_MASK); michael@0: primary2 = (uint8_t)((order >>= 8) & UCOL_BYTE_SIZE_MASK); michael@0: primary1 = (uint8_t)(order >> 8); michael@0: michael@0: uint8_t originalPrimary1 = primary1; michael@0: if (coll->leadBytePermutationTable != NULL && notIsContinuation) { michael@0: primary1 = coll->leadBytePermutationTable[primary1]; michael@0: } michael@0: michael@0: /* Note: This code assumes that the table is well built i.e. not having 0 bytes where they are not supposed to be. */ michael@0: /* Usually, we'll have non-zero primary1 & primary2, except in cases of a-z and friends, when primary2 will */ michael@0: /* be zero with non zero primary1. primary3 is different than 0 only for long primaries - see above. */ michael@0: /* regular and simple sortkey calc */ michael@0: if(primary1 != UCOL_IGNORABLE) { michael@0: if(notIsContinuation) { michael@0: if(leadPrimary == primary1) { michael@0: primaries.Append(primary2); michael@0: } else { michael@0: if(leadPrimary != 0) { michael@0: primaries.Append((primary1 > leadPrimary) ? 
UCOL_BYTE_UNSHIFTED_MAX : UCOL_BYTE_UNSHIFTED_MIN); michael@0: } michael@0: if(primary2 == UCOL_IGNORABLE) { michael@0: /* one byter, not compressed */ michael@0: primaries.Append(primary1); michael@0: leadPrimary = 0; michael@0: } else if(isCompressible(coll, originalPrimary1)) { michael@0: /* compress */ michael@0: primaries.Append(leadPrimary = primary1, primary2); michael@0: } else { michael@0: leadPrimary = 0; michael@0: primaries.Append(primary1, primary2); michael@0: } michael@0: } michael@0: } else { /* we are in continuation, so we're gonna add primary to the key don't care about compression */ michael@0: if(primary2 == UCOL_IGNORABLE) { michael@0: primaries.Append(primary1); michael@0: } else { michael@0: primaries.Append(primary1, primary2); michael@0: } michael@0: } michael@0: } michael@0: michael@0: if(secondary > 0) { /* I think that != 0 test should be != IGNORABLE */ michael@0: /* This is compression code. */ michael@0: if (secondary == UCOL_COMMON2 && notIsContinuation) { michael@0: ++count2; michael@0: } else { michael@0: if (count2 > 0) { michael@0: if (secondary > UCOL_COMMON2) { // not necessary for 4th level. michael@0: while (count2 > UCOL_TOP_COUNT2) { michael@0: secondaries.appendByte(UCOL_COMMON_TOP2 - UCOL_TOP_COUNT2); michael@0: count2 -= (uint32_t)UCOL_TOP_COUNT2; michael@0: } michael@0: secondaries.appendByte(UCOL_COMMON_TOP2 - (count2-1)); michael@0: } else { michael@0: while (count2 > UCOL_BOT_COUNT2) { michael@0: secondaries.appendByte(UCOL_COMMON_BOT2 + UCOL_BOT_COUNT2); michael@0: count2 -= (uint32_t)UCOL_BOT_COUNT2; michael@0: } michael@0: secondaries.appendByte(UCOL_COMMON_BOT2 + (count2-1)); michael@0: } michael@0: count2 = 0; michael@0: } michael@0: secondaries.appendByte(secondary); michael@0: } michael@0: } michael@0: michael@0: if(notIsContinuation) { michael@0: tertiary ^= caseSwitch; michael@0: } michael@0: michael@0: if(tertiary > 0) { michael@0: /* This is compression code. 
*/ michael@0: /* sequence size check is included in the if clause */ michael@0: if (tertiary == tertiaryCommon && notIsContinuation) { michael@0: ++count3; michael@0: } else { michael@0: if(tertiary > tertiaryCommon && tertiaryCommon == UCOL_COMMON3_NORMAL) { michael@0: tertiary += tertiaryAddition; michael@0: } else if (tertiary <= tertiaryCommon && tertiaryCommon == UCOL_COMMON3_UPPERFIRST) { michael@0: tertiary -= tertiaryAddition; michael@0: } michael@0: if (count3 > 0) { michael@0: if ((tertiary > tertiaryCommon)) { michael@0: while (count3 > coll->tertiaryTopCount) { michael@0: tertiaries.appendByte(tertiaryTop - coll->tertiaryTopCount); michael@0: count3 -= (uint32_t)coll->tertiaryTopCount; michael@0: } michael@0: tertiaries.appendByte(tertiaryTop - (count3-1)); michael@0: } else { michael@0: while (count3 > coll->tertiaryBottomCount) { michael@0: tertiaries.appendByte(tertiaryBottom + coll->tertiaryBottomCount); michael@0: count3 -= (uint32_t)coll->tertiaryBottomCount; michael@0: } michael@0: tertiaries.appendByte(tertiaryBottom + (count3-1)); michael@0: } michael@0: count3 = 0; michael@0: } michael@0: tertiaries.appendByte(tertiary); michael@0: } michael@0: } michael@0: } michael@0: michael@0: UBool ok = TRUE; michael@0: if(U_SUCCESS(*status)) { michael@0: /* we have done all the CE's, now let's put them together to form a key */ michael@0: if (count2 > 0) { michael@0: while (count2 > UCOL_BOT_COUNT2) { michael@0: secondaries.appendByte(UCOL_COMMON_BOT2 + UCOL_BOT_COUNT2); michael@0: count2 -= (uint32_t)UCOL_BOT_COUNT2; michael@0: } michael@0: secondaries.appendByte(UCOL_COMMON_BOT2 + (count2-1)); michael@0: } michael@0: ok &= secondaries.isOk(); michael@0: result.Append(UCOL_LEVELTERMINATOR); michael@0: secondaries.appendTo(result); michael@0: michael@0: if (count3 > 0) { michael@0: if (coll->tertiaryCommon != UCOL_COMMON3_NORMAL) { michael@0: while (count3 >= coll->tertiaryTopCount) { michael@0: tertiaries.appendByte(tertiaryTop - coll->tertiaryTopCount); michael@0: count3 -= (uint32_t)coll->tertiaryTopCount; michael@0: } michael@0: tertiaries.appendByte(tertiaryTop - count3); michael@0: } else { michael@0: while (count3 > coll->tertiaryBottomCount) { michael@0: tertiaries.appendByte(tertiaryBottom + coll->tertiaryBottomCount); michael@0: count3 -= (uint32_t)coll->tertiaryBottomCount; michael@0: } michael@0: tertiaries.appendByte(tertiaryBottom + (count3-1)); michael@0: } michael@0: } michael@0: ok &= tertiaries.isOk(); michael@0: result.Append(UCOL_LEVELTERMINATOR); michael@0: tertiaries.appendTo(result); michael@0: michael@0: result.Append(0); michael@0: } michael@0: michael@0: /* To avoid memory leak, free the offset buffer if necessary. */ michael@0: ucol_freeOffsetBuffer(&s); michael@0: michael@0: ok &= result.IsOk(); michael@0: if(!ok && U_SUCCESS(*status)) { *status = U_MEMORY_ALLOCATION_ERROR; } michael@0: } michael@0: michael@0: static inline michael@0: UBool isShiftedCE(uint32_t CE, uint32_t LVT, UBool *wasShifted) { michael@0: UBool notIsContinuation = !isContinuation(CE); michael@0: uint8_t primary1 = (uint8_t)((CE >> 24) & 0xFF); michael@0: if((LVT && ((notIsContinuation && (CE & 0xFFFF0000)<= LVT && primary1 > 0) michael@0: || (!notIsContinuation && *wasShifted))) michael@0: || (*wasShifted && primary1 == 0)) /* amendment to the UCA says that primary ignorables */ michael@0: { michael@0: // The stuff below should probably be in the sortkey code... maybe not... 
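/*
 * A minimal sketch of what the "shifted" handling tested here means at the
 * API level, assuming the en_US locale data is available: with
 * UCOL_ALTERNATE_HANDLING set to UCOL_SHIFTED, variable CEs (by default
 * spaces and punctuation) are ignored on the first three levels and only
 * distinguish strings on the quaternary level. The string literals, the
 * locale, and the function name are illustrative.
 */
#include "unicode/ucol.h"

static void
shiftedDemoSketch(UErrorCode *status) {
    UCollator *c = ucol_open("en_US", status);
    if(U_FAILURE(*status)) { return; }
    static const UChar withHyphen[] = { 0x66, 0x6F, 0x6F, 0x2D, 0x62, 0x61, 0x72, 0 };  /* "foo-bar" */
    static const UChar without[]    = { 0x66, 0x6F, 0x6F, 0x62, 0x61, 0x72, 0 };        /* "foobar" */
    ucol_setAttribute(c, UCOL_ALTERNATE_HANDLING, UCOL_SHIFTED, status);
    ucol_setAttribute(c, UCOL_STRENGTH, UCOL_TERTIARY, status);
    UCollationResult atTertiary = ucol_strcoll(c, withHyphen, -1, without, -1);    /* UCOL_EQUAL */
    ucol_setAttribute(c, UCOL_STRENGTH, UCOL_QUATERNARY, status);
    UCollationResult atQuaternary = ucol_strcoll(c, withHyphen, -1, without, -1);  /* no longer equal */
    (void)atTertiary;
    (void)atQuaternary;
    ucol_close(c);
}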
michael@0: if(primary1 != 0) { /* if we were shifted and we got an ignorable code point */ michael@0: /* we should just completely ignore it */ michael@0: *wasShifted = TRUE; michael@0: //continue; michael@0: } michael@0: //*wasShifted = TRUE; michael@0: return TRUE; michael@0: } else { michael@0: *wasShifted = FALSE; michael@0: return FALSE; michael@0: } michael@0: } michael@0: static inline michael@0: void terminatePSKLevel(int32_t level, int32_t maxLevel, int32_t &i, uint8_t *dest) { michael@0: if(level < maxLevel) { michael@0: dest[i++] = UCOL_LEVELTERMINATOR; michael@0: } else { michael@0: dest[i++] = 0; michael@0: } michael@0: } michael@0: michael@0: /** enumeration of level identifiers for partial sort key generation */ michael@0: enum { michael@0: UCOL_PSK_PRIMARY = 0, michael@0: UCOL_PSK_SECONDARY = 1, michael@0: UCOL_PSK_CASE = 2, michael@0: UCOL_PSK_TERTIARY = 3, michael@0: UCOL_PSK_QUATERNARY = 4, michael@0: UCOL_PSK_QUIN = 5, /** This is an extra level, not used - but we have three bits to blow */ michael@0: UCOL_PSK_IDENTICAL = 6, michael@0: UCOL_PSK_NULL = 7, /** level for the end of sort key. Will just produce zeros */ michael@0: UCOL_PSK_LIMIT michael@0: }; michael@0: michael@0: /** collation state enum. *_SHIFT value is how much to shift right michael@0: * to get the state piece to the right. *_MASK value should be michael@0: * ANDed with the shifted state. This data is stored in state[1] michael@0: * field. michael@0: */ michael@0: enum { michael@0: UCOL_PSK_LEVEL_SHIFT = 0, /** level identificator. stores an enum value from above */ michael@0: UCOL_PSK_LEVEL_MASK = 7, /** three bits */ michael@0: UCOL_PSK_BYTE_COUNT_OR_FRENCH_DONE_SHIFT = 3, /** number of bytes of primary or quaternary already written */ michael@0: UCOL_PSK_BYTE_COUNT_OR_FRENCH_DONE_MASK = 1, michael@0: /** can be only 0 or 1, since we get up to two bytes from primary or quaternary michael@0: * This field is also used to denote that the French secondary level is finished michael@0: */ michael@0: UCOL_PSK_WAS_SHIFTED_SHIFT = 4,/** was the last value shifted */ michael@0: UCOL_PSK_WAS_SHIFTED_MASK = 1, /** can be 0 or 1 (Boolean) */ michael@0: UCOL_PSK_USED_FRENCH_SHIFT = 5,/** how many French bytes have we already written */ michael@0: UCOL_PSK_USED_FRENCH_MASK = 3, /** up to 4 bytes. See comment just below */ michael@0: /** When we do French we need to reverse secondary values. However, continuations michael@0: * need to stay the same. So if you had abc1c2c3de, you need to have edc1c2c3ba michael@0: */ michael@0: UCOL_PSK_BOCSU_BYTES_SHIFT = 7, michael@0: UCOL_PSK_BOCSU_BYTES_MASK = 3, michael@0: UCOL_PSK_CONSUMED_CES_SHIFT = 9, michael@0: UCOL_PSK_CONSUMED_CES_MASK = 0x7FFFF michael@0: }; michael@0: michael@0: // macro calculating the number of expansion CEs available michael@0: #define uprv_numAvailableExpCEs(s) (s).CEpos - (s).toReturn michael@0: michael@0: michael@0: /** main sortkey part procedure. On the first call, michael@0: * you should pass in a collator, an iterator, empty state michael@0: * state[0] == state[1] == 0, a buffer to hold results michael@0: * number of bytes you need and an error code pointer. michael@0: * Make sure your buffer is big enough to hold the wanted michael@0: * number of sortkey bytes. I don't check. 
michael@0: * The only meaningful status you can get back is michael@0: * U_BUFFER_OVERFLOW_ERROR, which basically means that you michael@0: * have been dealt a raw deal and that you probably won't michael@0: * be able to use partial sortkey generation for this michael@0: * particular combination of string and collator. This michael@0: * is highly unlikely, but you should still check the error code. michael@0: * Any other status means that you're not in a sane situation michael@0: * anymore. After the first call, preserve state values and michael@0: * use them on subsequent calls to obtain more bytes of a sortkey. michael@0: * Use until the number of bytes written is smaller than the requested michael@0: * number of bytes. Generated sortkey is not compatible with the michael@0: * one generated by ucol_getSortKey, as we don't do any compression. michael@0: * However, levels are still terminated by a 1 (one) and the sortkey michael@0: * is terminated by a 0 (zero). Identical level is the same as in the michael@0: * regular sortkey - internal bocu-1 implementation is used. michael@0: * For curious, although you cannot do much about this, here is michael@0: * the structure of state words. michael@0: * state[0] - iterator state. Depends on the iterator implementation, michael@0: * but allows the iterator to continue where it stopped in michael@0: * the last iteration. michael@0: * state[1] - collation processing state. Here is the distribution michael@0: * of the bits: michael@0: * 0, 1, 2 - level of the sortkey - primary, secondary, case, tertiary michael@0: * quaternary, quin (we don't use this one), identical and michael@0: * null (producing only zeroes - first one to terminate the michael@0: * sortkey and subsequent to fill the buffer). michael@0: * 3 - byte count. Number of bytes written on the primary level. michael@0: * 4 - was shifted. Whether the previous iteration finished in the michael@0: * shifted state. michael@0: * 5, 6 - French continuation bytes written. See the comment in the enum michael@0: * 7,8 - Bocsu bytes used. Number of bytes from a bocu sequence on michael@0: * the identical level. michael@0: * 9..31 - CEs consumed. Number of getCE or next32 operations performed michael@0: * since thes last successful update of the iterator state. 
michael@0: */ michael@0: U_CAPI int32_t U_EXPORT2 michael@0: ucol_nextSortKeyPart(const UCollator *coll, michael@0: UCharIterator *iter, michael@0: uint32_t state[2], michael@0: uint8_t *dest, int32_t count, michael@0: UErrorCode *status) michael@0: { michael@0: /* error checking */ michael@0: if(status==NULL || U_FAILURE(*status)) { michael@0: return 0; michael@0: } michael@0: UTRACE_ENTRY(UTRACE_UCOL_NEXTSORTKEYPART); michael@0: if( coll==NULL || iter==NULL || michael@0: state==NULL || michael@0: count<0 || (count>0 && dest==NULL) michael@0: ) { michael@0: *status=U_ILLEGAL_ARGUMENT_ERROR; michael@0: UTRACE_EXIT_STATUS(status); michael@0: return 0; michael@0: } michael@0: michael@0: UTRACE_DATA6(UTRACE_VERBOSE, "coll=%p, iter=%p, state=%d %d, dest=%p, count=%d", michael@0: coll, iter, state[0], state[1], dest, count); michael@0: michael@0: if(count==0) { michael@0: /* nothing to do */ michael@0: UTRACE_EXIT_VALUE(0); michael@0: return 0; michael@0: } michael@0: /** Setting up situation according to the state we got from the previous iteration */ michael@0: // The state of the iterator from the previous invocation michael@0: uint32_t iterState = state[0]; michael@0: // Has the last iteration ended in the shifted state michael@0: UBool wasShifted = ((state[1] >> UCOL_PSK_WAS_SHIFTED_SHIFT) & UCOL_PSK_WAS_SHIFTED_MASK)?TRUE:FALSE; michael@0: // What is the current level of the sortkey? michael@0: int32_t level= (state[1] >> UCOL_PSK_LEVEL_SHIFT) & UCOL_PSK_LEVEL_MASK; michael@0: // Have we written only one byte from a two byte primary in the previous iteration? michael@0: // Also on secondary level - have we finished with the French secondary? michael@0: int32_t byteCountOrFrenchDone = (state[1] >> UCOL_PSK_BYTE_COUNT_OR_FRENCH_DONE_SHIFT) & UCOL_PSK_BYTE_COUNT_OR_FRENCH_DONE_MASK; michael@0: // number of bytes in the continuation buffer for French michael@0: int32_t usedFrench = (state[1] >> UCOL_PSK_USED_FRENCH_SHIFT) & UCOL_PSK_USED_FRENCH_MASK; michael@0: // Number of bytes already written from a bocsu sequence. Since michael@0: // the longes bocsu sequence is 4 long, this can be up to 3. michael@0: int32_t bocsuBytesUsed = (state[1] >> UCOL_PSK_BOCSU_BYTES_SHIFT) & UCOL_PSK_BOCSU_BYTES_MASK; michael@0: // Number of elements that need to be consumed in this iteration because michael@0: // the iterator returned UITER_NO_STATE at the end of the last iteration, michael@0: // so we had to save the last valid state. michael@0: int32_t cces = (state[1] >> UCOL_PSK_CONSUMED_CES_SHIFT) & UCOL_PSK_CONSUMED_CES_MASK; michael@0: michael@0: /** values that depend on the collator attributes */ michael@0: // strength of the collator. michael@0: int32_t strength = ucol_getAttribute(coll, UCOL_STRENGTH, status); michael@0: // maximal level of the partial sortkey. Need to take whether case level is done michael@0: int32_t maxLevel = 0; michael@0: if(strength < UCOL_TERTIARY) { michael@0: if(ucol_getAttribute(coll, UCOL_CASE_LEVEL, status) == UCOL_ON) { michael@0: maxLevel = UCOL_PSK_CASE; michael@0: } else { michael@0: maxLevel = strength; michael@0: } michael@0: } else { michael@0: if(strength == UCOL_TERTIARY) { michael@0: maxLevel = UCOL_PSK_TERTIARY; michael@0: } else if(strength == UCOL_QUATERNARY) { michael@0: maxLevel = UCOL_PSK_QUATERNARY; michael@0: } else { // identical michael@0: maxLevel = UCOL_IDENTICAL; michael@0: } michael@0: } michael@0: // value for the quaternary level if Hiragana is encountered. 
Used for JIS X 4061 collation michael@0: uint8_t UCOL_HIRAGANA_QUAD = michael@0: (ucol_getAttribute(coll, UCOL_HIRAGANA_QUATERNARY_MODE, status) == UCOL_ON)?0xFE:0xFF; michael@0: // Boundary value that decides whether a CE is shifted or not michael@0: uint32_t LVT = (coll->alternateHandling == UCOL_SHIFTED)?(coll->variableTopValue<<16):0; michael@0: // Are we doing French collation? michael@0: UBool doingFrench = (ucol_getAttribute(coll, UCOL_FRENCH_COLLATION, status) == UCOL_ON); michael@0: michael@0: /** initializing the collation state */ michael@0: UBool notIsContinuation = FALSE; michael@0: uint32_t CE = UCOL_NO_MORE_CES; michael@0: michael@0: collIterate s; michael@0: IInit_collIterate(coll, NULL, -1, &s, status); michael@0: if(U_FAILURE(*status)) { michael@0: UTRACE_EXIT_STATUS(*status); michael@0: return 0; michael@0: } michael@0: s.iterator = iter; michael@0: s.flags |= UCOL_USE_ITERATOR; michael@0: // This variable tells us whether we have produced some other levels in this iteration michael@0: // before we moved to the identical level. In that case, we need to switch the michael@0: // type of the iterator. michael@0: UBool doingIdenticalFromStart = FALSE; michael@0: // Normalizing iterator michael@0: // The division for the array length may truncate the array size to michael@0: // a little less than UNORM_ITER_SIZE, but that size is dimensioned too high michael@0: // for all platforms anyway. michael@0: UAlignedMemory stackNormIter[UNORM_ITER_SIZE/sizeof(UAlignedMemory)]; michael@0: UNormIterator *normIter = NULL; michael@0: // If the normalization is turned on for the collator and we are below identical level michael@0: // we will use a FCD normalizing iterator michael@0: if(ucol_getAttribute(coll, UCOL_NORMALIZATION_MODE, status) == UCOL_ON && level < UCOL_PSK_IDENTICAL) { michael@0: normIter = unorm_openIter(stackNormIter, sizeof(stackNormIter), status); michael@0: s.iterator = unorm_setIter(normIter, iter, UNORM_FCD, status); michael@0: s.flags &= ~UCOL_ITER_NORM; michael@0: if(U_FAILURE(*status)) { michael@0: UTRACE_EXIT_STATUS(*status); michael@0: return 0; michael@0: } michael@0: } else if(level == UCOL_PSK_IDENTICAL) { michael@0: // for identical level, we need a NFD iterator. We need to instantiate it here, since we michael@0: // will be updating the state - and this cannot be done on an ordinary iterator. michael@0: normIter = unorm_openIter(stackNormIter, sizeof(stackNormIter), status); michael@0: s.iterator = unorm_setIter(normIter, iter, UNORM_NFD, status); michael@0: s.flags &= ~UCOL_ITER_NORM; michael@0: if(U_FAILURE(*status)) { michael@0: UTRACE_EXIT_STATUS(*status); michael@0: return 0; michael@0: } michael@0: doingIdenticalFromStart = TRUE; michael@0: } michael@0: michael@0: // This is the tentative new state of the iterator. The problem michael@0: // is that the iterator might return an undefined state, in michael@0: // which case we should save the last valid state and increase michael@0: // the iterator skip value. michael@0: uint32_t newState = 0; michael@0: michael@0: // First, we set the iterator to the last valid position michael@0: // from the last iteration. This was saved in state[0]. 
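/*
 * A small sketch of how the UCOL_PSK_* shift/mask pairs defined above combine
 * into the state[1] word that this function is decoding here: each field is
 * read with (state >> SHIFT) & MASK and written back after clearing the field.
 * The two helper names are illustrative; the real code reads and packs the
 * fields inline, as in the surrounding statements and when the state is saved
 * at the end of this function.
 */
static inline uint32_t
pskGetField(uint32_t state1, uint32_t shift, uint32_t mask) {
    return (state1 >> shift) & mask;
}

static inline uint32_t
pskSetField(uint32_t state1, uint32_t shift, uint32_t mask, uint32_t value) {
    state1 &= ~(mask << shift);
    return state1 | ((value & mask) << shift);
}

/* Example: reading the level and storing the consumed-CE count.
   int32_t level = (int32_t)pskGetField(state[1], UCOL_PSK_LEVEL_SHIFT, UCOL_PSK_LEVEL_MASK);
   state[1] = pskSetField(state[1], UCOL_PSK_CONSUMED_CES_SHIFT, UCOL_PSK_CONSUMED_CES_MASK, cces);
*/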
michael@0: if(iterState == 0) { michael@0: /* initial state */ michael@0: if(level == UCOL_PSK_SECONDARY && doingFrench && !byteCountOrFrenchDone) { michael@0: s.iterator->move(s.iterator, 0, UITER_LIMIT); michael@0: } else { michael@0: s.iterator->move(s.iterator, 0, UITER_START); michael@0: } michael@0: } else { michael@0: /* reset to previous state */ michael@0: s.iterator->setState(s.iterator, iterState, status); michael@0: if(U_FAILURE(*status)) { michael@0: UTRACE_EXIT_STATUS(*status); michael@0: return 0; michael@0: } michael@0: } michael@0: michael@0: michael@0: michael@0: // This variable tells us whether we can attempt to update the state michael@0: // of iterator. Situations where we don't want to update iterator state michael@0: // are the existence of expansion CEs that are not yet processed, and michael@0: // finishing the case level without enough space in the buffer to insert michael@0: // a level terminator. michael@0: UBool canUpdateState = TRUE; michael@0: michael@0: // Consume all the CEs that were consumed at the end of the previous michael@0: // iteration without updating the iterator state. On identical level, michael@0: // consume the code points. michael@0: int32_t counter = cces; michael@0: if(level < UCOL_PSK_IDENTICAL) { michael@0: while(counter-->0) { michael@0: // If we're doing French and we are on the secondary level, michael@0: // we go backwards. michael@0: if(level == UCOL_PSK_SECONDARY && doingFrench) { michael@0: CE = ucol_IGetPrevCE(coll, &s, status); michael@0: } else { michael@0: CE = ucol_IGetNextCE(coll, &s, status); michael@0: } michael@0: if(CE==UCOL_NO_MORE_CES) { michael@0: /* should not happen */ michael@0: *status=U_INTERNAL_PROGRAM_ERROR; michael@0: UTRACE_EXIT_STATUS(*status); michael@0: return 0; michael@0: } michael@0: if(uprv_numAvailableExpCEs(s)) { michael@0: canUpdateState = FALSE; michael@0: } michael@0: } michael@0: } else { michael@0: while(counter-->0) { michael@0: uiter_next32(s.iterator); michael@0: } michael@0: } michael@0: michael@0: // French secondary needs to know whether the iterator state of zero came from previous level OR michael@0: // from a new invocation... michael@0: UBool wasDoingPrimary = FALSE; michael@0: // destination buffer byte counter. When this guy michael@0: // gets to count, we're done with the iteration michael@0: int32_t i = 0; michael@0: // used to count the zero bytes written after we michael@0: // have finished with the sort key michael@0: int32_t j = 0; michael@0: michael@0: michael@0: // Hm.... I think we're ready to plunge in. Basic story is as following: michael@0: // we have a fall through case based on level. This is used for initial michael@0: // positioning on iteration start. Every level processor contains a michael@0: // for(;;) which will be broken when we exhaust all the CEs. Other michael@0: // way to exit is a goto saveState, which happens when we have filled michael@0: // out our buffer. 
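/*
 * A minimal caller-side sketch of the contract described in the comment block
 * above ucol_nextSortKeyPart(): keep the two state words across calls and stop
 * once a call returns fewer bytes than requested. The helper/callback names
 * and the 32-byte chunk size are illustrative.
 */
#include "unicode/ucol.h"
#include "unicode/uiter.h"

static void
streamSortKeySketch(const UCollator *coll, const UChar *s, int32_t length,
                    void (*consume)(const uint8_t *chunk, int32_t chunkLength),
                    UErrorCode *status) {
    UCharIterator iter;
    uiter_setString(&iter, s, length);
    uint32_t state[2] = { 0, 0 };
    uint8_t chunk[32];
    for(;;) {
        int32_t n = ucol_nextSortKeyPart(coll, &iter, state, chunk,
                                         (int32_t)sizeof(chunk), status);
        if(U_FAILURE(*status) || n <= 0) {
            break;
        }
        consume(chunk, n);
        if(n < (int32_t)sizeof(chunk)) {
            break;  /* fewer bytes than requested: the key is complete */
        }
    }
}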
michael@0: switch(level) { michael@0: case UCOL_PSK_PRIMARY: michael@0: wasDoingPrimary = TRUE; michael@0: for(;;) { michael@0: if(i==count) { michael@0: goto saveState; michael@0: } michael@0: // We should save the state only if we michael@0: // are sure that we are done with the michael@0: // previous iterator state michael@0: if(canUpdateState && byteCountOrFrenchDone == 0) { michael@0: newState = s.iterator->getState(s.iterator); michael@0: if(newState != UITER_NO_STATE) { michael@0: iterState = newState; michael@0: cces = 0; michael@0: } michael@0: } michael@0: CE = ucol_IGetNextCE(coll, &s, status); michael@0: cces++; michael@0: if(CE==UCOL_NO_MORE_CES) { michael@0: // Add the level separator michael@0: terminatePSKLevel(level, maxLevel, i, dest); michael@0: byteCountOrFrenchDone=0; michael@0: // Restart the iteration an move to the michael@0: // second level michael@0: s.iterator->move(s.iterator, 0, UITER_START); michael@0: cces = 0; michael@0: level = UCOL_PSK_SECONDARY; michael@0: break; michael@0: } michael@0: if(!isContinuation(CE)){ michael@0: if(coll->leadBytePermutationTable != NULL){ michael@0: CE = (coll->leadBytePermutationTable[CE>>24] << 24) | (CE & 0x00FFFFFF); michael@0: } michael@0: } michael@0: if(!isShiftedCE(CE, LVT, &wasShifted)) { michael@0: CE >>= UCOL_PRIMARYORDERSHIFT; /* get primary */ michael@0: if(CE != 0) { michael@0: if(byteCountOrFrenchDone == 0) { michael@0: // get the second byte of primary michael@0: dest[i++]=(uint8_t)(CE >> 8); michael@0: } else { michael@0: byteCountOrFrenchDone = 0; michael@0: } michael@0: if((CE &=0xff)!=0) { michael@0: if(i==count) { michael@0: /* overflow */ michael@0: byteCountOrFrenchDone = 1; michael@0: cces--; michael@0: goto saveState; michael@0: } michael@0: dest[i++]=(uint8_t)CE; michael@0: } michael@0: } michael@0: } michael@0: if(uprv_numAvailableExpCEs(s)) { michael@0: canUpdateState = FALSE; michael@0: } else { michael@0: canUpdateState = TRUE; michael@0: } michael@0: } michael@0: /* fall through to next level */ michael@0: case UCOL_PSK_SECONDARY: michael@0: if(strength >= UCOL_SECONDARY) { michael@0: if(!doingFrench) { michael@0: for(;;) { michael@0: if(i == count) { michael@0: goto saveState; michael@0: } michael@0: // We should save the state only if we michael@0: // are sure that we are done with the michael@0: // previous iterator state michael@0: if(canUpdateState) { michael@0: newState = s.iterator->getState(s.iterator); michael@0: if(newState != UITER_NO_STATE) { michael@0: iterState = newState; michael@0: cces = 0; michael@0: } michael@0: } michael@0: CE = ucol_IGetNextCE(coll, &s, status); michael@0: cces++; michael@0: if(CE==UCOL_NO_MORE_CES) { michael@0: // Add the level separator michael@0: terminatePSKLevel(level, maxLevel, i, dest); michael@0: byteCountOrFrenchDone = 0; michael@0: // Restart the iteration an move to the michael@0: // second level michael@0: s.iterator->move(s.iterator, 0, UITER_START); michael@0: cces = 0; michael@0: level = UCOL_PSK_CASE; michael@0: break; michael@0: } michael@0: if(!isShiftedCE(CE, LVT, &wasShifted)) { michael@0: CE >>= 8; /* get secondary */ michael@0: if(CE != 0) { michael@0: dest[i++]=(uint8_t)CE; michael@0: } michael@0: } michael@0: if(uprv_numAvailableExpCEs(s)) { michael@0: canUpdateState = FALSE; michael@0: } else { michael@0: canUpdateState = TRUE; michael@0: } michael@0: } michael@0: } else { // French secondary processing michael@0: uint8_t frenchBuff[UCOL_MAX_BUFFER]; michael@0: int32_t frenchIndex = 0; michael@0: // Here we are going backwards. 
michael@0: // If the iterator is at the beggining, it should be michael@0: // moved to end. michael@0: if(wasDoingPrimary) { michael@0: s.iterator->move(s.iterator, 0, UITER_LIMIT); michael@0: cces = 0; michael@0: } michael@0: for(;;) { michael@0: if(i == count) { michael@0: goto saveState; michael@0: } michael@0: if(canUpdateState) { michael@0: newState = s.iterator->getState(s.iterator); michael@0: if(newState != UITER_NO_STATE) { michael@0: iterState = newState; michael@0: cces = 0; michael@0: } michael@0: } michael@0: CE = ucol_IGetPrevCE(coll, &s, status); michael@0: cces++; michael@0: if(CE==UCOL_NO_MORE_CES) { michael@0: // Add the level separator michael@0: terminatePSKLevel(level, maxLevel, i, dest); michael@0: byteCountOrFrenchDone = 0; michael@0: // Restart the iteration an move to the next level michael@0: s.iterator->move(s.iterator, 0, UITER_START); michael@0: level = UCOL_PSK_CASE; michael@0: break; michael@0: } michael@0: if(isContinuation(CE)) { // if it's a continuation, we want to save it and michael@0: // reverse when we get a first non-continuation CE. michael@0: CE >>= 8; michael@0: frenchBuff[frenchIndex++] = (uint8_t)CE; michael@0: } else if(!isShiftedCE(CE, LVT, &wasShifted)) { michael@0: CE >>= 8; /* get secondary */ michael@0: if(!frenchIndex) { michael@0: if(CE != 0) { michael@0: dest[i++]=(uint8_t)CE; michael@0: } michael@0: } else { michael@0: frenchBuff[frenchIndex++] = (uint8_t)CE; michael@0: frenchIndex -= usedFrench; michael@0: usedFrench = 0; michael@0: while(i < count && frenchIndex) { michael@0: dest[i++] = frenchBuff[--frenchIndex]; michael@0: usedFrench++; michael@0: } michael@0: } michael@0: } michael@0: if(uprv_numAvailableExpCEs(s)) { michael@0: canUpdateState = FALSE; michael@0: } else { michael@0: canUpdateState = TRUE; michael@0: } michael@0: } michael@0: } michael@0: } else { michael@0: level = UCOL_PSK_CASE; michael@0: } michael@0: /* fall through to next level */ michael@0: case UCOL_PSK_CASE: michael@0: if(ucol_getAttribute(coll, UCOL_CASE_LEVEL, status) == UCOL_ON) { michael@0: uint32_t caseShift = UCOL_CASE_SHIFT_START; michael@0: uint8_t caseByte = UCOL_CASE_BYTE_START; michael@0: uint8_t caseBits = 0; michael@0: michael@0: for(;;) { michael@0: U_ASSERT(caseShift <= UCOL_CASE_SHIFT_START); michael@0: if(i == count) { michael@0: goto saveState; michael@0: } michael@0: // We should save the state only if we michael@0: // are sure that we are done with the michael@0: // previous iterator state michael@0: if(canUpdateState) { michael@0: newState = s.iterator->getState(s.iterator); michael@0: if(newState != UITER_NO_STATE) { michael@0: iterState = newState; michael@0: cces = 0; michael@0: } michael@0: } michael@0: CE = ucol_IGetNextCE(coll, &s, status); michael@0: cces++; michael@0: if(CE==UCOL_NO_MORE_CES) { michael@0: // On the case level we might have an unfinished michael@0: // case byte. Add one if it's started. michael@0: if(caseShift != UCOL_CASE_SHIFT_START) { michael@0: dest[i++] = caseByte; michael@0: } michael@0: cces = 0; michael@0: // We have finished processing CEs on this level. michael@0: // However, we don't know if we have enough space michael@0: // to add a case level terminator. 
michael@0: if(i < count) { michael@0: // Add the level separator michael@0: terminatePSKLevel(level, maxLevel, i, dest); michael@0: // Restart the iteration and move to the michael@0: // next level michael@0: s.iterator->move(s.iterator, 0, UITER_START); michael@0: level = UCOL_PSK_TERTIARY; michael@0: } else { michael@0: canUpdateState = FALSE; michael@0: } michael@0: break; michael@0: } michael@0: michael@0: if(!isShiftedCE(CE, LVT, &wasShifted)) { michael@0: if(!isContinuation(CE) && ((CE & UCOL_PRIMARYMASK) != 0 || strength > UCOL_PRIMARY)) { michael@0: // do the case level if we need to do it. We don't want to calculate michael@0: // case level for primary ignorables if we have only primary strength and case level michael@0: // otherwise we would break well formedness of CEs michael@0: CE = (uint8_t)(CE & UCOL_BYTE_SIZE_MASK); michael@0: caseBits = (uint8_t)(CE & 0xC0); michael@0: // this copies the case level logic from the michael@0: // sort key generation code michael@0: if(CE != 0) { michael@0: if (caseShift == 0) { michael@0: dest[i++] = caseByte; michael@0: caseShift = UCOL_CASE_SHIFT_START; michael@0: caseByte = UCOL_CASE_BYTE_START; michael@0: } michael@0: if(coll->caseFirst == UCOL_UPPER_FIRST) { michael@0: if((caseBits & 0xC0) == 0) { michael@0: caseByte |= 1 << (--caseShift); michael@0: } else { michael@0: caseByte |= 0 << (--caseShift); michael@0: /* second bit */ michael@0: if(caseShift == 0) { michael@0: dest[i++] = caseByte; michael@0: caseShift = UCOL_CASE_SHIFT_START; michael@0: caseByte = UCOL_CASE_BYTE_START; michael@0: } michael@0: caseByte |= ((caseBits>>6)&1) << (--caseShift); michael@0: } michael@0: } else { michael@0: if((caseBits & 0xC0) == 0) { michael@0: caseByte |= 0 << (--caseShift); michael@0: } else { michael@0: caseByte |= 1 << (--caseShift); michael@0: /* second bit */ michael@0: if(caseShift == 0) { michael@0: dest[i++] = caseByte; michael@0: caseShift = UCOL_CASE_SHIFT_START; michael@0: caseByte = UCOL_CASE_BYTE_START; michael@0: } michael@0: caseByte |= ((caseBits>>7)&1) << (--caseShift); michael@0: } michael@0: } michael@0: } michael@0: michael@0: } michael@0: } michael@0: // Not sure this is correct for the case level - revisit michael@0: if(uprv_numAvailableExpCEs(s)) { michael@0: canUpdateState = FALSE; michael@0: } else { michael@0: canUpdateState = TRUE; michael@0: } michael@0: } michael@0: } else { michael@0: level = UCOL_PSK_TERTIARY; michael@0: } michael@0: /* fall through to next level */ michael@0: case UCOL_PSK_TERTIARY: michael@0: if(strength >= UCOL_TERTIARY) { michael@0: for(;;) { michael@0: if(i == count) { michael@0: goto saveState; michael@0: } michael@0: // We should save the state only if we michael@0: // are sure that we are done with the michael@0: // previous iterator state michael@0: if(canUpdateState) { michael@0: newState = s.iterator->getState(s.iterator); michael@0: if(newState != UITER_NO_STATE) { michael@0: iterState = newState; michael@0: cces = 0; michael@0: } michael@0: } michael@0: CE = ucol_IGetNextCE(coll, &s, status); michael@0: cces++; michael@0: if(CE==UCOL_NO_MORE_CES) { michael@0: // Add the level separator michael@0: terminatePSKLevel(level, maxLevel, i, dest); michael@0: byteCountOrFrenchDone = 0; michael@0: // Restart the iteration an move to the michael@0: // second level michael@0: s.iterator->move(s.iterator, 0, UITER_START); michael@0: cces = 0; michael@0: level = UCOL_PSK_QUATERNARY; michael@0: break; michael@0: } michael@0: if(!isShiftedCE(CE, LVT, &wasShifted)) { michael@0: notIsContinuation = 
!isContinuation(CE); michael@0: michael@0: if(notIsContinuation) { michael@0: CE = (uint8_t)(CE & UCOL_BYTE_SIZE_MASK); michael@0: CE ^= coll->caseSwitch; michael@0: CE &= coll->tertiaryMask; michael@0: } else { michael@0: CE = (uint8_t)((CE & UCOL_REMOVE_CONTINUATION)); michael@0: } michael@0: michael@0: if(CE != 0) { michael@0: dest[i++]=(uint8_t)CE; michael@0: } michael@0: } michael@0: if(uprv_numAvailableExpCEs(s)) { michael@0: canUpdateState = FALSE; michael@0: } else { michael@0: canUpdateState = TRUE; michael@0: } michael@0: } michael@0: } else { michael@0: // if we're not doing tertiary michael@0: // skip to the end michael@0: level = UCOL_PSK_NULL; michael@0: } michael@0: /* fall through to next level */ michael@0: case UCOL_PSK_QUATERNARY: michael@0: if(strength >= UCOL_QUATERNARY) { michael@0: for(;;) { michael@0: if(i == count) { michael@0: goto saveState; michael@0: } michael@0: // We should save the state only if we michael@0: // are sure that we are done with the michael@0: // previous iterator state michael@0: if(canUpdateState) { michael@0: newState = s.iterator->getState(s.iterator); michael@0: if(newState != UITER_NO_STATE) { michael@0: iterState = newState; michael@0: cces = 0; michael@0: } michael@0: } michael@0: CE = ucol_IGetNextCE(coll, &s, status); michael@0: cces++; michael@0: if(CE==UCOL_NO_MORE_CES) { michael@0: // Add the level separator michael@0: terminatePSKLevel(level, maxLevel, i, dest); michael@0: //dest[i++] = UCOL_LEVELTERMINATOR; michael@0: byteCountOrFrenchDone = 0; michael@0: // Restart the iteration an move to the michael@0: // second level michael@0: s.iterator->move(s.iterator, 0, UITER_START); michael@0: cces = 0; michael@0: level = UCOL_PSK_QUIN; michael@0: break; michael@0: } michael@0: if(CE==0) michael@0: continue; michael@0: if(isShiftedCE(CE, LVT, &wasShifted)) { michael@0: CE >>= 16; /* get primary */ michael@0: if(CE != 0) { michael@0: if(byteCountOrFrenchDone == 0) { michael@0: dest[i++]=(uint8_t)(CE >> 8); michael@0: } else { michael@0: byteCountOrFrenchDone = 0; michael@0: } michael@0: if((CE &=0xff)!=0) { michael@0: if(i==count) { michael@0: /* overflow */ michael@0: byteCountOrFrenchDone = 1; michael@0: goto saveState; michael@0: } michael@0: dest[i++]=(uint8_t)CE; michael@0: } michael@0: } michael@0: } else { michael@0: notIsContinuation = !isContinuation(CE); michael@0: if(notIsContinuation) { michael@0: if(s.flags & UCOL_WAS_HIRAGANA) { // This was Hiragana and we need to note it michael@0: dest[i++] = UCOL_HIRAGANA_QUAD; michael@0: } else { michael@0: dest[i++] = 0xFF; michael@0: } michael@0: } michael@0: } michael@0: if(uprv_numAvailableExpCEs(s)) { michael@0: canUpdateState = FALSE; michael@0: } else { michael@0: canUpdateState = TRUE; michael@0: } michael@0: } michael@0: } else { michael@0: // if we're not doing quaternary michael@0: // skip to the end michael@0: level = UCOL_PSK_NULL; michael@0: } michael@0: /* fall through to next level */ michael@0: case UCOL_PSK_QUIN: michael@0: level = UCOL_PSK_IDENTICAL; michael@0: /* fall through to next level */ michael@0: case UCOL_PSK_IDENTICAL: michael@0: if(strength >= UCOL_IDENTICAL) { michael@0: UChar32 first, second; michael@0: int32_t bocsuBytesWritten = 0; michael@0: // We always need to do identical on michael@0: // the NFD form of the string. michael@0: if(normIter == NULL) { michael@0: // we arrived from the level below and michael@0: // normalization was not turned on. 
michael@0: // therefore, we need to make a fresh NFD iterator
michael@0: normIter = unorm_openIter(stackNormIter, sizeof(stackNormIter), status);
michael@0: s.iterator = unorm_setIter(normIter, iter, UNORM_NFD, status);
michael@0: } else if(!doingIdenticalFromStart) {
michael@0: // there is an iterator, but we did some other levels.
michael@0: // therefore, we have an FCD iterator - need to make
michael@0: // an NFD one.
michael@0: // normIter being at the beginning does not guarantee
michael@0: // that the underlying iterator is at the beginning
michael@0: iter->move(iter, 0, UITER_START);
michael@0: s.iterator = unorm_setIter(normIter, iter, UNORM_NFD, status);
michael@0: }
michael@0: // At this point we have an NFD iterator that is positioned
michael@0: // in the right place
michael@0: if(U_FAILURE(*status)) {
michael@0: UTRACE_EXIT_STATUS(*status);
michael@0: return 0;
michael@0: }
michael@0: first = uiter_previous32(s.iterator);
michael@0: // maybe we're at the start of the string
michael@0: if(first == U_SENTINEL) {
michael@0: first = 0;
michael@0: } else {
michael@0: uiter_next32(s.iterator);
michael@0: }
michael@0:
michael@0: j = 0;
michael@0: for(;;) {
michael@0: if(i == count) {
michael@0: if(j+1 < bocsuBytesWritten) {
michael@0: bocsuBytesUsed = j+1;
michael@0: }
michael@0: goto saveState;
michael@0: }
michael@0:
michael@0: // On the identical level, we will always save
michael@0: // the state if we reach this point, since
michael@0: // we don't depend on getNextCE for content:
michael@0: // all the content is in our buffer, and we
michael@0: // have either already stored the full buffer
michael@0: // or we would not arrive here.
michael@0: newState = s.iterator->getState(s.iterator);
michael@0: if(newState != UITER_NO_STATE) {
michael@0: iterState = newState;
michael@0: cces = 0;
michael@0: }
michael@0:
michael@0: uint8_t buff[4];
michael@0: second = uiter_next32(s.iterator);
michael@0: cces++;
michael@0:
michael@0: // end condition for identical level
michael@0: if(second == U_SENTINEL) {
michael@0: terminatePSKLevel(level, maxLevel, i, dest);
michael@0: level = UCOL_PSK_NULL;
michael@0: break;
michael@0: }
michael@0: bocsuBytesWritten = u_writeIdenticalLevelRunTwoChars(first, second, buff);
michael@0: first = second;
michael@0:
michael@0: j = 0;
michael@0: if(bocsuBytesUsed != 0) {
michael@0: while(bocsuBytesUsed-->0) {
michael@0: j++;
michael@0: }
michael@0: }
michael@0:
michael@0: while(i < count && j < bocsuBytesWritten) {
michael@0: dest[i++] = buff[j++];
michael@0: }
michael@0: }
michael@0:
michael@0: } else {
michael@0: level = UCOL_PSK_NULL;
michael@0: }
michael@0: /* fall through to next level */
michael@0: case UCOL_PSK_NULL:
michael@0: j = i;
michael@0: while(j < count) {
michael@0: dest[j++] = 0;
michael@0: }
michael@0: break;
michael@0: default:
michael@0: *status = U_INTERNAL_PROGRAM_ERROR;
michael@0: UTRACE_EXIT_STATUS(*status);
michael@0: return 0;
michael@0: }
michael@0:
michael@0: saveState:
michael@0: // Now we need to return stuff. First we want to see whether we have
michael@0: // done everything for the current state of the iterator.
michael@0: if(byteCountOrFrenchDone
michael@0: || canUpdateState == FALSE
michael@0: || (newState = s.iterator->getState(s.iterator)) == UITER_NO_STATE)
michael@0: {
michael@0: // Any of the above mean that the previous transaction
michael@0: // wasn't finished and that we should store the
michael@0: // previous iterator state.
michael@0: state[0] = iterState;
michael@0: } else {
michael@0: // The transaction is complete. We will continue in the next iteration.
michael@0: state[0] = s.iterator->getState(s.iterator);
michael@0: cces = 0;
michael@0: }
michael@0: // Store the number of bocsu bytes written.
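michael@0: // Note on the saved state: the code below packs the resumption info into
michael@0: // state[1] as a bit field, roughly
michael@0: //
michael@0: //     state[1] = (bocsuBytesUsed        << UCOL_PSK_BOCSU_BYTES_SHIFT)
michael@0: //              | (level                 << UCOL_PSK_LEVEL_SHIFT)
michael@0: //              | (byteCountOrFrenchDone << UCOL_PSK_BYTE_COUNT_OR_FRENCH_DONE_SHIFT)
michael@0: //              | (wasShifted            << UCOL_PSK_WAS_SHIFTED_SHIFT)
michael@0: //              | (cces                  << UCOL_PSK_CONSUMED_CES_SHIFT)
michael@0: //              | (usedFrench            << UCOL_PSK_USED_FRENCH_SHIFT)
michael@0: //
michael@0: // with each value first checked against its *_MASK so that an overflow is
michael@0: // reported as U_INDEX_OUTOFBOUNDS_ERROR rather than silently truncated.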
michael@0: if((bocsuBytesUsed & UCOL_PSK_BOCSU_BYTES_MASK) != bocsuBytesUsed) { michael@0: *status = U_INDEX_OUTOFBOUNDS_ERROR; michael@0: } michael@0: state[1] = (bocsuBytesUsed & UCOL_PSK_BOCSU_BYTES_MASK) << UCOL_PSK_BOCSU_BYTES_SHIFT; michael@0: michael@0: // Next we put in the level of comparison michael@0: state[1] |= ((level & UCOL_PSK_LEVEL_MASK) << UCOL_PSK_LEVEL_SHIFT); michael@0: michael@0: // If we are doing French, we need to store whether we have just finished the French level michael@0: if(level == UCOL_PSK_SECONDARY && doingFrench) { michael@0: state[1] |= (((int32_t)(state[0] == 0) & UCOL_PSK_BYTE_COUNT_OR_FRENCH_DONE_MASK) << UCOL_PSK_BYTE_COUNT_OR_FRENCH_DONE_SHIFT); michael@0: } else { michael@0: state[1] |= ((byteCountOrFrenchDone & UCOL_PSK_BYTE_COUNT_OR_FRENCH_DONE_MASK) << UCOL_PSK_BYTE_COUNT_OR_FRENCH_DONE_SHIFT); michael@0: } michael@0: michael@0: // Was the latest CE shifted michael@0: if(wasShifted) { michael@0: state[1] |= 1 << UCOL_PSK_WAS_SHIFTED_SHIFT; michael@0: } michael@0: // Check for cces overflow michael@0: if((cces & UCOL_PSK_CONSUMED_CES_MASK) != cces) { michael@0: *status = U_INDEX_OUTOFBOUNDS_ERROR; michael@0: } michael@0: // Store cces michael@0: state[1] |= ((cces & UCOL_PSK_CONSUMED_CES_MASK) << UCOL_PSK_CONSUMED_CES_SHIFT); michael@0: michael@0: // Check for French overflow michael@0: if((usedFrench & UCOL_PSK_USED_FRENCH_MASK) != usedFrench) { michael@0: *status = U_INDEX_OUTOFBOUNDS_ERROR; michael@0: } michael@0: // Store number of bytes written in the French secondary continuation sequence michael@0: state[1] |= ((usedFrench & UCOL_PSK_USED_FRENCH_MASK) << UCOL_PSK_USED_FRENCH_SHIFT); michael@0: michael@0: michael@0: // If we have used normalizing iterator, get rid of it michael@0: if(normIter != NULL) { michael@0: unorm_closeIter(normIter); michael@0: } michael@0: michael@0: /* To avoid memory leak, free the offset buffer if necessary. */ michael@0: ucol_freeOffsetBuffer(&s); michael@0: michael@0: // Return number of meaningful sortkey bytes. michael@0: UTRACE_DATA4(UTRACE_VERBOSE, "dest = %vb, state=%d %d", michael@0: dest,i, state[0], state[1]); michael@0: UTRACE_EXIT_VALUE(i); michael@0: return i; michael@0: } michael@0: michael@0: /** michael@0: * Produce a bound for a given sortkey and a number of levels. michael@0: */ michael@0: U_CAPI int32_t U_EXPORT2 michael@0: ucol_getBound(const uint8_t *source, michael@0: int32_t sourceLength, michael@0: UColBoundMode boundType, michael@0: uint32_t noOfLevels, michael@0: uint8_t *result, michael@0: int32_t resultLength, michael@0: UErrorCode *status) michael@0: { michael@0: // consistency checks michael@0: if(status == NULL || U_FAILURE(*status)) { michael@0: return 0; michael@0: } michael@0: if(source == NULL) { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR; michael@0: return 0; michael@0: } michael@0: michael@0: int32_t sourceIndex = 0; michael@0: // Scan the string until we skip enough of the key OR reach the end of the key michael@0: do { michael@0: sourceIndex++; michael@0: if(source[sourceIndex] == UCOL_LEVELTERMINATOR) { michael@0: noOfLevels--; michael@0: } michael@0: } while (noOfLevels > 0 michael@0: && (source[sourceIndex] != 0 || sourceIndex < sourceLength)); michael@0: michael@0: if((source[sourceIndex] == 0 || sourceIndex == sourceLength) michael@0: && noOfLevels > 0) { michael@0: *status = U_SORT_KEY_TOO_SHORT_WARNING; michael@0: } michael@0: michael@0: michael@0: // READ ME: this code assumes that the values for boundType michael@0: // enum will not changes. 
They are set so that the enum value michael@0: // corresponds to the number of extra bytes each bound type michael@0: // needs. michael@0: if(result != NULL && resultLength >= sourceIndex+boundType) { michael@0: uprv_memcpy(result, source, sourceIndex); michael@0: switch(boundType) { michael@0: // Lower bound just gets terminated. No extra bytes michael@0: case UCOL_BOUND_LOWER: // = 0 michael@0: break; michael@0: // Upper bound needs one extra byte michael@0: case UCOL_BOUND_UPPER: // = 1 michael@0: result[sourceIndex++] = 2; michael@0: break; michael@0: // Upper long bound needs two extra bytes michael@0: case UCOL_BOUND_UPPER_LONG: // = 2 michael@0: result[sourceIndex++] = 0xFF; michael@0: result[sourceIndex++] = 0xFF; michael@0: break; michael@0: default: michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR; michael@0: return 0; michael@0: } michael@0: result[sourceIndex++] = 0; michael@0: michael@0: return sourceIndex; michael@0: } else { michael@0: return sourceIndex+boundType+1; michael@0: } michael@0: } michael@0: michael@0: /****************************************************************************/ michael@0: /* Following are the functions that deal with the properties of a collator */ michael@0: /* there are new APIs and some compatibility APIs */ michael@0: /****************************************************************************/ michael@0: michael@0: static inline void michael@0: ucol_addLatinOneEntry(UCollator *coll, UChar ch, uint32_t CE, michael@0: int32_t *primShift, int32_t *secShift, int32_t *terShift) michael@0: { michael@0: uint8_t primary1 = 0, primary2 = 0, secondary = 0, tertiary = 0; michael@0: UBool reverseSecondary = FALSE; michael@0: UBool continuation = isContinuation(CE); michael@0: if(!continuation) { michael@0: tertiary = (uint8_t)((CE & coll->tertiaryMask)); michael@0: tertiary ^= coll->caseSwitch; michael@0: reverseSecondary = TRUE; michael@0: } else { michael@0: tertiary = (uint8_t)((CE & UCOL_REMOVE_CONTINUATION)); michael@0: tertiary &= UCOL_REMOVE_CASE; michael@0: reverseSecondary = FALSE; michael@0: } michael@0: michael@0: secondary = (uint8_t)((CE >>= 8) & UCOL_BYTE_SIZE_MASK); michael@0: primary2 = (uint8_t)((CE >>= 8) & UCOL_BYTE_SIZE_MASK); michael@0: primary1 = (uint8_t)(CE >> 8); michael@0: michael@0: if(primary1 != 0) { michael@0: if (coll->leadBytePermutationTable != NULL && !continuation) { michael@0: primary1 = coll->leadBytePermutationTable[primary1]; michael@0: } michael@0: michael@0: coll->latinOneCEs[ch] |= (primary1 << *primShift); michael@0: *primShift -= 8; michael@0: } michael@0: if(primary2 != 0) { michael@0: if(*primShift < 0) { michael@0: coll->latinOneCEs[ch] = UCOL_BAIL_OUT_CE; michael@0: coll->latinOneCEs[coll->latinOneTableLen+ch] = UCOL_BAIL_OUT_CE; michael@0: coll->latinOneCEs[2*coll->latinOneTableLen+ch] = UCOL_BAIL_OUT_CE; michael@0: return; michael@0: } michael@0: coll->latinOneCEs[ch] |= (primary2 << *primShift); michael@0: *primShift -= 8; michael@0: } michael@0: if(secondary != 0) { michael@0: if(reverseSecondary && coll->frenchCollation == UCOL_ON) { // reverse secondary michael@0: coll->latinOneCEs[coll->latinOneTableLen+ch] >>= 8; // make space for secondary michael@0: coll->latinOneCEs[coll->latinOneTableLen+ch] |= (secondary << 24); michael@0: } else { // normal case michael@0: coll->latinOneCEs[coll->latinOneTableLen+ch] |= (secondary << *secShift); michael@0: } michael@0: *secShift -= 8; michael@0: } michael@0: if(tertiary != 0) { michael@0: coll->latinOneCEs[2*coll->latinOneTableLen+ch] |= (tertiary << 
*terShift);
michael@0: *terShift -= 8;
michael@0: }
michael@0: }
michael@0:
michael@0: static inline UBool
michael@0: ucol_resizeLatinOneTable(UCollator *coll, int32_t size, UErrorCode *status) {
michael@0: uint32_t *newTable = (uint32_t *)uprv_malloc(size*sizeof(uint32_t)*3);
michael@0: if(newTable == NULL) {
michael@0: *status = U_MEMORY_ALLOCATION_ERROR;
michael@0: coll->latinOneFailed = TRUE;
michael@0: return FALSE;
michael@0: }
michael@0: int32_t sizeToCopy = ((size<coll->latinOneTableLen)?size:coll->latinOneTableLen)*sizeof(uint32_t);
michael@0: uprv_memset(newTable, 0, size*sizeof(uint32_t)*3);
michael@0: uprv_memcpy(newTable, coll->latinOneCEs, sizeToCopy);
michael@0: uprv_memcpy(newTable+size, coll->latinOneCEs+coll->latinOneTableLen, sizeToCopy);
michael@0: uprv_memcpy(newTable+2*size, coll->latinOneCEs+2*coll->latinOneTableLen, sizeToCopy);
michael@0: coll->latinOneTableLen = size;
michael@0: uprv_free(coll->latinOneCEs);
michael@0: coll->latinOneCEs = newTable;
michael@0: return TRUE;
michael@0: }
michael@0:
michael@0: static UBool
michael@0: ucol_setUpLatinOne(UCollator *coll, UErrorCode *status) {
michael@0: UBool result = TRUE;
michael@0: if(coll->latinOneCEs == NULL) {
michael@0: coll->latinOneCEs = (uint32_t *)uprv_malloc(sizeof(uint32_t)*UCOL_LATINONETABLELEN*3);
michael@0: if(coll->latinOneCEs == NULL) {
michael@0: *status = U_MEMORY_ALLOCATION_ERROR;
michael@0: return FALSE;
michael@0: }
michael@0: coll->latinOneTableLen = UCOL_LATINONETABLELEN;
michael@0: }
michael@0: UChar ch = 0;
michael@0: UCollationElements *it = ucol_openElements(coll, &ch, 1, status);
michael@0: // Check for null pointer
michael@0: if (U_FAILURE(*status)) {
michael@0: ucol_closeElements(it);
michael@0: return FALSE;
michael@0: }
michael@0: uprv_memset(coll->latinOneCEs, 0, sizeof(uint32_t)*coll->latinOneTableLen*3);
michael@0:
michael@0: int32_t primShift = 24, secShift = 24, terShift = 24;
michael@0: uint32_t CE = 0;
michael@0: int32_t contractionOffset = UCOL_ENDOFLATINONERANGE+1;
michael@0:
michael@0: // TODO: make safe if you get more than you wanted...
michael@0: for(ch = 0; ch <= UCOL_ENDOFLATINONERANGE; ch++) {
michael@0: primShift = 24; secShift = 24; terShift = 24;
michael@0: if(ch < 0x100) {
michael@0: CE = coll->latinOneMapping[ch];
michael@0: } else {
michael@0: CE = UTRIE_GET32_FROM_LEAD(&coll->mapping, ch);
michael@0: if(CE == UCOL_NOT_FOUND && coll->UCA) {
michael@0: CE = UTRIE_GET32_FROM_LEAD(&coll->UCA->mapping, ch);
michael@0: }
michael@0: }
michael@0: if(CE < UCOL_NOT_FOUND) {
michael@0: ucol_addLatinOneEntry(coll, ch, CE, &primShift, &secShift, &terShift);
michael@0: } else {
michael@0: switch (getCETag(CE)) {
michael@0: case EXPANSION_TAG:
michael@0: case DIGIT_TAG:
michael@0: ucol_setText(it, &ch, 1, status);
michael@0: while((int32_t)(CE = ucol_next(it, status)) != UCOL_NULLORDER) {
michael@0: if(primShift < 0 || secShift < 0 || terShift < 0) {
michael@0: coll->latinOneCEs[ch] = UCOL_BAIL_OUT_CE;
michael@0: coll->latinOneCEs[coll->latinOneTableLen+ch] = UCOL_BAIL_OUT_CE;
michael@0: coll->latinOneCEs[2*coll->latinOneTableLen+ch] = UCOL_BAIL_OUT_CE;
michael@0: break;
michael@0: }
michael@0: ucol_addLatinOneEntry(coll, ch, CE, &primShift, &secShift, &terShift);
michael@0: }
michael@0: break;
michael@0: case CONTRACTION_TAG:
michael@0: // Here is the trick: F2 is a contraction. We do something very
michael@0: // similar to contractions, but keep two indices - one into the real
michael@0: // contraction table and the other into where we stuffed things.
michael@0: // This assumes that we don't have many contractions
michael@0: // (which should hold for Latin-1 tables).
michael@0: {
michael@0: if((CE & 0x00FFF000) != 0) {
michael@0: *status = U_UNSUPPORTED_ERROR;
michael@0: goto cleanup_after_failure;
michael@0: }
michael@0:
michael@0: const UChar *UCharOffset = (UChar *)coll->image+getContractOffset(CE);
michael@0:
michael@0: CE |= (contractionOffset & 0xFFF) << 12; // insert the offset in latin-1 table
michael@0:
michael@0: coll->latinOneCEs[ch] = CE;
michael@0: coll->latinOneCEs[coll->latinOneTableLen+ch] = CE;
michael@0: coll->latinOneCEs[2*coll->latinOneTableLen+ch] = CE;
michael@0:
michael@0: // We're going to jump into the contraction table, pick the elements,
michael@0: // and use them.
michael@0: do {
michael@0: CE = *(coll->contractionCEs +
michael@0: (UCharOffset - coll->contractionIndex));
michael@0: if(CE > UCOL_NOT_FOUND && getCETag(CE) == EXPANSION_TAG) {
michael@0: uint32_t size;
michael@0: uint32_t i; /* general counter */
michael@0: uint32_t *CEOffset = (uint32_t *)coll->image+getExpansionOffset(CE); /* find the offset to expansion table */
michael@0: size = getExpansionCount(CE);
michael@0: //CE = *CEOffset++;
michael@0: if(size != 0) { /* if there are less than 16 elements in expansion, we don't terminate */
michael@0: for(i = 0; i<size; i++) {
michael@0: if(primShift < 0 || secShift < 0 || terShift < 0) {
michael@0: coll->latinOneCEs[(UChar)contractionOffset] = UCOL_BAIL_OUT_CE;
michael@0: coll->latinOneCEs[coll->latinOneTableLen+(UChar)contractionOffset] = UCOL_BAIL_OUT_CE;
michael@0: coll->latinOneCEs[2*coll->latinOneTableLen+(UChar)contractionOffset] = UCOL_BAIL_OUT_CE;
michael@0: break;
michael@0: }
michael@0: ucol_addLatinOneEntry(coll, (UChar)contractionOffset, *CEOffset++, &primShift, &secShift, &terShift);
michael@0: }
michael@0: } else { /* else, we do */
michael@0: while(*CEOffset != 0) {
michael@0: if(primShift < 0 || secShift < 0 || terShift < 0) {
michael@0: coll->latinOneCEs[(UChar)contractionOffset] = UCOL_BAIL_OUT_CE;
michael@0: coll->latinOneCEs[coll->latinOneTableLen+(UChar)contractionOffset] = UCOL_BAIL_OUT_CE;
michael@0: coll->latinOneCEs[2*coll->latinOneTableLen+(UChar)contractionOffset] = UCOL_BAIL_OUT_CE;
michael@0: break;
michael@0: }
michael@0: ucol_addLatinOneEntry(coll, (UChar)contractionOffset, *CEOffset++, &primShift, &secShift, &terShift);
michael@0: }
michael@0: }
michael@0: contractionOffset++;
michael@0: } else if(CE < UCOL_NOT_FOUND) {
michael@0: ucol_addLatinOneEntry(coll, (UChar)contractionOffset++, CE, &primShift, &secShift, &terShift);
michael@0: } else {
michael@0: coll->latinOneCEs[(UChar)contractionOffset] = UCOL_BAIL_OUT_CE;
michael@0: coll->latinOneCEs[coll->latinOneTableLen+(UChar)contractionOffset] = UCOL_BAIL_OUT_CE;
michael@0: coll->latinOneCEs[2*coll->latinOneTableLen+(UChar)contractionOffset] = UCOL_BAIL_OUT_CE;
michael@0: contractionOffset++;
michael@0: }
michael@0: UCharOffset++;
michael@0: primShift = 24; secShift = 24; terShift = 24;
michael@0: if(contractionOffset == coll->latinOneTableLen) { // we need to reallocate
michael@0: if(!ucol_resizeLatinOneTable(coll, 2*coll->latinOneTableLen, status)) {
michael@0: goto cleanup_after_failure;
michael@0: }
michael@0: }
michael@0: } while(*UCharOffset != 0xFFFF);
michael@0: }
michael@0: break;
michael@0: case SPEC_PROC_TAG:
michael@0: {
michael@0: // 0xB7 is a precontext character defined in UCA 5.1; special
michael@0: // handling is implemented in order to preserve the LatinOne table for
michael@0: // most locales.
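michael@0: // Clarifying note: SPEC_PROC_TAG marks CEs with pre-context rules. Only
michael@0: // U+00B7 MIDDLE DOT is special-cased here so that common Latin-1
michael@0: // tailorings keep the fast-path table; any other pre-context character
michael@0: // jumps to cleanup_after_failure, which sets latinOneFailed and leaves
michael@0: // the collator on the regular CE-lookup path.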
michael@0: if (ch==0xb7) { michael@0: ucol_addLatinOneEntry(coll, ch, CE, &primShift, &secShift, &terShift); michael@0: } michael@0: else { michael@0: goto cleanup_after_failure; michael@0: } michael@0: } michael@0: break; michael@0: default: michael@0: goto cleanup_after_failure; michael@0: } michael@0: } michael@0: } michael@0: // compact table michael@0: if(contractionOffset < coll->latinOneTableLen) { michael@0: if(!ucol_resizeLatinOneTable(coll, contractionOffset, status)) { michael@0: goto cleanup_after_failure; michael@0: } michael@0: } michael@0: ucol_closeElements(it); michael@0: return result; michael@0: michael@0: cleanup_after_failure: michael@0: // status should already be set before arriving here. michael@0: coll->latinOneFailed = TRUE; michael@0: ucol_closeElements(it); michael@0: return FALSE; michael@0: } michael@0: michael@0: void ucol_updateInternalState(UCollator *coll, UErrorCode *status) { michael@0: if(U_SUCCESS(*status)) { michael@0: if(coll->caseFirst == UCOL_UPPER_FIRST) { michael@0: coll->caseSwitch = UCOL_CASE_SWITCH; michael@0: } else { michael@0: coll->caseSwitch = UCOL_NO_CASE_SWITCH; michael@0: } michael@0: michael@0: if(coll->caseLevel == UCOL_ON || coll->caseFirst == UCOL_OFF) { michael@0: coll->tertiaryMask = UCOL_REMOVE_CASE; michael@0: coll->tertiaryCommon = UCOL_COMMON3_NORMAL; michael@0: coll->tertiaryAddition = (int8_t)UCOL_FLAG_BIT_MASK_CASE_SW_OFF; /* Should be 0x80 */ michael@0: coll->tertiaryTop = UCOL_COMMON_TOP3_CASE_SW_OFF; michael@0: coll->tertiaryBottom = UCOL_COMMON_BOT3; michael@0: } else { michael@0: coll->tertiaryMask = UCOL_KEEP_CASE; michael@0: coll->tertiaryAddition = UCOL_FLAG_BIT_MASK_CASE_SW_ON; michael@0: if(coll->caseFirst == UCOL_UPPER_FIRST) { michael@0: coll->tertiaryCommon = UCOL_COMMON3_UPPERFIRST; michael@0: coll->tertiaryTop = UCOL_COMMON_TOP3_CASE_SW_UPPER; michael@0: coll->tertiaryBottom = UCOL_COMMON_BOTTOM3_CASE_SW_UPPER; michael@0: } else { michael@0: coll->tertiaryCommon = UCOL_COMMON3_NORMAL; michael@0: coll->tertiaryTop = UCOL_COMMON_TOP3_CASE_SW_LOWER; michael@0: coll->tertiaryBottom = UCOL_COMMON_BOTTOM3_CASE_SW_LOWER; michael@0: } michael@0: } michael@0: michael@0: /* Set the compression values */ michael@0: uint8_t tertiaryTotal = (uint8_t)(coll->tertiaryTop - coll->tertiaryBottom - 1); michael@0: coll->tertiaryTopCount = (uint8_t)(UCOL_PROPORTION3*tertiaryTotal); /* we multilply double with int, but need only int */ michael@0: coll->tertiaryBottomCount = (uint8_t)(tertiaryTotal - coll->tertiaryTopCount); michael@0: michael@0: if(coll->caseLevel == UCOL_OFF && coll->strength == UCOL_TERTIARY michael@0: && coll->frenchCollation == UCOL_OFF && coll->alternateHandling == UCOL_NON_IGNORABLE) michael@0: { michael@0: coll->sortKeyGen = ucol_calcSortKeySimpleTertiary; michael@0: } else { michael@0: coll->sortKeyGen = ucol_calcSortKey; michael@0: } michael@0: if(coll->caseLevel == UCOL_OFF && coll->strength <= UCOL_TERTIARY && coll->numericCollation == UCOL_OFF michael@0: && coll->alternateHandling == UCOL_NON_IGNORABLE && !coll->latinOneFailed) michael@0: { michael@0: if(coll->latinOneCEs == NULL || coll->latinOneRegenTable) { michael@0: if(ucol_setUpLatinOne(coll, status)) { // if we succeed in building latin1 table, we'll use it michael@0: //fprintf(stderr, "F"); michael@0: coll->latinOneUse = TRUE; michael@0: } else { michael@0: coll->latinOneUse = FALSE; michael@0: } michael@0: if(*status == U_UNSUPPORTED_ERROR) { michael@0: *status = U_ZERO_ERROR; michael@0: } michael@0: } else { // latin1Table exists and it 
doesn't need to be regenerated, just use it michael@0: coll->latinOneUse = TRUE; michael@0: } michael@0: } else { michael@0: coll->latinOneUse = FALSE; michael@0: } michael@0: } michael@0: } michael@0: michael@0: U_CAPI uint32_t U_EXPORT2 michael@0: ucol_setVariableTop(UCollator *coll, const UChar *varTop, int32_t len, UErrorCode *status) { michael@0: if(U_FAILURE(*status) || coll == NULL) { michael@0: return 0; michael@0: } michael@0: if(len == -1) { michael@0: len = u_strlen(varTop); michael@0: } michael@0: if(len == 0) { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR; michael@0: return 0; michael@0: } michael@0: michael@0: if(coll->delegate!=NULL) { michael@0: return ((Collator*)coll->delegate)->setVariableTop(varTop, len, *status); michael@0: } michael@0: michael@0: michael@0: collIterate s; michael@0: IInit_collIterate(coll, varTop, len, &s, status); michael@0: if(U_FAILURE(*status)) { michael@0: return 0; michael@0: } michael@0: michael@0: uint32_t CE = ucol_IGetNextCE(coll, &s, status); michael@0: michael@0: /* here we check if we have consumed all characters */ michael@0: /* you can put in either one character or a contraction */ michael@0: /* you shouldn't put more... */ michael@0: if(s.pos != s.endp || CE == UCOL_NO_MORE_CES) { michael@0: *status = U_CE_NOT_FOUND_ERROR; michael@0: return 0; michael@0: } michael@0: michael@0: uint32_t nextCE = ucol_IGetNextCE(coll, &s, status); michael@0: michael@0: if(isContinuation(nextCE) && (nextCE & UCOL_PRIMARYMASK) != 0) { michael@0: *status = U_PRIMARY_TOO_LONG_ERROR; michael@0: return 0; michael@0: } michael@0: if(coll->variableTopValue != (CE & UCOL_PRIMARYMASK)>>16) { michael@0: coll->variableTopValueisDefault = FALSE; michael@0: coll->variableTopValue = (CE & UCOL_PRIMARYMASK)>>16; michael@0: } michael@0: michael@0: /* To avoid memory leak, free the offset buffer if necessary. 
*/ michael@0: ucol_freeOffsetBuffer(&s); michael@0: michael@0: return CE & UCOL_PRIMARYMASK; michael@0: } michael@0: michael@0: U_CAPI uint32_t U_EXPORT2 ucol_getVariableTop(const UCollator *coll, UErrorCode *status) { michael@0: if(U_FAILURE(*status) || coll == NULL) { michael@0: return 0; michael@0: } michael@0: if(coll->delegate!=NULL) { michael@0: return ((const Collator*)coll->delegate)->getVariableTop(*status); michael@0: } michael@0: return coll->variableTopValue<<16; michael@0: } michael@0: michael@0: U_CAPI void U_EXPORT2 michael@0: ucol_restoreVariableTop(UCollator *coll, const uint32_t varTop, UErrorCode *status) { michael@0: if(U_FAILURE(*status) || coll == NULL) { michael@0: return; michael@0: } michael@0: michael@0: if(coll->variableTopValue != (varTop & UCOL_PRIMARYMASK)>>16) { michael@0: coll->variableTopValueisDefault = FALSE; michael@0: coll->variableTopValue = (varTop & UCOL_PRIMARYMASK)>>16; michael@0: } michael@0: } michael@0: /* Attribute setter API */ michael@0: U_CAPI void U_EXPORT2 michael@0: ucol_setAttribute(UCollator *coll, UColAttribute attr, UColAttributeValue value, UErrorCode *status) { michael@0: if(U_FAILURE(*status) || coll == NULL) { michael@0: return; michael@0: } michael@0: michael@0: if(coll->delegate != NULL) { michael@0: ((Collator*)coll->delegate)->setAttribute(attr,value,*status); michael@0: return; michael@0: } michael@0: michael@0: UColAttributeValue oldFrench = coll->frenchCollation; michael@0: UColAttributeValue oldCaseFirst = coll->caseFirst; michael@0: switch(attr) { michael@0: case UCOL_NUMERIC_COLLATION: /* sort substrings of digits as numbers */ michael@0: if(value == UCOL_ON) { michael@0: coll->numericCollation = UCOL_ON; michael@0: coll->numericCollationisDefault = FALSE; michael@0: } else if (value == UCOL_OFF) { michael@0: coll->numericCollation = UCOL_OFF; michael@0: coll->numericCollationisDefault = FALSE; michael@0: } else if (value == UCOL_DEFAULT) { michael@0: coll->numericCollationisDefault = TRUE; michael@0: coll->numericCollation = (UColAttributeValue)coll->options->numericCollation; michael@0: } else { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR; michael@0: } michael@0: break; michael@0: case UCOL_HIRAGANA_QUATERNARY_MODE: /* special quaternary values for Hiragana */ michael@0: if(value == UCOL_ON || value == UCOL_OFF || value == UCOL_DEFAULT) { michael@0: // This attribute is an implementation detail of the CLDR Japanese tailoring. michael@0: // The implementation might change to use a different mechanism michael@0: // to achieve the same Japanese sort order. michael@0: // Since ICU 50, this attribute is not settable any more via API functions. 
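michael@0: // Illustrative note: with the check above, a call such as
michael@0: //
michael@0: //     ucol_setAttribute(coll, UCOL_HIRAGANA_QUATERNARY_MODE, UCOL_ON, &status);
michael@0: //
michael@0: // is accepted as a silent no-op, while an out-of-range value still takes
michael@0: // the U_ILLEGAL_ARGUMENT_ERROR branch below.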
michael@0: } else { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR; michael@0: } michael@0: break; michael@0: case UCOL_FRENCH_COLLATION: /* attribute for direction of secondary weights*/ michael@0: if(value == UCOL_ON) { michael@0: coll->frenchCollation = UCOL_ON; michael@0: coll->frenchCollationisDefault = FALSE; michael@0: } else if (value == UCOL_OFF) { michael@0: coll->frenchCollation = UCOL_OFF; michael@0: coll->frenchCollationisDefault = FALSE; michael@0: } else if (value == UCOL_DEFAULT) { michael@0: coll->frenchCollationisDefault = TRUE; michael@0: coll->frenchCollation = (UColAttributeValue)coll->options->frenchCollation; michael@0: } else { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR ; michael@0: } michael@0: break; michael@0: case UCOL_ALTERNATE_HANDLING: /* attribute for handling variable elements*/ michael@0: if(value == UCOL_SHIFTED) { michael@0: coll->alternateHandling = UCOL_SHIFTED; michael@0: coll->alternateHandlingisDefault = FALSE; michael@0: } else if (value == UCOL_NON_IGNORABLE) { michael@0: coll->alternateHandling = UCOL_NON_IGNORABLE; michael@0: coll->alternateHandlingisDefault = FALSE; michael@0: } else if (value == UCOL_DEFAULT) { michael@0: coll->alternateHandlingisDefault = TRUE; michael@0: coll->alternateHandling = (UColAttributeValue)coll->options->alternateHandling ; michael@0: } else { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR ; michael@0: } michael@0: break; michael@0: case UCOL_CASE_FIRST: /* who goes first, lower case or uppercase */ michael@0: if(value == UCOL_LOWER_FIRST) { michael@0: coll->caseFirst = UCOL_LOWER_FIRST; michael@0: coll->caseFirstisDefault = FALSE; michael@0: } else if (value == UCOL_UPPER_FIRST) { michael@0: coll->caseFirst = UCOL_UPPER_FIRST; michael@0: coll->caseFirstisDefault = FALSE; michael@0: } else if (value == UCOL_OFF) { michael@0: coll->caseFirst = UCOL_OFF; michael@0: coll->caseFirstisDefault = FALSE; michael@0: } else if (value == UCOL_DEFAULT) { michael@0: coll->caseFirst = (UColAttributeValue)coll->options->caseFirst; michael@0: coll->caseFirstisDefault = TRUE; michael@0: } else { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR ; michael@0: } michael@0: break; michael@0: case UCOL_CASE_LEVEL: /* do we have an extra case level */ michael@0: if(value == UCOL_ON) { michael@0: coll->caseLevel = UCOL_ON; michael@0: coll->caseLevelisDefault = FALSE; michael@0: } else if (value == UCOL_OFF) { michael@0: coll->caseLevel = UCOL_OFF; michael@0: coll->caseLevelisDefault = FALSE; michael@0: } else if (value == UCOL_DEFAULT) { michael@0: coll->caseLevel = (UColAttributeValue)coll->options->caseLevel; michael@0: coll->caseLevelisDefault = TRUE; michael@0: } else { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR ; michael@0: } michael@0: break; michael@0: case UCOL_NORMALIZATION_MODE: /* attribute for normalization */ michael@0: if(value == UCOL_ON) { michael@0: coll->normalizationMode = UCOL_ON; michael@0: coll->normalizationModeisDefault = FALSE; michael@0: initializeFCD(status); michael@0: } else if (value == UCOL_OFF) { michael@0: coll->normalizationMode = UCOL_OFF; michael@0: coll->normalizationModeisDefault = FALSE; michael@0: } else if (value == UCOL_DEFAULT) { michael@0: coll->normalizationModeisDefault = TRUE; michael@0: coll->normalizationMode = (UColAttributeValue)coll->options->normalizationMode; michael@0: if(coll->normalizationMode == UCOL_ON) { michael@0: initializeFCD(status); michael@0: } michael@0: } else { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR ; michael@0: } michael@0: break; michael@0: case UCOL_STRENGTH: 
/* attribute for strength */ michael@0: if (value == UCOL_DEFAULT) { michael@0: coll->strengthisDefault = TRUE; michael@0: coll->strength = (UColAttributeValue)coll->options->strength; michael@0: } else if (value <= UCOL_IDENTICAL) { michael@0: coll->strengthisDefault = FALSE; michael@0: coll->strength = value; michael@0: } else { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR ; michael@0: } michael@0: break; michael@0: case UCOL_ATTRIBUTE_COUNT: michael@0: default: michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR; michael@0: break; michael@0: } michael@0: if(oldFrench != coll->frenchCollation || oldCaseFirst != coll->caseFirst) { michael@0: coll->latinOneRegenTable = TRUE; michael@0: } else { michael@0: coll->latinOneRegenTable = FALSE; michael@0: } michael@0: ucol_updateInternalState(coll, status); michael@0: } michael@0: michael@0: U_CAPI UColAttributeValue U_EXPORT2 michael@0: ucol_getAttribute(const UCollator *coll, UColAttribute attr, UErrorCode *status) { michael@0: if(U_FAILURE(*status) || coll == NULL) { michael@0: return UCOL_DEFAULT; michael@0: } michael@0: michael@0: if(coll->delegate != NULL) { michael@0: return ((Collator*)coll->delegate)->getAttribute(attr,*status); michael@0: } michael@0: michael@0: switch(attr) { michael@0: case UCOL_NUMERIC_COLLATION: michael@0: return coll->numericCollation; michael@0: case UCOL_HIRAGANA_QUATERNARY_MODE: michael@0: return coll->hiraganaQ; michael@0: case UCOL_FRENCH_COLLATION: /* attribute for direction of secondary weights*/ michael@0: return coll->frenchCollation; michael@0: case UCOL_ALTERNATE_HANDLING: /* attribute for handling variable elements*/ michael@0: return coll->alternateHandling; michael@0: case UCOL_CASE_FIRST: /* who goes first, lower case or uppercase */ michael@0: return coll->caseFirst; michael@0: case UCOL_CASE_LEVEL: /* do we have an extra case level */ michael@0: return coll->caseLevel; michael@0: case UCOL_NORMALIZATION_MODE: /* attribute for normalization */ michael@0: return coll->normalizationMode; michael@0: case UCOL_STRENGTH: /* attribute for strength */ michael@0: return coll->strength; michael@0: case UCOL_ATTRIBUTE_COUNT: michael@0: default: michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR; michael@0: break; michael@0: } michael@0: return UCOL_DEFAULT; michael@0: } michael@0: michael@0: U_CAPI void U_EXPORT2 michael@0: ucol_setStrength( UCollator *coll, michael@0: UCollationStrength strength) michael@0: { michael@0: UErrorCode status = U_ZERO_ERROR; michael@0: ucol_setAttribute(coll, UCOL_STRENGTH, strength, &status); michael@0: } michael@0: michael@0: U_CAPI UCollationStrength U_EXPORT2 michael@0: ucol_getStrength(const UCollator *coll) michael@0: { michael@0: UErrorCode status = U_ZERO_ERROR; michael@0: return ucol_getAttribute(coll, UCOL_STRENGTH, &status); michael@0: } michael@0: michael@0: U_CAPI int32_t U_EXPORT2 michael@0: ucol_getReorderCodes(const UCollator *coll, michael@0: int32_t *dest, michael@0: int32_t destCapacity, michael@0: UErrorCode *status) { michael@0: if (U_FAILURE(*status)) { michael@0: return 0; michael@0: } michael@0: michael@0: if(coll->delegate!=NULL) { michael@0: return ((const Collator*)coll->delegate)->getReorderCodes(dest, destCapacity, *status); michael@0: } michael@0: michael@0: if (destCapacity < 0 || (destCapacity > 0 && dest == NULL)) { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR; michael@0: return 0; michael@0: } michael@0: michael@0: #ifdef UCOL_DEBUG michael@0: printf("coll->reorderCodesLength = %d\n", coll->reorderCodesLength); michael@0: 
printf("coll->defaultReorderCodesLength = %d\n", coll->defaultReorderCodesLength); michael@0: #endif michael@0: michael@0: if (coll->reorderCodesLength > destCapacity) { michael@0: *status = U_BUFFER_OVERFLOW_ERROR; michael@0: return coll->reorderCodesLength; michael@0: } michael@0: for (int32_t i = 0; i < coll->reorderCodesLength; i++) { michael@0: dest[i] = coll->reorderCodes[i]; michael@0: } michael@0: return coll->reorderCodesLength; michael@0: } michael@0: michael@0: U_CAPI void U_EXPORT2 michael@0: ucol_setReorderCodes(UCollator* coll, michael@0: const int32_t* reorderCodes, michael@0: int32_t reorderCodesLength, michael@0: UErrorCode *status) { michael@0: if (U_FAILURE(*status)) { michael@0: return; michael@0: } michael@0: michael@0: if (reorderCodesLength < 0 || (reorderCodesLength > 0 && reorderCodes == NULL)) { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR; michael@0: return; michael@0: } michael@0: michael@0: if(coll->delegate!=NULL) { michael@0: ((Collator*)coll->delegate)->setReorderCodes(reorderCodes, reorderCodesLength, *status); michael@0: return; michael@0: } michael@0: michael@0: if (coll->reorderCodes != NULL && coll->freeReorderCodesOnClose == TRUE) { michael@0: uprv_free(coll->reorderCodes); michael@0: } michael@0: coll->reorderCodes = NULL; michael@0: coll->freeReorderCodesOnClose = FALSE; michael@0: coll->reorderCodesLength = 0; michael@0: if (reorderCodesLength == 0) { michael@0: if (coll->leadBytePermutationTable != NULL && coll->freeLeadBytePermutationTableOnClose == TRUE) { michael@0: uprv_free(coll->leadBytePermutationTable); michael@0: } michael@0: coll->leadBytePermutationTable = NULL; michael@0: coll->freeLeadBytePermutationTableOnClose = FALSE; michael@0: return; michael@0: } michael@0: coll->reorderCodes = (int32_t*) uprv_malloc(reorderCodesLength * sizeof(int32_t)); michael@0: if (coll->reorderCodes == NULL) { michael@0: *status = U_MEMORY_ALLOCATION_ERROR; michael@0: return; michael@0: } michael@0: coll->freeReorderCodesOnClose = TRUE; michael@0: for (int32_t i = 0; i < reorderCodesLength; i++) { michael@0: coll->reorderCodes[i] = reorderCodes[i]; michael@0: } michael@0: coll->reorderCodesLength = reorderCodesLength; michael@0: ucol_buildPermutationTable(coll, status); michael@0: } michael@0: michael@0: U_CAPI int32_t U_EXPORT2 michael@0: ucol_getEquivalentReorderCodes(int32_t reorderCode, michael@0: int32_t* dest, michael@0: int32_t destCapacity, michael@0: UErrorCode *pErrorCode) { michael@0: bool equivalentCodesSet[USCRIPT_CODE_LIMIT]; michael@0: uint16_t leadBytes[256]; michael@0: int leadBytesCount; michael@0: int leadByteIndex; michael@0: int16_t reorderCodesForLeadByte[USCRIPT_CODE_LIMIT]; michael@0: int reorderCodesForLeadByteCount; michael@0: int reorderCodeIndex; michael@0: michael@0: int32_t equivalentCodesCount = 0; michael@0: int setIndex; michael@0: michael@0: if (U_FAILURE(*pErrorCode)) { michael@0: return 0; michael@0: } michael@0: michael@0: if (destCapacity < 0 || (destCapacity > 0 && dest == NULL)) { michael@0: *pErrorCode = U_ILLEGAL_ARGUMENT_ERROR; michael@0: return 0; michael@0: } michael@0: michael@0: uprv_memset(equivalentCodesSet, 0, USCRIPT_CODE_LIMIT * sizeof(bool)); michael@0: michael@0: const UCollator* uca = ucol_initUCA(pErrorCode); michael@0: if (U_FAILURE(*pErrorCode)) { michael@0: return 0; michael@0: } michael@0: leadBytesCount = ucol_getLeadBytesForReorderCode(uca, reorderCode, leadBytes, 256); michael@0: for (leadByteIndex = 0; leadByteIndex < leadBytesCount; leadByteIndex++) { michael@0: reorderCodesForLeadByteCount = 
ucol_getReorderCodesForLeadByte( michael@0: uca, leadBytes[leadByteIndex], reorderCodesForLeadByte, USCRIPT_CODE_LIMIT); michael@0: for (reorderCodeIndex = 0; reorderCodeIndex < reorderCodesForLeadByteCount; reorderCodeIndex++) { michael@0: equivalentCodesSet[reorderCodesForLeadByte[reorderCodeIndex]] = true; michael@0: } michael@0: } michael@0: michael@0: for (setIndex = 0; setIndex < USCRIPT_CODE_LIMIT; setIndex++) { michael@0: if (equivalentCodesSet[setIndex] == true) { michael@0: equivalentCodesCount++; michael@0: } michael@0: } michael@0: michael@0: if (destCapacity == 0) { michael@0: return equivalentCodesCount; michael@0: } michael@0: michael@0: equivalentCodesCount = 0; michael@0: for (setIndex = 0; setIndex < USCRIPT_CODE_LIMIT; setIndex++) { michael@0: if (equivalentCodesSet[setIndex] == true) { michael@0: dest[equivalentCodesCount++] = setIndex; michael@0: if (equivalentCodesCount >= destCapacity) { michael@0: break; michael@0: } michael@0: } michael@0: } michael@0: return equivalentCodesCount; michael@0: } michael@0: michael@0: michael@0: /****************************************************************************/ michael@0: /* Following are misc functions */ michael@0: /* there are new APIs and some compatibility APIs */ michael@0: /****************************************************************************/ michael@0: michael@0: U_CAPI void U_EXPORT2 michael@0: ucol_getVersion(const UCollator* coll, michael@0: UVersionInfo versionInfo) michael@0: { michael@0: if(coll->delegate!=NULL) { michael@0: ((const Collator*)coll->delegate)->getVersion(versionInfo); michael@0: return; michael@0: } michael@0: /* RunTime version */ michael@0: uint8_t rtVersion = UCOL_RUNTIME_VERSION; michael@0: /* Builder version*/ michael@0: uint8_t bdVersion = coll->image->version[0]; michael@0: michael@0: /* Charset Version. Need to get the version from cnv files michael@0: * makeconv should populate cnv files with version and michael@0: * an api has to be provided in ucnv.h to obtain this version michael@0: */ michael@0: uint8_t csVersion = 0; michael@0: michael@0: /* combine the version info */ michael@0: uint16_t cmbVersion = (uint16_t)((rtVersion<<11) | (bdVersion<<6) | (csVersion)); michael@0: michael@0: /* Tailoring rules */ michael@0: versionInfo[0] = (uint8_t)(cmbVersion>>8); michael@0: versionInfo[1] = (uint8_t)cmbVersion; michael@0: versionInfo[2] = coll->image->version[1]; michael@0: if(coll->UCA) { michael@0: /* Include the minor number when getting the UCA version. 
(major & 1f) << 3 | (minor & 7) */ michael@0: versionInfo[3] = (coll->UCA->image->UCAVersion[0] & 0x1f) << 3 | (coll->UCA->image->UCAVersion[1] & 0x07); michael@0: } else { michael@0: versionInfo[3] = 0; michael@0: } michael@0: } michael@0: michael@0: michael@0: /* This internal API checks whether a character is tailored or not */ michael@0: U_CAPI UBool U_EXPORT2 michael@0: ucol_isTailored(const UCollator *coll, const UChar u, UErrorCode *status) { michael@0: if(U_FAILURE(*status) || coll == NULL || coll == coll->UCA) { michael@0: return FALSE; michael@0: } michael@0: michael@0: uint32_t CE = UCOL_NOT_FOUND; michael@0: const UChar *ContractionStart = NULL; michael@0: if(u < 0x100) { /* latin-1 */ michael@0: CE = coll->latinOneMapping[u]; michael@0: if(coll->UCA && CE == coll->UCA->latinOneMapping[u]) { michael@0: return FALSE; michael@0: } michael@0: } else { /* regular */ michael@0: CE = UTRIE_GET32_FROM_LEAD(&coll->mapping, u); michael@0: } michael@0: michael@0: if(isContraction(CE)) { michael@0: ContractionStart = (UChar *)coll->image+getContractOffset(CE); michael@0: CE = *(coll->contractionCEs + (ContractionStart- coll->contractionIndex)); michael@0: } michael@0: michael@0: return (UBool)(CE != UCOL_NOT_FOUND); michael@0: } michael@0: michael@0: michael@0: /****************************************************************************/ michael@0: /* Following are the string compare functions */ michael@0: /* */ michael@0: /****************************************************************************/ michael@0: michael@0: michael@0: /* ucol_checkIdent internal function. Does byte level string compare. */ michael@0: /* Used by strcoll if strength == identical and strings */ michael@0: /* are otherwise equal. */ michael@0: /* */ michael@0: /* Comparison must be done on NFD normalized strings. */ michael@0: /* FCD is not good enough. */ michael@0: michael@0: static michael@0: UCollationResult ucol_checkIdent(collIterate *sColl, collIterate *tColl, UBool normalize, UErrorCode *status) michael@0: { michael@0: // When we arrive here, we can have normal strings or UCharIterators. Currently they are both michael@0: // of same type, but that doesn't really mean that it will stay that way. michael@0: int32_t comparison; michael@0: michael@0: if (sColl->flags & UCOL_USE_ITERATOR) { michael@0: // The division for the array length may truncate the array size to michael@0: // a little less than UNORM_ITER_SIZE, but that size is dimensioned too high michael@0: // for all platforms anyway. michael@0: UAlignedMemory stackNormIter1[UNORM_ITER_SIZE/sizeof(UAlignedMemory)]; michael@0: UAlignedMemory stackNormIter2[UNORM_ITER_SIZE/sizeof(UAlignedMemory)]; michael@0: UNormIterator *sNIt = NULL, *tNIt = NULL; michael@0: sNIt = unorm_openIter(stackNormIter1, sizeof(stackNormIter1), status); michael@0: tNIt = unorm_openIter(stackNormIter2, sizeof(stackNormIter2), status); michael@0: sColl->iterator->move(sColl->iterator, 0, UITER_START); michael@0: tColl->iterator->move(tColl->iterator, 0, UITER_START); michael@0: UCharIterator *sIt = unorm_setIter(sNIt, sColl->iterator, UNORM_NFD, status); michael@0: UCharIterator *tIt = unorm_setIter(tNIt, tColl->iterator, UNORM_NFD, status); michael@0: comparison = u_strCompareIter(sIt, tIt, TRUE); michael@0: unorm_closeIter(sNIt); michael@0: unorm_closeIter(tNIt); michael@0: } else { michael@0: int32_t sLen = (sColl->flags & UCOL_ITER_HASLEN) ? 
(int32_t)(sColl->endp - sColl->string) : -1; michael@0: const UChar *sBuf = sColl->string; michael@0: int32_t tLen = (tColl->flags & UCOL_ITER_HASLEN) ? (int32_t)(tColl->endp - tColl->string) : -1; michael@0: const UChar *tBuf = tColl->string; michael@0: michael@0: if (normalize) { michael@0: *status = U_ZERO_ERROR; michael@0: // Note: We could use Normalizer::compare() or similar, but for short strings michael@0: // which may not be in FCD it might be faster to just NFD them. michael@0: // Note: spanQuickCheckYes() + normalizeSecondAndAppend() rather than michael@0: // NFD'ing immediately might be faster for long strings, michael@0: // but string comparison is usually done on relatively short strings. michael@0: sColl->nfd->normalize(UnicodeString((sColl->flags & UCOL_ITER_HASLEN) == 0, sBuf, sLen), michael@0: sColl->writableBuffer, michael@0: *status); michael@0: tColl->nfd->normalize(UnicodeString((tColl->flags & UCOL_ITER_HASLEN) == 0, tBuf, tLen), michael@0: tColl->writableBuffer, michael@0: *status); michael@0: if(U_FAILURE(*status)) { michael@0: return UCOL_LESS; michael@0: } michael@0: comparison = sColl->writableBuffer.compareCodePointOrder(tColl->writableBuffer); michael@0: } else { michael@0: comparison = u_strCompare(sBuf, sLen, tBuf, tLen, TRUE); michael@0: } michael@0: } michael@0: michael@0: if (comparison < 0) { michael@0: return UCOL_LESS; michael@0: } else if (comparison == 0) { michael@0: return UCOL_EQUAL; michael@0: } else /* comparison > 0 */ { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: michael@0: /* CEBuf - A struct and some inline functions to handle the saving */ michael@0: /* of CEs in a buffer within ucol_strcoll */ michael@0: michael@0: #define UCOL_CEBUF_SIZE 512 michael@0: typedef struct ucol_CEBuf { michael@0: uint32_t *buf; michael@0: uint32_t *endp; michael@0: uint32_t *pos; michael@0: uint32_t localArray[UCOL_CEBUF_SIZE]; michael@0: } ucol_CEBuf; michael@0: michael@0: michael@0: static michael@0: inline void UCOL_INIT_CEBUF(ucol_CEBuf *b) { michael@0: (b)->buf = (b)->pos = (b)->localArray; michael@0: (b)->endp = (b)->buf + UCOL_CEBUF_SIZE; michael@0: } michael@0: michael@0: static michael@0: void ucol_CEBuf_Expand(ucol_CEBuf *b, collIterate *ci, UErrorCode *status) { michael@0: uint32_t oldSize; michael@0: uint32_t newSize; michael@0: uint32_t *newBuf; michael@0: michael@0: ci->flags |= UCOL_ITER_ALLOCATED; michael@0: oldSize = (uint32_t)(b->pos - b->buf); michael@0: newSize = oldSize * 2; michael@0: newBuf = (uint32_t *)uprv_malloc(newSize * sizeof(uint32_t)); michael@0: if(newBuf == NULL) { michael@0: *status = U_MEMORY_ALLOCATION_ERROR; michael@0: } michael@0: else { michael@0: uprv_memcpy(newBuf, b->buf, oldSize * sizeof(uint32_t)); michael@0: if (b->buf != b->localArray) { michael@0: uprv_free(b->buf); michael@0: } michael@0: b->buf = newBuf; michael@0: b->endp = b->buf + newSize; michael@0: b->pos = b->buf + oldSize; michael@0: } michael@0: } michael@0: michael@0: static michael@0: inline void UCOL_CEBUF_PUT(ucol_CEBuf *b, uint32_t ce, collIterate *ci, UErrorCode *status) { michael@0: if (b->pos == b->endp) { michael@0: ucol_CEBuf_Expand(b, ci, status); michael@0: } michael@0: if (U_SUCCESS(*status)) { michael@0: *(b)->pos++ = ce; michael@0: } michael@0: } michael@0: michael@0: /* This is a trick string compare function that goes in and uses sortkeys to compare */ michael@0: /* It is used when compare gets in trouble and needs to bail out */ michael@0: static UCollationResult ucol_compareUsingSortKeys(collIterate *sColl, 
michael@0: collIterate *tColl, michael@0: UErrorCode *status) michael@0: { michael@0: uint8_t sourceKey[UCOL_MAX_BUFFER], targetKey[UCOL_MAX_BUFFER]; michael@0: uint8_t *sourceKeyP = sourceKey; michael@0: uint8_t *targetKeyP = targetKey; michael@0: int32_t sourceKeyLen = UCOL_MAX_BUFFER, targetKeyLen = UCOL_MAX_BUFFER; michael@0: const UCollator *coll = sColl->coll; michael@0: const UChar *source = NULL; michael@0: const UChar *target = NULL; michael@0: int32_t result = UCOL_EQUAL; michael@0: UnicodeString sourceString, targetString; michael@0: int32_t sourceLength; michael@0: int32_t targetLength; michael@0: michael@0: if(sColl->flags & UCOL_USE_ITERATOR) { michael@0: sColl->iterator->move(sColl->iterator, 0, UITER_START); michael@0: tColl->iterator->move(tColl->iterator, 0, UITER_START); michael@0: UChar32 c; michael@0: while((c=sColl->iterator->next(sColl->iterator))>=0) { michael@0: sourceString.append((UChar)c); michael@0: } michael@0: while((c=tColl->iterator->next(tColl->iterator))>=0) { michael@0: targetString.append((UChar)c); michael@0: } michael@0: source = sourceString.getBuffer(); michael@0: sourceLength = sourceString.length(); michael@0: target = targetString.getBuffer(); michael@0: targetLength = targetString.length(); michael@0: } else { // no iterators michael@0: sourceLength = (sColl->flags&UCOL_ITER_HASLEN)?(int32_t)(sColl->endp-sColl->string):-1; michael@0: targetLength = (tColl->flags&UCOL_ITER_HASLEN)?(int32_t)(tColl->endp-tColl->string):-1; michael@0: source = sColl->string; michael@0: target = tColl->string; michael@0: } michael@0: michael@0: michael@0: michael@0: sourceKeyLen = ucol_getSortKey(coll, source, sourceLength, sourceKeyP, sourceKeyLen); michael@0: if(sourceKeyLen > UCOL_MAX_BUFFER) { michael@0: sourceKeyP = (uint8_t*)uprv_malloc(sourceKeyLen*sizeof(uint8_t)); michael@0: if(sourceKeyP == NULL) { michael@0: *status = U_MEMORY_ALLOCATION_ERROR; michael@0: goto cleanup_and_do_compare; michael@0: } michael@0: sourceKeyLen = ucol_getSortKey(coll, source, sourceLength, sourceKeyP, sourceKeyLen); michael@0: } michael@0: michael@0: targetKeyLen = ucol_getSortKey(coll, target, targetLength, targetKeyP, targetKeyLen); michael@0: if(targetKeyLen > UCOL_MAX_BUFFER) { michael@0: targetKeyP = (uint8_t*)uprv_malloc(targetKeyLen*sizeof(uint8_t)); michael@0: if(targetKeyP == NULL) { michael@0: *status = U_MEMORY_ALLOCATION_ERROR; michael@0: goto cleanup_and_do_compare; michael@0: } michael@0: targetKeyLen = ucol_getSortKey(coll, target, targetLength, targetKeyP, targetKeyLen); michael@0: } michael@0: michael@0: result = uprv_strcmp((const char*)sourceKeyP, (const char*)targetKeyP); michael@0: michael@0: cleanup_and_do_compare: michael@0: if(sourceKeyP != NULL && sourceKeyP != sourceKey) { michael@0: uprv_free(sourceKeyP); michael@0: } michael@0: michael@0: if(targetKeyP != NULL && targetKeyP != targetKey) { michael@0: uprv_free(targetKeyP); michael@0: } michael@0: michael@0: if(result<0) { michael@0: return UCOL_LESS; michael@0: } else if(result>0) { michael@0: return UCOL_GREATER; michael@0: } else { michael@0: return UCOL_EQUAL; michael@0: } michael@0: } michael@0: michael@0: michael@0: static UCollationResult michael@0: ucol_strcollRegular(collIterate *sColl, collIterate *tColl, UErrorCode *status) michael@0: { michael@0: U_ALIGN_CODE(16); michael@0: michael@0: const UCollator *coll = sColl->coll; michael@0: michael@0: michael@0: // setting up the collator parameters michael@0: UColAttributeValue strength = coll->strength; michael@0: UBool initialCheckSecTer = 
(strength >= UCOL_SECONDARY); michael@0: michael@0: UBool checkSecTer = initialCheckSecTer; michael@0: UBool checkTertiary = (strength >= UCOL_TERTIARY); michael@0: UBool checkQuad = (strength >= UCOL_QUATERNARY); michael@0: UBool checkIdent = (strength == UCOL_IDENTICAL); michael@0: UBool checkCase = (coll->caseLevel == UCOL_ON); michael@0: UBool isFrenchSec = (coll->frenchCollation == UCOL_ON) && checkSecTer; michael@0: UBool shifted = (coll->alternateHandling == UCOL_SHIFTED); michael@0: UBool qShifted = shifted && checkQuad; michael@0: UBool doHiragana = (coll->hiraganaQ == UCOL_ON) && checkQuad; michael@0: michael@0: if(doHiragana && shifted) { michael@0: return (ucol_compareUsingSortKeys(sColl, tColl, status)); michael@0: } michael@0: uint8_t caseSwitch = coll->caseSwitch; michael@0: uint8_t tertiaryMask = coll->tertiaryMask; michael@0: michael@0: // This is the lowest primary value that will not be ignored if shifted michael@0: uint32_t LVT = (shifted)?(coll->variableTopValue<<16):0; michael@0: michael@0: UCollationResult result = UCOL_EQUAL; michael@0: UCollationResult hirResult = UCOL_EQUAL; michael@0: michael@0: // Preparing the CE buffers. They will be filled during the primary phase michael@0: ucol_CEBuf sCEs; michael@0: ucol_CEBuf tCEs; michael@0: UCOL_INIT_CEBUF(&sCEs); michael@0: UCOL_INIT_CEBUF(&tCEs); michael@0: michael@0: uint32_t secS = 0, secT = 0; michael@0: uint32_t sOrder=0, tOrder=0; michael@0: michael@0: // Non shifted primary processing is quite simple michael@0: if(!shifted) { michael@0: for(;;) { michael@0: // We fetch CEs until we hit a non ignorable primary or end. michael@0: uint32_t sPrimary; michael@0: do { michael@0: // We get the next CE michael@0: sOrder = ucol_IGetNextCE(coll, sColl, status); michael@0: // Stuff it in the buffer michael@0: UCOL_CEBUF_PUT(&sCEs, sOrder, sColl, status); michael@0: // And keep just the primary part. michael@0: sPrimary = sOrder & UCOL_PRIMARYMASK; michael@0: } while(sPrimary == 0); michael@0: michael@0: // see the comments on the above block michael@0: uint32_t tPrimary; michael@0: do { michael@0: tOrder = ucol_IGetNextCE(coll, tColl, status); michael@0: UCOL_CEBUF_PUT(&tCEs, tOrder, tColl, status); michael@0: tPrimary = tOrder & UCOL_PRIMARYMASK; michael@0: } while(tPrimary == 0); michael@0: michael@0: // if both primaries are the same michael@0: if(sPrimary == tPrimary) { michael@0: // and there are no more CEs, we advance to the next level michael@0: if(sPrimary == UCOL_NO_MORE_CES_PRIMARY) { michael@0: break; michael@0: } michael@0: if(doHiragana && hirResult == UCOL_EQUAL) { michael@0: if((sColl->flags & UCOL_WAS_HIRAGANA) != (tColl->flags & UCOL_WAS_HIRAGANA)) { michael@0: hirResult = ((sColl->flags & UCOL_WAS_HIRAGANA) > (tColl->flags & UCOL_WAS_HIRAGANA)) michael@0: ? UCOL_LESS:UCOL_GREATER; michael@0: } michael@0: } michael@0: } else { michael@0: // only need to check one for continuation michael@0: // if one is then the other must be or the preceding CE would be a prefix of the other michael@0: if (coll->leadBytePermutationTable != NULL && !isContinuation(sOrder)) { michael@0: sPrimary = (coll->leadBytePermutationTable[sPrimary>>24] << 24) | (sPrimary & 0x00FFFFFF); michael@0: tPrimary = (coll->leadBytePermutationTable[tPrimary>>24] << 24) | (tPrimary & 0x00FFFFFF); michael@0: } michael@0: // if two primaries are different, we are done michael@0: result = (sPrimary < tPrimary) ? UCOL_LESS: UCOL_GREATER; michael@0: goto commonReturn; michael@0: } michael@0: } // no primary difference... 
do the rest from the buffers michael@0: } else { // shifted - do a slightly more complicated processing :) michael@0: for(;;) { michael@0: UBool sInShifted = FALSE; michael@0: UBool tInShifted = FALSE; michael@0: // This version of code can be refactored. However, it seems easier to understand this way. michael@0: // Source loop. Same as the target loop. michael@0: for(;;) { michael@0: sOrder = ucol_IGetNextCE(coll, sColl, status); michael@0: if(sOrder == UCOL_NO_MORE_CES) { michael@0: UCOL_CEBUF_PUT(&sCEs, sOrder, sColl, status); michael@0: break; michael@0: } else if(sOrder == 0 || (sInShifted && (sOrder & UCOL_PRIMARYMASK) == 0)) { michael@0: /* UCA amendment - ignore ignorables that follow shifted code points */ michael@0: continue; michael@0: } else if(isContinuation(sOrder)) { michael@0: if((sOrder & UCOL_PRIMARYMASK) > 0) { /* There is primary value */ michael@0: if(sInShifted) { michael@0: sOrder = (sOrder & UCOL_PRIMARYMASK) | 0xC0; /* preserve interesting continuation */ michael@0: UCOL_CEBUF_PUT(&sCEs, sOrder, sColl, status); michael@0: continue; michael@0: } else { michael@0: UCOL_CEBUF_PUT(&sCEs, sOrder, sColl, status); michael@0: break; michael@0: } michael@0: } else { /* Just lower level values */ michael@0: if(sInShifted) { michael@0: continue; michael@0: } else { michael@0: UCOL_CEBUF_PUT(&sCEs, sOrder, sColl, status); michael@0: continue; michael@0: } michael@0: } michael@0: } else { /* regular */ michael@0: if(coll->leadBytePermutationTable != NULL){ michael@0: sOrder = (coll->leadBytePermutationTable[sOrder>>24] << 24) | (sOrder & 0x00FFFFFF); michael@0: } michael@0: if((sOrder & UCOL_PRIMARYMASK) > LVT) { michael@0: UCOL_CEBUF_PUT(&sCEs, sOrder, sColl, status); michael@0: break; michael@0: } else { michael@0: if((sOrder & UCOL_PRIMARYMASK) > 0) { michael@0: sInShifted = TRUE; michael@0: sOrder &= UCOL_PRIMARYMASK; michael@0: UCOL_CEBUF_PUT(&sCEs, sOrder, sColl, status); michael@0: continue; michael@0: } else { michael@0: UCOL_CEBUF_PUT(&sCEs, sOrder, sColl, status); michael@0: sInShifted = FALSE; michael@0: continue; michael@0: } michael@0: } michael@0: } michael@0: } michael@0: sOrder &= UCOL_PRIMARYMASK; michael@0: sInShifted = FALSE; michael@0: michael@0: for(;;) { michael@0: tOrder = ucol_IGetNextCE(coll, tColl, status); michael@0: if(tOrder == UCOL_NO_MORE_CES) { michael@0: UCOL_CEBUF_PUT(&tCEs, tOrder, tColl, status); michael@0: break; michael@0: } else if(tOrder == 0 || (tInShifted && (tOrder & UCOL_PRIMARYMASK) == 0)) { michael@0: /* UCA amendment - ignore ignorables that follow shifted code points */ michael@0: continue; michael@0: } else if(isContinuation(tOrder)) { michael@0: if((tOrder & UCOL_PRIMARYMASK) > 0) { /* There is primary value */ michael@0: if(tInShifted) { michael@0: tOrder = (tOrder & UCOL_PRIMARYMASK) | 0xC0; /* preserve interesting continuation */ michael@0: UCOL_CEBUF_PUT(&tCEs, tOrder, tColl, status); michael@0: continue; michael@0: } else { michael@0: UCOL_CEBUF_PUT(&tCEs, tOrder, tColl, status); michael@0: break; michael@0: } michael@0: } else { /* Just lower level values */ michael@0: if(tInShifted) { michael@0: continue; michael@0: } else { michael@0: UCOL_CEBUF_PUT(&tCEs, tOrder, tColl, status); michael@0: continue; michael@0: } michael@0: } michael@0: } else { /* regular */ michael@0: if(coll->leadBytePermutationTable != NULL){ michael@0: tOrder = (coll->leadBytePermutationTable[tOrder>>24] << 24) | (tOrder & 0x00FFFFFF); michael@0: } michael@0: if((tOrder & UCOL_PRIMARYMASK) > LVT) { michael@0: UCOL_CEBUF_PUT(&tCEs, tOrder, tColl, 
status); michael@0: break; michael@0: } else { michael@0: if((tOrder & UCOL_PRIMARYMASK) > 0) { michael@0: tInShifted = TRUE; michael@0: tOrder &= UCOL_PRIMARYMASK; michael@0: UCOL_CEBUF_PUT(&tCEs, tOrder, tColl, status); michael@0: continue; michael@0: } else { michael@0: UCOL_CEBUF_PUT(&tCEs, tOrder, tColl, status); michael@0: tInShifted = FALSE; michael@0: continue; michael@0: } michael@0: } michael@0: } michael@0: } michael@0: tOrder &= UCOL_PRIMARYMASK; michael@0: tInShifted = FALSE; michael@0: michael@0: if(sOrder == tOrder) { michael@0: /* michael@0: if(doHiragana && hirResult == UCOL_EQUAL) { michael@0: if((sColl.flags & UCOL_WAS_HIRAGANA) != (tColl.flags & UCOL_WAS_HIRAGANA)) { michael@0: hirResult = ((sColl.flags & UCOL_WAS_HIRAGANA) > (tColl.flags & UCOL_WAS_HIRAGANA)) michael@0: ? UCOL_LESS:UCOL_GREATER; michael@0: } michael@0: } michael@0: */ michael@0: if(sOrder == UCOL_NO_MORE_CES_PRIMARY) { michael@0: break; michael@0: } else { michael@0: sOrder = 0; michael@0: tOrder = 0; michael@0: continue; michael@0: } michael@0: } else { michael@0: result = (sOrder < tOrder) ? UCOL_LESS : UCOL_GREATER; michael@0: goto commonReturn; michael@0: } michael@0: } /* no primary difference... do the rest from the buffers */ michael@0: } michael@0: michael@0: /* now, we're gonna reexamine collected CEs */ michael@0: uint32_t *sCE; michael@0: uint32_t *tCE; michael@0: michael@0: /* This is the secondary level of comparison */ michael@0: if(checkSecTer) { michael@0: if(!isFrenchSec) { /* normal */ michael@0: sCE = sCEs.buf; michael@0: tCE = tCEs.buf; michael@0: for(;;) { michael@0: while (secS == 0) { michael@0: secS = *(sCE++) & UCOL_SECONDARYMASK; michael@0: } michael@0: michael@0: while(secT == 0) { michael@0: secT = *(tCE++) & UCOL_SECONDARYMASK; michael@0: } michael@0: michael@0: if(secS == secT) { michael@0: if(secS == UCOL_NO_MORE_CES_SECONDARY) { michael@0: break; michael@0: } else { michael@0: secS = 0; secT = 0; michael@0: continue; michael@0: } michael@0: } else { michael@0: result = (secS < secT) ? UCOL_LESS : UCOL_GREATER; michael@0: goto commonReturn; michael@0: } michael@0: } michael@0: } else { /* do the French */ michael@0: uint32_t *sCESave = NULL; michael@0: uint32_t *tCESave = NULL; michael@0: sCE = sCEs.pos-2; /* this could also be sCEs-- if needs to be optimized */ michael@0: tCE = tCEs.pos-2; michael@0: for(;;) { michael@0: while (secS == 0 && sCE >= sCEs.buf) { michael@0: if(sCESave == NULL) { michael@0: secS = *(sCE--); michael@0: if(isContinuation(secS)) { michael@0: while(isContinuation(secS = *(sCE--))) michael@0: ; michael@0: /* after this, secS has the start of continuation, and sCEs points before that */ michael@0: sCESave = sCE; /* we save it, so that we know where to come back AND that we need to go forward */ michael@0: sCE+=2; /* need to point to the first continuation CP */ michael@0: /* However, now you can just continue doing stuff */ michael@0: } michael@0: } else { michael@0: secS = *(sCE++); michael@0: if(!isContinuation(secS)) { /* This means we have finished with this cont */ michael@0: sCE = sCESave; /* reset the pointer to before continuation */ michael@0: sCESave = NULL; michael@0: secS = 0; /* Fetch a fresh CE before the continuation sequence. 
*/ michael@0: continue; michael@0: } michael@0: } michael@0: secS &= UCOL_SECONDARYMASK; /* remove the continuation bit */ michael@0: } michael@0: michael@0: while(secT == 0 && tCE >= tCEs.buf) { michael@0: if(tCESave == NULL) { michael@0: secT = *(tCE--); michael@0: if(isContinuation(secT)) { michael@0: while(isContinuation(secT = *(tCE--))) michael@0: ; michael@0: /* after this, secS has the start of continuation, and sCEs points before that */ michael@0: tCESave = tCE; /* we save it, so that we know where to come back AND that we need to go forward */ michael@0: tCE+=2; /* need to point to the first continuation CP */ michael@0: /* However, now you can just continue doing stuff */ michael@0: } michael@0: } else { michael@0: secT = *(tCE++); michael@0: if(!isContinuation(secT)) { /* This means we have finished with this cont */ michael@0: tCE = tCESave; /* reset the pointer to before continuation */ michael@0: tCESave = NULL; michael@0: secT = 0; /* Fetch a fresh CE before the continuation sequence. */ michael@0: continue; michael@0: } michael@0: } michael@0: secT &= UCOL_SECONDARYMASK; /* remove the continuation bit */ michael@0: } michael@0: michael@0: if(secS == secT) { michael@0: if(secS == UCOL_NO_MORE_CES_SECONDARY || (sCE < sCEs.buf && tCE < tCEs.buf)) { michael@0: break; michael@0: } else { michael@0: secS = 0; secT = 0; michael@0: continue; michael@0: } michael@0: } else { michael@0: result = (secS < secT) ? UCOL_LESS : UCOL_GREATER; michael@0: goto commonReturn; michael@0: } michael@0: } michael@0: } michael@0: } michael@0: michael@0: /* doing the case bit */ michael@0: if(checkCase) { michael@0: sCE = sCEs.buf; michael@0: tCE = tCEs.buf; michael@0: for(;;) { michael@0: while((secS & UCOL_REMOVE_CASE) == 0) { michael@0: if(!isContinuation(*sCE++)) { michael@0: secS =*(sCE-1); michael@0: if(((secS & UCOL_PRIMARYMASK) != 0) || strength > UCOL_PRIMARY) { michael@0: // primary ignorables should not be considered on the case level when the strength is primary michael@0: // otherwise, the CEs stop being well-formed michael@0: secS &= UCOL_TERT_CASE_MASK; michael@0: secS ^= caseSwitch; michael@0: } else { michael@0: secS = 0; michael@0: } michael@0: } else { michael@0: secS = 0; michael@0: } michael@0: } michael@0: michael@0: while((secT & UCOL_REMOVE_CASE) == 0) { michael@0: if(!isContinuation(*tCE++)) { michael@0: secT = *(tCE-1); michael@0: if(((secT & UCOL_PRIMARYMASK) != 0) || strength > UCOL_PRIMARY) { michael@0: // primary ignorables should not be considered on the case level when the strength is primary michael@0: // otherwise, the CEs stop being well-formed michael@0: secT &= UCOL_TERT_CASE_MASK; michael@0: secT ^= caseSwitch; michael@0: } else { michael@0: secT = 0; michael@0: } michael@0: } else { michael@0: secT = 0; michael@0: } michael@0: } michael@0: michael@0: if((secS & UCOL_CASE_BIT_MASK) < (secT & UCOL_CASE_BIT_MASK)) { michael@0: result = UCOL_LESS; michael@0: goto commonReturn; michael@0: } else if((secS & UCOL_CASE_BIT_MASK) > (secT & UCOL_CASE_BIT_MASK)) { michael@0: result = UCOL_GREATER; michael@0: goto commonReturn; michael@0: } michael@0: michael@0: if((secS & UCOL_REMOVE_CASE) == UCOL_NO_MORE_CES_TERTIARY || (secT & UCOL_REMOVE_CASE) == UCOL_NO_MORE_CES_TERTIARY ) { michael@0: break; michael@0: } else { michael@0: secS = 0; michael@0: secT = 0; michael@0: } michael@0: } michael@0: } michael@0: michael@0: /* Tertiary level */ michael@0: if(checkTertiary) { michael@0: secS = 0; michael@0: secT = 0; michael@0: sCE = sCEs.buf; michael@0: tCE = tCEs.buf; 
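        /*
         * Illustrative sketch only (locale "en_US" and the sample words are
         * arbitrary, error checking is omitted, and the expected results assume
         * the default tailoring): the strength setting decides which of these
         * buffered passes can still produce a difference.
         *
         *     #include "unicode/ucol.h"
         *
         *     UErrorCode ec = U_ZERO_ERROR;
         *     UCollator *c = ucol_open("en_US", &ec);
         *     static const UChar role[]  = { 0x72, 0x6F, 0x6C, 0x65, 0 };    // "role"
         *     static const UChar Role[]  = { 0x52, 0x6F, 0x6C, 0x65, 0 };    // "Role"
         *     static const UChar rolec[] = { 0x72, 0x00F4, 0x6C, 0x65, 0 };  // "rôle"
         *
         *     ucol_setStrength(c, UCOL_PRIMARY);
         *     ucol_strcoll(c, role, -1, rolec, -1);   // accents ignored: UCOL_EQUAL
         *     ucol_setStrength(c, UCOL_SECONDARY);
         *     ucol_strcoll(c, role, -1, rolec, -1);   // accent difference: UCOL_LESS
         *     ucol_strcoll(c, role, -1, Role, -1);    // case still ignored: UCOL_EQUAL
         *     ucol_setStrength(c, UCOL_TERTIARY);
         *     ucol_strcoll(c, role, -1, Role, -1);    // case difference: UCOL_LESS
         *     ucol_close(c);
         */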
michael@0: for(;;) { michael@0: while((secS & UCOL_REMOVE_CASE) == 0) { michael@0: sOrder = *sCE++; michael@0: secS = sOrder & tertiaryMask; michael@0: if(!isContinuation(sOrder)) { michael@0: secS ^= caseSwitch; michael@0: } else { michael@0: secS &= UCOL_REMOVE_CASE; michael@0: } michael@0: } michael@0: michael@0: while((secT & UCOL_REMOVE_CASE) == 0) { michael@0: tOrder = *tCE++; michael@0: secT = tOrder & tertiaryMask; michael@0: if(!isContinuation(tOrder)) { michael@0: secT ^= caseSwitch; michael@0: } else { michael@0: secT &= UCOL_REMOVE_CASE; michael@0: } michael@0: } michael@0: michael@0: if(secS == secT) { michael@0: if((secS & UCOL_REMOVE_CASE) == 1) { michael@0: break; michael@0: } else { michael@0: secS = 0; secT = 0; michael@0: continue; michael@0: } michael@0: } else { michael@0: result = (secS < secT) ? UCOL_LESS : UCOL_GREATER; michael@0: goto commonReturn; michael@0: } michael@0: } michael@0: } michael@0: michael@0: michael@0: if(qShifted /*checkQuad*/) { michael@0: UBool sInShifted = TRUE; michael@0: UBool tInShifted = TRUE; michael@0: secS = 0; michael@0: secT = 0; michael@0: sCE = sCEs.buf; michael@0: tCE = tCEs.buf; michael@0: for(;;) { michael@0: while((secS == 0 && secS != UCOL_NO_MORE_CES) || (isContinuation(secS) && !sInShifted)) { michael@0: secS = *(sCE++); michael@0: if(isContinuation(secS)) { michael@0: if(!sInShifted) { michael@0: continue; michael@0: } michael@0: } else if(secS > LVT || (secS & UCOL_PRIMARYMASK) == 0) { /* non continuation */ michael@0: secS = UCOL_PRIMARYMASK; michael@0: sInShifted = FALSE; michael@0: } else { michael@0: sInShifted = TRUE; michael@0: } michael@0: } michael@0: secS &= UCOL_PRIMARYMASK; michael@0: michael@0: michael@0: while((secT == 0 && secT != UCOL_NO_MORE_CES) || (isContinuation(secT) && !tInShifted)) { michael@0: secT = *(tCE++); michael@0: if(isContinuation(secT)) { michael@0: if(!tInShifted) { michael@0: continue; michael@0: } michael@0: } else if(secT > LVT || (secT & UCOL_PRIMARYMASK) == 0) { michael@0: secT = UCOL_PRIMARYMASK; michael@0: tInShifted = FALSE; michael@0: } else { michael@0: tInShifted = TRUE; michael@0: } michael@0: } michael@0: secT &= UCOL_PRIMARYMASK; michael@0: michael@0: if(secS == secT) { michael@0: if(secS == UCOL_NO_MORE_CES_PRIMARY) { michael@0: break; michael@0: } else { michael@0: secS = 0; secT = 0; michael@0: continue; michael@0: } michael@0: } else { michael@0: result = (secS < secT) ? UCOL_LESS : UCOL_GREATER; michael@0: goto commonReturn; michael@0: } michael@0: } michael@0: } else if(doHiragana && hirResult != UCOL_EQUAL) { michael@0: // If we're fine on quaternaries, we might be different michael@0: // on Hiragana. This, however, might fail us in shifted. michael@0: result = hirResult; michael@0: goto commonReturn; michael@0: } michael@0: michael@0: /* For IDENTICAL comparisons, we use a bitwise character comparison */ michael@0: /* as a tiebreaker if all else is equal. */ michael@0: /* Getting here should be quite rare - strings are not identical - */ michael@0: /* that is checked first, but compared == through all other checks. 
*/ michael@0: if(checkIdent) michael@0: { michael@0: //result = ucol_checkIdent(&sColl, &tColl, coll->normalizationMode == UCOL_ON); michael@0: result = ucol_checkIdent(sColl, tColl, TRUE, status); michael@0: } michael@0: michael@0: commonReturn: michael@0: if ((sColl->flags | tColl->flags) & UCOL_ITER_ALLOCATED) { michael@0: if (sCEs.buf != sCEs.localArray ) { michael@0: uprv_free(sCEs.buf); michael@0: } michael@0: if (tCEs.buf != tCEs.localArray ) { michael@0: uprv_free(tCEs.buf); michael@0: } michael@0: } michael@0: michael@0: return result; michael@0: } michael@0: michael@0: static UCollationResult michael@0: ucol_strcollRegular(const UCollator *coll, michael@0: const UChar *source, int32_t sourceLength, michael@0: const UChar *target, int32_t targetLength, michael@0: UErrorCode *status) { michael@0: collIterate sColl, tColl; michael@0: // Preparing the context objects for iterating over strings michael@0: IInit_collIterate(coll, source, sourceLength, &sColl, status); michael@0: IInit_collIterate(coll, target, targetLength, &tColl, status); michael@0: if(U_FAILURE(*status)) { michael@0: return UCOL_LESS; michael@0: } michael@0: return ucol_strcollRegular(&sColl, &tColl, status); michael@0: } michael@0: michael@0: static inline uint32_t michael@0: ucol_getLatinOneContraction(const UCollator *coll, int32_t strength, michael@0: uint32_t CE, const UChar *s, int32_t *index, int32_t len) michael@0: { michael@0: const UChar *UCharOffset = (UChar *)coll->image+getContractOffset(CE&0xFFF); michael@0: int32_t latinOneOffset = (CE & 0x00FFF000) >> 12; michael@0: int32_t offset = 1; michael@0: UChar schar = 0, tchar = 0; michael@0: michael@0: for(;;) { michael@0: if(len == -1) { michael@0: if(s[*index] == 0) { // end of string michael@0: return(coll->latinOneCEs[strength*coll->latinOneTableLen+latinOneOffset]); michael@0: } else { michael@0: schar = s[*index]; michael@0: } michael@0: } else { michael@0: if(*index == len) { michael@0: return(coll->latinOneCEs[strength*coll->latinOneTableLen+latinOneOffset]); michael@0: } else { michael@0: schar = s[*index]; michael@0: } michael@0: } michael@0: michael@0: while(schar > (tchar = *(UCharOffset+offset))) { /* since the contraction codepoints should be ordered, we skip all that are smaller */ michael@0: offset++; michael@0: } michael@0: michael@0: if (schar == tchar) { michael@0: (*index)++; michael@0: return(coll->latinOneCEs[strength*coll->latinOneTableLen+latinOneOffset+offset]); michael@0: } michael@0: else michael@0: { michael@0: if(schar & 0xFF00 /*> UCOL_ENDOFLATIN1RANGE*/) { michael@0: return UCOL_BAIL_OUT_CE; michael@0: } michael@0: // skip completely ignorables michael@0: uint32_t isZeroCE = UTRIE_GET32_FROM_LEAD(&coll->mapping, schar); michael@0: if(isZeroCE == 0) { // we have to ignore completely ignorables michael@0: (*index)++; michael@0: continue; michael@0: } michael@0: michael@0: return(coll->latinOneCEs[strength*coll->latinOneTableLen+latinOneOffset]); michael@0: } michael@0: } michael@0: } michael@0: michael@0: michael@0: /** michael@0: * This is a fast strcoll, geared towards text in Latin-1. michael@0: * It supports contractions of size two, French secondaries michael@0: * and case switching. You can use it with strengths primary michael@0: * to tertiary. It does not support shifted and case level. michael@0: * It relies on the table build by setupLatin1Table. If it michael@0: * doesn't understand something, it will go to the regular michael@0: * strcoll. 
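 *
 * Informal usage sketch (uSrc and uTarg are placeholder UChar strings and
 * "ec" a placeholder UErrorCode; an illustration, not a contract): the fast
 * path is only attempted when ucol_strcoll() finds coll->latinOneUse set and
 * the first code unit of each remaining string is Latin-1, and it still
 * defers to the regular strcoll if it meets a non-Latin-1 character, a
 * special CE other than a two-character contraction, or, in the French
 * secondary pass, any contraction at all. Attributes it does not support
 * (shifted alternate handling, case level) are expected to keep
 * latinOneUse off altogether, e.g.:
 *
 *     ucol_strcoll(coll, uSrc, -1, uTarg, -1);   // may take this fast path
 *     ucol_setAttribute(coll, UCOL_ALTERNATE_HANDLING, UCOL_SHIFTED, &ec);
 *     ucol_strcoll(coll, uSrc, -1, uTarg, -1);   // shifted: regular path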
michael@0: */ michael@0: static UCollationResult michael@0: ucol_strcollUseLatin1( const UCollator *coll, michael@0: const UChar *source, michael@0: int32_t sLen, michael@0: const UChar *target, michael@0: int32_t tLen, michael@0: UErrorCode *status) michael@0: { michael@0: U_ALIGN_CODE(16); michael@0: int32_t strength = coll->strength; michael@0: michael@0: int32_t sIndex = 0, tIndex = 0; michael@0: UChar sChar = 0, tChar = 0; michael@0: uint32_t sOrder=0, tOrder=0; michael@0: michael@0: UBool endOfSource = FALSE; michael@0: michael@0: uint32_t *elements = coll->latinOneCEs; michael@0: michael@0: UBool haveContractions = FALSE; // if we have contractions in our string michael@0: // we cannot do French secondary michael@0: michael@0: // Do the primary level michael@0: for(;;) { michael@0: while(sOrder==0) { // this loop skips primary ignorables michael@0: // sOrder=getNextlatinOneCE(source); michael@0: if(sLen==-1) { // handling zero terminated strings michael@0: sChar=source[sIndex++]; michael@0: if(sChar==0) { michael@0: endOfSource = TRUE; michael@0: break; michael@0: } michael@0: } else { // handling strings with known length michael@0: if(sIndex==sLen) { michael@0: endOfSource = TRUE; michael@0: break; michael@0: } michael@0: sChar=source[sIndex++]; michael@0: } michael@0: if(sChar&0xFF00) { // if we encounter non-latin-1, we bail out (sChar > 0xFF, but this is faster on win32) michael@0: //fprintf(stderr, "R"); michael@0: return ucol_strcollRegular(coll, source, sLen, target, tLen, status); michael@0: } michael@0: sOrder = elements[sChar]; michael@0: if(sOrder >= UCOL_NOT_FOUND) { // if we got a special michael@0: // specials can basically be either contractions or bail-out signs. If we get anything michael@0: // else, we'll bail out anywasy michael@0: if(getCETag(sOrder) == CONTRACTION_TAG) { michael@0: sOrder = ucol_getLatinOneContraction(coll, UCOL_PRIMARY, sOrder, source, &sIndex, sLen); michael@0: haveContractions = TRUE; // if there are contractions, we cannot do French secondary michael@0: // However, if there are contractions in the table, but we always use just one char, michael@0: // we might be able to do French. This should be checked out. michael@0: } michael@0: if(sOrder >= UCOL_NOT_FOUND /*== UCOL_BAIL_OUT_CE*/) { michael@0: //fprintf(stderr, "S"); michael@0: return ucol_strcollRegular(coll, source, sLen, target, tLen, status); michael@0: } michael@0: } michael@0: } michael@0: michael@0: while(tOrder==0) { // this loop skips primary ignorables michael@0: // tOrder=getNextlatinOneCE(target); michael@0: if(tLen==-1) { // handling zero terminated strings michael@0: tChar=target[tIndex++]; michael@0: if(tChar==0) { michael@0: if(endOfSource) { // this is different than source loop, michael@0: // as we already know that source loop is done here, michael@0: // so we can either finish the primary loop if both michael@0: // strings are done or anounce the result if only michael@0: // target is done. Same below. 
michael@0: goto endOfPrimLoop; michael@0: } else { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: } else { // handling strings with known length michael@0: if(tIndex==tLen) { michael@0: if(endOfSource) { michael@0: goto endOfPrimLoop; michael@0: } else { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: tChar=target[tIndex++]; michael@0: } michael@0: if(tChar&0xFF00) { // if we encounter non-latin-1, we bail out (sChar > 0xFF, but this is faster on win32) michael@0: //fprintf(stderr, "R"); michael@0: return ucol_strcollRegular(coll, source, sLen, target, tLen, status); michael@0: } michael@0: tOrder = elements[tChar]; michael@0: if(tOrder >= UCOL_NOT_FOUND) { michael@0: // Handling specials, see the comments for source michael@0: if(getCETag(tOrder) == CONTRACTION_TAG) { michael@0: tOrder = ucol_getLatinOneContraction(coll, UCOL_PRIMARY, tOrder, target, &tIndex, tLen); michael@0: haveContractions = TRUE; michael@0: } michael@0: if(tOrder >= UCOL_NOT_FOUND /*== UCOL_BAIL_OUT_CE*/) { michael@0: //fprintf(stderr, "S"); michael@0: return ucol_strcollRegular(coll, source, sLen, target, tLen, status); michael@0: } michael@0: } michael@0: } michael@0: if(endOfSource) { // source is finished, but target is not, say the result. michael@0: return UCOL_LESS; michael@0: } michael@0: michael@0: if(sOrder == tOrder) { // if we have same CEs, we continue the loop michael@0: sOrder = 0; tOrder = 0; michael@0: continue; michael@0: } else { michael@0: // compare current top bytes michael@0: if(((sOrder^tOrder)&0xFF000000)!=0) { michael@0: // top bytes differ, return difference michael@0: if(sOrder < tOrder) { michael@0: return UCOL_LESS; michael@0: } else if(sOrder > tOrder) { michael@0: return UCOL_GREATER; michael@0: } michael@0: // instead of return (int32_t)(sOrder>>24)-(int32_t)(tOrder>>24); michael@0: // since we must return enum value michael@0: } michael@0: michael@0: // top bytes match, continue with following bytes michael@0: sOrder<<=8; michael@0: tOrder<<=8; michael@0: } michael@0: } michael@0: michael@0: endOfPrimLoop: michael@0: // after primary loop, we definitely know the sizes of strings, michael@0: // so we set it and use simpler loop for secondaries and tertiaries michael@0: sLen = sIndex; tLen = tIndex; michael@0: if(strength >= UCOL_SECONDARY) { michael@0: // adjust the table beggining michael@0: elements += coll->latinOneTableLen; michael@0: endOfSource = FALSE; michael@0: michael@0: if(coll->frenchCollation == UCOL_OFF) { // non French michael@0: // This loop is a simplified copy of primary loop michael@0: // at this point we know that whole strings are latin-1, so we don't michael@0: // check for that. We also know that we only have contractions as michael@0: // specials. 
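            // (Aside, as an informal illustration of the French branch below: with
            // backward secondary ordering a difference near the END of the string
            // outweighs one near the start, which is why that branch walks the
            // strings from the back. Classic example, assuming
            // UCOL_FRENCH_COLLATION is UCOL_ON:
            //
            //     cote < côte < coté < côté      // French (backward) secondaries
            //     cote < coté < côte < côté      // default (forward) secondaries
            //
            //     ucol_setAttribute(coll, UCOL_FRENCH_COLLATION, UCOL_ON, &ec);
            //
            // where "ec" is just a placeholder UErrorCode.)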
michael@0: sIndex = 0; tIndex = 0; michael@0: for(;;) { michael@0: while(sOrder==0) { michael@0: if(sIndex==sLen) { michael@0: endOfSource = TRUE; michael@0: break; michael@0: } michael@0: sChar=source[sIndex++]; michael@0: sOrder = elements[sChar]; michael@0: if(sOrder > UCOL_NOT_FOUND) { michael@0: sOrder = ucol_getLatinOneContraction(coll, UCOL_SECONDARY, sOrder, source, &sIndex, sLen); michael@0: } michael@0: } michael@0: michael@0: while(tOrder==0) { michael@0: if(tIndex==tLen) { michael@0: if(endOfSource) { michael@0: goto endOfSecLoop; michael@0: } else { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: tChar=target[tIndex++]; michael@0: tOrder = elements[tChar]; michael@0: if(tOrder > UCOL_NOT_FOUND) { michael@0: tOrder = ucol_getLatinOneContraction(coll, UCOL_SECONDARY, tOrder, target, &tIndex, tLen); michael@0: } michael@0: } michael@0: if(endOfSource) { michael@0: return UCOL_LESS; michael@0: } michael@0: michael@0: if(sOrder == tOrder) { michael@0: sOrder = 0; tOrder = 0; michael@0: continue; michael@0: } else { michael@0: // see primary loop for comments on this michael@0: if(((sOrder^tOrder)&0xFF000000)!=0) { michael@0: if(sOrder < tOrder) { michael@0: return UCOL_LESS; michael@0: } else if(sOrder > tOrder) { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: sOrder<<=8; michael@0: tOrder<<=8; michael@0: } michael@0: } michael@0: } else { // French michael@0: if(haveContractions) { // if we have contractions, we have to bail out michael@0: // since we don't really know how to handle them here michael@0: return ucol_strcollRegular(coll, source, sLen, target, tLen, status); michael@0: } michael@0: // For French, we go backwards michael@0: sIndex = sLen; tIndex = tLen; michael@0: for(;;) { michael@0: while(sOrder==0) { michael@0: if(sIndex==0) { michael@0: endOfSource = TRUE; michael@0: break; michael@0: } michael@0: sChar=source[--sIndex]; michael@0: sOrder = elements[sChar]; michael@0: // don't even look for contractions michael@0: } michael@0: michael@0: while(tOrder==0) { michael@0: if(tIndex==0) { michael@0: if(endOfSource) { michael@0: goto endOfSecLoop; michael@0: } else { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: tChar=target[--tIndex]; michael@0: tOrder = elements[tChar]; michael@0: // don't even look for contractions michael@0: } michael@0: if(endOfSource) { michael@0: return UCOL_LESS; michael@0: } michael@0: michael@0: if(sOrder == tOrder) { michael@0: sOrder = 0; tOrder = 0; michael@0: continue; michael@0: } else { michael@0: // see the primary loop for comments michael@0: if(((sOrder^tOrder)&0xFF000000)!=0) { michael@0: if(sOrder < tOrder) { michael@0: return UCOL_LESS; michael@0: } else if(sOrder > tOrder) { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: sOrder<<=8; michael@0: tOrder<<=8; michael@0: } michael@0: } michael@0: } michael@0: } michael@0: michael@0: endOfSecLoop: michael@0: if(strength >= UCOL_TERTIARY) { michael@0: // tertiary loop is the same as secondary (except no French) michael@0: elements += coll->latinOneTableLen; michael@0: sIndex = 0; tIndex = 0; michael@0: endOfSource = FALSE; michael@0: for(;;) { michael@0: while(sOrder==0) { michael@0: if(sIndex==sLen) { michael@0: endOfSource = TRUE; michael@0: break; michael@0: } michael@0: sChar=source[sIndex++]; michael@0: sOrder = elements[sChar]; michael@0: if(sOrder > UCOL_NOT_FOUND) { michael@0: sOrder = ucol_getLatinOneContraction(coll, UCOL_TERTIARY, sOrder, source, &sIndex, sLen); michael@0: } michael@0: } 
michael@0: while(tOrder==0) { michael@0: if(tIndex==tLen) { michael@0: if(endOfSource) { michael@0: return UCOL_EQUAL; // if both strings are at the end, they are equal michael@0: } else { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: tChar=target[tIndex++]; michael@0: tOrder = elements[tChar]; michael@0: if(tOrder > UCOL_NOT_FOUND) { michael@0: tOrder = ucol_getLatinOneContraction(coll, UCOL_TERTIARY, tOrder, target, &tIndex, tLen); michael@0: } michael@0: } michael@0: if(endOfSource) { michael@0: return UCOL_LESS; michael@0: } michael@0: if(sOrder == tOrder) { michael@0: sOrder = 0; tOrder = 0; michael@0: continue; michael@0: } else { michael@0: if(((sOrder^tOrder)&0xff000000)!=0) { michael@0: if(sOrder < tOrder) { michael@0: return UCOL_LESS; michael@0: } else if(sOrder > tOrder) { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: sOrder<<=8; michael@0: tOrder<<=8; michael@0: } michael@0: } michael@0: } michael@0: return UCOL_EQUAL; michael@0: } michael@0: michael@0: /* michael@0: Note: ucol_strcollUTF8 supports null terminated input. Calculating length of michael@0: null terminated input string takes extra amount of CPU cycles. michael@0: */ michael@0: static UCollationResult michael@0: ucol_strcollRegularUTF8( michael@0: const UCollator *coll, michael@0: const char *source, michael@0: int32_t sourceLength, michael@0: const char *target, michael@0: int32_t targetLength, michael@0: UErrorCode *status) michael@0: { michael@0: UCharIterator src; michael@0: UCharIterator tgt; michael@0: michael@0: uiter_setUTF8(&src, source, sourceLength); michael@0: uiter_setUTF8(&tgt, target, targetLength); michael@0: michael@0: // Preparing the context objects for iterating over strings michael@0: collIterate sColl, tColl; michael@0: IInit_collIterate(coll, NULL, -1, &sColl, status); michael@0: IInit_collIterate(coll, NULL, -1, &tColl, status); michael@0: if(U_FAILURE(*status)) { michael@0: UTRACE_EXIT_VALUE_STATUS(UCOL_EQUAL, *status) michael@0: return UCOL_EQUAL; michael@0: } michael@0: // The division for the array length may truncate the array size to michael@0: // a little less than UNORM_ITER_SIZE, but that size is dimensioned too high michael@0: // for all platforms anyway. 
michael@0: UAlignedMemory stackNormIter1[UNORM_ITER_SIZE/sizeof(UAlignedMemory)]; michael@0: UAlignedMemory stackNormIter2[UNORM_ITER_SIZE/sizeof(UAlignedMemory)]; michael@0: UNormIterator *sNormIter = NULL, *tNormIter = NULL; michael@0: michael@0: sColl.iterator = &src; michael@0: sColl.flags |= UCOL_USE_ITERATOR; michael@0: tColl.flags |= UCOL_USE_ITERATOR; michael@0: tColl.iterator = &tgt; michael@0: michael@0: if(ucol_getAttribute(coll, UCOL_NORMALIZATION_MODE, status) == UCOL_ON) { michael@0: sNormIter = unorm_openIter(stackNormIter1, sizeof(stackNormIter1), status); michael@0: sColl.iterator = unorm_setIter(sNormIter, &src, UNORM_FCD, status); michael@0: sColl.flags &= ~UCOL_ITER_NORM; michael@0: michael@0: tNormIter = unorm_openIter(stackNormIter2, sizeof(stackNormIter2), status); michael@0: tColl.iterator = unorm_setIter(tNormIter, &tgt, UNORM_FCD, status); michael@0: tColl.flags &= ~UCOL_ITER_NORM; michael@0: } michael@0: michael@0: return ucol_strcollRegular(&sColl, &tColl, status); michael@0: } michael@0: michael@0: static inline uint32_t michael@0: ucol_getLatinOneContractionUTF8(const UCollator *coll, int32_t strength, michael@0: uint32_t CE, const char *s, int32_t *index, int32_t len) michael@0: { michael@0: const UChar *UCharOffset = (UChar *)coll->image+getContractOffset(CE&0xFFF); michael@0: int32_t latinOneOffset = (CE & 0x00FFF000) >> 12; michael@0: int32_t offset = 1; michael@0: UChar32 schar = 0, tchar = 0; michael@0: michael@0: for(;;) { michael@0: if (*index == len) { michael@0: return(coll->latinOneCEs[strength*coll->latinOneTableLen+latinOneOffset]); michael@0: } michael@0: U8_GET_OR_FFFD((const uint8_t*)s, 0, *index, len, schar); michael@0: if (len < 0 && schar == 0) { michael@0: return(coll->latinOneCEs[strength*coll->latinOneTableLen+latinOneOffset]); michael@0: } michael@0: michael@0: while(schar > (tchar = *(UCharOffset+offset))) { /* since the contraction codepoints should be ordered, we skip all that are smaller */ michael@0: offset++; michael@0: } michael@0: michael@0: if (schar == tchar) { michael@0: U8_FWD_1(s, *index, len); michael@0: return(coll->latinOneCEs[strength*coll->latinOneTableLen+latinOneOffset+offset]); michael@0: } michael@0: else michael@0: { michael@0: if(schar & 0xFF00 /*> UCOL_ENDOFLATIN1RANGE*/) { michael@0: return UCOL_BAIL_OUT_CE; michael@0: } michael@0: // skip completely ignorables michael@0: uint32_t isZeroCE = UTRIE_GET32_FROM_LEAD(&coll->mapping, schar); michael@0: if(isZeroCE == 0) { // we have to ignore completely ignorables michael@0: U8_FWD_1(s, *index, len); michael@0: continue; michael@0: } michael@0: michael@0: return(coll->latinOneCEs[strength*coll->latinOneTableLen+latinOneOffset]); michael@0: } michael@0: } michael@0: } michael@0: michael@0: static inline UCollationResult michael@0: ucol_strcollUseLatin1UTF8( michael@0: const UCollator *coll, michael@0: const char *source, michael@0: int32_t sLen, michael@0: const char *target, michael@0: int32_t tLen, michael@0: UErrorCode *status) michael@0: { michael@0: U_ALIGN_CODE(16); michael@0: int32_t strength = coll->strength; michael@0: michael@0: int32_t sIndex = 0, tIndex = 0; michael@0: UChar32 sChar = 0, tChar = 0; michael@0: uint32_t sOrder=0, tOrder=0; michael@0: michael@0: UBool endOfSource = FALSE; michael@0: michael@0: uint32_t *elements = coll->latinOneCEs; michael@0: michael@0: UBool haveContractions = FALSE; // if we have contractions in our string michael@0: // we cannot do French secondary michael@0: michael@0: // Do the primary level michael@0: for(;;) { 
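        // Each pass of this loop pulls the next non-ignorable primary CE for the
        // source and for the target (decoding UTF-8 with U8_NEXT_OR_FFFD), then
        // compares the two CEs one byte at a time from the top, shifting on ties.
        // Anything outside Latin-1, and any special CE other than a two-character
        // contraction, abandons the fast path and defers to ucol_strcollRegularUTF8().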
michael@0: while(sOrder==0) { // this loop skips primary ignorables michael@0: // sOrder=getNextlatinOneCE(source); michael@0: if (sIndex == sLen) { michael@0: endOfSource = TRUE; michael@0: break; michael@0: } michael@0: U8_NEXT_OR_FFFD(source, sIndex, sLen ,sChar); michael@0: if (sLen < 0 && sChar == 0) { michael@0: endOfSource = TRUE; michael@0: sLen = sIndex; michael@0: break; michael@0: } michael@0: if(sChar&0xFFFFFF00) { // if we encounter non-latin-1, we bail out (sChar > 0xFF, but this is faster on win32) michael@0: //fprintf(stderr, "R"); michael@0: return ucol_strcollRegularUTF8(coll, source, sLen, target, tLen, status); michael@0: } michael@0: sOrder = elements[sChar]; michael@0: if(sOrder >= UCOL_NOT_FOUND) { // if we got a special michael@0: // specials can basically be either contractions or bail-out signs. If we get anything michael@0: // else, we'll bail out anywasy michael@0: if(getCETag(sOrder) == CONTRACTION_TAG) { michael@0: sOrder = ucol_getLatinOneContractionUTF8(coll, UCOL_PRIMARY, sOrder, source, &sIndex, sLen); michael@0: haveContractions = TRUE; // if there are contractions, we cannot do French secondary michael@0: // However, if there are contractions in the table, but we always use just one char, michael@0: // we might be able to do French. This should be checked out. michael@0: } michael@0: if(sOrder >= UCOL_NOT_FOUND /*== UCOL_BAIL_OUT_CE*/) { michael@0: //fprintf(stderr, "S"); michael@0: return ucol_strcollRegularUTF8(coll, source, sLen, target, tLen, status); michael@0: } michael@0: } michael@0: } michael@0: michael@0: while(tOrder==0) { // this loop skips primary ignorables michael@0: // tOrder=getNextlatinOneCE(target); michael@0: if (tIndex == tLen) { michael@0: if(endOfSource) { michael@0: goto endOfPrimLoopU8; michael@0: } else { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: U8_NEXT_OR_FFFD(target, tIndex, tLen, tChar); michael@0: if (tLen < 0 && tChar == 0) { michael@0: if(endOfSource) { michael@0: tLen = tIndex; michael@0: goto endOfPrimLoopU8; michael@0: } else { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: if(tChar&0xFFFFFF00) { // if we encounter non-latin-1, we bail out (sChar > 0xFF, but this is faster on win32) michael@0: //fprintf(stderr, "R"); michael@0: return ucol_strcollRegularUTF8(coll, source, sLen, target, tLen, status); michael@0: } michael@0: tOrder = elements[tChar]; michael@0: if(tOrder >= UCOL_NOT_FOUND) { michael@0: // Handling specials, see the comments for source michael@0: if(getCETag(tOrder) == CONTRACTION_TAG) { michael@0: tOrder = ucol_getLatinOneContractionUTF8(coll, UCOL_PRIMARY, tOrder, target, &tIndex, tLen); michael@0: haveContractions = TRUE; michael@0: } michael@0: if(tOrder >= UCOL_NOT_FOUND /*== UCOL_BAIL_OUT_CE*/) { michael@0: //fprintf(stderr, "S"); michael@0: return ucol_strcollRegularUTF8(coll, source, sLen, target, tLen, status); michael@0: } michael@0: } michael@0: } michael@0: if(endOfSource) { // source is finished, but target is not, say the result. 
michael@0: return UCOL_LESS; michael@0: } michael@0: michael@0: if(sOrder == tOrder) { // if we have same CEs, we continue the loop michael@0: sOrder = 0; tOrder = 0; michael@0: continue; michael@0: } else { michael@0: // compare current top bytes michael@0: if(((sOrder^tOrder)&0xFF000000)!=0) { michael@0: // top bytes differ, return difference michael@0: if(sOrder < tOrder) { michael@0: return UCOL_LESS; michael@0: } else if(sOrder > tOrder) { michael@0: return UCOL_GREATER; michael@0: } michael@0: // instead of return (int32_t)(sOrder>>24)-(int32_t)(tOrder>>24); michael@0: // since we must return enum value michael@0: } michael@0: michael@0: // top bytes match, continue with following bytes michael@0: sOrder<<=8; michael@0: tOrder<<=8; michael@0: } michael@0: } michael@0: michael@0: endOfPrimLoopU8: michael@0: // after primary loop, we definitely know the sizes of strings, michael@0: // so we set it and use simpler loop for secondaries and tertiaries michael@0: sLen = sIndex; tLen = tIndex; michael@0: if(strength >= UCOL_SECONDARY) { michael@0: // adjust the table beggining michael@0: elements += coll->latinOneTableLen; michael@0: endOfSource = FALSE; michael@0: michael@0: if(coll->frenchCollation == UCOL_OFF) { // non French michael@0: // This loop is a simplified copy of primary loop michael@0: // at this point we know that whole strings are latin-1, so we don't michael@0: // check for that. We also know that we only have contractions as michael@0: // specials. michael@0: sIndex = 0; tIndex = 0; michael@0: for(;;) { michael@0: while(sOrder==0) { michael@0: if(sIndex==sLen) { michael@0: endOfSource = TRUE; michael@0: break; michael@0: } michael@0: U_ASSERT(sLen >= 0); michael@0: U8_NEXT_OR_FFFD(source, sIndex, sLen, sChar); michael@0: U_ASSERT(sChar >= 0 && sChar <= 0xFF); michael@0: sOrder = elements[sChar]; michael@0: if(sOrder > UCOL_NOT_FOUND) { michael@0: sOrder = ucol_getLatinOneContractionUTF8(coll, UCOL_SECONDARY, sOrder, source, &sIndex, sLen); michael@0: } michael@0: } michael@0: michael@0: while(tOrder==0) { michael@0: if(tIndex==tLen) { michael@0: if(endOfSource) { michael@0: goto endOfSecLoopU8; michael@0: } else { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: U_ASSERT(tLen >= 0); michael@0: U8_NEXT_OR_FFFD(target, tIndex, tLen, tChar); michael@0: U_ASSERT(tChar >= 0 && tChar <= 0xFF); michael@0: tOrder = elements[tChar]; michael@0: if(tOrder > UCOL_NOT_FOUND) { michael@0: tOrder = ucol_getLatinOneContractionUTF8(coll, UCOL_SECONDARY, tOrder, target, &tIndex, tLen); michael@0: } michael@0: } michael@0: if(endOfSource) { michael@0: return UCOL_LESS; michael@0: } michael@0: michael@0: if(sOrder == tOrder) { michael@0: sOrder = 0; tOrder = 0; michael@0: continue; michael@0: } else { michael@0: // see primary loop for comments on this michael@0: if(((sOrder^tOrder)&0xFF000000)!=0) { michael@0: if(sOrder < tOrder) { michael@0: return UCOL_LESS; michael@0: } else if(sOrder > tOrder) { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: sOrder<<=8; michael@0: tOrder<<=8; michael@0: } michael@0: } michael@0: } else { // French michael@0: if(haveContractions) { // if we have contractions, we have to bail out michael@0: // since we don't really know how to handle them here michael@0: return ucol_strcollRegularUTF8(coll, source, sLen, target, tLen, status); michael@0: } michael@0: // For French, we go backwards michael@0: sIndex = sLen; tIndex = tLen; michael@0: for(;;) { michael@0: while(sOrder==0) { michael@0: if(sIndex==0) { michael@0: 
endOfSource = TRUE; michael@0: break; michael@0: } michael@0: U8_PREV_OR_FFFD(source, 0, sIndex, sChar); michael@0: U_ASSERT(sChar >= 0 && sChar <= 0xFF); michael@0: sOrder = elements[sChar]; michael@0: // don't even look for contractions michael@0: } michael@0: michael@0: while(tOrder==0) { michael@0: if(tIndex==0) { michael@0: if(endOfSource) { michael@0: goto endOfSecLoopU8; michael@0: } else { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: U8_PREV_OR_FFFD(target, 0, tIndex, tChar); michael@0: U_ASSERT(tChar >= 0 && tChar <= 0xFF); michael@0: tOrder = elements[tChar]; michael@0: // don't even look for contractions michael@0: } michael@0: if(endOfSource) { michael@0: return UCOL_LESS; michael@0: } michael@0: michael@0: if(sOrder == tOrder) { michael@0: sOrder = 0; tOrder = 0; michael@0: continue; michael@0: } else { michael@0: // see the primary loop for comments michael@0: if(((sOrder^tOrder)&0xFF000000)!=0) { michael@0: if(sOrder < tOrder) { michael@0: return UCOL_LESS; michael@0: } else if(sOrder > tOrder) { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: sOrder<<=8; michael@0: tOrder<<=8; michael@0: } michael@0: } michael@0: } michael@0: } michael@0: michael@0: endOfSecLoopU8: michael@0: if(strength >= UCOL_TERTIARY) { michael@0: // tertiary loop is the same as secondary (except no French) michael@0: elements += coll->latinOneTableLen; michael@0: sIndex = 0; tIndex = 0; michael@0: endOfSource = FALSE; michael@0: for(;;) { michael@0: while(sOrder==0) { michael@0: if(sIndex==sLen) { michael@0: endOfSource = TRUE; michael@0: break; michael@0: } michael@0: U_ASSERT(sLen >= 0); michael@0: U8_NEXT_OR_FFFD(source, sIndex, sLen, sChar); michael@0: U_ASSERT(sChar >= 0 && sChar <= 0xFF); michael@0: sOrder = elements[sChar]; michael@0: if(sOrder > UCOL_NOT_FOUND) { michael@0: sOrder = ucol_getLatinOneContractionUTF8(coll, UCOL_TERTIARY, sOrder, source, &sIndex, sLen); michael@0: } michael@0: } michael@0: while(tOrder==0) { michael@0: if(tIndex==tLen) { michael@0: if(endOfSource) { michael@0: return UCOL_EQUAL; // if both strings are at the end, they are equal michael@0: } else { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: U_ASSERT(tLen >= 0); michael@0: U8_NEXT_OR_FFFD(target, tIndex, tLen, tChar); michael@0: U_ASSERT(tChar >= 0 && tChar <= 0xFF); michael@0: tOrder = elements[tChar]; michael@0: if(tOrder > UCOL_NOT_FOUND) { michael@0: tOrder = ucol_getLatinOneContractionUTF8(coll, UCOL_TERTIARY, tOrder, target, &tIndex, tLen); michael@0: } michael@0: } michael@0: if(endOfSource) { michael@0: return UCOL_LESS; michael@0: } michael@0: if(sOrder == tOrder) { michael@0: sOrder = 0; tOrder = 0; michael@0: continue; michael@0: } else { michael@0: if(((sOrder^tOrder)&0xff000000)!=0) { michael@0: if(sOrder < tOrder) { michael@0: return UCOL_LESS; michael@0: } else if(sOrder > tOrder) { michael@0: return UCOL_GREATER; michael@0: } michael@0: } michael@0: sOrder<<=8; michael@0: tOrder<<=8; michael@0: } michael@0: } michael@0: } michael@0: return UCOL_EQUAL; michael@0: } michael@0: michael@0: U_CAPI UCollationResult U_EXPORT2 michael@0: ucol_strcollIter( const UCollator *coll, michael@0: UCharIterator *sIter, michael@0: UCharIterator *tIter, michael@0: UErrorCode *status) michael@0: { michael@0: if(!status || U_FAILURE(*status)) { michael@0: return UCOL_EQUAL; michael@0: } michael@0: michael@0: UTRACE_ENTRY(UTRACE_UCOL_STRCOLLITER); michael@0: UTRACE_DATA3(UTRACE_VERBOSE, "coll=%p, sIter=%p, tIter=%p", coll, sIter, tIter); michael@0: 
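    /*
     * Usage sketch (illustrative; the "de" locale is arbitrary and error checks
     * are omitted): callers can hand in any UCharIterator, and when
     * UCOL_NORMALIZATION_MODE is UCOL_ON the iterators are wrapped in FCD
     * normalizing iterators below, so canonically equivalent inputs are
     * expected to compare equal.
     *
     *     #include "unicode/ucol.h"
     *     #include "unicode/uiter.h"
     *
     *     UErrorCode ec = U_ZERO_ERROR;
     *     UCollator *c = ucol_open("de", &ec);
     *     ucol_setAttribute(c, UCOL_NORMALIZATION_MODE, UCOL_ON, &ec);
     *     static const UChar nfc[] = { 0x00E9, 0 };          // U+00E9, precomposed e-acute
     *     static const UChar nfd[] = { 0x65, 0x0301, 0 };    // e + U+0301 combining acute
     *     UCharIterator s, t;
     *     uiter_setString(&s, nfc, -1);
     *     uiter_setString(&t, nfd, -1);
     *     UCollationResult r = ucol_strcollIter(c, &s, &t, &ec);   // expected: UCOL_EQUAL
     *     ucol_close(c);
     */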
michael@0: if (sIter == tIter) { michael@0: UTRACE_EXIT_VALUE_STATUS(UCOL_EQUAL, *status) michael@0: return UCOL_EQUAL; michael@0: } michael@0: if(sIter == NULL || tIter == NULL || coll == NULL) { michael@0: *status = U_ILLEGAL_ARGUMENT_ERROR; michael@0: UTRACE_EXIT_VALUE_STATUS(UCOL_EQUAL, *status) michael@0: return UCOL_EQUAL; michael@0: } michael@0: michael@0: UCollationResult result = UCOL_EQUAL; michael@0: michael@0: // Preparing the context objects for iterating over strings michael@0: collIterate sColl, tColl; michael@0: IInit_collIterate(coll, NULL, -1, &sColl, status); michael@0: IInit_collIterate(coll, NULL, -1, &tColl, status); michael@0: if(U_FAILURE(*status)) { michael@0: UTRACE_EXIT_VALUE_STATUS(UCOL_EQUAL, *status) michael@0: return UCOL_EQUAL; michael@0: } michael@0: // The division for the array length may truncate the array size to michael@0: // a little less than UNORM_ITER_SIZE, but that size is dimensioned too high michael@0: // for all platforms anyway. michael@0: UAlignedMemory stackNormIter1[UNORM_ITER_SIZE/sizeof(UAlignedMemory)]; michael@0: UAlignedMemory stackNormIter2[UNORM_ITER_SIZE/sizeof(UAlignedMemory)]; michael@0: UNormIterator *sNormIter = NULL, *tNormIter = NULL; michael@0: michael@0: sColl.iterator = sIter; michael@0: sColl.flags |= UCOL_USE_ITERATOR; michael@0: tColl.flags |= UCOL_USE_ITERATOR; michael@0: tColl.iterator = tIter; michael@0: michael@0: if(ucol_getAttribute(coll, UCOL_NORMALIZATION_MODE, status) == UCOL_ON) { michael@0: sNormIter = unorm_openIter(stackNormIter1, sizeof(stackNormIter1), status); michael@0: sColl.iterator = unorm_setIter(sNormIter, sIter, UNORM_FCD, status); michael@0: sColl.flags &= ~UCOL_ITER_NORM; michael@0: michael@0: tNormIter = unorm_openIter(stackNormIter2, sizeof(stackNormIter2), status); michael@0: tColl.iterator = unorm_setIter(tNormIter, tIter, UNORM_FCD, status); michael@0: tColl.flags &= ~UCOL_ITER_NORM; michael@0: } michael@0: michael@0: UChar32 sChar = U_SENTINEL, tChar = U_SENTINEL; michael@0: michael@0: while((sChar = sColl.iterator->next(sColl.iterator)) == michael@0: (tChar = tColl.iterator->next(tColl.iterator))) { michael@0: if(sChar == U_SENTINEL) { michael@0: result = UCOL_EQUAL; michael@0: goto end_compare; michael@0: } michael@0: } michael@0: michael@0: if(sChar == U_SENTINEL) { michael@0: tChar = tColl.iterator->previous(tColl.iterator); michael@0: } michael@0: michael@0: if(tChar == U_SENTINEL) { michael@0: sChar = sColl.iterator->previous(sColl.iterator); michael@0: } michael@0: michael@0: sChar = sColl.iterator->previous(sColl.iterator); michael@0: tChar = tColl.iterator->previous(tColl.iterator); michael@0: michael@0: if (ucol_unsafeCP((UChar)sChar, coll) || ucol_unsafeCP((UChar)tChar, coll)) michael@0: { michael@0: // We are stopped in the middle of a contraction. michael@0: // Scan backwards through the == part of the string looking for the start of the contraction. michael@0: // It doesn't matter which string we scan, since they are the same in this region. 
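        // For example (informal, assuming a Czech "cs" tailoring where "ch" is a
        // contraction that sorts after "h"): when comparing "chudy" and "cukr" the
        // first difference is 'h' vs 'u', but 'h' is an unsafe code point because it
        // may be the tail of "ch"; backing up to the preceding 'c' lets the regular
        // comparison below read "ch" as a single collation element.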
michael@0: do michael@0: { michael@0: sChar = sColl.iterator->previous(sColl.iterator); michael@0: tChar = tColl.iterator->previous(tColl.iterator); michael@0: } michael@0: while (sChar != U_SENTINEL && ucol_unsafeCP((UChar)sChar, coll)); michael@0: } michael@0: michael@0: michael@0: if(U_SUCCESS(*status)) { michael@0: result = ucol_strcollRegular(&sColl, &tColl, status); michael@0: } michael@0: michael@0: end_compare: michael@0: if(sNormIter || tNormIter) { michael@0: unorm_closeIter(sNormIter); michael@0: unorm_closeIter(tNormIter); michael@0: } michael@0: michael@0: UTRACE_EXIT_VALUE_STATUS(result, *status) michael@0: return result; michael@0: } michael@0: michael@0: michael@0: /* */ michael@0: /* ucol_strcoll Main public API string comparison function */ michael@0: /* */ michael@0: U_CAPI UCollationResult U_EXPORT2 michael@0: ucol_strcoll( const UCollator *coll, michael@0: const UChar *source, michael@0: int32_t sourceLength, michael@0: const UChar *target, michael@0: int32_t targetLength) michael@0: { michael@0: U_ALIGN_CODE(16); michael@0: michael@0: UTRACE_ENTRY(UTRACE_UCOL_STRCOLL); michael@0: if (UTRACE_LEVEL(UTRACE_VERBOSE)) { michael@0: UTRACE_DATA3(UTRACE_VERBOSE, "coll=%p, source=%p, target=%p", coll, source, target); michael@0: UTRACE_DATA2(UTRACE_VERBOSE, "source string = %vh ", source, sourceLength); michael@0: UTRACE_DATA2(UTRACE_VERBOSE, "target string = %vh ", target, targetLength); michael@0: } michael@0: michael@0: if((source == NULL && sourceLength != 0) || (target == NULL && targetLength != 0)) { michael@0: // do not crash, but return. Should have michael@0: // status argument to return error. michael@0: UTRACE_EXIT_VALUE(UCOL_EQUAL); michael@0: return UCOL_EQUAL; michael@0: } michael@0: michael@0: /* Quick check if source and target are same strings. */ michael@0: /* They should either both be NULL terminated or the explicit length should be set on both. */ michael@0: if (source==target && sourceLength==targetLength) { michael@0: UTRACE_EXIT_VALUE(UCOL_EQUAL); michael@0: return UCOL_EQUAL; michael@0: } michael@0: michael@0: if(coll->delegate != NULL) { michael@0: UErrorCode status = U_ZERO_ERROR; michael@0: return ((const Collator*)coll->delegate)->compare(source,sourceLength,target,targetLength, status); michael@0: } michael@0: michael@0: /* Scan the strings. Find: */ michael@0: /* The length of any leading portion that is equal */ michael@0: /* Whether they are exactly equal. (in which case we just return) */ michael@0: const UChar *pSrc = source; michael@0: const UChar *pTarg = target; michael@0: int32_t equalLength; michael@0: michael@0: if (sourceLength == -1 && targetLength == -1) { michael@0: // Both strings are null terminated. michael@0: // Scan through any leading equal portion. michael@0: while (*pSrc == *pTarg && *pSrc != 0) { michael@0: pSrc++; michael@0: pTarg++; michael@0: } michael@0: if (*pSrc == 0 && *pTarg == 0) { michael@0: UTRACE_EXIT_VALUE(UCOL_EQUAL); michael@0: return UCOL_EQUAL; michael@0: } michael@0: equalLength = (int32_t)(pSrc - source); michael@0: } michael@0: else michael@0: { michael@0: // One or both strings has an explicit length. michael@0: const UChar *pSrcEnd = source + sourceLength; michael@0: const UChar *pTargEnd = target + targetLength; michael@0: michael@0: // Scan while the strings are bitwise ==, or until one is exhausted. 
michael@0:         for (;;) {
michael@0:             if (pSrc == pSrcEnd || pTarg == pTargEnd) {
michael@0:                 break;
michael@0:             }
michael@0:             if ((*pSrc == 0 && sourceLength == -1) || (*pTarg == 0 && targetLength == -1)) {
michael@0:                 break;
michael@0:             }
michael@0:             if (*pSrc != *pTarg) {
michael@0:                 break;
michael@0:             }
michael@0:             pSrc++;
michael@0:             pTarg++;
michael@0:         }
michael@0:         equalLength = (int32_t)(pSrc - source);
michael@0: 
michael@0:         // If we made it all the way through both strings, we are done.  They are ==
michael@0:         if ((pSrc ==pSrcEnd  || (pSrcEnd <pSrc  && *pSrc==0))  &&
michael@0:             (pTarg==pTargEnd || (pTargEnd<pTarg && *pTarg==0))) {
michael@0:             UTRACE_EXIT_VALUE(UCOL_EQUAL);
michael@0:             return UCOL_EQUAL;
michael@0:         }
michael@0:     }
michael@0:     if (equalLength > 0) {
michael@0:         /* There is an identical portion at the beginning of the two strings. */
michael@0:         /* If the identical portion ends within a contraction or a combining  */
michael@0:         /* character sequence, back up to the start of that sequence.         */
michael@0: 
michael@0:         // These values should already be set by the code above.
michael@0:         //pSrc  = source + equalLength;   /* point to the first differing chars */
michael@0:         //pTarg = target + equalLength;
michael@0:         if ((pSrc != source+sourceLength && ucol_unsafeCP(*pSrc, coll)) ||
michael@0:             (pTarg != target+targetLength && ucol_unsafeCP(*pTarg, coll)))
michael@0:         {
michael@0:             // We are stopped in the middle of a contraction.
michael@0:             // Scan backwards through the == part of the string looking for the start of the contraction.
michael@0:             //   It doesn't matter which string we scan, since they are the same in this region.
michael@0:             do
michael@0:             {
michael@0:                 equalLength--;
michael@0:                 pSrc--;
michael@0:             }
michael@0:             while (equalLength>0 && ucol_unsafeCP(*pSrc, coll));
michael@0:         }
michael@0: 
michael@0:         source += equalLength;
michael@0:         target += equalLength;
michael@0:         if (sourceLength > 0) {
michael@0:             sourceLength -= equalLength;
michael@0:         }
michael@0:         if (targetLength > 0) {
michael@0:             targetLength -= equalLength;
michael@0:         }
michael@0:     }
michael@0: 
michael@0:     UErrorCode status = U_ZERO_ERROR;
michael@0:     UCollationResult returnVal;
michael@0:     if(!coll->latinOneUse || (sourceLength > 0 && *source&0xff00) || (targetLength > 0 && *target&0xff00)) {
michael@0:         returnVal = ucol_strcollRegular(coll, source, sourceLength, target, targetLength, &status);
michael@0:     } else {
michael@0:         returnVal = ucol_strcollUseLatin1(coll, source, sourceLength, target, targetLength, &status);
michael@0:     }
michael@0:     UTRACE_EXIT_VALUE(returnVal);
michael@0:     return returnVal;
michael@0: }
michael@0: 
michael@0: U_CAPI UCollationResult U_EXPORT2
michael@0: ucol_strcollUTF8(
michael@0:         const UCollator *coll,
michael@0:         const char *source,
michael@0:         int32_t sourceLength,
michael@0:         const char *target,
michael@0:         int32_t targetLength,
michael@0:         UErrorCode *status)
michael@0: {
michael@0:     U_ALIGN_CODE(16);
michael@0: 
michael@0:     UTRACE_ENTRY(UTRACE_UCOL_STRCOLLUTF8);
michael@0:     if (UTRACE_LEVEL(UTRACE_VERBOSE)) {
michael@0:         UTRACE_DATA3(UTRACE_VERBOSE, "coll=%p, source=%p, target=%p", coll, source, target);
michael@0:         UTRACE_DATA2(UTRACE_VERBOSE, "source string = %vb ", source, sourceLength);
michael@0:         UTRACE_DATA2(UTRACE_VERBOSE, "target string = %vb ", target, targetLength);
michael@0:     }
michael@0: 
michael@0:     if (U_FAILURE(*status)) {
michael@0:         /* do nothing */
michael@0:         UTRACE_EXIT_VALUE_STATUS(UCOL_EQUAL, *status);
michael@0:         return UCOL_EQUAL;
michael@0:     }
michael@0: 
michael@0:     if((source == NULL && sourceLength != 0) || (target == NULL && targetLength != 0)) {
michael@0:         *status = U_ILLEGAL_ARGUMENT_ERROR;
michael@0:         UTRACE_EXIT_VALUE_STATUS(UCOL_EQUAL, *status);
michael@0:         return UCOL_EQUAL;
michael@0:     }
michael@0: 
michael@0:     /* Quick check if source and target are same strings. */
michael@0:     /* They should either both be NULL terminated or the explicit length should be set on both. */
michael@0:     if (source==target && sourceLength==targetLength) {
michael@0:         UTRACE_EXIT_VALUE_STATUS(UCOL_EQUAL, *status);
michael@0:         return UCOL_EQUAL;
michael@0:     }
michael@0: 
michael@0:     if(coll->delegate != NULL) {
michael@0:         return ((const Collator*)coll->delegate)->compareUTF8(
michael@0:             StringPiece(source, (sourceLength < 0) ? uprv_strlen(source) : sourceLength),
michael@0:             StringPiece(target, (targetLength < 0) ? uprv_strlen(target) : targetLength),
michael@0:             *status);
michael@0:     }
michael@0: 
michael@0:     /* Scan the strings.  Find:                                            */
michael@0:     /*    The length of any leading portion that is equal                  */
michael@0:     /*    Whether they are exactly equal.  (in which case we just return)  */
michael@0:     const char *pSrc = source;
michael@0:     const char *pTarg = target;
michael@0:     UBool bSrcLimit = FALSE;
michael@0:     UBool bTargLimit = FALSE;
michael@0: 
michael@0:     if (sourceLength == -1 && targetLength == -1) {
michael@0:         // Both strings are null terminated.
michael@0:         //    Scan through any leading equal portion.
michael@0:         while (*pSrc == *pTarg && *pSrc != 0) {
michael@0:             pSrc++;
michael@0:             pTarg++;
michael@0:         }
michael@0:         if (*pSrc == 0 && *pTarg == 0) {
michael@0:             UTRACE_EXIT_VALUE_STATUS(UCOL_EQUAL, *status);
michael@0:             return UCOL_EQUAL;
michael@0:         }
michael@0:         bSrcLimit = (*pSrc == 0);
michael@0:         bTargLimit = (*pTarg == 0);
michael@0:     }
michael@0:     else
michael@0:     {
michael@0:         // One or both strings has an explicit length.
michael@0:         const char *pSrcEnd = source + sourceLength;
michael@0:         const char *pTargEnd = target + targetLength;
michael@0: 
michael@0:         // Scan while the strings are bitwise ==, or until one is exhausted.
michael@0:         for (;;) {
michael@0:             if (pSrc == pSrcEnd || pTarg == pTargEnd) {
michael@0:                 break;
michael@0:             }
michael@0:             if ((*pSrc == 0 && sourceLength == -1) || (*pTarg == 0 && targetLength == -1)) {
michael@0:                 break;
michael@0:             }
michael@0:             if (*pSrc != *pTarg) {
michael@0:                 break;
michael@0:             }
michael@0:             pSrc++;
michael@0:             pTarg++;
michael@0:         }
michael@0:         bSrcLimit = (pSrc ==pSrcEnd  || (pSrcEnd <pSrc  && *pSrc==0));
michael@0:         bTargLimit = (pTarg==pTargEnd || (pTargEnd<pTarg && *pTarg==0));
michael@0: 
michael@0:         // If we made it all the way through both strings, we are done.  They are ==
michael@0:         if (bSrcLimit && bTargLimit) {
michael@0:             UTRACE_EXIT_VALUE_STATUS(UCOL_EQUAL, *status);
michael@0:             return UCOL_EQUAL;
michael@0:         }
michael@0:     }
michael@0: 
michael@0:     int32_t equalLength = (int32_t)(pSrc - source);
michael@0:     UBool bSawNonLatin1 = FALSE;
michael@0: 
michael@0:     if (equalLength > 0) {
michael@0:         // Align position to the start of UTF-8 code point.
michael@0:         if (bTargLimit) {
michael@0:             U8_SET_CP_START((const uint8_t*)source, 0, equalLength);
michael@0:         } else {
michael@0:             U8_SET_CP_START((const uint8_t*)target, 0, equalLength);
michael@0:         }
michael@0:         pSrc = source + equalLength;
michael@0:         pTarg = target + equalLength;
michael@0:     }
michael@0: 
michael@0:     if (equalLength > 0) {
michael@0:         /* There is an identical portion at the beginning of the two strings. */
michael@0:         /* If the identical portion ends within a contraction or a combining  */
michael@0:         /* character sequence, back up to the start of that sequence.
*/ michael@0: UBool bUnsafeCP = FALSE; michael@0: UChar32 uc32 = -1; michael@0: michael@0: if (!bSrcLimit) { michael@0: U8_GET_OR_FFFD((const uint8_t*)source, 0, equalLength, sourceLength, uc32); michael@0: if (uc32 >= 0x10000 || ucol_unsafeCP((UChar)uc32, coll)) { michael@0: bUnsafeCP = TRUE; michael@0: } michael@0: bSawNonLatin1 |= (uc32 > 0xff); michael@0: } michael@0: if (!bTargLimit) { michael@0: U8_GET_OR_FFFD((const uint8_t*)target, 0, equalLength, targetLength, uc32); michael@0: if (uc32 >= 0x10000 || ucol_unsafeCP((UChar)uc32, coll)) { michael@0: bUnsafeCP = TRUE; michael@0: } michael@0: bSawNonLatin1 |= (uc32 > 0xff); michael@0: } michael@0: michael@0: if (bUnsafeCP) { michael@0: while (equalLength > 0) { michael@0: // We are stopped in the middle of a contraction. michael@0: // Scan backwards through the == part of the string looking for the start of the contraction. michael@0: // It doesn't matter which string we scan, since they are the same in this region. michael@0: U8_PREV_OR_FFFD((uint8_t*)source, 0, equalLength, uc32); michael@0: bSawNonLatin1 |= (uc32 > 0xff); michael@0: if (uc32 < 0x10000 && !ucol_unsafeCP((UChar)uc32, coll)) { michael@0: break; michael@0: } michael@0: } michael@0: } michael@0: source += equalLength; michael@0: target += equalLength; michael@0: if (sourceLength > 0) { michael@0: sourceLength -= equalLength; michael@0: } michael@0: if (targetLength > 0) { michael@0: targetLength -= equalLength; michael@0: } michael@0: } else { michael@0: // Lead byte of Latin 1 character is 0x00 - 0xC3 michael@0: bSawNonLatin1 = (source && (sourceLength != 0) && (uint8_t)*source > 0xc3); michael@0: bSawNonLatin1 |= (target && (targetLength != 0) && (uint8_t)*target > 0xc3); michael@0: } michael@0: michael@0: UCollationResult returnVal; michael@0: michael@0: if(!coll->latinOneUse || bSawNonLatin1) { michael@0: returnVal = ucol_strcollRegularUTF8(coll, source, sourceLength, target, targetLength, status); michael@0: } else { michael@0: returnVal = ucol_strcollUseLatin1UTF8(coll, source, sourceLength, target, targetLength, status); michael@0: } michael@0: UTRACE_EXIT_VALUE_STATUS(returnVal, *status); michael@0: return returnVal; michael@0: } michael@0: michael@0: michael@0: /* convenience function for comparing strings */ michael@0: U_CAPI UBool U_EXPORT2 michael@0: ucol_greater( const UCollator *coll, michael@0: const UChar *source, michael@0: int32_t sourceLength, michael@0: const UChar *target, michael@0: int32_t targetLength) michael@0: { michael@0: return (ucol_strcoll(coll, source, sourceLength, target, targetLength) michael@0: == UCOL_GREATER); michael@0: } michael@0: michael@0: /* convenience function for comparing strings */ michael@0: U_CAPI UBool U_EXPORT2 michael@0: ucol_greaterOrEqual( const UCollator *coll, michael@0: const UChar *source, michael@0: int32_t sourceLength, michael@0: const UChar *target, michael@0: int32_t targetLength) michael@0: { michael@0: return (ucol_strcoll(coll, source, sourceLength, target, targetLength) michael@0: != UCOL_LESS); michael@0: } michael@0: michael@0: /* convenience function for comparing strings */ michael@0: U_CAPI UBool U_EXPORT2 michael@0: ucol_equal( const UCollator *coll, michael@0: const UChar *source, michael@0: int32_t sourceLength, michael@0: const UChar *target, michael@0: int32_t targetLength) michael@0: { michael@0: return (ucol_strcoll(coll, source, sourceLength, target, targetLength) michael@0: == UCOL_EQUAL); michael@0: } michael@0: michael@0: U_CAPI void U_EXPORT2 michael@0: ucol_getUCAVersion(const 
UCollator* coll, UVersionInfo info) {
michael@0:     if(coll && coll->UCA) {
michael@0:         uprv_memcpy(info, coll->UCA->image->UCAVersion, sizeof(UVersionInfo));
michael@0:     }
michael@0: }
michael@0: 
michael@0: #endif /* #if !UCONFIG_NO_COLLATION */
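/*
 * Minimal end-to-end usage sketch, kept as a comment (the locale and sample
 * strings are arbitrary, error handling is minimal, and the printed results
 * assume default collation data is available):
 *
 *     #include <stdio.h>
 *     #include "unicode/ucol.h"
 *     #include "unicode/uversion.h"
 *
 *     int main(void) {
 *         UErrorCode ec = U_ZERO_ERROR;
 *         UCollator *c = ucol_open("en_US", &ec);
 *         if (U_FAILURE(ec)) { return 1; }
 *
 *         static const UChar apple[]  = { 0x61, 0x70, 0x70, 0x6C, 0x65, 0 };        // "apple"
 *         static const UChar banana[] = { 0x62, 0x61, 0x6E, 0x61, 0x6E, 0x61, 0 };  // "banana"
 *         if (ucol_strcoll(c, apple, -1, banana, -1) == UCOL_LESS) {
 *             printf("apple < banana\n");
 *         }
 *
 *         // UTF-8 entry point; explicit lengths avoid an internal strlen().
 *         UCollationResult r = ucol_strcollUTF8(c, "Apfel", 5, "\xC3\x84pfel", 6, &ec);
 *         printf("Apfel vs \xC3\x84pfel: %d\n", (int)r);
 *
 *         UVersionInfo uca;
 *         char buf[U_MAX_VERSION_STRING_LENGTH];
 *         ucol_getUCAVersion(c, uca);
 *         u_versionToString(uca, buf);
 *         printf("UCA version: %s\n", buf);
 *
 *         ucol_close(c);
 *         return 0;
 *     }
 */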