Global Metrics
path: .metrics.halstead.level
old: 0.09090909090909093
new: 0.008497685331980467
path: .metrics.halstead.volume
old: 174.165028051187
new: 25009.494953909743
path: .metrics.halstead.effort
old: 1915.815308563057
new: 2943094.9696134534
path: .metrics.halstead.N1
old: 25.0
new: 1950.0
path: .metrics.halstead.bugs
old: 0.051417920063565055
new: 6.8456537596133655
path: .metrics.halstead.purity_ratio
old: 1.5135060440246406
new: 0.5513727715657896
path: .metrics.halstead.estimated_program_length
old: 62.05374780501027
new: 1743.9920764625924
path: .metrics.halstead.time
old: 106.43418380905872
new: 163505.2760896363
path: .metrics.halstead.N2
old: 16.0
new: 1213.0
path: .metrics.halstead.vocabulary
old: 19.0
new: 240.0
path: .metrics.halstead.n1
old: 11.0
new: 39.0
path: .metrics.halstead.n2
old: 8.0
new: 201.0
path: .metrics.halstead.difficulty
old: 11.0
new: 117.67910447761194
path: .metrics.halstead.length
old: 41.0
new: 3163.0
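The derived Halstead values in the "new" column are mutually consistent with the four base counts (n1 = 39, n2 = 201, N1 = 1950, N2 = 1213). A minimal C++ sketch recomputing them is below; the exact conventions for bugs (E^(2/3)/3000) and time (E/18) are assumptions inferred from the numbers, matching tools such as rust-code-analysis, and are not stated in the report itself.

// Sketch: derive the Halstead metrics above from the four base counts.
#include <cmath>
#include <cstdio>

int main() {
  const double n1 = 39.0, n2 = 201.0;     // distinct operators / operands
  const double N1 = 1950.0, N2 = 1213.0;  // total operators / operands

  const double vocabulary = n1 + n2;                          // 240
  const double length = N1 + N2;                              // 3163
  const double volume = length * std::log2(vocabulary);       // ~25009.49
  const double difficulty = (n1 / 2.0) * (N2 / n2);           // ~117.68
  const double level = 1.0 / difficulty;                      // ~0.0085
  const double effort = difficulty * volume;                  // ~2943094.97
  const double time = effort / 18.0;                          // ~163505.28
  const double bugs = std::pow(effort, 2.0 / 3.0) / 3000.0;   // ~6.85
  const double estimated_program_length =
      n1 * std::log2(n1) + n2 * std::log2(n2);                // ~1743.99
  const double purity_ratio = estimated_program_length / length;  // ~0.55

  std::printf("V=%.2f D=%.2f L=%.6f E=%.2f T=%.2f B=%.3f PR=%.4f\n", volume,
              difficulty, level, effort, time, bugs, purity_ratio);
  return 0;
}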
path: .metrics.nargs.average
old: 1.0
new: 1.1346153846153846
path: .metrics.nargs.sum
old: 2.0
new: 59.0
path: .metrics.cyclomatic.sum
old: 6.0
new: 156.0
path: .metrics.cyclomatic.average
old: 1.5
new: 2.736842105263158
path: .metrics.loc.lloc
old: 2.0
new: 294.0
path: .metrics.loc.cloc
old: 0.0
new: 39.0
path: .metrics.loc.blank
old: 2.0
new: 122.0
path: .metrics.loc.ploc
old: 8.0
new: 644.0
path: .metrics.loc.sloc
old: 10.0
new: 805.0
path: .metrics.mi.mi_original
old: 105.48610440209455
new: -25.932101209669383
path: .metrics.mi.mi_sei
old: 77.09434686801133
new: -80.50811632412594
path: .metrics.mi.mi_visual_studio
old: 61.68778035210209
new: 0.0
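The three maintainability-index values above also follow from the other metrics. The sketch below assumes the common formulas, with V the Halstead volume, CC the cyclomatic sum, and SLOC/CLOC from the loc block; using CLOC/SLOC for the SEI comment term is an inference from the numbers, not documented in the report.

// Sketch: recompute the three MI variants (formulas assumed).
#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  const double V = 25009.494953909743;  // .metrics.halstead.volume (new)
  const double CC = 156.0;              // .metrics.cyclomatic.sum (new)
  const double SLOC = 805.0;            // .metrics.loc.sloc (new)
  const double CLOC = 39.0;             // .metrics.loc.cloc (new)

  const double mi_original =
      171.0 - 5.2 * std::log(V) - 0.23 * CC - 16.2 * std::log(SLOC);  // ~-25.93
  const double mi_sei = 171.0 - 5.2 * std::log2(V) - 0.23 * CC -
                        16.2 * std::log2(SLOC) +
                        50.0 * std::sin(std::sqrt(2.4 * CLOC / SLOC));  // ~-80.51
  // The Visual Studio variant rescales mi_original to 0..100 and clamps at 0.
  const double mi_visual_studio = std::max(0.0, mi_original * 100.0 / 171.0);

  std::printf("%.3f %.3f %.3f\n", mi_original, mi_sei, mi_visual_studio);
  return 0;
}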
path: .metrics.nom.total
old: 2.0
new: 52.0
path: .metrics.nom.closures
old: 0.0
new: 1.0
path: .metrics.nom.functions
old: 2.0
new: 51.0
path: .metrics.cognitive.average
old: 0.5
new: 2.3076923076923075
path: .metrics.cognitive.sum
old: 1.0
new: 120.0
path: .metrics.nexits.sum
old: 0.0
new: 78.0
path: .metrics.nexits.average
old: 0.0
new: 1.5
Spaces Data
Minimal test - lines (12, 805)
path: .spaces[0].metrics.cognitive.sum
old: 0.0
new: 120.0
path: .spaces[0].metrics.cognitive.average
old: null
new: 2.3076923076923075
path: .spaces[0].metrics.cyclomatic.average
old: 1.0
new: 2.767857142857143
path: .spaces[0].metrics.cyclomatic.sum
old: 1.0
new: 155.0
path: .spaces[0].metrics.nargs.sum
old: 0.0
new: 59.0
path: .spaces[0].metrics.nargs.average
old: null
new: 1.1346153846153846
path: .spaces[0].metrics.nexits.average
old: null
new: 1.5
path: .spaces[0].metrics.nexits.sum
old: 0.0
new: 78.0
path: .spaces[0].metrics.loc.lloc
old: 0.0
new: 294.0
path: .spaces[0].metrics.loc.sloc
old: 1.0
new: 794.0
path: .spaces[0].metrics.loc.ploc
old: 1.0
new: 641.0
path: .spaces[0].metrics.loc.cloc
old: 0.0
new: 33.0
path: .spaces[0].metrics.loc.blank
old: 0.0
new: 120.0
path: .spaces[0].metrics.nom.total
old: 0.0
new: 52.0
path: .spaces[0].metrics.nom.closures
old: 0.0
new: 1.0
path: .spaces[0].metrics.nom.functions
old: 0.0
new: 51.0
path: .spaces[0].metrics.halstead.N2
old: 1.0
new: 1211.0
path: .spaces[0].metrics.halstead.difficulty
old: 0.5
new: 118.66582914572864
path: .spaces[0].metrics.halstead.vocabulary
old: 2.0
new: 238.0
path: .spaces[0].metrics.halstead.effort
old: 1.0
new: 2961367.3479419076
path: .spaces[0].metrics.halstead.n2
old: 1.0
new: 199.0
path: .spaces[0].metrics.halstead.N1
old: 1.0
new: 1950.0
path: .spaces[0].metrics.halstead.length
old: 2.0
new: 3161.0
path: .spaces[0].metrics.halstead.estimated_program_length
old: 0.0
new: 1725.818986023814
path: .spaces[0].metrics.halstead.time
old: 0.05555555555555555
new: 164520.40821899485
path: .spaces[0].metrics.halstead.level
old: 2.0
new: 0.008427025768066232
path: .spaces[0].metrics.halstead.purity_ratio
old: 0.0
new: 0.5459724726427757
path: .spaces[0].metrics.halstead.bugs
old: 0.0003333333333333333
new: 6.873958950397275
path: .spaces[0].metrics.halstead.n1
old: 1.0
new: 39.0
path: .spaces[0].metrics.halstead.volume
old: 2.0
new: 24955.51894981641
path: .spaces[0].metrics.mi.mi_sei
old: 165.57000000000002
new: -81.13105257310703
path: .spaces[0].metrics.mi.mi_original
old: 167.1656346610883
new: -25.46797351318554
path: .spaces[0].metrics.mi.mi_visual_studio
old: 97.75768108835572
new: 0.0
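A quick consistency check on the averages in both sections: cognitive, nargs, and nexits divide their sums by the function count (nom.total = 52), while cyclomatic appears to divide by the number of spaces (57 globally, 56 within this space). The space-count divisor is inferred from the values, not stated in the report.

// Sketch: the reported averages as quotients of the reported sums.
#include <cstdio>

int main() {
  const double functions = 52.0;  // .spaces[0].metrics.nom.total
  std::printf("cognitive  %.10f\n", 120.0 / functions);  // 2.3076923077
  std::printf("nargs      %.10f\n", 59.0 / functions);   // 1.1346153846
  std::printf("nexits     %.10f\n", 78.0 / functions);   // 1.5000000000
  // Cyclomatic seems to average over spaces, not functions (assumption):
  std::printf("cyclomatic %.10f\n", 155.0 / 56.0);       // 2.7678571429
  return 0;
}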
Code
namespace mozilla {
template <>
char const TokenizerBase<char>::sWhitespaces[] = {' ', '\t', 0};
template <>
char16_t const TokenizerBase<char16_t>::sWhitespaces[3] = {' ', '\t', 0};
template <typename TChar>
static bool contains(TChar const* const list, TChar const needle) {
for (TChar const* c = list; *c; ++c) {
if (needle == *c) {
return true;
}
}
return false;
}
template <typename TChar>
TTokenizer<TChar>::TTokenizer(const typename base::TAString& aSource,
const TChar* aWhitespaces,
const TChar* aAdditionalWordChars)
: TokenizerBase<TChar>(aWhitespaces, aAdditionalWordChars) {
base::mInputFinished = true;
aSource.BeginReading(base::mCursor);
mRecord = mRollback = base::mCursor;
aSource.EndReading(base::mEnd);
}
template <typename TChar>
TTokenizer<TChar>::TTokenizer(const TChar* aSource, const TChar* aWhitespaces,
const TChar* aAdditionalWordChars)
: TTokenizer(typename base::TDependentString(aSource), aWhitespaces,
aAdditionalWordChars) {}
template <typename TChar>
bool TTokenizer<TChar>::Next(typename base::Token& aToken) {
if (!base::HasInput()) {
base::mHasFailed = true;
return false;
}
mRollback = base::mCursor;
base::mCursor = base::Parse(aToken);
base::AssignFragment(aToken, mRollback, base::mCursor);
base::mPastEof = aToken.Type() == base::TOKEN_EOF;
base::mHasFailed = false;
return true;
}
template <typename TChar>
bool TTokenizer<TChar>::Check(const typename base::TokenType aTokenType,
typename base::Token& aResult) {
if (!base::HasInput()) {
base::mHasFailed = true;
return false;
}
typename base::TAString::const_char_iterator next = base::Parse(aResult);
if (aTokenType != aResult.Type()) {
base::mHasFailed = true;
return false;
}
mRollback = base::mCursor;
base::mCursor = next;
base::AssignFragment(aResult, mRollback, base::mCursor);
base::mPastEof = aResult.Type() == base::TOKEN_EOF;
base::mHasFailed = false;
return true;
}
template <typename TChar>
bool TTokenizer<TChar>::Check(const typename base::Token& aToken) {
#ifdef DEBUG
base::Validate(aToken);
#endif
if (!base::HasInput()) {
base::mHasFailed = true;
return false;
}
typename base::Token parsed;
typename base::TAString::const_char_iterator next = base::Parse(parsed);
if (!aToken.Equals(parsed)) {
base::mHasFailed = true;
return false;
}
mRollback = base::mCursor;
base::mCursor = next;
base::mPastEof = parsed.Type() == base::TOKEN_EOF;
base::mHasFailed = false;
return true;
}
template <typename TChar>
void TTokenizer<TChar>::SkipWhites(WhiteSkipping aIncludeNewLines) {
if (!CheckWhite() &&
(aIncludeNewLines == DONT_INCLUDE_NEW_LINE || !CheckEOL())) {
return;
}
typename base::TAString::const_char_iterator rollback = mRollback;
while (CheckWhite() || (aIncludeNewLines == INCLUDE_NEW_LINE && CheckEOL())) {
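// Body intentionally empty: each successful CheckWhite()/CheckEOL() call
// above already consumes one token.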
}
base::mHasFailed = false;
mRollback = rollback;
}
template <typename TChar>
void TTokenizer<TChar>::SkipUntil(typename base::Token const& aToken) {
typename base::TAString::const_char_iterator rollback = base::mCursor;
const typename base::Token eof = base::Token::EndOfFile();
typename base::Token t;
while (Next(t)) {
if (aToken.Equals(t) || eof.Equals(t)) {
Rollback();
break;
}
}
mRollback = rollback;
}
template <typename TChar>
bool TTokenizer<TChar>::CheckChar(bool (*aClassifier)(const TChar aChar)) {
if (!aClassifier) {
MOZ_ASSERT(false);
return false;
}
if (!base::HasInput() || base::mCursor == base::mEnd) {
base::mHasFailed = true;
return false;
}
if (!aClassifier(*base::mCursor)) {
base::mHasFailed = true;
return false;
}
mRollback = base::mCursor;
++base::mCursor;
base::mHasFailed = false;
return true;
}
template <typename TChar>
bool TTokenizer<TChar>::CheckPhrase(const typename base::TAString& aPhrase) {
if (!base::HasInput()) {
return false;
}
typedef typename base::TAString::const_char_iterator Cursor;
TTokenizer pattern(aPhrase);
MOZ_ASSERT(!pattern.CheckEOF(),
"This will return true but won't shift the Tokenizer's cursor");
return [&](Cursor cursor, Cursor rollback) mutable {
while (true) {
if (pattern.CheckEOF()) {
base::mHasFailed = false;
mRollback = cursor;
return true;
}
typename base::Token t1, t2;
Unused << Next(t1);
Unused << pattern.Next(t2);
if (t1.Type() == t2.Type() && t1.Fragment().Equals(t2.Fragment())) {
continue;
}
break;
}
base::mHasFailed = true;
base::mPastEof = false;
base::mCursor = cursor;
mRollback = rollback;
return false;
}(base::mCursor, mRollback);
}
template <typename TChar>
bool TTokenizer<TChar>::ReadChar(TChar* aValue) {
MOZ_RELEASE_ASSERT(aValue);
typename base::Token t;
if (!Check(base::TOKEN_CHAR, t)) {
return false;
}
*aValue = t.AsChar();
return true;
}
template <typename TChar>
bool TTokenizer<TChar>::ReadChar(bool (*aClassifier)(const TChar aChar),
TChar* aValue) {
MOZ_RELEASE_ASSERT(aValue);
if (!CheckChar(aClassifier)) {
return false;
}
*aValue = *mRollback;
return true;
}
template <typename TChar>
bool TTokenizer<TChar>::ReadWord(typename base::TAString& aValue) {
typename base::Token t;
if (!Check(base::TOKEN_WORD, t)) {
return false;
}
aValue.Assign(t.AsString());
return true;
}
template <typename TChar>
bool TTokenizer<TChar>::ReadWord(typename base::TDependentSubstring& aValue) {
typename base::Token t;
if (!Check(base::TOKEN_WORD, t)) {
return false;
}
aValue.Rebind(t.AsString().BeginReading(), t.AsString().Length());
return true;
}
template <typename TChar>
bool TTokenizer<TChar>::ReadUntil(typename base::Token const& aToken,
typename base::TAString& aResult,
ClaimInclusion aInclude) {
typename base::TDependentSubstring substring;
bool rv = ReadUntil(aToken, substring, aInclude);
aResult.Assign(substring);
return rv;
}
template <typename TChar>
bool TTokenizer<TChar>::ReadUntil(typename base::Token const& aToken,
typename base::TDependentSubstring& aResult,
ClaimInclusion aInclude) {
typename base::TAString::const_char_iterator record = mRecord;
Record();
typename base::TAString::const_char_iterator rollback = mRollback =
base::mCursor;
bool found = false;
typename base::Token t;
while (Next(t)) {
if (aToken.Equals(t)) {
found = true;
break;
}
if (t.Equals(base::Token::EndOfFile())) {
// We don't want to eat it.
Rollback();
break;
}
}
Claim(aResult, aInclude);
mRollback = rollback;
mRecord = record;
return found;
}
template <typename TChar>
void TTokenizer<TChar>::Rollback() {
MOZ_ASSERT(base::mCursor > mRollback || base::mPastEof, "TODO!!!");
base::mPastEof = false;
base::mHasFailed = false;
base::mCursor = mRollback;
}
template <typename TChar>
void TTokenizer<TChar>::Record(ClaimInclusion aInclude) {
mRecord = aInclude == INCLUDE_LAST ? mRollback : base::mCursor;
}
template <typename TChar>
void TTokenizer<TChar>::Claim(typename base::TAString& aResult,
ClaimInclusion aInclusion) {
typename base::TAString::const_char_iterator close =
aInclusion == EXCLUDE_LAST ? mRollback : base::mCursor;
aResult.Assign(Substring(mRecord, close));
}
template <typename TChar>
void TTokenizer<TChar>::Claim(typename base::TDependentSubstring& aResult,
ClaimInclusion aInclusion) {
typename base::TAString::const_char_iterator close =
aInclusion == EXCLUDE_LAST ? mRollback : base::mCursor;
MOZ_RELEASE_ASSERT(close >= mRecord, "Overflow!");
aResult.Rebind(mRecord, close - mRecord);
}
// TokenizerBase
template <typename TChar>
TokenizerBase<TChar>::TokenizerBase(const TChar* aWhitespaces,
const TChar* aAdditionalWordChars)
: mPastEof(false),
mHasFailed(false),
mInputFinished(true),
mMode(Mode::FULL),
mMinRawDelivery(1024),
mWhitespaces(aWhitespaces ? aWhitespaces : sWhitespaces),
mAdditionalWordChars(aAdditionalWordChars),
mCursor(nullptr),
mEnd(nullptr),
mNextCustomTokenID(TOKEN_CUSTOM0) {}
template <typename TChar>
auto TokenizerBase<TChar>::AddCustomToken(const TAString& aValue,
ECaseSensitivity aCaseInsensitivity,
bool aEnabled) -> Token {
MOZ_ASSERT(!aValue.IsEmpty());
UniquePtr<Token>& t = *mCustomTokens.AppendElement();
t = MakeUnique<Token>();
t->mType = static_cast<TokenType>(++mNextCustomTokenID);
t->mCustomCaseInsensitivity = aCaseInsensitivity;
t->mCustomEnabled = aEnabled;
t->mCustom.Assign(aValue);
return *t;
}
template <typename TChar>
void TokenizerBase<TChar>::RemoveCustomToken(Token& aToken) {
if (aToken.mType == TOKEN_UNKNOWN) {
// Already removed
return;
}
for (UniquePtr<Token> const& custom : mCustomTokens) {
if (custom->mType == aToken.mType) {
mCustomTokens.RemoveElement(custom);
aToken.mType = TOKEN_UNKNOWN;
return;
}
}
MOZ_ASSERT(false, "Token to remove not found");
}
template <typename TChar>
void TokenizerBase<TChar>::EnableCustomToken(Token const& aToken,
bool aEnabled) {
if (aToken.mType == TOKEN_UNKNOWN) {
// Already removed
return;
}
for (UniquePtr<Token> const& custom : mCustomTokens) {
if (custom->Type() == aToken.Type()) {
// This effectively destroys the token instance.
custom->mCustomEnabled = aEnabled;
return;
}
}
MOZ_ASSERT(false, "Token to change not found");
}
template <typename TChar>
void TokenizerBase<TChar>::SetTokenizingMode(Mode aMode) {
mMode = aMode;
}
template <typename TChar>
bool TokenizerBase<TChar>::HasFailed() const {
return mHasFailed;
}
template <typename TChar>
bool TokenizerBase<TChar>::HasInput() const {
return !mPastEof;
}
template <typename TChar>
auto TokenizerBase<TChar>::Parse(Token& aToken) const ->
typename TAString::const_char_iterator {
if (mCursor == mEnd) {
if (!mInputFinished) {
return mCursor;
}
aToken = Token::EndOfFile();
return mEnd;
}
MOZ_RELEASE_ASSERT(mEnd >= mCursor, "Overflow!");
typename TAString::size_type available = mEnd - mCursor;
uint32_t longestCustom = 0;
for (UniquePtr<Token> const& custom : mCustomTokens) {
if (IsCustom(mCursor, *custom, &longestCustom)) {
aToken = *custom;
return mCursor + custom->mCustom.Length();
}
}
if (!mInputFinished && available < longestCustom) {
// Not enough data to deterministically decide.
return mCursor;
}
typename TAString::const_char_iterator next = mCursor;
if (mMode == Mode::CUSTOM_ONLY) {
// We have to do a brute-force search for all of the enabled custom
// tokens.
while (next < mEnd) {
++next;
for (UniquePtr<Token> const& custom : mCustomTokens) {
if (IsCustom(next, *custom)) {
aToken = Token::Raw();
return next;
}
}
}
if (mInputFinished) {
// End of the data reached.
aToken = Token::Raw();
return next;
}
if (longestCustom < available && available > mMinRawDelivery) {
// We can return some data w/o waiting for either a custom token
// or call to FinishData() when we leave the tail where all the
// custom tokens potentially fit, so we can't lose only partially
// delivered tokens. This preserves reasonable granularity.
aToken = Token::Raw();
return mEnd - longestCustom + 1;
}
// Not enough data to deterministically decide.
return mCursor;
}
enum State {
PARSE_INTEGER,
PARSE_WORD,
PARSE_CRLF,
PARSE_LF,
PARSE_WS,
PARSE_CHAR,
} state;
if (IsWordFirst(*next)) {
state = PARSE_WORD;
} else if (IsNumber(*next)) {
state = PARSE_INTEGER;
} else if (contains(mWhitespaces, *next)) { // not UTF-8 friendly?
state = PARSE_WS;
} else if (*next == '\r') {
state = PARSE_CRLF;
} else if (*next == '\n') {
state = PARSE_LF;
} else {
state = PARSE_CHAR;
}
mozilla::CheckedUint64 resultingNumber = 0;
while (next < mEnd) {
switch (state) {
case PARSE_INTEGER:
// Keep it simple for now
resultingNumber *= 10;
resultingNumber += static_cast<uint64_t>(*next - '0');
++next;
if (IsPending(next)) {
break;
}
if (IsEnd(next) || !IsNumber(*next)) {
if (!resultingNumber.isValid()) {
aToken = Token::Error();
} else {
aToken = Token::Number(resultingNumber.value());
}
return next;
}
break;
case PARSE_WORD:
++next;
if (IsPending(next)) {
break;
}
if (IsEnd(next) || !IsWord(*next)) {
aToken = Token::Word(Substring(mCursor, next));
return next;
}
break;
case PARSE_CRLF:
++next;
if (IsPending(next)) {
break;
}
if (!IsEnd(next) && *next == '\n') { // LF is optional
++next;
}
aToken = Token::NewLine();
return next;
case PARSE_LF:
++next;
aToken = Token::NewLine();
return next;
case PARSE_WS:
++next;
aToken = Token::Whitespace();
return next;
case PARSE_CHAR:
++next;
aToken = Token::Char(*mCursor);
return next;
} // switch (state)
} // while (next < end)
MOZ_ASSERT(!mInputFinished);
return mCursor;
}
template <typename TChar>
bool TokenizerBase<TChar>::IsEnd(
const typename TAString::const_char_iterator& caret) const {
return caret == mEnd;
}
template <typename TChar>
bool TokenizerBase<TChar>::IsPending(
const typename TAString::const_char_iterator& caret) const {
return IsEnd(caret) && !mInputFinished;
}
template <typename TChar>
bool TokenizerBase<TChar>::IsWordFirst(const TChar aInput) const {
// TODO: make this fully work with unicode
return (ToLowerCase(static_cast<uint32_t>(aInput)) !=
ToUpperCase(static_cast<uint32_t>(aInput))) ||
'_' == aInput ||
(mAdditionalWordChars ? contains(mAdditionalWordChars, aInput)
: false);
}
template <typename TChar>
bool TokenizerBase<TChar>::IsWord(const TChar aInput) const {
return IsWordFirst(aInput) || IsNumber(aInput);
}
template <typename TChar>
bool TokenizerBase<TChar>::IsNumber(const TChar aInput) const {
// TODO: are there unicode numbers?
return aInput >= '0' && aInput <= '9';
}
template <typename TChar>
bool TokenizerBase<TChar>::IsCustom(
const typename TAString::const_char_iterator& caret,
const Token& aCustomToken, uint32_t* aLongest) const {
MOZ_ASSERT(aCustomToken.mType > TOKEN_CUSTOM0);
if (!aCustomToken.mCustomEnabled) {
return false;
}
if (aLongest) {
*aLongest = std::max(*aLongest, aCustomToken.mCustom.Length());
}
// This is not very likely to happen according to how we call this method
// and since it's on a hot path, it's just a diagnostic assert,
// not a release assert.
MOZ_DIAGNOSTIC_ASSERT(mEnd >= caret, "Overflow?");
uint32_t inputLength = mEnd - caret;
if (aCustomToken.mCustom.Length() > inputLength) {
return false;
}
TDependentSubstring inputFragment(caret, aCustomToken.mCustom.Length());
if (aCustomToken.mCustomCaseInsensitivity == CASE_INSENSITIVE) {
if constexpr (std::is_same_v<TChar, char>) {
return inputFragment.Equals(aCustomToken.mCustom,
nsCaseInsensitiveUTF8StringComparator);
} else {
return inputFragment.Equals(aCustomToken.mCustom,
nsCaseInsensitiveStringComparator);
}
}
return inputFragment.Equals(aCustomToken.mCustom);
}
template <typename TChar>
void TokenizerBase<TChar>::AssignFragment(
Token& aToken, typename TAString::const_char_iterator begin,
typename TAString::const_char_iterator end) {
aToken.AssignFragment(begin, end);
}
#ifdef DEBUG
template <typename TChar>
void TokenizerBase<TChar>::Validate(Token const& aToken) {
if (aToken.Type() == TOKEN_WORD) {
typename TAString::const_char_iterator c = aToken.AsString().BeginReading();
typename TAString::const_char_iterator e = aToken.AsString().EndReading();
if (c < e) {
MOZ_ASSERT(IsWordFirst(*c));
while (++c < e) {
MOZ_ASSERT(IsWord(*c));
}
}
}
}
#endif
// TokenizerBase::Token
template <typename TChar>
TokenizerBase<TChar>::Token::Token()
: mType(TOKEN_UNKNOWN),
mChar(0),
mInteger(0),
mCustomCaseInsensitivity(CASE_SENSITIVE),
mCustomEnabled(false) {}
template <typename TChar>
TokenizerBase<TChar>::Token::Token(const Token& aOther)
: mType(aOther.mType),
mCustom(aOther.mCustom),
mChar(aOther.mChar),
mInteger(aOther.mInteger),
mCustomCaseInsensitivity(aOther.mCustomCaseInsensitivity),
mCustomEnabled(aOther.mCustomEnabled) {
if (mType == TOKEN_WORD || mType > TOKEN_CUSTOM0) {
mWord.Rebind(aOther.mWord.BeginReading(), aOther.mWord.Length());
}
}
template <typename TChar>
auto TokenizerBase<TChar>::Token::operator=(const Token& aOther) -> Token& {
mType = aOther.mType;
mCustom = aOther.mCustom;
mChar = aOther.mChar;
mWord.Rebind(aOther.mWord.BeginReading(), aOther.mWord.Length());
mInteger = aOther.mInteger;
mCustomCaseInsensitivity = aOther.mCustomCaseInsensitivity;
mCustomEnabled = aOther.mCustomEnabled;
return *this;
}
template <typename TChar>
void TokenizerBase<TChar>::Token::AssignFragment(
typename TAString::const_char_iterator begin,
typename TAString::const_char_iterator end) {
MOZ_RELEASE_ASSERT(end >= begin, "Overflow!");
mFragment.Rebind(begin, end - begin);
}
// static
template <typename TChar>
auto TokenizerBase<TChar>::Token::Raw() -> Token {
Token t;
t.mType = TOKEN_RAW;
return t;
}
// static
template <typename TChar>
auto TokenizerBase<TChar>::Token::Word(TAString const& aValue) -> Token {
Token t;
t.mType = TOKEN_WORD;
t.mWord.Rebind(aValue.BeginReading(), aValue.Length());
return t;
}
// static
template <typename TChar>
auto TokenizerBase<TChar>::Token::Char(TChar const aValue) -> Token {
Token t;
t.mType = TOKEN_CHAR;
t.mChar = aValue;
return t;
}
// static
template <typename TChar>
auto TokenizerBase<TChar>::Token::Number(uint64_t const aValue) -> Token {
Token t;
t.mType = TOKEN_INTEGER;
t.mInteger = aValue;
return t;
}
// static
template <typename TChar>
auto TokenizerBase<TChar>::Token::Whitespace() -> Token {
Token t;
t.mType = TOKEN_WS;
t.mChar = '\0';
return t;
}
// static
template <typename TChar>
auto TokenizerBase<TChar>::Token::NewLine() -> Token {
Token t;
t.mType = TOKEN_EOL;
return t;
}
// static
template <typename TChar>
auto TokenizerBase<TChar>::Token::EndOfFile() -> Token {
Token t;
t.mType = TOKEN_EOF;
return t;
}
// static
template <typename TChar>
auto TokenizerBase<TChar>::Token::Error() -> Token {
Token t;
t.mType = TOKEN_ERROR;
return t;
}
template <typename TChar>
bool TokenizerBase<TChar>::Token::Equals(const Token& aOther) const {
if (mType != aOther.mType) {
return false;
}
switch (mType) {
case TOKEN_INTEGER:
return AsInteger() == aOther.AsInteger();
case TOKEN_WORD:
return AsString() == aOther.AsString();
case TOKEN_CHAR:
return AsChar() == aOther.AsChar();
default:
return true;
}
}
template <typename TChar>
TChar TokenizerBase<TChar>::Token::AsChar() const {
MOZ_ASSERT(mType == TOKEN_CHAR || mType == TOKEN_WS);
return mChar;
}
template <typename TChar>
auto TokenizerBase<TChar>::Token::AsString() const -> TDependentSubstring {
MOZ_ASSERT(mType == TOKEN_WORD);
return mWord;
}
template <typename TChar>
uint64_t TokenizerBase<TChar>::Token::AsInteger() const {
MOZ_ASSERT(mType == TOKEN_INTEGER);
return mInteger;
}
template class TokenizerBase<char>;
template class TokenizerBase<char16_t>;
template class TTokenizer<char>;
template class TTokenizer<char16_t>;
} // namespace mozilla
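For context, a rough usage sketch of the tokenizer measured above. The Tokenizer alias (assumed to be TTokenizer<char>), nsAutoCString, and mozilla::Unused come from the surrounding Mozilla headers rather than this file, so treat the details as assumptions, not a verbatim API reference.

#include "mozilla/Tokenizer.h"
#include "mozilla/Unused.h"

void Example() {
  mozilla::Tokenizer t("host = example.org");

  nsAutoCString name;
  mozilla::Unused << t.ReadWord(name);  // name == "host"
  t.SkipWhites(mozilla::Tokenizer::DONT_INCLUDE_NEW_LINE);
  mozilla::Unused << t.Check(mozilla::Tokenizer::Token::Char('='));
  t.SkipWhites(mozilla::Tokenizer::DONT_INCLUDE_NEW_LINE);

  // Claim the rest of the input, leaving the end-of-file token unconsumed.
  nsAutoCString value;
  mozilla::Unused << t.ReadUntil(mozilla::Tokenizer::Token::EndOfFile(), value,
                                 mozilla::Tokenizer::EXCLUDE_LAST);
}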