//===-- lib/Parser/token-sequence.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "token-sequence.h"
#include "prescan.h"
#include "flang/Parser/characters.h"
#include "flang/Parser/message.h"
#include "llvm/Support/raw_ostream.h"

namespace Fortran::parser {

TokenSequence &TokenSequence::operator=(TokenSequence &&that) {
  clear();
  swap(that);
  return *this;
}

void TokenSequence::clear() {
  start_.clear();
  nextStart_ = 0;
  char_.clear();
  provenances_.clear();
}

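// Discard the last token along with its characters and provenance.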
void TokenSequence::pop_back() {
  CHECK(!start_.empty());
  CHECK(nextStart_ > start_.back());
  std::size_t bytes{nextStart_ - start_.back()};
  nextStart_ = start_.back();
  start_.pop_back();
  char_.resize(nextStart_);
  provenances_.RemoveLastBytes(bytes);
}

void TokenSequence::shrink_to_fit() {
  start_.shrink_to_fit();
  char_.shrink_to_fit();
  provenances_.shrink_to_fit();
}

void TokenSequence::swap(TokenSequence &that) {
  start_.swap(that.start_);
  std::swap(nextStart_, that.nextStart_);
  char_.swap(that.char_);
  provenances_.swap(that.provenances_);
}

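// Return the index of the first non-blank token at or after "at",
// or the total token count if there is none.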
std::size_t TokenSequence::SkipBlanks(std::size_t at) const {
  std::size_t tokens{start_.size()};
  for (; at < tokens; ++at) {
    if (!TokenAt(at).IsBlank()) {
      return at;
    }
  }
  return tokens; // even if at > tokens
}

// C-style /*comments*/ are removed from preprocessing directive
// token sequences by the prescanner, but not C++ or Fortran
// free-form line-ending comments (//... and !...) because
// ignoring them is directive-specific.
bool TokenSequence::IsAnythingLeft(std::size_t at) const {
  std::size_t tokens{start_.size()};
  for (; at < tokens; ++at) {
    auto tok{TokenAt(at)};
    const char *end{tok.end()};
    for (const char *p{tok.begin()}; p < end; ++p) {
      switch (*p) {
      case '/':
        return p + 1 >= end || p[1] != '/';
      case '!':
        return false;
      case ' ':
        break;
      default:
        return true;
      }
    }
  }
  return false;
}

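// Append all of "that"'s tokens, rebasing their starting offsets onto
// this sequence's character buffer.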
void TokenSequence::Put(const TokenSequence &that) {
  if (nextStart_ < char_.size()) {
    start_.push_back(nextStart_);
  }
  std::size_t offset{char_.size()}; // std::size_t, not int: avoid narrowing
  for (std::size_t st : that.start_) {
    start_.push_back(st + offset);
  }
  char_.insert(char_.end(), that.char_.begin(), that.char_.end());
  nextStart_ = char_.size();
  provenances_.Put(that.provenances_);
}

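// Copy "that"'s tokens, attributing their characters to successive
// positions of the given provenance range rather than to their
// original provenance.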
void TokenSequence::Put(const TokenSequence &that, ProvenanceRange range) {
  std::size_t offset{0};
  std::size_t tokens{that.SizeInTokens()};
  for (std::size_t j{0}; j < tokens; ++j) {
    CharBlock tok{that.TokenAt(j)};
    Put(tok, range.OffsetMember(offset));
    offset += tok.size();
  }
  CHECK(offset == range.size());
}

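// Copy "tokens" tokens from "that" starting at token "at", preserving
// each character's original provenance.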
void TokenSequence::Put(
    const TokenSequence &that, std::size_t at, std::size_t tokens) {
  ProvenanceRange provenance;
  std::size_t offset{0};
  for (; tokens-- > 0; ++at) {
    CharBlock tok{that.TokenAt(at)};
    std::size_t tokBytes{tok.size()};
    for (std::size_t j{0}; j < tokBytes; ++j) {
      if (offset == provenance.size()) {
        provenance = that.provenances_.Map(that.start_[at] + j);
        offset = 0;
      }
      PutNextTokenChar(tok[j], provenance.OffsetMember(offset++));
    }
    CloseToken();
  }
}

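// Add "bytes" characters of "s" to the sequence and close the
// current token.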
void TokenSequence::Put(
    const char *s, std::size_t bytes, Provenance provenance) {
  for (std::size_t j{0}; j < bytes; ++j) {
    PutNextTokenChar(s[j], provenance + j);
  }
  CloseToken();
}

void TokenSequence::Put(const CharBlock &t, Provenance provenance) {
  Put(&t[0], t.size(), provenance);
}

void TokenSequence::Put(const std::string &s, Provenance provenance) {
  Put(s.data(), s.size(), provenance);
}

void TokenSequence::Put(llvm::raw_string_ostream &ss, Provenance provenance) {
  Put(ss.str(), provenance);
}

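// Lower-case letters in place, leaving the bodies of character literals
// and the text of Hollerith constants untouched; e.g., NAME_"Quoted"
// becomes name_"Quoted" and 6HFOOBAR becomes 6hFOOBAR.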
TokenSequence &TokenSequence::ToLowerCase() {
  std::size_t tokens{start_.size()};
  std::size_t chars{char_.size()};
  std::size_t atToken{0};
  for (std::size_t j{0}; j < chars;) {
    std::size_t nextStart{atToken + 1 < tokens ? start_[++atToken] : chars};
    char *p{&char_[j]};
    char const *limit{char_.data() + nextStart};
    j = nextStart;
    if (IsDecimalDigit(*p)) {
      while (p < limit && IsDecimalDigit(*p)) {
        ++p;
      }
      if (p >= limit) {
      } else if (*p == 'h' || *p == 'H') {
        // Hollerith
        *p = 'h';
      } else if (*p == '_') {
        // kind-prefixed character literal (e.g., 1_"ABC")
      } else {
        // exponent
        for (; p < limit; ++p) {
          *p = ToLowerCaseLetter(*p);
        }
      }
    } else if (limit[-1] == '\'' || limit[-1] == '"') {
      if (*p == limit[-1]) {
        // Character literal without prefix
      } else if (p[1] == limit[-1]) {
        // BOZX-prefixed constant
        for (; p < limit; ++p) {
          *p = ToLowerCaseLetter(*p);
        }
      } else {
        // Literal with kind-param prefix name (e.g., K_"ABC").
        for (; *p != limit[-1]; ++p) {
          *p = ToLowerCaseLetter(*p);
        }
      }
    } else {
      for (; p < limit; ++p) {
        *p = ToLowerCaseLetter(*p);
      }
    }
  }
  return *this;
}

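// In the blank-handling routines below, "firstChar" exempts tokens
// that begin before that character offset.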
bool TokenSequence::HasBlanks(std::size_t firstChar) const {
  std::size_t tokens{SizeInTokens()};
  for (std::size_t j{0}; j < tokens; ++j) {
    if (start_[j] >= firstChar && TokenAt(j).IsBlank()) {
      return true;
    }
  }
  return false;
}

bool TokenSequence::HasRedundantBlanks(std::size_t firstChar) const {
  std::size_t tokens{SizeInTokens()};
  bool lastWasBlank{false};
  for (std::size_t j{0}; j < tokens; ++j) {
    bool isBlank{TokenAt(j).IsBlank()};
    if (isBlank && lastWasBlank && start_[j] >= firstChar) {
      return true;
    }
    lastWasBlank = isBlank;
  }
  return false;
}

TokenSequence &TokenSequence::RemoveBlanks(std::size_t firstChar) {
  std::size_t tokens{SizeInTokens()};
  TokenSequence result;
  for (std::size_t j{0}; j < tokens; ++j) {
    if (!TokenAt(j).IsBlank() || start_[j] < firstChar) {
      result.Put(*this, j);
    }
  }
  swap(result);
  return *this;
}

TokenSequence &TokenSequence::RemoveRedundantBlanks(std::size_t firstChar) {
  std::size_t tokens{SizeInTokens()};
  TokenSequence result;
  bool lastWasBlank{false};
  for (std::size_t j{0}; j < tokens; ++j) {
    bool isBlank{TokenAt(j).IsBlank()};
    if (!isBlank || !lastWasBlank || start_[j] < firstChar) {
      result.Put(*this, j);
    }
    lastWasBlank = isBlank;
  }
  swap(result);
  return *this;
}

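// Clip a trailing "!" comment unless it begins a recognized compiler
// directive sentinel (e.g. "!dir$"); when "skipFirst" is set, the first
// comment encountered is also retained.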
TokenSequence &TokenSequence::ClipComment(
    const Prescanner &prescanner, bool skipFirst) {
  std::size_t tokens{SizeInTokens()};
  for (std::size_t j{0}; j < tokens; ++j) {
    CharBlock tok{TokenAt(j)};
    if (std::size_t blanks{tok.CountLeadingBlanks()};
        blanks < tok.size() && tok[blanks] == '!') {
      // Retain active compiler directive sentinels (e.g. "!dir$")
      for (std::size_t k{j + 1}; k < tokens && tok.size() < blanks + 5; ++k) {
        if (tok.begin() + tok.size() == TokenAt(k).begin()) {
          tok.ExtendToCover(TokenAt(k));
        } else {
          break;
        }
      }
      bool isSentinel{false};
      if (tok.size() == blanks + 5) {
        char sentinel[4];
        for (int k{0}; k < 4; ++k) {
          sentinel[k] = ToLowerCaseLetter(tok[blanks + k + 1]);
        }
        isSentinel = prescanner.IsCompilerDirectiveSentinel(sentinel, 4);
      }
      if (isSentinel) {
      } else if (skipFirst) {
        skipFirst = false;
      } else {
        TokenSequence result;
        if (j > 0) {
          result.Put(*this, 0, j - 1);
        }
        swap(result);
        return *this;
      }
    }
  }
  return *this;
}

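// Copy the characters and their provenance mappings into the cooked
// character stream.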
void TokenSequence::Emit(CookedSource &cooked) const {
  if (auto n{char_.size()}) {
    cooked.Put(&char_[0], n);
    cooked.PutProvenanceMappings(provenances_);
  }
}

llvm::raw_ostream &TokenSequence::Dump(llvm::raw_ostream &o) const {
  o << "TokenSequence has " << char_.size() << " chars; nextStart_ "
    << nextStart_ << '\n';
  for (std::size_t j{0}; j < start_.size(); ++j) {
    o << '[' << j << "] @ " << start_[j] << " '" << TokenAt(j).ToString()
      << "'\n";
  }
  return o;
}

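// The provenance accessors below map offsets in the character buffer
// back to the original source.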
Provenance TokenSequence::GetCharProvenance(std::size_t offset) const {
  ProvenanceRange range{provenances_.Map(offset)};
  return range.start();
}

Provenance TokenSequence::GetTokenProvenance(
    std::size_t token, std::size_t offset) const {
  return GetCharProvenance(start_[token] + offset);
}

ProvenanceRange TokenSequence::GetTokenProvenanceRange(
    std::size_t token, std::size_t offset) const {
  ProvenanceRange range{provenances_.Map(start_[token] + offset)};
  return range.Prefix(TokenBytes(token) - offset);
}

ProvenanceRange TokenSequence::GetIntervalProvenanceRange(
    std::size_t token, std::size_t tokens) const {
  if (tokens == 0) {
    return {};
  }
  ProvenanceRange range{provenances_.Map(start_[token])};
  while (--tokens > 0 &&
      range.AnnexIfPredecessor(provenances_.Map(start_[++token]))) {
  }
  return range;
}

ProvenanceRange TokenSequence::GetProvenanceRange() const {
  return GetIntervalProvenanceRange(0, start_.size());
}

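// Diagnose tokens that begin with a character that is invalid in
// Fortran; '!' is tolerated where a compiler directive sentinel could
// begin, i.e. at the start of the sequence or just after a ';'.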
const TokenSequence &TokenSequence::CheckBadFortranCharacters(
    Messages &messages) const {
  std::size_t tokens{SizeInTokens()};
  bool isBangOk{true};
  for (std::size_t j{0}; j < tokens; ++j) {
    CharBlock token{TokenAt(j)};
    char ch{token.FirstNonBlank()};
    if (ch != ' ' && !IsValidFortranTokenCharacter(ch)) {
      if (ch == '!' && isBangOk) {
        // allow in !dir$
      } else if (ch < ' ' || ch >= '\x7f') {
        messages.Say(GetTokenProvenanceRange(j),
            "bad character (0x%02x) in Fortran token"_err_en_US, ch & 0xff);
      } else {
        messages.Say(GetTokenProvenanceRange(j),
            "bad character ('%c') in Fortran token"_err_en_US, ch);
      }
    }
    if (ch == ';') {
      isBangOk = true;
    } else if (ch != ' ') {
      isBangOk = false;
    }
  }
  return *this;
}

const TokenSequence &TokenSequence::CheckBadParentheses(
    Messages &messages) const {
  // First, a quick pass with no allocation for the common case
  int nesting{0};
  std::size_t tokens{SizeInTokens()};
  for (std::size_t j{0}; j < tokens; ++j) {
    CharBlock token{TokenAt(j)};
    char ch{token.FirstNonBlank()};
    if (ch == '(') {
      ++nesting;
    } else if (ch == ')') {
      if (--nesting < 0) {
        break; // ")(" would otherwise balance out; force a diagnostic
      }
    }
  }
  if (nesting != 0) {
    // There's an error; diagnose it
    std::vector<std::size_t> stack;
    for (std::size_t j{0}; j < tokens; ++j) {
      CharBlock token{TokenAt(j)};
      char ch{token.FirstNonBlank()};
      if (ch == '(') {
        stack.push_back(j);
      } else if (ch == ')') {
        if (stack.empty()) {
          messages.Say(GetTokenProvenanceRange(j), "Unmatched ')'"_err_en_US);
          return *this;
        }
        stack.pop_back();
      }
    }
    CHECK(!stack.empty());
    messages.Say(
        GetTokenProvenanceRange(stack.back()), "Unmatched '('"_err_en_US);
  }
  return *this;
}

} // namespace Fortran::parser