This machine mirrors various open-source projects.
20 Gbit/s uplink.
If there are any issues or you want another project mirrored, please contact
mirror-service -=AT=- netcologne DOT de !
//===-- parser/Parser.cpp ------------------------------------- -*- C++ -*-===//
//
// This file is distributed under the MIT license. See LICENSE.txt for details.
//
// Copyright (C) 2008-2010, Stephen Wilson
//
//===----------------------------------------------------------------------===//
//
// This parser is a typical hand written recursive descent parser.
//
// There is always a "current token", and each parse method begins its
// processing using the current token to guide its logic. Therefore, the rule
// to follow when calling a parse method (or when writing one) is "the current
// token is the next token to be parsed".
//
// Similarly, parse methods leave the stream so that the current token is again
// the next token to be parsed. Thus, a parse method which consumes exactly one
// token moves the token stream by exactly one token.
//
// As the parser proceeds, callbacks provided by the ParseClient are invoked.
// The parser does not build an AST explicitly -- rather, it formulates calls
// to the client, which in turn could construct an AST, or perform some other
// action.
//
//===----------------------------------------------------------------------===//

#include "comma/basic/Attributes.h"
#include "comma/basic/Pragmas.h"
#include "comma/parser/Parser.h"

#include "llvm/ADT/APInt.h"

#include <algorithm>
#include <cassert>
#include <cstring>
#include <vector>

using namespace comma;

// Constructs a parser over the given text provider.  All parse events are
// routed to the supplied ParseClient, and diagnostics to the given Diagnostic
// object.
Parser::Parser(TextProvider &txtProvider, IdentifierPool &idPool,
               ParseClient &client, Diagnostic &diag)
    : txtProvider(txtProvider),
      idPool(idPool),
      client(client),
      diagnostic(diag),
      lexer(txtProvider, diag)
{
    // Mark each identifier which can name an attribute.
    attrib::markAttributeIdentifiers(idPool);

    // Prime the parser by loading in the first token.
    lexer.scan(token);
}

/// Returns a reference to the current token.
Lexer::Token &Parser::currentToken()
{
    return token;
}

/// Advances the stream by one token and returns the new current token.
Lexer::Token &Parser::nextToken()
{
    lexer.scan(token);
    return token;
}

/// Returns the next token in the stream without consuming the current one.
Lexer::Token Parser::peekToken()
{
    Lexer::Token tkn;
    lexer.peek(tkn, 0);
    return tkn;
}

/// Consumes the current token and returns its location.
Location Parser::ignoreToken()
{
    Location loc = currentLocation();
    nextToken();
    return loc;
}

/// Replaces the current token with the given token (used to restore the
/// stream position after a lexer excursion).
void Parser::setCurrentToken(Lexer::Token &tkn)
{
    token = tkn;
}

/// Returns true if the current token has the given code.
bool Parser::currentTokenIs(Lexer::Code code)
{
    return currentToken().getCode() == code;
}

/// Returns true if the next (peeked) token has the given code.
bool Parser::nextTokenIs(Lexer::Code code)
{
    return peekToken().getCode() == code;
}

/// Returns the code of the current token.
Lexer::Code Parser::currentTokenCode()
{
    return currentToken().getCode();
}

/// Returns the code of the next (peeked) token.
Lexer::Code Parser::peekTokenCode()
{
    return peekToken().getCode();
}

/// If the next (peeked) token has the given code, consumes the current token
/// and returns true.  Otherwise the stream is not moved and false is
/// returned.
bool Parser::expectToken(Lexer::Code code)
{
    if (peekToken().getCode() == code) {
        ignoreToken();
        return true;
    }
    return false;
}

/// If the current token has the given code, consumes it and returns true.
/// Otherwise the stream is not moved and false is returned.
bool Parser::reduceToken(Lexer::Code code)
{
    if (currentTokenIs(code)) {
        ignoreToken();
        return true;
    }
    return false;
}

/// Like reduceToken(), but emits an "unexpected token" diagnostic when the
/// current token does not match.
bool Parser::requireToken(Lexer::Code code)
{
    bool status = reduceToken(code);
    if (!status)
        report(diag::UNEXPECTED_TOKEN_WANTED)
            << currentToken().getString()
            << Lexer::tokenString(code);
    return status;
}

/// Drives the token stream forward until a token with the given code is
/// found (returning true with the match as the current token), or the end of
/// the stream is reached (returning false).
bool Parser::seekToken(Lexer::Code code)
{
    while (!currentTokenIs(Lexer::TKN_EOT)) {
        if (currentTokenIs(code))
            return true;
        else
            ignoreToken();
    }
    return false;
}

bool Parser::seekAndConsumeToken(Lexer::Code code) 00145 { 00146 bool status = seekToken(code); 00147 if (status) ignoreToken(); 00148 return status; 00149 } 00150 00151 bool Parser::seekTokens(Lexer::Code code0, Lexer::Code code1, 00152 Lexer::Code code2, Lexer::Code code3, 00153 Lexer::Code code4, Lexer::Code code5) 00154 { 00155 Lexer::Code codes[] = { code0, code1, code2, code3, code4, code5 }; 00156 Lexer::Code *end = &codes[6]; 00157 00158 while (!currentTokenIs(Lexer::TKN_EOT)) 00159 { 00160 if (end != std::find(codes, end, currentTokenCode())) 00161 return true; 00162 else 00163 ignoreToken(); 00164 } 00165 return false; 00166 } 00167 00168 bool Parser::seekAndConsumeTokens(Lexer::Code code0, 00169 Lexer::Code code1, Lexer::Code code2, 00170 Lexer::Code code3, Lexer::Code code4) 00171 { 00172 bool status = seekTokens(code0, code1, code2, code3, code4); 00173 if (status) ignoreToken(); 00174 return status; 00175 } 00176 00177 bool Parser::seekCloseParen() 00178 { 00179 unsigned depth = 1; 00180 00181 for (;;) { 00182 seekTokens(Lexer::TKN_LPAREN, Lexer::TKN_RPAREN); 00183 00184 switch (currentTokenCode()) { 00185 default: 00186 break; 00187 00188 case Lexer::TKN_LPAREN: 00189 depth++; 00190 break; 00191 00192 case Lexer::TKN_RPAREN: 00193 depth--; 00194 if (depth == 0) { 00195 ignoreToken(); 00196 return true; 00197 } 00198 break; 00199 00200 case Lexer::TKN_EOT: 00201 return false; 00202 } 00203 00204 ignoreToken(); 00205 } 00206 } 00207 00208 bool Parser::seekSemi() 00209 { 00210 while (seekTokens(Lexer::TKN_LPAREN, Lexer::TKN_SEMI)) { 00211 00212 if (currentTokenIs(Lexer::TKN_SEMI)) 00213 return true; 00214 00215 // Otherwise, the current token is an LBRACE. Dive into the parens and 00216 // seek the closing token. 00217 ignoreToken(); 00218 seekCloseParen(); 00219 } 00220 return false; 00221 } 00222 00223 // This function drives the stream of input tokens looking for an end statement. 
// If the end statement is followed by a matching tag, true is returned.
// Otherwise the search continues until a matching end is found or the end of
// the token stream is encountered. In the latter case, false is returned.
bool Parser::seekEndTag(IdentifierInfo *tag)
{
    while (seekToken(Lexer::TKN_END))
    {
        IdentifierInfo *info = 0;

        if (nextTokenIs(Lexer::TKN_IDENTIFIER)) {
            info = getIdentifierInfo(peekToken());
        }

        if (info == tag)
            return true;
        else
            ignoreToken();
    }
    return false;
}

// Seeks an "end <tag>" sequence and consumes both tokens when found.
bool Parser::seekAndConsumeEndTag(IdentifierInfo *tag)
{
    if (seekEndTag(tag)) {
        ignoreToken();          // Ignore 'end'.
        ignoreToken();          // Ignore the tag.
        return true;
    }
    return false;
}

// Seeks the "end if" closing the current if statement, tracking nested if
// statements along the way.  On success the "end if" tokens are consumed and
// true is returned; false is returned otherwise.
bool Parser::seekEndIf()
{
    unsigned depth = 1;

    while (seekTokens(Lexer::TKN_IF, Lexer::TKN_END)) {
        switch (currentTokenCode()) {

        default:
            return false;

        case Lexer::TKN_IF:
            ignoreToken();
            depth++;
            break;

        case Lexer::TKN_END:
            ignoreToken();
            if (reduceToken(Lexer::TKN_IF)) {
                if (--depth == 0)
                    return true;
            }
        }
    }
    return false;
}

// Seeks the "end loop" closing the current loop statement.
//
// NOTE(review): only TKN_WHILE increases the nesting depth here -- presumably
// other loop forms either do not nest with "end loop" in this grammar or are
// handled elsewhere; verify against the language definition.
bool Parser::seekEndLoop()
{
    unsigned depth = 1;
    while (seekTokens(Lexer::TKN_WHILE, Lexer::TKN_END)) {
        switch (currentTokenCode()) {
        default:
            return false;

        case Lexer::TKN_WHILE:
            ignoreToken();
            depth++;
            break;

        case Lexer::TKN_END:
            ignoreToken();
            if (reduceToken(Lexer::TKN_LOOP)) {
                if (--depth == 0)
                    return true;
            }
        }
    }
    return false;
}

// Returns the location of the current token.
Location Parser::currentLocation()
{
    return currentToken().getLocation();
}

// Returns the line number of the current token.
unsigned Parser::currentLine()
{
    return txtProvider.getLine(currentLocation());
}

// Returns the column number of the current token.
unsigned Parser::currentColumn()
{
    return txtProvider.getColumn(currentLocation());
}

// Returns the IdentifierInfo object from the identifier pool corresponding
// to the given token's text.
IdentifierInfo *Parser::getIdentifierInfo(const Lexer::Token &tkn)
{
    const char *rep = tkn.getRep();
    unsigned length = tkn.getLength();
    IdentifierInfo *info = &idPool.getIdentifierInfo(rep, length);
    return info;
}

// Returns true if an empty pair of parens "()" is next on the stream.
bool Parser::unitExprFollows()
{
    return currentTokenIs(Lexer::TKN_LPAREN) && nextTokenIs(Lexer::TKN_RPAREN);
}

// Returns true if an assignment token follows the upcoming name.  The token
// stream is restored to its original position before returning.
bool Parser::assignmentFollows()
{
    Lexer::Token savedToken = currentToken();
    lexer.beginExcursion();
    seekNameEnd();
    bool status = currentTokenIs(Lexer::TKN_ASSIGN);
    lexer.endExcursion();
    setCurrentToken(savedToken);
    return status;
}

// Returns true if a keyword selection ("identifier =>") follows.
bool Parser::keywordSelectionFollows()
{
    return currentTokenIs(Lexer::TKN_IDENTIFIER)
        && nextTokenIs(Lexer::TKN_RDARROW);
}

// Returns true if a selected component follows:  an identifier followed by a
// dot, possibly with intervening parenthesized groups.  The stream is
// restored to its original position before returning.
bool Parser::selectedComponentFollows()
{
    bool status = false;

    if (currentTokenIs(Lexer::TKN_IDENTIFIER)) {
        switch (peekTokenCode()) {

        default:
            break;

        case Lexer::TKN_DOT:
            status = true;
            break;

        case Lexer::TKN_LPAREN: {
            Lexer::Token savedToken = currentToken();
            lexer.beginExcursion();
            ignoreToken();          // Ignore the identifier.
            do {
                ignoreToken();      // Ignore the left paren.
                seekCloseParen();
            } while (currentTokenIs(Lexer::TKN_LPAREN));
            status = currentTokenIs(Lexer::TKN_DOT);
            lexer.endExcursion();
            setCurrentToken(savedToken);
            break;
        }
        }
    }
    return status;
}

// Returns true if the current left paren opens an aggregate expression
// (positional, keyed, or "others").  The stream is restored to its original
// position before returning.
bool Parser::aggregateFollows()
{
    assert(currentTokenIs(Lexer::TKN_LPAREN));

    bool result = false;
    Lexer::Token savedToken = currentToken();

    lexer.beginExcursion();
    ignoreToken();                  // Ignore the left paren.

 SEEK:
    if (seekTokens(Lexer::TKN_LPAREN,
                   Lexer::TKN_COMMA, Lexer::TKN_OTHERS,
                   Lexer::TKN_RDARROW, Lexer::TKN_RPAREN)) {

        switch (currentTokenCode()) {

        default: break;

        case Lexer::TKN_COMMA:
            result = true;      // Positional aggregate.
            break;

        case Lexer::TKN_RDARROW:
            result = true;      // Keyed aggregate.
            break;

        case Lexer::TKN_LPAREN:
            // A nested paren group; skip over it and continue scanning.
            ignoreToken();
            if (seekCloseParen())
                goto SEEK;
            break;

        case Lexer::TKN_OTHERS:
            result = true;      // Others aggregate.
            break;
        }
    }

    lexer.endExcursion();
    setCurrentToken(savedToken);
    return result;
}

// Returns true if a block statement follows:  "declare", "begin", or a block
// label ("identifier :").
bool Parser::blockStmtFollows()
{
    switch (currentTokenCode()) {

    default:
        return false;

    case Lexer::TKN_IDENTIFIER:
        return nextTokenIs(Lexer::TKN_COLON);

    case Lexer::TKN_DECLARE:
    case Lexer::TKN_BEGIN:
        return true;
    }
}

// Returns true if a qualification follows:  a quote followed by a left paren.
bool Parser::qualificationFollows()
{
    return currentTokenIs(Lexer::TKN_QUOTE) && nextTokenIs(Lexer::TKN_LPAREN);
}

// Returns true if an attribute follows:  a quote followed by an identifier.
bool Parser::attributeFollows()
{
    return (currentTokenIs(Lexer::TKN_QUOTE) &&
            nextTokenIs(Lexer::TKN_IDENTIFIER));
}

// Parses and consumes an identifier token.  Returns the corresponding
// IdentifierInfo on success; returns null and posts a diagnostic otherwise.
IdentifierInfo *Parser::parseIdentifier()
{
    IdentifierInfo *info;

    switch (currentTokenCode()) {
    case Lexer::TKN_IDENTIFIER:
        info = getIdentifierInfo(currentToken());
        ignoreToken();
        break;

    case Lexer::TKN_EOT:
        report(diag::PREMATURE_EOS);
        info = 0;
        break;

    default:
        report(diag::UNEXPECTED_TOKEN) << currentToken().getString();
        info = 0;
    }
    return info;
}

// Parses an identifier naming a function:  either a function glyph (an
// operator token) or a regular identifier.
IdentifierInfo *Parser::parseFunctionIdentifier()
{
    IdentifierInfo *info;

    if (Lexer::isFunctionGlyph(currentToken())) {
        const char *rep = Lexer::tokenString(currentTokenCode());
        info = &idPool.getIdentifierInfo(rep);
        ignoreToken();
    }
    else
        info = parseIdentifier();
    return info;
}

// Parses and consumes a character literal token.  Returns the corresponding
// IdentifierInfo on success; returns null and posts a diagnostic otherwise.
IdentifierInfo *Parser::parseCharacter()
{
    if (currentTokenIs(Lexer::TKN_CHARACTER)) {
        IdentifierInfo *info = getIdentifierInfo(currentToken());
        ignoreToken();
        return info;
    }
    else {
        report(diag::UNEXPECTED_TOKEN) << currentToken().getString();
        return 0;
    }
}

// Parses either an identifier or a character literal.
IdentifierInfo *Parser::parseIdentifierOrCharacter()
{
    if (currentTokenIs(Lexer::TKN_IDENTIFIER))
        return parseIdentifier();
    else
        return parseCharacter();
}

// Parses a character literal, a function glyph, or an identifier.
IdentifierInfo *Parser::parseAnyIdentifier()
{
    if (currentTokenIs(Lexer::TKN_CHARACTER))
        return parseCharacter();
    else
        return parseFunctionIdentifier();
}

// Parses an end tag. If expectedTag is non-null, parse "end <tag>", otherwise
// parse "end". Returns true if tokens were consumed (which can happen when the
// parse fails due to a missing or unexpected end tag) and false otherwise.
bool Parser::parseEndTag(IdentifierInfo *expectedTag)
{
    Location tagLoc;
    IdentifierInfo *tag;

    if (requireToken(Lexer::TKN_END)) {
        if (expectedTag) {
            if (currentTokenIs(Lexer::TKN_SEMI))
                report(diag::EXPECTED_END_TAG) << expectedTag;
            else {
                tagLoc = currentLocation();
                tag = parseFunctionIdentifier();
                if (tag && tag != expectedTag)
                    report(tagLoc, diag::EXPECTED_END_TAG) << expectedTag;
            }
        }
        else if (currentTokenIs(Lexer::TKN_IDENTIFIER)) {
            // FIXME: The above test is not general enough, since we could have
            // operator tokens (TKN_PLUS, TKN_STAR, etc) labeling an "end".
            tagLoc = currentLocation();
            tag = parseIdentifier();
            report(tagLoc, diag::UNEXPECTED_END_TAG) << tag;
        }
        return true;
    }
    return false;
}

// Parses a sequence of generic formal parameters, bracketed by the client's
// begin/endGenericFormals callbacks.  Stops at the start of the capsule
// proper ("domain" or "signature").
void Parser::parseGenericFormalParams()
{
    assert(currentTokenIs(Lexer::TKN_GENERIC));
    ignoreToken();

    client.beginGenericFormals();
    for ( ;; ) {
        switch (currentTokenCode()) {

        default:
            // Unexpected token:  diagnose and try to recover at the next
            // formal or at the capsule keyword.
            report(diag::UNEXPECTED_TOKEN) << currentTokenString();
            if (seekTokens(Lexer::TKN_ABSTRACT,
                           Lexer::TKN_DOMAIN, Lexer::TKN_SIGNATURE)) {
                if (currentTokenIs(Lexer::TKN_ABSTRACT))
                    continue;
            }
            client.endGenericFormals();
            return;

        case Lexer::TKN_ABSTRACT:
            parseGenericFormalDomain();
            break;

        case Lexer::TKN_DOMAIN:
        case Lexer::TKN_SIGNATURE:
            client.endGenericFormals();
            return;
        }
    }
}

// Parses a single generic formal domain declaration of the form
// "abstract domain <name> [is <signature>];".
void Parser::parseGenericFormalDomain()
{
    assert(currentTokenIs(Lexer::TKN_ABSTRACT));
    ignoreToken();

    if (!requireToken(Lexer::TKN_DOMAIN)) {
        seekToken(Lexer::TKN_SEMI);
        return;
    }

    Location loc = currentLocation();
    IdentifierInfo *name = parseIdentifier();

    if (!name) {
        seekToken(Lexer::TKN_SEMI);
        return;
    }

    if (reduceToken(Lexer::TKN_IS)) {
        Node sig = parseName();
        if (sig.isValid())
            client.acceptFormalDomain(name, loc, sig);
    }
    else
        client.acceptFormalDomain(name, loc, getNullNode());

    requireToken(Lexer::TKN_SEMI);
}

// Parses a signature profile:  an optional super-signature list introduced by
// "is", followed by an optional component list introduced by "with".  The
// profile is bracketed by the client's begin/endSignatureProfile callbacks.
void Parser::parseSignatureProfile()
{
    client.beginSignatureProfile();

    if (currentTokenIs(Lexer::TKN_IS))
        parseSupersignatureProfile();

    if (reduceToken(Lexer::TKN_WITH))
        parseWithComponents();

    client.endSignatureProfile();
}

// Parses a sequence of super-signatures in a 'with' expression.
// Parses the super-signature list introduced by "is", handing each valid
// name to client.acceptSupersignature().
void Parser::parseSupersignatureProfile()
{
    assert(currentTokenIs(Lexer::TKN_IS));
    Location isLoc = ignoreToken();

    // Check for a reasonably common case of writing "is" where one meant
    // "with".
    switch (currentTokenCode()) {
    default:
        break;
    case Lexer::TKN_PROCEDURE:
    case Lexer::TKN_FUNCTION:
    case Lexer::TKN_TYPE:
    case Lexer::TKN_SUBTYPE:
        // Report that we expected a "with" token and continue parsing as though
        // we had a "with".
        report(isLoc, diag::UNEXPECTED_TOKEN_WANTED)
            << Lexer::tokenString(Lexer::TKN_IS)
            << Lexer::tokenString(Lexer::TKN_WITH);
        parseWithComponents();
        return;
    }

    // Otherwise, parse the super signature list.
    do {
        Node super = parseName();

        if (super.isValid())
            client.acceptSupersignature(super);
        else {
            // Recover at the next list separator or profile boundary.
            seekTokens(Lexer::TKN_AND, Lexer::TKN_ADD,
                       Lexer::TKN_WITH, Lexer::TKN_END);
        }
    } while (reduceToken(Lexer::TKN_AND));
}

// Parses the declarations of a "with" component list:  function, procedure,
// type, and subtype declarations.  Returns when a token which cannot start a
// component is reached.
void Parser::parseWithComponents()
{
    bool status = false;

    for (;;) {
        switch (currentTokenCode()) {
        default:
            return;

        case Lexer::TKN_FUNCTION:
            status = parseFunctionDeclaration(true).isValid();
            break;

        case Lexer::TKN_PROCEDURE:
            status = parseProcedureDeclaration(true).isValid();
            break;

        case Lexer::TKN_TYPE:
            status = parseType();
            break;

        case Lexer::TKN_SUBTYPE:
            status = parseSubtype();
            break;
        }

        // On failure, recover at the start of the next component.
        if (!status)
            seekTokens(Lexer::TKN_FUNCTION, Lexer::TKN_PROCEDURE,
                       Lexer::TKN_TYPE, Lexer::TKN_SEMI,
                       Lexer::TKN_END, Lexer::TKN_ADD);

        requireToken(Lexer::TKN_SEMI);
    }
}

// Parses a carrier declaration:  "carrier <name> is <type>".  The result is
// handed to client.acceptCarrier().
void Parser::parseCarrier()
{
    assert(currentTokenIs(Lexer::TKN_CARRIER));

    Location loc = ignoreToken();
    IdentifierInfo *name = parseIdentifier();

    if (!name) {
        seekToken(Lexer::TKN_SEMI);
        return;
    }

    if (!requireToken(Lexer::TKN_IS)) {
        seekToken(Lexer::TKN_SEMI);
        return;
    }

    Node type = parseName();

    if (type.isInvalid()) {
        seekToken(Lexer::TKN_SEMI);
        return;
    }

    client.acceptCarrier(name, loc, type);
}

// Parses the components of an "add" expression (the implementation section of
// a domain), bracketed by the client's begin/endAddExpression callbacks.
void Parser::parseAddComponents()
{
    client.beginAddExpression();

    for (;;) {
        switch (currentTokenCode()) {
        default:
            client.endAddExpression();
            return;

        case Lexer::TKN_FUNCTION:
            parseFunctionDeclOrDefinition();
            break;

        case Lexer::TKN_PROCEDURE:
            parseProcedureDeclOrDefinition();
            break;

        case Lexer::TKN_IMPORT:
            parseImportDeclaration();
            break;

        case Lexer::TKN_CARRIER:
            parseCarrier();
            break;

        case Lexer::TKN_TYPE:
            parseType();
            break;

        case Lexer::TKN_SUBTYPE:
            parseSubtype();
            break;

        case Lexer::TKN_PRAGMA:
            parseDeclarationPragma();
            break;
        }

        requireToken(Lexer::TKN_SEMI);
    }
}

// Parses a top-level capsule:  a domain or signature declaration, optionally
// preceded by generic formal parameters.
//
// NOTE(review): the early returns after a failed parseIdentifier() exit
// without invoking client.endCapsule() even though beginCapsule() has been
// called -- confirm the ParseClient contract tolerates this imbalance.
void Parser::parseModel()
{
    bool parsingDomain = false;
    IdentifierInfo *name = 0;

    client.beginCapsule();

    if (currentTokenIs(Lexer::TKN_GENERIC))
        parseGenericFormalParams();

    if (reduceToken(Lexer::TKN_DOMAIN)) {
        Location loc = currentLocation();
        if (!(name = parseIdentifier()))
            return;
        client.beginDomainDecl(name, loc);
        parsingDomain = true;
    }
    else if (reduceToken(Lexer::TKN_SIGNATURE)) {
        Location loc = currentLocation();
        if (!(name = parseIdentifier()))
            return;
        client.beginSignatureDecl(name, loc);
    }
    else {
        assert(false && "Bad token for this production!");
        return;
    }

    if (currentTokenIs(Lexer::TKN_IS) || currentTokenIs(Lexer::TKN_WITH))
        parseSignatureProfile();

    if (parsingDomain && reduceToken(Lexer::TKN_ADD))
        parseAddComponents();

    client.endCapsule();

    // Consume and verify the end tag. On failure seek the next top level form.
    if (!parseEndTag(name))
        seekTokens(Lexer::TKN_SIGNATURE, Lexer::TKN_DOMAIN);
    else
        requireToken(Lexer::TKN_SEMI);
}

// Parses an "in", "out" or "in out" parameter mode specification. If no such
// specification is available on the stream MODE_DEFAULT is returned. A common
// mistake is to find "out in" instead of "in out". In this case, we simply
// issue a diagnostic and return MODE_IN_OUT.
PM::ParameterMode Parser::parseParameterMode()
{
    PM::ParameterMode mode = PM::MODE_DEFAULT;

    if (reduceToken(Lexer::TKN_IN)) {
        if (reduceToken(Lexer::TKN_OUT))
            mode = PM::MODE_IN_OUT;
        else
            mode = PM::MODE_IN;
    }
    else if (reduceToken(Lexer::TKN_OUT)) {
        if (currentTokenIs(Lexer::TKN_IN)) {
            report(diag::OUT_IN_PARAMETER_MODE);
            ignoreToken();
            mode = PM::MODE_IN_OUT;
        }
        else
            mode = PM::MODE_OUT;
    }
    return mode;
}

// Parses a single subroutine parameter:  "<formal> : [mode] <type>".  The
// result is handed to client.acceptSubroutineParameter().  Returns true on
// success and false otherwise.
bool Parser::parseSubroutineParameter()
{
    IdentifierInfo *formal;
    Location location;
    PM::ParameterMode mode;

    location = currentLocation();
    formal = parseIdentifier();

    if (!formal) return false;

    if (!requireToken(Lexer::TKN_COLON)) return false;

    mode = parseParameterMode();
    Node type = parseName();
    if (type.isInvalid()) return false;

    client.acceptSubroutineParameter(formal, location, type, mode);
    return true;
}

// Parses a parenthesized, semicolon-separated subroutine parameter list.
void Parser::parseSubroutineParameters()
{
    assert(currentTokenIs(Lexer::TKN_LPAREN));

    // Check that we do not have an empty parameter list.
    if (unitExprFollows()) {
        report(diag::EMPTY_PARAMS);

        // Consume the opening and closing parens.
        ignoreToken();
        ignoreToken();
        return;
    }

    // Consume the opening paren.
    ignoreToken();

    for (;;) {
        if (!parseSubroutineParameter())
            seekTokens(Lexer::TKN_SEMI, Lexer::TKN_RPAREN);

        switch (currentTokenCode()) {

        default:
            // An unexpected token. Abort processing of the parameter list and
            // seek a closing paren.
            report(diag::UNEXPECTED_TOKEN) << currentTokenString();
            if (seekCloseParen()) ignoreToken();
            return;

        case Lexer::TKN_COMMA:
            // Using a comma instead of a semicolon is a common mistake. Issue
            // a diagnostic and continue processing as though a semi was found.
            report(diag::UNEXPECTED_TOKEN_WANTED) << "," << ";";
            ignoreToken();
            break;

        case Lexer::TKN_SEMI:
            // OK, process the next parameter.
            ignoreToken();
            break;

        case Lexer::TKN_RPAREN:
            // The parameter list is complete. Consume the paren and return.
            ignoreToken();
            return;
        }
    }
}

// Parses a function declaration up to (but not including) its body.  Returns
// the node produced by client.endSubroutineDeclaration(), or an invalid node
// when the name could not be parsed.
Node Parser::parseFunctionDeclaration(bool parsingSignatureProfile)
{
    assert(currentTokenIs(Lexer::TKN_FUNCTION));
    ignoreToken();

    Location location = currentLocation();
    IdentifierInfo *name = parseFunctionIdentifier();

    if (!name)
        return getInvalidNode();

    client.beginFunctionDeclaration(name, location);

    if (currentTokenIs(Lexer::TKN_LPAREN))
        parseSubroutineParameters();

    Node returnNode = getNullNode();
    if (reduceToken(Lexer::TKN_RETURN)) {
        returnNode = parseName();
        if (returnNode.isInvalid()) {
            seekTokens(Lexer::TKN_SEMI, Lexer::TKN_IS);
            returnNode = getNullNode();
        }
    }
    else
        report(diag::MISSING_RETURN_AFTER_FUNCTION);

    client.acceptFunctionReturnType(returnNode);

    bool bodyFollows = currentTokenIs(Lexer::TKN_IS);

    // FIXME: We should model the parser state with more than a tag stack.
    if (bodyFollows)
        endTagStack.push(EndTagEntry(NAMED_TAG, location, name));

    return client.endSubroutineDeclaration(bodyFollows);
}

// Parses a procedure declaration up to (but not including) its body.  Returns
// the node produced by client.endSubroutineDeclaration(), or an invalid node
// when the name could not be parsed.
Node Parser::parseProcedureDeclaration(bool parsingSignatureProfile)
{
    assert(currentTokenIs(Lexer::TKN_PROCEDURE));
    ignoreToken();

    Location location = currentLocation();
    IdentifierInfo *name = parseIdentifier();

    if (!name)
        return getInvalidNode();

    client.beginProcedureDeclaration(name, location);

    if (currentTokenIs(Lexer::TKN_LPAREN))
        parseSubroutineParameters();

    // Procedures do not have return types; diagnose and recover.
    if (currentTokenIs(Lexer::TKN_RETURN)) {
        report(diag::RETURN_AFTER_PROCEDURE);
        seekTokens(Lexer::TKN_SEMI, Lexer::TKN_IS);
    }

    bool bodyFollows = currentTokenIs(Lexer::TKN_IS);

    // FIXME: We should model the parser state with more than a tag stack.
    if (bodyFollows)
        endTagStack.push(EndTagEntry(NAMED_TAG, location, name));

    return client.endSubroutineDeclaration(bodyFollows);
}

// Parses a subroutine body:  a declarative part, a "begin" block of
// statements, optional exception handlers, and the closing end tag.
void Parser::parseSubroutineBody(Node declarationNode)
{
    Node context = client.beginSubroutineDefinition(declarationNode);

    while (!currentTokenIs(Lexer::TKN_BEGIN) &&
           !currentTokenIs(Lexer::TKN_EOT)) {

        // Check for the common error of only specifying a declarative part
        // without a body.
        if (currentTokenIs(Lexer::TKN_END)) {
            report(diag::UNEXPECTED_TOKEN_WANTED)
                << currentToken().getString()
                << Lexer::tokenString(Lexer::TKN_BEGIN);
            client.endSubroutineBody(context);
            goto PARSE_END_TAG;
        }

        parseDeclaration();
        requireToken(Lexer::TKN_SEMI);
    }

    requireToken(Lexer::TKN_BEGIN);

    while (!currentTokenIs(Lexer::TKN_END) &&
           !currentTokenIs(Lexer::TKN_EXCEPTION) &&
           !currentTokenIs(Lexer::TKN_EOT)) {
        Node stmt = parseStatement();
        if (stmt.isValid())
            client.acceptStmt(context, stmt);
    }

    // We are finished with the main body of the subroutine. Inform the client.
    client.endSubroutineBody(context);

    // Parse any exception handlers.
    if (currentTokenIs(Lexer::TKN_EXCEPTION))
        parseExceptionStmt(context);

 PARSE_END_TAG:
    EndTagEntry tagEntry = endTagStack.top();
    assert(tagEntry.kind == NAMED_TAG && "Inconsistent end tag stack!");

    endTagStack.pop();
    parseEndTag(tagEntry.tag);
    client.endSubroutineDefinition();
}

// Parses a function declaration followed, when "is" is present, by its body.
void Parser::parseFunctionDeclOrDefinition()
{
    Node decl = parseFunctionDeclaration();

    if (decl.isInvalid()) {
        // Recover:  if a body follows the broken declaration, skip past its
        // matching end tag as well.
        seekTokens(Lexer::TKN_SEMI, Lexer::TKN_IS);
        if (currentTokenIs(Lexer::TKN_IS)) {
            EndTagEntry tagEntry = endTagStack.top();
            assert(tagEntry.kind == NAMED_TAG && "Inconsistent end tag stack!");
            endTagStack.pop();
            seekAndConsumeEndTag(tagEntry.tag);
        }
        return;
    }

    if (reduceToken(Lexer::TKN_IS))
        parseSubroutineBody(decl);
    return;
}

// Parses a procedure declaration followed, when "is" is present, by its body.
void Parser::parseProcedureDeclOrDefinition()
{
    Node decl = parseProcedureDeclaration();

    if (decl.isInvalid()) {
        // Recover:  if a body follows the broken declaration, skip past its
        // matching end tag as well.
        seekTokens(Lexer::TKN_SEMI, Lexer::TKN_IS);
        if (currentTokenIs(Lexer::TKN_IS)) {
            EndTagEntry tagEntry = endTagStack.top();
            assert(tagEntry.kind == NAMED_TAG && "Inconsistent end tag stack!");
            endTagStack.pop();
            seekAndConsumeEndTag(tagEntry.tag);
        }
        return;
    }

    if (reduceToken(Lexer::TKN_IS))
        parseSubroutineBody(decl);
    return;
}

// Parses a single declaration, dispatching on the current token.  Returns
// true when the declaration parsed cleanly.
bool Parser::parseDeclaration()
{
    switch (currentTokenCode()) {
    default:
        report(diag::UNEXPECTED_TOKEN) << currentTokenString();
        seekToken(Lexer::TKN_SEMI);
        return false;

    case Lexer::TKN_IDENTIFIER:
        return parseObjectDeclaration();

    case Lexer::TKN_FUNCTION:
        return parseFunctionDeclaration().isValid();

    case Lexer::TKN_PROCEDURE:
        return parseProcedureDeclaration().isValid();

    case Lexer::TKN_IMPORT:
        return parseImportDeclaration();

    case Lexer::TKN_TYPE:
        return parseType();

    case Lexer::TKN_SUBTYPE:
        return parseSubtype();
    }
}

// Parses an object declaration:  "<id> : <type> [renames <name> | := <expr>]".
// Returns true when the declaration was accepted by the client.
bool Parser::parseObjectDeclaration()
{
    IdentifierInfo *id;
    Location loc;

    assert(currentTokenIs(Lexer::TKN_IDENTIFIER));

    loc = currentLocation();
    id = parseIdentifier();

    if (!(id && requireToken(Lexer::TKN_COLON))) {
        seekAndConsumeToken(Lexer::TKN_SEMI);
        return false;
    }

    Node type = parseName();

    if (type.isValid()) {
        if (reduceToken(Lexer::TKN_RENAMES)) {
            Node target = parseName();
            if (target.isValid()) {
                client.acceptRenamedObjectDeclaration(loc, id, type, target);
                return true;
            }
        }
        else {
            Node init = getNullNode();
            if (reduceToken(Lexer::TKN_ASSIGN))
                init = parseExpr();
            if (init.isValid()) {
                client.acceptObjectDeclaration(loc, id, type, init);
                return true;
            }
        }
    }
    seekToken(Lexer::TKN_SEMI);
    return false;
}

// Parses an import declaration:  "import <name>".  Returns true when the
// imported name was accepted by the client.
bool Parser::parseImportDeclaration()
{
    assert(currentTokenIs(Lexer::TKN_IMPORT));
    ignoreToken();

    Node importedType = parseName();

    if (importedType.isValid()) {
        client.acceptImportDeclaration(importedType);
        return true;
    }
    return false;
}

// Parses a type declaration:  "type <name> [is <definition>]".  Dispatches to
// the appropriate sub-parser based on the token after "is".  Returns true on
// success.
bool Parser::parseType()
{
    assert(currentTokenIs(Lexer::TKN_TYPE));
    ignoreToken();

    Location loc = currentLocation();
    IdentifierInfo *name = parseIdentifier();

    if (!name)
        return false;

    // If we have a TKN_SEMI next on the stream accept an incomplete type
    // declaration. Otherwise, ensure a TKN_IS follows and consume it.
    if (currentTokenIs(Lexer::TKN_SEMI)) {
        client.acceptIncompleteTypeDecl(name, loc);
        return true;
    }
    else if (!requireToken(Lexer::TKN_IS))
        return false;

    // Determine what kind of type declaration this is.
    switch (currentTokenCode()) {

    default:
        report(diag::UNEXPECTED_TOKEN) << currentTokenString();
        seekSemi();
        break;

    case Lexer::TKN_LPAREN: {
        client.beginEnumeration(name, loc);
        parseEnumerationList();
        client.endEnumeration();
        return true;
    }

    case Lexer::TKN_RANGE:
        return parseIntegerRange(name, loc);

    case Lexer::TKN_ARRAY:
        return parseArrayTypeDecl(name, loc);

    case Lexer::TKN_RECORD:
    case Lexer::TKN_NULL:
        return parseRecordTypeDecl(name, loc);

    case Lexer::TKN_ACCESS:
        return parseAccessTypeDecl(name, loc);
    }

    return false;
}

// Parses a subtype declaration:  "subtype <name> is <name> [range <lo> ..
// <hi>]".  Returns true when the declaration was accepted by the client.
bool Parser::parseSubtype()
{
    assert(currentTokenIs(Lexer::TKN_SUBTYPE));
    ignoreToken();

    Location loc = currentLocation();
    IdentifierInfo *name = parseIdentifier();

    if (!name || !requireToken(Lexer::TKN_IS)) {
        seekSemi();
        return false;
    }

    Node subtype = parseName();

    if (subtype.isInvalid()) {
        seekSemi();
        return false;
    }

    if (currentTokenIs(Lexer::TKN_SEMI)) {
        client.acceptSubtypeDecl(name, loc, subtype);
        return true;
    }

    // The only kind of subtype constraints we contend with at the moment are
    // range constraints.
    if (requireToken(Lexer::TKN_RANGE)) {
        Node low = parseExpr();
        if (low.isInvalid() or !requireToken(Lexer::TKN_DDOT)) {
            seekSemi();
            return false;
        }

        Node high = parseExpr();
        if (high.isInvalid()) {
            seekSemi();
            return false;
        }

        client.acceptRangedSubtypeDecl(name, loc, subtype, low, high);
        return true;
    }
    else {
        seekSemi();
        return false;
    }
}

// Parses a parenthesized, comma-separated list of enumeration literals
// (identifiers or character literals), reporting each to the client.
void Parser::parseEnumerationList()
{
    assert(currentTokenIs(Lexer::TKN_LPAREN));
    Location loc = currentLocation();
    ignoreToken();

    // Diagnose empty enumeration lists.
    if (reduceToken(Lexer::TKN_RPAREN)) {
        report(loc, diag::EMPTY_ENUMERATION);
        return;
    }

    do {
        Location loc = currentLocation();
        if (currentTokenIs(Lexer::TKN_CHARACTER)) {
            IdentifierInfo *name = parseCharacter();
            client.acceptEnumerationCharacter(name, loc);
        }
        else {
            IdentifierInfo *name = parseIdentifier();

            if (!name) {
                seekCloseParen();
                return;
            }
            client.acceptEnumerationIdentifier(name, loc);
        }
    } while (reduceToken(Lexer::TKN_COMMA));

    requireToken(Lexer::TKN_RPAREN);
}

// Parses the range part of an integer type declaration:
// "range <lo> .. <hi>".  Returns true when accepted by the client.
bool Parser::parseIntegerRange(IdentifierInfo *name, Location loc)
{
    assert(currentTokenIs(Lexer::TKN_RANGE));
    ignoreToken();

    Node low = parseExpr();
    if (low.isInvalid() or !requireToken(Lexer::TKN_DDOT)) {
        seekSemi();
        return false;
    }

    Node high = parseExpr();
    if (high.isInvalid()) {
        seekSemi();
        return false;
    }

    client.acceptIntegerTypeDecl(name, loc, low, high);
    return true;
}

// Parses the parenthesized index profile of an array type declaration,
// appending each valid index definition to the given vector.
void Parser::parseArrayIndexProfile(NodeVector &indices)
{
    assert(currentTokenIs(Lexer::TKN_LPAREN));
    ignoreToken();

    // Diagnose empty index profiles.
    if (reduceToken(Lexer::TKN_RPAREN)) {
        report(diag::EMPTY_ARRAY_TYPE_INDICES);
        return;
    }

    do {
        Node index = parseDSTDefinition(true);
        if (index.isValid())
            indices.push_back(index);
        else
            seekTokens(Lexer::TKN_COMMA, Lexer::TKN_RPAREN);
    } while (reduceToken(Lexer::TKN_COMMA));

    if (!requireToken(Lexer::TKN_RPAREN))
        seekCloseParen();
}

// Parses an array type declaration:  "array (<indices>) of <component>".
// Returns true when the declaration was accepted by the client.
bool Parser::parseArrayTypeDecl(IdentifierInfo *name, Location loc)
{
    assert(currentTokenIs(Lexer::TKN_ARRAY));
    ignoreToken();

    if (!currentTokenIs(Lexer::TKN_LPAREN))
        return false;

    NodeVector indices;
    parseArrayIndexProfile(indices);

    if (indices.empty() || !requireToken(Lexer::TKN_OF)) {
        seekSemi();
        return false;
    }

    Node component = parseName();
    if (component.isInvalid()) {
        seekSemi();
        return false;
    }

    client.acceptArrayDecl(name, loc, indices, component);
    return true;
}

// Parses a record type declaration, including the "null record" shorthand.
// Each component is reported to the client between begin/endRecord callbacks.
// Returns true when the closing "end record" was parsed.
bool Parser::parseRecordTypeDecl(IdentifierInfo *name, Location loc)
{
    assert(currentTokenIs(Lexer::TKN_RECORD) ||
           currentTokenIs(Lexer::TKN_NULL));

    client.beginRecord(name, loc);

    // Handle the "null record" shorthand.
    if (currentTokenIs(Lexer::TKN_NULL) && nextTokenIs(Lexer::TKN_RECORD)) {
        ignoreToken();          // Ignore TKN_NULL.
        ignoreToken();          // Ignore TKN_RECORD.
        client.endRecord();
        return true;
    }
    else
        ignoreToken();          // Ignore TKN_RECORD.

    do {
        // A "null;" component contributes nothing.
        if (reduceToken(Lexer::TKN_NULL)) {
            requireToken(Lexer::TKN_SEMI);
            continue;
        }

        Location loc = currentLocation();
        IdentifierInfo *componentName = parseIdentifier();
        if (!componentName || !requireToken(Lexer::TKN_COLON))
            seekSemi();
        else {
            Node type = parseName();
            if (type.isValid())
                client.acceptRecordComponent(componentName, loc, type);
            else
                seekSemi();
        }

        requireToken(Lexer::TKN_SEMI);
    } while (!currentTokenIs(Lexer::TKN_END) &&
             !currentTokenIs(Lexer::TKN_EOT));

    client.endRecord();
    return requireToken(Lexer::TKN_END) && requireToken(Lexer::TKN_RECORD);
}

// Parses an access type declaration:  "access <name>".  Returns true when
// the declaration was accepted by the client.
bool Parser::parseAccessTypeDecl(IdentifierInfo *name, Location loc)
{
    assert(currentTokenIs(Lexer::TKN_ACCESS));
    ignoreToken();

    Node subtypeNode = parseName();

    if (subtypeNode.isInvalid())
        return false;

    client.acceptAccessTypeDecl(name, loc, subtypeNode);
    return true;
}

// Parses a single top-level declaration (a signature, domain, or generic
// capsule).  Returns false at the end of the token stream or when an
// unexpected token appears at top level.
bool Parser::parseTopLevelDeclaration()
{
    for (;;) {
        switch (currentTokenCode()) {
        case Lexer::TKN_SIGNATURE:
        case Lexer::TKN_DOMAIN:
        case Lexer::TKN_GENERIC:
            parseModel();
            return true;

        case Lexer::TKN_EOT:
            return false;
            break;

        default:
            // An invalid token was found at top level. Do not try to recover.
            report(diag::UNEXPECTED_TOKEN) << currentToken().getString();
            return false;
        }
    }
}

// Converts a character array representing a Comma integer literal into an
// llvm::APInt. The bit width of the resulting APInt is always set to the
// minimal number of bits needed to represent the given number.
01423 void Parser::decimalLiteralToAPInt(const char *start, unsigned length, 01424 llvm::APInt &value) 01425 { 01426 std::string digits; 01427 for (const char *cursor = start; cursor != start + length; ++cursor) { 01428 char ch = *cursor; 01429 if (ch != '_') 01430 digits.push_back(ch); 01431 } 01432 assert(!digits.empty() && "Empty string literal!"); 01433 01434 // Get the binary value and adjust the number of bits to an accurate width. 01435 unsigned numBits = llvm::APInt::getBitsNeeded(digits, 10); 01436 value = llvm::APInt(numBits, digits, 10); 01437 if (value == 0) 01438 numBits = 1; 01439 else 01440 numBits = value.getActiveBits(); 01441 value.zextOrTrunc(numBits); 01442 } 01443 01444 void Parser::parseDeclarationPragma() 01445 { 01446 assert(currentTokenIs(Lexer::TKN_PRAGMA)); 01447 ignoreToken(); 01448 01449 Location loc = currentLocation(); 01450 IdentifierInfo *name = parseIdentifier(); 01451 01452 if (!name) 01453 return; 01454 01455 llvm::StringRef ref(name->getString()); 01456 pragma::PragmaID ID = pragma::getPragmaID(ref); 01457 01458 if (ID == pragma::UNKNOWN_PRAGMA) { 01459 report(loc, diag::UNKNOWN_PRAGMA) << name; 01460 return; 01461 } 01462 01463 // Currently, the only pragma accepted in a declaration context is Import. 01464 // When the set of valid pragmas expands, special parsers will be written to 01465 // parse the arguments. 01466 switch (ID) { 01467 default: 01468 report(loc, diag::INVALID_PRAGMA_CONTEXT) << name; 01469 break; 01470 01471 case pragma::Import: 01472 parsePragmaImport(loc); 01473 break; 01474 } 01475 } 01476 01477 void Parser::parsePragmaImport(Location pragmaLoc) 01478 { 01479 if (!requireToken(Lexer::TKN_LPAREN)) 01480 return; 01481 01482 // The first argument is an identifier naming the import convention. The 01483 // parser does not know anything about convention names. 
01484 Location conventionLoc = currentLocation(); 01485 IdentifierInfo *conventionName = parseIdentifier(); 01486 if (!conventionName || !requireToken(Lexer::TKN_COMMA)) { 01487 seekCloseParen(); 01488 return; 01489 } 01490 01491 // The second argument is the name of the local declaration corresponding to 01492 // the imported entity. 01493 Location entityLoc = currentLocation(); 01494 IdentifierInfo *entityName = parseFunctionIdentifier(); 01495 if (!entityName || !requireToken(Lexer::TKN_COMMA)) { 01496 seekCloseParen(); 01497 return; 01498 } 01499 01500 // Finally, the external name. This is a general expression. 01501 Node externalName = parseExpr(); 01502 if (externalName.isInvalid() || !requireToken(Lexer::TKN_RPAREN)) { 01503 seekCloseParen(); 01504 return; 01505 } 01506 01507 client.acceptPragmaImport(pragmaLoc, 01508 conventionName, conventionLoc, 01509 entityName, entityLoc, externalName); 01510 } 01511 01512 Node Parser::parseDSTDefinition(bool acceptDiamond) 01513 { 01514 // We are always called to parse the control of a for statement or an array 01515 // index specification. We need to distinguish between names which denote 01516 // subtype marks and simple ranges. Use use a knowledge of our context to 01517 // determine the difference. 01518 // 01519 // Specultively parse a name. If the parse suceeds look at the following 01520 // token. If it is TKN_RANGE then we definitely have a subtype mark. If we 01521 // were called from a loop context we could also have TKN_LOOP. If we were 01522 // called from an array index context we could have TKN_COMMA or TKN_RPAREN. 01523 // 01524 // An alternative strategy would be to parse a name and look for infix 01525 // operators and TKN_DDOT. 
01526 bool rangeFollows = false; 01527 Lexer::Token savedToken = currentToken(); 01528 lexer.beginExcursion(); 01529 01530 if (consumeName()) { 01531 switch (currentTokenCode()) { 01532 default: 01533 rangeFollows = true; 01534 break; 01535 case Lexer::TKN_RANGE: 01536 case Lexer::TKN_LOOP: 01537 case Lexer::TKN_COMMA: 01538 case Lexer::TKN_RPAREN: 01539 rangeFollows = false; 01540 break; 01541 } 01542 } 01543 else 01544 rangeFollows = true; 01545 01546 lexer.endExcursion(); 01547 setCurrentToken(savedToken); 01548 01549 if (rangeFollows) { 01550 // FIXME: Should be parsing simple expressions here. 01551 Node lower = parseExpr(); 01552 if (lower.isInvalid() || !requireToken(Lexer::TKN_DDOT)) 01553 return getInvalidNode(); 01554 01555 Node upper = parseExpr(); 01556 if (upper.isInvalid()) 01557 return getInvalidNode(); 01558 else 01559 return client.acceptDSTDefinition(lower, upper); 01560 } 01561 01562 Node name = parseName(Accept_Range_Attribute); 01563 if (name.isInvalid()) 01564 return getInvalidNode(); 01565 01566 if (reduceToken(Lexer::TKN_RANGE)) { 01567 if (currentTokenIs(Lexer::TKN_DIAMOND)) { 01568 Location loc = ignoreToken(); 01569 if (acceptDiamond) 01570 return client.acceptDSTDefinition(name, true); 01571 else { 01572 report(loc, diag::UNEXPECTED_TOKEN) << 01573 Lexer::tokenString(Lexer::TKN_DIAMOND); 01574 return getInvalidNode(); 01575 } 01576 } 01577 01578 // FIXME: We should parse simple expressions here. 01579 Node lower = parseExpr(); 01580 01581 if (lower.isInvalid() || !requireToken(Lexer::TKN_DDOT)) 01582 return getInvalidNode(); 01583 01584 Node upper = parseExpr(); 01585 if (upper.isInvalid()) 01586 return getInvalidNode(); 01587 01588 return client.acceptDSTDefinition(name, lower, upper); 01589 } 01590 else 01591 return client.acceptDSTDefinition(name, false); 01592 }