mirror of https://github.com/x64dbg/btparser
AStyle
parent 87003fef8d
commit c8ca8305e2
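This is a pure formatting pass; no behavior changes. Throughout the diff the space between control keywords and their opening parenthesis is removed (if (x) becomes if(x)), reference pads change (const auto& becomes const auto &), single-line switch cases are split onto two lines, and the trailing while of a do-while loop moves onto its own line. The repository's actual AStyle configuration is not shown on this page, so the invocation below is only an assumption about the kind of command that produces such output (astyle, --style=allman, --indent=spaces=4, --unpad-paren, --pad-oper and --recursive are real Artistic Style options):

astyle --style=allman --indent=spaces=4 --unpad-paren --pad-oper --recursive "btparser/*.cpp" "btparser/*.h"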
@@ -11,7 +11,7 @@ namespace AST
 using uptr = unique_ptr<T>;

 template <class T, class... Args>
-static typename enable_if<!is_array<T>::value, unique_ptr<T>>::type make_uptr(Args &&... args)
+static typename enable_if < !is_array<T>::value, unique_ptr<T >>::type make_uptr(Args && ... args)
 {
 return uptr<T>(new T(std::forward<Args>(args)...));
 }
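The helper touched above is a make_unique-style factory restricted to non-array types, paired with the uptr alias at the top of the hunk. A self-contained usage sketch follows; the using directive, the template head of the alias and the Number struct are assumptions for illustration and are not taken from this commit:

#include <memory>
#include <type_traits>
#include <utility>

using namespace std; // the hunk uses unqualified unique_ptr/enable_if, so some using directive is assumed

template <class T>
using uptr = unique_ptr<T>;

template <class T, class... Args>
static typename enable_if<!is_array<T>::value, unique_ptr<T>>::type make_uptr(Args &&... args)
{
    return uptr<T>(new T(std::forward<Args>(args)...)); // perfect-forward the constructor arguments
}

struct Number { unsigned long long Value; explicit Number(unsigned long long v) : Value(v) { } }; // hypothetical node type

int main()
{
    uptr<Number> num = make_uptr<Number>(42); // 42 is forwarded to Number's constructor
    return num->Value == 42 ? 0 : 1;
}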
@@ -25,11 +25,11 @@ static const char* convertNumber(const char* str, uint64_t & result, int radix)
 errno = 0;
 char* end;
 result = strtoull(str, &end, radix);
-if (!result && end == str)
+if(!result && end == str)
 return "not a number";
-if (result == ULLONG_MAX && errno)
+if(result == ULLONG_MAX && errno)
 return "does not fit";
-if (*end)
+if(*end)
 return "str not completely consumed";
 return nullptr;
 }
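convertNumber above is the shared number-parsing helper the lexer later uses for hex escapes and numeric literals: it writes the parsed value through result and returns nullptr on success or a short error string on failure. A self-contained caller sketch (the helper body is restated from the hunk; main and the example input are illustrative assumptions):

#include <cerrno>
#include <climits>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// nullptr on success, error text on failure, same contract as the hunk above
static const char* convertNumber(const char* str, uint64_t & result, int radix)
{
    errno = 0;
    char* end;
    result = strtoull(str, &end, radix);
    if(!result && end == str)
        return "not a number";
    if(result == ULLONG_MAX && errno)
        return "does not fit";
    if(*end)
        return "str not completely consumed";
    return nullptr;
}

int main()
{
    uint64_t value = 0;
    if(const char* error = convertNumber("ff", value, 16)) // parses to 255
        printf("error: %s\n", error);
    else
        printf("parsed: %llu\n", (unsigned long long)value);
}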
@@ -54,17 +54,17 @@ void Lexer::SetInputData(const std::string & data)

 bool Lexer::DoLexing(std::vector<TokenState> & tokens, std::string & error)
 {
-while (true)
+while(true)
 {
 auto token = getToken();
 mState.Token = token;
-if (token == tok_error)
+if(token == tok_error)
 {
 error = StringUtils::sprintf("line %d, col %d: %s", mState.CurLine + 1, mState.LineIndex, mError.c_str());
 return false;
 }
 tokens.push_back(mState);
-if (token == tok_eof)
+if(token == tok_eof)
 break;
 }
 return true;
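DoLexing above tokenizes the whole input in one pass and reports the first error with line and column information. A minimal driver sketch, assuming TokenState is a public nested type of Lexer (as the Lexer::tok_* usage later in this diff suggests), that the header is named lexer.h, and a placeholder input path:

#include <cstdio>
#include <string>
#include <vector>
#include "lexer.h" // header name assumed

int main()
{
    Lexer lexer;
    std::vector<Lexer::TokenState> tokens;
    std::string error;
    if(!lexer.ReadInputFile("tests\\example.bt")) // hypothetical template file
        printf("failed to read input\n");
    else if(!lexer.DoLexing(tokens, error))
        printf("lexing failed: %s\n", error.c_str()); // error already carries "line %d, col %d"
    else
        printf("lexed %zu tokens\n", tokens.size());
    return 0;
}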
@@ -73,7 +73,7 @@ bool Lexer::DoLexing(std::vector<TokenState> & tokens, std::string & error)
 bool Lexer::Test(const std::function<void(const std::string & line)> & lexEnum, bool output)
 {
 size_t line = 0;
-if (output)
+if(output)
 lexEnum("1: ");
 Token tok;
 std::string toks;
@@ -82,10 +82,10 @@ bool Lexer::Test(const std::function<void(const std::string & line)> & lexEnum,
 do
 {
 tok = getToken();
-if (!output)
+if(!output)
 continue;
 toks.clear();
-while (line < mState.CurLine)
+while(line < mState.CurLine)
 {
 line++;
 sprintf_s(newlineText, "\n%d: ", line + 1);
@@ -94,11 +94,12 @@ bool Lexer::Test(const std::function<void(const std::string & line)> & lexEnum,
 toks.append(TokString(tok));
 appendCh(toks, ' ');
 lexEnum(toks);
-} while (tok != tok_eof && tok != tok_error);
-if (tok != tok_error && tok != tok_eof)
+}
+while(tok != tok_eof && tok != tok_error);
+if(tok != tok_error && tok != tok_eof)
 tok = reportError("lexer did not finish at the end of the file");
-for (const auto& warning : mWarnings)
-if (output)
+for(const auto & warning : mWarnings)
+if(output)
 lexEnum("\nwarning: " + warning);
 return tok != tok_error;
 }
@@ -106,76 +107,76 @@ bool Lexer::Test(const std::function<void(const std::string & line)> & lexEnum,
 Lexer::Token Lexer::getToken()
 {
 //skip whitespace
-while (isspace(mLastChar))
+while(isspace(mLastChar))
 {
-if (mLastChar == '\n')
+if(mLastChar == '\n')
 signalNewLine();
 nextChar();
 }

 //skip \\[\r\n]
-if (mLastChar == '\\' && (peekChar() == '\r' || peekChar() == '\n'))
+if(mLastChar == '\\' && (peekChar() == '\r' || peekChar() == '\n'))
 {
 nextChar();
 return getToken();
 }

 //character literal
-if (mLastChar == '\'')
+if(mLastChar == '\'')
 {
 std::string charLit;
-while (true)
+while(true)
 {
 nextChar();
-if (mLastChar == EOF) //end of file
+if(mLastChar == EOF) //end of file
 return reportError("unexpected end of file in character literal (1)");
-if (mLastChar == '\r' || mLastChar == '\n')
+if(mLastChar == '\r' || mLastChar == '\n')
 return reportError("unexpected newline in character literal (1)");
-if (mLastChar == '\'') //end of character literal
+if(mLastChar == '\'') //end of character literal
 {
-if (charLit.length() != 1)
+if(charLit.length() != 1)
 return reportError(StringUtils::sprintf("invalid character literal '%s'", charLit.c_str()));
 mState.CharLit = charLit[0];
 nextChar();
 return tok_charlit;
 }
-if (mLastChar == '\\') //escape sequence
+if(mLastChar == '\\') //escape sequence
 {
 nextChar();
-if (mLastChar == EOF)
+if(mLastChar == EOF)
 return reportError("unexpected end of file in character literal (2)");
-if (mLastChar == '\r' || mLastChar == '\n')
+if(mLastChar == '\r' || mLastChar == '\n')
 return reportError("unexpected newline in character literal (2)");
-if (mLastChar == '\'' || mLastChar == '\"' || mLastChar == '?' || mLastChar == '\\')
+if(mLastChar == '\'' || mLastChar == '\"' || mLastChar == '?' || mLastChar == '\\')
 mLastChar = mLastChar;
-else if (mLastChar == 'a')
+else if(mLastChar == 'a')
 mLastChar = '\a';
-else if (mLastChar == 'b')
+else if(mLastChar == 'b')
 mLastChar = '\b';
-else if (mLastChar == 'f')
+else if(mLastChar == 'f')
 mLastChar = '\f';
-else if (mLastChar == 'n')
+else if(mLastChar == 'n')
 mLastChar = '\n';
-else if (mLastChar == 'r')
+else if(mLastChar == 'r')
 mLastChar = '\r';
-else if (mLastChar == 't')
+else if(mLastChar == 't')
 mLastChar = '\t';
-else if (mLastChar == 'v')
+else if(mLastChar == 'v')
 mLastChar = '\v';
-else if (mLastChar == '0')
+else if(mLastChar == '0')
 mLastChar = '\0';
-else if (mLastChar == 'x') //\xHH
+else if(mLastChar == 'x') //\xHH
 {
 auto ch1 = nextChar();
 auto ch2 = nextChar();
-if (isxdigit(ch1) && isxdigit(ch2))
+if(isxdigit(ch1) && isxdigit(ch2))
 {
 char byteStr[3] = "";
 byteStr[0] = ch1;
 byteStr[1] = ch2;
 uint64_t hexData;
 auto error = convertNumber(byteStr, hexData, 16);
-if (error)
+if(error)
 return reportError(StringUtils::sprintf("convertNumber failed (%s) for hex sequence \"\\x%c%c\" in character literal", error, ch1, ch2));
 mLastChar = hexData & 0xFF;
 }
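As a worked example of the \xHH branch above: for the character literal '\x41', ch1 is '4' and ch2 is '1', byteStr becomes "41", convertNumber("41", hexData, 16) succeeds with hexData = 0x41, and mLastChar ends up holding 'A' (0x41).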
@@ -190,58 +191,58 @@ Lexer::Token Lexer::getToken()
 }

 //string literal
-if (mLastChar == '\"')
+if(mLastChar == '\"')
 {
 mState.StringLit.clear();
-while (true)
+while(true)
 {
 nextChar();
-if (mLastChar == EOF) //end of file
+if(mLastChar == EOF) //end of file
 return reportError("unexpected end of file in string literal (1)");
-if (mLastChar == '\r' || mLastChar == '\n')
+if(mLastChar == '\r' || mLastChar == '\n')
 return reportError("unexpected newline in string literal (1)");
-if (mLastChar == '\"') //end of string literal
+if(mLastChar == '\"') //end of string literal
 {
 nextChar();
 return tok_stringlit;
 }
-if (mLastChar == '\\') //escape sequence
+if(mLastChar == '\\') //escape sequence
 {
 nextChar();
-if (mLastChar == EOF)
+if(mLastChar == EOF)
 return reportError("unexpected end of file in string literal (2)");
-if (mLastChar == '\r' || mLastChar == '\n')
+if(mLastChar == '\r' || mLastChar == '\n')
 return reportError("unexpected newline in string literal (2)");
-if (mLastChar == '\'' || mLastChar == '\"' || mLastChar == '?' || mLastChar == '\\')
+if(mLastChar == '\'' || mLastChar == '\"' || mLastChar == '?' || mLastChar == '\\')
 mLastChar = mLastChar;
-else if (mLastChar == 'a')
+else if(mLastChar == 'a')
 mLastChar = '\a';
-else if (mLastChar == 'b')
+else if(mLastChar == 'b')
 mLastChar = '\b';
-else if (mLastChar == 'f')
+else if(mLastChar == 'f')
 mLastChar = '\f';
-else if (mLastChar == 'n')
+else if(mLastChar == 'n')
 mLastChar = '\n';
-else if (mLastChar == 'r')
+else if(mLastChar == 'r')
 mLastChar = '\r';
-else if (mLastChar == 't')
+else if(mLastChar == 't')
 mLastChar = '\t';
-else if (mLastChar == 'v')
+else if(mLastChar == 'v')
 mLastChar = '\v';
-else if (mLastChar == '0')
+else if(mLastChar == '0')
 mLastChar = '\0';
-else if (mLastChar == 'x') //\xHH
+else if(mLastChar == 'x') //\xHH
 {
 auto ch1 = nextChar();
 auto ch2 = nextChar();
-if (isxdigit(ch1) && isxdigit(ch2))
+if(isxdigit(ch1) && isxdigit(ch2))
 {
 char byteStr[3] = "";
 byteStr[0] = ch1;
 byteStr[1] = ch2;
 uint64_t hexData;
 auto error = convertNumber(byteStr, hexData, 16);
-if (error)
+if(error)
 return reportError(StringUtils::sprintf("convertNumber failed (%s) for hex sequence \"\\x%c%c\" in string literal", error, ch1, ch2));
 mLastChar = hexData & 0xFF;
 }
@@ -256,11 +257,11 @@ Lexer::Token Lexer::getToken()
 }

 //identifier/keyword
-if (isalpha(mLastChar) || mLastChar == '_') //[a-zA-Z_]
+if(isalpha(mLastChar) || mLastChar == '_') //[a-zA-Z_]
 {
 mState.IdentifierStr = mLastChar;
 nextChar();
-while (isalnum(mLastChar) || mLastChar == '_') //[0-9a-zA-Z_]
+while(isalnum(mLastChar) || mLastChar == '_') //[0-9a-zA-Z_]
 {
 appendCh(mState.IdentifierStr, mLastChar);
 nextChar();
@@ -268,66 +269,68 @@ Lexer::Token Lexer::getToken()

 //keywords
 auto found = mKeywordMap.find(mState.IdentifierStr);
-if (found != mKeywordMap.end())
+if(found != mKeywordMap.end())
 return found->second;

 return tok_identifier;
 }

 //hex numbers
-if (mLastChar == '0' && peekChar() == 'x') //0x
+if(mLastChar == '0' && peekChar() == 'x') //0x
 {
 nextChar(); //consume the 'x'
 mNumStr.clear();

-while (isxdigit(nextChar())) //[0-9a-fA-F]*
+while(isxdigit(nextChar())) //[0-9a-fA-F]*
 appendCh(mNumStr, mLastChar);

-if (!mNumStr.length()) //check for error condition
+if(!mNumStr.length()) //check for error condition
 return reportError("no hex digits after \"0x\" prefix");

 auto error = convertNumber(mNumStr.c_str(), mState.NumberVal, 16);
-if (error)
+if(error)
 return reportError(StringUtils::sprintf("convertNumber failed (%s) on hexadecimal number", error));
 mIsHexNumberVal = true;
 return tok_number;
 }
-if (isdigit(mLastChar)) //[0-9]
+if(isdigit(mLastChar)) //[0-9]
 {
 mNumStr = mLastChar;

-while (isdigit(nextChar())) //[0-9]*
+while(isdigit(nextChar())) //[0-9]*
 mNumStr += mLastChar;

 auto error = convertNumber(mNumStr.c_str(), mState.NumberVal, 10);
-if (error)
+if(error)
 return reportError(StringUtils::sprintf("convertNumber failed (%s) on decimal number", error));
 mIsHexNumberVal = false;
 return tok_number;
 }

 //comments
-if (mLastChar == '/' && peekChar() == '/') //line comment
+if(mLastChar == '/' && peekChar() == '/') //line comment
 {
 do
 {
-if (mLastChar == '\n')
+if(mLastChar == '\n')
 signalNewLine();
 nextChar();
-} while (!(mLastChar == EOF || mLastChar == '\n'));
+}
+while(!(mLastChar == EOF || mLastChar == '\n'));

 return getToken(); //interpret the next line
 }
-if (mLastChar == '/' && peekChar() == '*') //block comment
+if(mLastChar == '/' && peekChar() == '*') //block comment
 {
 do
 {
-if (mLastChar == '\n')
+if(mLastChar == '\n')
 signalNewLine();
 nextChar();
-} while (!(mLastChar == EOF || mLastChar == '*' && peekChar() == '/'));
+}
+while(!(mLastChar == EOF || mLastChar == '*' && peekChar() == '/'));

-if (mLastChar == EOF) //unexpected end of file
+if(mLastChar == EOF) //unexpected end of file
 {
 mState.LineIndex++;
 return reportError("unexpected end of file in block comment");
@@ -340,7 +343,7 @@ Lexer::Token Lexer::getToken()

 //operators
 auto opFound = mOpTripleMap.find(MAKE_OP_TRIPLE(mLastChar, peekChar(), peekChar(1)));
-if (opFound != mOpTripleMap.end())
+if(opFound != mOpTripleMap.end())
 {
 nextChar();
 nextChar();
@@ -348,21 +351,21 @@ Lexer::Token Lexer::getToken()
 return opFound->second;
 }
 opFound = mOpDoubleMap.find(MAKE_OP_DOUBLE(mLastChar, peekChar()));
-if (opFound != mOpDoubleMap.end())
+if(opFound != mOpDoubleMap.end())
 {
 nextChar();
 nextChar();
 return opFound->second;
 }
 opFound = mOpSingleMap.find(MAKE_OP_SINGLE(mLastChar));
-if (opFound != mOpSingleMap.end())
+if(opFound != mOpSingleMap.end())
 {
 nextChar();
 return opFound->second;
 }

 //end of file
-if (mLastChar == EOF)
+if(mLastChar == EOF)
 return tok_eof;

 //unknown character
@@ -437,11 +440,16 @@ std::string Lexer::TokString(const TokenState & ts)
 {
 switch(ts.Token)
 {
-case tok_eof: return "tok_eof";
-case tok_error: return StringUtils::sprintf("error(line %d, col %d, \"%s\")", ts.CurLine + 1, ts.LineIndex, mError.c_str());
-case tok_identifier: return ts.IdentifierStr;
-case tok_number: return StringUtils::sprintf(mIsHexNumberVal ? "0x%llX" : "%llu", ts.NumberVal);
-case tok_stringlit: return StringUtils::sprintf("\"%s\"", StringUtils::Escape(ts.StringLit).c_str());
+case tok_eof:
+return "tok_eof";
+case tok_error:
+return StringUtils::sprintf("error(line %d, col %d, \"%s\")", ts.CurLine + 1, ts.LineIndex, mError.c_str());
+case tok_identifier:
+return ts.IdentifierStr;
+case tok_number:
+return StringUtils::sprintf(mIsHexNumberVal ? "0x%llX" : "%llu", ts.NumberVal);
+case tok_stringlit:
+return StringUtils::sprintf("\"%s\"", StringUtils::Escape(ts.StringLit).c_str());
 case tok_charlit:
 {
 std::string s;
@ -460,13 +468,18 @@ std::string Lexer::TokString(const TokenState & ts)
|
||||||
|
|
||||||
std::string Lexer::TokString(Token tok)
|
std::string Lexer::TokString(Token tok)
|
||||||
{
|
{
|
||||||
switch (tok)
|
switch(tok)
|
||||||
{
|
{
|
||||||
case tok_eof: return "tok_eof";
|
case tok_eof:
|
||||||
case tok_error: return StringUtils::sprintf("error(line %d, col %d, \"%s\")", mState.CurLine + 1, mState.LineIndex, mError.c_str());
|
return "tok_eof";
|
||||||
case tok_identifier: return mState.IdentifierStr;
|
case tok_error:
|
||||||
case tok_number: return StringUtils::sprintf(mIsHexNumberVal ? "0x%llX" : "%llu", mState.NumberVal);
|
return StringUtils::sprintf("error(line %d, col %d, \"%s\")", mState.CurLine + 1, mState.LineIndex, mError.c_str());
|
||||||
case tok_stringlit: return StringUtils::sprintf("\"%s\"", StringUtils::Escape(mState.StringLit).c_str());
|
case tok_identifier:
|
||||||
|
return mState.IdentifierStr;
|
||||||
|
case tok_number:
|
||||||
|
return StringUtils::sprintf(mIsHexNumberVal ? "0x%llX" : "%llu", mState.NumberVal);
|
||||||
|
case tok_stringlit:
|
||||||
|
return StringUtils::sprintf("\"%s\"", StringUtils::Escape(mState.StringLit).c_str());
|
||||||
case tok_charlit:
|
case tok_charlit:
|
||||||
{
|
{
|
||||||
std::string s;
|
std::string s;
|
||||||
|
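A worked example of the number formatting in both TokString overloads: with NumberVal equal to 255, the returned text is "0xFF" when mIsHexNumberVal is set (the literal was written with a 0x prefix) and "255" otherwise, because the format string is chosen between "0x%llX" and "%llu".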
@@ -476,7 +489,7 @@ std::string Lexer::TokString(Token tok)
 default:
 {
 auto found = mReverseTokenMap.find(Token(tok));
-if (found != mReverseTokenMap.end())
+if(found != mReverseTokenMap.end())
 return found->second;
 return "<UNKNOWN TOKEN>";
 }
@@ -485,10 +498,10 @@ std::string Lexer::TokString(Token tok)

 int Lexer::peekChar(size_t distance)
 {
-if (mIndex + distance >= mInput.size())
+if(mIndex + distance >= mInput.size())
 return EOF;
 auto ch = mInput[mIndex + distance];
-if (ch == '\0')
+if(ch == '\0')
 {
 reportWarning(StringUtils::sprintf("\\0 character in file data"));
 return peekChar(distance + 1);
@@ -498,11 +511,11 @@ int Lexer::peekChar(size_t distance)

 int Lexer::readChar()
 {
-if (mIndex == mInput.size())
+if(mIndex == mInput.size())
 return EOF;
 auto ch = mInput[mIndex++];
 mState.LineIndex++;
-if (ch == '\0')
+if(ch == '\0')
 {
 reportWarning(StringUtils::sprintf("\\0 character in file data"));
 return readChar();
@@ -512,12 +525,12 @@ int Lexer::readChar()

 bool Lexer::checkString(const std::string & expected)
 {
-for (size_t i = 0; i < expected.size(); i++)
+for(size_t i = 0; i < expected.size(); i++)
 {
 auto ch = peekChar(i);
-if (ch == EOF)
+if(ch == EOF)
 return false;
-if (ch != uint8_t(expected[i]))
+if(ch != uint8_t(expected[i]))
 return false;
 }
 mIndex += expected.size();
@@ -7,7 +7,7 @@

 bool TestLexer(Lexer & lexer, const std::string & filename)
 {
-if (!lexer.ReadInputFile("tests\\" + filename))
+if(!lexer.ReadInputFile("tests\\" + filename))
 {
 printf("failed to read \"%s\"\n", filename.c_str());
 return false;
@@ -19,12 +19,12 @@ bool TestLexer(Lexer & lexer, const std::string & filename)
 actual.append(line);
 });
 std::string expected;
-if (FileHelper::ReadAllText("tests\\exp_lex\\" + filename, expected) && expected == actual)
+if(FileHelper::ReadAllText("tests\\exp_lex\\" + filename, expected) && expected == actual)
 {
 printf("lexer test for \"%s\" success!\n", filename.c_str());
 return true;
 }
-if (success)
+if(success)
 return true;
 printf("lexer test for \"%s\" failed...\n", filename.c_str());
 FileHelper::WriteAllText("expected.out", expected);
@@ -34,7 +34,7 @@ bool TestLexer(Lexer & lexer, const std::string & filename)

 bool DebugLexer(Lexer & lexer, const std::string & filename, bool output)
 {
-if (!lexer.ReadInputFile("tests\\" + filename))
+if(!lexer.ReadInputFile("tests\\" + filename))
 {
 printf("failed to read \"%s\"\n", filename.c_str());
 return false;
@@ -43,14 +43,14 @@ bool DebugLexer(Lexer & lexer, const std::string & filename, bool output)
 {
 printf("%s", line.c_str());
 }, output);
-if (output)
+if(output)
 puts("");
 return success;
 }

 void GenerateExpected(Lexer & lexer, const std::string & filename)
 {
-if (!lexer.ReadInputFile("tests\\" + filename))
+if(!lexer.ReadInputFile("tests\\" + filename))
 {
 printf("failed to read \"%s\"\n", filename.c_str());
 return;
@@ -67,21 +67,21 @@ void GenerateExpected(Lexer & lexer, const std::string & filename)
 void GenerateExpectedTests()
 {
 Lexer lexer;
-for (auto file : testFiles)
+for(auto file : testFiles)
 GenerateExpected(lexer, file);
 }

 void RunLexerTests()
 {
 Lexer lexer;
-for (auto file : testFiles)
+for(auto file : testFiles)
 TestLexer(lexer, file);
 }

 void DebugLexerTests(bool output = true)
 {
 Lexer lexer;
-for (auto file : testFiles)
+for(auto file : testFiles)
 DebugLexer(lexer, file, output);
 }

@@ -9,12 +9,12 @@ Parser::Parser()

 bool Parser::ParseFile(const string & filename, string & error)
 {
-if (!mLexer.ReadInputFile(filename))
+if(!mLexer.ReadInputFile(filename))
 {
 error = "failed to read input file";
 return false;
 }
-if (!mLexer.DoLexing(mTokens, error))
+if(!mLexer.DoLexing(mTokens, error))
 return false;
 CurToken = mTokens[0];
 mBinaryTemplate = ParseBinaryTemplate();
@@ -23,7 +23,7 @@ bool Parser::ParseFile(const string & filename, string & error)

 void Parser::NextToken()
 {
-if (mIndex < mTokens.size() - 1)
+if(mIndex < mTokens.size() - 1)
 {
 mIndex++;
 CurToken = mTokens[mIndex];
@@ -38,15 +38,15 @@ void Parser::ReportError(const std::string & error)
 uptr<Block> Parser::ParseBinaryTemplate()
 {
 vector<uptr<StatDecl>> statDecls;
-while (true)
+while(true)
 {
 auto statDecl = ParseStatDecl();
-if (!statDecl)
+if(!statDecl)
 break;
 statDecls.push_back(move(statDecl));
 }
 auto binaryTemplate = make_uptr<Block>(move(statDecls));
-if (CurToken.Token != Lexer::tok_eof)
+if(CurToken.Token != Lexer::tok_eof)
 {
 ReportError("last token is not EOF");
 return nullptr;
@@ -57,11 +57,11 @@ uptr<Block> Parser::ParseBinaryTemplate()
 uptr<StatDecl> Parser::ParseStatDecl()
 {
 auto decl = ParseDecl();
-if (decl)
+if(decl)
 return move(decl);

 auto stat = ParseStat();
-if (stat)
+if(stat)
 return move(stat);

 ReportError("failed to parse StatDecl");
@@ -71,15 +71,15 @@ uptr<StatDecl> Parser::ParseStatDecl()
 uptr<Stat> Parser::ParseStat()
 {
 auto block = ParseBlock();
-if (block)
+if(block)
 return move(block);

 auto expr = ParseExpr();
-if (expr)
+if(expr)
 return move(expr);

 auto ret = ParseReturn();
-if (ret)
+if(ret)
 return move(ret);

 ReportError("failed to parse Stat");
@@ -88,13 +88,13 @@ uptr<Stat> Parser::ParseStat()

 uptr<Block> Parser::ParseBlock()
 {
-if (CurToken.Token != Lexer::tok_bropen) //'{'
+if(CurToken.Token != Lexer::tok_bropen) //'{'
 return nullptr;
 NextToken();

 vector<uptr<StatDecl>> statDecls;

-if (CurToken.Token == Lexer::tok_brclose) //'}'
+if(CurToken.Token == Lexer::tok_brclose) //'}'
 {
 NextToken();
 return make_uptr<Block>(move(statDecls));
@@ -111,11 +111,11 @@ uptr<Expr> Parser::ParseExpr()

 uptr<Return> Parser::ParseReturn()
 {
-if (CurToken.Token == Lexer::tok_return)
+if(CurToken.Token == Lexer::tok_return)
 {
 NextToken();
 auto expr = ParseExpr();
-if (!expr)
+if(!expr)
 {
 ReportError("failed to parse Return (ParseExpr failed)");
 return nullptr;
@@ -128,28 +128,28 @@ uptr<Return> Parser::ParseReturn()
 uptr<Decl> Parser::ParseDecl()
 {
 auto builtin = ParseBuiltinVar();
-if (builtin)
+if(builtin)
 return move(builtin);
 auto stru = ParseStruct();
-if (stru)
+if(stru)
 return move(stru);
 return nullptr;
 }

 uptr<BuiltinVar> Parser::ParseBuiltinVar()
 {
-if (CurToken.Token == Lexer::tok_uint) //TODO: properly handle types
+if(CurToken.Token == Lexer::tok_uint) //TODO: properly handle types
 {
 auto type = CurToken.Token;
 NextToken();
-if (CurToken.Token != Lexer::tok_identifier)
+if(CurToken.Token != Lexer::tok_identifier)
 {
 ReportError("failed to parse BuiltinVar (no identifier)");
 return nullptr;
 }
 auto id = CurToken.IdentifierStr;
 NextToken();
-if (CurToken.Token != Lexer::tok_semic)
+if(CurToken.Token != Lexer::tok_semic)
 {
 ReportError("failed to parse BuiltinVar (no semicolon)");
 return nullptr;
@@ -162,17 +162,17 @@ uptr<BuiltinVar> Parser::ParseBuiltinVar()

 uptr<Struct> Parser::ParseStruct()
 {
-if (CurToken.Token == Lexer::tok_struct)
+if(CurToken.Token == Lexer::tok_struct)
 {
 NextToken();
 string id;
-if (CurToken.Token == Lexer::tok_identifier)
+if(CurToken.Token == Lexer::tok_identifier)
 {
 id = CurToken.IdentifierStr;
 NextToken();
 }
 auto block = ParseBlock();
-if (!block)
+if(!block)
 {
 ReportError("failed to parse Struct (ParseBlock)");
 return nullptr;
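Taken together, the Parse* functions in the hunks above form a small recursive-descent parser. An informal grammar summary of the calls shown in this diff (a reading aid, not a grammar file from the repository):

// BinaryTemplate := StatDecl* EOF
// StatDecl       := Decl | Stat
// Stat           := Block | Expr | Return
// Decl           := BuiltinVar | Struct
// Block          := '{' StatDecl* '}'
// Struct         := 'struct' identifier? Block
// BuiltinVar     := 'uint' identifier ';'   (only tok_uint so far, per the TODO)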