From 52be3f62da85d2e6a4bba77c234b181a14354523 Mon Sep 17 00:00:00 2001
From: Dimitri Sokolyuk
Date: Wed, 24 Aug 2022 14:16:17 +0200
Subject: cleanup

---
 go.mod                    |  2 +-
 lexer/lexer.go            |  2 +-
 lexer/lexer_test.go       |  2 +-
 main.go                   | 10 ++--------
 object/object.go          |  3 +--
 parser/parser.go          |  7 +++----
 token/token.go            |  2 +-
 token/tokentype_string.go |  6 +++---
 8 files changed, 13 insertions(+), 21 deletions(-)

diff --git a/go.mod b/go.mod
index 34c713d..8197338 100644
--- a/go.mod
+++ b/go.mod
@@ -1,3 +1,3 @@
 module monkey
 
-go 1.12
+go 1.19
diff --git a/lexer/lexer.go b/lexer/lexer.go
index 12d4419..a0d4a5e 100644
--- a/lexer/lexer.go
+++ b/lexer/lexer.go
@@ -37,7 +37,7 @@ func (l *Lexer) NextToken() token.Token {
 		if l.peekChar() == '=' {
 			ch := l.ch
 			l.readChar()
-			tok = token.Token{Type: token.NOT_EQ, Literal: string(ch) + string(l.ch)}
+			tok = token.Token{Type: token.NOTEQ, Literal: string(ch) + string(l.ch)}
 		} else {
 			tok = newToken(token.BANG, l.ch)
 		}
diff --git a/lexer/lexer_test.go b/lexer/lexer_test.go
index e692fea..d66ad73 100644
--- a/lexer/lexer_test.go
+++ b/lexer/lexer_test.go
@@ -107,7 +107,7 @@ macro(x, y) { x + y; };
 		{token.INTEGER, "10"},
 		{token.SEMICOLON, ";"},
 		{token.INTEGER, "10"},
-		{token.NOT_EQ, "!="},
+		{token.NOTEQ, "!="},
 		{token.INTEGER, "9"},
 		{token.SEMICOLON, ";"},
 		{token.STRING, "foobar"},
diff --git a/main.go b/main.go
index 21d17e7..0badd88 100644
--- a/main.go
+++ b/main.go
@@ -3,18 +3,12 @@ package main
 import (
 	"fmt"
 	"os"
-	"os/user"
 
 	"monkey/repl"
 )
 
 func main() {
-	user, err := user.Current()
-	if err != nil {
-		panic(err)
-	}
-	fmt.Printf("Hello %s! This is the Monkey programming language!\n",
-		user.Username)
-	fmt.Printf("Feel free to type in commands\n")
+	fmt.Println("This is the Monkey programming language!")
+	fmt.Println("Feel free to type in commands")
 	repl.Start(os.Stdin, os.Stdout)
 }
diff --git a/object/object.go b/object/object.go
index 2754bfb..f77b871 100644
--- a/object/object.go
+++ b/object/object.go
@@ -170,8 +170,7 @@ func (h *Hash) Inspect() string {
 
 	var pairs []string
 	for _, pair := range h.Pairs {
-		pairs = append(pairs, fmt.Sprintf("%s: %s",
-			pair.Key.Inspect(), pair.Value.Inspect()))
+		pairs = append(pairs, fmt.Sprintf("%s: %s", pair.Key.Inspect(), pair.Value.Inspect()))
 	}
 
 	out.WriteString("{")
diff --git a/parser/parser.go b/parser/parser.go
index 910b1a1..725b0c6 100644
--- a/parser/parser.go
+++ b/parser/parser.go
@@ -22,7 +22,7 @@ const (
 
 var precedences = map[token.TokenType]int{
 	token.EQ:       EQUALS,
-	token.NOT_EQ:   EQUALS,
+	token.NOTEQ:    EQUALS,
 	token.LESS:     LESSGREATER,
 	token.MORE:     LESSGREATER,
 	token.PLUS:     SUM,
@@ -74,7 +74,7 @@ func New(l *lexer.Lexer) *Parser {
 		token.SLASH:    p.parseInfixExpression,
 		token.ASTERISK: p.parseInfixExpression,
 		token.EQ:       p.parseInfixExpression,
-		token.NOT_EQ:   p.parseInfixExpression,
+		token.NOTEQ:    p.parseInfixExpression,
 		token.LESS:     p.parseInfixExpression,
 		token.MORE:     p.parseInfixExpression,
 		token.LPAREN:   p.parseCallExpression,
@@ -116,8 +116,7 @@ func (p *Parser) Errors() []string {
 }
 
 func (p *Parser) peekError(t token.TokenType) {
-	msg := fmt.Sprintf("expected next token to be %s, got %s instead",
-		t, p.peekToken.Type)
+	msg := fmt.Sprintf("expected next token to be %s, got %s instead", t, p.peekToken.Type)
 	p.errors = append(p.errors, msg)
 }
 
diff --git a/token/token.go b/token/token.go
index ab33563..573483e 100644
--- a/token/token.go
+++ b/token/token.go
@@ -23,7 +23,7 @@ const (
 	LESS     // "<"
 	MORE     // ">"
 	EQ       // "=="
-	NOT_EQ   // "!="
+	NOTEQ    // "!="
 
 	// Delimiters
 	COMMA     // ","
diff --git a/token/tokentype_string.go b/token/tokentype_string.go
index 2245135..44b2c70 100644
--- a/token/tokentype_string.go
+++ b/token/tokentype_string.go
@@ -22,7 +22,7 @@ func _() {
 	_ = x[LESS-11]
 	_ = x[MORE-12]
 	_ = x[EQ-13]
-	_ = x[NOT_EQ-14]
+	_ = x[NOTEQ-14]
 	_ = x[COMMA-15]
 	_ = x[SEMICOLON-16]
 	_ = x[COLON-17]
@@ -42,9 +42,9 @@ func _() {
 	_ = x[MACRO-31]
 }
 
-const _TokenType_name = "ILLEGALEOFIDENTINTEGERSTRINGASSIGNPLUSMINUSBANGASTERISKSLASHLESSMOREEQNOT_EQCOMMASEMICOLONCOLONLPARENRPARENLBRACERBRACELBRACKETRBRACKETFUNCTIONLETTRUEFALSEIFELSERETURNMACRO"
+const _TokenType_name = "ILLEGALEOFIDENTINTEGERSTRINGASSIGNPLUSMINUSBANGASTERISKSLASHLESSMOREEQNOTEQCOMMASEMICOLONCOLONLPARENRPARENLBRACERBRACELBRACKETRBRACKETFUNCTIONLETTRUEFALSEIFELSERETURNMACRO"
 
-var _TokenType_index = [...]uint8{0, 7, 10, 15, 22, 28, 34, 38, 43, 47, 55, 60, 64, 68, 70, 76, 81, 90, 95, 101, 107, 113, 119, 127, 135, 143, 146, 150, 155, 157, 161, 167, 172}
+var _TokenType_index = [...]uint8{0, 7, 10, 15, 22, 28, 34, 38, 43, 47, 55, 60, 64, 68, 70, 75, 80, 89, 94, 100, 106, 112, 118, 126, 134, 142, 145, 149, 154, 156, 160, 166, 171}
 
 func (i TokenType) String() string {
 	if i < 0 || i >= TokenType(len(_TokenType_index)-1) {
-- 
cgit v1.2.3